| id (string, lengths 3-8) | content (string, lengths 100-981k) |
|---|---|
72470
|
from vehiculo import vehiculo
class autobus(vehiculo):
def tarifaAutobus(self):
return float(vehiculo.tarifa(self)) + ((vehiculo.random * 100) * 0.10)
|
72472
|
import os
import tempfile
import shutil
import multiprocessing
import pickle
from copy import deepcopy
from enum import Enum
from typing import Optional, List, Tuple, Dict, Any
from typing_extensions import Literal
import rdkit.Chem as Chem
from ccdc.docking import Docker as DockerGold
from ccdc.io import MoleculeReader, EntryWriter
from ccdc.protein import Protein
from pydantic import BaseModel
from dockstream.core.Schrodinger.Glide_docker import Parallelization
from dockstream.utils.enums.logging_enums import LoggingConfigEnum
from dockstream.utils.execute_external.Gold import GoldExecutor
from dockstream.core.docker import Docker
from dockstream.core.Gold.Gold_result_parser import GoldResultParser
from dockstream.utils.enums.Gold_enums import GoldLigandPreparationEnum
from dockstream.utils.enums.Gold_enums import GoldTargetKeywordEnum, GoldExecutablesEnum, GoldOutputEnum
from dockstream.utils.general_utils import gen_temp_file
from dockstream.utils.translations.molecule_translator import MoleculeTranslator
from dockstream.utils.dockstream_exceptions import DockingRunFailed
class GoldFitnessFunction(str, Enum):
GOLDSCORE = "goldscore"
CHEMSCORE = "chemscore"
ASP = "asp"
PLP = "plp"
class GoldResponseValue(str, Enum):
FITNESS = "fitness"
VALUE = "value"
class GoldParameters(BaseModel):
prefix_execution: Optional[str] = None
binary_location: Optional[str] = None
receptor_paths: Optional[List[str]] = None
time_limit_per_compound: Optional[int] = None
parallelization: Optional[Parallelization]
fitness_function: GoldFitnessFunction
response_value: GoldResponseValue = GoldResponseValue.FITNESS
early_termination: bool
autoscale: float # Autoscale percentage. very fast: 10, medium: 50, very slow: 100.
ndocks: int = 10
diverse_solutions: Optional[Tuple[bool, Optional[int], Optional[float]]] = None # If diverse solutions is enabled this will be (True, cluster size, rmsd), otherwise (False, None, None). TODO: rework for GUI.
def get(self, key: str) -> Any:
"""Temporary method to support nested_get"""
return self.dict()[key]
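# A minimal configuration sketch for GoldParameters (illustrative values only; the
# receptor path is hypothetical and omitted fields keep their defaults):
#
#     params = GoldParameters(
#         receptor_paths=["/path/to/target_cavity.pkl"],
#         fitness_function=GoldFitnessFunction.PLP,
#         response_value=GoldResponseValue.FITNESS,
#         early_termination=True,
#         autoscale=10,   # very fast: 10, medium: 50, very slow: 100
#         ndocks=10,
#     )
#
# These parameters are consumed by the Gold backend class defined below.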
_LP = GoldLigandPreparationEnum()
_TK = GoldTargetKeywordEnum()
_EE = GoldExecutablesEnum()
_ROE = GoldOutputEnum()
_LE = LoggingConfigEnum()
class Gold(Docker):
"""Interface to the Gold backend."""
backend: Literal["Gold"] = "Gold"
parameters: GoldParameters
_target_dict: Dict = None
_Gold_executor: GoldExecutor = None
_scoring_function_parameters: Dict[str, str] = None
class Config:
underscore_attrs_are_private = True
def __init__(self, **run_parameters):
# invoke base class's constructor first
super().__init__(**run_parameters)
# prepare and check Gold backend availability
self._check_Gold_backend_availability()
# parse the fitness function and response value set
self._parse_fitness_function()
# set the tag name for the scoring function and whether minimal or maximum values are better
self._scoring_function_parameters = self._get_scoring_function_parameters()
def _check_Gold_backend_availability(self):
self._Gold_executor = GoldExecutor(
prefix_execution=self.parameters.prefix_execution,
binary_location=self.parameters.binary_location)
if not self._Gold_executor.is_available():
raise DockingRunFailed("Cannot initialize Gold docker, as Gold backend is not available - abort.")
self._logger.log(f"Checked Gold backend availability (prefix_execution={self.parameters.prefix_execution}).", _LE.DEBUG)
def _parse_fitness_function(self):
self._logger.log(f"Set fitness function to {self.parameters.fitness_function} and response value to {self.parameters.response_value}.", _LE.DEBUG)
def _initialize_cavity(self, settings):
# load the target dictionary specification and initialize the cavity
target_path = self.parameters.receptor_paths[0]
with open(target_path, "rb") as file:
self._target_dict = pickle.load(file)
self._logger.log(f"Loaded pickled cavity dictionary stored in file {target_path}.", _LE.DEBUG)
if self._target_dict[_TK.VERSION] != _TK.CURRENT_VERSION:
self._logger.log(f"Version of pickled target ({self._target_dict[_TK.VERSION]}) is not the same as DockStream's ({_TK.CURRENT_VERSION}).", _LE.WARNING)
self._logger.log(f"Unpacked the target dictionary.", _LE.DEBUG)
tmpdir = tempfile.mkdtemp()
if self._target_dict[_TK.CAVITY_METHOD] == _TK.CAVITY_METHOD_REFERENCE:
# write ligand to temporary file (ending copied over in settings)
tmp_ref_ligand_path = gen_temp_file(suffix=self._target_dict[_TK.REFERENCE_LIGAND_FILENAME], dir=tmpdir)
with open(tmp_ref_ligand_path, 'w') as file:
for line in self._target_dict[_TK.REFERENCE_LIGAND]:
file.write(line)
self._logger.log(f"Wrote temporary ligand file {tmp_ref_ligand_path} with {len(self._target_dict[_TK.REFERENCE_LIGAND])} lines.", _LE.DEBUG)
# write target PDB to temporary file
tmp_target_path = gen_temp_file(suffix=".pdb", dir=tmpdir)
with open(tmp_target_path, 'w') as file:
for line in self._target_dict[_TK.TARGET_PDB]:
file.write(line)
self._logger.log(f"Wrote temporary target file {tmp_target_path} with {len(self._target_dict[_TK.TARGET_PDB])} lines.", _LE.DEBUG)
# build the cavity
ref_ligand = MoleculeReader(filename=tmp_ref_ligand_path)[0]
self._prepare_protein(settings, tmp_target_path)
protein = settings.proteins[0]
settings.binding_site = settings.BindingSiteFromLigand(protein,
ref_ligand,
distance=self._target_dict[_TK.CAVITY_REFERENCE_DISTANCE])
settings.reference_ligand_file = tmp_ref_ligand_path
elif self._target_dict[_TK.CAVITY_METHOD] == _TK.CAVITY_METHOD_POINT:
raise NotImplementedError
# origin (x,x,x)
# distance x
else:
raise DockingRunFailed("Specified cavity determination method not defined for GOLD.")
self._logger.log(f"Initialized GOLD Protein.BindingSite with method {self._target_dict[_TK.CAVITY_METHOD]}.", _LE.DEBUG)
def add_molecules(self, molecules: list):
"""This method overrides the parent class, docker.py add_molecules method. This method appends prepared
ligands to a list for subsequent docking. Note, that while internally we will store the ligands for "GOLD"
in RDkit format, they will need to be written out as an SDF file before docking can commence later.
:param molecules: A list that is to contain all prepared ligands for subsequent docking
:type molecules: list
:raises NotImplementedError: Each backend must override the parent class, docker.py add_molecules method.
Inability to do so or a bug causing incorrect implementation will raise a NotImplementedError
"""
mol_trans = MoleculeTranslator(self.ligands, force_mol_type=_LP.TYPE_RDKIT)
mol_trans.add_molecules(molecules)
self.ligands = mol_trans.get_as_rdkit()
self._docking_performed = False
def _generate_temporary_input_output_files(self, start_indices, sublists):
# in case singletons are handed over, wrap them in a list for "zipping" later
if not isinstance(start_indices, list):
start_indices = [start_indices]
if not isinstance(sublists, list):
sublists = [sublists]
tmp_output_dirs = []
tmp_input_sdf_paths = []
tmp_output_sdf_paths = []
for start_index, sublist in zip(start_indices, sublists):
# generate temporary input files and output directory
cur_tmp_output_dir = tempfile.mkdtemp()
cur_tmp_sdf = gen_temp_file(prefix=str(start_index), suffix=".sdf", dir=cur_tmp_output_dir)
# write-out the temporary input file
writer = Chem.SDWriter(cur_tmp_sdf)
one_written = False
for ligand in sublist:
# initialize all ligands (as they could have failed)
if ligand.get_molecule() is not None:
mol = deepcopy(ligand.get_molecule())
mol.SetProp("_Name", ligand.get_identifier())
one_written = True
writer.write(mol)
writer.close()
if one_written is False:
if os.path.isdir(cur_tmp_output_dir):
shutil.rmtree(cur_tmp_output_dir)
continue
# add the path to which "_dock_subjob()" will write the result SDF
output_sdf_path = gen_temp_file(prefix=str(start_index), suffix="_result.sdf", dir=cur_tmp_output_dir)
tmp_output_dirs.append(cur_tmp_output_dir)
tmp_output_sdf_paths.append(output_sdf_path)
tmp_input_sdf_paths.append(cur_tmp_sdf)
return tmp_output_dirs, tmp_input_sdf_paths, tmp_output_sdf_paths
def _dock(self, number_cores: int):
# partition ligands into sublists and distribute to processor cores for docking
start_indices, sublists = self.get_sublists_for_docking(number_cores=number_cores)
number_sublists = len(sublists)
self._logger.log(f"Split ligands into {number_sublists} sublists for docking.", _LE.DEBUG)
sublists_submitted = 0
slices_per_iteration = min(number_cores, number_sublists)
while sublists_submitted < len(sublists):
upper_bound_slice = min((sublists_submitted + slices_per_iteration), len(sublists))
cur_slice_start_indices = start_indices[sublists_submitted:upper_bound_slice]
cur_slice_sublists = sublists[sublists_submitted:upper_bound_slice]
# generate paths and initialize molecules (so that if they fail, this can be covered)
tmp_output_dirs, tmp_input_sdf_paths, \
tmp_output_sdf_paths = self._generate_temporary_input_output_files(cur_slice_start_indices,
cur_slice_sublists)
# run in parallel; wait for all subjobs to finish before proceeding
processes = []
for chunk_index in range(len(tmp_output_dirs)):
p = multiprocessing.Process(target=self._dock_subjob, args=(tmp_input_sdf_paths[chunk_index],
tmp_output_sdf_paths[chunk_index],
tmp_output_dirs[chunk_index]))
processes.append(p)
p.start()
for p in processes:
p.join()
# add the number of input sublists rather than the output temporary folders to account for cases where
# entire sublists failed to produce an input structure
sublists_submitted += len(cur_slice_sublists)
# load the chunks and recombine the result; add conformations
for chunk_index in range(len(tmp_output_dirs)):
# this is a protection against the case where empty (file size == 0 bytes) files are generated due to
# a failure during docking
if not os.path.isfile(tmp_output_sdf_paths[chunk_index]) or os.path.getsize(tmp_output_sdf_paths[chunk_index]) == 0:
continue
for molecule in Chem.SDMolSupplier(tmp_output_sdf_paths[chunk_index], removeHs=False):
# it can happen that ligands have "impossible chemistry" and will be loaded by RDKit as "None"
if molecule is None:
continue
# parse the molecule name (sorted by FITNESS not the score) which looks like:
# "0:0|0xa6enezm|sdf|1|dock6"
cur_conformer_name = str(molecule.GetProp("_Name")).split(sep='|')[0]
# add molecule to the appropriate ligand
for ligand in self.ligands:
if ligand.get_identifier() == cur_conformer_name:
ligand.add_conformer(molecule)
break
# clean-up
for path in tmp_output_dirs:
shutil.rmtree(path)
self._log_docking_progress(number_done=sublists_submitted, number_total=number_sublists)
# update conformer names to contain the conformer id
# -> <ligand_number>:<enumeration>:<conformer_number>
reverse = True if self._get_scoring_function_parameters()[_ROE.BEST] == "max" else False
for ligand in self.ligands:
ligand.set_conformers(sorted(ligand.get_conformers(),
key=lambda x: self._get_score_from_conformer(conformer=x),
reverse=reverse))
ligand.add_tags_to_conformers()
# log any docking fails
self._docking_fail_check()
# generate docking results as dataframe
result_parser = GoldResultParser(ligands=[ligand.get_clone() for ligand in self.ligands],
fitness_function=self.parameters.fitness_function,
response_value=self.parameters.response_value)
self._df_results = result_parser.as_dataframe()
# set docking flag
self._docking_performed = True
def _dock_subjob(self, sdf_ligand_path, path_sdf_results, tmp_output_dir):
# 1) prepare Gold docker: (i) "clone" the docker instance, (ii) set remaining, ligand-specific settings and
# (iii) initialize this chunk's ligands
cur_docker = DockerGold()
settings = cur_docker.settings
settings.output_directory = tmp_output_dir
settings.output_file = os.path.basename(path_sdf_results)
settings.output_format = "sdf"
settings.fitness_function = self.parameters.fitness_function
settings.early_termination = self.parameters.early_termination
settings.autoscale = self.parameters.autoscale
if self.parameters.diverse_solutions is not None:
settings.diverse_solutions = self.parameters.diverse_solutions
self._initialize_cavity(settings)
settings.add_ligand_file(sdf_ligand_path, ndocks=self.parameters.ndocks)
# 2) write settings file
settings_file_path = os.path.join(tmp_output_dir, _EE.GOLD_AUTO_CONFIG_NAME)
settings.write(settings_file_path)
with open(settings_file_path, 'r') as file:
self._logger.log(f"Contents of configurations file {settings_file_path}:", _LE.DEBUG)
for line in file:
self._logger_blank.log(line.rstrip("\n"), _LE.DEBUG)
# 3) run Gold docker
arguments = [settings_file_path]
execution_result = self._Gold_executor.execute(command=_EE.GOLD_AUTO,
arguments=arguments,
check=False)
self._delay4file_system(path=path_sdf_results)
self._logger.log(f"Finished sublist (input: {sdf_ligand_path}, output directory: {tmp_output_dir}), with return code '{execution_result.returncode}'.", _LE.DEBUG)
def _prepare_protein(self, settings, tmp_protein_path):
protein = Protein.from_file(tmp_protein_path)
protein.remove_all_waters()
protein.remove_unknown_atoms()
protein.add_hydrogens()
ligands = protein.ligands
for l in ligands:
protein.remove_ligand(l.identifier)
protein_file_name = os.path.join(settings.output_directory, 'clean_%s.mol2' % protein.identifier)
with EntryWriter(protein_file_name) as writer:
writer.write(protein)
settings.add_protein_file(protein_file_name)
return ligands
def write_docked_ligands(self, path, mode="all"):
self._write_docked_ligands(path, mode, mol_type=_LP.TYPE_RDKIT)
def _get_scoring_function_parameters(self):
# get the appropriate name of the tag and whether minimal or maximal values are best for
# the specified scoring function
if self.parameters.response_value == GoldResponseValue.FITNESS:
scoring_function_parameters = _ROE.DICT_FITNESS[self.parameters.fitness_function]
elif self.parameters.response_value == GoldResponseValue.VALUE:
scoring_function_parameters = _ROE.DICT_VALUE[self.parameters.fitness_function]
else:
raise ValueError("Parameter response value must be either fitness or value.")
self._logger.log(f"Set scoring_function_parameters to {scoring_function_parameters} for obtaining the scores.",
_LE.DEBUG)
return scoring_function_parameters
def get_scores(self, best_only):
"""This method overrides the parent class, docker.py get_scores method. This method returns a list containing
all docking scores. This method allows returning the best docking scores only. "best" can mean the minimum
or maximum values for this given scoring function. By default, it will return the minimum values. Returning
all the docking scores (of different poses for instance) is also possible if best only is not enforced
:param best_only: Determines whether the best (either minimum or maximum) docking scores are returned
:type best_only: boolean, True or False
:return: list of returned docking scores
:raises ValueError: If best_only is True but neither "min" nor "max" was specified, a ValueError is raised
"""
return self._get_scores(best_only=best_only, best=self._scoring_function_parameters[_ROE.BEST])
def write_result(self, path, mode="all"):
"""This method overrides the parent class, docker.py write_result method.
This method writes the docking results to a csv file. There is the option to write out either the best
predicted binding pose per enumeration or all the predicted binding poses. Output for the best predicted
binding pose per ligand has yet to be implemented
:param path: Contains information on results output path
:type path: string
:param mode: Determines whether the output contains the best predicted binding pose per ligand, the best
predicted binding pose per enumeration, or all the predicted binding poses
:type mode: string, optional, default value is "all". Other possible value is "best_per_enumeration"
:param best: Determines whether lower or higher values are better (typically lower ones)
:type best: string, optional, default value is "min". Other possible value is "max"
"""
return self._write_result(path=path, mode=mode, best=self._scoring_function_parameters[_ROE.BEST])
def _get_score_from_conformer(self, conformer):
return float(conformer.GetProp(self._scoring_function_parameters[_ROE.TAG]))
def _sort_conformers(self, conformers: list, best=None) -> list:
return super()._sort_conformers(conformers=conformers,
best=self._scoring_function_parameters[_ROE.BEST])
|
72499
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.targets['0.2'] = ""
self.defaultTarget = '0.2'
self.description = "deprecated: use virtual/base instead"
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
from Package.VirtualPackageBase import *
class Package(VirtualPackageBase):
def __init__(self):
VirtualPackageBase.__init__(self)
|
72509
|
import os
import zstandard
import json
import jsonlines
import io
import datetime
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime.datetime,)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj))
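# A minimal usage sketch for json_serial (only the json/datetime imports above are
# assumed):
#
#     json.dumps({"ts": datetime.datetime(2021, 1, 1)}, default=json_serial)
#     # -> '{"ts": "2021-01-01T00:00:00"}' instead of raising TypeError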
# Modified version of lm_dataformat Archive for single file.
class Archive:
def __init__(self, file_path, compression_level=3):
self.file_path = file_path
dir_name = os.path.dirname(file_path)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
self.fh = open(self.file_path, 'wb')
self.cctx = zstandard.ZstdCompressor(level=compression_level)
self.compressor = self.cctx.stream_writer(self.fh)
def add_data(self, data, meta={}):
self.compressor.write(json.dumps({'text': data, 'meta': meta}, default=json_serial).encode('UTF-8') + b'\n')
def commit(self):
self.compressor.flush(zstandard.FLUSH_FRAME)
self.fh.flush()
self.fh.close()
# Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm.
class Reader:
def __init__(self):
pass
def read_jsonl(self, file, get_meta=False, autojoin_paragraphs=True, para_joiner='\n\n'):
with open(file, 'rb') as fh:
self.fh = fh
cctx = zstandard.ZstdDecompressor()
reader = io.BufferedReader(cctx.stream_reader(fh))
rdr = jsonlines.Reader(reader)
for ob in rdr:
# naive jsonl where each object is just the string itself, with no meta. For legacy compatibility.
if isinstance(ob, str):
assert not get_meta
yield ob
continue
text = ob['text']
if autojoin_paragraphs and isinstance(text, list):
text = para_joiner.join(text)
if get_meta:
yield text, (ob['meta'] if 'meta' in ob else {})
else:
yield text
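# A minimal round-trip sketch for the Archive and Reader classes above (the output
# path is illustrative; requires the zstandard and jsonlines packages imported above):
if __name__ == "__main__":
    archive = Archive("example/data.jsonl.zst")
    archive.add_data("hello world", meta={"created": datetime.datetime(2021, 1, 1)})
    archive.commit()
    for text, meta in Reader().read_jsonl("example/data.jsonl.zst", get_meta=True):
        print(text, meta)  # -> hello world {'created': '2021-01-01T00:00:00'}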
|
72518
|
import os
from PIL import Image
from math import ceil, sqrt
from .exceptions import CollageOfZeroFramesError
from .utils import does_path_exists
from typing import List
# Module to create collage from list of images, the
# images are the extracted frames of the input video.
class MakeCollage:
"""
Class that creates the collage from list of images.
Collage that should be as close to the shape of a square.
The images are arranged by the timestamp of the frames; their
index in the image_list is based on their timestamp in the
video. The image at index 2 is a frame from the 3rd second
and the image at index 39 is from the 40th second. The index
is one less due to zero-based indexing.
Let's say we have a list with 9 images.
As the images should be arranged in a way to resemble a
square, we take the square root of 9 and that is 3. Now
we need to make a 3x3 frames collage.
Arrangement should be:
Img1 Img2 Img3
Img4 Img5 Img6
Img7 Img8 Img9
If the number of images is not a perfect square, calculate the
square root and round it to the nearest integer.
If the number of images is 13, which is not a perfect square:
sqrt(13) = 3.605551275463989
round(3.605551275463989) = 4
Thus the collage should be a 4x4 grid of frames.
Arrangement should be:
-----------------------------
| Img1 Img2 Img3 Img4 |
| Img5 Img6 Img7 Img8 |
| Img9 Img10 Img11 Img12 |
| Img13 X X X |
-----------------------------
X denotes the empty space due to lack of images.
But the empty spaces will not affect the robustness,
as downsized/transcoded versions of the video will also
produce these vacant spaces.
"""
def __init__(
self,
image_list: List[str],
output_path: str,
collage_image_width: int = 1024,
) -> None:
"""
Checks if the list passed is not an empty list.
Also makes sure that the output_path directory exists.
And calls the make method, the make method creates the collage.
:param image_list: A python list containing the list of absolute
path of images that are to be added in the collage.
The order of images is kept intact and is very important.
:param output_path: Absolute path of the collage including
the image name. (This is where the collage is saved.)
Example: '/home/username/projects/collage.jpeg'.
:param collage_image_width: An integer specifying the image width of the
output collage. Default value is 1024 pixels.
:return: None
:rtype: NoneType
"""
self.image_list = image_list
self.number_of_images = len(self.image_list)
self.output_path = output_path
self.collage_image_width = collage_image_width
self.images_per_row_in_collage = int(round(sqrt(self.number_of_images)))
if self.number_of_images == 0:
raise CollageOfZeroFramesError("Can not make a collage of zero images.")
output_path_dir = os.path.dirname(self.output_path) + "/"
if not does_path_exists(output_path_dir):
raise FileNotFoundError(
"Directory at which output collage is to be saved does not exists."
)
self.make()
def make(self) -> None:
"""
Creates the collage from the list of images.
It calculates the scale of the images on the collage by
measuring the width and height of the first image; the choice
of the first image is arbitrary, since all the images passed
are assumed to have the same size.
A base image of 'collage_image_width' width and of 'number
of rows times scaled frame image height' height is created.
The base image has all pixels with RGB value 0,0,0, that is,
the base image is pure black. The frame images are then embedded
on it.
The frame images are scaled to fit the collage base image such
that the shape of collage is as close to the shape of a square.
:return: None
:rtype: NoneType
"""
# arbitrarily selecting the first image from the list, index 0
with Image.open(self.image_list[0]) as first_frame_image_in_list:
# Find the width and height of the first image of the list.
# Assuming all the images have same size.
frame_image_width, frame_image_height = first_frame_image_in_list.size
# scale is the ratio of collage_image_width and product of
# images_per_row_in_collage with frame_image_width.
# The scale will always lie between 0 and 1, which implies that
# the images are always going to get downsized.
scale = (self.collage_image_width) / (
self.images_per_row_in_collage * frame_image_width
)
# Calculating the scaled height and width for the frame image.
scaled_frame_image_width = ceil(frame_image_width * scale)
scaled_frame_image_height = ceil(frame_image_height * scale)
# Divide the number of images by images_per_row_in_collage. The later
# was calculated by taking the square root of total number of images.
number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)
# Multiplying the height of one downsized image with number of rows.
# Height of 1 downsized image is product of scale and frame_image_height
# Total height is number of rows times the height of one downsized image.
self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)
# Create an image of passed collage_image_width and calculated collage_image_height.
# The downsized images will be pasted on this new base image.
# The image is 0,0,0 RGB(black).
collage_image = Image.new(
"RGB", (self.collage_image_width, self.collage_image_height)
)
# keep track of the x and y coordinates of the resized frame images
i, j = (0, 0)
# iterate the frames and paste them on their position on the collage_image
for count, frame_path in enumerate(self.image_list):
# Set the x coordinate to zero if we are on the first column
# If self.images_per_row_in_collage is 4
# then 0,4,8 and so on should have their x coordinate as 0
if (count % self.images_per_row_in_collage) == 0:
i = 0
# open the frame image, must open it to resize it using the thumbnail method
frame = Image.open(frame_path)
# scale the opened frame images
frame.thumbnail(
(scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS
)
# set the value of x to that of i's value.
# i is set to 0 if we are on the first column.
x = i
# It ensures that the y coordinate stays the same for any given row.
# The floor of a real number is the largest integer that is less
# than or equal to the number. Floor division is used because of
# the zero-based indexing; the floor of the division stays the same
# for an entire row, as the decimal values are discarded by the floor.
# For the first row the result of floor division is always zero and
# the product of 0 with scaled_frame_image_height is also zero, so the
# y coordinate for the first row is 0.
# For the second row the result of floor division is one and the product
# with scaled_frame_image_height ensures that the y coordinate is
# scaled_frame_image_height below the first row.
y = (j // self.images_per_row_in_collage) * scaled_frame_image_height
# paste the frame image on the newly created base image(base image is black)
collage_image.paste(frame, (x, y))
frame.close()
# increase the x coordinate by scaled_frame_image_width
# to get the x coordinate of the next frame. unless the next image
# will be on the very first column this will be the x coordinate.
i = i + scaled_frame_image_width
# increase the value of j by 1, this is to calculate the y coordinate of
# next image. The increased number will be floor divided by images_per_row_in_collage
# therefore the y coordinate stays the same for any given row.
j += 1
# save the base image with all the scaled frame images embedded on it.
collage_image.save(self.output_path)
collage_image.close()
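# A minimal usage sketch, intended to be run from code that imports this module as
# part of its package (hypothetical paths; the frame images must exist, share one
# size, and the output directory must already exist):
#
#     frames = ["frames/frame_{:03d}.jpeg".format(i) for i in range(1, 14)]
#     MakeCollage(frames, "output/collage.jpeg", collage_image_width=1024)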
|
72562
|
import copy
from proxy import Proxy
class ProxyList(list):
"""
A proxy wrapper for a normal Python list.
A lot of functionality is being reproduced from Proxy. Inheriting Proxy would
simplify things a lot but I get type errors when I try to do so. It is not exactly
clear what a partial copy entails for a ProxyList so we will not consider this
option for now.
"""
__slots__ = ["_obj", "__weakref__", "__slots__", "_is_copied",
"_enable_partial_copy", "_attr_map"]
_is_copied = False
_special_names = [
'__add__', '__contains__', '__delitem__', '__delslice__',
'__eq__', '__ge__', '__getitem__', '__getslice__', '__gt__', '__hash__',
'__iadd__', '__imul__', '__iter__', '__le__', '__len__',
'__lt__', '__mul__', '__ne__', '__reduce__', '__reduce_ex__', '__repr__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__', '__sizeof__',
'__str__', '__subclasshook__', '__xor__', 'next',
]
def __init__(self, obj, _partial_copy=False):
object.__setattr__(self, "_obj", obj)
object.__setattr__(self, "_is_copied", False)
object.__setattr__(self, "_enable_partial_copy", _partial_copy)
def append(self, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.append(obj)
def count(self, obj):
return self._obj.count(obj)
def extend(self, iterable):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.extend(iterable)
def index(self, obj):
return self._obj.index(obj)
def insert(self, idx, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.insert(idx, obj)
def pop(self):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
return self._obj.pop()
def remove(self, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.remove(obj)
def reverse(self):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.reverse()
def sort(self, key=None, reverse=False):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
# list.sort() accepts key/reverse as keyword arguments only
self._obj.sort(key=key, reverse=reverse)
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
if name in cls._special_names and args:  # non-empty args (identity check against () was unreliable)
args = map(lambda x: x._obj if isinstance(x, Proxy) or
isinstance(x, ProxyList) else x, args)
return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = list.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
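# A minimal usage sketch of the copy-on-write behaviour described in the class
# docstring (runs only when this file is executed directly and the `proxy` module
# imported above is available; values are illustrative):
if __name__ == "__main__":
    original = [1, 2, 3]
    proxied = ProxyList(original)
    proxied.append(4)              # first mutation deep-copies the wrapped list
    print(original)                # [1, 2, 3] -- the original list is untouched
    print(len(proxied), list(proxied))  # 4 [1, 2, 3, 4]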
|
72574
|
from jaxrl.agents.awac.awac_learner import AWACLearner
from jaxrl.agents.bc.bc_learner import BCLearner
from jaxrl.agents.ddpg.ddpg_learner import DDPGLearner
from jaxrl.agents.drq.drq_learner import DrQLearner
from jaxrl.agents.sac.sac_learner import SACLearner
from jaxrl.agents.sac_v1.sac_v1_learner import SACV1Learner
|
72577
|
import csv
import os
import random
import uuid
import pickle
from multiprocessing import Pool
from collections import Counter
import numpy as np
import imgaug.augmenters as iaa
from PIL import Image
def rotate_save(img, flip, angle, label, new_label_dict, out_dir):
filename = str(uuid.uuid4()) + ".png"
new_label_dict[filename] = label
if not flip:
img.rotate(angle, expand=True).save(os.path.join(out_dir, filename))
else:
img.rotate(angle, expand=True).transpose(Image.FLIP_LEFT_RIGHT).save(os.path.join(out_dir, filename))
def process_image(image_filename, in_dir, out_dir, label_dict, count):
new_label_dict = {}
img = Image.open(os.path.join(in_dir, image_filename))
config = [
(False, 0),
(False, 90),
(False, 180),
(False, 270),
(True, 0),
(True, 90),
(True, 180),
(True, 270)
]
aug = iaa.Sequential([
iaa.OneOf([
iaa.Affine(scale={"x": (0.7, 1.3), "y": (0.7, 1.3)}),
iaa.Affine(rotate=(-25, 25))
])
])
while count > 0:
flip, angle = config[(count - 1) % len(config)]
rotate_save(Image.fromarray(aug(images=[np.array(img)])[0]), flip, angle, label_dict[image_filename[:-4]], new_label_dict, out_dir)
count -= 1
return new_label_dict
def main(in_dir, out_dir, labels_file):
label_dict = {}
with open(labels_file, "r") as f:
csvfile = csv.reader(f)
# Skip column description
next(csvfile)
for row in csvfile:
label_dict[row[0]] = row[1:].index("1.0")
new_label_dict = {}
counter = {}
files = os.listdir(in_dir)
random.shuffle(files)
for f in files:
counter[label_dict[f[:-4]]] = counter.get(label_dict[f[:-4]], 0) + 1
print(counter)
desired_counts = {k:int(max(0.5*(max(counter.values()) - n) + n, n)) for k, n in counter.items()}
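# The expression above targets, per class, the original count plus half of its gap to
# the largest class. Worked example with illustrative counts {0: 100, 1: 60}:
# max(counter.values()) is 100, so class 1 gets max(0.5 * (100 - 60) + 60, 60) = 80
# desired samples, while class 0 stays at 100.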
print(desired_counts)
print(len(files))
p = Pool(16)
dicts = p.starmap(
process_image,
[
(
image_filename,
in_dir,
out_dir,
label_dict,
int(desired_counts[label_dict[image_filename[:-4]]] / counter[label_dict[image_filename[:-4]]])
)
for image_filename in files
]
)
combined_dict = {}
for d in dicts:
combined_dict.update(d)
with open("label_dict.pkl", "wb") as f:
pickle.dump(combined_dict, f)
if __name__ == "__main__":
main("train/", "train_aug/", "ISIC2018_Task3_Training_GroundTruth.csv")
|
72592
|
from typing import List, Optional, Tuple
from django.http import HttpRequest
from django_scim.filters import UserFilterQuery
from zerver.lib.request import RequestNotes
# This is in a separate file due to circular import issues django-scim2 runs into
# when this is placed in zerver.lib.scim.
class ZulipUserFilterQuery(UserFilterQuery):
"""This class implements the filter functionality of SCIM2.
E.g. requests such as
/scim/v2/Users?filter=userName eq "<EMAIL>"
can be made to refer to resources via their properties.
This gets fairly complicated in its full scope
(https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.2.2)
and django-scim2 implements an entire mechanism of converting
this SCIM2 filter syntax into SQL queries.
What we have to do in this class is to customize django-scim2 so
that it knows which SCIM attributes map to which UserProfile
fields. We can assume that get_extra_model_filter_kwargs_getter
has already ensured that we will only interact with non-bot user
accounts in the realm associated with this SCIM configuration.
"""
# attr_map describes which table.column the given SCIM2 User
# attributes refer to.
attr_map = {
# attr, sub attr, uri
("userName", None, None): "zerver_userprofile.delivery_email",
# We can only reasonably support filtering by name.formatted
# as UserProfile.full_name is its equivalent. We don't store
# first/last name information for UserProfile, so we can't
# support filtering based on name.givenName or name.familyName.
("name", "formatted", None): "zerver_userprofile.full_name",
("active", None, None): "zerver_userprofile.is_active",
}
# joins tells django-scim2 to always add the specified JOINS
# to the formed SQL queries. We need to JOIN the Realm table
# because we need to limit the results to the realm (subdomain)
# of the request.
joins = ("INNER JOIN zerver_realm ON zerver_realm.id = realm_id",)
@classmethod
def get_extras(cls, q: str, request: Optional[HttpRequest] = None) -> Tuple[str, List[object]]:
"""
Return extra SQL and params to be attached to end of current Query's
SQL and params. The return format matches the format that should be used
for providing raw SQL with params to Django's .raw():
https://docs.djangoproject.com/en/3.2/topics/db/sql/#passing-parameters-into-raw
Here we ensure that results are limited to the subdomain of the request
and also exclude bots, as we currently don't want them to be managed by SCIM2.
"""
assert request is not None
realm = RequestNotes.get_notes(request).realm
assert realm is not None
return "AND zerver_realm.id = %s AND zerver_userprofile.is_bot = False", [realm.id]
|
72606
|
from geospacelab import preferences
import geospacelab.datahub.sources.madrigal.utilities as utilities
from geospacelab.datahub import DatabaseModel
class WDCDatabase(DatabaseModel):
def __new__(cls, str_in, **kwargs):
obj = super().__new__(cls, str_in, **kwargs)
return obj
wdc_database = WDCDatabase('WDC')
wdc_database.url = 'http://wdc.kugi.kyoto-u.ac.jp/'
wdc_database.category = 'online database'
wdc_database.Notes = '''
- Data Usage Rules
The rules for the data use and exchange are defined by the International Council for Science - World Data System (ICSU-WDS) Data Sharing Principles.
The data and services at the WDC Kyoto are available for scientific use without restrictions,
but for the real-time (quicklook) data, please contact our staff (<EMAIL>) before using those in publications and presentations.
The WDC Kyoto does not allow commercial applications of the geomagnetic indices.
'''
try:
default_user_email = preferences.user_config['datahub']['wdc']['user_email']
except KeyError:
if preferences._on_rtd:
default_user_email = '<EMAIL>'
save = 'y'
else:
print("Inputs for accessing the WDC (wdc.kugi.kyoto-u.ac.jp) database.")
default_user_email = input("User's email: ")
save = input("Save as default? [y]/n: ")
if save.lower() in ['', 'y', 'yes']:
uc = preferences.user_config
uc.setdefault('datahub', {})
uc['datahub'].setdefault('wdc', {})
uc['datahub']['wdc']['user_email'] = default_user_email
preferences.set_user_config(user_config=uc, set_as_default=True)
|
72607
|
import cv2
import numpy as np
import sys
sys.path.append('build')
import kosutils
from tracker import *
# Setting the dimensions for output window
H = 700
W = 700
dispWindow = np.zeros((H,W,3),dtype=np.uint8)
PREDICTOR_PATH = "../shape_predictor_5_face_landmarks.dat"
# Creating the object for obj3D class
obj1 = kosutils.kos_Obj3D(dispWindow.shape[:2])
# Creating the object for kos_vcam class
cam1 = kosutils.kos_vcam(dispWindow.shape[:2])
cap = cv2.VideoCapture(0)
tr = tracker(PREDICTOR_PATH)
angle = 0
diff = 0
# previous nose coordinates fed to the tracker; they must exist before the first
# frame is processed, and starting them at (0, 0) is an arbitrary initial guess
p1x_ = 0
p1y_ = 0
while True:
ret, frame = cap.read()
if ret:
frame = cv2.flip(frame,1)
img = np.copy(frame)
size = img.shape[:2]
x,y = tr.getNose(p1x_,p1y_,frame)
p1x_ = x
p1y_ = y
# rect = tr.rect
angle += 2 * np.pi / 180
if(angle > 2*np.pi):
angle = 0
# try:
# cv2.rectangle(frame,(rect[0],rect[1]),(rect[2],rect[3]),(0,255,0),3)
# except:
# pass
# cv2.imshow("Face Tracking",frame)
# cv2.waitKey(1)
drift_x = x - size[1]//2
drift_y = size[0]//2 - y
drift_z = -cam1.focal_length-2*(500 - 2*tr.face_width)
cam1.updtTxMat(drift_x,drift_y,drift_z)
obj1.rotateObj(np.pi/4,angle,np.pi)
cam1.render(obj1.pts3d,dispWindow)
|
72643
|
from django.conf.urls import url
from .views import TakeLevelView
urlpatterns = [
url(r'^source$', TakeLevelView.as_view(), name='source_examen'),
]
|
72655
|
import itertools
from contextlib import suppress
from copy import deepcopy
from pymongo import MongoClient
from tinydb_serialization import SerializationMiddleware
from tinymongo import TinyMongoClient
from tinymongo.serializers import DateTimeSerializer
from tinymongo.tinymongo import generate_id
from quokka.utils.text import split_all_category_roots
class QuokkaTinyMongoClient(TinyMongoClient):
@property
def _storage(self):
serialization = SerializationMiddleware()
serialization.register_serializer(DateTimeSerializer(), 'TinyDate')
# TODO: Read custom serializers from settings and extensions
return serialization
class QuokkaDB(object):
config = {}
system = 'tinydb'
folder = 'databases'
host = 'localhost'
port = 27017
name = 'quokka_db'
collections = {
'index': 'index',
'contents': 'contents',
'uploads': 'uploads',
'users': 'users',
}
def __init__(self, app=None):
self.app = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.config = app.config.get('DATABASE', {})
# update attributes with config counterparts
for key, value in self.config.items():
if key.lower() != 'collections':
setattr(self, key.lower(), value)
else:
self.collections.update(value)
self._register(app)
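# A minimal sketch of the expected app.config['DATABASE'] mapping, mirroring the
# class defaults above (values are illustrative; any other key simply becomes an
# attribute of this object):
#
#     DATABASE = {
#         'system': 'mongo',            # or 'tinydb'
#         'host': 'localhost',
#         'port': 27017,
#         'name': 'quokka_db',
#         'collections': {'contents': 'contents', 'uploads': 'uploads'},
#     }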
def _register(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'db' in app.extensions:
raise RuntimeError("Flask extension already initialized")
app.extensions['db'] = self
self.app = app
def get_db_name(self, collection):
"""return db_name for collection"""
if self.system == "mongo":
return self.name
return collection
def get_collection(self, collection):
"""Get the corresponding database collection/table"""
col_name = self.collections.get(collection, collection)
db_name = self.get_db_name(col_name)
return self.connection[db_name][col_name]
def get_content_collection(self, content_id):
return self.connection[content_id]['contents']
def get_content_collection_mongo(self, content_id):
return self.connection[self.name]['contents']
@property
def connection(self):
if getattr(self, '_connection', None) is None:
if self.system == 'tinydb':
self._connection = QuokkaTinyMongoClient(self.folder)
elif self.system == 'mongo':
self._connection = MongoClient(
host=self.host,
port=self.port
)
return self._connection
def __dir__(self):
"""Return existing attributes + collection names"""
attrs = []
for attr in super().__dir__():
if attr.endswith(('_mongo', '_tinydb')):
attrs.append(attr.rpartition('_')[0])
else:
attrs.append(attr)
return sorted(list(set(attrs)) + list(self.collections.keys()))
def __getattribute__(self, name):
collections = super().__getattribute__('collections')
get_collection = super().__getattribute__('get_collection')
if name in collections:
return get_collection(name)
# Try to get system specific method e.g: self.categories_mongo
try:
system = super().__getattribute__('system')
return super().__getattribute__(f'{name}_{system}')
except AttributeError:
return super().__getattribute__(name)
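# An illustrative consequence of the lookup above: `db.users` (a key of
# self.collections) resolves to get_collection('users'), while an attribute such as
# `db.get_content_collection` resolves to get_content_collection_mongo when
# system == 'mongo' and falls back to the plain method otherwise.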
# [ <-- DB query helpers --> ]
def generate_id(self):
return generate_id()
def value_set(self, colname, key, filter=None,
sort=True, flat=False, **kwargs):
"""Return a set of all values in a key"""
if filter is not None:
data = self.get_collection(colname).find(filter, **kwargs)
else:
data = self.get_collection(colname).find(**kwargs)
values = [item.get(key) for item in data if item.get(key) is not None]
if flat is True:
values = list(itertools.chain(*values))
with suppress(TypeError):
values = list(set(values))
return sorted(values) if sort is True else values
def author_set(self, sort=True, **kwargs):
users = [
item.get('fullname', item.get('username'))
for item in self.users.find()
]
authors = self.value_set('index', 'authors', flat=True, **kwargs)
values = list(set(users + authors))
return sorted(values) if sort is True else values
def tag_set(self, sort=True, **kwargs):
return self.value_set('index', 'tags', flat=True, sort=sort, **kwargs)
def category_set(self, sort=True, **kwargs):
results = self.value_set('index', 'category', sort=sort, **kwargs)
cats = []
for result in results:
cats.extend(split_all_category_roots(result))
return sorted(set(cats)) if sort is True else set(cats)
def content_set(self, *args, **kwargs):
return self.index.find(*args, **kwargs)
def article_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get('ARTICLE_ORDER_BY', [('date', -1)])
)
if not args:
args = [{'content_type': 'article'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'article'
return self.content_set(*args, **kwargs)
def page_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get('PAGE_ORDER_BY', [('title', -1)])
)
if not args:
args = [{'content_type': 'page'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'page'
return self.content_set(*args, **kwargs)
def block_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get(
'BLOCK_ORDER_BY', [('title', -1)]
)
)
if not args:
args = [{'content_type': 'block'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'block'
return self.content_set(*args, **kwargs)
def select(self, colname, *args, **kwargs):
return self.get_collection(colname).find(*args, **kwargs)
def count(self, colname, *args, **kwargs):
return self.get_collection(colname).find(*args, **kwargs).count()
def get(self, colname, *args, **kwargs):
return self.get_collection(colname).find_one(*args, **kwargs)
def insert(self, colname, *args, **kwargs):
return self.get_collection(colname).insert(*args, **kwargs)
def update(self, colname, query, doc):
return self.get_collection(colname).update_one(query, doc)
def push_content(self, model):
"""Insert or Update content related to model"""
collection = self.get_content_collection(model['_id'])
current_saved = collection.find_one({
'content_id': model['_id'],
'version': model.get('version', 0)
})
if is_equal(model, current_saved):
model.pop('content', None)
return
model_to_save = deepcopy(model)
if not current_saved:
version = 0
else:
version = model.get('version', 0) + 1
model['version'] = model_to_save['version'] = version
model_to_save['content_id'] = model_to_save.pop('_id')
collection.insert(model_to_save)
model.pop('content', None)
def pull_content(self, model):
if not isinstance(model, dict):
model = self.get('index', {'_id': model})
if not model or (
model.get('version') == 0 and not model.get('_isclone')):
return
collection = self.get_content_collection(model['_id'])
record = collection.find_one({
'content_id': model['_id'],
'version': model['version']
})
return record['content'] if record else None
def get_with_content(self, **kwargs):
model = self.get('index', kwargs)
if model:
model['content'] = self.pull_content(model)
return model
def is_equal(model, other):
if not other:
return False
versioned_keys = [
'title', 'summary', 'tags', 'category', 'date',
'content', 'authors', 'slug', 'status', 'published',
'comments', 'block_items'
]
for key in versioned_keys:
if model.get(key) != other.get(key):
return False
return True
|
72718
|
import numpy as np
from tsfuse.transformers.uniqueness import *
from tsfuse.data import Collection
def test_has_duplicate_true():
x = Collection.from_array([1, 2, 3, 3])
actual = HasDuplicate().transform(x).values
np.testing.assert_equal(actual, True)
def test_has_duplicate_false():
x = Collection.from_array([1, 2, 3, 4])
actual = HasDuplicate().transform(x).values
np.testing.assert_equal(actual, False)
def test_has_duplicate_min_true():
x = Collection.from_array([1, 1, 2, 3])
actual = HasDuplicateMin().transform(x).values
np.testing.assert_equal(actual, True)
def test_has_duplicate_min_false():
x = Collection.from_array([2, 3, 4, 4])
actual = HasDuplicateMin().transform(x).values
np.testing.assert_equal(actual, False)
def test_has_duplicate_max_true():
x = Collection.from_array([2, 3, 4, 4])
actual = HasDuplicateMax().transform(x).values
np.testing.assert_equal(actual, True)
def test_has_duplicate_max_false():
x = Collection.from_array([1, 1, 2, 3])
actual = HasDuplicateMax().transform(x).values
np.testing.assert_equal(actual, False)
def test_has_duplicate_empty():
x = Collection.from_array([np.nan])
actual = HasDuplicate().transform(x).values
np.testing.assert_equal(actual, False)
def test_number_of_unique_values_rel():
x = Collection.from_array([1, 2, 3, 4])
actual = NumberUniqueValues(rel=True).transform(x).values
np.testing.assert_equal(actual, 1.0)
def test_number_of_unique_values_abs():
x = Collection.from_array([1, 2, 3, 4])
actual = NumberUniqueValues(rel=False).transform(x).values
np.testing.assert_equal(actual, 4)
def test_number_of_unique_values_1_rel():
x = Collection.from_array([2, 2, 2, 2])
actual = NumberUniqueValues(rel=True).transform(x).values
np.testing.assert_equal(actual, 0.25)
def test_number_of_unique_values_1_abs():
x = Collection.from_array([2, 2, 2, 2])
actual = NumberUniqueValues(rel=False).transform(x).values
np.testing.assert_equal(actual, 1)
def test_number_of_unique_values_0_rel():
x = Collection.from_array([np.nan])
actual = NumberUniqueValues(rel=True).transform(x).values
np.testing.assert_equal(actual, np.nan)
def test_number_of_unique_values_0_abs():
x = Collection.from_array([np.nan])
actual = NumberUniqueValues(rel=False).transform(x).values
np.testing.assert_equal(actual, np.nan)
def test_sum_of_reoccurring_data_points():
x = Collection.from_array([1, 1, 2, 3, 3, 4])
actual = SumReoccurringDataPoints().transform(x).values
np.testing.assert_equal(actual, 8)
def test_sum_of_reoccurring_data_points_0():
x = Collection.from_array([1, 2, 3, 4])
actual = SumReoccurringDataPoints().transform(x).values
np.testing.assert_equal(actual, 0)
def test_sum_of_reoccurring_values():
x = Collection.from_array([1, 1, 2, 3, 3, 4])
actual = SumReoccurringValues().transform(x).values
np.testing.assert_equal(actual, 4)
def test_sum_of_reoccurring_values_0():
x = Collection.from_array([1, 2, 3, 4])
actual = SumReoccurringValues().transform(x).values
np.testing.assert_equal(actual, 0)
|
72722
|
from paiargparse import PAIArgumentParser
from tfaip.util.logging import logger
from calamari_ocr.ocr.training.cross_fold_trainer import (
CrossFoldTrainer,
CrossFoldTrainerParams,
)
logger = logger(__name__)
def run():
return main(parse_args())
def parse_args(args=None):
parser = PAIArgumentParser()
parser.add_root_argument("root", CrossFoldTrainerParams, CrossFoldTrainerParams())
params: CrossFoldTrainerParams = parser.parse_args(args).root
# TODO: add the training args (omit those params, that are set by the cross fold training)
# setup_train_args(parser, omit=["files", "validation", "weights",
# "early_stopping_best_model_output_dir", "early_stopping_best_model_prefix",
# "output_dir"])
return params
def main(params):
trainer = CrossFoldTrainer(params)
logger.info("Running cross fold train with params")
logger.info(params.to_json(indent=2))
trainer.run()
if __name__ == "__main__":
run()
|
72726
|
import sys
import unittest
import pendulum
from src import (
Stocks,
StocksCommandService,
)
from minos.networks import (
InMemoryRequest,
Response,
)
from tests.utils import (
build_dependency_injector,
)
class TestStocksCommandService(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.injector = build_dependency_injector()
async def asyncSetUp(self) -> None:
await self.injector.wire(modules=[sys.modules[__name__]])
async def asyncTearDown(self) -> None:
await self.injector.unwire()
def test_constructor(self):
service = StocksCommandService()
self.assertIsInstance(service, StocksCommandService)
async def test_get_remote_quotes(self):
service = StocksCommandService()
now = pendulum.now()
now_minus_one_month = now.subtract(months=1)
response = service.call_remote("AAPL", now_minus_one_month.to_date_string(), now.to_date_string())
self.assertIsInstance(response, list)
if __name__ == "__main__":
unittest.main()
|
72787
|
import logging
import os
from typing import Optional, List
import pandas as pd
from .sketch import sketch_fasta, sketch_fastqs
from .parser import mash_dist_output_to_dataframe
from ..utils import run_command
from ..const import MASH_REFSEQ_MSH
def mash_dist_refseq(sketch_path: str, mash_bin: str = "mash") -> str:
"""Compute Mash distances of sketch file of genome fasta to RefSeq sketch DB.
Args:
sketch_path (str): Mash sketch file path or genome fasta file path
mash_bin (str): Mash binary path
Returns:
(str): Mash STDOUT string
"""
assert os.path.exists(sketch_path)
cmd_list = [mash_bin,
'dist',
MASH_REFSEQ_MSH,
sketch_path]
exit_code, stdout, stderr = run_command(cmd_list)
if exit_code != 0:
raise Exception(
'Could not run Mash dist. EXITCODE="{}" STDERR="{}" STDOUT="{}"'.format(exit_code, stderr, stdout))
return stdout
def fasta_vs_refseq(fasta_path: str,
mash_bin: str = "mash",
sample_name: Optional[str] = None,
tmp_dir: str = "/tmp",
k: int = 16,
s: int = 400) -> pd.DataFrame:
"""Compute Mash distances between input FASTA against all RefSeq genomes
Args:
fasta_path: FASTA file path
mash_bin: Mash binary path
sample_name: Sample name
tmp_dir: Temporary working directory
k: Mash kmer size
s: Mash number of min-hashes
Returns:
(pd.DataFrame): Mash genomic distance results ordered by ascending distance
"""
sketch_path = None
try:
sketch_path = sketch_fasta(fasta_path,
mash_bin=mash_bin,
tmp_dir=tmp_dir,
sample_name=sample_name,
k=k,
s=s)
mashout = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
logging.info('Ran Mash dist successfully (output length=%s). Parsing Mash dist output', len(mashout))
df_mash = mash_dist_output_to_dataframe(mashout)
df_mash['sample'] = sample_name
logging.info('Parsed Mash dist output into Pandas DataFrame with %s rows', df_mash.shape[0])
logging.debug('df_mash: %s', df_mash.head(5))
return df_mash
finally:
if sketch_path and os.path.exists(sketch_path):
logging.info('Deleting temporary sketch file "%s"', sketch_path)
os.remove(sketch_path)
logging.info('Sketch file "%s" deleted!', sketch_path)
def fastq_vs_refseq(fastqs: List[str],
mash_bin: str = 'mash',
sample_name: str = None,
tmp_dir: str = '/tmp',
k: int = 16,
s: int = 400,
m: int = 8) -> pd.DataFrame:
"""Compute Mash distances between input reads against all RefSeq genomes
Args:
fastqs: FASTQ paths
mash_bin: Mash binary path
sample_name: Sample name
tmp_dir: Temporary working directory
k: Mash kmer size
s: Mash number of min-hashes
m: Mash number of times a k-mer needs to be observed in order to be considered for Mash sketch DB
Returns:
(pd.DataFrame): Mash genomic distance results ordered by ascending distance
"""
assert len(fastqs) > 0, "Must supply one or more FASTQ paths"
sketch_path = None
try:
sketch_path = sketch_fastqs(fastqs,
mash_bin=mash_bin,
tmp_dir=tmp_dir,
sample_name=sample_name,
k=k,
s=s,
m=m)
logging.info('Mash sketch database created for "%s" at "%s"', fastqs, sketch_path)
logging.info('Querying Mash sketches "%s" against RefSeq sketch database', sketch_path)
mashout = mash_dist_refseq(sketch_path, mash_bin=mash_bin)
logging.info('Queried "%s" against RefSeq sketch database. Parsing into Pandas DataFrame', sketch_path)
df_mash = mash_dist_output_to_dataframe(mashout)
df_mash['sample'] = sample_name
logging.info('Parsed Mash distance results into DataFrame with %s entries', df_mash.shape[0])
logging.debug('df_mash %s', df_mash.head(5))
return df_mash
finally:
if sketch_path and os.path.exists(sketch_path):
logging.info('Deleting temporary sketch file "%s"', sketch_path)
os.remove(sketch_path)
logging.info('Sketch file "%s" deleted!', sketch_path)
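# A minimal usage sketch, to be called from code that imports this module as part of
# its package (the FASTA path is hypothetical; the `mash` binary and the RefSeq
# sketch referenced by MASH_REFSEQ_MSH must be available):
#
#     df = fasta_vs_refseq("/path/to/genome.fasta", mash_bin="mash", sample_name="sample1")
#     print(df.head())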
|
72790
|
from Screens import PluginBrowser as PBBase
from Screens.InfoBarGenerics import InfoBarNotifications
OriginalPluginBrowser = PBBase.PluginBrowser
if not issubclass(OriginalPluginBrowser, InfoBarNotifications):
class PluginBrowser(OriginalPluginBrowser, InfoBarNotifications):
def __init__(self, *args, **kwargs):
OriginalPluginBrowser.__init__(self, *args, **kwargs)
#if self.skinName in ("NotifiablePluginBrowser", "OriginalPluginBrowser"):
# self.skinName = "PluginBrowser"
InfoBarNotifications.__init__(self)
NotifiablePluginBrowser = PluginBrowser
else:
NotifiablePluginBrowser = OriginalPluginBrowser
def install():
PBBase.PluginBrowser = NotifiablePluginBrowser
def uninstall():
PBBase.PluginBrowser = OriginalPluginBrowser
__all__ = ['OriginalPluginBrowser', 'NotifiablePluginBrowser', 'install', 'uninstall']
|
72808
|
import json
import os
import pytest
import requests
from tests.acceptance.helpers import ENDPOINT_ACTIVATE
from tests.acceptance.helpers import ENDPOINT_CONFIG
from tests.acceptance.helpers import create_and_validate_request_and_response
from tests.acceptance.helpers import sort_response
expected_activate_ab = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_activate_ab_empty_experimentKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "experimentKey not found"
}
]"""
expected_activate_ab_invalid_experimentKey = """[
{
"userId": "matjaz",
"experimentKey": "invalid exper key",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "experimentKey not found"
}
]"""
@pytest.mark.parametrize("experiment_key, expected_response, expected_status_code", [
("ab_test1", expected_activate_ab, 200),
("", expected_activate_ab_empty_experimentKey, 200),
("invalid exper key", expected_activate_ab_invalid_experimentKey, 200),
], ids=["valid case", "empty exper key", "invalid exper key"])
def test_activate__experiment(session_obj, experiment_key, expected_response,
expected_status_code):
"""
Test validates:
1. Presence of correct variation in the returned decision for AB experiment
Instead of validating a single field (variation, enabled), validation is done on the
whole response (which includes the variation and enabled fields).
This is to add extra robustness to the test.
Sort the responses because dictionaries shuffle order.
:param session_obj: session object
:param experiment_key: experiment_key
:param expected_response: expected_response
:param expected_status_code: expected_status_code
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {"experimentKey": experiment_key}
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, payload=payload,
params=params)
assert json.loads(expected_response) == resp.json()
assert resp.status_code == expected_status_code, resp.text
resp.raise_for_status()
expected_activate_feat = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
expected_activate_feat_empty_featureKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "",
"variationKey": "",
"type": "",
"enabled": false,
"error": "featureKey not found"
}
]"""
expected_activate_feat_invalid_featureKey = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "invalid feat key",
"variationKey": "",
"type": "",
"enabled": false,
"error": "featureKey not found"
}
]"""
@pytest.mark.parametrize("feature_key, expected_response, expected_status_code", [
("feature_1", expected_activate_feat, 200),
("", expected_activate_feat_empty_featureKey, 200),
("invalid feat key", expected_activate_feat_invalid_featureKey, 200),
], ids=["valid case", "empty feat key", "invalid feat key"])
def test_activate__feature(session_obj, feature_key, expected_response,
expected_status_code):
"""
Test validates:
That feature is enabled in the decision for the feature test
Instead of validating a single field (variation, enabled), validation is done on the
whole response (which includes the variation and enabled fields).
This is to add extra robustness to the test.
Sort the responses because dictionaries shuffle order.
:param session_obj: session object
:param feature_key: API request feature key
:param expected_response: API expected response
:param expected_status_code: API response expected status code
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {"featureKey": feature_key}
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, payload=payload,
params=params)
    if isinstance(resp.json(), dict) and resp.json().get('error'):
with pytest.raises(requests.exceptions.HTTPError):
assert resp.json() == json.loads(expected_response)
assert resp.status_code == expected_status_code, resp.text
resp.raise_for_status()
assert json.loads(expected_response) == resp.json()
assert resp.status_code == expected_status_code, resp.text
expected_activate_type_exper = """[
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_activate_type_feat = """[
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "feature_2",
"variationKey": "variation_1",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_3",
"variationKey": "",
"type": "feature",
"enabled": false
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_4",
"variationKey": "",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_5",
"variationKey": "",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
@pytest.mark.parametrize("decision_type, expected_response, expected_status_code, bypass_validation_request", [
("experiment", expected_activate_type_exper, 200, False),
("feature", expected_activate_type_feat, 200, False),
("invalid decision type", {'error': 'type "invalid decision type" not supported'}, 400, True),
("", {'error': 'type "" not supported'}, 400, True)
], ids=["experiment decision type", "feature decision type", "invalid decision type", "empty decision type"])
def test_activate__type(session_obj, decision_type, expected_response,
expected_status_code, bypass_validation_request):
"""
Test cases:
1. Get decisions with "experiment" type
2. Get decisions with "feature" type
3. Get empty list when non-existent decision type -> bug OASIS-6031
:param session_obj: session object
:param decision_type: parameterized decision type
:param expected_response: expected response
:param bypass_validation: option to bypass schema validation
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {"type": decision_type}
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
payload=payload, params=params)
if decision_type in ['experiment', 'feature']:
sorted_actual = sort_response(
resp.json(), 'experimentKey', 'featureKey')
sorted_expected = sort_response(json.loads(expected_response), 'experimentKey',
'featureKey')
assert sorted_actual == sorted_expected
elif resp.json()['error']:
with pytest.raises(requests.exceptions.HTTPError):
assert resp.json() == expected_response
resp.raise_for_status()
def test_activate_403(session_override_sdk_key):
"""
Test that 403 Forbidden is returned. We use invalid SDK key to trigger 403.
:param session_override_sdk_key: sdk key to override the session using invalid sdk key
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {"type": "experiment"}
with pytest.raises(requests.exceptions.HTTPError):
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_override_sdk_key,
payload=payload, params=params)
assert resp.status_code == 403
assert resp.json()['error'] == 'unable to fetch fresh datafile (consider ' \
'rechecking SDK key), status code: 403 Forbidden'
resp.raise_for_status()
@pytest.mark.parametrize(
"experiment, disableTracking, expected_status_code, bypass_validation_request", [
("ab_test1", "true", 200, False),
("ab_test1", "false", 200, False),
("feature_2_test", "true", 200, False),
("feature_2_test", "false", 200, False),
("ab_test1", "", 200, True),
("ab_test1", "invalid_boolean", 200, True),
], ids=["ab_experiment and decision_tr true", "ab_experiment and decision_tr false",
"feature test and decision_tr true",
"feature test and decision_tr false", "empty disableTracking",
"invalid disableTracking"])
def test_activate__disable_tracking(session_obj, experiment, disableTracking,
expected_status_code, bypass_validation_request):
"""
Setting to true will disable impression tracking for ab experiments and feature tests.
It's equivalent to previous "get_variation".
Can not test it in acceptance tests. Just testing basic status code.
FS compatibility test suite uses proxy event displatcher where they test this by
validating that event was not sent.
:param session_obj: session fixture
:param experiment: ab experiment or feature test
:param disableTracking: true or false
:param expected_status_code
:param bypass_validation: option to bypass schema validation
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {
"experimentKey": experiment,
"disableTracking": disableTracking
}
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
payload=payload, params=params)
resp.raise_for_status()
assert resp.status_code == expected_status_code
expected_enabled_true_all_true = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_enabled_true_feature_off = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
}
]"""
expected_enabled_false_feature_on = """[]"""
expected_enabled_false_feature_off = """[
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_3",
"variationKey": "",
"type": "feature",
"enabled": false
}
]"""
expected_enabled_empty = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
expected_enabled_invalid = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
}
]"""
@pytest.mark.parametrize(
"enabled, experimentKey, featureKey, expected_response, expected_status_code, bypass_validation_request", [
("true", "ab_test1", "feature_1", expected_enabled_true_all_true, 200, False),
("true", "ab_test1", "feature_3", expected_enabled_true_feature_off, 200, False),
("false", "ab_test1", "feature_1", expected_enabled_false_feature_on, 200, False),
("false", "ab_test1", "feature_3", expected_enabled_false_feature_off, 200, False),
("", "ab_test1", "feature_1", expected_enabled_empty, 200, True),
("invalid for enabled", "ab_test1",
"feature_1", expected_enabled_invalid, 200, True)
], ids=["enabled true, all true", "enabled true, feature off",
"enabled false, feature on",
"enabled false, feature off", "empty value for enabled",
"invalid value for enabled"])
def test_activate__enabled(session_obj, enabled, experimentKey, featureKey,
expected_response, expected_status_code, bypass_validation_request):
"""
Filter the activation response to return only enabled decisions.
Value for enabled key needs to be a string: "true" or "false"
- feature_1 feature is enabled - should not appear in response when enabled is set to False
- feature_3 feature is not enabled in the project - should not appear in the project when enabled is True
:param session_obj: session fixture
:param enabled: boolean is feature enabled
:param experimentKey: experiment key
:param featureKey: feature key
:param expected_response: API expected response
:param expected_status_code: expected status code
:param bypass_validation: option to bypass schema validation
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {
"experimentKey": experimentKey,
"featureKey": featureKey,
"enabled": enabled
}
resp = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, bypass_validation_request,
payload=payload, params=params)
actual_response = sort_response(resp.json(), 'experimentKey', 'featureKey')
expected_response = sort_response(json.loads(expected_response), 'experimentKey',
'featureKey')
assert actual_response == expected_response
assert resp.status_code == expected_status_code
resp.raise_for_status()
# #######################################################
# MISCELLANEOUS ALTERNATIVE TEST CASES
# #######################################################
expected_activate_with_config = """[
{
"userId": "matjaz",
"experimentKey": "ab_test1",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "",
"variationKey": "variation_1",
"type": "experiment",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_1",
"variationKey": "",
"type": "feature",
"variables": {
"bool_var": true,
"double_var": 5.6,
"int_var": 1,
"str_var": "hello"
},
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "feature_2_test",
"featureKey": "feature_2",
"variationKey": "variation_1",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_3",
"variationKey": "",
"type": "feature",
"enabled": false
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_4",
"variationKey": "",
"type": "feature",
"enabled": true
},
{
"userId": "matjaz",
"experimentKey": "",
"featureKey": "feature_5",
"variationKey": "",
"type": "feature",
"enabled": true
}
]"""
def test_activate_with_config(session_obj):
"""
Tests experimentKeys, featureKeys, variables and variations because it
validates against the whole response body.
In "activate"
Request payload defines the “who” (user id and attributes)
while the query parameters define the “what” (feature, experiment, etc)
Request parameter is a list of experiment keys or feature keys.
If you want both add both and separate them with comma.
Example:
params = {
"featureKey": <list of feature keys>,
"experimentKey": <list of experiment keys>
}
Need to sort the response (list of dictionaries). And the sorting needs to be primary
and secondary, because we are getting response for two params - experimentKey and
featureKey and they have different responses. experimentKey has experimentKey field
always populated and it has featureKey empty.
Whereas featureKey response has featureKey field populated and experimentKey empty.
When we sort on both then the responses are properly sorted and ready for being
asserted on.
:param session_obj: session object
"""
# config
BASE_URL = os.getenv('host')
resp = session_obj.get(BASE_URL + ENDPOINT_CONFIG)
resp_config = resp.json()
# activate
feat = [key for key in resp_config['featuresMap']]
exp = [key for key in resp_config['experimentsMap']]
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {
"featureKey": feat,
"experimentKey": exp
}
resp_activate = create_and_validate_request_and_response(ENDPOINT_ACTIVATE, 'post', session_obj, payload=payload,
params=params)
sorted_actual = sort_response(resp_activate.json(), 'experimentKey', 'featureKey')
sorted_expected = sort_response(json.loads(expected_activate_with_config),
'experimentKey',
'featureKey')
assert sorted_actual == sorted_expected
|
72830
|
import matlab.engine
import argparse
import torch
from torch.autograd import Variable
import numpy as np
import time, math, glob
import scipy.io as sio
import cv2
parser = argparse.ArgumentParser(description="PyTorch EDSR Eval")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="checkpoint/model_edsr.pth", type=str, help="model path")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name, Default: Set5")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
def PSNR(pred, gt, shave_border=0):
height, width = pred.shape[:2]
pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]
gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]
imdff = pred - gt
rmse = math.sqrt(np.mean(imdff ** 2))
if rmse == 0:
return 100
return 20 * math.log10(255.0 / rmse)
opt = parser.parse_args()
cuda = opt.cuda
eng = matlab.engine.start_matlab()
if cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
model = torch.load(opt.model)["model"]
image_list = glob.glob(opt.dataset+"/*.*")
avg_psnr_predicted = 0.0
avg_psnr_bicubic = 0.0
avg_elapsed_time = 0.0
for image_name in image_list:
print("Processing ", image_name)
im_gt_y = sio.loadmat(image_name)['im_gt_y']
im_b_y = sio.loadmat(image_name)['im_b_y']
im_l = sio.loadmat(image_name)['im_l']
im_gt_y = im_gt_y.astype(float)
im_b_y = im_b_y.astype(float)
im_l = im_l.astype(float)
psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=opt.scale)
avg_psnr_bicubic += psnr_bicubic
im_input = im_l.astype(np.float32).transpose(2,0,1)
im_input = im_input.reshape(1,im_input.shape[0],im_input.shape[1],im_input.shape[2])
im_input = Variable(torch.from_numpy(im_input/255.).float())
if cuda:
model = model.cuda()
im_input = im_input.cuda()
else:
model = model.cpu()
start_time = time.time()
HR_4x = model(im_input)
elapsed_time = time.time() - start_time
avg_elapsed_time += elapsed_time
HR_4x = HR_4x.cpu()
im_h = HR_4x.data[0].numpy().astype(np.float32)
im_h = im_h*255.
im_h = np.clip(im_h, 0., 255.)
im_h = im_h.transpose(1,2,0).astype(np.float32)
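    # Convert the super-resolved RGB output to YCbCr via MATLAB's rgb2ycbcr and keep the
    # luma (Y) channel, so PSNR is measured on Y just like the bicubic baseline.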
im_h_matlab = matlab.double((im_h / 255.).tolist())
im_h_ycbcr = eng.rgb2ycbcr(im_h_matlab)
im_h_ycbcr = np.array(im_h_ycbcr._data).reshape(im_h_ycbcr.size, order='F').astype(np.float32) * 255.
im_h_y = im_h_ycbcr[:,:,0]
psnr_predicted = PSNR(im_gt_y, im_h_y,shave_border=opt.scale)
avg_psnr_predicted += psnr_predicted
print("Scale=", opt.scale)
print("Dataset=", opt.dataset)
print("PSNR_predicted=", avg_psnr_predicted/len(image_list))
print("PSNR_bicubic=", avg_psnr_bicubic/len(image_list))
print("It takes average {}s for processing".format(avg_elapsed_time/len(image_list)))
|
72832
|
from trading_ig import IGService
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# if you need to cache to DB your requests
from datetime import timedelta
import requests_cache
from predefined_functions.initialisation import Initialisation
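# Thin wrapper around trading_ig's IGService for placing working (limit/stop) orders,
# opening and closing market positions, updating stops/limits and fetching open state.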
class Order_Management():
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.log = logging.getLogger(__name__)
# set object and then set connection
self.initial = Initialisation()
self.initialise_connection()
def initialise_connection(self):
self.ig_service = self.initial.initialise_connection()
self.ig_service.create_session()
# limit orders
def create_working_order(self, direction, epic, size, price, stop_distance,limit_distance,force_open=False):
currency_code = "GBP"
direction = direction
epic = epic
expiry = "DFB"
        guaranteed_stop = not force_open
# entering price
level = price
# Pound per point size
size = size
time_in_force = "GOOD_TILL_CANCELLED"
# LIMIT orders are now STOP
order_type = "STOP"
limit_distance = limit_distance
stop_distance = stop_distance
# currency_code = "GBP"
# direction = "SELL"
# epic = "CS.D.BITCOIN.TODAY.IP"
# expiry = "DFB"
# guaranteed_stop = False
# # entering price
# level = 13109
# # Pound per point size
# size = 0.50
# time_in_force = "GOOD_TILL_CANCELLED"
# order_type = "LIMIT"
# limit_distance = 4000.0
# stop_distance = 160.0
# """Creates an OTC working order"""
try:
response = self.ig_service.create_working_order(
currency_code=currency_code,
direction=direction,
epic=epic,
expiry=expiry,
guaranteed_stop=guaranteed_stop,
level=level,
size=size,
time_in_force=time_in_force,
order_type=order_type,
limit_distance=limit_distance,
stop_distance=stop_distance,
force_open=force_open
)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when creating a working order")
return None
# market orders
def create_open_position(self, direction, epic, size, limit_distance, stop_distance, force_open):
currency_code = "GBP"
direction = direction
epic = epic
expiry = "DFB"
        # no matter what you are doing, force_open always has to be True, otherwise stop losses do not work
force_open = force_open
        guaranteed_stop = not force_open
stop_distance = stop_distance
size = size
trailing_stop = False
trailing_stop_increment = None
trailing_stop_distance = None
time_in_force = "FILL_OR_KILL"
order_type = "MARKET"
limit_distance = limit_distance
try:
response = self.ig_service.create_open_position(
currency_code=currency_code,
direction=direction,
epic=epic,
expiry=expiry,
            # no matter what you are doing, force_open always has to be True, otherwise stop losses do not work
force_open=True,
guaranteed_stop=guaranteed_stop,
stop_distance=stop_distance,
size=size,
trailing_stop=trailing_stop,
trailing_stop_increment=trailing_stop_increment,
# trailing_stop_distance = trailing_stop_distance,
# time_in_force=time_in_force,
order_type=order_type,
limit_distance=limit_distance)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when opening a position")
return None
# market orders to close positions
def close_open_position(self, position, size):
# set randomly
try:
direction = "BUY"
position_direction = position["direction"]
if position_direction == "BUY":
direction = "SELL"
deal_id = position["dealId"]
order_type = "MARKET"
size = size
response = self.ig_service.close_open_position(
deal_id=deal_id,
direction=direction,
order_type=order_type,
size=size)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when closing position")
return None
def delete_working_order(self, deal_id):
try:
deal_id = deal_id
response = self.ig_service.delete_working_order(deal_id)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when deleting working order")
return None
def update_position(self, limit_level, stop_level, deal_id, guaranteed_stop):
limit_level = limit_level
guaranteed_stop = guaranteed_stop
stop_level=stop_level
deal_id=deal_id
trailing_stop = False
trailing_stop_distance = None
trailing_stop_increment = None
try:
response = self.ig_service.update_open_position(
limit_level=limit_level,
stop_level=stop_level,
# guaranteed_stop=guaranteed_stop,
deal_id =deal_id,
# trailing_stop=trailing_stop,
# trailing_stop_distance=trailing_stop_distance,
# trailing_stop_increment=trailing_stop_increment
)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when updating position or maybe the order is no longer open")
return None
def get_open_positions(self):
while(True):
try:
return self.ig_service.fetch_open_positions()
except Exception as e:
self.log.info(str(e) + " error occurred when getting open positions")
# resets the connection
self.initialise_connection()
def get_working_orders(self):
while(True):
try:
return self.ig_service.fetch_working_orders()
except Exception as e:
self.log.info(str(e) + " error occurred when getting working orders")
self.initialise_connection()
|
72882
|
from textbox.data.dataloader.abstract_dataloader import AbstractDataLoader
from textbox.data.dataloader.single_sent_dataloader import SingleSentenceDataLoader
from textbox.data.dataloader.paired_sent_dataloader import PairedSentenceDataLoader
from textbox.data.dataloader.attr_sent_dataloader import AttributedSentenceDataLoader
from textbox.data.dataloader.kg_sent_dataloader import KGSentenceDataLoader
from textbox.data.dataloader.wikibio_sent_dataloader import WikiBioSentenceDataLoader
from textbox.data.dataloader.rotowire_sent_dataloader import RotoWireSentenceDataLoader
|
72894
|
from records_mover.url.s3.awscli import aws_cli
from mock import patch, call
import unittest
class TestAwsCli(unittest.TestCase):
@patch("records_mover.url.s3.awscli.dict")
@patch("records_mover.url.s3.awscli.os")
@patch("records_mover.url.s3.awscli.create_clidriver")
def test_aws_cli(self,
mock_create_cli_driver,
mock_os,
mock_dict):
mock_cli_driver = mock_create_cli_driver.return_value
mock_cli_driver.main.return_value = 0
mock_os.environ.copy.return_value = {
'old_value': 'old',
'LC_CTYPE': 'orig_LC_CTYPE'
}
aws_cli('a', 'b', 'c')
mock_dict.assert_called_with(mock_os.environ)
mock_cli_driver.main.assert_called_with(args=('a', 'b', 'c'))
mock_os.environ.update.assert_has_calls([
call({'old_value': 'old', 'LC_CTYPE': 'en_US.UTF'}),
call(mock_dict.return_value)
])
mock_os.environ.clear.assert_called_with()
|
72895
|
from .assetmapper import AssetMapper
from .assetfactory import AssetFactory
from .sqlserver import SqlServerTableMapper
|
72916
|
import torch.nn as nn
import torch.nn.functional as F
import params as P
import utils
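# Small fully connected classification head: flatten the input, FC(300) with ReLU and
# batch norm, dropout, then a final FC layer producing the class scores.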
class Net(nn.Module):
# Layer names
FLAT = 'flat'
FC5 = 'fc5'
RELU5 = 'relu5'
BN5 = 'bn5'
FC6 = 'fc6'
CLASS_SCORES = FC6 # Symbolic name of the layer providing the class scores as output
def __init__(self, input_shape=P.INPUT_SHAPE):
super(Net, self).__init__()
# Shape of the tensors that we expect to receive as input
self.input_shape = input_shape
self.input_size = utils.shape2size(self.input_shape)
# Here we define the layers of our network
# FC Layers
self.fc5 = nn.Linear(self.input_size, 300) # conv_output_size-dimensional input, 300-dimensional output
self.bn5 = nn.BatchNorm1d(300) # Batch Norm layer
self.fc6 = nn.Linear(300, P.NUM_CLASSES) # 300-dimensional input, 10-dimensional output (one per class)
# Here we define the flow of information through the network
def forward(self, x):
out = {}
# Stretch out the feature map before feeding it to the FC layers
flat = x.view(-1, self.input_size)
# Fifth Layer: FC with ReLU activations + batch norm
fc5_out = self.fc5(flat)
relu5_out = F.relu(fc5_out)
bn5_out = self.bn5(relu5_out)
# Sixth Layer: dropout + FC, outputs are the class scores
fc6_out = self.fc6(F.dropout(bn5_out, p=0.5, training=self.training))
# Build dictionary containing outputs from convolutional and FC layers
out[self.FLAT] = flat
out[self.FC5] = fc5_out
out[self.RELU5] = relu5_out
out[self.BN5] = bn5_out
out[self.FC6] = fc6_out
return out
|
72957
|
from array_stack import ArrayStack
def delimiter_matched_v1(expr):
"""Return True if all delimiters are properly match; False otherwise.
>>> delimiter_matched_v1('[(2+x)*(3+y)]')
True
>>> delimiter_matched_v1('{[{(xbcd))]}')
False
"""
left, right = '({[', ')}]'
S = ArrayStack()
for c in expr:
if c in left:
S.push(c)
elif c in right:
if S.is_empty() or right.index(c) != left.index(S.pop()):
return False
return S.is_empty()
def delimiter_matched_v2(expr: str) -> bool:
"""
>>> delimiter_matched_v2('[(2+x)*(3+y)]')
True
>>> delimiter_matched_v2('{[{(xbcd))]}')
False
"""
S = ArrayStack()
d = {")": "(", "]": "[", "}": "{"}
for c in expr:
if c in d.values():
S.push(c)
elif c in d:
if S.is_empty() or d[c] != S.pop():
return False
return S.is_empty()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
72984
|
import urllib.parse
assert urllib.parse.unquote("foo%20bar") == "foo bar"
import urllib.request
with urllib.request.urlopen('https://httpbin.org/headers') as f:
f.read()
# issue 1424
text = """Hello
World"""
assert urllib.parse.urlencode({"text": text}) == "text=Hello%0AWorld"
print('passed all tests')
|
73012
|
from hknweb.academics.views.base_viewset import AcademicEntityViewSet
from hknweb.academics.models import Instructor
from hknweb.academics.serializers import InstructorSerializer
class InstructorViewSet(AcademicEntityViewSet):
queryset = Instructor.objects.all()
serializer_class = InstructorSerializer
|
73035
|
from sevenbridges.meta.fields import (
StringField, DateTimeField, CompoundField, BooleanField
)
from sevenbridges.meta.resource import Resource
from sevenbridges.models.compound.jobs.job_docker import JobDocker
from sevenbridges.models.compound.jobs.job_instance import Instance
from sevenbridges.models.compound.jobs.job_log import Logs
class Job(Resource):
"""
Job resource contains information for a single executed node
in the analysis.
"""
name = StringField(read_only=True)
start_time = DateTimeField(read_only=True)
end_time = DateTimeField(read_only=True)
status = StringField(read_only=True)
command_line = StringField(read_only=True)
retried = BooleanField(read_only=True)
instance = CompoundField(Instance, read_only=True)
docker = CompoundField(JobDocker, read_only=True)
logs = CompoundField(Logs, read_only=True)
def __str__(self):
return f'<Job: name={self.name}, status={self.status}>'
|
73042
|
from ldtcommon import ATTR_SURFACING_PROJECT
from ldtcommon import ATTR_SURFACING_OBJECT
from ldtcommon import TEXTURE_FILE_PATTERN
from ldt import context
from ldtui import qtutils
"""
.. module:: Maya import material
:synopsis: MayaTextureImport Plugin. Imports textureSets to maya Surfacing Projects
.. moduleauthor:: <NAME>
"""
import os
import logging
from functools import partial
from Qt import QtGui, QtWidgets, QtCore
from Qt.QtWidgets import QApplication, QWidget, QLabel, QMainWindow
from yapsy.IPlugin import IPlugin
import ldtutils
import ldtmaya
import ldttextures
logger = logging.getLogger(__name__)
class MayaTextureImport(IPlugin):
"""Plug-in to Import textures to surfacing projects and surfacing objects."""
name = "MayaTextureImport Plugin"
plugin_layout = None
def __init__(self):
"""Check dcc context, and build the ui if context is correct."""
# Load dcc python packages inside a try, to catch the application
# environment, this will be replaced by IPlugin Categories
dcc = context.dcc()
if dcc == 'Maya':
logger.info('MayaTextureImport loaded')
import pymel.core as pm
import ldtmaya
self.build_ui()
else:
logger.warning(
'MayaTextureImport not loaded, dcc libs not found')
self.plugin_layout = QtWidgets.QWidget()
self.label_ui = QtWidgets.QLabel(self.plugin_layout)
self.label_ui.setText(
'MayaTextureImport\nPlugin not available in this application')
def build_ui(self):
"""Build the Plug-in UI and append it to the main ui as a tab."""
self.plugin_layout = QtWidgets.QWidget()
main_layout = QtWidgets.QVBoxLayout()
self.lbl_extension = QtWidgets.QLabel('Find files with pattern')
self.ln_pattern = QtWidgets.QLineEdit('.tex')
self.btn_search_files = QtWidgets.QPushButton(
"Search files in folder"
)
self.form_widget = QtWidgets.QTableWidget(0, 2)
self.form_widget.setWordWrap(True)
# col_headers = ['filepath', 'import', 'Select in Maya']
col_headers = ['filepath', 'import']
self.form_widget.setHorizontalHeaderLabels(col_headers)
self.form_widget.setRowCount(0)
self.form_widget.setColumnWidth(0, 300)
main_layout = QtWidgets.QVBoxLayout()
# Attach widgets to the main layout
main_layout.addWidget(self.lbl_extension)
main_layout.addWidget(self.ln_pattern)
main_layout.addWidget(self.btn_search_files)
main_layout.addWidget(self.form_widget)
# Set main layout
self.plugin_layout.setLayout(main_layout)
self.btn_search_files.clicked.connect(
self.load_textures
)
def load_textures(self):
"""Load textures and populates form."""
search_folder = qtutils.get_folder_path()
if search_folder:
logger.info('Search folder: %s' % search_folder)
file_list = ldtutils.get_files_in_folder(
search_folder, recursive=True, pattern=self.ln_pattern.text())
self.populate_form(file_list)
def populate_form(self, file_list):
"""
Populate form with texture lucidity parsed files.
Args:
file_templates (list): A list of lucidity parsed files, with file_path key added.
"""
texture_finder = ldttextures.TextureFinder(file_list)
udim_file_list = texture_finder.merge_udims()
self.form_widget.setRowCount(len(udim_file_list))
buttons = {}
for num, file_path in enumerate(udim_file_list):
self.form_widget.setCellWidget(
num, 0, QtWidgets.QLabel(file_path))
buttons[num] = QtWidgets.QPushButton('import')
self.form_widget.setCellWidget(
num, 1, buttons[num])
# TODO if there is a file node in the scene with
# the same path, add the option to select it.
# self.form_widget.setCellWidget(
# num, 2, QtWidgets.QPushButton('select'))
            buttons[num].clicked.connect(
                partial(self.import_texture, file_path))
def import_texture(self, file_path):
""" Creates a maya file node with the given file_path."""
file_path = file_path.replace('udim', '<UDIM>')
file_node = ldtmaya.create_file_node(name=os.path.basename(file_path))
file_node.fileTextureName.set(file_path)
|
73043
|
getObject = {
'id': 37401,
'memoryCapacity': 242,
'modifyDate': '',
'name': 'test-dedicated',
'diskCapacity': 1200,
'createDate': '2017-10-16T12:50:23-05:00',
'cpuCount': 56,
'accountId': 1199911
}
getAvailableRouters = [
{'hostname': 'bcr01a.dal05', 'id': 12345},
{'hostname': 'bcr02a.dal05', 'id': 12346},
{'hostname': 'bcr03a.dal05', 'id': 12347},
{'hostname': 'bcr04a.dal05', 'id': 12348}
]
getObjectById = {
'datacenter': {
'id': 12345,
'name': 'dal05',
'longName': 'Dallas 5'
},
'memoryCapacity': 242,
'modifyDate': '2017-11-06T11:38:20-06:00',
'name': 'test-dedicated',
'diskCapacity': 1200,
'backendRouter': {
'domain': 'test.com',
'hostname': 'bcr01a.dal05',
'id': 12345
},
'guestCount': 1,
'cpuCount': 56,
'guests': [{
'domain': 'test.com',
'hostname': 'test-dedicated',
'id': 12345,
'uuid': 'F9329795-4220-4B0A-B970-C86B950667FA'
}],
'billingItem': {
'nextInvoiceTotalRecurringAmount': 1515.556,
'orderItem': {
'id': 12345,
'order': {
'status': 'APPROVED',
'privateCloudOrderFlag': False,
'modifyDate': '2017-11-02T11:42:50-07:00',
'orderQuoteId': '',
'userRecordId': 12345,
'createDate': '2017-11-02T11:40:56-07:00',
'impersonatingUserRecordId': '',
'orderTypeId': 7,
'presaleEventId': '',
'userRecord': {
'username': 'test-dedicated'
},
'id': 12345,
'accountId': 12345
}
},
'id': 12345,
'children': [
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_ram'
},
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_disk'
}
]
},
'id': 12345,
'createDate': '2017-11-02T11:40:56-07:00'
}
deleteObject = True
getGuests = [{
'id': 200,
'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test1.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 2,
'maxMemory': 1024,
'primaryIpAddress': '172.16.240.2',
'globalIdentifier': '1a2b3c-1701',
'primaryBackendIpAddress': '10.45.19.37',
'hourlyBillingFlag': False,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
},
}, {
'id': 202,
'hostname': 'vs-test2',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test2.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 4,
'maxMemory': 4096,
'primaryIpAddress': '172.16.240.7',
'globalIdentifier': '05a8ac-6abf0',
'primaryBackendIpAddress': '10.45.19.35',
'hourlyBillingFlag': True,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
}
}]
|
73065
|
class FileSystemAuditRule(AuditRule):
"""
Represents an abstraction of an access control entry (ACE) that defines an audit rule for a file or directory. This class cannot be inherited.
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""
@staticmethod
def __new__(self,identity,fileSystemRights,*__args):
"""
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""
pass
AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access mask for this rule.
"""
FileSystemRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Security.AccessControl.FileSystemRights flags associated with the current System.Security.AccessControl.FileSystemAuditRule object.
Get: FileSystemRights(self: FileSystemAuditRule) -> FileSystemRights
"""
|
73104
|
import json
from uuid import UUID
from pymongo import MongoClient
data = {}
def _convertForJson(d):
for k,v in d.items():
if isinstance(v, UUID):
d[k] = str(v)
if isinstance(v, list):
v = [str(s) for s in v]
d[k] = v
return d
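# Connect to the local MongoDB artifact database and dump the first 20 artifacts to
# data.json, stringifying UUIDs (and list entries) so they are JSON serializable.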
db = MongoClient().artifact_database
data['gem5Data'] = []
for i in db.artifacts.find(limit=20):
data['gem5Data'].append(_convertForJson(i))
with open('data.json', 'w') as outfile:
json.dump(data['gem5Data'],outfile)
|
73121
|
import sys
import numpy as np
import pandas as pd
from pspy import so_dict, so_map
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
binary = so_map.read_map(d["template"])
if binary.data.ndim > 2:
# Only use temperature
binary.data = binary.data[0]
binary.data = binary.data.astype(np.int16)
binary.data[:] = 1
# Sigurd point sources
if "point_source_file" in d:
print("Adding point sources...")
    df = pd.read_table(d["point_source_file"], escapechar="#", sep=r"\s+")
high_flux_good_SNR = (df.Tflux > d.get("point_source_Tflux", 15)) & (
df.SNR > d.get("point_source_SNR", 5)
)
df = df[high_flux_good_SNR]
coordinates = np.deg2rad([df.dec, df.ra])
mask = so_map.generate_source_mask(binary, coordinates, d.get("point_source_radius", 5.0))
# Monster sources
if "monster_source_file" in d:
print("Adding monster point sources...")
df = pd.read_csv(d["monster_source_file"], comment="#")
for index, row in df.iterrows():
mask.data *= so_map.generate_source_mask(
binary, np.deg2rad([row.dec, row.ra]), row.radius
).data
# Dust
if "dust_file" in d:
print("Adding dust sources...")
dust = so_map.read_map(d["dust_file"])
mask.data *= dust.data
print("Writing mask...")
mask.write_map(d["output_file"])
mask.downgrade(4).plot(file_name=d["output_file"].replace(".fits", ""))
|
73146
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
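# Plot a 2D Ramachandran (phi/psi) histogram for an alanine dipeptide trajectory;
# an optional weights array reweights the histogram bins.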
def plot_2ala_ramachandran(traj, ax=None, weights=None):
import mdtraj as md
    if ax is None:
ax = plt.gca()
if isinstance(weights, np.ndarray):
ax.hist2d(
md.compute_phi(traj)[1].reshape(-1),
md.compute_psi(traj)[1].reshape(-1),
bins=[np.linspace(-np.pi, np.pi, 64), np.linspace(-np.pi, np.pi, 64)],
norm=mpl.colors.LogNorm(),
weights=weights,
)
else:
ax.hist2d(
md.compute_phi(traj)[1].reshape(-1),
md.compute_psi(traj)[1].reshape(-1),
bins=[np.linspace(-np.pi, np.pi, 64), np.linspace(-np.pi, np.pi, 64)],
norm=mpl.colors.LogNorm(),
)
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(-np.pi, np.pi)
ax.set_xlabel(r"$\phi$")
ax.set_ylabel(r"$\psi$")
|
73182
|
import sys, getopt
import json
import requests
from pprint import pprint
def handleRequest(argv):
"""Function to read user request, form http message and send it"""
try:
opts, args = getopt.getopt(argv,"",["help","version=","repo=","org=","branch=","user=","apitoken=","updateversion="])
except getopt.GetoptError:
print 'usage: python deploy_pxscene.py --version=<version> --repo=<repo> --org=<org> --branch=<branch> --user=<user> --apitoken=<apitoken> --updateversion=<true/false>'
exit (1)
    #initialize input values
    version="";
    repo_name="pxCore";
    org_name="";
    user_name="";
    api_token="";
    update_version="false";
    branch="master";
#read input values
for opt, arg in opts:
if opt == "--help":
print 'python deploy_pxscene.py --version=<version> --repo=<repo> --org=<org> --branch=<branch> --user=<user> --apitoken=<apitoken> --updateversion=<true/false>'
exit (1)
elif len(arg) == 0:
print "argument cannot be empty for option", opt
exit (1)
elif opt == "--version":
version=arg;
elif opt == "--repo":
repo_name=arg;
elif opt == "--user":
user_name=arg;
elif opt == "--apitoken":
api_token=arg;
elif opt == "--updateversion":
update_version=arg;
elif opt == "--org":
org_name=arg;
elif opt == "--branch":
branch=arg;
if version=="" or user_name=="" or api_token=="":
print 'usage: python deploy_pxscene.py --version=<version> --repo=<repo> --org=<org> --branch=<branch> --user=<user> --apitoken=<apitoken> --updateversion=<true/false>'
exit (1)
if update_version != "true" and update_version != "false":
print ("please enter proper update version value true/false");
exit (1)
with open('pxscene_deploy_rules.json') as data_file:
data = json.load(data_file)
#populate environment variables
data["request"][unicode("branch")] = unicode(branch);
data["request"][unicode("message")] = unicode("OS X Release build")+" "+unicode(version);
data["request"]["config"][unicode("env")] = {}
data["request"]["config"]["env"][unicode("PX_VERSION")] = unicode(version);
data["request"]["config"]["env"][unicode("REPO_USER_NAME")] = unicode(user_name);
data["request"]["config"]["env"][unicode("REPO_NAME")] = unicode(repo_name);
data["request"]["config"]["env"][unicode("UPDATE_VERSION")] = unicode(update_version);
string = json.dumps(data);
#print the request json data
pprint(string);
tokendata = "token ";
tokendata += str(api_token);
#populate http header
headers = {}
headers["Content-Type" ] = "application/json";
headers["Accept" ] = "application/json";
headers["Travis-API-Version"] = "3";
headers["Authorization"] = tokendata;
url = "https://api.travis-ci.org/repo/" + str(org_name) + "%2F" + str(repo_name) + "/requests";
#send http request
response = requests.post(url, headers=headers, data=string)
print response.content
if __name__ == "__main__":
if len(sys.argv) < 2:
print 'usage: python deploy_pxscene.py --version=<version> --repo=<repo> --org=<org> --branch=<branch> --user=<user> --apitoken=<apitoken> --updateversion=<true/false>'
exit (1)
handleRequest(sys.argv[1:])
|
73207
|
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import Message
from aiogram_dialog import Dialog, Window, DialogManager
from aiogram_dialog.tools import render_transitions
from aiogram_dialog.widgets.input import MessageInput
from aiogram_dialog.widgets.kbd import Next, Back
from aiogram_dialog.widgets.text import Const
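# Minimal three-window linear dialog (first -> second -> last) demonstrating
# aiogram_dialog keyboard navigation, message input and transition-diagram rendering.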
class RenderSG(StatesGroup):
first = State()
second = State()
last = State()
async def on_input(m: Message, dialog: Dialog, manager: DialogManager):
manager.current_context().dialog_data["name"] = m.text
await dialog.next()
dialog = Dialog(
Window(
Const("1. First"),
Next(),
state=RenderSG.first,
),
Window(
Const("2. Second"),
Back(),
MessageInput(on_input),
state=RenderSG.second,
),
Window(
Const("3. Last"),
Back(),
state=RenderSG.last,
),
)
# this is diagram rendering
render_transitions([dialog])
|
73220
|
import unittest
from lib import Monitor
from cloudasr.test_doubles import PollerSpy
from cloudasr.messages.helpers import *
class TestMonitor(unittest.TestCase):
def setUp(self):
self.poller = PollerSpy()
self.scale_workers = ScaleWorkersSpy()
self.create_poller = lambda: self.poller
self.monitor = Monitor(self.create_poller, self.emit, self.scale_workers, self.poller.has_next_message)
self.emmited_messages = []
def test_monitor_forwards_messages_to_socketio(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "STARTED", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString(),
]
self.run_monitor(messages)
expected_messages = [
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "STARTED", "time": 1},
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "WORKING", "time": 2},
]
self.assertThatMonitorForwardedMessages(expected_messages)
def test_monitor_saves_worker_statuses(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "STARTED", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "WORKING", 2).SerializeToString(),
]
self.run_monitor(messages)
expected_messages = [
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "STARTED", "time": 1},
{"address": "tcp://127.0.0.1:2", "model": "en-GB", "status": "WORKING", "time": 2},
]
self.assertEqual(expected_messages, self.monitor.get_statuses())
def test_monitor_will_add_new_workers_when_all_workers_are_working(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [
{"en-GB": +1}
]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def test_monitor_will_not_add_new_workers_when_it_is_currently_adding_new_workers(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [{"en-GB": +1}, {}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def test_monitor_will_add_new_workers_when_it_finished_scaling_and_it_needs_new_workers(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "STARTED", 3).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 4).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "WORKING", 5).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [{"en-GB": +1}, {}, {}, {}, {"en-GB": +1}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def run_monitor(self, messages):
self.poller.add_messages([{"master": message} for message in messages])
self.monitor.run()
def assertThatMonitorForwardedMessages(self, messages):
forwarded_messages = self.emmited_messages
self.assertEqual(messages, forwarded_messages)
def emit(self, message):
self.emmited_messages.append(message)
class ScaleWorkersSpy:
def __init__(self):
self.scaling_history = []
def __call__(self, commands):
self.scaling_history.append(commands)
|
73260
|
import os
import yaml
import argparse
import datetime
import shutil
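# Copy intermediate pipeline outputs (preprocessing, training, interpretability) into a
# timestamped folder on the persistent blob store, with business-facing files grouped in
# a business_outputs subfolder.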
parser = argparse.ArgumentParser()
parser.add_argument('--preprocessedoutputdir', type=str, help="intermediate preprocessed pipeline data directory")
parser.add_argument('--trainoutputdir', type=str, help="intermediate training pipeline data directory")
parser.add_argument('--interpretabilityoutputdir', type=str, help="intermediate interpretability pipeline data directory")
parser.add_argument('--outputsdir', type=str, help="persistent outputs directory on blob")
args = parser.parse_args()
cur_date = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# Get paths of data on intermediate pipeline storage
cfg = yaml.full_load(open("./config.yml", 'r')) # Load config data
train_set_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['TRAIN_SET'].split('/')[-1]
test_set_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['TEST_SET'].split('/')[-1]
data_info_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['DATA_INFO'].split('/')[-1]
ordinal_col_transformer_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['ORDINAL_COL_TRANSFORMER'].split('/')[-1]
ohe_col_transformer_mv_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['OHE_COL_TRANSFORMER_MV'].split('/')[-1]
ohe_col_transformer_sv_path = args.preprocessedoutputdir + '/' + cfg['PATHS']['OHE_COL_TRANSFORMER_SV'].split('/')[-1]
scaler_col_transformer_path = args.trainoutputdir + '/' + cfg['PATHS']['SCALER_COL_TRANSFORMER'].split('/')[-1]
model_to_load_path = args.trainoutputdir + '/' + cfg['PATHS']['MODEL_TO_LOAD'].split('/')[-1]
logs_path = args.trainoutputdir + '/logs'
multi_train_test_metrics_path = args.trainoutputdir + '/' + cfg['PATHS']['MULTI_TRAIN_TEST_METRICS'].split('/')[-1]
lime_explainer_path = args.interpretabilityoutputdir + '/' + cfg['PATHS']['LIME_EXPLAINER'].split('/')[-1]
lime_submodular_pick_path = args.interpretabilityoutputdir + '/' + cfg['PATHS']['LIME_SUBMODULAR_PICK'].split('/')[-1]
submod_pick_image_path = args.interpretabilityoutputdir + '/' + cfg['PATHS']['IMAGES'].split('/')[-1] + '/submodular_pick.png'
# Build destination paths in output folder on blob datastore
destination_dir = args.outputsdir + cur_date + '/'
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
business_outputs_dir = destination_dir + 'business_outputs/'
if not os.path.exists(business_outputs_dir):
os.makedirs(business_outputs_dir)
# Move all outputs from intermediate data to outputs folder on blob
shutil.copy(train_set_path, destination_dir)
shutil.copy(test_set_path, destination_dir)
shutil.copy(data_info_path, destination_dir)
shutil.copy(ordinal_col_transformer_path, destination_dir)
shutil.copy(ohe_col_transformer_mv_path, destination_dir)
shutil.copy(ohe_col_transformer_sv_path, destination_dir)
shutil.copy(scaler_col_transformer_path, destination_dir)
shutil.copy(model_to_load_path, destination_dir)
shutil.copytree(logs_path, destination_dir + 'logs')
shutil.copy(multi_train_test_metrics_path, business_outputs_dir)
shutil.copy(lime_explainer_path, destination_dir)
shutil.copy(lime_submodular_pick_path, destination_dir)
shutil.copy(submod_pick_image_path, business_outputs_dir)
|
73264
|
import asyncio
import random
import pytest
import uuid
from collections import defaultdict
import aiotask_context as context
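# Each test sets a value in the task-local context and asserts that it is propagated
# (and kept isolated per task) across ensure_future, wait_for and gather.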
@asyncio.coroutine
def dummy3():
yield from asyncio.sleep(random.uniform(0, 2))
return context.get("key")
@asyncio.coroutine
def dummy2(a, b):
yield from asyncio.sleep(random.uniform(0, 2))
res = context.get("key")
yield from asyncio.sleep(random.uniform(0, 2))
res1 = yield from dummy3()
assert res == res1
return a, b, res
@asyncio.coroutine
def dummy1(n_tasks):
context.set("key", str(uuid.uuid4()))
tasks = [
asyncio.ensure_future(
dummy2(id(context.asyncio_current_task()), n)) for n in range(n_tasks)]
results = yield from asyncio.gather(*tasks)
info = defaultdict(list)
for taskid, n, key in results:
info[key].append([taskid, n])
return info
@pytest.mark.asyncio
@asyncio.coroutine
def test_ensure_future_concurrent():
n_tasks = 10
results = yield from asyncio.gather(*[dummy1(n_tasks=n_tasks) for x in range(1000)])
for r in results:
assert len(r) == 1
for key, value in r.items():
assert len(value) == n_tasks
@pytest.mark.asyncio
@asyncio.coroutine
def test_ensurefuture_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.ensure_future(change_context())
assert context.get("key") == "what"
assert context.get("other") == "data"
@pytest.mark.asyncio
@asyncio.coroutine
def test_waitfor_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.wait_for(change_context(), 1)
assert context.get("key") == "what"
assert context.get("other") == "data"
@pytest.mark.asyncio
@asyncio.coroutine
def test_gather_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.gather(change_context())
assert context.get("key") == "what"
assert context.get("other") == "data"
|
73297
|
from binaryninja_cortex.platforms import MCU
class Chip(MCU):
NAME="STM32F3"
ROM_OFF=0x08000000
RAM_OFF=0x20000000
IRQ=MCU.IRQ+ [
"NVIC_WWDG_IRQ",
"NVIC_PVD_IRQ",
"NVIC_TAMP_STAMP_IRQ",
"NVIC_RTC_WKUP_IRQ",
"NVIC_FLASH_IRQ",
"NVIC_RCC_IRQ",
"NVIC_EXTI0_IRQ",
"NVIC_EXTI1_IRQ",
"NVIC_EXTI2_TSC_IRQ",
"NVIC_EXTI3_IRQ",
"NVIC_EXTI4_IRQ",
"NVIC_DMA1_CHANNEL1_IRQ",
"NVIC_DMA1_CHANNEL2_IRQ",
"NVIC_DMA1_CHANNEL3_IRQ",
"NVIC_DMA1_CHANNEL4_IRQ",
"NVIC_DMA1_CHANNEL5_IRQ",
"NVIC_DMA1_CHANNEL6_IRQ",
"NVIC_DMA1_CHANNEL7_IRQ",
"NVIC_ADC1_2_IRQ",
"NVIC_USB_HP_CAN1_TX_IRQ",
"NVIC_USB_LP_CAN1_RX0_IRQ",
"NVIC_CAN1_RX1_IRQ",
"NVIC_CAN1_SCE_IRQ",
"NVIC_EXTI9_5_IRQ",
"NVIC_TIM1_BRK_TIM15_IRQ",
"NVIC_TIM1_UP_TIM16_IRQ",
"NVIC_TIM1_TRG_COM_TIM17_IRQ",
"NVIC_TIM1_CC_IRQ",
"NVIC_TIM2_IRQ",
"NVIC_TIM3_IRQ",
"NVIC_TIM4_IRQ",
"NVIC_I2C1_EV_EXTI23_IRQ",
"NVIC_I2C1_ER_IRQ",
"NVIC_I2C2_EV_EXTI24_IRQ",
"NVIC_I2C2_ER_IRQ",
"NVIC_SPI1_IRQ",
"NVIC_SPI2_IRQ",
"NVIC_USART1_EXTI25_IRQ",
"NVIC_USART2_EXTI26_IRQ",
"NVIC_USART3_EXTI28_IRQ",
"NVIC_EXTI15_10_IRQ",
"NVIC_RTC_ALARM_IRQ",
"NVIC_USB_WKUP_A_IRQ",
"NVIC_TIM8_BRK_IRQ",
"NVIC_TIM8_UP_IRQ",
"NVIC_TIM8_TRG_COM_IRQ",
"NVIC_TIM8_CC_IRQ",
"NVIC_ADC3_IRQ",
"NVIC_RESERVED_1_IRQ",
"NVIC_RESERVED_2_IRQ",
"NVIC_RESERVED_3_IRQ",
"NVIC_SPI3_IRQ",
"NVIC_UART4_EXTI34_IRQ",
"NVIC_UART5_EXTI35_IRQ",
"NVIC_TIM6_DAC_IRQ",
"NVIC_TIM7_IRQ",
"NVIC_DMA2_CHANNEL1_IRQ",
"NVIC_DMA2_CHANNEL2_IRQ",
"NVIC_DMA2_CHANNEL3_IRQ",
"NVIC_DMA2_CHANNEL4_IRQ",
"NVIC_DMA2_CHANNEL5_IRQ",
"NVIC_ETH_IRQ",
"NVIC_RESERVED_4_IRQ",
"NVIC_RESERVED_5_IRQ",
"NVIC_COMP123_IRQ",
"NVIC_COMP456_IRQ",
"NVIC_COMP7_IRQ",
"NVIC_HRTIM_MASTER_IRQ",
"NVIC_HRTIM_TIMA_IRQ",
"NVIC_HRTIM_TIMB_IRQ",
"NVIC_HRTIM_TIMC_IRQ",
"NVIC_HRTIM_TIMD_IRQ",
"NVIC_HRTIM_TIME_IRQ",
"NVIC_HRTIM_FLT_IRQ",
"NVIC_USB_HP_IRQ",
"NVIC_USB_LP_IRQ",
"NVIC_USB_WKUP_IRQ",
"NVIC_RESERVED_13_IRQ",
"NVIC_RESERVED_14_IRQ",
"NVIC_RESERVED_15_IRQ",
"NVIC_RESERVED_16_IRQ",
]
|
73313
|
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import random
from phi.fluidformat import *
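# Helpers that rasterize text glyphs and library shapes into 4D density arrays
# (batch, y, x, channel); the text helpers normalize the result to a given total_content.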
def text_to_pixels(text, size=10, binary=False, as_numpy_array=True):
image = Image.new("1" if binary else "L", (len(text)*size*3//4, size), 0)
draw = ImageDraw.Draw(image)
try:
font = ImageFont.truetype("arial.ttf", size)
except:
font = ImageFont.truetype('Pillow/Tests/fonts/DejaVuSans.ttf', size=size)
draw.text((0,0), text, fill=255, font=font)
del draw
if as_numpy_array:
return np.array(image).astype(np.float32) / 255.0
else:
return image
# image = text_to_pixels("The", as_numpy_array=False)
# image.save("testimg.png", "PNG")
def alphabet_soup(shape, count, margin=1, total_content=100, fontsize=10):
if len(shape) != 4: raise ValueError("shape must be 4D")
array = np.zeros(shape)
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for batch in range(shape[0]):
for i in range(count):
letter = letters[random.randint(0, len(letters)-1)]
tile = text_to_pixels(letter, fontsize)#[::-1, :]
y = random.randint(margin, shape[1] - margin - tile.shape[0] - 2)
x = random.randint(margin, shape[2] - margin - tile.shape[1] - 2)
array[batch, y:(y+tile.shape[0]), x:(x+tile.shape[1]), 0] += tile
return array.astype(np.float32) * total_content / np.sum(array)
def random_word(shape, min_count, max_count, margin=1, total_content=100, fontsize=10, y=40):
if len(shape) != 4: raise ValueError("shape must be 4D")
array = np.zeros(shape)
letters = '<KEY>'
for b in range(shape[0]):
count = random.randint(min_count, max_count)
for i in range(count):
letter = letters[random.randint(0, len(letters)-1)]
tile = text_to_pixels(letter, fontsize)#[::-1, :]
x = random.randint(margin, shape[2] - margin - tile.shape[1] - 2)
array[b, y:(y+tile.shape[0]), x:(x+tile.shape[1]), 0] += tile
return array.astype(np.float32) * total_content / np.sum(array)
def single_shape(shape, scene, margin=1, fluid_mask=None):
if len(shape) != 4: raise ValueError("shape must be 4D")
array = np.zeros(shape)
for batch in range(shape[0]):
img = scene.read_array("Shape", random.choice(scene.indices))[0,...]
while True:
y = random.randint(margin, shape[1] - margin - img.shape[0] - 2)
x = random.randint(margin, shape[2] - margin - img.shape[1] - 2)
array[batch, y:(y + img.shape[0]), x:(x + img.shape[1]), :] = img
if _all_density_valid(array[batch:batch+1,...], fluid_mask):
break
else:
array[batch,...] = 0
return array.astype(np.float32)
def _all_density_valid(density, fluid_mask):
if fluid_mask is None:
return True
return np.sum(density * fluid_mask) == np.sum(density)
def push_density_inside(density_tile, tile_location, fluid_mask): # (y, x)
"""
Tries to adjust the tile_location so that the density_tile does not overlap with any obstacles.
:param density_tile: 2D binary array, representing the density mask to be shifted
:param tile_location: the initial location of the tile, (1D array with 2 values)
:param fluid_mask: 2D binary array (must be larger than the tile)
:return: the shifted location (1D array with 2 values)
"""
x, y = np.meshgrid(*[np.linspace(-1, 1, d) for d in density_tile.shape])
    location = np.array(tile_location, dtype=int)
def cropped_mask(location):
slices = [slice(location[i], location[i]+density_tile.shape[i]) for i in range(2)]
        return fluid_mask[tuple(slices)]
while True:
cropped_fluid_mask = cropped_mask(location)
overlap = density_tile * (1-cropped_fluid_mask)
if np.sum(overlap) == 0:
return location
update = -np.sign([np.sum(overlap * y), np.sum(overlap * x)]).astype(np.int)
if np.all(update == 0):
raise ValueError("Failed to push tile with initial location %s out of obstacle" % (tile_location,))
location += update
# print(alphabet_soup([1, 16, 16, 1], 1000)[0,:,:,0])
# result = single_shape((2, 64, 64, 1), scene_at("data/shapelib/sim_000000"))
# print(result.shape, np.sum(result))
# Test push_density_inside
# fluid_mask = np.ones([64, 64])
# fluid_mask[10:20, 10:20] = 0
# density_tile = np.ones([5,5])
# tile_location = (18,9)
# print(push_density_inside(density_tile, tile_location, fluid_mask))
|
73334
|
Experiment(description='Testing the pure linear kernel',
data_dir='../data/tsdlr/',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=500,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2013-10-01-pure-lin/',
iters=250,
base_kernels='SE,PureLin,Const,Exp,Fourier,Noise',
zero_mean=True,
random_seed=1,
period_heuristic=5,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
model_noise=True,
no_noise=True)
|
73335
|
import crypt
from base64 import encodestring
try:
from django.conf import settings
_CHARSET = settings.DEFAULT_CHARSET
_LDAP_SALT_LENGHT = settings.LDAP_PASSWORD_SALT_SIZE
except:
_CHARSET = 'utf-8'
_LDAP_SALT_LENGHT = 8
from hashlib import (sha1,
sha256,
sha384,
sha512)
from passlib.hash import (ldap_plaintext,
lmhash,
nthash,
ldap_md5,
ldap_md5_crypt,
ldap_salted_md5,
ldap_sha1,
ldap_salted_sha1,
atlassian_pbkdf2_sha1,
ldap_md5_crypt,
ldap_sha256_crypt,
ldap_sha512_crypt)
from os import urandom
# _LDAP_SALT_LENGHT above sets how many bytes long the generated salt is
def encode_secret(enc, new_value=None):
"""
https://docs.python.org/3.5/library/hashlib.html
http://passlib.readthedocs.io/en/stable/lib/passlib.hash.ldap_std.html
"""
password_renewed = None
if enc == 'Plaintext':
password_renewed = ldap_plaintext.hash(new_value)
elif enc == 'NT':
password_renewed = nthash.hash(new_value)
elif enc == 'LM':
password_renewed = lmhash.hash(new_value)
elif enc == 'MD5':
password_renewed = ldap_md5.hash(new_value.encode(_CHARSET))
elif enc == 'SMD5':
password_renewed = ldap_salted_md5.hash(new_value.encode(_CHARSET))
elif enc == 'SHA':
password_renewed = ldap_sha1.hash(new_value.encode(_CHARSET))
elif enc == 'SSHA':
salt = urandom(8)
hash = sha1(new_value.encode(_CHARSET))
hash.update(salt)
hash_encoded = encodestring(hash.digest() + salt)
password_renewed = hash_encoded.decode(_CHARSET)[:-1]
        password_renewed = '{%s}%s' % (enc, password_renewed)
elif enc == 'SHA256':
password_renewed = sha256(new_value.encode(_CHARSET)).digest()
password_renewed = '{%s}%s' % (enc, encodestring(password_renewed).decode(_CHARSET)[:-1])
elif enc == 'SSHA256':
salt = urandom(_LDAP_SALT_LENGHT)
hash = sha256(new_value.encode(_CHARSET))
hash.update(salt)
hash_encoded = encodestring(hash.digest() + salt)
password_renewed = hash_encoded.decode(_CHARSET)[:-1]
        password_renewed = '{%s}%s' % (enc, password_renewed)
elif enc == 'SHA384':
        password_renewed = sha384(new_value.encode(_CHARSET)).digest()
        password_renewed = '{%s}%s' % (enc, encodestring(password_renewed).decode(_CHARSET)[:-1])
elif enc == 'SSHA384':
salt = urandom(_LDAP_SALT_LENGHT)
hash = sha384(new_value.encode(_CHARSET))
hash.update(salt)
hash_encoded = encodestring(hash.digest() + salt)
password_renewed = hash_encoded.decode(_CHARSET)[:-1]
        password_renewed = '{%s}%s' % (enc, password_renewed)
elif enc == 'SHA512':
        password_renewed = sha512(new_value.encode(_CHARSET)).digest()
password_renewed = '{%s}%s' % (enc, encodestring(password_renewed).decode(_CHARSET)[:-1])
elif enc == 'SSHA512':
salt = urandom(_LDAP_SALT_LENGHT)
hash = sha512(new_value.encode(_CHARSET))
hash.update(salt)
hash_encoded = encodestring(hash.digest() + salt)
password_renewed = hash_encoded.decode(_CHARSET)[:-1]
        password_renewed = '{%s}%s' % (enc, password_renewed)
elif enc == 'PKCS5S2':
        return atlassian_pbkdf2_sha1.hash(new_value)
elif enc == 'CRYPT':
password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_CRYPT))
password_renewed = '{%s}%s' % (enc, password_renewed)
elif enc == 'CRYPT-MD5':
# this worked too
# return ldap_md5_crypt.encrypt(new_value)
password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_MD5))
password_renewed = '{CRYPT}%s' % (password_renewed)
elif enc == 'CRYPT-SHA-256':
password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_SHA256))
password_renewed = '{CRYPT}%s' % (password_renewed)
elif enc == 'CRYPT-SHA-512':
password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_SHA512))
password_renewed = '{CRYPT}%s' % (password_renewed)
return password_renewed
def test_encoding_secrets():
for i in settings.SECRET_PASSWD_TYPE:
p = encode_secret(i, 'zio')
print(i, ':', p)
# additionals
for i in ['NT', 'LM']:
p = encode_secret(i, 'zio')
print(i, ':', p)
if __name__ == '__main__':
test_encoding_secrets()
|
73378
|
import json
import logging
from django.http import HttpResponse
from pollaris.app.models import SearchLog
def log_failed_search(request):
"""API route to log searches that failed on FE to the Pollaris DB"""
body = json.loads(request.body)
logging.info(f"Logging failed search: {body}")
address_json = body.pop("address_entered", {})
status = body.pop("status", "")
if not address_json or not status:
logging.error(
"address_entered and status are required in failed search log request"
)
log = SearchLog(
success=False,
search_status=status,
referrer=request.META.get("HTTP_REFERER"),
other_data={},
)
add_address_data(log, address_json)
add_metadata(log, body)
log.save()
return HttpResponse(status=204)
def add_metadata(log, metadata):
if not metadata:
return
# Copy metadata so we don't alter the original
logging_metadata = metadata.copy()
if logging_metadata.get("heap_id"):
log.heap_id = logging_metadata.pop("heap_id")
else:
# If this request comes from Mobile Commons instead of the Web UI, use phone number as main person identifier
log.heap_id = logging_metadata.get("phone_number")
log.autocomplete_selected = logging_metadata.pop("autocomplete_selected", None)
log.source = logging_metadata.pop("source", None)
log.search_id = logging_metadata.pop("pollaris_search_id", None)
log.other_data.update(logging_metadata)
def add_address_data(search_log, address_json=None, search_string=None):
if not search_string:
search_string = address_json.get("search_string")
if search_string:
search_log.search_string = search_string[:1000]
if address_json:
search_log.street_number = address_json.get("street_number")
search_log.street = address_json.get("street")
search_log.county = address_json.get("county")
search_log.city = address_json.get("city")
search_log.state_code = address_json.get("state")
search_log.zip5 = address_json.get("zip5")
search_log.zip9 = address_json.get("zip9")
search_log.other_data["latitude"] = address_json.get("latitude")
search_log.other_data["longitude"] = address_json.get("longitude")
|
73397
|
from conans import ConanFile, CMake, tools
import os
class LibmikmodConan(ConanFile):
name = "libmikmod"
description = "Module player and library supporting many formats, including mod, s3m, it, and xm."
topics = ("libmikmod", "audio")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://mikmod.sourceforge.net"
license = "LGPL-2.1-or-later"
exports_sources = ["patches/*", "CMakeLists.txt"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_dsound": [True, False],
"with_mmsound": [True, False],
"with_alsa": [True, False],
"with_oss": [True, False],
"with_pulse": [True, False],
"with_coreaudio": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"with_dsound": True,
"with_mmsound": True,
"with_alsa": True,
"with_oss": True,
"with_pulse": True,
"with_coreaudio": True
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
else:
del self.options.with_dsound
del self.options.with_mmsound
if self.settings.os != "Linux":
del self.options.with_alsa
# Non-Apple Unices
if self.settings.os not in ["Linux", "FreeBSD"]:
del self.options.with_oss
del self.options.with_pulse
        # CoreAudio is only available on Apple platforms
        if not tools.is_apple_os(self.settings.os):
del self.options.with_coreaudio
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def requirements(self):
if self.settings.os == "Linux":
if self.options.with_alsa:
self.requires("libalsa/1.2.4")
if self.options.with_pulse:
self.requires("pulseaudio/13.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self, set_cmake_flags=True)
self._cmake.definitions["ENABLE_STATIC"] = not self.options.shared
self._cmake.definitions["ENABLE_DOC"] = False
self._cmake.definitions["ENABLE_DSOUND"] = self.options.get_safe("with_dsound", False)
self._cmake.definitions["ENABLE_MMSOUND"] = self.options.get_safe("with_mmsound", False)
self._cmake.definitions["ENABLE_ALSA"] = self.options.get_safe("with_alsa", False)
self._cmake.definitions["ENABLE_OSS"] = self.options.get_safe("with_oss", False)
self._cmake.definitions["ENABLE_PULSE"] = self.options.get_safe("with_pulse", False)
self._cmake.definitions["ENABLE_COREAUDIO"] = self.options.get_safe("with_coreaudio", False)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"CMAKE_SOURCE_DIR",
"PROJECT_SOURCE_DIR")
# Ensure missing dependencies yields errors
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"MESSAGE(WARNING",
"MESSAGE(FATAL_ERROR")
tools.replace_in_file(os.path.join(self._source_subfolder, "drivers", "drv_alsa.c"),
"alsa_pcm_close(pcm_h);",
"if (pcm_h) alsa_pcm_close(pcm_h);")
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.LESSER", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
os.remove(os.path.join(self.package_folder, "bin", "libmikmod-config"))
if not self.options.shared:
tools.rmdir(os.path.join(self.package_folder, "bin"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if not self.options.shared:
self.cpp_info.defines = ["MIKMOD_STATIC"]
self.cpp_info.filenames["pkg_config"] = "libmikmod"
if self.options.get_safe("with_dsound"):
self.cpp_info.system_libs.append("dsound")
if self.options.get_safe("with_mmsound"):
self.cpp_info.system_libs.append("winmm")
if self.options.get_safe("with_coreaudio"):
self.cpp_info.frameworks.append("CoreAudio")
|
73437
|
from collections import OrderedDict
class MyObj(object):
b = 1
a = 2
def __init__(self):
object.__setattr__(self, '_attrs', OrderedDict())
self.c = 1
self.d = 2
def __setattr__(self, key, value):
assert key != '_attrs'
self._attrs[key] = value
def __getattr__(self, item):
try:
return self._attrs[item]
except KeyError:
return self.__class__.__dict__[item]
@property
def __dict__(self):
return self._attrs
a = MyObj()
a.e = 3
print(a.__dict__)
print(MyObj.__dict__)
print(a.a)
|
73456
|
import pytest
from plenum.common.exceptions import MissingNodeOp, InvalidNodeOp
from plenum.common.messages.fields import NonNegativeNumberField, AnyValueField, HexField, BooleanField, Base58Field
from plenum.common.messages.message_base import MessageBase
from plenum.common.messages.node_message_factory import MessageFactory, NodeMessageFactory
from plenum.test.input_validation.stub_messages import Message1, Message2, Message3, Message4
@pytest.fixture
def factory():
return MessageFactory('plenum.test.input_validation.stub_messages')
def test_message_factory_module_is_not_found_fails():
with pytest.raises(ImportError):
MessageFactory('foo.bar')
def test_message_factory_classes_not_found_fails():
with pytest.raises(ValueError) as excinfo:
# TODO assumes that __init__ won't import any
# MessageBase child classes
MessageFactory('plenum.test.input_validation.__init__')
assert "no messages classes found" in str(excinfo.value)
def test_message_factory_missed_op_fails(factory):
msg = {'a': 0, 'b': 'bar'}
with pytest.raises(MissingNodeOp):
factory.get_instance(**msg)
def test_message_factory_invalid_op_fails(factory):
msg = {'op': 'unknown_op', 'a': 0, 'b': 'bar'}
with pytest.raises(InvalidNodeOp):
factory.get_instance(**msg)
def test_message_factory_stub_module_is_loaded(factory):
msg = {'op': 'Message1', 'a': 0, 'b': 'bar'}
assert isinstance(factory.get_instance(**msg), Message1)
def test_message_factory_set_non_message_class_fails(factory):
class NonMessageClass:
pass
with pytest.raises(ValueError):
factory.set_message_class(NonMessageClass)
def test_message_factory_set_message_class_can_add_message_class(factory):
class ANewMessageClass(MessageBase):
typename = 'NewMessage'
schema = (
('a', NonNegativeNumberField()),
)
factory.set_message_class(ANewMessageClass)
msg = {'op': 'NewMessage', 'a': 0}
assert isinstance(factory.get_instance(**msg), ANewMessageClass)
def test_node_message_factory_module_is_loaded():
NodeMessageFactory()
def test_message_factory_can_replace_field(factory):
# check precondition
msg = {'op': 'Message2', 'a': 0, 'b': 'foo'}
assert isinstance(factory.get_instance(**msg), Message2)
factory.update_schemas_by_field_type(AnyValueField, NonNegativeNumberField)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'int', got 'str'")
def test_message_factory_can_replace_iterable_field(factory):
# check precondition
msg = {'op': 'Message3', 'a': 0, 'b': [True, False]}
assert isinstance(factory.get_instance(**msg), Message3)
factory.update_schemas_by_field_type(BooleanField, Base58Field)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'str', got 'bool'")
def test_message_factory_can_replace_map_field(factory):
# check precondition
msg = {'op': 'Message4', 'a': 0, 'b': {'123': 'abc'}}
assert isinstance(factory.get_instance(**msg), Message4)
factory.update_schemas_by_field_type(HexField, NonNegativeNumberField)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'int', got 'str'")
|
73462
|
from ...accounts import Account, account_factory
import tempfile
import os
import pickle
from os import listdir
from os.path import isfile, join
class LocalFileSystemAccountAdapter():
def __init__(self, root=None):
if root is None: root = tempfile.gettempdir()
if not os.path.exists(root+"/accounts/"):
os.makedirs(root+"/accounts/")
self.root = root
def get_account(self, account_id: str, current_date=None):
with open(self.root + "/accounts/" + account_id + ".pickle", 'rb') as f:
return pickle.load(file=f)
    def has_account(self, account_id: str, current_date=None):
        try:
            with open(self.root + "/accounts/" + account_id + ".pickle", 'rb') as f:
                pickle.load(file=f)
            return True
        except Exception:
            return False
def put_account(self, account: Account, current_date=None):
with open(self.root + "/accounts/" + account.account_id + ".pickle", 'wb') as f:
pickle.dump(account, file=f)
def get_account_ids(self, current_date=None):
mypath = self.root + "/accounts/"
return [f.split(".")[0] for f in listdir(mypath) if isfile(join(mypath, f))]
def delete_account(self, account, current_date=None):
try:
os.remove(self.root + "/accounts/" + account_factory(account).account_id + ".pickle")
        except Exception:
pass
def delete_accounts(self, accounts, current_date=None):
[self.delete_account(account) for account in accounts]
|
73471
|
from setuptools import setup, find_packages
from dexy.version import DEXY_VERSION
import platform
is_windows = platform.system() == 'Windows'
if is_windows:
os_specific_requires = []
else:
os_specific_requires = ['pexpect']
setup(
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Documentation",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Documentation",
"Topic :: Text Processing",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: LaTeX"
],
description='Document Automation',
### "entry-points"
entry_points = {
'console_scripts' : [
'dexy = dexy.commands:run'
],
'pygments.lexers' : [
'rst+django = dexy.filters.utils:RstDjangoLexer'
]
},
### @end
include_package_data = True,
install_requires = os_specific_requires + [
# for internal dexy use or used in many common plugins
'BeautifulSoup4',
'PyYAML',
'cashew>=0.4.1',
'chardet',
'inflection>=0.2.0',
'jinja2',
'ply>=3.4',
'pygments',
'python3-modargs',
'requests>=0.10.6',
# for convenience of running additional filters
'Markdown',
'docutils'
],
name='dexy',
packages=find_packages(),
url='http://dexy.it',
version=DEXY_VERSION
)
|
73486
|
import json
import logging.config
import os
import re
from string import Template
from jans.pycloudlib import get_manager
from jans.pycloudlib.persistence import render_couchbase_properties
from jans.pycloudlib.persistence import render_base_properties
from jans.pycloudlib.persistence import render_hybrid_properties
from jans.pycloudlib.persistence import render_ldap_properties
from jans.pycloudlib.persistence import render_salt
from jans.pycloudlib.persistence import sync_couchbase_truststore
from jans.pycloudlib.persistence import sync_ldap_truststore
from jans.pycloudlib.persistence import render_sql_properties
from jans.pycloudlib.persistence import render_spanner_properties
from jans.pycloudlib.persistence.utils import PersistenceMapper
from jans.pycloudlib.utils import cert_to_truststore
from settings import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("entrypoint")
manager = get_manager()
def modify_jetty_xml():
fn = "/opt/jetty/etc/jetty.xml"
with open(fn) as f:
txt = f.read()
# disable contexts
updates = re.sub(
r'<New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler"/>',
r'<New id="DefaultHandler" class="org.eclipse.jetty.server.handler.DefaultHandler">\n\t\t\t\t <Set name="showContexts">false</Set>\n\t\t\t </New>',
txt,
flags=re.DOTALL | re.M,
)
with open(fn, "w") as f:
f.write(updates)
def modify_webdefault_xml():
fn = "/opt/jetty/etc/webdefault.xml"
with open(fn) as f:
txt = f.read()
# disable dirAllowed
updates = re.sub(
r'(<param-name>dirAllowed</param-name>)(\s*)(<param-value>)true(</param-value>)',
r'\1\2\3false\4',
txt,
flags=re.DOTALL | re.M,
)
with open(fn, "w") as f:
f.write(updates)
def main():
persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap")
render_salt(manager, "/app/templates/salt.tmpl", "/etc/jans/conf/salt")
render_base_properties("/app/templates/jans.properties.tmpl", "/etc/jans/conf/jans.properties")
mapper = PersistenceMapper()
persistence_groups = mapper.groups()
if persistence_type == "hybrid":
render_hybrid_properties("/etc/jans/conf/jans-hybrid.properties")
if "ldap" in persistence_groups:
render_ldap_properties(
manager,
"/app/templates/jans-ldap.properties.tmpl",
"/etc/jans/conf/jans-ldap.properties",
)
sync_ldap_truststore(manager)
if "couchbase" in persistence_groups:
render_couchbase_properties(
manager,
"/app/templates/jans-couchbase.properties.tmpl",
"/etc/jans/conf/jans-couchbase.properties",
)
sync_couchbase_truststore(manager)
if "sql" in persistence_groups:
render_sql_properties(
manager,
"/app/templates/jans-sql.properties.tmpl",
"/etc/jans/conf/jans-sql.properties",
)
if "spanner" in persistence_groups:
render_spanner_properties(
manager,
"/app/templates/jans-spanner.properties.tmpl",
"/etc/jans/conf/jans-spanner.properties",
)
if not os.path.isfile("/etc/certs/web_https.crt"):
manager.secret.to_file("ssl_cert", "/etc/certs/web_https.crt")
cert_to_truststore(
"web_https",
"/etc/certs/web_https.crt",
"/usr/java/latest/jre/lib/security/cacerts",
"changeit",
)
modify_jetty_xml()
modify_webdefault_xml()
configure_logging()
def configure_logging():
# default config
config = {
"fido2_log_target": "STDOUT",
"fido2_log_level": "INFO",
"persistence_log_target": "FILE",
"persistence_log_level": "INFO",
}
# pre-populate custom config; format is JSON string of ``dict``
try:
custom_config = json.loads(os.environ.get("CN_FIDO2_APP_LOGGERS", "{}"))
except json.decoder.JSONDecodeError as exc:
logger.warning(f"Unable to load logging configuration from environment variable; reason={exc}; fallback to defaults")
custom_config = {}
# ensure custom config is ``dict`` type
if not isinstance(custom_config, dict):
logger.warning("Invalid data type for CN_FIDO2_APP_LOGGERS; fallback to defaults")
custom_config = {}
# list of supported levels; OFF is not supported
log_levels = ("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE",)
# list of supported outputs
log_targets = ("STDOUT", "FILE",)
for k, v in custom_config.items():
if k not in config:
continue
if k.endswith("_log_level") and v not in log_levels:
logger.warning(f"Invalid {v} log level for {k}; fallback to defaults")
v = config[k]
if k.endswith("_log_target") and v not in log_targets:
logger.warning(f"Invalid {v} log output for {k}; fallback to defaults")
v = config[k]
# update the config
config[k] = v
# mapping between the ``log_target`` value and their appenders
file_aliases = {
"fido2_log_target": "FILE",
"persistence_log_target": "PERSISTENCE_FILE",
}
for key, value in file_aliases.items():
if config[key] == "FILE":
config[key] = value
logfile = "/opt/jans/jetty/jans-fido2/resources/log4j2.xml"
with open(logfile) as f:
txt = f.read()
tmpl = Template(txt)
with open(logfile, "w") as f:
f.write(tmpl.safe_substitute(config))
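# Illustrative value for the CN_FIDO2_APP_LOGGERS environment variable (a JSON
# string of a dict, as parsed above); keys that are not listed keep the defaults
# defined in configure_logging():
#
#   CN_FIDO2_APP_LOGGERS='{"fido2_log_level": "DEBUG", "persistence_log_target": "STDOUT"}'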
if __name__ == "__main__":
main()
|
73496
|
import importlib
import sys
import tensorflow as tf
LSTM_SIZE = 2048
def resnet_rnn_model(features, model_params, example_description, training):
# Get hyperparameters
dropout_rate = model_params['resnet_rnn'].get('dropout_rate', 0.5)
# Reshape inputs into proper dimensions
for (name, f), d in zip(features.items(), example_description):
if name.endswith('images'):
telescope_data = tf.reshape(f, [-1, *d['shape']])
num_telescopes = d['shape'][0]
if name.endswith('triggers'):
telescope_triggers = tf.cast(f, tf.float32)
# Transpose telescope_data from [batch_size,num_tel,length,width,channels]
# to [num_tel,batch_size,length,width,channels].
telescope_data = tf.transpose(telescope_data, perm=[1, 0, 2, 3, 4])
# Define the network being used. Each CNN block analyzes a single
# telescope. The outputs for non-triggering telescopes are zeroed out
# (effectively, those channels are dropped out).
# Unlike standard dropout, this zeroing-out procedure is performed both at
# training and test time since it encodes meaningful aspects of the data.
# The telescope outputs are then stacked into input for the array-level
# network, either into 1D feature vectors or into 3D convolutional
# feature maps, depending on the requirements of the network head.
# The array-level processing is then performed by the network head. The
# logits are returned and fed into a classifier.
# Load ResNet block model
sys.path.append(model_params['model_directory'])
resnet_block_module = importlib.import_module(model_params['resnet_rnn']['network']['module'])
resnet_block = getattr(resnet_block_module, model_params['resnet_rnn']['network']['function'])
trainable = model_params['resnet_rnn'].get('trainable_backbone', False)
#calculate number of valid images per event
num_tels_triggered = tf.to_int32(tf.reduce_sum(telescope_triggers,1))
telescope_outputs = []
for telescope_index in range(num_telescopes):
# Set all telescopes after the first to share weights
reuse = None if telescope_index == 0 else True
with tf.variable_scope("resnet_block"):
x = tf.gather(telescope_data, telescope_index)
# The original ResNet implementation use this padding, but we pad the images in the ImageMapper.
#x = tf.pad(telescope_data, tf.constant([[3, 3], [3, 3]]), name='conv1_pad')
init_layer = model_params['res_net'].get('init_layer', False)
if init_layer:
x = tf.layers.conv2d(x, filters=init_layer['filters'], kernel_size=init_layer['kernel_size'],
strides=init_layer['strides'], trainable=trainable, name='conv1_conv')
#x = tf.pad(x, tf.constant([[1, 1], [1, 1]]), name='pool1_pad')
init_max_pool = model_params['res_net'].get('init_max_pool', False)
if init_max_pool:
x = tf.layers.max_pooling2d(x, init_max_pool['size'], strides=init_max_pool['strides'], trainable=trainable, name='pool1_pool')
output = resnet_block(x, params=model_params, reuse=reuse, trainable=trainable)
output = tf.reduce_mean(output, axis=[1,2], name='global_avgpool')
if model_params['resnet_rnn']['pretrained_weights']:
tf.contrib.framework.init_from_checkpoint(model_params['resnet_rnn']['pretrained_weights'],{'Network/':'resnet_block/'})
#flatten output of embedding CNN to (batch_size, _)
image_embedding = tf.layers.flatten(output, name='image_embedding')
image_embedding_dropout = tf.layers.dropout(image_embedding, training=training)
telescope_outputs.append(image_embedding_dropout)
with tf.variable_scope("NetworkHead"):
#combine image embeddings (batch_size, num_tel, num_units_embedding)
embeddings = tf.stack(telescope_outputs,axis=1)
#implement attention mechanism with range num_tel (covering all timesteps)
#define LSTM cell size
rnn_cell = tf.nn.rnn_cell.LSTMCell(LSTM_SIZE)
outputs, _ = tf.nn.dynamic_rnn(
rnn_cell,
embeddings,
dtype=tf.float32,
swap_memory=True,
sequence_length=num_tels_triggered)
# (batch_size, max_num_tel * LSTM_SIZE)
outputs = tf.layers.flatten(outputs)
output_dropout = tf.layers.dropout(outputs, rate=dropout_rate,
training=training, name="rnn_output_dropout")
fc1 = tf.layers.dense(inputs=output_dropout, units=1024, kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.004), name="fc1")
dropout_1 = tf.layers.dropout(inputs=fc1, rate=dropout_rate,
training=training)
fc2 = tf.layers.dense(inputs=dropout_1, units=512, kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.004), name="fc2")
dropout_2 = tf.layers.dropout(inputs=fc2, rate=dropout_rate,
training=training)
return dropout_2
|
73561
|
import os, sys
import numpy as np
from math import sqrt
# testing without install
#sys.path.insert(0, '../build/lib.macosx-10.9-x86_64-3.8')
import poppunk_refine
# Original PopPUNK function (with some improvements)
def withinBoundary(dists, x_max, y_max, slope=2):
boundary_test = np.ones((dists.shape[0]))
for row in range(boundary_test.size):
if slope == 2:
in_tri = dists[row, 1] * x_max + dists[row, 0] * y_max - x_max * y_max
elif slope == 0:
in_tri = dists[row, 0] - x_max
elif slope == 1:
in_tri = dists[row, 1] - y_max
if abs(in_tri) < np.finfo(np.float32).eps:
boundary_test[row] = 0
elif in_tri < 0:
boundary_test[row] = -1
return(boundary_test)
def check_tuples(t1, t2):
for t in t1:
if t not in t2:
raise RuntimeError("Results don't match")
def iter_tuples(assign_results, n_samples):
tuple_list = []
idx = 0
for i in range(n_samples):
for j in range(i + 1, n_samples):
if assign_results[idx] == -1:
tuple_list.append((i, j))
idx += 1
return tuple_list
def check_res(res, expected):
if (not np.all(res == expected)):
print(res)
print(expected)
raise RuntimeError("Results don't match")
# assigning
x = np.arange(0, 1, 0.1, dtype=np.float32)
y = np.arange(0, 1, 0.1, dtype=np.float32)
xv, yv = np.meshgrid(x, y)
distMat = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
assign0 = poppunk_refine.assignThreshold(distMat, 0, 0.5, 0.5, 2)
assign1 = poppunk_refine.assignThreshold(distMat, 1, 0.5, 0.5, 2)
assign2 = poppunk_refine.assignThreshold(distMat, 2, 0.5, 0.5, 2)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign1_res = withinBoundary(distMat, 0.5, 0.5, 1)
assign2_res = withinBoundary(distMat, 0.5, 0.5, 2)
check_res(assign0, assign0_res)
check_res(assign1, assign1_res)
check_res(assign2, assign2_res)
# Check results when returned as tuple
samples = 100
distMat = np.random.rand(int(0.5 * samples * (samples - 1)), 2)
distMat = np.array(distMat, dtype = np.float32)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign0_edge_res = iter_tuples(assign0_res, samples)
check_tuples(assign0_edge_res,
poppunk_refine.generateTuples([int(x) for x in assign0_res], -1))
assign1_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 1), samples)
assign2_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 2), samples)
assign0_edges = poppunk_refine.edgeThreshold(distMat, 0, 0.5, 0.5)
assign1_edges = poppunk_refine.edgeThreshold(distMat, 1, 0.5, 0.5)
assign2_edges = poppunk_refine.edgeThreshold(distMat, 2, 0.5, 0.5)
check_tuples(assign0_edges, assign0_edge_res)
check_tuples(assign1_edges, assign1_edge_res)
check_tuples(assign2_edges, assign2_edge_res)
# move boundary 1D
# example is symmetrical at points (0.1, 0.1); (0.2, 0.2); (0.3, 0.3)
offsets = [x * sqrt(2) for x in [-0.1, 0.0, 0.1]]
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate1D(distMat, offsets, 2, 0.2, 0.2, 0.3, 0.3)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
xmax = 0.4 + (2 * (offset/sqrt(2)))
assign = poppunk_refine.assignThreshold(distMat, 2, xmax, xmax, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 1D iterate mismatch at offset " + str(offset))
# move boundary 2D
# example is for boundaries (0.1, 0.2); (0.2, 0.2); (0.3, 0.2)
offsets = [0.1, 0.2, 0.3]
y_max = 0.2
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate2D(distMat, offsets, y_max)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
assign = poppunk_refine.assignThreshold(distMat, 2, offset, y_max, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 2D iterate mismatch at offset " + str(offset))
|
73567
|
from Raspi_MotorHAT.Raspi_PWM_Servo_Driver import PWM
class Servos(object):
def __init__(self, addr=0x6f, deflect_90_in_ms = 0.9):
"""addr: The i2c address of the PWM chip.
deflect_90_in_ms: set this to calibrate the servo motors.
it is what a deflection of 90 degrees is
in terms of a pulse length in milliseconds."""
self._pwm = PWM(addr)
# This sets the timebase for it all
pwm_frequency = 60
self._pwm.setPWMFreq(pwm_frequency)
# Frequency is 1/period, but working ms, we can use 1000
period_in_ms = 1000.0 / pwm_frequency
# The chip has 4096 steps in each period.
pulse_steps = 4096.0
# Mid point of the servo pulse length in milliseconds.
servo_mid_point_ms = 1.5
# Steps for every millisecond.
steps_per_ms = pulse_steps / period_in_ms
# Steps for a degree
self.steps_per_degree = (deflect_90_in_ms * steps_per_ms) / 90.0
# Mid point of the servo in steps
self.servo_mid_point_steps = servo_mid_point_ms * steps_per_ms
# Prepare servo's turned off
self.stop_all()
def stop_all(self):
# 0 in start is nothing, 4096 sets the OFF bit.
self._pwm.setPWM(0, 0, 4096)
self._pwm.setPWM(1, 0, 4096)
self._pwm.setPWM(14, 0, 4096)
self._pwm.setPWM(15, 0, 4096)
def _convert_degrees_to_pwm(self, position):
return int(self.servo_mid_point_steps + (position * self.steps_per_degree))
def set_servo_angle(self, channel, angle):
"""position: The position in degrees from the center. -90 to 90"""
# Validate
if angle > 90 or angle < -90:
raise ValueError("Angle outside of range")
# Then set the position
off_step = self._convert_degrees_to_pwm(angle)
self._pwm.setPWM(channel, 0, off_step)
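if __name__ == "__main__":
    # Minimal usage sketch (assumption: a Raspi Motor HAT at the default 0x6f
    # address with a servo wired to PWM channel 0; calibrate deflect_90_in_ms
    # for your servo before trusting the angles).
    servos = Servos()
    servos.set_servo_angle(0, 45)  # move channel 0 to +45 degrees from center
    servos.stop_all()              # switch the servo channels back off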
|
73569
|
def verify_format(_, res):
return res
def format_index(body): # pragma: no cover
"""Format index data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {
'id': body['id'].split('/', 1)[-1],
'fields': body['fields']
}
if 'type' in body:
result['type'] = body['type']
if 'name' in body:
result['name'] = body['name']
if 'deduplicate' in body:
result['deduplicate'] = body['deduplicate']
if 'sparse' in body:
result['sparse'] = body['sparse']
if 'unique' in body:
result['unique'] = body['unique']
if 'minLength' in body:
result['min_length'] = body['minLength']
if 'geoJson' in body:
result['geo_json'] = body['geoJson']
if 'ignoreNull' in body:
result['ignore_none'] = body['ignoreNull']
if 'selectivityEstimate' in body:
result['selectivity'] = body['selectivityEstimate']
if 'isNewlyCreated' in body:
result['new'] = body['isNewlyCreated']
if 'expireAfter' in body:
result['expiry_time'] = body['expireAfter']
if 'inBackground' in body:
result['in_background'] = body['inBackground']
if 'bestIndexedLevel' in body:
result['best_indexed_level'] = body['bestIndexedLevel']
if 'worstIndexedLevel' in body:
result['worst_indexed_level'] = body['worstIndexedLevel']
if 'maxNumCoverCells' in body:
result['max_num_cover_cells'] = body['maxNumCoverCells']
return verify_format(body, result)
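# Illustrative example (made-up values): format_index() strips the collection
# prefix from 'id' and renames camelCase keys, e.g.
#     {'id': 'products/12345', 'fields': ['name'], 'type': 'hash',
#      'selectivityEstimate': 1, 'isNewlyCreated': True}
# becomes
#     {'id': '12345', 'fields': ['name'], 'type': 'hash',
#      'selectivity': 1, 'new': True}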
def format_key_options(body): # pragma: no cover
"""Format collection key options data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'type' in body:
result['key_generator'] = body['type']
if 'increment' in body:
result['key_increment'] = body['increment']
if 'offset' in body:
result['key_offset'] = body['offset']
if 'allowUserKeys' in body:
result['user_keys'] = body['allowUserKeys']
if 'lastValue' in body:
result['key_last_value'] = body['lastValue']
return verify_format(body, result)
def format_database(body): # pragma: no cover
"""Format databases info.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'id' in body:
result['id'] = body['id']
if 'name' in body:
result['name'] = body['name']
if 'path' in body:
result['path'] = body['path']
if 'system' in body:
result['system'] = body['system']
if 'isSystem' in body:
result['system'] = body['isSystem']
# Cluster only
if 'sharding' in body:
result['sharding'] = body['sharding']
if 'replicationFactor' in body:
result['replication_factor'] = body['replicationFactor']
if 'writeConcern' in body:
result['write_concern'] = body['writeConcern']
return verify_format(body, result)
def format_collection(body): # pragma: no cover
"""Format collection data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'id' in body:
result['id'] = body['id']
if 'objectId' in body:
result['object_id'] = body['objectId']
if 'name' in body:
result['name'] = body['name']
if 'isSystem' in body:
result['system'] = body['isSystem']
if 'isSmart' in body:
result['smart'] = body['isSmart']
if 'type' in body:
result['type'] = body['type']
result['edge'] = body['type'] == 3
if 'waitForSync' in body:
result['sync'] = body['waitForSync']
if 'status' in body:
result['status'] = body['status']
if 'statusString' in body:
result['status_string'] = body['statusString']
if 'globallyUniqueId' in body:
result['global_id'] = body['globallyUniqueId']
if 'cacheEnabled' in body:
result['cache'] = body['cacheEnabled']
if 'replicationFactor' in body:
result['replication_factor'] = body['replicationFactor']
if 'minReplicationFactor' in body:
result['min_replication_factor'] = body['minReplicationFactor']
if 'writeConcern' in body:
result['write_concern'] = body['writeConcern']
# MMFiles only
if 'doCompact' in body:
result['compact'] = body['doCompact']
if 'journalSize' in body:
result['journal_size'] = body['journalSize']
if 'isVolatile' in body:
result['volatile'] = body['isVolatile']
if 'indexBuckets' in body:
result['index_bucket_count'] = body['indexBuckets']
# Cluster only
if 'shards' in body:
result['shards'] = body['shards']
if 'replicationFactor' in body:
result['replication_factor'] = body['replicationFactor']
if 'numberOfShards' in body:
result['shard_count'] = body['numberOfShards']
if 'shardKeys' in body:
result['shard_fields'] = body['shardKeys']
if 'distributeShardsLike' in body:
result['shard_like'] = body['distributeShardsLike']
if 'shardingStrategy' in body:
result['sharding_strategy'] = body['shardingStrategy']
if 'smartJoinAttribute' in body:
result['smart_join_attribute'] = body['smartJoinAttribute']
# Key Generator
if 'keyOptions' in body:
result['key_options'] = format_key_options(body['keyOptions'])
# Replication only
if 'cid' in body:
result['cid'] = body['cid']
if 'version' in body:
result['version'] = body['version']
if 'allowUserKeys' in body:
result['user_keys'] = body['allowUserKeys']
if 'planId' in body:
result['plan_id'] = body['planId']
if 'deleted' in body:
result['deleted'] = body['deleted']
# New in 3.7
if 'syncByRevision' in body:
result['sync_by_revision'] = body['syncByRevision']
if 'tempObjectId' in body:
result['temp_object_id'] = body['tempObjectId']
if 'usesRevisionsAsDocumentIds' in body:
result['rev_as_id'] = body['usesRevisionsAsDocumentIds']
if 'isDisjoint' in body:
result['disjoint'] = body['isDisjoint']
if 'isSmartChild' in body:
result['smart_child'] = body['isSmartChild']
if 'minRevision' in body:
result['min_revision'] = body['minRevision']
if 'schema' in body:
result['schema'] = body['schema']
return verify_format(body, result)
def format_aql_cache(body):
"""Format AQL cache data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {
'mode': body['mode'],
'max_results': body['maxResults'],
'max_results_size': body['maxResultsSize'],
'max_entry_size': body['maxEntrySize'],
'include_system': body['includeSystem']
}
return verify_format(body, result)
def format_wal_properties(body): # pragma: no cover
"""Format WAL properties.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'allowOversizeEntries' in body:
result['oversized_ops'] = body['allowOversizeEntries']
if 'logfileSize' in body:
result['log_size'] = body['logfileSize']
if 'historicLogfiles' in body:
result['historic_logs'] = body['historicLogfiles']
if 'reserveLogfiles' in body:
result['reserve_logs'] = body['reserveLogfiles']
if 'syncInterval' in body:
result['sync_interval'] = body['syncInterval']
if 'throttleWait' in body:
result['throttle_wait'] = body['throttleWait']
if 'throttleWhenPending' in body:
result['throttle_limit'] = body['throttleWhenPending']
return verify_format(body, result)
def format_wal_transactions(body): # pragma: no cover
"""Format WAL transactions.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'minLastCollected' in body:
result['last_collected'] = body['minLastCollected']
if 'minLastSealed' in body:
result['last_sealed'] = body['minLastSealed']
if 'runningTransactions' in body:
result['count'] = body['runningTransactions']
return verify_format(body, result)
def format_aql_query(body): # pragma: no cover
"""Format AQL query data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {'id': body['id'], 'query': body['query']}
if 'started' in body:
result['started'] = body['started']
if 'state' in body:
result['state'] = body['state']
if 'stream' in body:
result['stream'] = body['stream']
if 'bindVars' in body:
result['bind_vars'] = body['bindVars']
if 'runTime' in body:
result['runtime'] = body['runTime']
return verify_format(body, result)
def format_aql_tracking(body): # pragma: no cover
"""Format AQL tracking data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'enabled' in body:
result['enabled'] = body['enabled']
if 'maxQueryStringLength' in body:
result['max_query_string_length'] = body['maxQueryStringLength']
if 'maxSlowQueries' in body:
result['max_slow_queries'] = body['maxSlowQueries']
if 'slowQueryThreshold' in body:
result['slow_query_threshold'] = body['slowQueryThreshold']
if 'slowStreamingQueryThreshold' in body:
result['slow_streaming_query_threshold'] = \
body['slowStreamingQueryThreshold']
if 'trackBindVars' in body:
result['track_bind_vars'] = body['trackBindVars']
if 'trackSlowQueries' in body:
result['track_slow_queries'] = body['trackSlowQueries']
return verify_format(body, result)
def format_tick_values(body): # pragma: no cover
"""Format tick data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'tickMin' in body:
result['tick_min'] = body['tickMin']
if 'tickMax' in body:
result['tick_max'] = body['tickMax']
if 'tick' in body:
result['tick'] = body['tick']
if 'time' in body:
result['time'] = body['time']
if 'server' in body:
result['server'] = format_server_info(body['server'])
return verify_format(body, result)
def format_server_info(body): # pragma: no cover
"""Format server data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
return {'version': body['version'], 'server_id': body['serverId']}
def format_replication_applier_config(body): # pragma: no cover
"""Format replication applier configuration data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'endpoint' in body:
result['endpoint'] = body['endpoint']
if 'database' in body:
result['database'] = body['database']
if 'username' in body:
result['username'] = body['username']
if 'verbose' in body:
result['verbose'] = body['verbose']
if 'incremental' in body:
result['incremental'] = body['incremental']
if 'requestTimeout' in body:
result['request_timeout'] = body['requestTimeout']
if 'connectTimeout' in body:
result['connect_timeout'] = body['connectTimeout']
if 'ignoreErrors' in body:
result['ignore_errors'] = body['ignoreErrors']
if 'maxConnectRetries' in body:
result['max_connect_retries'] = body['maxConnectRetries']
if 'lockTimeoutRetries' in body:
result['lock_timeout_retries'] = body['lockTimeoutRetries']
if 'sslProtocol' in body:
result['ssl_protocol'] = body['sslProtocol']
if 'chunkSize' in body:
result['chunk_size'] = body['chunkSize']
if 'skipCreateDrop' in body:
result['skip_create_drop'] = body['skipCreateDrop']
if 'autoStart' in body:
result['auto_start'] = body['autoStart']
if 'adaptivePolling' in body:
result['adaptive_polling'] = body['adaptivePolling']
if 'autoResync' in body:
result['auto_resync'] = body['autoResync']
if 'autoResyncRetries' in body:
result['auto_resync_retries'] = body['autoResyncRetries']
if 'maxPacketSize' in body:
result['max_packet_size'] = body['maxPacketSize']
if 'includeSystem' in body:
result['include_system'] = body['includeSystem']
if 'includeFoxxQueues' in body:
result['include_foxx_queues'] = body['includeFoxxQueues']
if 'requireFromPresent' in body:
result['require_from_present'] = body['requireFromPresent']
if 'restrictType' in body:
result['restrict_type'] = body['restrictType']
if 'restrictCollections' in body:
result['restrict_collections'] = body['restrictCollections']
if 'connectionRetryWaitTime' in body:
result['connection_retry_wait_time'] = body['connectionRetryWaitTime']
if 'initialSyncMaxWaitTime' in body:
result['initial_sync_max_wait_time'] = body['initialSyncMaxWaitTime']
if 'idleMinWaitTime' in body:
result['idle_min_wait_time'] = body['idleMinWaitTime']
if 'idleMaxWaitTime' in body:
result['idle_max_wait_time'] = body['idleMaxWaitTime']
return verify_format(body, result)
def format_applier_progress(body): # pragma: no cover
"""Format replication applier progress data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'time' in body:
result['time'] = body['time']
if 'message' in body:
result['message'] = body['message']
if 'failedConnects' in body:
result['failed_connects'] = body['failedConnects']
return verify_format(body, result)
def format_applier_error(body): # pragma: no cover
"""Format replication applier error data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'errorNum' in body:
result['error_num'] = body['errorNum']
if 'errorMessage' in body:
result['error_message'] = body['errorMessage']
if 'time' in body:
result['time'] = body['time']
return verify_format(body, result)
def format_applier_state_details(body): # pragma: no cover
"""Format replication applier state details.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'started' in body:
result['started'] = body['started']
if 'running' in body:
result['running'] = body['running']
if 'phase' in body:
result['phase'] = body['phase']
if 'time' in body:
result['time'] = body['time']
if 'safeResumeTick' in body:
result['safe_resume_tick'] = body['safeResumeTick']
if 'ticksBehind' in body:
result['ticks_behind'] = body['ticksBehind']
if 'lastAppliedContinuousTick' in body:
result['last_applied_continuous_tick'] = \
body['lastAppliedContinuousTick']
if 'lastProcessedContinuousTick' in body:
result['last_processed_continuous_tick'] = \
body['lastProcessedContinuousTick']
if 'lastAvailableContinuousTick' in body:
result['last_available_continuous_tick'] = \
body['lastAvailableContinuousTick']
if 'progress' in body:
result['progress'] = format_applier_progress(body['progress'])
if 'totalRequests' in body:
result['total_requests'] = body['totalRequests']
if 'totalFailedConnects' in body:
result['total_failed_connects'] = body['totalFailedConnects']
if 'totalEvents' in body:
result['total_events'] = body['totalEvents']
if 'totalDocuments' in body:
result['total_documents'] = body['totalDocuments']
if 'totalRemovals' in body:
result['total_removals'] = body['totalRemovals']
if 'totalResyncs' in body:
result['total_resyncs'] = body['totalResyncs']
if 'totalOperationsExcluded' in body:
result['total_operations_excluded'] = body['totalOperationsExcluded']
if 'totalApplyTime' in body:
result['total_apply_time'] = body['totalApplyTime']
if 'averageApplyTime' in body:
result['average_apply_time'] = body['averageApplyTime']
if 'totalFetchTime' in body:
result['total_fetch_time'] = body['totalFetchTime']
if 'averageFetchTime' in body:
result['average_fetch_time'] = body['averageFetchTime']
if 'lastError' in body:
result['last_error'] = format_applier_error(body['lastError'])
return verify_format(body, result)
def format_replication_applier_state(body): # pragma: no cover
"""Format replication applier state.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'endpoint' in body:
result['endpoint'] = body['endpoint']
if 'database' in body:
result['database'] = body['database']
if 'username' in body:
result['username'] = body['username']
if 'state' in body:
result['state'] = format_applier_state_details(body['state'])
if 'server' in body:
result['server'] = format_server_info(body['server'])
return verify_format(body, result)
def format_replication_state(body): # pragma: no cover
"""Format replication state.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
if not isinstance(body, dict):
return body
result = {}
if 'running' in body:
result['running'] = body['running']
if 'time' in body:
result['time'] = body['time']
if 'lastLogTick' in body:
result['last_log_tick'] = body['lastLogTick']
if 'totalEvents' in body:
result['total_events'] = body['totalEvents']
if 'lastUncommittedLogTick' in body:
result['last_uncommitted_log_tick'] = body['lastUncommittedLogTick']
return verify_format(body, result)
def format_replication_logger_state(body): # pragma: no cover
"""Format replication collection data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'state' in body:
result['state'] = format_replication_state(body['state'])
if 'server' in body:
result['server'] = format_server_info(body['server'])
if 'clients' in body:
result['clients'] = body['clients']
return verify_format(body, result)
def format_replication_collection(body): # pragma: no cover
"""Format replication collection data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'planVersion' in body:
result['plan_version'] = body['planVersion']
if 'isReady' in body:
result['is_ready'] = body['isReady']
if 'allInSync' in body:
result['all_in_sync'] = body['allInSync']
if 'indexes' in body:
result['indexes'] = [format_index(index) for index in body['indexes']]
if 'parameters' in body:
result['parameters'] = format_collection(body['parameters'])
return verify_format(body, result)
def format_replication_database(body): # pragma: no cover
"""Format replication database data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {
'id': body['id'],
'name': body['name'],
'collections': [
format_replication_collection(col)
for col in body['collections']
],
'views': [format_view(view) for view in body['views']]
}
if 'properties' in body:
result['properties'] = format_database(body['properties'])
return verify_format(body, result)
def format_replication_inventory(body): # pragma: no cover
"""Format replication inventory data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'tick' in body:
result['tick'] = body['tick']
if 'state' in body:
result['state'] = format_replication_state(body['state'])
if 'databases' in body:
result['databases'] = {
k: format_replication_database(v)
for k, v in body['databases'].items()
}
if 'collections' in body:
result['collections'] = [
format_replication_collection(col)
for col in body['collections']
]
if 'views' in body:
result['views'] = [format_view(view) for view in body['views']]
if 'properties' in body:
result['properties'] = format_database(body['properties'])
return verify_format(body, result)
def format_replication_sync(body): # pragma: no cover
"""Format replication sync result.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'collections' in body:
result['collections'] = body['collections']
if 'lastLogTick' in body:
result['last_log_tick'] = body['lastLogTick']
return verify_format(body, result)
def format_replication_header(headers): # pragma: no cover
"""Format replication headers.
:param headers: Request headers.
:type headers: dict
:return: Formatted body.
:rtype: dict
"""
headers = {k.lower(): v for k, v in headers.items()}
result = {}
if 'x-arango-replication-frompresent' in headers:
result['from_present'] = \
headers['x-arango-replication-frompresent'] == 'true'
if 'x-arango-replication-lastincluded' in headers:
result['last_included'] = \
headers['x-arango-replication-lastincluded']
if 'x-arango-replication-lastscanned' in headers:
result['last_scanned'] = \
headers['x-arango-replication-lastscanned']
if 'x-arango-replication-lasttick' in headers:
result['last_tick'] = \
headers['x-arango-replication-lasttick']
if 'x-arango-replication-active' in headers:
result['active'] = \
headers['x-arango-replication-active'] == 'true'
if 'x-arango-replication-checkmore' in headers:
result['check_more'] = \
headers['x-arango-replication-checkmore'] == 'true'
return result
def format_view_link(body): # pragma: no cover
"""Format view link data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'analyzers' in body:
result['analyzers'] = body['analyzers']
if 'fields' in body:
result['fields'] = body['fields']
if 'includeAllFields' in body:
result['include_all_fields'] = body['includeAllFields']
if 'trackListPositions' in body:
result['track_list_positions'] = body['trackListPositions']
if 'storeValues' in body:
result['store_values'] = body['storeValues']
return verify_format(body, result)
def format_view_consolidation_policy(body): # pragma: no cover
"""Format view consolidation policy data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'type' in body:
result['type'] = body['type']
if 'threshold' in body:
result['threshold'] = body['threshold']
if 'segmentsMin' in body:
result['segments_min'] = body['segmentsMin']
if 'segmentsMax' in body:
result['segments_max'] = body['segmentsMax']
if 'segmentsBytesMax' in body:
result['segments_bytes_max'] = body['segmentsBytesMax']
if 'segmentsBytesFloor' in body:
result['segments_bytes_floor'] = body['segmentsBytesFloor']
if 'minScore' in body:
result['min_score'] = body['minScore']
return verify_format(body, result)
def format_view(body): # pragma: no cover
"""Format view data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = {}
if 'globallyUniqueId' in body:
result['global_id'] = body['globallyUniqueId']
if 'id' in body:
result['id'] = body['id']
if 'name' in body:
result['name'] = body['name']
if 'type' in body:
result['type'] = body['type']
if 'cleanupIntervalStep' in body:
result['cleanup_interval_step'] = body['cleanupIntervalStep']
if 'commitIntervalMsec' in body:
result['commit_interval_msec'] = body['commitIntervalMsec']
if 'consolidationIntervalMsec' in body:
result['consolidation_interval_msec'] = \
body['consolidationIntervalMsec']
if 'consolidationPolicy' in body:
result['consolidation_policy'] = \
format_view_consolidation_policy(body['consolidationPolicy'])
if 'primarySort' in body:
result['primary_sort'] = body['primarySort']
if 'primarySortCompression' in body:
result['primary_sort_compression'] = body['primarySortCompression']
if 'storedValues' in body:
result['stored_values'] = body['storedValues']
if 'writebufferIdle' in body:
result['writebuffer_idle'] = body['writebufferIdle']
if 'writebufferActive' in body:
result['writebuffer_active'] = body['writebufferActive']
if 'writebufferSizeMax' in body:
result['writebuffer_max_size'] = body['writebufferSizeMax']
if 'links' in body:
result['links'] = {link_key: format_view_link(link_content) for link_key, link_content in body['links'].items()}
return verify_format(body, result)
def format_vertex(body): # pragma: no cover
"""Format vertex data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
vertex = body['vertex']
if '_oldRev' in vertex:
vertex['_old_rev'] = vertex.pop('_oldRev')
if 'new' in body or 'old' in body:
result = {'vertex': vertex}
if 'new' in body:
result['new'] = body['new']
if 'old' in body:
result['old'] = body['old']
return result
else:
return vertex
def format_edge(body): # pragma: no cover
"""Format edge data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
edge = body['edge']
if '_oldRev' in edge:
edge['_old_rev'] = edge.pop('_oldRev')
if 'new' in body or 'old' in body:
result = {'edge': edge}
if 'new' in body:
result['new'] = body['new']
if 'old' in body:
result['old'] = body['old']
return result
else:
return edge
def format_tls(body): # pragma: no cover
"""Format TLS data.
:param body: Input body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
result = body
return verify_format(body, result)
|
73574
|
import pandas as pd
def read_clean_csv(path, sep='|'):
"""will read and clean a csv file'"""
df = pd.read_csv(path, sep=sep)
if('Unnamed: 0' in df.columns.values):
df = remove_unnamed(df)
return(df)
def remove_unnamed(df):
    df = df.drop(columns='Unnamed: 0')
return df
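# Example usage (the path and separator are illustrative):
#     df = read_clean_csv('exports/data.csv', sep='|')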
|
73624
|
from .version import __version__
from .scienceworld import ScienceWorldEnv
from .scienceworld import BufferedHistorySaver
|
73642
|
import numpy as np
import pickle
"""
The first part of this file is to test if the data.py prepare the data correctly
The second part of this file is to test if the data_FlIC_plus.py prepare the data correctly
"""
### The first part
n_joint = 9 # the number of joint that you want to display
y_test = np.load('y_test_flic.npy')
x_test = np.load('x_test_flic.npy')
print('x_test shape is', x_test.shape)
i = np.random.randint(0, high=x_test.shape[0])
print('Show the %dth image and the heat map for n_joint:' % i)
y_test = y_test.astype(np.float32)
y_test = y_test / 256
coords = np.zeros([2, n_joint])
img = x_test[i, :, :, :]
img = np.reshape(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))
for joint in range(n_joint):
print(joint)
hmap = y_test[i, :, :, joint]
hmap = np.reshape(hmap, (y_test.shape[1], y_test.shape[2]))
print(hmap.shape)
x, y = np.where(hmap == np.max(hmap))
print(x, y)
coords[:, joint] = [x, y]
coords = coords * 8
print('coords:', coords)
with open('pairwise_distribution.pickle', 'rb') as handle:
pairwise_distribution = pickle.load(handle)
import matplotlib.pyplot as plt
# plt.figure(1)
# plt.imshow((img))
# plt.figure(2)
# plt.imshow((hmap))
for name in ['nose_torso', 'rsho_torso', 'relb_torso', 'rwri_torso', 'rhip_torso']:
plt.imshow(pairwise_distribution[name])
plt.savefig('img/0epoch_' + name + '.png', dpi=300)
plt.clf()
### The second part
n_joint = 9 # the number of joint that you want to display
y_test = np.load('y_test_flic_plus.npy')
x_test = np.load('x_test_flic_plus.npy')
print('x_test shape is', x_test.shape)
i = np.random.randint(0, high=x_test.shape[0])
print('Show the %dth image and the heat map for n_joint:' % i)
y_test = y_test.astype(np.float32)
y_test = y_test / 256
coords = np.zeros([2, n_joint])
img = x_test[i, :, :, :]
img = np.reshape(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))
for joint in range(n_joint):
print(joint)
hmap = y_test[i, :, :, joint]
hmap = np.reshape(hmap, (y_test.shape[1], y_test.shape[2]))
print(hmap.shape)
x, y = np.where(hmap == np.max(hmap))
print(x, y)
coords[:, joint] = [x, y]
coords = coords * 8
print('coords:', coords)
with open('pairwise_distribution_plus.pickle', 'rb') as handle:
pairwise_distribution = pickle.load(handle)
import matplotlib.pyplot as plt
plt.figure(1)
plt.imshow((img))
plt.figure(2)
plt.imshow((hmap))
plt.figure(3)
plt.imshow((pairwise_distribution['lwri_torso']))
plt.show()
|
73647
|
from collections import namedtuple
Point2D = namedtuple('Point2D', 'x y')
Point2D.__doc__ = 'Represents a 2D Cartesian coordinate'
Point2D.x.__doc__ = 'x-coordinate'
Point2D.y.__doc__ = 'y-coordinate'
help(Point2D)
|
73692
|
from __future__ import print_function
from __future__ import division
import random
import time
import itertools as it
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from utils import load_data
from train_lstm import LSTM
from train_tdlstm import TDLSTM
from train_tclstm import TCLSTM
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval
from skopt import gp_minimize, forest_minimize, gbrt_minimize
from skopt.space import Categorical
def random_search(param_grid, sampsize=None):
expanded_param_grid = expand_grid(param_grid)
    if sampsize is None:
sampsize = int(len(expanded_param_grid) / 2.0)
samp = random.sample(expanded_param_grid, sampsize)
return samp
def expand_grid(param_grid):
varNames = sorted(param_grid)
return [dict(zip(varNames, prod))
for prod in it.product(*(param_grid[varName]
for varName in varNames))]
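# For example (illustrative): expand_grid({'a': [1, 2], 'b': [3]}) returns
# [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]; random_search then samples half of the
# expanded grid unless an explicit sampsize is given.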
def skopt_search(args, data, model, param_grid, skopt_method, n_calls):
param_keys, param_vecs = zip(*param_grid.items())
param_keys = list(param_keys)
param_vecs = list(param_vecs)
def skopt_scorer(param_vec):
params = dict(zip(param_keys, param_vec))
args.num_hidden = params['num_hidden']
args.dropout_output = params['dropout_output']
args.dropout_input = params['dropout_input']
args.clip_norm = params['clip_norm']
args.batch_size = params['batch_size']
print(args)
print()
scores = run_network(args, data, model, tuning=args.tune)
test_score, eval_score = scores
tf.reset_default_graph()
eval_score = -eval_score[0]
return eval_score
outcome = skopt_method(skopt_scorer, list(param_vecs), n_calls=n_calls)
results = []
for err, param_vec in zip(outcome.func_vals, outcome.x_iters):
params = dict(zip(param_keys, param_vec))
results.append({'loss': err, 'params': params})
return results
def skoptTUNE(args, model, n_calls):
"""
Hyper-parameter optimization using scikit-opt.
    It offers three algorithms: forest_minimize (decision-tree regression search),
    gbrt_minimize (gradient-boosted-tree search),
    and gp_minimize (Gaussian process regression search).
"""
hyperparameters = {
'batch_size': (40, 120),
'num_hidden': (100, 500),
'dropout_output': (0.3, 1.0),
'dropout_input': (0.3, 1.0),
'clip_norm': (0.5, 1.0),
}
data = load_data(args, args.data, saved=args.load_data)
all_res = skopt_search(args, data, model, hyperparameters, gp_minimize, n_calls=n_calls)
print(all_res)
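# All three skopt minimisers share the same interface; a minimal sketch on a toy
# objective (illustrative only, assuming scikit-optimize is installed):
#   res = gp_minimize(lambda p: (p[0] - 2.0) ** 2, [(-5.0, 5.0)], n_calls=15)
#   res.x    # best parameter vector found
#   res.fun  # best objective value found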
def hyperopt_search(args, data, model, param_grid, max_evals):
def objective(param_grid):
args.num_hidden = param_grid['num_hidden']
args.dropout_output = param_grid['dropout_output']
args.dropout_input = param_grid['dropout_input']
args.clip_norm = param_grid['clip_norm']
args.batch_size = param_grid['batch_size']
# args.learning_rate = param_grid['learning_rate']
print(args)
print()
scores = run_network(args, data, model, tuning=args.tune)
test_score, eval_score = scores
tf.reset_default_graph()
eval_score = -eval_score[0]
return {'loss': eval_score, 'params': args, 'status': STATUS_OK}
trials = Trials()
results = fmin(
objective, param_grid, algo=tpe.suggest,
trials=trials, max_evals=max_evals)
return results, trials.results
def hyperoptTUNE(args, model, n_calls):
"""
    Search the hyper-parameter space with the Tree of Parzen Estimators (TPE),
    a Bayesian approach.
"""
hyperparameters = {
'batch_size': hp.choice('batch_size', range(40, 130, 20)),
'num_hidden': hp.quniform('num_hidden', 100, 500, 1),
# 'learning_rate': hp.choice('learning_rate', [0.0005]),
'dropout_output': hp.quniform('dropout_output', 0.3, 1.0, 0.1),
'dropout_input': hp.quniform('dropout_input', 0.3, 1.0, 0.1),
'clip_norm': hp.quniform('clip_norm', 0.5, 1.0, 0.1),
}
data = load_data(args, args.data, saved=args.load_data)
best_params, all_res = hyperopt_search(args, data, model, hyperparameters, max_evals=n_calls)
print(best_params)
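# A minimal, self-contained sketch of the same hyperopt pattern on a toy
# objective (illustrative only, assuming hyperopt is installed):
#   best = fmin(fn=lambda p: (p['x'] - 2) ** 2,
#               space={'x': hp.uniform('x', -5, 5)},
#               algo=tpe.suggest, max_evals=20)
#   # best is a dict of the best sampled values, e.g. {'x': 2.03}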
def TUNE(args, model, mode, n_calls=5):
hyperparameters_all = {
'batch_size': range(40, 130, 20),
'seq_len': [42],
'num_hidden': np.random.randint(100, 501, 10),
'learning_rate': [0.0005],
'dropout_output': np.arange(0.3, 1.1, 0.1),
'dropout_input': np.arange(0.3, 1.1, 0.1),
'clip_norm': np.arange(0.5, 1.01, 0.1),
}
maxx = 0
data = load_data(args, args.data, saved=args.load_data)
if mode == 'rand':
samp = random_search(hyperparameters_all, n_calls) #random search
else:
samp = expand_grid(hyperparameters_all) #grid-search
for hyperparameters in samp:
print("Evaluating hyperparameters:", hyperparameters)
for attr, value in hyperparameters.items():
setattr(args, attr, value)
scores = run_network(args, data, model, tuning=args.tune)
test_score, eval_score = scores
if eval_score[0] > maxx:
maxx = eval_score[0]
best_score = test_score
hyperparameters_best = hyperparameters
tf.reset_default_graph()
print()
print("Optimisation finished..")
print("Optimised hyperparameters:")
with open(os.path.dirname(args.checkpoint_file)+'/checkpoint', 'w') as fp:
fp.write('%s:"%s"\n' % ('model',args.model))
for attr, value in sorted(hyperparameters_best.items()):
print("{}={}".format(attr.upper(), value))
fp.write('%s:"%s"\n' % (attr, value))
print()
print("Final Test Data Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
.format(best_score[0], best_score[1], best_score[2]))
def TRAIN(args, model):
t0 = time.time()
print("\nParameters:")
for attr, value in sorted(vars(args).items()):
print("{}={}".format(attr.upper(), value))
print()
print("Graph initialized..")
t1 = time.time()
print("time taken:", t1-t0)
print()
data = load_data(args, args.data, saved=args.load_data)
run_network(args, data, model, tuning=args.tune)
def run_network(args, data, model, tuning=False):
if model == 'LSTM':
nn = LSTM(args, data, tuning=tuning)
scores = nn.train_lstm(args, data)
return scores
elif model =='TDLSTM':
nn = TDLSTM(args, data, tuning=tuning)
scores = nn.train_tdlstm(args, data)
return scores
elif model =='TCLSTM':
nn = TCLSTM(args, data, tuning=tuning)
scores = nn.train_tclstm(args, data)
return scores
else:
print("No such model; please select from LSTM, TDLSTM or TCLSTM")
|
73695
|
import math
import torch
from torch import nn
from torch.nn import functional as F
from .activations import sigmoid, HardSwish, Swish
from .utils_i2rnet import (
relu_fn,
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
Conv2dDynamicSamePadding,
get_model_params,
efficientnet_params,
load_pretrained_weights,
)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
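# For example (illustrative): _make_divisible(30, 8) == 32 and
# _make_divisible(7, 8) == 8. Values are rounded to the nearest multiple of
# the divisor (never below min_value), then bumped up one step if rounding
# down would lose more than 10% of the original value.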
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
padding = self._block_args.kernel_size //2
# Conv2d = Conv2dDynamicSamePadding
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False, padding = padding)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = relu_fn(self._bn0(self._expand_conv(inputs)))
x = relu_fn(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class I2RConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
padding = self._block_args.kernel_size //2
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
if self._block_args.expand_ratio == 2:
self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
stride = s, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
elif inp != final_oup and s == 1:
self._project_conv = None
self._expand_conv = None
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
elif inp != final_oup and s == 2:
self._project_conv = None
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
stride = s, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
else:
# if inp == final_oup:
self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# if not (self._block_args.expand_ratio == 2):
self.identity = True
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) # Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
x = inputs
        # NOTE: remove the first 3x3 conv to reduce running memory; need to verify the performance
if self._project_conv is not None:
x = relu_fn(self._bn0(self._project_conv(inputs)))
x = self._bn1(self._linear1(x))
x = relu_fn(self._bn2(self._linear2(x)))
if self._expand_conv is not None:
x = self._bn3(self._expand_conv(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class MBConvBlockV1(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
group_1x1 = 1
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp),
nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
#first linear layer
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
# sec linear layer
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
# expand layer
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
elif inp != final_oup and s == 1:
self.features=nn.Sequential(
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
# only two linear layers are needed
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
)
elif inp != final_oup and s == 2:
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
else:
self.identity = True
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp),
nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 1
# num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
x = self.features(inputs)
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class GhostI2RBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# apply repeat scheme
self.split_ratio = 2
self.ghost_idx_inp = inp // self.split_ratio
self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_mode = 'small'
if se_mode == 'large':
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
else:
se_expand_ratio = 1
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,ghost_id],dim=1)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,ghost_id],dim=1)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# import pdb;pdb.set_trace()
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
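# Note on the ghost scheme above (illustrative): in GhostI2RBlock's
# expand_ratio == 2 and identity branches, only the first inp // split_ratio
# channels pass through the two 1x1 (project/expand) convolutions; the
# remaining "ghost" channels are carried through unchanged and concatenated
# back before the second depthwise conv. For instance, with
# inp = final_oup = 64 and split_ratio = 2, the 1x1 convs see 32 input
# channels and produce final_oup - 32 = 32 channels, so the concatenation
# restores the full 64 channels.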
class GhostI2RBlock_change_droppath_pos(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
apply_ghost = True
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
if apply_ghost:
# apply repeat scheme
self.split_ratio = 2
self.ghost_idx_inp = inp // self.split_ratio
self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
else:
self.ghost_idx_inp = inp
self.ghost_idx_oup = final_oup
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,ghost_id],dim=1)
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,ghost_id],dim=1)
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class NESI2RBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# apply repeat scheme
self.split_ratio = 2
self.nes_idx_inp = inp // self.split_ratio
self.nes_idx_oup = final_oup // self.split_ratio
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.nes_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.nes_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.nes_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.nes_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.nes_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.nes_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
nes_x = x[:,:self.nes_idx_inp,:,:] + x[:,self.nes_idx_inp:,:,:]
x = self.bn2(self.project_layer(nes_x))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,x],dim=1)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
nes_x = x[:,:self.nes_idx_inp,:,:] + x[:,self.nes_idx_inp:,:,:]
x = self.bn2(self.project_layer(nes_x))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,x],dim=1)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
# NOTE change first filter to be 16 to follow MOBILENETV3
# NOTE change back to 32 for efficientnet series
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# build_block = NESI2RBlock
build_block = GhostI2RBlock
# build_block = GhostI2RBlock_change_droppath_pos
# build_block = MBConvBlockV1
# build_block = I2RConvBlock
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(build_block(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(build_block(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
# self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._dropout = self._global_params.dropout_rate
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = relu_fn(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
# x = relu_fn(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
""" Calls extract_features to extract features, applies final linear layer, and returns logits. """
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)
if self._dropout:
x = F.dropout(x, p=self._dropout, training=self.training)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return EfficientNet(blocks_args, global_params)
@classmethod
def from_pretrained(cls, model_name, num_classes=1000):
model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes})
load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))
return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):
""" Validates model name. None that pretrained weights are only available for
the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """
num_models = 4 if also_need_pretrained_weights else 8
        valid_models = ['efficientnet_b' + str(i) for i in range(num_models)] + ['i2rnet_b' + str(i) for i in range(num_models)] + ['mnext_l', 'mnext_s', 'mnext_mbv2_cfg']
if model_name.replace('-','_') not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
def efficient_i2rnet(progress=None,width_mult=1, rm_1x1=None, interpolation=None, group_1x1=None):
return EfficientNet.from_name('efficientnet-b0')
# class I2RConvBlock_half_id(nn.Module):
# """
# Mobile Inverted Residual Bottleneck Block
# Args:
# block_args (namedtuple): BlockArgs, see above
# global_params (namedtuple): GlobalParam, see above
# Attributes:
# has_se (bool): Whether the block contains a Squeeze and Excitation layer.
# """
# def __init__(self, block_args, global_params):
# super().__init__()
# self._block_args = block_args
# self._bn_mom = 1 - global_params.batch_norm_momentum
# self._bn_eps = global_params.batch_norm_epsilon
# self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
# self.id_skip = block_args.id_skip # skip connection and drop connect
# # Get static or dynamic convolution depending on image size
# Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# # Conv2d = nn.Conv2d
# padding = self._block_args.kernel_size //2
# # Expansion phase
# inp = self._block_args.input_filters # number of input channels
# oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
# final_oup = self._block_args.output_filters
# self.inp, self.final_oup = inp, final_oup
# self.identity = False
# if oup < oup / 6.:
# oup = math.ceil(oup / 6.)
# oup = _make_divisible(oup,16)
# k = self._block_args.kernel_size
# s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# if self._block_args.expand_ratio == 2:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 1:
# self._project_conv = None
# self._expand_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 2:
# self._project_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# else:
# # if inp == final_oup:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# # if not (self._block_args.expand_ratio == 2):
# self.identity = True
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) # Depthwise convolution phase
# # self._depthwise_conv = Conv2d(
# # in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# # kernel_size=k, stride=s, bias=False)
# # self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# # Squeeze and Excitation layer, if desired
# if self.has_se:
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
# self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
# self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # # Output phase
# # self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# # self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# def forward(self, inputs, drop_connect_rate=None):
# """
# :param inputs: input tensor
# :param drop_connect_rate: drop connect rate (float, between 0 and 1)
# :return: output of block
# """
# # Expansion and Depthwise Convolution
# # import pdb;pdb.set_trace()
# x = inputs
# # NOTE:remove the first 3x3 conv to reduce running mem, need to verfy the performance
# if self._project_conv is not None:
# x = relu_fn(self._bn0(self._project_conv(inputs)))
# x = self._bn1(self._linear1(x))
# x = relu_fn(self._bn2(self._linear2(x)))
# if self._expand_conv is not None:
# x = self._bn3(self._expand_conv(x))
# # Squeeze and Excitation
# if self.has_se:
# x_squeezed = F.adaptive_avg_pool2d(x, 1)
# x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
# x = torch.sigmoid(x_squeezed) * x
# # Skip connection and drop connect
# input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
# if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
# shape = inputs.shape
# # shape[1] = shape[1]//2
# id_tensor = torch.cat([inputs[:,:shape[1]//2,:,:],torch.zeros(shape)[:,shape[1]//2:,:,:].cuda()],dim=1)
# x = x + id_tensor
# # import pdb;pdb.set_trace()
# # x = x + inputs # skip connection
# return x
|
73696
|
from minos.cqrs import (
CommandService,
)
from minos.networks import (
Request,
Response,
ResponseException,
enroute,
)
from ..aggregates import (
PaymentAggregate,
)
class PaymentCommandService(CommandService):
"""PaymentCommandService class."""
def validate_card(self, card_number: str) -> bool:
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(card_number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d * 2))
return_value = checksum % 10
if return_value == 0:
return True
return False
@enroute.broker.command("CreatePayment")
async def create_payment(self, request: Request) -> Response:
"""Create a new ``Payment`` instance.
:param request: The ``Request`` instance.
:return: A ``Response`` instance.
"""
try:
content = await request.content()
if self.validate_card(content["card_number"]):
payment = await PaymentAggregate.create(
content["card_number"],
content["validity"],
content["security_code"],
content["name"],
content["surname"],
)
                return Response({"status": "payment accepted"})
            # Explicitly reject an invalid card number instead of falling through and returning None
            return Response({"status": "payment rejected"})
        except Exception as exc:
raise ResponseException(f"An error occurred during Payment creation: {exc}")
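    # Added illustration of the Luhn check implemented by validate_card above, using the
    # standard Luhn test number (not a real card); `service` stands for a
    # PaymentCommandService instance and is only a placeholder name:
    #
    #   service.validate_card("79927398713")  # -> True  (checksum divisible by 10)
    #   service.validate_card("79927398710")  # -> False (checksum % 10 != 0)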
|
73726
|
import torch
from torch.utils.data import Dataset
from random import choice
from pytorch_pretrained_bert import BertTokenizer, BertModel
max_len = 400
device = "cuda:0"
# bert_path = './pretrain/bert-base-chinese/'
# tokenizer = BertTokenizer.from_pretrained(bert_path + 'vocab.txt')
# BERT = BertModel.from_pretrained(bert_path).to(device)
# BERT.eval()
tokenizer = None
BERT = None
def text2bert(texts):
texts = [text.lower() for text in texts]
mask_loss = []
text_seqs = []
segments_ids = []
text_len = [min(max_len, 1 + len(text) + 1) for text in texts]
text_max = max(text_len)
for num, text in enumerate(texts):
text_cat = ['[CLS]'] + list(text[:(max_len - 2)]) + ['[SEP]']
text_bert = []
for c in text_cat:
if c in tokenizer.vocab:
text_bert.append(c)
else:
text_bert.append('[UNK]')
        # mask used for the loss: 1 for sentence tokens, 0 for [CLS]/[SEP]/padding
mask_loss.append([0] + [1] * (len(text_cat) - 2) + [0] * (text_max - text_len[num] + 1))
        # token ids fed to BERT, padded to the batch max length
text_seq = tokenizer.convert_tokens_to_ids(text_bert) + [0] * (text_max - text_len[num])
text_seqs.append(text_seq)
segments_ids.append([0] * text_max)
text_seqs = torch.LongTensor(text_seqs).to(device)
segments_ids = torch.LongTensor(segments_ids).to(device)
    # attention mask for BERT: 1 for real tokens, 0 for padding
    mask_bert = (text_seqs != 0).long()
with torch.no_grad():
sentence_features, _ = BERT(text_seqs, segments_ids, mask_bert)
sentence_features = sentence_features[-1]
mask_loss = torch.LongTensor(mask_loss).to(device)
mask_feature = mask_loss.unsqueeze(-1).repeat(1, 1, 768)
    # keep only the sentence positions in the sequence output
sentence_features = torch.where(torch.eq(mask_feature, 0),
torch.zeros_like(sentence_features),
sentence_features)
return sentence_features, mask_loss
# define how the data is read
class MyDataset(Dataset):
def __init__(self, dataset, subject_data, alias_data, n=1):
self.dataset = dataset
self.subject_data = subject_data
self.alias_data = alias_data
self.kb_ids = list(subject_data.keys())
self.n = n
def __getitem__(self, index):
data_one = self.dataset[index]
entity_list = data_one['entity_list']
entity_ses = []
kb_seqs = []
labels = []
for entity_info in entity_list:
kb_id, entity, s, e = entity_info
            # positive sample
kb_seq = self.subject_data[kb_id]['data_seq'][:max_len]
kb_seqs.append(kb_seq)
labels.append(1)
kb_id_other_remove = [kb_id]
            # negative samples
for i in range(1, self.n + 1):
                # link_label == 0: if the mention is ambiguous, sample a negative from its aliases, otherwise sample any random kb_id
                kb_id_other = choice(self.kb_ids)  # note: random re-sampling here could be made more efficient
while kb_id_other in kb_id_other_remove:
kb_id_other = choice(self.kb_ids)
if entity in self.alias_data:
kb_id_others = list(self.alias_data[entity].keys())
if len(kb_id_others) > i:
kb_id_other = choice(kb_id_others)
while kb_id_other in kb_id_other_remove:
kb_id_other = choice(kb_id_others)
kb_seq = self.subject_data[kb_id_other]['data_seq'][:max_len]
kb_seqs.append(kb_seq)
labels.append(0)
kb_id_other_remove.append(kb_id_other)
return data_one, kb_seqs, labels, entity_ses
def __len__(self):
return len(self.dataset)
def seqs2batch(seqs):
seqs_len = [min(max_len, len(i)) for i in seqs]
seqs_max = max(seqs_len)
seqs_batch = []
for num, seq in enumerate(seqs):
seqs_batch.append(seq[:max_len] + [0] * (seqs_max - seqs_len[num]))
return seqs_batch, seqs_max
def collate_fn(batch):
    # entity recognition (NER) inputs
text_seqs_ner, text_max = seqs2batch([i[0]['text_seq'] for i in batch])
entity_starts = []
entity_ends = []
text_seqs_link = []
kb_seqs_link = []
labels_link = []
for num, i in enumerate(batch):
data_one, kb_seqs, labels, entity_ses = i
entity_start = [0] * (text_max + 2)
for j in data_one['entity_start']:
entity_start[j + 1] = 1
entity_starts.append(entity_start)
entity_end = [0] * (text_max + 2)
for j in data_one['entity_end']:
entity_end[j + 1] = 1
entity_ends.append(entity_end)
text_seqs_link += [text_seqs_ner[num]] * len(labels)
kb_seqs_link += kb_seqs
labels_link += labels
texts = [i[0]['text'] for i in batch]
text_features, mask_loss_texts = text2bert(texts)
entity_starts = torch.Tensor(entity_starts).to(device)
entity_ends = torch.Tensor(entity_ends).to(device)
return (text_features, mask_loss_texts, entity_starts, entity_ends), (text_seqs_link, kb_seqs_link, labels_link)
def deal_eval(batch):
text_seq = []
for i, j in enumerate(batch):
text_seq.append(j['text_seq'])
text_seq = torch.LongTensor(text_seq)
return text_seq
def collate_fn_link(batch):
text_seqs, text_max = seqs2batch([i[0]['text_seq'] for i in batch])
entity_starts = []
entity_ends = []
text_seqs_link = []
kb_seqs_link = []
labels_link = []
for num, i in enumerate(batch):
data_one, kb_seqs, labels, entity_ses = i
entity_start = [0] * (text_max + 2)
for j in data_one['entity_start']:
entity_start[j + 1] = 1
entity_starts.append(entity_start)
entity_end = [0] * (text_max + 2)
for j in data_one['entity_end']:
entity_end[j + 1] = 1
entity_ends.append(entity_end)
text_seqs_link += [text_seqs[num]] * len(labels)
kb_seqs_link += kb_seqs
labels_link += labels
return None, (text_seqs_link, kb_seqs_link, labels_link)
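# A minimal wiring sketch for the classes above (an assumption about usage, not code
# from the original project): `dataset`, `subject_data` and `alias_data` are expected
# to follow the key layout used in MyDataset, and the module-level tokenizer/BERT must
# be initialised first, e.g. as in the commented-out block at the top of this file.
#
#   from torch.utils.data import DataLoader
#   train_set = MyDataset(dataset, subject_data, alias_data, n=1)
#   train_loader = DataLoader(train_set, batch_size=16, shuffle=True, collate_fn=collate_fn)
#   for (text_features, mask_loss_texts, entity_starts, entity_ends), link_batch in train_loader:
#       text_seqs_link, kb_seqs_link, labels_link = link_batch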
|
73728
|
import unittest
from hidden_word import checkio
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{
"input": [
"""DREAMING of apples on a wall,
And dreaming often, dear,
I dreamed that, if I counted all,
-How many would appear?""",
"ten",
],
"answer": [2, 14, 2, 16],
},
{
"input": [
"""He took his vorpal sword in hand:
Long time the manxome foe he sought--
So rested he by the Tumtum tree,
And stood awhile in thought.
And as in uffish thought he stood,
The Jabberwock, with eyes of flame,
Came whiffling through the tulgey wood,
And burbled as it came!""",
"noir",
],
"answer": [4, 16, 7, 16],
},
],
"Extra": [
{
"input": [
"""Twas brillig, and the slithy toves
Did gyre and gimble in the wabe;
All mimsy were the borogoves,
And the mome raths outgrabe.""",
"them",
],
"answer": [4, 4, 4, 7],
},
{
"input": [
"""Twas brillig, and the slithy toves
Did gyre and gimble in the wabe;
All mimsy were the borogoves,
And the mome raths outgrabe.""",
"stog",
],
"answer": [1, 19, 4, 19],
},
{
"input": [
"""One, two! One, two! And through and through
The vorpal blade went snicker-snack!
He left it dead, and with its head
He went galumphing back.""",
"back",
],
"answer": [4, 17, 4, 20],
},
{
"input": [
"""And hast thou slain the Jabberwock?
Come to my arms, my beamish boy!
O frabjous day! Callooh! Callay!'
He chortled in his joy.
'Twas brillig, and the slithy toves
Did gyre and gimble in the wabe;
All mimsy were the borogoves,
And the mome raths outgrabe.'""",
"tomy",
],
"answer": [2, 5, 2, 8],
},
{
"input": [
"""<NAME> sat on a wall:
<NAME> had a great fall.
All the King's horses and all the King's men
Couldn't put <NAME> in his place again.""",
"oast",
],
"answer": [1, 16, 4, 16],
},
{
"input": [
"""Hi all!
And all goodbye!
Of course goodbye.
or not""",
"haoo",
],
"answer": [1, 1, 4, 1],
},
{
"input": [
"""xa
xb
x""",
"ab",
],
"answer": [1, 2, 2, 2],
},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert checkio(*i['input']) == i['answer']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert checkio(*i['input']) == i['answer']
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
73774
|
class Rect(object):
def __init__(self, cx, cy, width, height, confidence):
self.cx = cx
self.cy = cy
self.width = width
self.height = height
self.confidence = confidence
self.true_confidence = confidence
def overlaps(self, other):
if abs(self.cx - other.cx) > (self.width + other.width) / 1.5:
return False
elif abs(self.cy - other.cy) > (self.height + other.height) / 2.0:
return False
else:
return True
def distance(self, other):
return sum(
map(
abs, [
self.cx - other.cx, self.cy - other.cy, self.width -
other.width, self.height - other.height
]
)
)
def intersection(self, other):
left = max(self.cx - self.width / 2., other.cx - other.width / 2.)
right = min(self.cx + self.width / 2., other.cx + other.width / 2.)
width = max(right - left, 0)
top = max(self.cy - self.height / 2., other.cy - other.height / 2.)
bottom = min(self.cy + self.height / 2., other.cy + other.height / 2.)
height = max(bottom - top, 0)
return width * height
def area(self):
return self.height * self.width
def union(self, other):
return self.area() + other.area() - self.intersection(other)
def iou(self, other):
return self.intersection(other) / self.union(other)
def __eq__(self, other):
return (
self.cx == other.cx and self.cy == other.cy and
self.width == other.width and self.height == other.height and
self.confidence == other.confidence
)
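# Added worked example of the IoU computed above (illustrative values, not part of
# the original code): two unit squares whose centres are offset by 0.5 on x overlap
# in a 0.5 x 1.0 strip, so intersection = 0.5, union = 1 + 1 - 0.5 = 1.5, IoU = 1/3.
def _iou_example():
    a = Rect(cx=0.0, cy=0.0, width=1.0, height=1.0, confidence=1.0)
    b = Rect(cx=0.5, cy=0.0, width=1.0, height=1.0, confidence=1.0)
    assert abs(a.iou(b) - 1.0 / 3.0) < 1e-9
    return a.iou(b)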
|
73784
|
from .ReportDaily import *
# Find personal repositories that nonowners are pushing to.
# These repositories should be moved into organizations.
# Only look at active users (not suspended!) and only look at pushes
# of the last 4 weeks.
class ReportReposPersonalNonOwnerPushes(ReportDaily):
def name(self):
return "repositories-personal-nonowner-pushes"
def updateDailyData(self):
self.detailedHeader, self.detailedData = self.parseData(self.executeQuery(self.query()))
self.header = ["date", "personal repositories with nonowner pushes"]
self.data.append([str(self.yesterday()), len(self.detailedData)])
self.truncateData(self.timeRangeTotal())
self.sortDataByDate()
def query(self):
fourWeeksAgo = self.daysAgo(28)
return '''
SELECT
CONCAT(users.login, "/", repositories.name) as "repository",
COUNT(DISTINCT(pushes.pusher_id)) as "nonowner pushers"
FROM
repositories
JOIN users ON repositories.owner_id = users.id
JOIN pushes ON pushes.repository_id = repositories.id
WHERE
users.type = "user"
AND users.suspended_at IS NULL
AND CAST(pushes.created_at AS DATE) BETWEEN
"''' + str(fourWeeksAgo) + '''" AND "''' + str(self.yesterday()) + '''"
AND pushes.pusher_id != users.id
GROUP BY
repositories.id
ORDER BY
2 DESC, 1'''
|
73788
|
import pandas as pd
import numpy as np
import re
import nltk
from utils.utils import *
import time
from ql_score import ql_score
import pickle
class QL:
alpha = 0.5
mu=1500.
_inverted_index = {}
# data_root = './'
# _term_stats_path = data_root + 'clueweb_stats/term_stats.pkl'
# _term_stats_porter_path = data_root + 'clueweb_stats/term_stats.porter.pkl'
# _term_stats_krovetz_path = data_root + 'clueweb_stats/term_stats.krovetz.pkl'
# _doc_stats_path = data_root + 'clueweb_stats/doc_lengths'
# _index_path = data_root + 'data/topic_indexes/{}.pkl'
_mean_doc_len = 770.4786222801615
_total_docs = 33836981
_total_terms = 0
def __init__(self, do_stemming, do_stopword_removal, data_root = './', load_stats=True):
self.do_stemming = do_stemming
self.do_stopword_removal = do_stopword_removal
self.data_root = data_root
self._stopwords = nltk.corpus.stopwords.words('english')
self._term_stats_path = self.data_root + 'clueweb_stats/term_stats.pkl'
self._term_stats_porter_path = self.data_root + 'clueweb_stats/term_stats.porter.pkl'
self._term_stats_krovetz_path = self.data_root + 'clueweb_stats/term_stats.krovetz.pkl'
self._doc_stats_path = self.data_root + 'clueweb_stats/doc_lengths'
self._index_path = self.data_root + 'topic_indexes/{}.pkl'
if load_stats and self.do_stemming:
self._term_stats = pd.read_pickle(self._term_stats_krovetz_path)[1].to_dict()
elif load_stats:
self._term_stats = pd.read_pickle(self._term_stats_path)[1].to_dict()
for k in self._term_stats:
self._total_terms += self._term_stats[k]
if self.do_stopword_removal:
for stopw in self._stopwords:
self._total_terms -= self._term_stats[stopw] if stopw in self._term_stats else 0
def _stopword_removal(self, tokens):
return [word for word in tokens if word not in self._stopwords]
def load_doc_stats(self):
doc_lengths = pd.read_csv(self._doc_stats_path, sep='\t', header=None)
self._mean_doc_len = doc_lengths[2].mean()
self._total_docs = len(doc_lengths.index)
def load_topic_index(self, topic_id):
with open(self._index_path.format(topic_id), 'rb') as inp:
self._inverted_index = pickle.load(inp)
if self.do_stopword_removal:
for doc in self._inverted_index:
for stopw in self._stopwords:
if stopw in self._inverted_index[doc]['terms']:
self._inverted_index[doc]['length'] -= self._inverted_index[doc]['terms'][stopw]
def update_query_lang_model(self, query, question, answer):
output = {}
query_tokens, qlen = self._preprocess(query)
if type(question) == str:
other_tokens, other_len = self._preprocess(question + ' ' + answer)
else:
other_tokens, other_len = self._preprocess(question + answer)
# answer_tokens, ans_len = self._preprocess(answer)
all_tokens = set(list(query_tokens.keys()) + list(other_tokens.keys()))
for t in all_tokens:
try:
qfreq = float(query_tokens[t]) / qlen
except KeyError:
qfreq = 0
try:
qafreq = float(other_tokens[t]) / other_len
except KeyError:
qafreq = 0
output[t] = self.alpha * qfreq + (1 - self.alpha) * qafreq
# print(t, output[t])
self._query_lm = output
def _preprocess(self, text):
if type(text) == str:
if self.do_stemming:
text_tokens = tokenize_and_stem(text)
else:
text_tokens = tokenize_only(text)
if self.do_stopword_removal:
text_tokens = self._stopword_removal(text_tokens)
else:
text_tokens = text
output = dict()
for t in text_tokens:
if t not in output:
output[t] = 0.
output[t] += 1.
return output, len(text_tokens)
def _add_doc_to_inverted_index_if_not_existing(self, document_id, document):
if document_id not in self._inverted_index:
document_tokens, length = self._preprocess(document)
self._inverted_index[document_id] = {'terms': document_tokens,
'length': length}
# try:
# # print(document_tokens)
# self._inverted_index[document_id]['terms'] = document_tokens
# except KeyError:
# self._inverted_index[document_id]['terms'] = {}
# self._inverted_index[document_id]['length'] = len(document_tokens)
def get_result_list(self):
output = []
for doc_id in self._inverted_index:
output.append((doc_id, self.get_interpolated_score(doc_id)))
return output
def get_result_df(self, topk, query_id):
df = pd.DataFrame(self.get_result_list()).sort_values(1, ascending=False).head(topk)
df['record_id'] = query_id
return df
def get_interpolated_score(self, document_id):
doc_inf = self._inverted_index[document_id]
doc_len = doc_inf['length']
score = 0.
for t in self._query_lm:
try:
dfreq = doc_inf['terms'][t]
except KeyError:
dfreq = 0
try:
nq = self._term_stats[t]
except KeyError:
nq = 0.
# qafreq = float(other_tokens.count(t)) / len(other_tokens)
# # print(t, qfreq, qafreq)
# # q_score = self.alpha * qfreq + (1 - self.alpha) * qafreq
# # print('qscore', q_score)
# d_score = float(dfreq) / (self.mu + doc_len)
# d_score += (self.mu / (self.mu + doc_len)) * (float(nq) / self._total_terms)
# # print('dscore',d_score)
# if d_score > 0:
# score += q_score * np.log(d_score)
# else:
            #     print('This term returns zero document frequency: ', t)
q_score = self._query_lm[t] #float(query_tokens.count(t)) / len(query_tokens)
#
score += ql_score.ql_score_f(q_score, dfreq, self.mu, doc_len, nq, self._total_terms)
# qafreq = float(other_tokens.count(t)) / len(other_tokens)
# # print(t, qfreq, qafreq)
# # q_score = self.alpha * qfreq + (1 - self.alpha) * qafreq
# # print('qscore', q_score)
# print(dfreq,self.mu, doc_len,nq, self._total_terms)
# #old
# d_score = float(dfreq) / (self.mu + doc_len)
# d_score += (self.mu / (self.mu + doc_len)) * (float(nq) / self._total_terms)
# d_score = (float(dfreq) + (self.mu *(float(nq)/self._total_terms))/(doc_len+)
# # print('dscore',d_score)
# print(d_score)
# if d_score > 0:
# score += q_score * np.log(d_score)
# else:
# print('This terms returns zero document frequency: ', t)
return score
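# Sketch of the Dirichlet-smoothed query-likelihood term contribution that
# ql_score.ql_score_f appears to compute (an assumption -- the helper itself is not
# shown here; the commented-out block above suggests the same formula):
#
#   score(t) = q_lm(t) * log((tf(t, d) + mu * cf(t) / |C|) / (|d| + mu))
def _ql_term_score_sketch(q_weight, dfreq, mu, doc_len, coll_freq, total_terms):
    prob = (dfreq + mu * (float(coll_freq) / total_terms)) / (doc_len + mu)
    return q_weight * np.log(prob) if prob > 0 else 0.0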
|
73789
|
from buidl.bech32 import (
cbor_encode,
cbor_decode,
bc32encode,
bc32decode,
uses_only_bech32_chars,
)
from buidl.helper import is_intable
from binascii import a2b_base64, b2a_base64
from math import ceil
import hashlib
class BCURStringFormatError(RuntimeError):
pass
def bcur_encode(data):
"""Returns bcur encoded string and checksum"""
cbor = cbor_encode(data)
enc = bc32encode(cbor)
h = hashlib.sha256(cbor).digest()
enc_hash = bc32encode(h)
return enc, enc_hash
def bcur_decode(data, checksum=None):
"""Returns decoded data, verifies checksum if provided"""
cbor = bc32decode(data)
if checksum is not None:
h = bc32decode(checksum)
calculated_digest = hashlib.sha256(cbor).digest()
if h != calculated_digest:
raise ValueError(f"Calculated digest {calculated_digest} != {h}")
return cbor_decode(cbor)
def _parse_bcur_helper(bcur_string):
"""
This parses a bcur string and returns the following (or raises an error):
payload, checksum, x, y
Notes:
- Works for both BCURSingle and BCURMulti.
- All entries may be empty except for payload.
- Checksums are not validated here, as checksum validation is different for single vs multi.
"""
if type(bcur_string) is not str:
raise BCURStringFormatError(
f"{bcur_string} is of type {type(bcur_string)}, not a string"
)
string = bcur_string.lower().strip()
if not string.startswith("ur:bytes/"):
raise BCURStringFormatError(f"String {string} doesn't start with ur:bytes/")
bcur_parts = string.split("/")
if len(bcur_parts) == 2:
# Non-animated QR code (just 1 qr, doesn't display 1of1 nor checksum)
_, payload = bcur_parts
checksum, x_int, y_int = None, 1, 1
elif len(bcur_parts) == 3:
# Non-animated QR code (just 1 qr, doesn't display 1of1 but does have checksum)
_, checksum, payload = bcur_parts
x_int, y_int = 1, 1
elif len(bcur_parts) == 4:
# Animated QR code
_, xofy, checksum, payload = bcur_parts
xofy_parts = xofy.split("of")
if len(xofy_parts) != 2:
raise BCURStringFormatError(f"x-of-y section malformed: {xofy_parts}")
if not is_intable(xofy_parts[0]) or not is_intable(xofy_parts[1]):
raise BCURStringFormatError(
f"x and y (in x-of-y) must both be integers: {xofy_parts}"
)
x_int = int(xofy_parts[0])
y_int = int(xofy_parts[1])
if x_int > y_int:
            raise BCURStringFormatError(f"x must be <= y (in x-of-y): {xofy_parts}")
else:
raise BCURStringFormatError(f"{string} doesn't have 2-4 slashes")
if checksum:
if len(checksum) != 58:
raise BCURStringFormatError("Checksum must be 58 chars")
if not uses_only_bech32_chars(checksum):
raise BCURStringFormatError(
f"checksum can only contain bech32 characters: {checksum}"
)
if not uses_only_bech32_chars(payload):
raise BCURStringFormatError(
f"Payload can only contain bech32 characters: {payload}"
)
return payload, checksum, x_int, y_int
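# The three URI shapes accepted by the helper above (schematic; real payloads and
# checksums are bech32 strings produced by bcur_encode):
#
#   ur:bytes/<payload>                    -> (payload, None, 1, 1)
#   ur:bytes/<checksum>/<payload>         -> (payload, checksum, 1, 1)
#   ur:bytes/2of5/<checksum>/<payload>    -> (payload, checksum, 2, 5)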
class BCURSingle:
def __init__(self, text_b64, encoded=None, checksum=None):
binary_b64 = a2b_base64(text_b64)
enc, enc_hash = bcur_encode(data=binary_b64)
if encoded and encoded != enc:
raise ValueError(f"Calculated encoding {enc} != {encoded}")
if checksum and checksum != enc_hash:
raise ValueError(f"Calculated checksum {enc_hash} != {checksum}")
self.text_b64 = text_b64
self.encoded = enc
self.enc_hash = enc_hash
def __repr__(self):
return self.encode()
def encode(self, use_checksum=True):
# Single QR, no x-of-y
if use_checksum:
return f"ur:bytes/{self.enc_hash}/{self.encoded}"
else:
return f"ur:bytes/{self.encoded}"
@classmethod
def parse(cls, to_parse):
"""Parses (decodes) a BCURSingle from a single BCUR string"""
payload, checksum, x, y = _parse_bcur_helper(bcur_string=to_parse)
if x != 1 or y != 1:
raise BCURStringFormatError(
f"BCURSingle must have x=1 and y=1, instead got x={x} and y={y}"
)
# will throw an error if checksum is incorrect
enc = bcur_decode(data=payload, checksum=checksum)
return cls(
text_b64=b2a_base64(enc).strip().decode(),
encoded=payload,
checksum=checksum,
)
class BCURMulti:
def __init__(self, text_b64, encoded=None, checksum=None):
binary_b64 = a2b_base64(text_b64)
enc, enc_hash = bcur_encode(data=binary_b64)
if encoded and encoded != enc:
raise ValueError(f"Calculated encoding {enc} != {encoded}")
if checksum and checksum != enc_hash:
raise ValueError(f"Calculated checksum {enc_hash} != {checksum}")
self.checksum = checksum
self.encoded = enc
self.text_b64 = text_b64
self.enc_hash = enc_hash
def __repr__(self):
return f"bcur: {self.checksum}\n{self.text_b64}\n"
def encode(self, max_size_per_chunk=300, animate=True):
"""
Take some base64 text (i.e. a PSBT string) and encode it into multiple QR codes using Blockchain Commons Uniform Resources.
If animate=False, then max_size_per_chunk is ignored and this returns a 1of1 with checksum.
Use parse() to return a BCURMulti object from this encoded result.
This algorithm makes all the chunks of about equal length.
This makes sure that the last chunk is not (too) different in size which is visually noticeable when animation occurs
Inspired by this JS implementation:
https://github.com/cryptoadvance/specter-desktop/blob/da35e7d88072475746077432710c77f799017eb0/src/cryptoadvance/specter/templates/includes/qr-code.html
"""
if animate is False:
number_of_chunks = 1
else:
number_of_chunks = ceil(len(self.encoded) / max_size_per_chunk)
chunk_length = ceil(len(self.encoded) / number_of_chunks)
# For number_of_chunks == 1 (with no checksum) use BCURSingle
resulting_chunks = []
for cnt in range(number_of_chunks):
start_idx = cnt * chunk_length
finish_idx = (cnt + 1) * chunk_length
resulting_chunks.append(
f"ur:bytes/{cnt+1}of{number_of_chunks}/{self.enc_hash}/{self.encoded[start_idx:finish_idx]}"
)
return resulting_chunks
@classmethod
def parse(cls, to_parse):
"""Parses a BCURMulti from a list of BCUR strings"""
if type(to_parse) not in (list, tuple):
raise BCURStringFormatError(
f"{to_parse} is of type {type(to_parse)}, not a list/tuple"
)
payloads = []
global_checksum, global_y = "", 0
for cnt, bcur_string in enumerate(to_parse):
entry_payload, entry_checksum, entry_x, entry_y = _parse_bcur_helper(
bcur_string=bcur_string
)
if cnt + 1 != entry_x:
raise ValueError(
f"BCUR strings not in order: got {entry_x} and was expecting {cnt+1}"
)
# Initialize checksum and y (as in x-of-y) on first loop
if cnt == 0:
global_checksum = entry_checksum
global_y = entry_y
elif entry_checksum != global_checksum:
raise ValueError(
f"Entry {bcur_string} has checksum {entry_checksum} but we're expecting {global_checksum}"
)
elif entry_y != global_y:
raise ValueError(
f"Entry {bcur_string} wants {entry_y} parts but we're expecting {global_y} parts"
)
# All checks pass
payloads.append(entry_payload)
# will throw an error if checksum is incorrect
enc = bcur_decode(data="".join(payloads), checksum=global_checksum)
return cls(text_b64=b2a_base64(enc).strip().decode(), checksum=global_checksum)
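# Added round-trip sketch (arbitrary example payload, not a real PSBT); wrapped in a
# helper so nothing runs at import time:
def _bcur_multi_roundtrip_example():
    payload_b64 = b2a_base64(b"hello bcur " * 40).strip().decode()
    chunks = BCURMulti(text_b64=payload_b64).encode(max_size_per_chunk=60)
    restored = BCURMulti.parse(chunks)
    assert restored.text_b64 == payload_b64
    return chunks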
|
73795
|
from datetime import datetime
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app import crud
from app.core.security import verify_password
from app.models.domain import Domain
from app.models.event import Event
from app.models.user import User
from app.schemas.user import UserCreate, UserUpdate
from app.tests.utils.domain import create_random_domain
from app.tests.utils.event import create_random_page_view_event
from app.tests.utils.user import create_random_user
from app.tests.utils.utils import random_email, random_lower_string
def test_create_user(db: Session) -> None:
email = random_email()
password = <PASSWORD>()
user_in = UserCreate(email=email, password=password)
user = crud.user.create(db, obj_in=user_in)
assert user.email == email
assert hasattr(user, "hashed_password")
def test_authenticate_user(db: Session) -> None:
email = random_email()
password = <PASSWORD>()
user_in = UserCreate(email=email, password=password)
user = crud.user.create(db, obj_in=user_in)
authenticated_user = crud.user.authenticate(db, email=email, password=password)
assert authenticated_user
assert user.email == authenticated_user.email
def test_not_authenticate_user(db: Session) -> None:
email = random_email()
password = <PASSWORD>()
user = crud.user.authenticate(db, email=email, password=password)
assert user is None
def test_check_if_user_is_active(db: Session) -> None:
email = random_email()
password = <PASSWORD>()
user_in = UserCreate(email=email, password=password)
user = crud.user.create(db, obj_in=user_in)
is_active = crud.user.is_active(user)
assert is_active is True
def test_check_if_user_is_active_inactive(db: Session) -> None:
email = random_email()
password = <PASSWORD>_<PASSWORD>()
user_in = UserCreate(email=email, password=password, disabled=True)
user = crud.user.create(db, obj_in=user_in)
is_active = crud.user.is_active(user)
assert is_active
def test_check_if_user_is_superuser(db: Session) -> None:
email = random_email()
password = <PASSWORD>_<PASSWORD>()
user_in = UserCreate(email=email, password=password, is_superuser=True)
user = crud.user.create(db, obj_in=user_in)
is_superuser = crud.user.is_superuser(user)
assert is_superuser is True
def test_check_if_user_is_superuser_normal_user(db: Session) -> None:
username = random_email()
password = <PASSWORD>()
user_in = UserCreate(email=username, password=password)
user = crud.user.create(db, obj_in=user_in)
is_superuser = crud.user.is_superuser(user)
assert is_superuser is False
def test_get_user(db: Session) -> None:
password = <PASSWORD>()
username = random_email()
user_in = UserCreate(email=username, password=password, is_superuser=True)
user = crud.user.create(db, obj_in=user_in)
user_2 = crud.user.get(db, id=user.id)
assert user_2
assert user.email == user_2.email
assert jsonable_encoder(user) == jsonable_encoder(user_2)
def test_update_user(db: Session) -> None:
password = <PASSWORD>()
email = random_email()
user_in = UserCreate(email=email, password=password, is_superuser=True)
user = crud.user.create(db, obj_in=user_in)
new_password = <PASSWORD>()
    user_in_update = UserUpdate(password=new_password, is_superuser=True)
crud.user.update(db, db_obj=user, obj_in=user_in_update)
user_2 = crud.user.get(db, id=user.id)
assert user_2
assert user.email == user_2.email
assert verify_password(new_password, user_2.hashed_password)
def test_mark_deletion(db: Session):
user = create_random_user(db)
assert not user.delete_at
crud.user.mark_for_removal(db, user)
assert user.delete_at
def test_delete_pending_users(db: Session):
user = create_random_user(db)
domain = create_random_domain(db, owner_id=user.id)
event_id = create_random_page_view_event(db, domain=domain).id
user2 = create_random_user(db)
domain2 = create_random_domain(db, owner_id=user2.id)
event2_id = create_random_page_view_event(db, domain=domain2).id
domain_id = domain.id
user_id = user.id
user.delete_at = datetime.now()
db.commit()
crud.user.delete_pending_users(db)
assert not db.query(Domain).get(domain_id)
assert not db.query(User).get(user_id)
assert not db.query(Event).filter(Event.id == event_id).scalar()
assert db.query(Event).filter(Event.id == event2_id).scalar()
assert db.query(Domain).get(domain2.id)
assert db.query(User).get(user2.id)
|
73823
|
import time
from glob import glob
import subprocess
import os
from odyssey import run_signal_stem, slurm_fname, temp_dir, jobdir
if __name__ == "__main__":
    print("Monitoring slurm jobs in {0}".format(os.getcwd()))
while True:
for fname in glob(run_signal_stem + "*"):
jobname = fname[len(run_signal_stem):]
            print("Launching job {0}".format(jobname))
with temp_dir(jobdir(jobname)):
subprocess.call(["sbatch", slurm_fname])
os.remove(fname)
time.sleep(2)
|
73825
|
import unittest
from unittest import mock
from printy.exceptions import InvalidFlag, InvalidInputType
from printy.core import Printy, WINDOWS
from printy.flags import Flags
class TestGlobalFlagsPrinty(unittest.TestCase):
""" Test case for formatting with a global set of flags specified """
def setUp(self):
self.sample_text = "Some Text To Print Out"
self.printy = Printy()
self.raw_text = self.printy.get_formatted_text
self.esc = self.printy.escape
    def test_empty_value(self):
        """ Tests that passing an empty value prints nothing """
text = ''
result = self.raw_text(text)
self.assertEqual(result, text)
    def test_empty_value_with_flags(self):
        """
        Tests that passing an empty value with some flags returns the
        ANSI escape characters
        """
text = ''
flags = 'rBH'
result = self.raw_text(text, flags)
expected_result = "%s%s" % (
Flags.join_flags(Flags.get_flag_values(flags)),
Flags.get_end_of_line()
)
self.assertEqual(result, expected_result)
    def test_single_invalid_flag(self):
        """
        Tests that passing an invalid flag (only one)
        raises an exception
        """
invalid_flag = 'P'
with self.assertRaises(InvalidFlag):
self.printy.format(self.sample_text, invalid_flag)
def test_multiple_invalid_flag(self):
"""
Tests that passing multiple invalid flags raises an
exception with the first invalid flag found
"""
# P and G are invalid, should raise InvalidFlag
# with 'P' as invalid flag
flags = 'yBPGr'
with self.assertRaises(InvalidFlag) as e:
self.printy.format(self.sample_text, flags)
self.assertEqual(e.exception.flag, 'P')
def test_high_intensity_flag_color(self):
"""
Checks the correct format is returned for a high
intensity (>) flag color
"""
flag = 'p>'
text = 'Hello'
expected_text = '\x1b[38;5;98mHello\x1b[0m'
self.assertEqual(self.raw_text(text, flag), expected_text)
def test_low_intensity_flag_color(self):
"""
Checks the correct format is returned for a low
intensity (<) flag color
"""
flag = '<p'
text = 'Hello'
expected_text = '\x1b[38;5;54mHello\x1b[0m'
self.assertEqual(self.raw_text(text, flag), expected_text)
    def test_always_closing_format(self):
        """
        Tests that the returned text always ends with the closing tag
        """
result = self.raw_text(self.sample_text, 'r')
closing_tag = result[-4:]
self.assertEqual(len(closing_tag), 4)
self.assertEqual(closing_tag, Flags.get_end_of_line())
def test_no_flag_parameter_passed(self):
"""
Tests that passing no flag parameter will return a default value
"""
result = self.raw_text(self.sample_text)
self.assertEqual(result, self.sample_text)
    def test_empty_flag(self):
        """
        Tests that passing an empty string as a flag still returns the
        default value
        """
result = self.raw_text(self.sample_text, '')
self.assertEqual(result, self.sample_text)
def test_flags_with_spaces_in_between(self):
"""
Tests that passing a set of flags with some spaces in between
(like 'yB H U') still applies the desired formats
"""
desired_flags = 'yBH'
flags_with_one_space = 'yB H'
flags_with_multiple_spaces = 'y B H'
result_one = self.raw_text(self.sample_text, desired_flags)
result_two = self.raw_text(self.sample_text, flags_with_one_space)
result_three = self.raw_text(self.sample_text, flags_with_multiple_spaces)
self.assertTrue(result_one == result_two == result_three)
def test_escape_with_global_flags(self):
"""
Test escaping values with global flags
"""
text = '[n]escaped@'
expected_text = '\x1b[38;5;196m[n]escaped@\x1b[0m'
result = self.raw_text(self.esc(text), 'r')
self.assertEqual(result, expected_text)
@mock.patch('printy.core.Printy.set_windows_console_mode', return_value=True)
    def test_virtual_terminal_processing_on_windows(self, mock_console_mode):
        """
        Tests that if the platform is Windows, then it returns True
        """
self.printy.platform = WINDOWS
virtual_terminal_processing = mock_console_mode()
self.assertTrue(virtual_terminal_processing)
    def test_return_cleaned_value_if_windows_is_not_properly_configured(self):
        """
        Tests that if printy's virtual_terminal_processing is False, then it
        returns the cleaned text
        """
flags = 'rBH'
# Changes platform to Windows
self.printy.platform = WINDOWS
self.printy.virtual_terminal_processing = False
result_one = self.raw_text(self.sample_text, flags)
self.assertEqual(result_one, self.sample_text)
    def test_background_color_with_global_flags(self):
        """
        Test background color with global flags
        """
flags = 'yB{o}'
text = 'Hello'
expected_text = '\x1b[48;5;208;38;5;11;1mHello\x1b[0m'
self.assertEqual(self.raw_text(text, flags), expected_text)
    def test_background_color_no_flag_with_global_flags(self):
        """
        Test background color with an empty background flag, with global flags
        """
flags = 'yB{}'
text = 'Hello'
expected_text = '\x1b[38;5;11;1mHello\x1b[0m'
self.assertEqual(self.raw_text(text, flags), expected_text)
class TestInlineFlagsPrinty(unittest.TestCase):
""" Test case for inline formatting """
def setUp(self):
self.printy = Printy()
self.raw_text = self.printy.get_formatted_text
self.esc = self.printy.escape
    def test_inline_format_with_global_flags(self):
        """
        Tests that passing a text with inline formatting and also a global
        set of flags takes the latter as the format to be applied
        """
inline_formatted = "[y]Hey you@"
no_format = 'Hey you'
global_flags = 'rB'
result_one = self.raw_text(inline_formatted, global_flags)
result_two = self.raw_text(no_format, global_flags)
self.assertEqual(result_one, result_two)
def test_inline_format_without_ending_format_character(self):
"""
Tests that passing an inline formatted text without the ending
formatting character still returns the formatted text
"""
result_one = self.raw_text('[y]Hey you')
result_two = self.raw_text('[y]Hey you@')
self.assertEqual(result_one, result_two)
def test_escape_special_characters(self):
""" Tests that escaping special characters prints them out """
inline_text_one = '[y]<EMAIL>@'
global_text_one = '<EMAIL>', 'y'
inline_text_two = '[bH]Some text \@@'
global_text_two = 'Some text @', 'bH'
inline_result_one = self.raw_text(inline_text_one)
global_result_one = self.raw_text(global_text_one[0], global_text_one[1])
inline_result_two = self.raw_text(inline_text_two)
global_result_two = self.raw_text(global_text_two[0], global_text_two[1])
self.assertEqual(inline_result_one, global_result_one)
self.assertEqual(inline_result_two, global_result_two)
def test_multiple_sections(self):
""" Test that formats are applied correctly to each section """
section_one = "Some"
section_two = ' '
section_three = 'text'
global_format_one = self.raw_text(section_one, 'rB')
global_format_two = self.raw_text(section_two)
global_format_three = self.raw_text(section_three, 'y')
joined_global_format = global_format_one + global_format_two + global_format_three
inline_text = '[rB]Some@ [y]text@'
inline_format = self.raw_text(inline_text)
self.assertEqual(inline_format, joined_global_format)
def test_read_file(self):
""" Test retrieving the text from a file """
text_in_file = 'printy'
file_name = 'printy_file'
with mock.patch('builtins.open', mock.mock_open(read_data=text_in_file)) as m:
result = self.printy.read_file(file_name)
m.assert_called_once_with(file_name)
self.assertEqual(result, text_in_file)
    def test_escape_special_chars_method(self):
        """
        Test escaping special characters correctly; this method is used when
        an object other than a string is passed
        """
text_to_escape = '[some text @ ]'
expected_value = '\[some text \@ \]'
escaped_text = Printy._escape_special_chars(text_to_escape)
self.assertEqual(expected_value, escaped_text)
def test_pretty_print_dicts(self):
""" Test pretty printing dictionaries """
dict_to_print = {'name': '<NAME>', 'age': 34}
expected_result = '{\n [n>]\'name\'@: [c>]\'<NAME>\'@[<oB],@\n [n>]\'age\'@: [c]34@[<oB],@\n}'
pretty_dict = Printy._repr_value(dict_to_print)
self.assertEqual(expected_result, pretty_dict)
def test_pretty_print_lists(self):
""" Test pretty printing lists """
list_to_print = [1, 2, 'hello']
expected_result = '\[\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n\]'
pretty_list = Printy._repr_value(list_to_print)
self.assertEqual(expected_result, pretty_list)
def test_pretty_printy_tuples(self):
""" Test pretty printing tuples """
tuple_to_print = (1, 2, 'hello')
expected_result = '(\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n)'
pretty_tuple = Printy._repr_value(tuple_to_print)
self.assertEqual(expected_result, pretty_tuple)
def test_pretty_printy_sets(self):
""" Test pretty printing sets """
set_to_print = {1, 2, 'hello'}
expected_result = '{\n [c]1@[<oB],@ [c]2@[<oB],@ [c>]\'hello\'@\n}'
pretty_set = Printy._repr_value(set_to_print)
self.assertEqual(expected_result, pretty_set)
def test_pretty_printy_dict_pretty_false(self):
""" Tests pretty printing a dict when 'pretty' parameter is set to False """
dict_to_print = {'name': '<NAME>', 'age': 34}
expected_result = '{\'name\': \'<NAME>\', \'age\': 34}'
not_pretty_dict = Printy._repr_value(dict_to_print, pretty=False)
self.assertEqual(expected_result, not_pretty_dict)
def test_pretty_printy_list_pretty_false(self):
""" Tests pretty printing a list when 'pretty' parameter is set to False """
list_to_print = [1, 2, 'hello']
expected_result = '\[1, 2, \'hello\'\]'
not_pretty_list = Printy._repr_value(list_to_print, pretty=False)
self.assertEqual(expected_result, not_pretty_list)
def test_pretty_printy_tuple_pretty_false(self):
""" Tests pretty printing a tuple when 'pretty' parameter is set to False """
tuple_to_print = (1, 2, 'hello')
expected_result = '(1, 2, \'hello\')'
not_pretty_tuple = Printy._repr_value(tuple_to_print, pretty=False)
self.assertEqual(expected_result, not_pretty_tuple)
def test_pretty_printy_set_pretty_false(self):
""" Tests pretty printing a set when 'pretty' parameter is set to False """
set_to_print = {1, 2, 'hello'}
expected_result = '{1, 2, \'hello\'}'
not_pretty_set = Printy._repr_value(set_to_print, pretty=False)
self.assertEqual(expected_result, not_pretty_set)
def test_pretty_print_str_method_of_objects(self):
""" Test printing the str method of an object, both not defined and defined """
builtin_obj = int
expected_builtin_result = '<class \'int\'>'
pretty_builtin = Printy._repr_value(builtin_obj)
class Person:
def __str__(self):
return '[c]I am a person@'
custom_str = Person()
# Notice how it should not return the escaped character
expected_custom_result = '[c]I am a person@'
pretty_custom = Printy._repr_value(custom_str)
self.assertEqual(expected_builtin_result, pretty_builtin)
self.assertEqual(expected_custom_result, pretty_custom)
def test_pretty_object_in_dictionary(self):
"""
Test pretty printing an str method of an object inside a dictionary
or any iterable, it should give it a light magenta color
"""
dict_to_print = {'class': int}
expected_result = '{\n [n>]\'class\'@: <class \'int\'>[<oB],@\n}'
pretty_dict = Printy._repr_value(dict_to_print)
self.assertEqual(expected_result, pretty_dict)
def test_pretty_custom_str_method_in_dictionary(self):
class CustomStrMethod:
def __str__(self):
return '[rBU]Red Bold Underlined@ and [y]Yellow@'
dict_to_print = {'str': CustomStrMethod()}
expected_result = '{\n [n>]\'str\'@: [rBU]Red Bold Underlined@ and [y]Yellow@[<oB],@\n}'
pretty_dict = Printy._repr_value(dict_to_print)
self.assertEqual(expected_result, pretty_dict)
def test_print_number(self):
integer_to_print = 123
float_to_print = 123.45
expected_result_integer = '[c]123@'
expected_result_float = '[c]123.45@'
result_integer = Printy._repr_value(integer_to_print)
result_float = Printy._repr_value(float_to_print)
self.assertEqual(expected_result_integer, result_integer)
self.assertEqual(expected_result_float, result_float)
def test_print_boolean(self):
expected_false = '[<o]False@'
expected_true = '[<o]True@'
result_false = Printy._repr_value(False)
result_true = Printy._repr_value(True)
self.assertEqual(expected_false, result_false)
self.assertEqual(expected_true, result_true)
def test_print_none(self):
expected_none = '[<o]None@'
result_none = Printy._repr_value(None)
self.assertEqual(expected_none, result_none)
def test_escape_with_inline_flags(self):
"""
Test escaping values on inline formats
"""
email = '<EMAIL>'
expected_text = '\x1b[38;5;28<EMAIL>\x1b[0m'
result = self.raw_text(f'[n]{self.esc(email)}@')
self.assertEqual(result, expected_text)
    def test_background_color_with_inline_flags(self):
        """
        Test background color with inline flags
        """
text = '[yB{o}]Hello@'
expected_text = '\x1b[48;5;208;38;5;11;1mHello\x1b[0m'
self.assertEqual(self.raw_text(text), expected_text)
    def test_background_color_no_flag_with_inline_flags(self):
        """
        Test background color with an empty background flag, with inline flags
        """
text = '[yB{}]Hello@'
expected_text = '\x1b[38;5;11;1mHello\x1b[0m'
self.assertEqual(self.raw_text(text), expected_text)
class TestInputy(unittest.TestCase):
"""
Test case for inputy functionality
Here, it is not necessary to test whether the prompted message has the
correct format because it uses the methods already tested in the Printy
test cases
"""
def setUp(self):
self.inputy = Printy()
str_valid_test = "Valid String"
int_valid_test = 23
float_valid_test = 45.6
bool_valid_test = False
    def test_normalize_options_not_enough_options_raises_value_error(self):
        """
        Tests that passing a list with fewer than 2 items as 'options' raises
        a ValueError when input_type is not 'bool'
        """
with self.assertRaises(ValueError):
self.inputy._normalize_options([], self.inputy.STR)
def test_normalize_options_bool_more_than_two_items_get_the_first_two(self):
"""
Tests that passing more than 2 items as options for input_type 'bool'
returns only the first two
"""
options = ['yes', 'no', 'Yep', 'Nope']
normalized_options = self.inputy._normalize_options(options, self.inputy.BOOL)
expected_options = {'1': 'yes', '2': 'no'}
self.assertEqual(normalized_options, expected_options)
    def test_normalize_options_bool_less_than_two_items_returns_true_false(self):
        """
        Tests that passing fewer than 2 items as options for input_type 'bool'
        falls back to the default 'True'/'False' options
        """
options = ['yes']
normalized_options = self.inputy._normalize_options(options, self.inputy.BOOL)
expected_options = {'1': 'True', '2': 'False'}
self.assertEqual(normalized_options, expected_options)
def test_check_boolean_case_insensitive(self):
"""
Test that passing a different case for one of the options still
returns the value
"""
options = {'1': 'y', '2': 'n'}
value = 'Y'
result, valid = self.inputy.check_boolean(value, options, 'i')
self.assertEqual(result, True)
self.assertEqual(valid, True)
def test_check_boolean_case_sensitive(self):
"""
Test that passing a different case for one of the options returns
an invalid value
"""
options = {'1': 'y', '2': 'n'}
value = 'Y'
result, valid = self.inputy.check_boolean(value, options, '')
self.assertEqual(result, False)
self.assertEqual(valid, False)
def test_check_integer_no_condition(self):
"""
Tests that passing no condition to check an integer returns the value
"""
positive_value = 5
negative_value = -5
condition = ''
result_positive, valid_positive = self.inputy.check_integer(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_integer(str(negative_value), condition)
self.assertTrue(valid_positive)
self.assertTrue(valid_negative)
self.assertEqual(result_positive, 5)
self.assertEqual(result_negative, -5)
def test_check_integer_condition_only_positive(self):
"""
Test that passing a condition '-' to the check_integer function will
return a valid value only when the value is negative
"""
positive_value = 5
negative_value = -5
condition = '-'
result_positive, valid_positive = self.inputy.check_integer(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_integer(str(negative_value), condition)
self.assertFalse(valid_positive)
self.assertTrue(valid_negative)
self.assertEqual(result_positive, 5)
self.assertEqual(result_negative, -5)
    def test_check_integer_condition_only_negative(self):
        """
        Test that passing a condition '+' to the check_integer function will
        return a valid value only when the value is positive
        """
positive_value = 5
negative_value = -5
condition = '+'
result_positive, valid_positive = self.inputy.check_integer(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_integer(str(negative_value), condition)
self.assertTrue(valid_positive)
self.assertFalse(valid_negative)
self.assertEqual(result_positive, 5)
self.assertEqual(result_negative, -5)
def test_check_float_no_condition(self):
"""
Tests that passing no condition to check a float returns the value
"""
positive_value = 5.0
negative_value = -5.0
condition = ''
result_positive, valid_positive = self.inputy.check_float(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_float(str(negative_value), condition)
self.assertTrue(valid_positive)
self.assertTrue(valid_negative)
self.assertEqual(result_positive, 5.0)
self.assertEqual(result_negative, -5.0)
def test_check_float_condition_only_positive(self):
"""
Test that passing a condition '-' to the check_float function will
return a valid value only when the value is negative
"""
positive_value = 5.0
negative_value = -5.0
condition = '-'
result_positive, valid_positive = self.inputy.check_float(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_float(str(negative_value), condition)
self.assertFalse(valid_positive)
self.assertTrue(valid_negative)
self.assertEqual(result_positive, 5.0)
self.assertEqual(result_negative, -5.0)
    def test_check_float_condition_only_negative(self):
        """
        Test that passing a condition '+' to the check_float function will
        return a valid value only when the value is positive
        """
positive_value = 5.0
negative_value = -5.0
condition = '+'
result_positive, valid_positive = self.inputy.check_float(str(positive_value), condition)
result_negative, valid_negative = self.inputy.check_float(str(negative_value), condition)
self.assertTrue(valid_positive)
self.assertFalse(valid_negative)
self.assertEqual(result_positive, 5.0)
self.assertEqual(result_negative, -5.0)
def test_check_string_options_by_number_case_sensitive(self):
"""
Tests that, passing a set of options, the correct value is returned if
user enters the number of the item in the list (case sensitive scenario)
"""
options = {'1': 'Oranges', '2': 'Apples', '3': 'Pineapples'}
selected = 3
expected_returned_value = options[str(selected)]
result, valid = self.inputy.check_string(str(selected), options, '')
self.assertTrue(valid)
self.assertEqual(result, expected_returned_value)
def test_check_string_options_by_number_case_insensitive(self):
"""
Tests that, passing a set of options, the correct value is returned if
user enters the number of the item in the list (case insensitive scenario)
"""
options = {'1': 'Oranges', '2': 'Apples', '3': 'Pineapples'}
selected = 3
expected_returned_value = options[str(selected)]
result, valid = self.inputy.check_string(str(selected), options, 'i')
self.assertTrue(valid)
self.assertEqual(result, expected_returned_value)
    def test_check_string_options_invalid_by_number_case_insensitive(self):
        """
        Tests that, passing a set of options, an out-of-range selection number
        is flagged as invalid and the raw input is returned (case insensitive scenario)
        """
options = {'1': 'Oranges', '2': 'Apples', '3': 'Pineapples'}
selected = 6 # invalid
expected_returned_value = str(selected)
result, valid = self.inputy.check_string(str(selected), options, 'i')
self.assertFalse(valid)
self.assertEqual(result, expected_returned_value)
def test_check_string_options_case_insensitive(self):
"""
Test that passing a different case for one of the options still
returns the value
"""
options = {'1': 'Oranges', '2': 'Apples', '3': 'Pineapples'}
selected_capital_case = 'ORANGES'
selected_lower_case = 'oranges'
expected_returned_value = options['1']
result_capital, valid_capital = self.inputy.check_string(selected_capital_case, options, 'i')
result_lower, valid_lower = self.inputy.check_string(selected_lower_case, options, 'i')
self.assertTrue(valid_capital)
self.assertTrue(valid_lower)
self.assertEqual(result_capital, expected_returned_value)
self.assertEqual(result_lower, expected_returned_value)
def test_check_string_case_sensitive(self):
"""
Test that passing a different case for one of the options returns
an invalid value
"""
options = {'1': 'Oranges', '2': 'Apples', '3': 'Pineapples'}
selected_capital_case = 'ORANGES'
selected_matched_case = 'Oranges'
expected_returned_value = options['1']
result_capital, valid_capital = self.inputy.check_string(selected_capital_case, options, '')
result_matched, valid_matched = self.inputy.check_string(selected_matched_case, options, '')
self.assertFalse(valid_capital)
self.assertTrue(valid_matched)
self.assertEqual(result_matched, expected_returned_value)
@mock.patch('builtins.input', return_value=str_valid_test)
def test_passing_no_parameters_returns_a_value_str(self, mock_input):
""" Testing 'inputy' as a normal 'input()' function """
result_str = self.inputy.format_input()
self.assertEqual(result_str, self.str_valid_test)
@mock.patch('builtins.input', return_value=int_valid_test)
def test_passing_no_parameters_returns_a_value_str_from_int(self, mock_input):
""" Testing 'inputy' as a normal 'input()' function """
result_str_from_int = self.inputy.format_input()
self.assertEqual(result_str_from_int, str(self.int_valid_test))
@mock.patch('builtins.input', side_effect=[str_valid_test, bool_valid_test, float_valid_test, None, int_valid_test])
    def test_passed_invalid_when_requested_int(self, mock_input):
        """
        Test that, when specifying the user has to enter an integer,
        the message is prompted until a valid number is passed
        """
result_valid_int = self.inputy.format_input(type='int')
self.assertEqual(result_valid_int, self.int_valid_test)
@mock.patch('builtins.input', side_effect=[None, str_valid_test, bool_valid_test, float_valid_test])
    def test_passed_invalid_when_requested_float(self, mock_input):
        """
        Test that, when specifying the user has to enter a number,
        the message is prompted until a valid number is passed
        """
result_valid_int = self.inputy.format_input(type='float')
self.assertEqual(result_valid_int, self.float_valid_test)
@mock.patch('builtins.input', side_effect=[str_valid_test, None, int_valid_test, bool_valid_test])
def test_passed_invalid_when_requested_boolean(self, mock_input):
"""
Test that, when specifying the user has to enter a boolean
the message is prompted until a boolean is passed
"""
result_valid_boolean = self.inputy.format_input(type='bool')
self.assertEqual(result_valid_boolean, self.bool_valid_test)
@mock.patch('builtins.input', side_effect=[str_valid_test, None, int_valid_test, 'True'])
    def test_passed_invalid_when_requested_boolean_str(self, mock_input):
        """
        Test that, when specifying the user has to enter a boolean,
        the message is prompted until a case-insensitive match of one of the
        boolean value names is passed
        """
result_valid_boolean = self.inputy.format_input(type='bool')
self.assertEqual(result_valid_boolean, True)
@mock.patch('builtins.input', return_value=str_valid_test)
    def test_passing_an_invalid_input_type(self, mock_input):
        """
        Tests that passing an invalid input type raises an InvalidInputType
        exception. We mock input() just in case the test reaches that section
        """
invalid_input_type = 'not_int_nor_float'
with self.assertRaises(InvalidInputType) as e:
self.inputy.format_input(type=invalid_input_type)
self.assertEqual(e.exception.input_type, invalid_input_type)
def test_converting_value_to_integer(self):
number_as_string_value = '2'
invalid_string = 'Not a number as a string'
none_value = None
self.assertEqual(Printy._to_int(number_as_string_value), 2)
self.assertEqual(Printy._to_int(none_value), None)
with self.assertRaises(ValueError):
Printy._to_int(invalid_string)
def test_max_digits(self):
""" Tests restriction when passing a max_digits parameter """
max_digits = 3
invalid_number = '1234'
valid_number = '123'
invalid_number_value, invalid_number_valid_value = self.inputy._check_number(
int, value=invalid_number, max_digits=max_digits
)
valid_number_value, valid_number_valid_value = self.inputy._check_number(
int, valid_number, max_digits=max_digits
)
self.assertTrue(valid_number_valid_value)
self.assertFalse(invalid_number_valid_value)
def test_max_decimals(self):
""" Tests restriction when passing a max_decimals parameter """
max_decimals = 3
invalid_number = '1234.1234'
valid_number = '1234.123'
invalid_number_value, invalid_number_valid_value = self.inputy._check_number(
float, value=invalid_number, max_decimals=max_decimals
)
valid_number_value, valid_number_valid_value = self.inputy._check_number(
float, valid_number, max_decimals=max_decimals
)
self.assertTrue(valid_number_valid_value)
self.assertFalse(invalid_number_valid_value)
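# For reference, a minimal usage sketch of the API exercised by these tests (only
# calls that appear above; flag strings are illustrative):
#
#   p = Printy()
#   p.format("[rB]Some@ [y]text@")      # inline flags per section
#   p.format("plain text", "yB")        # global flags applied to the whole text
#   value = p.format_input(type="int")  # keeps prompting until a valid integer is entered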
|
73826
|
import tensorflow as tf
from deepsleep.nn import *
class DeepFeatureNet(object):
def __init__(
self,
batch_size,
input_dims,
n_classes,
is_train,
reuse_params,
use_dropout,
name="deepfeaturenet"
):
self.batch_size = batch_size
self.input_dims = input_dims
self.n_classes = n_classes
self.is_train = is_train
self.reuse_params = reuse_params
self.use_dropout = use_dropout
self.name = name
self.activations = []
self.layer_idx = 1
self.monitor_vars = []
def _build_placeholder(self):
# Input
name = "x_train" if self.is_train else "x_valid"
self.input_var = tf.compat.v1.placeholder(
tf.float32,
shape=[self.batch_size, self.input_dims, 1, 1],
name=name + "_inputs"
)
# Target
self.target_var = tf.compat.v1.placeholder(
tf.int32,
shape=[self.batch_size, ],
name=name + "_targets"
)
def _conv1d_layer(self, input_var, filter_size, n_filters, stride, wd=0):
input_shape = input_var.get_shape()
n_batches = input_shape[0].value
input_dims = input_shape[1].value
n_in_filters = input_shape[3].value
name = "l{}_conv".format(self.layer_idx)
with tf.compat.v1.variable_scope(name) as scope:
output = conv_1d(name="conv1d", input_var=input_var, filter_shape=[filter_size, 1, n_in_filters, n_filters], stride=stride, bias=None, wd=wd)
# # MONITORING
# self.monitor_vars.append(("{}_before_bn".format(name), output))
output = batch_norm_new(name="bn", input_var=output, is_train=self.is_train)
# # MONITORING
# self.monitor_vars.append(("{}_after_bn".format(name), output))
# output = leaky_relu(name="leaky_relu", input_var=output)
output = tf.nn.relu(output, name="relu")
self.activations.append((name, output))
self.layer_idx += 1
return output
def build_model(self, input_var):
# List to store the output of each CNNs
output_conns = []
######### CNNs with small filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=128, n_filters=64, stride=16, wd=1e-3)
network = self._conv1d_layer(input_var=input_var, filter_size=50, n_filters=64, stride=6, wd=1e-3)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=8, stride=8)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
            if self.is_train:
                network = tf.compat.v1.nn.dropout(network, keep_prob=0.5, name=name)
            else:
                network = tf.compat.v1.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Convolution
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=8, n_filters=128, stride=1)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=4, stride=4)
self.activations.append((name, network))
self.layer_idx += 1
# Flatten
name = "l{}_flat".format(self.layer_idx)
network = flatten(name=name, input_var=network)
self.activations.append((name, network))
self.layer_idx += 1
output_conns.append(network)
######### CNNs with large filter size at the first layer #########
# Convolution
# network = self._conv1d_layer(input_var=input_var, filter_size=1024, n_filters=64, stride=128)
network = self._conv1d_layer(input_var=input_var, filter_size=400, n_filters=64, stride=50)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=4, stride=4)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
            if self.is_train:
                network = tf.compat.v1.nn.dropout(network, keep_prob=0.5, name=name)
            else:
                network = tf.compat.v1.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Convolution
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
network = self._conv1d_layer(input_var=network, filter_size=6, n_filters=128, stride=1)
# Max pooling
name = "l{}_pool".format(self.layer_idx)
network = max_pool_1d(name=name, input_var=network, pool_size=2, stride=2)
self.activations.append((name, network))
self.layer_idx += 1
# Flatten
name = "l{}_flat".format(self.layer_idx)
network = flatten(name=name, input_var=network)
self.activations.append((name, network))
self.layer_idx += 1
output_conns.append(network)
######### Aggregate and link two CNNs #########
# Concat
name = "l{}_concat".format(self.layer_idx)
network = tf.concat(axis=1, values=output_conns, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
return network
def init_ops(self):
self._build_placeholder()
# Get loss and prediction operations
with tf.compat.v1.variable_scope(self.name) as scope:
# Reuse variables for validation
if self.reuse_params:
scope.reuse_variables()
# Build model
network = self.build_model(input_var=self.input_var)
# Softmax linear
name = "l{}_softmax_linear".format(self.layer_idx)
network = fc(name=name, input_var=network, n_hiddens=self.n_classes, bias=0.0, wd=0)
self.activations.append((name, network))
self.layer_idx += 1
# Outputs of softmax linear are logits
self.logits = network
######### Compute loss #########
# Cross-entropy loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits,
labels=self.target_var,
name="sparse_softmax_cross_entropy_with_logits"
)
loss = tf.reduce_mean(loss, name="cross_entropy")
# Regularization loss
regular_loss = tf.add_n(
tf.compat.v1.get_collection("losses", scope=scope.name + "\/"),
name="regular_loss"
)
# print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op = tf.add(loss, regular_loss)
# Predictions
self.pred_op = tf.argmax(self.logits, 1)
class DeepSleepNet(DeepFeatureNet):
def __init__(
self,
batch_size,
input_dims,
n_classes,
seq_length,
n_rnn_layers,
return_last,
is_train,
reuse_params,
use_dropout_feature,
use_dropout_sequence,
name="deepsleepnet"
):
super(self.__class__, self).__init__(
batch_size=batch_size,
input_dims=input_dims,
n_classes=n_classes,
is_train=is_train,
reuse_params=reuse_params,
use_dropout=use_dropout_feature,
name=name
)
self.seq_length = seq_length
self.n_rnn_layers = n_rnn_layers
self.return_last = return_last
self.use_dropout_sequence = use_dropout_sequence
def _build_placeholder(self):
# Input
name = "x_train" if self.is_train else "x_valid"
self.input_var = tf.compat.v1.placeholder(
tf.float32,
shape=[self.batch_size*self.seq_length, self.input_dims, 1, 1],
name=name + "_inputs"
)
# Target
self.target_var = tf.compat.v1.placeholder(
tf.int32,
shape=[self.batch_size*self.seq_length, ],
name=name + "_targets"
)
def build_model(self, input_var):
# Create a network with superclass method
network = super(self.__class__, self).build_model(
input_var=self.input_var
)
# Residual (or shortcut) connection
output_conns = []
# Fully-connected to select some part of the output to add with the output from bi-directional LSTM
name = "l{}_fc".format(self.layer_idx)
with tf.compat.v1.variable_scope(name) as scope:
output_tmp = fc(name="fc", input_var=network, n_hiddens=1024, bias=None, wd=0)
output_tmp = batch_norm_new(name="bn", input_var=output_tmp, is_train=self.is_train)
# output_tmp = leaky_relu(name="leaky_relu", input_var=output_tmp)
output_tmp = tf.nn.relu(output_tmp, name="relu")
self.activations.append((name, output_tmp))
self.layer_idx += 1
output_conns.append(output_tmp)
######################################################################
# Reshape the input from (batch_size * seq_length, input_dim) to
# (batch_size, seq_length, input_dim)
name = "l{}_reshape_seq".format(self.layer_idx)
input_dim = network.get_shape()[-1].value
seq_input = tf.reshape(network,
shape=[-1, self.seq_length, input_dim],
name=name)
assert self.batch_size == seq_input.get_shape()[0].value
self.activations.append((name, seq_input))
self.layer_idx += 1
# Bidirectional LSTM network
name = "l{}_bi_lstm".format(self.layer_idx)
hidden_size = 512 # will output 1024 (512 forward, 512 backward)
with tf.compat.v1.variable_scope(name) as scope:
def lstm_cell():
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=True,
state_is_tuple=True,
reuse=tf.compat.v1.get_variable_scope().reuse)
if self.use_dropout_sequence:
keep_prob = 0.5 if self.is_train else 1.0
cell = tf.compat.v1.nn.rnn_cell.DropoutWrapper(
cell,
output_keep_prob=keep_prob
)
return cell
            fw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple=True)
            bw_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(self.n_rnn_layers)], state_is_tuple=True)
# Initial state of RNN
self.fw_initial_state = fw_cell.zero_state(self.batch_size, tf.float32)
self.bw_initial_state = bw_cell.zero_state(self.batch_size, tf.float32)
# Feedforward to MultiRNNCell
list_rnn_inputs = tf.unstack(seq_input, axis=1)
#outputs, fw_state, bw_state = tf.nn.bidirectional_rnn(
outputs, fw_state, bw_state = tf.compat.v1.nn.static_bidirectional_rnn(
cell_fw=fw_cell,
cell_bw=bw_cell,
inputs=list_rnn_inputs,
initial_state_fw=self.fw_initial_state,
initial_state_bw=self.bw_initial_state
)
if self.return_last:
network = outputs[-1]
else:
network = tf.reshape(tf.concat(axis=1, values=outputs), [-1, hidden_size*2],
name=name)
self.activations.append((name, network))
            self.layer_idx += 1
self.fw_final_state = fw_state
self.bw_final_state = bw_state
# Append output
output_conns.append(network)
######################################################################
# Add
name = "l{}_add".format(self.layer_idx)
network = tf.add_n(output_conns, name=name)
self.activations.append((name, network))
self.layer_idx += 1
# Dropout
if self.use_dropout_sequence:
name = "l{}_dropout".format(self.layer_idx)
if self.is_train:
network = tf.nn.dropout(network, keep_prob=0.5, name=name)
else:
network = tf.nn.dropout(network, keep_prob=1.0, name=name)
self.activations.append((name, network))
self.layer_idx += 1
return network
def init_ops(self):
self._build_placeholder()
# Get loss and prediction operations
with tf.compat.v1.variable_scope(self.name) as scope:
# Reuse variables for validation
if self.reuse_params:
scope.reuse_variables()
# Build model
network = self.build_model(input_var=self.input_var)
# Softmax linear
name = "l{}_softmax_linear".format(self.layer_idx)
network = fc(name=name, input_var=network, n_hiddens=self.n_classes, bias=0.0, wd=0)
self.activations.append((name, network))
self.layer_idx += 1
# Outputs of softmax linear are logits
self.logits = network
######### Compute loss #########
# Weighted cross-entropy loss for a sequence of logits (per example)
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[self.logits],
[self.target_var],
[tf.ones([self.batch_size * self.seq_length])],
name="sequence_loss_by_example"
)
loss = tf.reduce_sum(loss) / self.batch_size
# Regularization loss
regular_loss = tf.add_n(
tf.compat.v1.get_collection("losses", scope=scope.name + "\/"),
name="regular_loss"
)
# print " "
# print "Params to compute regularization loss:"
# for p in tf.compat.v1.get_collection("losses", scope=scope.name + "\/"):
# print p.name
# print " "
# Total loss
self.loss_op = tf.add(loss, regular_loss)
# Predictions
self.pred_op = tf.argmax(self.logits, 1)
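# --- Usage sketch (illustrative, not part of the original module) ---
# The constructor arguments mirror DeepSleepNet.__init__ above; the concrete
# values (e.g. input_dims=3000 for 30 s of EEG sampled at 100 Hz) are
# assumptions, not taken from this file.
#
#     with tf.Graph().as_default():
#         net = DeepSleepNet(
#             batch_size=10, input_dims=3000, n_classes=5, seq_length=25,
#             n_rnn_layers=2, return_last=False, is_train=True,
#             reuse_params=False, use_dropout_feature=True,
#             use_dropout_sequence=True,
#         )
#         net.init_ops()  # builds the placeholders, logits, loss_op and pred_op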
|
73901
|
from concurrent.futures import Future, ThreadPoolExecutor
import logging
from vonx.indy.messages import StoredCredential
from vonx.web.view_helpers import (
IndyCredentialProcessor,
IndyCredentialProcessorException,
)
from api_indy.indy.credential import Credential, CredentialException, CredentialManager
from .boot import run_django_proc
LOGGER = logging.getLogger(__name__)
class CredentialProcessorQueue(IndyCredentialProcessor):
def __init__(self, max_threads=10):
super(CredentialProcessorQueue, self).__init__()
self._max_threads = max_threads
def setup(self, app):
app["credqueue"] = self
app.on_startup.append(self.app_start)
app.on_cleanup.append(self.app_stop)
async def app_start(self, _app=None):
self.start()
async def app_stop(self, _app=None):
self.stop()
def start(self):
self._executor = ThreadPoolExecutor(max_workers=self._max_threads)
def stop(self):
self._executor.shutdown(True)
def start_batch(self) -> object:
"""
May return batch info used for caching and/or scheduling
"""
return {"manager": CredentialManager()}
def get_manager(self, batch_info):
if batch_info:
return batch_info["manager"]
return CredentialManager()
def process_credential(
self, stored: StoredCredential, origin_did: str = None, batch_info=None) -> Future:
"""
Perform credential processing and create related objects.
Processing can be deferred until end_batch to determine appropriate chunk size,
        currently using a naive :class:`ThreadPoolExecutor`.
"""
cred = Credential(stored.cred.cred_data, stored.cred.cred_req_metadata, stored.cred_id)
credential_manager = self.get_manager(batch_info)
LOGGER.info("Processing credential %s for DID %s", stored.cred_id, origin_did)
def proc():
try:
return credential_manager.process(cred, origin_did)
except CredentialException as e:
raise IndyCredentialProcessorException(str(e)) from e
return self._executor.submit(run_django_proc, proc)
def end_batch(self, batch_info):
"""
Ensure that processing has been kicked off
"""
pass
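# --- Usage sketch (illustrative, not part of the original module) ---
# `stored_cred` below stands for a vonx StoredCredential instance and the DID
# value is a placeholder; everything else comes from the methods defined above.
#
#     queue = CredentialProcessorQueue(max_threads=4)
#     queue.start()
#     batch_info = queue.start_batch()
#     future = queue.process_credential(stored_cred, origin_did="did:sov:...", batch_info=batch_info)
#     result = future.result()  # blocks until the Django worker thread finishes
#     queue.end_batch(batch_info)
#     queue.stop()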
|
73910
|
import sys
from unittest.mock import MagicMock
import pytest
from lightning_transformers.core.nlp import HFBackboneConfig, HFTransformerDataConfig
from lightning_transformers.task.nlp.text_classification import (
TextClassificationDataModule,
TextClassificationTransformer,
)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_e2e(script_runner):
script_runner.hf_train(task="text_classification", dataset="emotion", model="prajjwal1/bert-tiny")
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_default_dataset(script_runner):
script_runner.hf_train(
task="text_classification", model="prajjwal1/bert-tiny", cmd_args=['dataset.cfg.dataset_name="emotion"']
)
def test_smoke_predict_e2e(script_runner):
y = script_runner.hf_predict(['+x="Lightning rocks!"'], task="text_classification", model="prajjwal1/bert-tiny")
assert len(y) == 1
assert isinstance(y[0]["score"], float)
def test_predict_from_ckpt_path(script_runner, tmpdir):
script_runner.hf_train(
task="text_classification",
dataset="emotion",
model="prajjwal1/bert-tiny",
cmd_args=[f"trainer.default_root_dir={tmpdir}"],
fast_dev_run=0,
)
ckpt_path = tmpdir / "checkpoints" / "epoch=0-step=0.ckpt"
assert ckpt_path.exists()
y = script_runner.hf_predict(
['+x="Lightning rocks!"', f'+checkpoint_path="{ckpt_path}"'],
task="text_classification",
model="prajjwal1/bert-tiny",
)
assert len(y) == 1
assert isinstance(y[0]["score"], float)
def test_model_has_correct_cfg():
model = TextClassificationTransformer(HFBackboneConfig(pretrained_model_name_or_path="bert-base-cased"))
assert model.hparams.downstream_model_type == "transformers.AutoModelForSequenceClassification"
def test_datamodule_has_correct_cfg():
tokenizer = MagicMock()
dm = TextClassificationDataModule(tokenizer)
assert type(dm.cfg) is HFTransformerDataConfig
assert dm.tokenizer is tokenizer
|
73940
|
import unittest
import rxbp
from rxbp.flowable import Flowable
from rxbp.multicast.multicast import MultiCast
from rxbp.multicast.multicastsubscriber import MultiCastSubscriber
from rxbp.multicast.multicasts.loopflowablemulticast import LoopFlowableMultiCast
from rxbp.multicast.testing.testmulticast import TestMultiCast
from rxbp.multicast.testing.testmulticastobserver import TestMultiCastObserver
from rxbp.observerinfo import ObserverInfo
from rxbp.subscriber import Subscriber
from rxbp.testing.testflowable import TestFlowable
from rxbp.testing.tobserver import TObserver
from rxbp.testing.tscheduler import TScheduler
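# NOTE: MultiCastInfo (used in setUp below) and init_observer_info (used in
# test_subscribe_single_flowable) are referenced but not imported in this
# file; the correct import paths depend on the installed rxbp version.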
class TestLoopFlowablesMultiCast(unittest.TestCase):
def setUp(self) -> None:
self.multicast_scheduler = TScheduler()
self.source_scheduler = TScheduler()
self.info = MultiCastInfo(
multicast_scheduler=self.multicast_scheduler,
source_scheduler=self.source_scheduler,
)
self.source_multicast = TestMultiCast()
self.rx_sink = TestMultiCastObserver()
self.source1 = TestFlowable()
self.source2 = TestFlowable()
def test_send_single_flowable(self):
reduce_multicast = LoopFlowableMultiCast(
source=self.source_multicast,
func=lambda m: MultiCast(m).pipe(
rxbp.multicast.op.map(lambda t: t[0]),
),
initial=[0],
)
reduce_multicast.get_source(self.info).subscribe(self.rx_sink)
self.source_multicast.on_next(Flowable(self.source1))
self.assertEqual(1, len(self.rx_sink.received))
def test_subscribe_single_flowable(self):
reduce_multicast = LoopFlowableMultiCast(
source=self.source_multicast,
func=lambda m: MultiCast(m).pipe(
rxbp.multicast.op.map(lambda t: t[0]),
),
initial=[10],
)
reduce_multicast.get_source(self.info).subscribe(self.rx_sink)
self.source_multicast.on_next(Flowable(self.source1))
sink = TObserver(immediate_continue=0)
subscription = self.rx_sink.received[0].unsafe_subscribe(Subscriber(
scheduler=self.source_scheduler, subscribe_scheduler=self.source_scheduler,
))
subscription.observable.observe(init_observer_info(sink))
self.multicast_scheduler.advance_by(1)
self.source_scheduler.advance_by(1)
# self.source1.on_next_single(0)
print(sink.received)
self.assertEqual([10], sink.received)
# def test_send_dictionary(self):
# reduce_multicast = ReduceMultiCast(source=self.source_multicast)
# reduce_multicast.get_source(self.info).subscribe(self.rx_sink)
#
# self.source_multicast.on_next({'f1': Flowable(self.source1)})
#
# self.assertEqual(1, len(self.rx_sink.received))
#
# def test_reduce_single_flowables_without_maintaining_order(self):
# reduce_multicast = ReduceMultiCast(source=self.source_multicast)
# reduce_multicast.get_source(self.info).subscribe(self.rx_sink)
# self.source_multicast.on_next(Flowable(self.source1))
# self.source_multicast.on_next(Flowable(self.source2))
# self.source_multicast.on_completed()
#
# sink = TestObserver()
# subscription = self.rx_sink.received[0].unsafe_subscribe(Subscriber(
# scheduler=self.source_scheduler,
# subscribe_scheduler=self.source_scheduler
# ))
# subscription.observable.observe(init_observer_info(observer=sink))
#
# # sending the lifted flowable is scheduled on the multicast_scheduler
# self.multicast_scheduler.advance_by(1)
#
# self.source1.on_next_single(1)
# self.source2.on_next_single('a')
# self.source1.on_next_single(2)
# self.source1.on_completed()
# self.source2.on_next_single('b')
# self.source2.on_completed()
#
# self.assertEqual([1, 'a', 2, 'b'], sink.received)
# self.assertTrue(sink.is_completed)
#
# def test_reduce_single_flowables_with_maintaining_order(self):
# reduce_multicast = ReduceMultiCast(
# source=self.source_multicast,
# maintain_order=True,
# )
# reduce_multicast.get_source(self.info).subscribe(self.rx_sink)
# self.source_multicast.on_next(Flowable(self.source1))
# self.source_multicast.on_next(Flowable(self.source2))
# self.source_multicast.on_completed()
#
# sink = TestObserver()
# subscription = self.rx_sink.received[0].unsafe_subscribe(Subscriber(
# scheduler=self.source_scheduler,
# subscribe_scheduler=self.source_scheduler
# ))
# subscription.observable.observe(init_observer_info(observer=sink))
#
# # sending the lifted flowable is scheduled on the multicast_scheduler
# self.multicast_scheduler.advance_by(1)
#
# self.source1.on_next_single(1)
# self.source2.on_next_single('a')
# self.source1.on_next_single(2)
# self.source1.on_completed()
# self.source2.on_next_single('b')
# self.source2.on_completed()
#
# self.assertEqual([1, 2, 'a', 'b'], sink.received)
# self.assertTrue(sink.is_completed)
|
73949
|
from functools import partial
from itertools import groupby
from couchdbkit import ResourceNotFound
from corehq.apps.domain import SHARED_DOMAIN, UNKNOWN_DOMAIN
from corehq.blobs import CODES
from corehq.blobs.mixin import BlobHelper, BlobMetaRef
from corehq.blobs.models import BlobMigrationState, BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import ReindexAccessor
from corehq.util.doc_processor.sql import SqlDocumentProvider
import corehq.apps.accounting.models as acct
import corehq.apps.app_manager.models as apps
import corehq.apps.hqmedia.models as hqmedia
from corehq.apps.builds.models import CommCareBuild
from corehq.apps.case_importer.tracking.models import CaseUploadFileMeta, CaseUploadRecord
from corehq.apps.domain.models import Domain
from corehq.apps.export import models as exports
from corehq.apps.ota.models import DemoUserRestore
from corehq.apps.users.models import CommCareUser
import casexml.apps.case.models as cases
import couchforms.models as xform
class MultiDbMigrator(object):
def __init__(self, slug, couch_types, sql_reindexers):
self.slug = slug
self.couch_types = couch_types
self.sql_reindexers = sql_reindexers
def iter_migrators(self):
from . import migrate as mod
NoStateMigrator, SqlMigrator, BlobMetaMigrator = make_migrators(mod)
couch_migrator = partial(BlobMetaMigrator, blob_helper=couch_blob_helper)
def db_key(doc_type):
if isinstance(doc_type, tuple):
doc_type = doc_type[1]
return doc_type.get_db().dbname
for key, types in groupby(sorted(self.couch_types, key=db_key), key=db_key):
slug = "%s-%s" % (self.slug, key)
yield NoStateMigrator(slug, list(types), couch_migrator)
for rex in self.sql_reindexers:
slug = "%s-%s" % (self.slug, rex.model_class.__name__)
yield SqlMigrator(slug, rex(), BlobMetaMigrator)
def migrate(self, filename, *args, **kw):
def filen(n):
return None if filename is None else "{}.{}".format(filename, n)
migrated = 0
skipped = 0
for n, item in enumerate(self.iter_migrators()):
one_migrated, one_skipped = item.migrate(filen(n), *args, **kw)
migrated += one_migrated
skipped += one_skipped
print("\n")
if not skipped:
BlobMigrationState.objects.get_or_create(slug=self.slug)[0].save()
return migrated, skipped
def make_migrators(mod):
# defer class definitions to work around circular import
class BlobMetaMigrator(mod.BaseDocMigrator):
"""Migrate blob metadata to BlobMeta model"""
def __init__(self, *args, **kw):
super(BlobMetaMigrator, self).__init__(*args, **kw)
self.total_blobs = 0
def migrate(self, doc):
if not doc.get("external_blobs"):
return True
type_code = self.get_type_code(doc)
obj = self.blob_helper(doc, self.couchdb, type_code)
domain = obj.domain
if domain is None:
self.error(obj, {
"error": "unknown-domain",
"doc_type": obj.doc_type,
"doc_id": obj._id,
})
domain = UNKNOWN_DOMAIN
if getattr(obj, "_attachments", None):
self.error(obj, {
"error": "ignored-couch-attachments",
"doc_type": obj.doc_type,
"doc_id": obj._id,
"domain": obj.domain,
"attachments": obj._attachments,
})
with BlobMeta.get_cursor_for_partition_value(doc['_id']) as cursor:
for name, meta in obj.external_blobs.items():
if meta.blobmeta_id is not None:
# blobmeta already saved
continue
cursor.execute("""
INSERT INTO blobs_blobmeta (
domain,
type_code,
parent_id,
name,
key,
content_type,
content_length,
created_on
) VALUES (%s, %s, %s, %s, %s, %s, %s, CLOCK_TIMESTAMP())
ON CONFLICT (key) DO NOTHING
""", params=[
domain,
type_code,
doc["_id"],
name,
meta.key,
meta.content_type,
meta.content_length or 0,
])
self.total_blobs += 1
return True
def error(self, obj, doc):
print("Error: %s %r" % (doc["error"], obj))
super(BlobMetaMigrator, self).write_backup(doc)
class NoStateMigrator(mod.Migrator):
def write_migration_completed_state(self):
pass
class SqlMigrator(NoStateMigrator):
def __init__(self, slug, reindexer, doc_migrator_class):
types = [reindexer.model_class]
def doc_migrator(*args, **kw):
kw["blob_helper"] = reindexer.blob_helper
kw["get_type_code"] = reindexer.get_type_code
return doc_migrator_class(*args, **kw)
super(SqlMigrator, self).__init__(slug, types, doc_migrator)
self.reindexer = reindexer
def get_document_provider(self):
return SqlDocumentProvider(self.iteration_key, self.reindexer)
return NoStateMigrator, SqlMigrator, BlobMetaMigrator
class SqlBlobHelper(object):
"""Adapt a SQL model object to look like a BlobHelper
    This is currently built on the assumption that the SQL model only
references a single blob, and the blob name is not used.
"""
def __init__(self, obj, key, domain, reindexer):
self.obj = obj
self.domain = domain
self.blobs = {"": BlobMetaRef(key=key, **reindexer.blob_kwargs(obj))}
self.external_blobs = self.blobs
def __repr__(self):
return "<%s %s domain=%s id=%s>" % (
type(self).__name__,
self.doc_type,
self.domain,
self._id,
)
@property
def _id(self):
# NOTE unlike couch documents, this is different from `doc["_id"]`,
# the value used to set `BlobMeta.parent_id`. This value should
        # only be used to identify the record in case of error.
return self.obj.id
@property
def doc_type(self):
return type(self.obj).__name__
def sql_blob_helper(key_attr):
def blob_helper(self, doc, *ignored):
"""This has the same signature as BlobHelper
:returns: Object having parts of BlobHelper interface needed
for blob migrations (currently only used by BlobMetaMigrator).
"""
obj = doc["_obj_not_json"]
domain = self.get_domain(obj)
return SqlBlobHelper(obj, getattr(obj, key_attr), domain, self)
return blob_helper
class PkReindexAccessor(ReindexAccessor):
@property
def id_field(self):
return 'id'
def get_doc(self, *args, **kw):
# only used for retries; BlobMetaMigrator doesn't retry
raise NotImplementedError
def doc_to_json(self, obj, id):
return {"_id": str(id), "_obj_not_json": obj, "external_blobs": True}
class CaseUploadFileMetaReindexAccessor(PkReindexAccessor):
model_class = CaseUploadFileMeta
blob_helper = sql_blob_helper("identifier")
def doc_to_json(self, obj):
return PkReindexAccessor.doc_to_json(self, obj, self.get_domain(obj))
@staticmethod
def get_type_code(doc):
return CODES.data_import
def get_domain(self, obj):
try:
return CaseUploadRecord.objects.get(upload_file_meta_id=obj.id).domain
except CaseUploadRecord.DoesNotExist:
return None
def blob_kwargs(self, obj):
return {"content_length": obj.length}
class DemoUserRestoreReindexAccessor(PkReindexAccessor):
model_class = DemoUserRestore
blob_helper = sql_blob_helper("restore_blob_id")
def doc_to_json(self, obj):
return PkReindexAccessor.doc_to_json(
self, obj, obj.demo_user_id or "DemoUserRestore")
@staticmethod
def get_type_code(doc):
return CODES.demo_user_restore
def get_domain(self, obj):
try:
return CommCareUser.get(obj.demo_user_id).domain
except ResourceNotFound:
return None
def blob_kwargs(self, obj):
return {"content_length": obj.content_length, "content_type": "text/xml"}
def couch_blob_helper(doc, *args, **kw):
obj = BlobHelper(doc, *args, **kw)
get_domain = DOMAIN_MAP.get(obj.doc_type)
if get_domain is not None:
assert not hasattr(obj, "domain"), obj
obj.domain = get_domain(doc)
elif not hasattr(obj, "domain"):
obj.domain = None # will trigger "unknown-domain" error
return obj
def get_shared_domain(doc):
return SHARED_DOMAIN
def get_invoice_domain(doc):
if doc.get("is_wire"):
try:
return acct.WireInvoice.objects.get(id=int(doc["invoice_id"])).domain
except acct.WireInvoice.DoesNotExist:
return None # trigger "unknown-domain" error
# customer invoice has no domain
return UNKNOWN_DOMAIN
DOMAIN_MAP = {
"InvoicePdf": get_invoice_domain,
"CommCareBuild": get_shared_domain,
"CommCareAudio": get_shared_domain,
"CommCareImage": get_shared_domain,
"CommCareVideo": get_shared_domain,
"CommCareMultimedia": get_shared_domain,
}
migrate_metadata = lambda: MultiDbMigrator("migrate_metadata",
couch_types=[
apps.Application,
apps.LinkedApplication,
apps.RemoteApp,
("Application-Deleted", apps.Application),
("RemoteApp-Deleted", apps.RemoteApp),
apps.SavedAppBuild,
CommCareBuild,
Domain,
acct.InvoicePdf,
hqmedia.CommCareAudio,
hqmedia.CommCareImage,
hqmedia.CommCareVideo,
hqmedia.CommCareMultimedia,
cases.CommCareCase,
('CommCareCase-deleted', cases.CommCareCase),
('CommCareCase-Deleted', cases.CommCareCase),
('CommCareCase-Deleted-Deleted', cases.CommCareCase),
exports.CaseExportInstance,
exports.FormExportInstance,
exports.SMSExportInstance,
],
sql_reindexers=[
CaseUploadFileMetaReindexAccessor,
DemoUserRestoreReindexAccessor,
],
)
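# --- Usage sketch (illustrative, not part of the original module) ---
# MultiDbMigrator.migrate derives one backup filename per sub-migrator via
# "{filename}.{n}", or passes None through unchanged when filename is None.
#
#     migrator = migrate_metadata()
#     migrated, skipped = migrator.migrate("blob-metadata-backup.txt")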
|
73983
|
import os
from setuptools import find_packages, setup
from openwisp_ipam import get_version
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
def get_install_requires():
"""
parse requirements.txt, ignore links, exclude comments
"""
requirements = []
for line in open('requirements.txt').readlines():
if line.startswith('#') or line == '' or line.startswith('git'):
continue
requirements.append(line)
return requirements
setup(
name='openwisp-ipam',
version=get_version(),
license='BSD-3-Clause',
author='OpenWISP',
author_email='<EMAIL>',
description='IP address space administration module of OpenWISP.',
long_description=README,
url='https://github.com/openwisp/openwisp-ipam',
download_url='https://github.com/openwisp/openwisp-ipam/releases',
platforms=['Platform Independent'],
keywords=['django', 'freeradius', 'networking', 'openwisp'],
packages=find_packages(exclude=['tests*', 'docs*']),
include_package_data=True,
zip_safe=False,
install_requires=get_install_requires(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Networking',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Framework :: Django',
'Programming Language :: Python :: 3',
],
)
|
73992
|
from stack import Stack
class Graph:
    """Undirected graph stored as an adjacency list, with an iterative DFS
    that records entry/exit times, distances and predecessors."""
def __init__(self):
self.vertices: list = []
self.adjacencyList: dict = {}
self.distance: dict = {}
self.prev: dict = {}
self.colors: dict = {}
self.entry: dict = {}
self.exit: dict = {}
self.time: int = 0
def addVertex(self, label: str):
self.vertices.append(label)
self.adjacencyList[label]: list = []
self.distance[label] = 0
self.prev[label] = None
self.colors[label] = "white"
def addEdge(self, label1: str, label2: str):
self.adjacencyList[label1].append(label2)
self.adjacencyList[label2].append(label1)
    def dfs(self, label: str):
        # Iterative depth-first search using an explicit stack; records
        # entry/exit timestamps and colors vertices white -> gray -> black.
s = Stack()
s.push(label)
self.colors[label] = "gray"
self.time += 1
self.entry[label] = self.time
current: str
neighbour: str
while not s.isEmpty():
current = s.peek()
neighbour = self.findUnvisitedNeighbour(current)
if neighbour is not None:
self.colors[neighbour] = "gray"
self.distance[neighbour] = self.distance[current] + 1
self.prev[neighbour] = current
self.time += 1
self.entry[neighbour] = self.time
s.push(neighbour)
else:
s.pop()
self.time += 1
self.exit[current] = self.time
self.colors[current] = "black"
def findUnvisitedNeighbour(self, label: str):
count: int = 0
found: bool = False
neighbour: str = None
while count < len(self.adjacencyList[label]) and not found:
if self.colors[self.adjacencyList[label][count]] == "white":
found = True
neighbour = self.adjacencyList[label][count]
else:
count += 1
return neighbour
    def showPath(self, end: str) -> str:
if self.prev[end] is None:
return end
else:
return self.showPath(self.prev[end]) + " -> " + end
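if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): build a small
    # graph, run a depth-first search from "A" and print the path to "D".
    g = Graph()
    for label in ("A", "B", "C", "D"):
        g.addVertex(label)
    g.addEdge("A", "B")
    g.addEdge("B", "C")
    g.addEdge("C", "D")
    g.dfs("A")
    print(g.showPath("D"))  # -> "A -> B -> C -> D"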
|
73993
|
import csv
import sys
# Read a tab-separated file: the first row is treated as the header and the
# remaining rows are collected into `data`.
filename = 'ch02-data.tab'
data = []
header = None
try:
    with open(filename) as f:
        reader = csv.reader(f, dialect=csv.excel_tab)
        c = 0
        for row in reader:
            if c == 0:
                header = row
            else:
                data.append(row)
            c += 1
except csv.Error as e:
    print("Error reading CSV file at line %s: %s" % (reader.line_num, e))
    sys.exit(-1)
if header:
    print(header)
    print('===================')
    for datarow in data:
        print(datarow)
|
74001
|
import contextlib
from datetime import datetime, timedelta
from typing import Iterable, Optional, Tuple, Union
import jwt
with open("tests/key/private_key", "rb") as f:
private_key = f.read()
with open("tests/key/public_key", "rb") as f:
public_key = f.read()
ACCESS_COOKIE_NAME = "access"
REFRESH_COOKIE_NAME = "refresh"
class User:
"""
    Set up a user object with the given id, username and admin status.
"""
def __init__(self, id: int, username: str, admin: bool):
self.id = id
self.username = username
self.is_admin = admin
self.data = {"id": id, "username": username}
def mock_get_authenticated_user():
"""
Mock the get_authenticated_user function to return a user object.
Returns:
User: A user object.
"""
class User:
def __init__(self):
"""
            Set up a fixed non-admin user (id=2, username "user").
"""
self.id = 2
self.username = "user"
self.is_admin = False
self.data = {"id": self.id, "username": self.username}
return User()
class MockDatabaseBackend:
"""
    Mock database backend holding an in-memory list of users for tests.
"""
def __init__(self, database_name):
self._incr = 5
self._users = [
{
"id": 1,
"email": "<EMAIL>",
"username": "admin",
"password": "<PASSWORD>",
"active": True,
"confirmed": True,
"permissions": ["admin"],
},
{
"id": 2,
"email": "<EMAIL>",
"username": "user",
"password": "<PASSWORD>",
"active": True,
"confirmed": True,
"permissions": [],
},
{
"id": 3,
"email": "<EMAIL>",
"username": "anotheruser",
"password": "<PASSWORD>",
"active": True,
"confirmed": False,
"permissions": [],
},
{
"id": 4,
"email": "<EMAIL>",
"username": "inactiveuser",
"password": "<PASSWORD>",
"active": False,
"confirmed": True,
"permissions": [],
},
{
"id": 5,
"email": "<EMAIL>",
"username": "socialuser",
"provider": "google",
"sid": "8888",
"active": False,
"confirmed": True,
"permissions": [],
},
]
self._email_confirmations = []
def _increment_id(self) -> int:
self._incr += 1
return self._incr
def _get(self, field: str, value) -> Optional[dict]:
return next((item for item in self._users if item.get(field) == value), None)
async def get(self, id: int) -> Optional[dict]:
return self._get("id", id)
async def get_by_email(self, email: str) -> Optional[dict]:
return self._get("email", email)
async def get_by_username(self, username: str) -> Optional[dict]:
return self._get("username", username)
async def get_by_social(self, provider: str, sid: str) -> Optional[dict]:
return next(
(
item
for item in self._users
if item.get("provider") == provider and item.get("sid") == sid
),
None,
) # pragma: no cover
async def create(self, obj: dict) -> int:
id = self._increment_id()
obj["id"] = id
self._users.append(obj)
return id
async def update(self, id: int, obj: dict) -> bool:
for i, item in enumerate(self._users):
if item.get("id") == id:
self._users[i].update(obj)
return True
return False # pragma: no cover
async def delete(self, id: int) -> bool:
"""
Delete a user.
Args:
id (int): The user id to delete.
Returns:
bool: True if the user was deleted, False otherwise.
"""
for i, item in enumerate(self._users): # pragma: no cover
if item.get("id") == id: # pragma: no cover
del self._users[i] # pragma: no cover
return True # pragma: no cover
return False # pragma: no cover
async def count(self, query) -> int:
return 42 # pragma: no cover
async def request_email_confirmation(self, email: str, token_hash: str) -> None:
"""
Add a new email confirmation to the list.
Args:
email (str): The email address to confirm.
token_hash (str): The token hash to confirm.
Returns:
None
"""
for i, item in enumerate(self._email_confirmations):
if item.get("email") == email: # pragma: no cover
self._email_confirmations[i].update(
{"token": token_hash}
) # pragma: no cover
return None # pragma: no cover
self._email_confirmations.append({"email": email, "token": token_hash})
async def confirm_email(self, token_hash: str) -> bool:
"""
Confirm an email address.
Args:
token_hash (str): The token hash to confirm.
Returns:
bool: True if the email was confirmed, False otherwise.
"""
for item in self._email_confirmations:
if item.get("token") == token_hash:
user = self._get("email", item.get("email"))
await self.update(user.get("id"), {"confirmed": True})
return True
return False
async def get_blacklist(self) -> Iterable[dict]:
"""
Get the blacklist.
Returns:
Iterable[dict]: The blacklist.
"""
return [
item for item in self._users if not item.get("active")
] # pragma: no cover
async def search(self) -> Tuple[dict, int]:
return self._users, 1 # pragma: no cover
class MockCacheBackend:
"""
    Mock cache backend backed by an in-memory dictionary.
"""
def __init__(self) -> None:
self._db = {}
async def get(self, key: str) -> Optional[str]:
return self._db.get(key)
async def delete(self, key: str) -> None:
with contextlib.suppress(KeyError):
self._db.pop(key)
async def keys(self, match: str) -> Iterable[str]:
return {} # pragma: no cover
async def set(self, key: str, value: Union[str, bytes, int], expire: int) -> None:
self._db[key] = value
async def setnx(self, key: str, value: Union[str, bytes, int], expire: int) -> None:
v = self._db.get(key) # pragma: no cover
if v is None: # pragma: no cover
self._db[key] = value # pragma: no cover
async def incr(self, key: str) -> str:
v = self._db.get(key)
if v is not None:
self._db[key] = int(v) + 1
async def dispatch_action(self, channel: str, action: str, payload: str) -> None:
print("Dispatching action") # pragma: no cover
print(action) # pragma: no cover
print(payload) # pragma: no cover
class MockAuthBackend:
@classmethod
def create(
cls,
jwt_algorithm: str,
private_key: bytes,
public_key: bytes,
access_expiration: int,
refresh_expiration: int,
) -> None:
pass # pragma: no cover
def __init__(
self,
jwt_algorithm: str,
private_key: bytes,
public_key: bytes,
access_expiration: int = 60 * 5,
refresh_expiration: int = 60 * 10,
):
self._jwt_algorithm = jwt_algorithm
self._private_key = private_key
self._public_key = public_key
self._access_expiration = access_expiration
self._refresh_expiration = refresh_expiration
async def decode_token(self, token: str, leeway: int = 0) -> Optional[dict]:
"""
Decode a JWT token.
Args:
token (str): The JWT token to decode.
leeway (int, optional): The leeway to use when decoding the token. Defaults to 0.
Returns:
Optional[dict]: The decoded token.
"""
if token:
return jwt.decode(token, key=self._public_key, algorithms="RS256")
return None # pragma: no cover
def _create_token(
self, payload: dict, token_type: str, expiration_delta: Optional[int] = None
) -> str:
iat = datetime.utcnow()
if expiration_delta:
exp = datetime.utcnow() + timedelta(seconds=expiration_delta)
else:
exp = datetime.utcnow() + timedelta(seconds=60) # pragma: no cover
payload |= {"iat": iat, "exp": exp, "type": token_type}
token = jwt.encode(payload, self._private_key, algorithm=self._jwt_algorithm)
if isinstance(token, bytes):
# For PyJWT <= 1.7.1
return token.decode("utf-8") # pragma: no cover
# For PyJWT >= 2.0.0a1
return token
def create_access_token(self, payload: dict) -> str:
return self._create_token(payload, "access", 60 * 5)
def create_refresh_token(self, payload: dict) -> str:
return self._create_token(payload, "refresh", 60 * 10)
def create_tokens(self, payload: dict) -> dict:
access = self.create_access_token(payload)
refresh = self.create_refresh_token(payload)
return {"access": access, "refresh": refresh}
class MockEmailClient:
def __init__(self, *args):
pass
async def send_confirmation_email(self, *args):
"""
Send a confirmation email.
"""
async def send_forgot_password_email(self, *args):
"""
Send a forgot password email.
"""
def mock_verify_password(password: str, db_password: str) -> bool:
    return password == db_password
def mock_admin_required():
pass
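# --- Usage sketch (illustrative, not part of the original module) ---
# MockAuthBackend is constructed with the test keys loaded at the top of this
# file; decode_token is a coroutine, so it must be awaited.
#
#     backend = MockAuthBackend("RS256", private_key, public_key, 60 * 5, 60 * 10)
#     tokens = backend.create_tokens({"id": 1, "username": "admin"})
#     payload = await backend.decode_token(tokens["access"])  # inside a coroutine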
|
74006
|
import logging
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
from unittest.mock import MagicMock, ANY
try:
from mpfmc.core.audio import SoundSystem
from mpfmc.assets.sound import SoundStealingMethod
except ImportError:
SoundSystem = None
SoundStealingMethod = None
logging.warning("mpfmc.core.audio library could not be loaded. Audio "
"features will not be available")
class TestAudioSoundLoop(MpfMcTestCase):
"""Test audio sound loops."""
def get_machine_path(self):
return 'tests/machine_files/audio'
def get_config_file(self):
return 'test_audio_sound_loop.yaml'
def test_sound_loop_track(self):
""" Tests the Sound Loop track type and its associated assets"""
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
# Check sound loop track
track_loops = interface.get_track_by_name("loops")
self.assertIsNotNone(track_loops)
self.assertEqual(track_loops.name, "loops")
self.assertAlmostEqual(track_loops.volume, 0.6, 1)
# /sounds/loops
self.assertTrue(hasattr(self.mc, 'sounds'))
self.assertIn('hihat', self.mc.sounds)
self.assertIn('kick', self.mc.sounds)
self.assertIn('kick2', self.mc.sounds)
# Sound loop sets
self.assertTrue(hasattr(self.mc, 'sound_loop_sets'))
self.assertIn('hi_hat', self.mc.sound_loop_sets)
self.assertIn('basic_beat', self.mc.sound_loop_sets)
self.assertIn('basic_beat2', self.mc.sound_loop_sets)
# Mock BCP send method
self.mc.bcp_processor.send = MagicMock()
self.mc.bcp_processor.enabled = True
# Test sound_loop_player
self.advance_time()
status = track_loops.get_status()
self.assertEqual(0, len(status))
self.mc.events.post('play_hi_hat')
self.advance_real_time(1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['sound_id'])
self.assertEqual(130.0, status[0]['tempo'])
# Ensure sound_loop_set.events_when_played is working properly (send event when a sound_loop_set is played)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_played')
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_looping')
self.mc.bcp_processor.send.reset_mock()
self.mc.events.post('play_basic_beat')
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(2, len(status))
self.assertEqual('delayed', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.assertEqual('playing', status[1]['status'])
self.assertEqual(325660, status[1]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[1]['sound_id'])
self.assertEqual(status[0]['start_delay_samples_remaining'], status[1]['stop_loop_samples_remaining'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_stopped')
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_played')
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_looping')
self.mc.bcp_processor.send.reset_mock()
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.mc.events.post('play_hi_hat')
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(2, len(status))
self.assertEqual('delayed', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['sound_id'])
self.assertEqual('playing', status[1]['status'])
self.assertEqual(325660, status[1]['length'])
self.assertEqual(self.mc.sounds['kick'].id, status[1]['sound_id'])
self.assertEqual(status[0]['start_delay_samples_remaining'], status[1]['stop_loop_samples_remaining'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_stopped')
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_played')
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['sound_id'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_looping')
self.mc.bcp_processor.send.reset_mock()
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['sound_id'])
self.mc.events.post('play_basic_beat2')
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(2, len(status))
self.assertEqual('delayed', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick2'].id, status[0]['sound_id'])
self.assertEqual('playing', status[1]['status'])
self.assertEqual(325660, status[1]['length'])
self.assertEqual(self.mc.sounds['hihat'].id, status[1]['sound_id'])
self.assertEqual(status[0]['start_delay_samples_remaining'], status[1]['stop_loop_samples_remaining'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hi_hat_stopped')
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat2_played')
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick2'].id, status[0]['sound_id'])
self.advance_real_time(2)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat2_looping')
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick2'].id, status[0]['sound_id'])
def test_sound_loop_track_layers(self):
""" Tests the Sound Loop track layers"""
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
# Check sound loop track
track_loops = interface.get_track_by_name("loops")
self.assertIsNotNone(track_loops)
self.assertEqual(track_loops.name, "loops")
self.assertAlmostEqual(track_loops.volume, 0.6, 1)
# /sounds/loops
self.assertTrue(hasattr(self.mc, 'sounds'))
self.assertIn('kick', self.mc.sounds)
self.assertIn('kick2', self.mc.sounds)
self.assertIn('hihat', self.mc.sounds)
self.assertIn('snare', self.mc.sounds)
self.assertIn('clap', self.mc.sounds)
self.assertIn('bass_synth', self.mc.sounds)
self.assertEqual(1, self.mc.sounds["kick"].marker_count)
self.assertEqual(2, self.mc.sounds["hihat"].marker_count)
# Sound loop sets
self.assertTrue(hasattr(self.mc, 'sound_loop_sets'))
self.assertIn('basic_beat_layers', self.mc.sound_loop_sets)
self.assertIn('basic_beat_layers2', self.mc.sound_loop_sets)
# Mock BCP send method
self.mc.bcp_processor.send = MagicMock()
self.mc.bcp_processor.enabled = True
# Test sound_loop_player
self.advance_time()
self.mc.events.post('play_sound_synthping')
self.mc.events.post('play_basic_beat_layers')
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(325660, status[0]['length'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.assertEqual(130.0, status[0]['tempo'])
self.assertEqual(3, len(status[0]['layers']))
self.assertEqual('stopped', status[0]['layers'][0]['status'])
self.assertEqual('stopped', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.advance_real_time(0.9)
# Ensure sound_loop_set.events_when_played is working properly (send event when a sound_loop_set is played)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_layers_played')
self.mc.events.post('add_hi_hats')
self.advance_time()
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.assertEqual(3, len(status[0]['layers']))
self.assertEqual('queued', status[0]['layers'][0]['status'])
self.assertEqual('stopped', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.advance_real_time(3)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.assertEqual(3, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('stopped', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.mc.events.post('add_snare')
self.mc.events.post('add_claps')
self.advance_real_time(2)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds['kick'].id, status[0]['sound_id'])
self.assertEqual(3, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('playing', status[0]['layers'][1]['status'])
self.assertEqual('playing', status[0]['layers'][2]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
# Ensure sound_loop_set.events_when_looping is working properly (send event when a sound_loop_set loops)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_layers_looping')
# Ensure sound marker events are working properly for underlying sounds
self.mc.bcp_processor.send.assert_any_call('trigger', name='kick_marker_1', sound_instance=ANY, marker_id=0)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hihat_marker_1', sound_instance=ANY, marker_id=0)
self.mc.bcp_processor.send.assert_any_call('trigger', name='hihat_marker_2', sound_instance=ANY, marker_id=1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds["kick"].id, status[0]['sound_id'])
self.assertEqual(3, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('playing', status[0]['layers'][1]['status'])
self.assertEqual('playing', status[0]['layers'][2]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.advance_real_time(2)
self.mc.events.post('play_basic_beat_layers2')
self.advance_time()
status = track_loops.get_status()
self.assertEqual(2, len(status))
self.assertEqual('delayed', status[0]['status'])
self.assertEqual(self.mc.sounds["kick2"].id, status[0]['sound_id'])
self.assertEqual('playing', status[1]['status'])
self.assertEqual(self.mc.sounds["kick"].id, status[1]['sound_id'])
self.assertEqual(status[0]['start_delay_samples_remaining'], status[1]['stop_loop_samples_remaining'])
self.advance_real_time(4)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds["kick2"].id, status[0]['sound_id'])
self.assertEqual(4, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('playing', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual('playing', status[0]['layers'][3]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.assertEqual(self.mc.sounds['bass_synth'].id, status[0]['layers'][3]['sound_id'])
# Ensure sound_loop_set.events_when_stopped is working properly (send event when a sound_loop_set stops)
self.mc.bcp_processor.send.assert_any_call('trigger', name='basic_beat_layers_stopped')
self.mc.bcp_processor.send.assert_any_call('trigger', name='sound_loop_set_stopped')
self.mc.events.post('fade_out_bass_synth')
self.advance_time()
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds["kick2"].id, status[0]['sound_id'])
self.assertEqual(4, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('playing', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual('fading out', status[0]['layers'][3]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.assertEqual(self.mc.sounds['bass_synth'].id, status[0]['layers'][3]['sound_id'])
self.assertGreater(status[0]['layers'][3]['fade_out_steps'], 0)
self.assertGreater(status[0]['layers'][3]['fade_steps_remaining'], 0)
self.advance_real_time(4)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(self.mc.sounds["kick2"].id, status[0]['sound_id'])
self.assertEqual(4, len(status[0]['layers']))
self.assertEqual('playing', status[0]['layers'][0]['status'])
self.assertEqual('playing', status[0]['layers'][1]['status'])
self.assertEqual('stopped', status[0]['layers'][2]['status'])
self.assertEqual('stopped', status[0]['layers'][3]['status'])
self.assertEqual(self.mc.sounds['hihat'].id, status[0]['layers'][0]['sound_id'])
self.assertEqual(self.mc.sounds['snare'].id, status[0]['layers'][1]['sound_id'])
self.assertEqual(self.mc.sounds['clap'].id, status[0]['layers'][2]['sound_id'])
self.assertEqual(self.mc.sounds['bass_synth'].id, status[0]['layers'][3]['sound_id'])
self.assertGreater(status[0]['layers'][3]['fade_out_steps'], 0)
self.assertEqual(0, status[0]['layers'][3]['fade_steps_remaining'])
self.mc.events.post('reset_current_loop')
self.advance_real_time(0.1)
self.mc.events.post('reset_current_loop')
self.advance_real_time(0.1)
self.mc.events.post('reset_current_loop')
self.advance_real_time(0.1)
self.mc.events.post('reset_current_loop')
self.advance_real_time(0.2)
self.mc.events.post('reset_current_loop')
self.advance_real_time(0.1)
self.mc.events.post('play_basic_beat_layers')
self.mc.events.post('stop_current_loop')
self.mc.events.post('play_sound_synthping')
self.advance_real_time(2)
# Make sure next pending sound_loop_set is cancelled with stop action
status = track_loops.get_status()
self.assertEqual(0, len(status))
def test_sound_loop_fades(self):
""" Tests Sound Loop fading"""
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
# Check sound loop track
track_loops = interface.get_track_by_name("loops")
self.assertIsNotNone(track_loops)
self.assertEqual(track_loops.name, "loops")
self.assertAlmostEqual(track_loops.volume, 0.6, 1)
# /sounds/loops
self.assertTrue(hasattr(self.mc, 'sounds'))
self.assertIn('hihat', self.mc.sounds)
self.assertIn('kick', self.mc.sounds)
self.assertIn('kick2', self.mc.sounds)
# Sound loop sets
self.assertTrue(hasattr(self.mc, 'sound_loop_sets'))
self.assertIn('hi_hat', self.mc.sound_loop_sets)
self.assertIn('basic_beat', self.mc.sound_loop_sets)
self.assertIn('basic_beat2', self.mc.sound_loop_sets)
# Play hi-hat loop and check status
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['hi_hat'])
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['hihat'].id)
# Now play kick loop and recheck status (both loops should be cross-fading and in sync)
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['basic_beat'], None,
{'fade_in': 1.0, 'timing': 'now', 'synchronize': True})
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual('fading out', status[1]['status'])
self.assertEqual('fading in', status[0]['status'])
self.assertGreater(status[1]['fade_out_steps'], 0)
self.assertGreater(status[0]['fade_in_steps'], 1)
self.assertEqual(status[1]['sample_pos'], status[0]['sample_pos'])
self.assertEqual(status[1]['sound_id'], self.mc.sounds['hihat'].id)
self.assertEqual(status[0]['sound_id'], self.mc.sounds['kick'].id)
# Recheck status (hi-hat loop should be finished and kick loop should be playing)
self.advance_real_time(1.1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['kick'].id)
# Now play hi hat loop and recheck status (both loops should be cross-fading and in sync)
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['hi_hat'], None,
{'fade_in': 2.0, 'timing': 'now', 'synchronize': True})
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(2, len(status))
self.assertEqual('fading out', status[1]['status'])
self.assertEqual('fading in', status[0]['status'])
self.assertGreater(status[1]['fade_out_steps'], 0)
self.assertGreater(status[0]['fade_in_steps'], 0)
self.assertEqual(status[1]['sample_pos'], status[0]['sample_pos'])
self.assertEqual(status[1]['sound_id'], self.mc.sounds['kick'].id)
self.assertEqual(status[0]['sound_id'], self.mc.sounds['hihat'].id)
self.advance_real_time(0.3)
# Now play kick 2 and recheck status (new loop should be fading in and other two loops fading out)
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['basic_beat2'], None,
{'fade_in': 0.8, 'timing': 'now', 'synchronize': False})
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(3, len(status))
self.assertEqual('fading in', status[0]['status'])
self.assertEqual('fading out', status[1]['status'])
self.assertEqual('fading out', status[2]['status'])
self.assertGreater(status[0]['fade_in_steps'], 0)
self.assertGreater(status[1]['fade_out_steps'], 0)
        self.assertGreater(status[2]['fade_out_steps'], 0)
# don't know why this is off by one on Linux
self.assertIn(status[0]['fade_steps_remaining'],
[status[1]['fade_steps_remaining'], status[1]['fade_steps_remaining'] + 1])
self.assertIn(status[0]['fade_steps_remaining'],
[status[2]['fade_steps_remaining'], status[2]['fade_steps_remaining'] + 1])
self.assertEqual(status[1]['sample_pos'], status[2]['sample_pos'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['kick2'].id)
self.assertEqual(status[1]['sound_id'], self.mc.sounds['hihat'].id)
self.assertEqual(status[2]['sound_id'], self.mc.sounds['kick'].id)
def test_sound_loop_timing_settings(self):
""" Tests Sound Loop fading"""
if SoundSystem is None or self.mc.sound_system is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system is not enabled - skipping audio tests")
self.skipTest("Sound system is not enabled")
self.assertIsNotNone(self.mc.sound_system)
interface = self.mc.sound_system.audio_interface
if interface is None:
log = logging.getLogger('TestAudioSoundLoop')
log.warning("Sound system audio interface could not be loaded - skipping audio tests")
self.skipTest("Sound system audio interface could not be loaded")
self.assertIsNotNone(interface)
# Check sound loop track
track_loops = interface.get_track_by_name("loops")
self.assertIsNotNone(track_loops)
self.assertEqual(track_loops.name, "loops")
self.assertAlmostEqual(track_loops.volume, 0.6, 1)
# /sounds/loops
self.assertTrue(hasattr(self.mc, 'sounds'))
self.assertIn('hihat', self.mc.sounds)
self.assertIn('kick', self.mc.sounds)
self.assertIn('kick2', self.mc.sounds)
# Sound loop sets
self.assertTrue(hasattr(self.mc, 'sound_loop_sets'))
self.assertIn('hi_hat', self.mc.sound_loop_sets)
self.assertIn('basic_beat', self.mc.sound_loop_sets)
self.assertIn('basic_beat2', self.mc.sound_loop_sets)
# Play hi-hat loop and check status
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['hi_hat'])
self.advance_real_time(0.1)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['hihat'].id)
self.advance_real_time(0.5)
# Now play kick loop and recheck status (loops should perform a quick cross-fade and switch)
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['basic_beat'], None, {'timing': 'now'})
self.advance_real_time(0.2)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['kick'].id)
self.advance_real_time(0.3)
# Now play second kick loop and recheck status (loops should perform a quick cross-fade and switch)
track_loops.play_sound_loop_set(self.mc.sound_loop_sets['basic_beat2'], None, {'timing': 'now'})
self.advance_real_time(0.2)
status = track_loops.get_status()
self.assertEqual(1, len(status))
self.assertEqual('playing', status[0]['status'])
self.assertEqual(status[0]['sound_id'], self.mc.sounds['kick2'].id)
self.advance_real_time(0.3)
|
74020
|
import time
import dweepy
import RPi.GPIO as GPIO
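# Listen for dweets posted under KEY and, for each one received, drive
# OUTPUT_PIN high for OUTPUT_DURATION seconds; the bare except keeps the
# listener reconnecting whenever the long-poll connection drops.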
KEY = 'tweet_about_me'
OUTPUT_PIN = 18
OUTPUT_DURATION = 10
GPIO.setmode(GPIO.BCM)
GPIO.setup(OUTPUT_PIN, GPIO.OUT)
while True:
try:
for dweet in dweepy.listen_for_dweets_from(KEY):
print('Tweet: ' + dweet['content']['text'])
GPIO.output(OUTPUT_PIN, True)
time.sleep(OUTPUT_DURATION)
GPIO.output(OUTPUT_PIN, False)
except Exception:
pass
|
74046
|
import cv2, time
#TODO: fix ipcam
import urllib2, base64  # required by ipCamera below (Python 2 only)
import numpy as np
class ipCamera(object):
def __init__(self,url, user = None, password = None):
self.url = url
auth_encoded = base64.encodestring('%s:%s' % (user, password))[:-1]
self.req = urllib2.Request(self.url)
self.req.add_header('Authorization', 'Basic %s' % auth_encoded)
def get_frame(self):
response = urllib2.urlopen(self.req)
img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
frame = cv2.imdecode(img_array, 1)
return frame
class Camera(object):
def __init__(self, camera = 0):
self.cam = cv2.VideoCapture(camera)
self.valid = False
try:
resp = self.cam.read()
self.shape = resp[1].shape
self.valid = True
except:
self.shape = None
def get_frame(self):
if self.valid:
_,frame = self.cam.read()
else:
frame = np.ones((480,640,3), dtype=np.uint8)
col = (0,256,256)
cv2.putText(frame, "(Error: Camera not accessible)",
(65,220), cv2.FONT_HERSHEY_PLAIN, 2, col)
return frame
def release(self):
self.cam.release()
|
74067
|
import unittest
from rcwa import Source, Layer, Plotter, Crystal, Solver, LayerStack
from rcwa.shorthand import *
from rcwa.testing import *
from rcwa.matrices import *
from rcwa import numpyArrayFromFile, testLocation, numpyArrayFromSeparatedColumnsFile
import numpy as np
class TestSolver(unittest.TestCase):
def testSetupSource(self):
kIncidentActual = complexArray([1.0607, 0.61237, 0.70711])
kIncidentCalculated = self.solver.source.kIncident
assertAlmostEqual(kIncidentActual, kIncidentCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: kIncident")
def testSetupKMatrices(self):
KxActual = self.Kx
KxCalculated = self.solver.Kx
assertAlmostEqual(KxActual, KxCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Kx")
KyActual = self.Ky
KyCalculated = self.solver.Ky
assertAlmostEqual(KyActual, KyCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Ky")
KzActual = self.KzReflectionRegion
KzCalculated = self.solver.KzReflectionRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzReflectionRegion")
KzActual = self.KzTransmissionRegion
KzCalculated = self.solver.KzTransmissionRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzTransmissionRegion")
KzActual = self.KzGapRegion
KzCalculated = self.solver.KzGapRegion
assertAlmostEqual(KzActual, KzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: KzGapRegion")
def testEdgeSMatrices(self):
self.solver.Solve()
SActual = self.SReflectionRegion
SCalculated = self.solver.SReflection
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: SReflection")
self.solver.Solve()
SActual = self.STransmissionRegion
SCalculated = self.solver.STransmission
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: STransmission")
def testInternalSMatrices(self):
self.solver.Solve()
SActual = self.SLayer1
SCalculated = self.solver.Si[0]
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Si[0]")
self.solver.Solve()
SActual = self.SLayer2
SCalculated = self.solver.Si[1]
assertAlmostEqual(SActual, SCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: Si[1]")
def testrtAmplitudeCoefficients(self):
self.solver.Solve()
# HACK - FOR SOME REASON MY PHASE IS OFF BY PI.
rxActual = -self.rx
ryActual = -self.ry
rzActual = -self.rz
(rxCalculated, ryCalculated, rzCalculated) = (self.solver.rx, self.solver.ry, self.solver.rz)
(txCalculated, tyCalculated, tzCalculated) = (self.solver.tx, self.solver.ty, self.solver.tz)
assertAlmostEqual(rxActual, rxCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: rx")
assertAlmostEqual(ryActual, ryCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: ry")
assertAlmostEqual(rzActual, rzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: rz")
txActual = -self.tx
tyActual = -self.ty
tzActual = -self.tz
(R, T) = (self.solver.R, self.solver.T)
assertAlmostEqual(txActual, txCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: tx")
assertAlmostEqual(tyActual, tyCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: ty")
assertAlmostEqual(tzActual, tzCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: tz")
def testDiffractionEfficiencies(self):
self.solver.Solve()
RActual = self.R
TActual = self.T
(RCalculated, TCalculated) = (self.solver.R, self.solver.T)
assertAlmostEqual(RActual, RCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: R")
assertAlmostEqual(TActual, TCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: T")
RTotActual = self.RTot
TTotActual = self.TTot
CTotActual = 1.0
RTotCalculated = self.solver.RTot
TTotCalculated = self.solver.TTot
CTotCalculated = self.solver.conservation
assertAlmostEqual(RTotActual, RTotCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: RTot")
assertAlmostEqual(TTotActual, TTotCalculated,
self.absoluteTolerance, self.relativeTolerance, "testSolver: TTot")
assertAlmostEqual(CTotActual, CTotCalculated, 1e-7, 1e-7, "testSolver: Conservation Violated")
def testIntegrationMultiWavelength(self):
testWavelengths = self.solver.source.wavelength*np.arange(0.2,2,0.01)
self.solver.Solve(testWavelengths)
#Plotter.plotReflectionSpectra(self.solver.results)
def setUp(self):
self.absoluteTolerance = 1e-4
self.relativeTolerance = 1e-3
devicePermittivityCellData = np.transpose(np.loadtxt(testLocation + '/triangleData.csv', delimiter=','))
devicePermeabilityCellData = 1 + 0 * devicePermittivityCellData
reflectionLayer = Layer(er=2.0, ur=1.0)
transmissionLayer = Layer(er=9.0, ur=1.0)
# NOTE: t1 AND t2 MUST BE NORMALIZED BY MULTIPLYING BY k0, OTHERWISE THIS WILL NOT WORK, AS
# EVERYTHING WAS FORMULATED IN TERMS OF NORMALIZED WAVEVECTORS. I DON'T KNOW OF AN ELEGANT WAY
# TO DO THIS OTHER THAN REQUIRING A CRYSTAL TO HAVE A SOURCE AS THE INPUT. I DON'T KNOW OF
# AN EASY WAY TO FIX THIS. I'M GOING TO FUDGE IT FOR NOW.
wavelength = 2
k0 = 2*pi/wavelength
theta = 60 * deg
phi = 30*deg
pTEM = 1/sqrt(2)*complexArray([1,1j])
source = Source(wavelength=wavelength, theta=theta, phi=phi, pTEM=pTEM, layer=reflectionLayer)
t1, t2 = complexArray([1.75, 0, 0]), complexArray([0, 1.5, 0])
thicknessLayer1 = 0.5 # should be 0.5
thicknessLayer2 = 0.3 # should be 0.3
numberHarmonics = (3, 3)
deviceCrystal = Crystal(devicePermittivityCellData, devicePermeabilityCellData, t1, t2)
layer1 = Layer(crystal=deviceCrystal, L=thicknessLayer1, numberHarmonics=numberHarmonics)
layer2 = Layer(er=6.0, ur=1.0, L=thicknessLayer2)
layerStack = LayerStack(reflectionLayer, layer1, layer2, transmissionLayer)
self.solver = Solver(layerStack, source, numberHarmonics)
@classmethod
def setUpClass(self):
"""
Test fixture for loading in all the external test data.
"""
self.Kx = np.diag(complexArray(
[2.2035, 1.0607, -0.0822, 2.2035, 1.0607, -0.0822, 2.2035, 1.0607, -0.0822]))
self.Ky = np.diag(complexArray(
[1.9457, 1.9457, 1.9457, 0.6124, 0.6124, 0.6124, -0.7210, -0.7210, -0.7210]))
self.KzReflectionRegion = numpyArrayFromFile(
testLocation + "/matrixDataOblique/reflectionRegion/KzReflectionRegion.txt")
self.KzTransmissionRegion = np.diag(complexArray(
[0.5989, 2.0222, 2.2820, 1.9415, 2.7386, 2.9357, 1.9039, 2.7121, 2.9109]))
self.KzGapRegion = numpyArrayFromFile(
testLocation + "/matrixDataOblique/freeSpace/KzFreeSpace.txt")
self.SGlobal11= numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/SGlobal11.txt")
self.SGlobal12= numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/SGlobal12.txt")
self.SGlobal21= numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/SGlobal21.txt")
self.SGlobal22= numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/SGlobal22.txt")
self.SGlobal = complexArray([
[self.SGlobal11, self.SGlobal12],
[self.SGlobal21, self.SGlobal22]])
self.S11ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/reflectionRegion/S11ReflectionRegion.txt")
self.S12ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/reflectionRegion/S12ReflectionRegion.txt")
self.S21ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/reflectionRegion/S21ReflectionRegion.txt")
self.S22ReflectionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/reflectionRegion/S22ReflectionRegion.txt")
self.SReflectionRegion = complexArray([
[self.S11ReflectionRegion, self.S12ReflectionRegion],
[self.S21ReflectionRegion, self.S22ReflectionRegion]])
self.S11TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/transmissionRegion/S11TransmissionRegion.txt")
self.S12TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/transmissionRegion/S12TransmissionRegion.txt")
self.S21TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/transmissionRegion/S21TransmissionRegion.txt")
self.S22TransmissionRegion = numpyArrayFromSeparatedColumnsFile(
testLocation + "/matrixDataOblique/transmissionRegion/S22TransmissionRegion.txt")
self.STransmissionRegion = complexArray([
[self.S11TransmissionRegion, self.S12TransmissionRegion],
[self.S21TransmissionRegion, self.S22TransmissionRegion]])
self.S11Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S11Layer1.txt")
self.S12Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S12Layer1.txt")
self.S21Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S21Layer1.txt")
self.S22Layer1 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer1/S22Layer1.txt")
self.SLayer1 = complexArray([
[self.S11Layer1, self.S12Layer1],
[self.S21Layer1, self.S22Layer1]])
self.S11Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S11Layer2.txt")
self.S12Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S12Layer2.txt")
self.S21Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S21Layer2.txt")
self.S22Layer2 = numpyArrayFromSeparatedColumnsFile(testLocation + "/matrixDataOblique/layer2/S22Layer2.txt")
self.SLayer2 = complexArray([
[self.S11Layer2, self.S12Layer2],
[self.S21Layer2, self.S22Layer2]])
self.DLayer12= np.loadtxt(testLocation + '/matrixDataOblique/layer2/D12.csv', dtype=np.cdouble)
self.FLayer12= np.loadtxt(testLocation + '/matrixDataOblique/layer2/F12.csv', dtype=np.cdouble)
self.rx = complexArray([-0.0187- 0.0155j, 0.0486 - 0.0467j, 0.0016 + 0.0012j,
0.0324 - 0.0229j, -0.1606 - 0.0348j, -0.0089 + 0.0156j,
0.0020 + 0.0105j, 0.0076 + 0.0187j, -0.0027 - 0.0129j])
self.ry = complexArray([-0.0077 - 0.0106j, 0.0184 + 0.0323j, -0.0267 - 0.0070j,
-0.0286 + 0.0472j, 0.2335 + 0.0138j, 0.0243 + 0.0164j,
0.0435 - 0.018j, 0.0183 + 0.0146j, -0.0062 + 0.0011j])
self.rz = complexArray([0.0213 - 0.0218j, -0.0078 + 0.0512j, 0.0103 - 0.0388j,
0.0120 + 0.0300j, -0.0386 - 0.0403j, 0.0123 + 0.0069j,
-0.0197 - 0.0147j, -0.0087 + 0.0157j, 0.0039 + 0.0002j])
self.tx = complexArray([0.0015 - 0.0016j, -0.0583 + 0.0256j, -0.0245 - 0.0098j,
0.0060 + 0.0210j, 0.3040 + 0.0664j, -0.0054 - 0.0632j,
-0.0123 - 0.0262j, -0.0323 - 0.0534j, 0.0169 + 0.0455j])
self.ty = complexArray([-0.0024 + 0.0011j, 0.0356 + 0.0282j, -0.0230 - 0.0071j,
0.0610 - 0.0011j, 0.0523 - 0.2913j, -0.0645 - 0.0027j,
-0.0170 - 0.0165j, -0.0420 + 0.0298j, 0.0258 - 0.0234j])
self.tz = complexArray([0.0023 + 0.0021j, - 0.0036 - 0.0406j, 0.0187 + 0.0057j,
-0.0261 - 0.0235j, -0.1294 + 0.0394j, 0.0133 - 0.0012j,
0.0078 + 0.0241j, 0.0014 + 0.0288j, 0.0069 - 0.0045j])
self.R = np.array([
[0,0,0],
[0,0.0848, 0.0011],
[0, 0.0025, 0.0004]])
self.T = np.array([
[0, 0.0149, 0.0055],
[0.0222, 0.7851, 0.0283],
[0.0053, 0.0348, 0.0150]])
self.R = np.transpose(self.R)
self.T = np.transpose(self.T)
self.RTot = 0.088768
self.TTot = 0.91123
|
74080
|
defaults = '''
const vec4 light = vec4(4.0, 3.0, 10.0, 0.0);
const vec4 eye = vec4(4.0, 3.0, 2.0, 0.0);
const mat4 mvp = mat4(
-0.8147971034049988, -0.7172931432723999, -0.7429299354553223, -0.7427813410758972,
1.0863960981369019, -0.5379698276519775, -0.5571974515914917, -0.5570859909057617,
0.0, 2.2415409088134766, -0.37146496772766113, -0.3713906705379486,
0.0, 0.0, 5.186222076416016, 5.385164737701416
);
'''
|
74103
|
from setuptools import setup, find_packages
import crisscross.metadata
with open("README.md", "r") as f:
long_description = f.read()
setup(
name='crisscross',
version=crisscross.metadata.version,
author='pnlng',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/pnlng/crisscross',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Text Processing',
'Topic :: Utilities'
],
packages=find_packages(),
include_package_data=True,
install_requires=[
'click',
'chevron',
'poyo'
],
entry_points='''
[console_scripts]
crisscross=crisscross.cli:cli
''',
python_requires='>=3.5'
)
|
74112
|
from deepproblog.utils import check_path
template = """
[Default]
batch_size = {0}
infoloss = {1}
name = poker_batch_{0}_infoloss_{1}
"""
i = 0
check_path("parameter_cfg/0.cfg")
for batch_size in [10, 25, 50, 100]:
for infoloss in [0, 0.5, 1.0, 2.0, 4.0]:
with open("parameter_cfg/{}.cfg".format(i), "w") as f:
f.write(template.format(batch_size, infoloss))
i += 1
template = """
[Default]
batch_size = 10
infoloss = 0.5
labeled = {0}
name = poker_batch_labeled_{0}_{1}
"""
i = 0
check_path("experiment/0.cfg")
for labeled in [300]:
for rep in range(10):
with open("experiment/{}.cfg".format(i), "w") as f:
f.write(template.format(labeled, rep))
i += 1
|
74135
|
from .quantum_register import QuantumRegister
from .classical_register import ClassicalRegister
import qsy.gates as gates
__version__ = '0.4.4'
|
74149
|
from interact import *
def eva_model():
parser = ArgumentParser()
parser.add_argument('--gpt2', action='store_true', help="use gpt2")
parser.add_argument("--model_checkpoint", type=str, default="./models/", help="Path, url or short name of the model")
parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=30, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9,
help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
logging.error("Checkpoint needed!")
return
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class = BertTokenizer
model_class = OpenAIGPTLMHeadModel if not args.gpt2 else GPT2LMHeadModel
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint, do_lower_case=True)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
model.eval()
return model,tokenizer,args
history = []
model,tokenizer,args = eva_model()
def chat_response(raw_text):
global history
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
raw_text = " ".join(list(raw_text.replace(" ", "")))
history.append(tokenize(raw_text))
with torch.no_grad():
out_ids = sample_sequence(history, tokenizer, model, args)
history.append(out_ids)
history = history[-(2 * args.max_history + 1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
#print(out_text)
return out_text
print(0)
|
74173
|
import os
import pytest
import taichi as ti
from taichi import approx
def run_mpm88_test():
dim = 2
N = 64
n_particles = N * N
n_grid = 128
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2.0e-4
p_vol = (dx * 0.5)**2
p_rho = 1
p_mass = p_vol * p_rho
E = 400
x = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
v = ti.Vector.field(dim, dtype=ti.f32, shape=n_particles)
C = ti.Matrix.field(dim, dim, dtype=ti.f32, shape=n_particles)
J = ti.field(dtype=ti.f32, shape=n_particles)
grid_v = ti.Vector.field(dim, dtype=ti.f32, shape=(n_grid, n_grid))
grid_m = ti.field(dtype=ti.f32, shape=(n_grid, n_grid))
@ti.kernel
def substep():
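        # Particle-to-grid (P2G): scatter mass and momentum to the grid,
        # then apply gravity and boundary conditions on grid nodes, and
        # finally gather velocities back to particles (G2P) and advect them.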
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
offset = ti.Vector([i, j])
dpos = (offset.cast(float) - fx) * dx
weight = w[i][0] * w[j][1]
grid_v[base + offset].atomic_add(
weight * (p_mass * v[p] + affine @ dpos))
grid_m[base + offset].atomic_add(weight * p_mass)
for i, j in grid_m:
if grid_m[i, j] > 0:
bound = 3
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j][1] -= dt * 9.8
if i < bound and grid_v[i, j][0] < 0:
grid_v[i, j][0] = 0
if i > n_grid - bound and grid_v[i, j][0] > 0:
grid_v[i, j][0] = 0
if j < bound and grid_v[i, j][1] < 0:
grid_v[i, j][1] = 0
if j > n_grid - bound and grid_v[i, j][1] > 0:
grid_v[i, j][1] = 0
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [
0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
]
new_v = ti.Vector.zero(ti.f32, 2)
new_C = ti.Matrix.zero(ti.f32, 2, 2)
for i in ti.static(range(3)):
for j in ti.static(range(3)):
dpos = ti.Vector([i, j]).cast(float) - fx
g_v = grid_v[base + ti.Vector([i, j])]
weight = w[i][0] * w[j][1]
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
J[p] *= 1 + dt * new_C.trace()
C[p] = new_C
# gui = ti._lib.core.GUI("MPM88", ti.core_veci(512, 512))
# canvas = gui.get_canvas()
for i in range(n_particles):
x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
v[i] = [0, -3]
J[i] = 1
for frame in range(10):
for s in range(50):
grid_v.fill([0, 0])
grid_m.fill(0)
substep()
pos = x.to_numpy()
pos[:, 1] *= 2
regression = [
0.31722742,
0.15826741,
0.10224003,
0.07810827,
]
for i in range(4):
assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
@ti.test()
def test_mpm88():
run_mpm88_test()
def _is_appveyor():
# AppVeyor adds `APPVEYOR=True` ('true' on Ubuntu)
# https://www.appveyor.com/docs/environment-variables/
return os.getenv('APPVEYOR', '').lower() == 'true'
#TODO: Remove exclude of ti.metal
@pytest.mark.skipif(_is_appveyor(), reason='Stuck on Appveyor.')
@ti.test(require=ti.extension.async_mode, exclude=[ti.metal], async_mode=True)
def test_mpm88_async():
# It seems that all async tests on Appveyor run super slow. For example,
# on Appveyor, 10+ tests have passed during the execution of
# test_fuse_dense_x2y2z. Maybe thread synchronizations are expensive?
run_mpm88_test()
@ti.test(arch=[ti.cpu, ti.cuda, ti.opengl])
def test_mpm88_numpy_and_ndarray():
import numpy as np
dim = 2
N = 64
n_particles = N * N
n_grid = 128
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2.0e-4
p_vol = (dx * 0.5)**2
p_rho = 1
p_mass = p_vol * p_rho
E = 400
@ti.kernel
def substep(x: ti.any_arr(element_dim=1), v: ti.any_arr(element_dim=1),
C: ti.any_arr(element_dim=2), J: ti.any_arr(),
grid_v: ti.any_arr(element_dim=1), grid_m: ti.any_arr()):
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
stress = -dt * p_vol * (J[p] - 1) * 4 * inv_dx * inv_dx * E
affine = ti.Matrix([[stress, 0], [0, stress]]) + p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
offset = ti.Vector([i, j])
dpos = (offset.cast(float) - fx) * dx
weight = w[i][0] * w[j][1]
grid_v[base + offset].atomic_add(
weight * (p_mass * v[p] + affine @ dpos))
grid_m[base + offset].atomic_add(weight * p_mass)
for i, j in grid_m:
if grid_m[i, j] > 0:
bound = 3
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j][1] -= dt * 9.8
if i < bound and grid_v[i, j][0] < 0:
grid_v[i, j][0] = 0
if i > n_grid - bound and grid_v[i, j][0] > 0:
grid_v[i, j][0] = 0
if j < bound and grid_v[i, j][1] < 0:
grid_v[i, j][1] = 0
if j > n_grid - bound and grid_v[i, j][1] > 0:
grid_v[i, j][1] = 0
for p in x:
base = (x[p] * inv_dx - 0.5).cast(int)
fx = x[p] * inv_dx - base.cast(float)
w = [
0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2
]
new_v = ti.Vector.zero(ti.f32, 2)
new_C = ti.Matrix.zero(ti.f32, 2, 2)
for i in ti.static(range(3)):
for j in ti.static(range(3)):
dpos = ti.Vector([i, j]).cast(float) - fx
g_v = grid_v[base + ti.Vector([i, j])]
weight = w[i][0] * w[j][1]
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
J[p] *= 1 + dt * new_C.trace()
C[p] = new_C
def run_test(x, v, C, J, grid_v, grid_m):
for i in range(n_particles):
x[i] = [i % N / N * 0.4 + 0.2, i / N / N * 0.4 + 0.05]
v[i] = [0, -3]
J[i] = 1
for frame in range(10):
for s in range(50):
grid_v.fill(0)
grid_m.fill(0)
substep(x, v, C, J, grid_v, grid_m)
pos = x if isinstance(x, np.ndarray) else x.to_numpy()
pos[:, 1] *= 2
regression = [
0.31722742,
0.15826741,
0.10224003,
0.07810827,
]
for i in range(4):
assert (pos**(i + 1)).mean() == approx(regression[i], rel=1e-2)
def test_numpy():
x = np.zeros((n_particles, dim), dtype=np.float32)
v = np.zeros((n_particles, dim), dtype=np.float32)
C = np.zeros((n_particles, dim, dim), dtype=np.float32)
J = np.zeros(n_particles, dtype=np.float32)
grid_v = np.zeros((n_grid, n_grid, dim), dtype=np.float32)
grid_m = np.zeros((n_grid, n_grid), dtype=np.float32)
run_test(x, v, C, J, grid_v, grid_m)
def test_ndarray():
x = ti.Vector.ndarray(dim, ti.f32, n_particles)
v = ti.Vector.ndarray(dim, ti.f32, n_particles)
C = ti.Matrix.ndarray(dim, dim, ti.f32, n_particles)
J = ti.ndarray(ti.f32, n_particles)
grid_v = ti.Vector.ndarray(dim, ti.f32, (n_grid, n_grid))
grid_m = ti.ndarray(ti.f32, (n_grid, n_grid))
run_test(x, v, C, J, grid_v, grid_m)
test_numpy()
test_ndarray()
|
74198
|
import argparse
import importlib
profiles = {
"core": True,
"legacy": False,
}
applications = {
"pyglet": "application_pyglet",
"pyglfw": "application_pyglfw",
"glut": "application_glut",
}
demos = {
"basic": "scene_basic",
"texturing": "scene_texture",
"scene_graph": "scene_scene_graph",
"orthographic": "scene_orthographic",
"multiple_viewports": "scene_multiple_viewports",
"sorting": "scene_sorting",
}
def parse_arguments():
parser = argparse.ArgumentParser(
description = "PyGLy demo application"
)
parser.add_argument(
"-g", "--opengl_profile",
choices = profiles.keys(),
default = "core",
help = "The OpenGL profile to use. (default: core)",
)
parser.add_argument(
"-p", "--platform",
choices = applications.keys(),
default = "pyglet",
help = "The windowing platform to use. (default: pyglet)",
)
parser.add_argument(
"-d", "--demo",
choices = demos.keys(),
default = "basic",
help = "The demo to run. (default: basic)",
)
return parser.parse_args()
def run_demo( args ):
global profiles, applications, demos
print "Windowing system:\t", args.platform
print "OpenGL profile:\t", args.opengl_profile
print "Scene:\t", args.demo
is_core = profiles[ args.opengl_profile ]
scene_module = __import__( demos[ args.demo ], fromlist=[""] )
scene = scene_module.Scene( is_core )
app_module = __import__( applications[ args.platform ], fromlist=[""] )
application = app_module.Application( scene )
application.run()
if __name__ == "__main__":
args = parse_arguments()
run_demo( args )
|
74200
|
from brownie import *
import json
def main():
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
# configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if thisNetwork == "rsk-mainnet":
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
elif thisNetwork == "testnet":
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
contracts = json.load(configFile)
timelockOwnerAddress = contracts['timelockOwner']
multiSigKeyHolders= acct.deploy(MultiSigKeyHolders)
multiSigKeyHolders.transferOwnership(timelockOwnerAddress)
|
74249
|
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang.builder import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.boxlayout import BoxLayout
from kivymd.theming import ThemableBehavior
Builder.load_string(
"""
<AKCircularProgress>:
canvas.before:
Color:
rgba: root.background_circle_color if root.background_circle_color else app.theme_cls.primary_light
Line:
circle: ( self.x+ self.width/2, self.y+ self.height/2, self.height/2, root.start_deg, root.end_deg)
width: root.background_line_width
Color:
rgba: root.circle_color if root.circle_color else app.theme_cls.primary_color
Line:
circle: ( self.x+ self.width/2, self.y+ self.height/2, self.height/2, root.start_deg, root._current_deg)
width: root.line_width
MDLabel:
id: _percent_label
halign: 'center'
valign: 'center'
theme_text_color: 'Custom'
text_color: root.percent_color if root.percent_color else app.theme_cls.primary_color
font_size: root.percent_size
"""
)
class AKCircularProgress(ThemableBehavior, BoxLayout):
circle_color = ListProperty()
start_deg = NumericProperty(0)
end_deg = NumericProperty(360)
line_width = NumericProperty("3dp")
percent_color = ListProperty()
percent_size = NumericProperty("20dp")
current_percent = NumericProperty(-1)
anim_speed = NumericProperty(0.3)
anim_transition = StringProperty("out_quad")
max_percent = NumericProperty(100)
percent_type = OptionProperty("percent", options=["percent", "relative"])
background_circle_color = ListProperty()
background_line_width = NumericProperty("1dp")
_current_deg = NumericProperty(-1)
def __init__(self, **kwargs):
super().__init__(**kwargs)
Clock.schedule_once(lambda x: self._update())
def _update(self):
self.current_percent = 0
def on_current_percent(self, *args):
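        # Map the new percent onto the start_deg..end_deg arc and animate
        # _current_deg toward it; on__current_deg keeps the label in sync.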
deg_distance = self.end_deg - self.start_deg
self._each_percent = deg_distance / self.max_percent
_current_deg = args[1] * self._each_percent
percent_anim = Animation(
_current_deg=self.start_deg + _current_deg,
duration=self.anim_speed,
t=self.anim_transition,
)
percent_anim.start(self)
def on__current_deg(self, *args):
if self.percent_type == "percent":
self.ids._percent_label.text = (
str(
int(
(self._current_deg - self.start_deg)
/ self._each_percent
)
)
+ " %"
)
elif self.percent_type == "relative":
self.ids._percent_label.text = (
str(
int(
(self._current_deg - self.start_deg)
/ self._each_percent
)
)
+ "\\"
+ str(self.max_percent)
)
|
74261
|
import FWCore.ParameterSet.Config as cms
siTrackerMultiRecHitUpdator = cms.ESProducer("SiTrackerMultiRecHitUpdatorESProducer",
ComponentName = cms.string('SiTrackerMultiRecHitUpdator'),
TTRHBuilder = cms.string('WithAngleAndTemplate'),
HitPropagator = cms.string('trackingRecHitPropagator'),
#AnnealingProgram = cms.vdouble(80.0, 9.0, 4.0, 1.0, 1.0, 1.0),
AnnealingProgram = cms.vdouble(30.0, 18.0, 14.0, 11.0, 6.0, 4.0, 2.0, 1.0),
ChiSquareCut1D = cms.double(10.8276),
ChiSquareCut2D = cms.double(13.8155),
Debug = cms.bool(False)
)
|
74268
|
import numpy as np
from typing import Callable
from .base_score import BaseScore
class BleiLaffertyScore(BaseScore):
"""
This score implements method described in 2009 paper
    Blei, David M., and John D. Lafferty. "Topic models." Text Mining.
Chapman and Hall/CRC, 2009. 101-124.
At the core this score helps to discover tokens that are most likely
to describe given topic. Summing up that score helps to estimate how
well the model distinguishes between topics. The higher this score - better
"""
def __init__(
self,
name: str = None,
num_top_tokens: int = 30,
should_compute: Callable[[int], bool] = None):
"""
Parameters
----------
name:
name of the score
        num_top_tokens : int
            how many top tokens to consider for each topic
"""
super().__init__(name=name, should_compute=should_compute)
self.num_top_tokens = num_top_tokens
def __repr__(self):
return f'{self.__class__.__name__}(num_top_tokens={self.num_top_tokens})'
def _compute_blei_scores(self, phi):
"""
Computes Blei score
phi[wt] * [log(phi[wt]) - 1/T sum_k log(phi[wk])]
Parameters
----------
        phi : pd.DataFrame
phi matrix of the model
Returns
-------
        score : pd.DataFrame
            weighted phi matrix
""" # noqa: W291
topic_number = phi.shape[1]
blei_eps = 1e-42
log_phi = np.log(phi + blei_eps)
numerator = np.sum(log_phi, axis=1)
numerator = numerator[:, np.newaxis]
if hasattr(log_phi, "values"):
multiplier = log_phi.values - numerator / topic_number
else:
multiplier = log_phi - numerator / topic_number
scores = phi * multiplier
return scores
def call(self, model, **kwargs):
modalities = list(model.class_ids.keys())
score = 0
for modality in modalities:
phi = model.get_phi(class_ids=modality)
modality_scores = np.sort(self._compute_blei_scores(phi).values)
score += np.sum(modality_scores[-self.num_top_tokens:, :])
        if not modalities:
phi = model.get_phi()
modality_scores = np.sort(self._compute_blei_scores(phi).values)
score = np.sum(modality_scores[-self.num_top_tokens:, :])
return score
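# --- Illustrative sketch (added for clarity; not part of the original API) ---
# The weighting phi[w, t] * (log phi[w, t] - (1/T) * sum_k log phi[w, k])
# rewards tokens that are probable in one topic but improbable in the rest.
# The toy phi matrix below is made up purely for illustration.
def _blei_weighting_example():
    toy_phi = np.array([[0.70, 0.05],
                        [0.20, 0.05],
                        [0.05, 0.30],
                        [0.05, 0.60]])
    log_phi = np.log(toy_phi + 1e-42)
    # Subtracting the per-token mean log-probability across topics mirrors
    # _compute_blei_scores above; large positive entries mark topic-defining tokens.
    return toy_phi * (log_phi - log_phi.mean(axis=1, keepdims=True))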
|
74285
|
from setuptools import setup
with open("README.md") as f:
long_description = f.read()
setup(
name="prefixdate",
version="0.4.0",
description="Formatting utility for international postal addresses",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pudo/prefixdate",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
],
keywords="date, partial date, iso8601, rfc3339",
packages=["prefixdate"],
package_data={"prefixdate": ["py.typed"]},
include_package_data=True,
scripts=[],
install_requires=[],
zip_safe=False,
extras_require={
"dev": [
"pytest",
"pytest-cov",
"mypy",
"bump2version",
"wheel>=0.29.0",
"twine",
],
},
)
|
74289
|
from fabric.api import local, task
@task
def bower(command, args='', option=''):
"""
usage: fab bower:<command>, <args>, <option>
Execute bower commands.
See 'fab bower:help' for more information
"""
local('cd {{ project_name }} && bower {0} {1} {2}'.format(
command,
args,
option
))
@task
def npm(command, args='', option=''):
"""
usage: fab npm:<command>, <args>, <option>
Execute npm commands
See 'fab npm:help' for more information
"""
local('cd {{ project_name }} && npm {0} {1} {2}'.format(
command,
option,
args,
))
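# Illustrative invocations (package names below are examples only):
#   fab bower:install,backbone      ->  bower install backbone
#   fab npm:install,grunt,-g        ->  npm install -g grunt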
@task
def scaffold(skip_install=''):
"""
Setup frontend management for Django project with yo, grunt and bower.
See 'https://github.com/cirlabs/generator-newsapp' for more information.
Skip installing npm modules by running fab scaffold:skip-install
"""
if skip_install != 'skip-install':
npm('install', 'yo', '-g')
npm('install', 'generator-newsapp', '-g')
local('cd {{ project_name }} && yo newsapp')
|
74319
|
from flask import Flask, jsonify, request
from flask.views import MethodView
app = Flask(__name__)
languages = [{'name' : 'JavaScript'}, {'name' : 'Python'}, {'name' : 'Ruby'}]
def get_language(name):
return [language for language in languages if language['name'] == name][0]
class Language(MethodView):
def get(self, language_name):
if language_name:
return jsonify({'language' : get_language(language_name)})
else:
return jsonify({'languages': languages})
def post(self):
new_language_name = request.json['name']
language = {'name' : new_language_name}
languages.append(language)
return jsonify({'language' : get_language(new_language_name)}), 201
def put(self, language_name):
language = get_language(language_name)
new_language_name = request.json['name']
language['name'] = new_language_name
return jsonify({'language' : get_language(new_language_name)})
def delete(self, language_name):
language = get_language(language_name)
languages.remove(language)
return '', 204
language_view = Language.as_view('language_api')
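# One view class is registered under three rules: POST /language creates an
# entry, GET /language lists all entries (language_name defaults to None),
# and GET/PUT/DELETE /language/<language_name> operate on a single entry.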
app.add_url_rule('/language', methods=['POST'], view_func=language_view)
app.add_url_rule('/language', methods=['GET'], defaults={'language_name' : None}, view_func=language_view)
app.add_url_rule('/language/<language_name>', methods=['GET', 'PUT', 'DELETE'], view_func=language_view)
|
74329
|
import cPickle as pkl
import gzip
import os
import re
import sys
import numpy
import math
import random
from binary_tree import BinaryTree
def convert_ptb_to_tree(line):
index = 0
tree = None
line = line.rstrip()
stack = []
parts = line.split()
for p_i, p in enumerate(parts):
# opening of a bracket, create a new node, take parent from top of stack
if p == '(':
if tree is None:
tree = BinaryTree(index)
else:
add_descendant(tree, index, stack[-1])
# add the newly created node to the stack and increment the index
stack.append(index)
index += 1
# close of a bracket, pop node on top of the stack
elif p == ')':
stack.pop(-1)
# otherwise, create a new node, take parent from top of stack, and set word
else:
add_descendant(tree, index, stack[-1])
tree.set_word(index, p)
index += 1
return tree
def add_descendant(tree, index, parent_index):
# add to the left first if possible, then to the right
if tree.has_left_descendant_at_node(parent_index):
if tree.has_right_descendant_at_node(parent_index):
sys.exit("Node " + str(parent_index) + " already has two children")
else:
tree.add_right_descendant(index, parent_index)
else:
tree.add_left_descendant(index, parent_index)
def fopen(filename, mode='r'):
if filename.endswith('.gz'):
return gzip.open(filename, mode)
return open(filename, mode)
class TextIterator:
"""Simple Bitext iterator."""
def __init__(self, source, target, label,
dict,
batch_size=128,
n_words=-1,
maxlen=500,
shuffle=True):
self.source = fopen(source, 'r')
self.target = fopen(target, 'r')
self.label = fopen(label, 'r')
with open(dict, 'rb') as f:
self.dict = pkl.load(f)
self.batch_size = batch_size
self.n_words = n_words
self.maxlen = maxlen
self.shuffle = shuffle
self.end_of_data = False
self.source_buffer = []
self.target_buffer = []
self.label_buffer = []
self.k = batch_size * 20
def __iter__(self):
return self
def reset(self):
self.source.seek(0)
self.target.seek(0)
self.label.seek(0)
def next(self):
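        # Refill the internal buffer (batch_size * 20 examples) when empty,
        # optionally length-sort and shuffle it at mini-batch granularity,
        # then pop up to batch_size (source, target, label) triples below.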
if self.end_of_data:
self.end_of_data = False
self.reset()
raise StopIteration
source = []
target = []
label = []
# fill buffer, if it's empty
assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'
assert len(self.source_buffer) == len(self.label_buffer), 'Buffer size mismatch!'
if len(self.source_buffer) == 0:
for k_ in xrange(self.k):
ss = self.source.readline()
if ss == "":
break
tt = self.target.readline()
if tt == "":
break
ll = self.label.readline()
if ll == "":
break
ss = convert_ptb_to_tree(ss)
words_ss, left_mask_ss, right_mask_ss = ss.convert_to_sequence_and_masks(ss.root)
words_ss = [self.dict[w] if w in self.dict else 1
for w in words_ss]
if self.n_words > 0:
words_ss = [w if w < self.n_words else 1 for w in words_ss]
ss = (words_ss, left_mask_ss, right_mask_ss)
tt = convert_ptb_to_tree(tt)
words_tt, left_mask_tt, right_mask_tt = tt.convert_to_sequence_and_masks(tt.root)
words_tt = [self.dict[w] if w in self.dict else 1
for w in words_tt]
if self.n_words > 0:
words_tt = [w if w < self.n_words else 1 for w in words_tt]
tt = (words_tt, left_mask_tt, right_mask_tt)
if len(words_ss) > self.maxlen or len(words_tt) > self.maxlen:
continue
self.source_buffer.append(ss)
self.target_buffer.append(tt)
self.label_buffer.append(ll.strip())
if self.shuffle:
# sort by target buffer
tlen = numpy.array([len(t[0]) for t in self.target_buffer])
tidx = tlen.argsort()
# shuffle mini-batch
tindex = []
small_index = range(int(math.ceil(len(tidx)*1./self.batch_size)))
random.shuffle(small_index)
for i in small_index:
if (i+1)*self.batch_size > len(tidx):
tindex.extend(tidx[i*self.batch_size:])
else:
tindex.extend(tidx[i*self.batch_size:(i+1)*self.batch_size])
tidx = tindex
_sbuf = [self.source_buffer[i] for i in tidx]
_tbuf = [self.target_buffer[i] for i in tidx]
_lbuf = [self.label_buffer[i] for i in tidx]
self.source_buffer = _sbuf
self.target_buffer = _tbuf
self.label_buffer = _lbuf
if len(self.source_buffer) == 0 or len(self.target_buffer) == 0 or len(self.label_buffer) == 0:
self.end_of_data = False
self.reset()
raise StopIteration
try:
# actual work here
while True:
# read from source file and map to word index
try:
ss = self.source_buffer.pop(0)
tt = self.target_buffer.pop(0)
ll = self.label_buffer.pop(0)
except IndexError:
break
source.append(ss)
target.append(tt)
label.append(ll)
if len(source) >= self.batch_size or \
len(target) >= self.batch_size or \
len(label) >= self.batch_size:
break
except IOError:
self.end_of_data = True
if len(source) <= 0 or len(target) <= 0 or len(label) <= 0:
self.end_of_data = False
self.reset()
raise StopIteration
return source, target, label
|
74343
|
import argparse
import baselineUtils
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import os
import time
from transformer.batch import subsequent_mask
from torch.optim import Adam,SGD,RMSprop,Adagrad
from transformer.noam_opt import NoamOpt
import numpy as np
import scipy.io
import scipy.spatial.distance
import json
import pickle
from torch.utils.tensorboard import SummaryWriter
def main():
parser=argparse.ArgumentParser(description='Train the individual Transformer model')
parser.add_argument('--dataset_folder',type=str,default='datasets')
parser.add_argument('--dataset_name',type=str,default='zara1')
parser.add_argument('--obs',type=int,default=8)
parser.add_argument('--preds',type=int,default=12)
parser.add_argument('--emb_size',type=int,default=512)
parser.add_argument('--heads',type=int, default=8)
parser.add_argument('--layers',type=int,default=6)
parser.add_argument('--dropout',type=float,default=0.1)
parser.add_argument('--cpu',action='store_true')
parser.add_argument('--output_folder',type=str,default='Output')
parser.add_argument('--val_size',type=int, default=0)
parser.add_argument('--gpu_device',type=str, default="0")
parser.add_argument('--verbose',action='store_true')
parser.add_argument('--max_epoch',type=int, default=100)
parser.add_argument('--batch_size',type=int,default=100)
parser.add_argument('--validation_epoch_start', type=int, default=30)
parser.add_argument('--resume_train',action='store_true')
parser.add_argument('--delim',type=str,default='\t')
parser.add_argument('--name', type=str, default="zara1")
parser.add_argument('--factor', type=float, default=1.)
parser.add_argument('--evaluate',type=bool,default=True)
parser.add_argument('--save_step', type=int, default=1)
args=parser.parse_args()
model_name=args.name
try:
os.mkdir('models')
except:
pass
try:
os.mkdir('output')
except:
pass
try:
os.mkdir('output/QuantizedTF')
except:
pass
try:
os.mkdir(f'models/QuantizedTF')
except:
pass
try:
os.mkdir(f'output/QuantizedTF/{args.name}')
except:
pass
try:
os.mkdir(f'models/QuantizedTF/{args.name}')
except:
pass
log=SummaryWriter('logs/%s'%model_name)
#os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
device=torch.device("cuda")
if args.cpu or not torch.cuda.is_available():
device=torch.device("cpu")
args.verbose=True
## creation of the dataloaders for train and validation
if args.val_size==0:
train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)
val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,
args.preds, delim=args.delim, train=False,
verbose=args.verbose)
else:
train_dataset, val_dataset = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, args.val_size, args.obs,
args.preds, delim=args.delim, train=True,
verbose=args.verbose)
test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)
mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, "clusters.mat"))
clusters=mat['centroids']
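    # 'clusters' holds k-means centroids over the 2D velocity vectors; each
    # step is quantized to the index of its nearest centroid, turning
    # trajectories into discrete token sequences for the transformer.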
import quantized_TF
model=quantized_TF.QuantizedTF(clusters.shape[0], clusters.shape[0]+1, clusters.shape[0], N=args.layers,
d_model=args.emb_size, d_ff=1024, h=args.heads, dropout=args.dropout).to(device)
tr_dl=torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
#optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)
#sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)
optim = NoamOpt(args.emb_size, args.factor, len(tr_dl)*5,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
#optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)
epoch=0
while epoch<args.max_epoch:
epoch_loss=0
model.train()
for id_b,batch in enumerate(tr_dl):
optim.optimizer.zero_grad()
scale=np.random.uniform(0.5,4)
#rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch=batch['src'].shape[0]
speeds_inp=batch['src'][:,1:,2:4]*scale
inp=torch.tensor(scipy.spatial.distance.cdist(speeds_inp.reshape(-1,2),clusters).argmin(axis=1).reshape(n_in_batch,-1)).to(device)
speeds_trg = batch['trg'][:,:,2:4]*scale
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch, -1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att=subsequent_mask(target.shape[1]).repeat(n_in_batch,1,1).to(device)
start_of_seq=torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp=torch.cat((start_of_seq,target[:,:-1]),1)
out=model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.view(-1,out.shape[-1]),target.view(-1),reduction='mean')
loss.backward()
optim.step()
print("epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))
epoch_loss += loss.item()
#sched.step()
log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)
with torch.no_grad():
model.eval()
gt=[]
pr=[]
val_loss=0
step=0
for batch in val_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
speeds_trg = batch['trg'][:, :, 2:4]
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = torch.cat((start_of_seq, target[:, :-1]), 1)
out = model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.contiguous().view(-1, out.shape[-1]), target.contiguous().view(-1), reduction='mean')
print("val epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (
epoch, args.max_epoch, step, len(val_dl), loss.item()))
val_loss+=loss.item()
step+=1
log.add_scalar('validation/loss', val_loss / len(val_dl), epoch)
if args.evaluate:
# DETERMINISTIC MODE
            model.eval()
gt = []
pr = []
inp_ = []
peds = []
frames = []
dt = []
for batch in test_dl:
inp_.append(batch['src'][:,:,0:2])
gt.append(batch['trg'][:, :, 0:2])
frames.append(batch['frames'])
peds.append(batch['peds'])
dt.append(batch['dataset'])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model(inp, dec_inp, src_att, trg_att)
dec_inp=torch.cat((dec_inp,out[:,-1:].argmax(dim=2)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr.append(preds_tr_b)
peds = np.concatenate(peds, 0)
frames = np.concatenate(frames, 0)
dt = np.concatenate(dt, 0)
gt = np.concatenate(gt, 0)
dt_names = test_dataset.data['dataset_name']
pr = np.concatenate(pr, 0)
mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/DET_mad', mad, epoch)
log.add_scalar('eval/DET_fad', fad, epoch)
scipy.io.savemat(f"output/QuantizedTF/{args.name}/{epoch:05d}.mat",
{'input': inp, 'gt': gt, 'pr': pr, 'peds': peds, 'frames': frames, 'dt': dt,
'dt_names': dt_names})
# MULTI MODALITY
if False:
num_samples=20
model.eval()
gt=[]
pr_all={}
for sam in range(num_samples):
pr_all[sam]=[]
for batch in test_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
gt.append(gt_b)
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
for sam in range(num_samples):
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model.predict(inp, dec_inp, src_att, trg_att)
h=out[:,-1]
dec_inp=torch.cat((dec_inp,torch.multinomial(h,1)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr_all[sam].append(preds_tr_b)
gt=np.concatenate(gt,0)
#pr=np.concatenate(pr,0)
samp = {}
for k in pr_all.keys():
samp[k] = {}
samp[k]['pr'] = np.concatenate(pr_all[k], 0)
samp[k]['mad'], samp[k]['fad'], samp[k]['err'] = baselineUtils.distance_metrics(gt, samp[k]['pr'])
ev = [samp[i]['err'] for i in range(num_samples)]
e20 = np.stack(ev, -1)
mad_samp=e20.mean(1).min(-1).mean()
fad_samp=e20[:,-1].min(-1).mean()
#mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/MM_mad', mad_samp, epoch)
log.add_scalar('eval/MM_fad', fad_samp, epoch)
if epoch % args.save_step == 0:
torch.save(model.state_dict(), f'models/QuantizedTF/{args.name}/{epoch:05d}.pth')
epoch+=1
ab=1
if __name__=='__main__':
main()
|