Dataset columns:
hexsha: string (length 40) | size: int64 (5 to 2.06M) | ext: string (10 classes) | lang: string (1 class)
max_stars_repo_path: string (length 3 to 248) | max_stars_repo_name: string (length 5 to 125) | max_stars_repo_head_hexsha: string (length 40 to 78) | max_stars_repo_licenses: list (length 1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (length 24, nullable) | max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 3 to 248) | max_issues_repo_name: string (length 5 to 125) | max_issues_repo_head_hexsha: string (length 40 to 78) | max_issues_repo_licenses: list (length 1 to 10) | max_issues_count: int64 (1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime: string (length 24, nullable) | max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 3 to 248) | max_forks_repo_name: string (length 5 to 125) | max_forks_repo_head_hexsha: string (length 40 to 78) | max_forks_repo_licenses: list (length 1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (length 24, nullable) | max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 5 to 2.06M) | avg_line_length: float64 (1 to 1.02M) | max_line_length: int64 (3 to 1.03M) | alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M) | score_classes: float64 (0 to 1) | count_generators: int64 (0 to 651k) | score_generators: float64 (0 to 1) | count_decorators: int64 (0 to 990k) | score_decorators: float64 (0 to 1) | count_async_functions: int64 (0 to 235k) | score_async_functions: float64 (0 to 1) | count_documentation: int64 (0 to 1.04M) | score_documentation: float64 (0 to 1)
466e8b57966faf4a0cc17febbe2a82c29fab5e61 | 802 | py | Python | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | ["MIT"] | null | null | null | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | ["MIT"] | 1 | 2019-10-16T14:00:25.000Z | 2019-11-11T16:23:20.000Z | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | ["MIT"] | 2 | 2017-05-15T06:03:27.000Z | 2019-07-21T09:04:24.000Z |
from __future__ import absolute_import
from setuptools import setup
from txjsonrpc import meta
from txjsonrpc.util import dist
setup(
name=meta.display_name,
version=meta.version,
description=meta.description,
author=meta.author,
author_email=meta.author_email,
url=meta.url,
license=meta.license,
packages=dist.findPackages(meta.library_name),
long_description=dist.catReST(
"docs/PRELUDE.txt",
"README",
"docs/DEPENDENCIES.txt",
"docs/INSTALL.txt",
"docs/USAGE.txt",
"TODO",
"docs/HISTORY.txt",
stop_on_errors=True,
out=True),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
],
)
| 24.30303 | 50 | 0.63591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.254364 |
466e8e7393dfb5f6f03f625478a37ff5c418e4db | 28,135 | py | Python | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | ["CC0-1.0", "BSD-3-Clause"] | 17 | 2019-11-11T02:49:29.000Z | 2022-02-17T03:45:19.000Z | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | ["CC0-1.0", "BSD-3-Clause"] | 21 | 2019-07-10T21:45:11.000Z | 2022-02-22T17:57:20.000Z | gsflow/gsflow.py | pygsflow/pygsflow | 83860cd58078017a65e1633b1192469777f1ce15 | ["CC0-1.0", "BSD-3-Clause"] | 8 | 2019-11-11T02:49:36.000Z | 2021-09-30T18:43:45.000Z |
# -*- coding: utf-8 -*-
import os
from .control import ControlFile
from .prms import PrmsModel
from .utils import gsflow_io, GsConstant
from .prms import Helper
from .modflow import Modflow
from .modsim import Modsim
import flopy
import subprocess as sp
import platform
import warnings
warnings.simplefilter("always", PendingDeprecationWarning)
warnings.simplefilter("always", UserWarning)
class GsflowModel(object):
"""
GsflowModel is the GSFLOW model object. This class can be used
to build a GSFLOW model, to load a GSFLOW model from its control file,
to write input files for GSFLOW and to run GSFLOW.
Parameters
----------
control_file : str
control file path and name
prms : PrmsModel object
gsflow.prms.PrmsModel
mf : Modflow object
gsflow.modflow.Modflow
modflow_only : bool
flag that indicates only Modflow model
prms_only : bool
flag that indicates only PRMS model
gsflow_exe : str
GSFLOW executable path and name
modsim : bool
boolean flag to indicate that modsim is active
this creates a gsflow.modsim.Modsim object
model_ws : str, None
override method to set the base model directory when the
GSFLOW control file is not located in the same directory as
the script to run GSFLOW
Examples
--------
load from control file
>>> import gsflow
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
create new, empty gsflow object
>>> control = gsflow.ControlFile(records_list=[])
>>> gsf = gsflow.GsflowModel(control=control)
"""
def __init__(
self,
control=None,
prms=None,
mf=None,
modflow_only=False,
prms_only=False,
gsflow_exe=None,
modsim=False,
model_ws=None,
):
if not isinstance(control, ControlFile):
raise ValueError("control must be a ControlFile object")
self.control = control
self.control_file = os.path.abspath(control.control_file)
self.ws = None
self._modflow_only = modflow_only
self._prms_only = prms_only
self.prms = None
self.mf = None
self.modsim = None
self.gsflow_exe = gsflow_exe
if gsflow_exe is None:
self.gsflow_exe = os.path.join(
os.path.dirname(__file__), r"bin\gsflow.exe"
)
# set prms object
if not modflow_only:
if prms and isinstance(prms, PrmsModel):
self.prms = prms
else:
err = "prms is not a PrmsModel object, skipping..."
warnings.warn(err, UserWarning)
# set flopy modflow object
if not prms_only:
if mf and isinstance(mf, flopy.modflow.Modflow):
self.mf = mf
namefile = os.path.basename(
control.get_values("modflow_name")[0]
)
if namefile is not None:
self.mf.namefile = namefile
else:
err = "modflow is not a gsflow.modflow.Modflow object, skipping..."
warnings.warn(err, UserWarning)
if modsim:
self.modsim = Modsim(self)
self.help = Helper()
@property
def modflow_only(self):
"""
Returns
-------
bool
"""
return self._modflow_only
@property
def prms_only(self):
"""
Returns
-------
bool
"""
return self._prms_only
def export_nc(self, f, **kwargs):
"""
Method to export the GSFLOW model as a netcdf
file. This method only works if nhru is equivalent
to nrow * ncol in modflow.
Parameters
----------
f : str
netcdf file name
kwargs :
keyword arguments for netcdf
"""
if not f.endswith(".nc"):
raise AssertionError("f must end with .nc extension")
if self.mf is None:
err = "Modflow object must be loaded to export netcdf file"
raise AssertionError(err)
f = self.mf.export(f, **kwargs)
if self.prms is not None:
f = self.prms.export_nc(f, self.mf, **kwargs)
return f
@staticmethod
def load_from_file(
control_file,
gsflow_exe="gsflow.exe",
modflow_only=False,
prms_only=False,
mf_load_only=None,
forgive=False,
model_ws=None,
):
"""
Method to load a gsflow model from its control file
Parameters
----------
control_file : str
control file path & name, GSFLOW
gsflow_exe : str
gsflow executable path & name
modflow_only : bool
flag to load only modflow from the control file
prms_only : bool
flag to load only prms from the control file
mf_load_only : list
list of packages to load from modflow ex. [DIS, BAS, LPF]
forgive : bool
forgive file loading errors in flopy
model_ws : str, None
override method to set the base model directory when the
GSFLOW control file is not located in the same directory as
the script to run GSFLOW
Returns
-------
GsflowModel object
Examples
--------
>>> import gsflow
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
"""
prms = None
modflow = None
modsim = False
if not (os.path.isfile(control_file)):
raise ValueError("Cannot find control file")
if model_ws is not None:
control = ControlFile.load_from_file(control_file, abs_path=False)
else:
control = ControlFile.load_from_file(control_file)
print("Control file is loaded")
mode = control.get_values("model_mode")[0].upper()
if mode == "MODFLOW":
modflow_only = True
elif mode == "PRMS":
prms_only = True
elif "MODSIM" in mode:
modsim = True
else:
pass
# load prms
if not modflow_only:
print("Working on loading PRMS model ...")
prms = PrmsModel.load_from_file(control_file, model_ws=model_ws)
if not prms_only:
# get model mode
if "GSFLOW" in mode.upper() or "MODFLOW" in mode.upper():
print("Working on loading MODFLOW files ....")
modflow = GsflowModel._load_modflow(
control, mf_load_only, model_ws, forgive
)
print("MODFLOW files are loaded ... ")
else:
prms_only = True
modflow_only = False
print("Mode is set to PRMS only, loading PRMS model only")
return GsflowModel(
control=control,
prms=prms,
mf=modflow,
modflow_only=modflow_only,
prms_only=prms_only,
gsflow_exe=gsflow_exe,
modsim=modsim,
)
@staticmethod
def _load_modflow(control, mf_load_only, model_ws=None, forgive=False):
"""
The package files in the .nam file are relative to the executable
gsflow. Here we set the model_ws to the location of the gsflow exe, via
the control file or a user supplied model_ws parameter
Parameters
----------
control : ControlFile object
control file object
mf_load_only : list
list of packages to restrict modflow loading to
model_ws : str
optional parameter that allows the user to set the model_ws
forgive : bool
forgive file load errors in modflow
Returns
-------
Modflow object
"""
name = control.get_values("modflow_name")
control_file = control.control_file
if model_ws is None:
name = gsflow_io.get_file_abs(
control_file=control_file, fn=name[0]
)
model_ws, name = os.path.split(name)
else:
model_ws = gsflow_io.get_file_abs(model_ws=model_ws)
name = name[0]
control_file = None
return Modflow.load(
name,
model_ws=model_ws,
control_file=control_file,
load_only=mf_load_only,
forgive=forgive,
)
def write_input(self, basename=None, workspace=None, write_only=None):
"""
Write input files for gsflow. Four cases are possible:
(1) if basename and workspace are None, then the existing files will be overwritten
(2) if basename is specified, only the file names will be changed
(3) if only workspace is specified, only the folder will be changed
(4) when both basename and workspace are specified, both are changed
Parameters
----------
basename : str
project basename
workspace : str
model output directory
write_only: a list
['control', 'parameters', 'prms_data', 'mf', 'modsim']
Examples
--------
>>> gsf = gsflow.GsflowModel.load_from_file('gsflow.control')
>>> gsf.write_input(basename="new", workspace="../new_model")
"""
print("Writing the project files .....")
if workspace is not None:
workspace = os.path.abspath(workspace)
if (basename, workspace) == (None, None):
print("Warning: input files will be overwritten....")
self._write_all(write_only)
# only change the directory
elif basename is None and workspace is not None:
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
fnn = os.path.basename(self.control.control_file)
self.control.model_dir = workspace
self.control.control_file = os.path.join(workspace, fnn)
self.control_file = os.path.join(workspace, fnn)
if self.prms is not None:
self.prms.control_file = self.control_file
# change parameters
new_param_file_list = []
for par_record in self.prms.parameters.parameters_list:
curr_file = os.path.basename(par_record.file_name)
curr_file = os.path.join(workspace, curr_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
curr_file = os.path.relpath(
os.path.join(workspace, self.prms.data.name),
self.control.model_dir,
)
self.prms.data.model_dir = workspace
self.control.set_values("data_file", [curr_file])
# change mf
if self.mf is not None:
self.mf.change_model_ws(workspace, reset_external=True)
mfnm = self.mf.name + ".nam"
self.control.set_values("modflow_name", [mfnm])
# update file names in control object
self._update_control_fnames(workspace, basename)
# write
if self.prms is not None:
self.prms.control = self.control
self._write_all(write_only)
# only change the basename
elif basename is not None and workspace is None:
cnt_file = basename + "_cont.control"
ws_ = os.path.dirname(self.control.control_file)
self.control.control_file = os.path.join(ws_, cnt_file)
self.control_file = os.path.join(ws_, cnt_file)
self.prms.control_file = self.control_file
# change parameters
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(
self.prms.parameters.parameters_list
):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_dir = self.control.model_dir
curr_file = os.path.join(curr_dir, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.relpath(
os.path.join(self.prms.data.model_dir, dfile),
self.control.model_dir,
)
self.prms.data.name = dfile
self.control.set_values("data_file", [curr_file])
# change mf
if self.mf is not None:
curr_dir = self.mf.model_ws
self.mf._set_name(basename)
self._update_mf_basename(basename)
mfnm = self.mf.name + ".nam"
self.control.set_values("modflow_name", [mfnm])
# update file names in control object
self._update_control_fnames(workspace, basename)
self.prms.control = self.control
self._write_all(write_only)
# change both directory & basename
elif basename is not None and workspace is not None:
if not (os.path.isdir(workspace)):
os.mkdir(workspace)
cnt_file = basename + "_cont.control"
self.control.model_dir = workspace
self.control.control_file = os.path.join(workspace, cnt_file)
self.prms.control_file = self.control.control_file
self.control_file = self.control.control_file
# change parameters
# get param files list
flist = self.prms.parameters.parameter_files
new_param_file_list = []
for ifile, par_record in enumerate(
self.prms.parameters.parameters_list
):
file_index = flist.index(par_record.file_name)
par_file = basename + "_par_{}.params".format(file_index)
curr_file = os.path.join(workspace, par_file)
par_record.file_name = curr_file
if not (curr_file in new_param_file_list):
new_param_file_list.append(curr_file)
self.control.set_values("param_file", new_param_file_list)
# change datafile
dfile = basename + "_dat.data"
curr_file = os.path.relpath(
os.path.join(workspace, dfile), self.control.model_dir
)
self.prms.data.model_dir = workspace
self.prms.data.name = dfile
self.control.set_values("data_file", [curr_file])
# flatten mf
if self.mf is not None:
self.mf.change_model_ws(workspace)
self.mf._set_name(os.path.join(workspace, basename))
self._update_mf_basename(basename)
mfnm = basename + ".nam"
self.control.set_values(
"modflow_name",
[
os.path.relpath(
os.path.join(workspace, mfnm), self.control.model_dir
)
],
)
# update file names in control object
self._update_control_fnames(workspace, basename)
self.prms.control = self.control
self._write_all(write_only)
else:
raise NotImplementedError()
def _update_control_fnames(self, workspace, basename):
"""
Method to update control file names and paths
Parameters
----------
workspace : str
model output directory
basename : str
project basename
"""
if workspace is not None and basename is None:
self.control.model_dir = workspace
for rec_name in GsConstant.GSFLOW_FILES:
if rec_name in self.control.record_names:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
va = os.path.join(workspace, os.path.basename(fil))
va = os.path.relpath(va, self.control.model_dir)
file_value.append(va)
self.control.set_values(rec_name, file_value)
else:
for rec_name in GsConstant.GSFLOW_FILES:
if rec_name in self.control.record_names:
if rec_name in ("modflow_name",):
continue
elif rec_name in (
"modflow_name",
"param_file",
"data_file",
):
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
ws, filvalue = os.path.split(fil)
if not ws:
pass
else:
filvalue = os.path.relpath(
fil, self.control.model_dir
)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
else:
file_values = self.control.get_values(rec_name)
file_value = []
for fil in file_values:
if workspace is None:
workspace = self.control.model_dir
vvfile = rec_name.split("_")
del vvfile[-1]
vvfile = "_".join(vvfile)
if "." in fil:
ext = fil.split(".")[-1]
else:
ext = "dat"
vvfile = basename + "_" + vvfile + "." + ext
filvalue = os.path.join(workspace, vvfile)
filvalue = os.path.relpath(
filvalue, self.control.model_dir
)
file_value.append(filvalue)
self.control.set_values(rec_name, file_value)
def _update_mf_basename(self, basename):
"""
Convenience method to update the modflow basename
Parameters
----------
basename : str
basename of the Modflow object
"""
out_files_list = []
for ix, out_file in enumerate(self.mf.output_fnames):
if out_file.count(".") > 1:
ext = out_file.split(".")
del ext[0]
ext = ".".join(ext)
else:
ext = out_file.split(".")[-1]
new_outfn = "{}.{}".format(basename, ext)
out_files_list.append(new_outfn)
self.mf.output_fnames = out_files_list
def _write_all(self, write_only):
"""
Method to write input files
Parameters
----------
write_only : list
list of files to write accepts,
control, parameters, prms_data, mf, and modsim
"""
write_only_options = (
"control",
"parameters",
"prms_data",
"mf",
"modsim",
)
if write_only is not None:
if not isinstance(write_only, list):
raise ValueError("write_only agrgument must be a list")
# make write options case insensitive
write_only = [i.lower() for i in write_only]
for write_option in write_only:
if not (write_option in write_only_options):
raise ValueError(
"The option '{}' is not recognized...".format(
write_option
)
)
else:
write_only = ()
# write control
if len(write_only) == 0 or "control" in write_only:
print("Writing Control file ...")
self.control.write()
if self.prms is not None:
# self write parameters
if len(write_only) == 0 or "parameters" in write_only:
print("Writing Parameters files ...")
self.prms.parameters.write()
# write data
if len(write_only) == 0 or "prms_data" in write_only:
print("Writing Data file ...")
self.prms.data.write()
# write mf
if self.mf is not None:
if len(write_only) == 0 or "mf" in write_only:
print("Writing Modflow files...")
self.mf.write_input()
if self.modsim is not None:
if len(write_only) == 0 or "modsim" in write_only:
print("Writing MODSIM shapefile")
self.modsim.write_modsim_shapefile()
def run_model(self, model_ws=".", forgive=False, gsflow_exe=None):
"""
Method to run a gsflow model
Parameters
----------
model_ws : str
parameter to specify the model directory
forgive : bool
forgives convergence issues
gsflow_exe : str or None
path to gsflow_exe, if gsflow_exe is None it will use
the previously defined gsflow_exe variable or the default
gsflow.exe.
Returns
-------
None or (success, buffer)
Examples
--------
>>> gsf = gsflow.GsflowModel.load_from_file("gsflow.control")
>>> gsf.run_model()
"""
fn = self.control_file
if gsflow_exe is None:
gsflow_exe = self.gsflow_exe
if not os.path.isfile(gsflow_exe):
print(
"Warning : The executable of the model could not be found. "
"Use the gsflow_exe= parameter to define its path... "
)
return None
normal_msg = [
"normal termination",
] # , "simulation successful"]
if forgive:
normal_msg.append("failed to meet solver convergence criteria")
return self.__run(
exe_name=gsflow_exe,
namefile=fn,
normal_msg=normal_msg,
model_ws=model_ws,
)
def _generate_batch_file(self):
fn = os.path.dirname(self.control_file)
fn = os.path.join(fn, "__run_gsflow.bat")
self.__bat_file = fn
fidw = open(fn, "w")
exe = os.path.normpath(os.path.join(os.getcwd(), self.gsflow_exe))
cmd = exe + " " + self.control_file
fidw.write(cmd)
fidw.close()
def __run(
self,
exe_name,
namefile,
model_ws=".",
silent=False,
report=False,
normal_msg="normal termination",
cargs=None,
):
"""
This function will run the model using subprocess.Popen.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Echo run information to screen (default is True).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
# test for exe in current working directory
if is_exe(program):
return program
# test for exe in path statement
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
success = False
buff = []
# convert normal_msg to lower case for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg.lower()]
elif isinstance(normal_msg, list):
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
if exe is None:
s = "The program {} does not exist or is not executable.".format(
exe_name
)
raise Exception(s)
else:
if not silent:
s = "pyGSFLOW is using the following executable to run the model: {}".format(
exe
)
print(s)
exe = os.path.normpath(os.path.join(os.getcwd(), exe))
if not os.path.isfile(os.path.join(model_ws, namefile)):
s = "The namefile for this model does not exists: {}".format(
namefile
)
raise Exception(s)
# simple little function for the thread to target
# def q_output(output, q):
# for line in iter(output.readline, b''):
# q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe, namefile]
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
# if platform.system().lower() == "windows":
# self._generate_batch_file()
# cargv = self.__bat_file
# else:
# pass
model_ws = os.path.dirname(self.control_file)
proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)
while True:
line = proc.stdout.readline()
c = line.decode("utf-8")
if c != "":
for msg in normal_msg:
if msg in c.lower():
success = True
break
c = c.rstrip("\r\n")
if not silent:
print("{}".format(c))
if report:
buff.append(c)
else:
break
return success, buff
| 33.816106 | 95 | 0.530087 | 27,741 | 0.985996 | 0 | 0 | 4,519 | 0.160618 | 0 | 0 | 9,382 | 0.333464 |
466e94eb6dad2cfea42284bb23559a0aba280ee0 | 6,818 | py | Python | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | ["MIT"] | 1 | 2020-12-07T10:37:41.000Z | 2020-12-07T10:37:41.000Z | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | ["MIT"] | null | null | null | tests/integration/storage_memory/test_storage_memory_write.py | Sam-Martin/cloud-wanderer | 1879f9bb150054be5bf33fd46a47414b4939529e | ["MIT"] | null | null | null |
import logging
import pytest
from moto import mock_ec2, mock_iam, mock_sts
from cloudwanderer.cloud_wanderer_resource import CloudWandererResource
from cloudwanderer.storage_connectors import MemoryStorageConnector
from cloudwanderer.urn import URN
from tests.pytest_helpers import create_ec2_instances
logger = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def memory_connector(request):
connector = MemoryStorageConnector()
connector.init()
return connector
def get_inferred_ec2_instances(cloudwanderer_boto3_session):
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="eu-west-2",
service="ec2",
resource_type="instance",
resource_id_parts=[instance.instance_id],
),
resource_data=instance.meta.data,
)
for instance in cloudwanderer_boto3_session.resource("ec2").instances.all()
]
def inferred_ec2_vpcs(cloudwanderer_boto3_session):
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="eu-west-2",
service="ec2",
resource_type="vpc",
resource_id_parts=[vpc.vpc_id],
),
resource_data=vpc.meta.data,
)
for vpc in cloudwanderer_boto3_session.resource("ec2").vpcs.all()
]
@pytest.fixture
def iam_role():
return CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
resource_data={"RoleName": "test-role", "InlinePolicyAttachments": [{"PolicyNames": ["test-role"]}]},
dependent_resource_urns=[
URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy"],
)
],
)
@pytest.fixture
def iam_role_policies():
return [
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy-1"],
),
resource_data={},
parent_urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
),
CloudWandererResource(
urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy-2"],
),
resource_data={},
parent_urn=URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role",
resource_id_parts=["test-role"],
),
),
]
@mock_sts
@mock_iam
def test_write_resource_and_attribute(memory_connector, iam_role):
memory_connector.write_resource(resource=iam_role)
result = memory_connector.read_resource(urn=iam_role.urn)
assert result.urn == iam_role.urn
assert result.role_name == "test-role"
logger.info(result.cloudwanderer_metadata.resource_data)
assert result.inline_policy_attachments == [{"PolicyNames": ["test-role"]}]
assert result.dependent_resource_urns == [
URN(
account_id="111111111111",
region="us-east-1",
service="iam",
resource_type="role_policy",
resource_id_parts=["test-role", "test-role-policy"],
)
]
@mock_sts
@mock_ec2
def test_write_and_delete_instances(memory_connector, cloudwanderer_boto3_session):
create_ec2_instances()
inferred_ec2_instances = get_inferred_ec2_instances(cloudwanderer_boto3_session)
memory_connector.write_resource(resource=inferred_ec2_instances[0])
result_before_delete = memory_connector.read_resource(urn=inferred_ec2_instances[0].urn)
memory_connector.delete_resource(urn=inferred_ec2_instances[0].urn)
result_after_delete = memory_connector.read_resource(urn=inferred_ec2_instances[0].urn)
assert result_before_delete.urn == inferred_ec2_instances[0].urn
assert result_after_delete is None
@mock_sts
@mock_ec2
def test_write_and_delete_resource_of_type_in_account_region(memory_connector, cloudwanderer_boto3_session):
create_ec2_instances(count=5)
inferred_ec2_instances = get_inferred_ec2_instances(cloudwanderer_boto3_session)
for i in range(5):
memory_connector.write_resource(resource=inferred_ec2_instances[i])
memory_connector.delete_resource_of_type_in_account_region(
cloud_name="aws",
service="ec2",
resource_type="instance",
account_id="111111111111",
region="eu-west-2",
cutoff=None,
)
remaining_urns = [
resource.urn for resource in memory_connector.read_resources(service="ec2", resource_type="instance")
]
assert remaining_urns == []
def test_delete_subresources_from_resource(memory_connector, iam_role, iam_role_policies):
"""If we are deleting a parent resource we should delete all its subresources."""
memory_connector.write_resource(resource=iam_role)
memory_connector.write_resource(resource=iam_role_policies[0])
memory_connector.write_resource(resource=iam_role_policies[1])
role_before_delete = memory_connector.read_resource(urn=iam_role.urn)
role_policy_1_before_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
role_policy_2_before_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)
# Delete the parent and ensure the subresources are also deleted
memory_connector.delete_resource(urn=iam_role.urn)
role_after_delete = memory_connector.read_resource(urn=iam_role.urn)
role_policy_1_after_delete = memory_connector.read_resource(urn=iam_role_policies[0].urn)
role_policy_2_after_delete = memory_connector.read_resource(urn=iam_role_policies[1].urn)
assert role_before_delete.urn == iam_role.urn
assert role_policy_1_before_delete.urn == iam_role_policies[0].urn
assert role_policy_2_before_delete.urn == iam_role_policies[1].urn
assert role_after_delete is None
assert role_policy_1_after_delete is None
assert role_policy_2_after_delete is None
| 34.434343 | 109 | 0.661044 | 0 | 0 | 0 | 0 | 4,158 | 0.609856 | 0 | 0 | 838 | 0.12291 |
466f047e17e0d6d7208910c763a4df77317279f9 | 4,596 | py | Python | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | ["MIT"] | 233 | 2016-02-05T20:13:06.000Z | 2022-03-26T13:01:10.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | ["MIT"] | 8 | 2017-12-20T17:07:58.000Z | 2020-08-06T15:44:55.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | ["MIT"] | 15 | 2016-03-22T23:37:56.000Z | 2022-02-27T17:51:08.000Z |
"""Python wrapper around the _clibs PicoSAT extension."""
import os
from tt.errors.arguments import (
InvalidArgumentTypeError,
InvalidArgumentValueError)
if os.environ.get('READTHEDOCS') != 'True':
from tt._clibs import picosat as _c_picosat
VERSION = _c_picosat.VERSION
def sat_one(clauses, assumptions=None):
"""Find a solution that satisfies the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: If solution is found, a list of ints representing the terms of
the solution; otherwise, if no solution found, ``None``.
:rtype: List[:class:`int <python:int>`] or ``None``
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Let's look at a simple example with no satisfiable solution::
>>> from tt import picosat
>>> picosat.sat_one([[1], [-1]]) is None
True
Here's an example where a solution exists::
>>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]])
[1, -2, -3]
Finally, here's an example using assumptions::
>>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3])
[-1, 2, -3]
"""
try:
return _c_picosat.sat_one(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
def sat_all(clauses, assumptions=None):
"""Find all solutions that satisfy the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: An iterator of solutions; if no satisfiable solutions exist, the
iterator will be empty.
:rtype: Iterator[List[:class:`int <python:int>`]]
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Here's an example showing the basic usage::
>>> from tt import picosat
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]):
... print(solution)
...
[1, 2, 3, 4]
[1, 2, 3, -4]
[1, 2, -3, 4]
[1, 2, -3, -4]
[1, -2, 3, 4]
[1, -2, 3, -4]
We can cut down on some of the above solutions by including an assumption::
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]],
... assumptions=[-3]):
... print(solution)
...
[1, 2, -3, 4]
[1, 2, -3, -4]
"""
try:
return _c_picosat.sat_all(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
| 37.672131 | 79 | 0.650131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,833 | 0.833986 |
466f1e4523ee66d1060bcbe9b327e33221329528 | 3,745 | py | Python | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | ["Apache-2.0"] | 9 | 2021-11-16T09:53:47.000Z | 2022-03-02T13:28:53.000Z | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | ["Apache-2.0"] | 340 | 2021-03-03T12:55:37.000Z | 2022-03-31T13:53:44.000Z | tests/test_text_visualization.py | dianna-ai/dianna | 88bcaec001e640c35e5e1e08517ef1624fd661cb | ["Apache-2.0"] | 5 | 2021-08-19T08:14:35.000Z | 2022-03-17T21:12:46.000Z |
import os
import re
import shutil
import unittest
from pathlib import Path
from dianna.visualization.text import highlight_text
class Example1:
original_text = 'Doloremque aliquam totam ut. Aspernatur repellendus autem quia deleniti. Natus accusamus ' \
'doloribus et in quam officiis veniam et. '
explanation = [('ut', 25, -0.06405025896517044),
('in', 102, -0.05127647027074053),
('et', 99, 0.02254588506724936),
('quia', 58, -0.0008216335740370412),
('aliquam', 11, -0.0006268298968242725),
('Natus', 73, -0.0005556223616156406),
('totam', 19, -0.0005126140261410219),
('veniam', 119, -0.0005058379023790869),
('quam', 105, -0.0004573258796550468),
('repellendus', 40, -0.0003253862469633824)]
class Example2:
expected_html = '<html><body><span style="background:rgba(255, 0, 0, 0.08)">such</span> ' \
'<span style="background:rgba(255, 0, 0, 0.01)">a</span> <span style="background:rgba(0, 0, 255, 0.800000)">' \
'bad</span> <span style="background:rgba(0, 0, 255, 0.059287)">movie</span>.</body></html>\n'
original_text = 'Such a bad movie.'
explanation = [('bad', 7, -0.4922624307995777),
('such', 0, 0.04637815000309109),
('movie', 11, -0.03648111256069627),
('a', 5, 0.008377155657765745)]
class MyTestCase(unittest.TestCase):
temp_folder = 'temp_text_visualization_test'
html_file_path = str(Path(temp_folder) / 'output.html')
def test_text_visualization_no_output(self):
highlight_text(Example1.explanation, original_text=Example1.original_text)
assert not Path(self.html_file_path).exists()
def test_text_visualization_html_output_exists(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
def test_text_visualization_html_output_contains_text(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
with open(self.html_file_path, encoding='utf-8') as result_file:
result = result_file.read()
for word in _split_text_into_words(Example1.original_text):
assert word in result
def test_text_visualization_html_output_is_correct(self):
highlight_text(Example2.explanation, original_text=Example2.original_text,
output_html_filename=self.html_file_path)
assert Path(self.html_file_path).exists()
with open(self.html_file_path, encoding='utf-8') as result_file:
result = result_file.read()
assert result == Example2.expected_html
def test_text_visualization_show_plot(self):
highlight_text(Example1.explanation, original_text=Example1.original_text,
show_plot=True)
def setUp(self) -> None:
os.mkdir(self.temp_folder)
def tearDown(self) -> None:
shutil.rmtree(self.temp_folder, ignore_errors=True)
def _split_text_into_words(text):
# regex taken from
# https://stackoverflow.com/questions/12683201/python-re-split-to-split-by-spaces-commas-and-periods-but-not-in-cases-like
# explanation: split by \s (whitespace), and only split by commas and
# periods if they are not followed (?!\d) or preceded (?<!\d) by a digit.
regex = r'\s|(?<!\d)[,.](?!\d)'
return re.split(regex, text)
| 42.556818 | 131 | 0.646996 | 3,201 | 0.85474 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.234713 |
466fca56f7b6e59caf823a738ec5c36d18b27c25 | 2,983 | py | Python | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | ["BSD-3-Clause"] | null | null | null | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | ["BSD-3-Clause"] | null | null | null | setup.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | ["BSD-3-Clause"] | null | null | null |
import os
import sys
import builtins
import versioneer
if sys.version_info[:2] < (3, 7):
raise RuntimeError("Python version >= 3.7 required.")
builtins.__RBC_SETUP__ = True
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
CONDA_BUILD = int(os.environ.get('CONDA_BUILD', '0'))
CONDA_ENV = os.environ.get('CONDA_PREFIX', '') != ''
from setuptools import setup, find_packages # noqa: E402
DESCRIPTION = "RBC - Remote Backend Compiler Project"
LONG_DESCRIPTION = """
The aim of the Remote Backend Compiler project is to distribute the
tasks of a program JIT compilation process to separate computer
systems using the client-server model. The frontend of the compiler
runs on the client computer and the backend runs on the server
computer. The compiler frontend will send the program code to compiler
backend in IR form where it will be compiled to machine code.
"""
def setup_package():
src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
if CONDA_BUILD or CONDA_ENV:
# conda dependencies are specified in meta.yaml or conda
# environment should provide the correct requirements - using
# PyPI is unreliable, see below.
install_requires = []
setup_requires = []
tests_require = []
else:
# Get requirements via PyPI. Use at your own risk as more than
# once the numba and llvmlite have not matched.
install_requires = open('requirements.txt', 'r').read().splitlines()
setup_requires = ['pytest-runner', 'cffi']
tests_require = ['pytest']
metadata = dict(
name='rbc-project',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='BSD',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Pearu Peterson',
maintainer='Pearu Peterson',
author_email='[email protected]',
url='https://github.com/xnd-project/rbc',
platforms='Cross Platform',
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Operating System :: OS Independent",
"Topic :: Software Development",
],
packages=find_packages(),
package_data={'': ['*.thrift']},
cffi_modules=['rbc/rbclib//_rbclib_build.py:ffibuilder'],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
)
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
del builtins.__RBC_SETUP__
| 32.78022 | 76 | 0.650352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,337 | 0.448207 |
46701c0193cfd9ee406763922c026176cc2a2fc9 | 1,126 | py | Python | src/prefect/schedules/adjustments.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | ["Apache-2.0"] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | src/prefect/schedules/adjustments.py | concreted/prefect | dd732f5990ee2b0f3d816adb285168fd63b239e4 | ["Apache-2.0"] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | src/prefect/schedules/adjustments.py | ngriffiths13/prefect | 7f5613abcb182494b7dc12159277c3bc5f3c9898 | ["Apache-2.0"] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z |
"""
Schedule adjustments are functions that accept a `datetime` and modify it in some way.
Adjustments have the signature `Callable[[datetime], datetime]`.
"""
from datetime import datetime, timedelta
from typing import Callable
import pendulum
import prefect.schedules.filters
def add(interval: timedelta) -> Callable[[datetime], datetime]:
"""
Adjustment that adds a specified interval to the date.
Args:
- interval (timedelta): the amount of time to add
Returns:
- Callable[[datetime], bool]: the adjustment function
"""
def _adjustment_fn(dt: datetime) -> datetime:
return pendulum.instance(dt) + interval
return _adjustment_fn
def next_weekday(dt: datetime) -> datetime:
"""
Adjustment that advances a date to the next weekday. If the date is already a weekday,
it is returned unadjusted.
Args:
- dt (datetime): the datetime to adjust
Returns:
- datetime: the adjusted datetime
"""
pdt = pendulum.instance(dt)
while not prefect.schedules.filters.is_weekday(pdt):
pdt = pdt.add(days=1)
return pdt
| 24.478261 | 90 | 0.683837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.553286 |
4671817d5486f1ffa5048135771d27e1109e5cdd | 12,349 | py | Python | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | ["MIT"] | 1 | 2017-02-21T16:46:21.000Z | 2017-02-21T16:46:21.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | ["MIT"] | 1 | 2017-02-21T17:57:05.000Z | 2017-02-22T11:28:51.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | ["MIT"] | null | null | null |
from .fmodobject import *
from .fmodobject import _dll
from .structures import TAG, VECTOR
from .globalvars import get_class
class ConeSettings(object):
def __init__(self, sptr):
self._sptr = sptr
self._in = c_float()
self._out = c_float()
self._outvol = c_float()
ckresult(_dll.FMOD_Sound_Get3DConeSettings(self._sptr, byref(self._in), byref(self._out), byref(self._outvol)))
@property
def inside_angle(self):
return self._in.value
@inside_angle.setter
def inside_angle(self, angle):
self._in = c_float(angle)
self._commit()
@property
def outside_angle(self):
return self._out.value
@outside_angle.setter
def outside_angle(self, angle):
self._out = c_float(angle)
self._commit()
@property
def outside_volume(self):
return self._outvol.value
@outside_volume.setter
def outside_volume(self, vol):
self._outvol = c_float(vol)
self._commit()
def _commit(self):
ckresult(_dll.FMOD_Sound_Set3DConeSettings(self._sptr, self._in, self._out, self._outvol))
class Sound(FmodObject):
def add_sync_point(self, offset, offset_type, name):
s_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_AddSyncPoint(self._ptr, offset, offset_type, name, byref(s_ptr)))
return s_ptr
def delete_sync_point(self, point):
ckresult(_dll.FMOD_Sound_DeleteSyncPoint(self._ptr, point))
@property
def threed_cone_settings(self):
return ConeSettings(self._ptr)
@property
def custom_rolloff(self):
"""Returns the custom rolloff curve.
:rtype: List of [x, y, z] lists.
"""
num = c_int()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", None, byref(num))
curve = (VECTOR * num.value)()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", byref(curve), 0)
return [p.to_list() for p in curve]
@custom_rolloff.setter
def custom_rolloff(self, curve):
"""Sets the custom rolloff curve.
:param curve: The curve to set.
:type curve: A list of something that can be treated as a list of [x, y, z] values e.g. implements indexing in some way.
"""
native_curve = (VECTOR * len(curve))(*[VECTOR.from_list(lst) for lst in curve])
self._call_fmod("FMOD_Sound_Set3DCustomRolloff", native_curve, len(native_curve))
@property
def _min_max_distance(self):
min = c_float()
max = c_float()
ckresult(_dll.FMOD_Sound_Get3DMinMaxDistance(self._ptr, byref(min), byref(max)))
return (min.value, max.value)
@_min_max_distance.setter
def _min_max_distance(self, dists):
ckresult(_dll.FMOD_Sound_Set3DMinMaxDistance(self._ptr, c_float(dists[0]), c_float(dists[1])))
@property
def min_distance(self):
return self._min_max_distance[0]
@min_distance.setter
def min_distance(self, dist):
self._min_max_distance = (dist, self._min_max_distance[1])
@property
def max_distance(self):
return self._min_max_distance[1]
@max_distance.setter
def max_distance(self, dist):
self._min_max_distance = (self._min_max_distance[0], dist)
@property
def _defaults(self):
freq = c_float()
vol = c_float()
pan = c_float()
pri = c_int()
ckresult(_dll.FMOD_Sound_GetDefaults(self._ptr, byref(freq), byref(vol), byref(pan), byref(pri)))
return [freq.value, vol.value, pan.value, pri.value]
@_defaults.setter
def _defaults(self, vals):
ckresult(_dll.FMOD_Sound_SetDefaults(self._ptr, c_float(vals[0]), c_float(vals[1]), c_float(vals[2]), vals[3]))
@property
def default_frequency(self):
return self._defaults[0]
@default_frequency.setter
def default_frequency(self, freq):
d = self._defaults
d[0] = freq
self._defaults = d
@property
def default_volume(self):
return self._defaults[1]
@default_volume.setter
def default_volume(self, vol):
d = self._defaults
d[1] = vol
self._defaults = d
@property
def default_pan(self):
return self._defaults[2]
@default_pan.setter
def default_pan(self, pan):
d = self._defaults
d[2] = pan
self._defaults = d
@property
def default_priority(self):
return self._defaults[3]
@default_priority.setter
def default_priority(self, pri):
d = self._defaults
d[3] = pri
self._defaults = d
@property
def format(self):
type = c_int()
format = c_int()
bits = c_int()
ckresult(_dll.FMOD_Sound_GetFormat(self._ptr, byref(type), byref(format), byref(bits)))
return so(type=type.value, format=format.value, bits=bits.value)
def get_length(self, ltype):
len = c_uint()
ckresult(_dll.FMOD_Sound_GetLength(self._ptr, byref(len), ltype))
return len.value
@property
def loop_count(self):
c = c_int()
ckresult(_dll.FMOD_Sound_GetLoopCount(self._ptr, byref(c)))
return c.value
@loop_count.setter
def loop_count(self, count):
ckresult(_dll.FMOD_Sound_SetLoopCount(self._ptr, count))
@property
def loop_points(self):
"""Returns tuple of two tuples ((start, startunit),(end, endunit))"""
start = c_uint()
startunit = c_int()
end = c_uint()
endunit = c_int()
ckresult(_dll.FMOD_Sound_GetLoopPoints(self._ptr, byref(start), byref(startunit), byref(end), byref(endunit)))
return ((start.value, startunit.value), (end.value, endunit.value))
@loop_points.setter
def loop_points(self, p):
"""Same format as returned from this property is required to successfully call this setter."""
ckresult(_dll.FMOD_Sound_SetLoopPoints(self._ptr, p[0][0], p[0][1], p[1][0], p[1][1]))
@property
def mode(self):
mode = c_int()
ckresult(_dll.FMOD_Sound_GetMode(self._ptr, byref(mode)))
return mode.value
@mode.setter
def mode(self, m):
ckresult(_dll.FMOD_Sound_SetMode(self._ptr, m))
def get_music_channel_volume(self, channel):
v = c_float()
ckresult(_dll.FMOD_Sound_GetMusicChannelVolume(self._ptr, channel, byref(v)))
return v.value
def set_music_channel_volume(self, id, vol):
ckresult(_dll.FMOD_Sound_SetMusicChannelVolume(self._ptr, id, c_float(vol)))
@property
def num_music_channels(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetMusicNumChannels(self._ptr, byref(num)))
return num.value
@property
def name(self):
name = create_string_buffer(256)
ckresult(_dll.FMOD_Sound_GetName(self._ptr, byref(name), 256))
return name.value
@property
def num_subsounds(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSubSounds(self._ptr, byref(num)))
return num.value
@property
def num_sync_points(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSyncPoints(self._ptr, byref(num)))
return num.value
@property
def num_tags(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumTags(self._ptr, byref(num)))
return num.value
@property
def open_state(self):
state = c_int()
percentbuffered = c_uint()
starving = c_bool()
diskbusy = c_bool()
ckresult(_dll.FMOD_Sound_GetOpenState(self._ptr, byref(state), byref(percentbuffered), byref(starving),
byref(diskbusy)))
return so(state=state.value, percent_buffered=percentbuffered.value, starving=starving.value,
disk_busy=diskbusy.value)
@property
def sound_group(self):
grp_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSoundGroup(self._ptr, byref(grp_ptr)))
return get_class("SoundGroup")(grp_ptr)
@sound_group.setter
def sound_group(self, group):
check_type(group, get_class("SoundGroup"))
ckresult(_dll.FMOD_Sound_SetSoundGroup(self._ptr, group._ptr))
def get_subsound(self, index):
sh_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSubSound(self._ptr, index, byref(sh_ptr)))
return Sound(sh_ptr)
def get_sync_point(self, index):
sp = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPoint(self._ptr, index, byref(sp)))
return sp.value
def get_sync_point_info(self, point):
name = c_char_p()
offset = c_uint()
offsettype = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPointInfo(self._ptr, point, byref(name), 256, byref(offset), byref(offsettype)))
return so(name=name.value, offset=offset.value, offset_type=offsettype.value)
@property
def system_object(self):
sptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSystemObject(self._ptr, byref(sptr)))
return get_class("System")(sptr, False)
def play(self, paused=False):
return self.system_object.play_sound(self, paused)
def get_tag(self, index, name=None):
tag = TAG()
ckresult(_dll.FMOD_Sound_GetTag(self._ptr, name, index, byref(tag)))
return tag
@property
def _variations(self):
freq = c_float()
vol = c_float()
pan = c_float()
ckresult(_dll.FMOD_Sound_GetVariations(self._ptr, byref(freq), byref(vol), byref(pan)))
return [freq.value, vol.value, pan.value]
@_variations.setter
def _variations(self, vars):
ckresult(_dll.FMOD_Sound_SetVariations(self._ptr, c_float(vars[0]), c_float(vars[1]), c_float(vars[2])))
@property
def frequency_variation(self):
return self._variations[0]
@frequency_variation.setter
def frequency_variation(self, var):
v = self._variations
v[0] = var
self._variations = var
@property
def volume_variation(self):
return self._variations[1]
@volume_variation.setter
def volume_variation(self, var):
v = self._variations
v[1] = var
self._variations = var
@property
def pan_variation(self):
return self._variations[2]
@pan_variation.setter
def pan_variation(self, var):
v = self._variations
v[2] = var
self._variations = var
def lock(self, offset, length):
ptr1 = c_void_p()
len1 = c_uint()
ptr2 = c_void_p()
len2 = c_uint()
ckresult(_dll.FMOD_Sound_Lock(self._ptr, offset, length, byref(ptr1), byref(ptr2), byref(len1), byref(len2)))
return ((ptr1, len1), (ptr2, len2))
def release(self):
ckresult(_dll.FMOD_Sound_Release(self._ptr))
def set_subsound(self, index, snd):
check_type(snd, Sound)
ckresult(_dll.FMOD_Sound_SetSubSound(self._ptr, index, snd._ptr))
def set_subsound_sentence(self, sounds):
a = c_int * len(sounds)
ptrs = [o._ptr for o in sounds]
ai = a(*ptrs)
ckresult(_dll.FMOD_Sound_SetSubSoundSentence(self._ptr, ai, len(ai)))
def unlock(self, i1, i2):
"""I1 and I2 are tuples of form (ptr, len)."""
ckresult(_dll.FMOD_Sound_Unlock(self._ptr, i1[0], i2[0], i1[1], i2[1]))
@property
def music_speed(self):
speed = c_float()
self._call_fmod("FMOD_Sound_GetMusicSpeed", byref(speed))
return speed.value
@music_speed.setter
def music_speed(self, speed):
self._call_fmod("FMOD_Sound_SetMusicSpeed", c_float(speed))
def read_data(self, length):
"""Read a fragment of the sound's decoded data.
:param length: The requested length.
:returns: The data and the actual length.
:rtype: Tuple of the form (data, actual)."""
buf = create_string_buffer(length)
actual = c_uint()
self._call_fmod("FMOD_Sound_ReadData", buf, length, byref(actual))
return buf.value, actual.value
def seek_data(self, offset):
"""Seeks for data reading purposes.
:param offset: The offset to seek to in PCM samples.
:type offset: Int or long, but must be in range of an unsigned long, not python's arbitrary long."""
self._call_fmod("FMOD_Sound_SeekData", offset) | 31.745501 | 128 | 0.639809 | 12,219 | 0.989473 | 0 | 0 | 8,142 | 0.659325 | 0 | 0 | 1,131 | 0.091586 |
4671cf40aee848a7cf0a11db6406cdad41f3981d | 7,667 | py | Python | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | ["BSD-3-Clause-LBNL"] | 132 | 2017-08-05T00:35:18.000Z | 2022-03-22T08:14:18.000Z | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | ["BSD-3-Clause-LBNL"] | 1,273 | 2017-08-04T05:14:47.000Z | 2022-03-28T13:00:27.000Z | src/pynwb/retinotopy.py | weiglszonja/pynwb | 441c64f332b32eb8141a9d03d92e0fda2fd1ad83 | ["BSD-3-Clause-LBNL"] | 68 | 2017-08-04T16:45:19.000Z | 2022-03-22T08:14:15.000Z |
from collections.abc import Iterable
import warnings
from hdmf.utils import docval, popargs, call_docval_func, get_docval
from . import register_class, CORE_NAMESPACE
from .core import NWBDataInterface, NWBData
class RetinotopyImage(NWBData):
"""Gray-scale anatomical image of cortical surface. Array structure: [rows][columns]
"""
__nwbfields__ = ('bits_per_pixel',
'dimension',
'format',
'field_of_view')
@docval({'name': 'name', 'type': str, 'doc': 'Name of this retinotopy image'},
{'name': 'data', 'type': Iterable, 'doc': 'Data field.'},
{'name': 'bits_per_pixel', 'type': int,
'doc': 'Number of bits used to represent each value. This is necessary to determine maximum '
'(white) pixel value.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ), 'doc': 'Number of rows and columns in the image.'},
{'name': 'format', 'type': Iterable, 'doc': 'Format of image. Right now only "raw" supported.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'})
def __init__(self, **kwargs):
bits_per_pixel, dimension, format, field_of_view = popargs(
'bits_per_pixel', 'dimension', 'format', 'field_of_view', kwargs)
call_docval_func(super().__init__, kwargs)
self.bits_per_pixel = bits_per_pixel
self.dimension = dimension
self.format = format
self.field_of_view = field_of_view
class FocalDepthImage(RetinotopyImage):
"""Gray-scale image taken with same settings/parameters (e.g., focal depth,
wavelength) as data collection. Array format: [rows][columns].
"""
__nwbfields__ = ('focal_depth', )
@docval(*get_docval(RetinotopyImage.__init__),
{'name': 'focal_depth', 'type': 'float', 'doc': 'Focal depth offset, in meters.'})
def __init__(self, **kwargs):
focal_depth = popargs('focal_depth', kwargs)
call_docval_func(super().__init__, kwargs)
self.focal_depth = focal_depth
class RetinotopyMap(NWBData):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude)
"""
__nwbfields__ = ('field_of_view',
'dimension')
@docval({'name': 'name', 'type': str, 'doc': 'the name of this axis map'},
{'name': 'data', 'type': Iterable, 'shape': (None, None), 'doc': 'data field.'},
{'name': 'field_of_view', 'type': Iterable, 'shape': (2, ), 'doc': 'Size of viewing area, in meters.'},
{'name': 'dimension', 'type': Iterable, 'shape': (2, ),
'doc': 'Number of rows and columns in the image'})
def __init__(self, **kwargs):
field_of_view, dimension = popargs('field_of_view', 'dimension', kwargs)
call_docval_func(super().__init__, kwargs)
self.field_of_view = field_of_view
self.dimension = dimension
class AxisMap(RetinotopyMap):
"""Abstract two-dimensional map of responses to stimuli along a single response axis (e.g., altitude) with unit
"""
__nwbfields__ = ('unit', )
@docval(*get_docval(RetinotopyMap.__init__, 'name', 'data', 'field_of_view'),
{'name': 'unit', 'type': str, 'doc': 'Unit that axis data is stored in (e.g., degrees)'},
*get_docval(RetinotopyMap.__init__, 'dimension'))
def __init__(self, **kwargs):
unit = popargs('unit', kwargs)
call_docval_func(super().__init__, kwargs)
self.unit = unit
@register_class('ImagingRetinotopy', CORE_NAMESPACE)
class ImagingRetinotopy(NWBDataInterface):
"""
Intrinsic signal optical imaging or widefield imaging for measuring retinotopy. Stores orthogonal
maps (e.g., altitude/azimuth; radius/theta) of responses to specific stimuli and a combined
polarity map from which to identify visual areas.
This group does not store the raw responses imaged during retinotopic mapping or the
stimuli presented, but rather the resulting phase and power maps after applying a Fourier
transform on the averaged responses.
Note: for data consistency, all images and arrays are stored in the format [row][column] and
[row, col], which equates to [y][x]. Field of view and dimension arrays may appear backward
(i.e., y before x).
"""
__nwbfields__ = ({'name': 'sign_map', 'child': True},
{'name': 'axis_1_phase_map', 'child': True},
{'name': 'axis_1_power_map', 'child': True},
{'name': 'axis_2_phase_map', 'child': True},
{'name': 'axis_2_power_map', 'child': True},
{'name': 'focal_depth_image', 'child': True},
{'name': 'vasculature_image', 'child': True},
'axis_descriptions')
@docval({'name': 'sign_map', 'type': RetinotopyMap,
'doc': 'Sine of the angle between the direction of the gradient in axis_1 and axis_2.'},
{'name': 'axis_1_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the first measured axis.'},
{'name': 'axis_1_power_map', 'type': AxisMap,
'doc': 'Power response on the first measured axis. Response is scaled so 0.0 is no power in '
'the response and 1.0 is maximum relative power.'},
{'name': 'axis_2_phase_map', 'type': AxisMap,
'doc': 'Phase response to stimulus on the second measured axis.'},
{'name': 'axis_2_power_map', 'type': AxisMap,
'doc': 'Power response on the second measured axis. Response is scaled so 0.0 is no '
'power in the response and 1.0 is maximum relative power.'},
{'name': 'axis_descriptions', 'type': Iterable, 'shape': (2, ),
'doc': 'Two-element array describing the contents of the two response axis fields. '
'Description should be something like ["altitude", "azimuth"] or ["radius", "theta"].'},
{'name': 'focal_depth_image', 'type': FocalDepthImage,
'doc': 'Gray-scale image taken with same settings/parameters (e.g., focal depth, wavelength) '
'as data collection. Array format: [rows][columns].'},
{'name': 'vasculature_image', 'type': RetinotopyImage,
'doc': 'Gray-scale anatomical image of cortical surface. Array structure: [rows][columns].'},
{'name': 'name', 'type': str, 'doc': 'the name of this container', 'default': 'ImagingRetinotopy'})
def __init__(self, **kwargs):
axis_1_phase_map, axis_1_power_map, axis_2_phase_map, axis_2_power_map, axis_descriptions, \
focal_depth_image, sign_map, vasculature_image = popargs(
'axis_1_phase_map', 'axis_1_power_map', 'axis_2_phase_map', 'axis_2_power_map',
'axis_descriptions', 'focal_depth_image', 'sign_map', 'vasculature_image', kwargs)
call_docval_func(super().__init__, kwargs)
warnings.warn("The ImagingRetinotopy class currently cannot be written to or read from a file. "
"This is a known bug and will be fixed in a future release of PyNWB.")
self.axis_1_phase_map = axis_1_phase_map
self.axis_1_power_map = axis_1_power_map
self.axis_2_phase_map = axis_2_phase_map
self.axis_2_power_map = axis_2_power_map
self.axis_descriptions = axis_descriptions
self.focal_depth_image = focal_depth_image
self.sign_map = sign_map
self.vasculature_image = vasculature_image
| 52.513699 | 119 | 0.626842 | 7,386 | 0.963349 | 0 | 0 | 6,510 | 0.849094 | 0 | 0 | 3,991 | 0.520543 |
46725864f7a8f29464ea63af729e3e78c2a1218d | 370 | py | Python | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
]
| null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
]
| null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
]
| null | null | null | """
Implement a form class that lets a user enter a zip code and
search for relevant information about business entities in that zip-code area.
"""
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class inputZipForm(Form):
inputZip = StringField('inputZip', validators=[DataRequired()])
| 28.461538 | 78 | 0.77027 | 93 | 0.251351 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.435135 |
46750d2f3ef713a053808ca00fc559cb70158512 | 283 | py | Python | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
]
| 11 | 2020-05-11T08:41:21.000Z | 2022-02-27T08:21:37.000Z | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
]
| 9 | 2020-05-12T10:46:06.000Z | 2020-05-28T17:37:19.000Z | Phase-2/Linked List/Day-70.py | CodedLadiesInnovateTech/python-challenges | 22ce26c68fea6c7c243ada831e47c52e27a62127 | [
"MIT"
]
| 44 | 2020-05-10T20:53:32.000Z | 2021-04-25T18:47:08.000Z | '''
1. Write a Python program to access a specific item in a singly linked list using index value.
2. Write a Python program to set a new value of an item in a singly linked list using index value.
3. Write a Python program to delete the first item from a singly linked list.
'''
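# A minimal sketch (not part of the original challenge text) of one way to
# approach the three exercises above; the SinglyLinkedList class and its
# method names are illustrative assumptions, not a prescribed solution.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None
class SinglyLinkedList:
    def __init__(self, items=()):
        self.head = None
        for item in reversed(list(items)):
            node = Node(item)
            node.next = self.head
            self.head = node
    def _node_at(self, index):
        # Walk the chain node by node until the requested position is reached.
        current = self.head
        for _ in range(index):
            if current is None:
                break
            current = current.next
        if current is None:
            raise IndexError('index out of range')
        return current
    def get(self, index):
        # 1. Access a specific item using its index value.
        return self._node_at(index).data
    def set(self, index, value):
        # 2. Set a new value for the item at the given index.
        self._node_at(index).data = value
    def delete_first(self):
        # 3. Delete the first item by pointing head at the second node.
        if self.head is None:
            raise IndexError('delete from empty list')
        self.head = self.head.next
if __name__ == '__main__':
    linked = SinglyLinkedList(['a', 'b', 'c'])
    print(linked.get(1))    # b
    linked.set(1, 'B')
    linked.delete_first()
    print(linked.get(0))    # B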
| 31.444444 | 98 | 0.749117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.996466 |
46759a54fd059282243dcf32a6f899667fd72ec3 | 25 | py | Python | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
]
| null | null | null | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
]
| null | null | null | ngboost/version.py | dsharpc/ngboost | 8c05e0cb3b95bb23d8f30f17042d2fd8926c4014 | [
"Apache-2.0"
]
| null | null | null | __version__ = "0.3.4dev"
| 12.5 | 24 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.4 |
467608de6f71ab4b616b2d09915c4d849b7654e0 | 624 | py | Python | premailer/tests/test_utils.py | p12tic/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
]
| null | null | null | premailer/tests/test_utils.py | p12tic/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
]
| 1 | 2018-11-23T11:58:22.000Z | 2018-11-23T13:58:13.000Z | premailer/tests/test_utils.py | lavr/premailer | ebfd310dbd0a88b465e811411f67a360e11f3292 | [
"BSD-3-Clause"
]
| null | null | null | import unittest
from premailer.premailer import capitalize_float_margin
class UtilsTestCase(unittest.TestCase):
def testcapitalize_float_margin(self):
self.assertEqual(
capitalize_float_margin('margin:1em'),
'Margin:1em')
self.assertEqual(
capitalize_float_margin('margin-left:1em'),
'Margin-left:1em')
self.assertEqual(
capitalize_float_margin('float:right;'),
'Float:right;')
self.assertEqual(
capitalize_float_margin('float:right;color:red;margin:0'),
'Float:right;color:red;Margin:0')
| 31.2 | 70 | 0.639423 | 548 | 0.878205 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.240385 |
4676a784f66c68a2faf6e33e2c3d3bf09c476661 | 1,652 | py | Python | home/vscode/extensions/ms-python.python-2021.12.1559732655/pythonFiles/lib/python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | qwertzy-antonio-godinho/dots | 65cd657f785e7da3a3ccb1a808c0fc1b8496e5b1 | [
"Apache-2.0"
]
| 6 | 2021-12-26T13:34:32.000Z | 2022-02-08T22:09:38.000Z | src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | ev3dev/ptvsd | cea22767dd78a812a14e2330a540a368f615224e | [
"MIT"
]
| 8 | 2020-07-19T23:39:31.000Z | 2022-02-27T01:38:46.000Z | vscode/extensions/ms-python.python-2020.3.69010/pythonFiles/lib/python/old_ptvsd/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | [
"CC0-1.0"
]
| 3 | 2020-08-04T02:48:32.000Z | 2020-08-17T01:20:09.000Z | import sys
try:
try:
from _pydevd_bundle_ext import pydevd_cython as mod
except ImportError:
from _pydevd_bundle import pydevd_cython as mod
except ImportError:
import struct
try:
is_python_64bit = (struct.calcsize('P') == 8)
except:
# In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
raise ImportError
plat = '32'
if is_python_64bit:
plat = '64'
# We also accept things as:
#
# _pydevd_bundle.pydevd_cython_win32_27_32
# _pydevd_bundle.pydevd_cython_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_bundle.%s' % (mod_name,)
mod = getattr(__import__(check_name), mod_name)
# Regardless of how it was found, make sure it's later available as the
# initial name so that the expected types from cython in frame eval
# are valid.
sys.modules['_pydevd_bundle.pydevd_cython'] = mod
trace_dispatch = mod.trace_dispatch
PyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo
set_additional_thread_info = mod.set_additional_thread_info
global_cache_skips = mod.global_cache_skips
global_cache_frame_skips = mod.global_cache_frame_skips
_set_additional_thread_info_lock = mod._set_additional_thread_info_lock
fix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func
version = getattr(mod, 'version', 0)
| 31.169811 | 107 | 0.725787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.348668 |
4677191c1771ec77d8d7c68a2a88766f05fcf790 | 2,843 | py | Python | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
]
| null | null | null | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
]
| null | null | null | tests/test_thumbnails.py | pypeclub/openpype4-tests | c0c32b643f63e66609772270fff71e88dcd5b922 | [
"Apache-2.0"
]
| null | null | null | from tests.fixtures import api, PROJECT_NAME
assert api
THUMB_DATA1 = b"thisisaveryrandomthumbnailcontent"
THUMB_DATA2 = b"thisihbhihjhuuyiooanothbnlcontent"
def test_folder_thumbnail(api):
response = api.post(
f"projects/{PROJECT_NAME}/folders",
name="testicek",
folderType="Asset",
)
assert response
folder_id = response.data["id"]
# Ensure we cannot create an empty thumbnail
assert not api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=b"",
)
# Create a thumbnail for the folder
response = api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=THUMB_DATA1,
)
assert response
# Ensure the thumbnail is there
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA1
# Get the id of the thumbnail (we can re-use it later)
thumb1_id = api.get(
f"projects/{PROJECT_NAME}/folders/{folder_id}",
).data["thumbnailId"]
# Update thumbnail
response = api.raw_post(
f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail",
mime="image/png",
data=THUMB_DATA2,
)
assert response
# Ensure the thumbnail changed
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA2
# Let the folder use the old thumbnail
response = api.patch(
f"projects/{PROJECT_NAME}/folders/{folder_id}",
thumbnail_id=thumb1_id,
)
assert response
# Ensure the thumbnail is switched to the old one
response = api.raw_get(f"projects/{PROJECT_NAME}/folders/{folder_id}/thumbnail")
assert response == THUMB_DATA1
def test_version_thumbnail(api):
# Create folder/subset/version
response = api.post(
f"projects/{PROJECT_NAME}/folders",
name="test2",
folderType="Asset",
)
assert response
folder_id = response.data["id"]
response = api.post(
f"projects/{PROJECT_NAME}/subsets",
name="test2s",
family="theSopranos",
folderId=folder_id,
)
assert response
subset_id = response.data["id"]
response = api.post(
f"projects/{PROJECT_NAME}/versions",
version=1,
subsetId=subset_id,
)
version_id = response.data["id"]
# Create thumbnail for the version
response = api.raw_post(
f"projects/{PROJECT_NAME}/versions/{version_id}/thumbnail",
mime="image/png",
data=THUMB_DATA1,
)
assert response
# Verify that the thumbnail is there
response = api.raw_get(f"projects/{PROJECT_NAME}/versions/{version_id}/thumbnail")
assert response == THUMB_DATA1
| 23.890756 | 86 | 0.65248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,280 | 0.450229 |
4677247e7b07ffee44bb30042c587480349f229e | 914 | py | Python | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
]
| 1 | 2021-09-01T01:58:13.000Z | 2021-09-01T01:58:13.000Z | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
]
| null | null | null | POO/Heranca/aula107_classes.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
]
 | null | null | null | # Generalizing so that we don't repeat code!
class Pessoa:
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
self.nomeclasse = self.__class__.__name__
def falar(self):
print(f'{self.nomeclasse} está falando.')
class Cliente(Pessoa):
def comprar(self):
print(f'{self.nomeclasse} está comprando...')
class Aluno(Pessoa):
def estudar(self):
print(f'{self.nomeclasse} está estudando...')
class ClienteVIP(Cliente):
def __init__(self, nome, idade, sobrenome):
super().__init__(nome, idade)
print(f'{self.nome}, {self.idade} anos, criado com sucesso.')
self.sobrenome = sobrenome
def falar(self):
Pessoa.falar(self)
        # Cliente does not define falar() itself, so Python looks the method up in its superclass.
Cliente.falar(self)
print(f'{self.nome} {self.sobrenome}') | 28.5625 | 100 | 0.63895 | 868 | 0.941432 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.36551 |
467742b9ee49da3193dfeffba9fb6976ebe7eb72 | 2,391 | py | Python | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
]
| 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
]
| 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
]
| 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype
from nncf.common.graph.operator_metatypes import UnknownMetatype
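# Default mapping from each quantization trait to the ONNX operator metatypes
# that carry it: ops whose inputs should be quantized, ops kept in floating
# point, concat ops, and ops whose outputs are quantized as weights.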
DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = {
QuantizationTrait.INPUTS_QUANTIZABLE: [
ONNXConvolutionMetatype,
ONNXLinearMetatype,
ONNXAveragePoolMetatype,
ONNXGlobalAveragePoolMetatype,
ONNXAddLayerMetatype,
ONNXMulLayerMetatype,
ONNXBatchNormMetatype,
ONNXHardSigmoidMetatype,
ONNXResizeMetatype,
],
QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype,
ONNXSoftmaxMetatype,
UnknownMetatype],
QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype],
QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: []
}
| 49.8125 | 89 | 0.79632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.241322 |
46779bc59fb8e412188640c04f3538454363b415 | 13,483 | py | Python | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
]
| null | null | null | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
]
| null | null | null | Package/CONFIG.py | YuanYuLin/samba | ef8aaca2c539b0d241414e7f335b4ff939461558 | [
"MIT"
]
| null | null | null | import ops
import iopc
import sys
TARBALL_FILE="samba-4.8.4.tar.gz"
TARBALL_DIR="samba-4.8.4"
INSTALL_DIR="samba-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
dst_usr_local_lib_dir = ""
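# Resolve every build, staging and install path for this package from the
# build arguments and the environment (CROSS_COMPILE, INSTALL_TEST_UTILS)
# into the module-level variables declared above.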
def set_global(args):
global pkg_path
global output_dir
global tarball_pkg
global install_dir
global install_tmp_dir
global tarball_dir
global cc_host
global tmp_include_dir
global dst_include_dir
global dst_lib_dir
global dst_usr_local_lib_dir
global dst_usr_local_libexec_dir
global dst_usr_local_share_dir
global dst_usr_local_dir
global src_pkgconfig_dir
global dst_pkgconfig_dir
global dst_bin_dir
global dst_etc_dir
global install_test_utils
pkg_path = args["pkg_path"]
output_dir = args["output_path"]
tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
install_dir = ops.path_join(output_dir, INSTALL_DIR)
install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
cc_host_str = ops.getEnv("CROSS_COMPILE")
cc_host = cc_host_str[:len(cc_host_str) - 1]
tmp_include_dir = ops.path_join(output_dir, ops.path_join("include",args["pkg_name"]))
dst_include_dir = ops.path_join("include",args["pkg_name"])
dst_lib_dir = ops.path_join(install_dir, "lib")
dst_bin_dir = ops.path_join(install_dir, "bin")
dst_etc_dir = ops.path_join(install_dir, "etc")
dst_usr_local_lib_dir = ops.path_join(install_dir, "usr/local/lib")
dst_usr_local_dir = ops.path_join(install_dir, "usr/local")
dst_usr_local_libexec_dir = ops.path_join(install_dir, "usr/local/libexec")
dst_usr_local_share_dir = ops.path_join(install_dir, "usr/local/share")
src_pkgconfig_dir = ops.path_join(pkg_path, "pkgconfig")
dst_pkgconfig_dir = ops.path_join(install_dir, "pkgconfig")
if ops.getEnv("INSTALL_TEST_UTILS") == 'y':
install_test_utils = True
else:
install_test_utils = False
def MAIN_ENV(args):
set_global(args)
ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
'''
ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("CPP", ops.getEnv("CROSS_COMPILE") + "g++"))
ops.exportEnv(ops.setEnv("AR", ops.getEnv("CROSS_COMPILE") + "ar"))
ops.exportEnv(ops.setEnv("RANLIB", ops.getEnv("CROSS_COMPILE") + "ranlib"))
ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
'''
ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
return False
def MAIN_EXTRACT(args):
set_global(args)
ops.unTarGz(tarball_pkg, output_dir)
return True
def MAIN_PATCH(args, patch_group_name):
set_global(args)
for patch in iopc.get_patch_list(pkg_path, patch_group_name):
if iopc.apply_patch(tarball_dir, patch):
continue
else:
sys.exit(1)
return True
def MAIN_CONFIGURE(args):
set_global(args)
job_count = ops.getEnv("BUILD_JOBS_COUNT")
extra_conf = []
'''
#extra_conf.append("--cross-compile")
#extra_conf.append("-C -V")
#extra_conf.append("--cross-answers=cc.txt")
#extra_conf.append("--hostcc=" + cc_host)
extra_conf.append("--abi-check-disable")
extra_conf.append("--disable-rpath")
extra_conf.append("--bundled-libraries=NONE")
#extra_conf.append("--cross-execute='qemu-arm-static -L /usr/arm-linux-gnu'")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--disable-gnutls")
#extra_conf.append("--private-libraries=NONE")
extra_conf.append("--without-gettext")
extra_conf.append("--without-systemd")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-ads")
extra_conf.append("--without-winbind")
extra_conf.append("--without-ldap")
extra_conf.append("--without-pam")
extra_conf.append("--without-pie")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-automount")
extra_conf.append("--without-utmp")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-quotas")
extra_conf.append("--without-cluster-support")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-libarchive")
extra_conf.append("--without-regedit")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--disable-python")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
'''
extra_conf.append("--disable-python")
extra_conf.append("--without-ad-dc")
extra_conf.append("--without-acl-support")
extra_conf.append("--without-ldap")
extra_conf.append("--without-ads")
extra_conf.append("--without-pam")
extra_conf.append("--without-gettext")
extra_conf.append("--jobs=" + job_count)
extra_conf.append("--without-systemd")
extra_conf.append("--without-regedit")
extra_conf.append("--without-cluster-support")
extra_conf.append("--without-ntvfs-fileserver")
extra_conf.append("--without-winbind")
extra_conf.append("--disable-glusterfs")
extra_conf.append("--disable-cups")
extra_conf.append("--disable-iprint")
extra_conf.append("--disable-avahi")
extra_conf.append("--without-automount")
extra_conf.append("--without-dnsupdate")
extra_conf.append("--without-fam")
extra_conf.append("--without-dmapi")
extra_conf.append("--without-quotas")
extra_conf.append("--without-profiling-data")
extra_conf.append("--without-utmp")
extra_conf.append("--without-libarchive")
#extra_conf.append("--enable-developer")
    print(extra_conf)
#iopc.waf(tarball_dir, extra_conf)
iopc.configure(tarball_dir, extra_conf)
return True
def MAIN_BUILD(args):
set_global(args)
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
ops.mkdir(install_dir)
ops.mkdir(dst_lib_dir)
ops.mkdir(dst_bin_dir)
ops.mkdir(dst_usr_local_dir)
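    # Stage the samba daemons and shared libraries out of the temporary
    # install tree, recreating the versioned -> unversioned .so symlink
    # chain for each library.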
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/nmbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/sbin/smbd"), dst_bin_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-binding.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so.0")
ops.ln(dst_lib_dir, "libdcerpc-binding.so.0.0.1", "libdcerpc-binding.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc-samr.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so.0")
ops.ln(dst_lib_dir, "libdcerpc-samr.so.0.0.1", "libdcerpc-samr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libdcerpc.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so.0")
ops.ln(dst_lib_dir, "libdcerpc.so.0.0.1", "libdcerpc.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-krb5pac.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so.0")
ops.ln(dst_lib_dir, "libndr-krb5pac.so.0.0.1", "libndr-krb5pac.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-nbt.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so.0")
ops.ln(dst_lib_dir, "libndr-nbt.so.0.0.1", "libndr-nbt.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr.so.0.1.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0.1")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so.0")
ops.ln(dst_lib_dir, "libndr.so.0.1.0", "libndr.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libndr-standard.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so.0")
ops.ln(dst_lib_dir, "libndr-standard.so.0.0.1", "libndr-standard.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnetapi.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnetapi.so.0", "libnetapi.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_winbind.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_winbind.so.2", "libnss_winbind.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libnss_wins.so.2"), dst_lib_dir)
ops.ln(dst_lib_dir, "libnss_wins.so.2", "libnss_wins.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-credentials.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so.0")
ops.ln(dst_lib_dir, "libsamba-credentials.so.0.0.1", "libsamba-credentials.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-errors.so.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-errors.so.1", "libsamba-errors.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-hostconfig.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so.0")
ops.ln(dst_lib_dir, "libsamba-hostconfig.so.0.0.1", "libsamba-hostconfig.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-passdb.so.0.27.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0.27")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so.0")
ops.ln(dst_lib_dir, "libsamba-passdb.so.0.27.0", "libsamba-passdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamba-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so.0")
ops.ln(dst_lib_dir, "libsamba-util.so.0.0.1", "libsamba-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsamdb.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so.0")
ops.ln(dst_lib_dir, "libsamdb.so.0.0.1", "libsamdb.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbclient.so.0.3.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0.3")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so.0")
ops.ln(dst_lib_dir, "libsmbclient.so.0.3.1", "libsmbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libsmbconf.so.0"), dst_lib_dir)
ops.ln(dst_lib_dir, "libsmbconf.so.0", "libsmbconf.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libtevent-util.so.0.0.1"), dst_lib_dir)
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so.0")
ops.ln(dst_lib_dir, "libtevent-util.so.0.0.1", "libtevent-util.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/libwbclient.so.0.14"), dst_lib_dir)
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so.0")
ops.ln(dst_lib_dir, "libwbclient.so.0.14", "libwbclient.so")
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/winbind_krb5_locator.so"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/private/."), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/auth"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/idmap"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/ldb"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/nss_info"), dst_lib_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/samba/lib/vfs"), dst_lib_dir)
ops.ln(dst_usr_local_dir, "/tmp/samba", "samba")
return True
def MAIN_INSTALL(args):
set_global(args)
iopc.installBin(args["pkg_name"], ops.path_join(dst_lib_dir, "."), "lib")
iopc.installBin(args["pkg_name"], ops.path_join(dst_bin_dir, "."), "usr/sbin")
iopc.installBin(args["pkg_name"], ops.path_join(dst_usr_local_dir, "."), "usr/local")
#iopc.installBin(args["pkg_name"], ops.path_join(tmp_include_dir, "."), dst_include_dir)
#iopc.installBin(args["pkg_name"], ops.path_join(dst_pkgconfig_dir, '.'), "pkgconfig")
return False
def MAIN_SDKENV(args):
set_global(args)
return False
def MAIN_CLEAN_BUILD(args):
set_global(args)
return False
def MAIN(args):
set_global(args)
| 42.533123 | 112 | 0.705555 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,368 | 0.472298 |
4677cd39827e65c98f0ade72fd58eb0f79b2c0cc | 671 | py | Python | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
]
| 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
]
| null | null | null | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# declaration
class Chain:
"""
A locator that ties together two others in order to express that something in {next}
caused {this} to be recorded
"""
# meta methods
def __init__(self, this, next):
self.this = this
self.next = next
return
def __str__(self):
# if {next} is non-trivial, show the chain
if self.next: return "{0.this}, {0.next}".format(self)
# otherwise don't
return "{0.this}".format(self)
# implementation details
__slots__ = "this", "next"
# end of file
| 18.638889 | 88 | 0.593145 | 538 | 0.799406 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.585438 |
467ab5f703a873dbd5ce9a6760742fdfbfa8b614 | 30 | py | Python | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
]
| 3 | 2020-12-22T10:43:39.000Z | 2021-01-01T16:42:32.000Z | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
]
| null | null | null | tests/resources/accepted/res_0_minpyversion_3_0.py | matteogabburo/python-ast-utils | 5069e9e53e8ed1f5305167254139b95967102aeb | [
"MIT"
]
| null | null | null | import os
x = 7
print(x + 1)
| 6 | 12 | 0.566667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
467b69ecaf5ca591ddc3465f82457df4ea005caa | 448 | py | Python | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
]
| null | null | null | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
]
| null | null | null | Mod 03/03 Prova.py | SauloCav/CN | e8de20dbcf1b3ff043743728b7dde2eff1ae8f87 | [
"MIT"
]
| null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def f(x):
return math.exp(x)/x**3
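# The function below approximates the integral of f over [a, b] with the
# composite Simpson's rule on 104 subintervals: interior points with odd
# index get weight 4 (accumulated in soma_par) and those with even index get
# weight 2 (soma_impar). Note that the name `int` shadows the builtin int().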
def int(a,b):
h = (b-a)/104
x_par = a+h
x_impar = a+2*h
soma_par = 0
soma_impar = 0
for i in range(52):
soma_par += f(x_par)
x_par += 2*h
for i in range(51):
soma_impar += f(x_impar)
x_impar += 2*h
return (f(a)+f(b) + 4 * soma_par + 2*soma_impar) *h/3
print(int(1.9,9.7))
| 18.666667 | 58 | 0.504464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.102679 |
467ea3052543109008b133a68620b40cb725a84a | 2,055 | py | Python | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
]
| null | null | null | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
]
| 1 | 2019-08-21T23:02:49.000Z | 2019-08-21T23:02:49.000Z | almetro/al.py | arnour/almetro | 7d00b1bb746b49dc9dd464395abdf4fe93f028fe | [
"Apache-2.0"
]
| null | null | null | from almetro.instance import growing
from almetro.metro import Metro
import timeit
class ExecutionSettings:
def __init__(self, trials=1, runs=1):
if not trials or trials < 1:
raise TypeError('#trials must be provided')
if not runs or runs < 1:
raise TypeError('#runs must be provided')
self.trials = trials
self.runs = runs
@staticmethod
def new():
return ExecutionSettings()
class InstanceSettings:
def __init__(self, instances=1, provider=growing()):
if not instances:
raise TypeError('#instances must be provided')
if not provider:
raise TypeError('provider must be provided')
self.instances = instances
self.provider = provider
@staticmethod
def new():
return InstanceSettings()
class Al:
def __init__(self, instance_settings=InstanceSettings.new(), execution_settings=ExecutionSettings.new()):
if not instance_settings:
raise TypeError('instance settings must be provided')
if not execution_settings:
raise TypeError('execution settings must be provided')
self.__instance_settings = instance_settings
self.__execution_settings = execution_settings
def with_instances(self, instances, provider):
return Al(instance_settings=InstanceSettings(instances, provider), execution_settings=self.__execution_settings)
def with_execution(self, trials, runs=1):
return Al(instance_settings=self.__instance_settings, execution_settings=ExecutionSettings(trials, runs))
def metro(self, algorithm, complexity):
metro = Metro.new(complexity)
for _ in range(self.__instance_settings.instances):
instance = self.__instance_settings.provider.new_instance()
def runner():
algorithm(**instance.value)
metro.register(instance, timeit.repeat(runner, number=self.__execution_settings.runs, repeat=self.__execution_settings.trials))
return metro
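# A minimal usage sketch (editorial illustration, not part of the library):
# time a hypothetical `my_algorithm` over 10 growing instances with 3 trials
# per instance. `my_algorithm` and `my_complexity` are assumed names; the
# complexity argument is whatever Metro.new() expects.
#
#   al = Al().with_instances(instances=10, provider=growing())
#   al = al.with_execution(trials=3, runs=1)
#   metro = al.metro(algorithm=my_algorithm, complexity=my_complexity)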
| 34.830508 | 139 | 0.685158 | 1,963 | 0.955231 | 0 | 0 | 125 | 0.060827 | 0 | 0 | 179 | 0.087105 |
467fe487527b89370f3c2d1bfc1b416969557a05 | 31,434 | py | Python | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
]
| 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
]
| 53 | 2017-04-12T19:53:18.000Z | 2022-02-22T10:33:13.000Z | yt_dlp/extractor/archiveorg.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
]
| 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_HTTPError
)
from ..utils import (
bug_reports_message,
clean_html,
dict_get,
extract_attributes,
ExtractorError,
get_element_by_id,
HEADRequest,
int_or_none,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
parse_qs,
str_to_int,
str_or_none,
traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
urlhandle_detect_ext,
url_or_none
)
class ArchiveOrgIE(InfoExtractor):
IE_NAME = 'archive.org'
IE_DESC = 'archive.org video and audio'
_VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^?#]+)(?:[?].*)?$'
_TESTS = [{
'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'md5': '8af1d4cf447933ed3c7f4871162602db',
'info_dict': {
'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'ext': 'ogv',
'title': '1968 Demo - FJCC Conference Presentation Reel #1',
'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
'release_date': '19681210',
'timestamp': 1268695290,
'upload_date': '20100315',
'creator': 'SRI International',
'uploader': '[email protected]',
},
}, {
'url': 'https://archive.org/details/Cops1922',
'md5': '0869000b4ce265e8ca62738b336b268a',
'info_dict': {
'id': 'Cops1922',
'ext': 'mp4',
'title': 'Buster Keaton\'s "Cops" (1922)',
'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
'uploader': '[email protected]',
'timestamp': 1387699629,
'upload_date': "20131222",
},
}, {
'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'only_matching': True,
}, {
'url': 'https://archive.org/details/Election_Ads',
'md5': '284180e857160cf866358700bab668a3',
'info_dict': {
'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
'ext': 'mp4',
},
}, {
'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'md5': '7915213ef02559b5501fe630e1a53f59',
'info_dict': {
'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
'ext': 'mp4',
'timestamp': 1205588045,
'uploader': '[email protected]',
'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon',
'upload_date': '20080315',
},
}, {
'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
'md5': '7d07ffb42aba6537c28e053efa4b54c9',
'info_dict': {
'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
'title': 'Turning',
'ext': 'flac',
},
}, {
'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
'md5': 'a07cd8c6ab4ee1560f8a0021717130f3',
'info_dict': {
'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
'title': 'Deal',
'ext': 'flac',
'timestamp': 1205895624,
'uploader': '[email protected]',
'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0',
'upload_date': '20080319',
'location': 'Barton Hall - Cornell University',
},
}, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
'md5': '7cb019baa9b332e82ea7c10403acd180',
'info_dict': {
'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/01.01. Bells Of Rostov.mp3',
'title': 'Bells Of Rostov',
'ext': 'mp3',
},
}, {
'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
'info_dict': {
'id': 'lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02. Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1).mp3',
'title': 'Song And Chorus In The Polovetsian Camp From "Prince Igor" (Act 2, Scene 1)',
'ext': 'mp3',
'timestamp': 1569662587,
'uploader': '[email protected]',
'description': 'md5:012b2d668ae753be36896f343d12a236',
'upload_date': '20190928',
},
}]
@staticmethod
def _playlist_data(webpage):
element = re.findall(r'''(?xs)
<input
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+class=['"]?js-play8-playlist['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*/>
''', webpage)[0]
return json.loads(extract_attributes(element)['value'])
def _real_extract(self, url):
video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
# Archive.org metadata API doesn't clearly demarcate playlist entries
# or subtitle tracks, so we get them from the embeddable player.
embed_page = self._download_webpage(
'https://archive.org/embed/' + identifier, identifier)
playlist = self._playlist_data(embed_page)
entries = {}
for p in playlist:
# If the user specified a playlist entry in the URL, ignore the
# rest of the playlist.
if entry_id and p['orig'] != entry_id:
continue
entries[p['orig']] = {
'formats': [],
'thumbnails': [],
'artist': p.get('artist'),
'track': p.get('title'),
'subtitles': {}}
for track in p.get('tracks', []):
if track['kind'] != 'subtitles':
continue
entries[p['orig']][track['label']] = {
'url': 'https://archive.org/' + track['file'].lstrip('/')}
metadata = self._download_json(
'http://archive.org/metadata/' + identifier, identifier)
m = metadata['metadata']
identifier = m['identifier']
info = {
'id': identifier,
'title': m['title'],
'description': clean_html(m.get('description')),
'uploader': dict_get(m, ['uploader', 'adder']),
'creator': m.get('creator'),
'license': m.get('licenseurl'),
'release_date': unified_strdate(m.get('date')),
'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
'webpage_url': 'https://archive.org/details/' + identifier,
'location': m.get('venue'),
'release_year': int_or_none(m.get('year'))}
for f in metadata['files']:
if f['name'] in entries:
entries[f['name']] = merge_dicts(entries[f['name']], {
'id': identifier + '/' + f['name'],
'title': f.get('title') or f['name'],
'display_id': f['name'],
'description': clean_html(f.get('description')),
'creator': f.get('creator'),
'duration': parse_duration(f.get('length')),
'track_number': int_or_none(f.get('track')),
'album': f.get('album'),
'discnumber': int_or_none(f.get('disc')),
'release_year': int_or_none(f.get('year'))})
entry = entries[f['name']]
elif f.get('original') in entries:
entry = entries[f['original']]
else:
continue
if f.get('format') == 'Thumbnail':
entry['thumbnails'].append({
'id': f['name'],
'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
'width': int_or_none(f.get('width')),
                    'height': int_or_none(f.get('height')),
'filesize': int_or_none(f.get('size'))})
extension = (f['name'].rsplit('.', 1) + [None])[1]
if extension in KNOWN_EXTENSIONS:
entry['formats'].append({
'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
'format': f.get('format'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'filesize': int_or_none(f.get('size')),
'protocol': 'https'})
# Sort available formats by filesize
for entry in entries.values():
entry['formats'] = list(sorted(entry['formats'], key=lambda x: x.get('filesize', -1)))
if len(entries) == 1:
# If there's only one item, use it as the main info dict
only_video = entries[list(entries.keys())[0]]
if entry_id:
info = merge_dicts(only_video, info)
else:
info = merge_dicts(info, only_video)
else:
# Otherwise, we have a playlist.
info['_type'] = 'playlist'
info['entries'] = list(entries.values())
if metadata.get('reviews'):
info['comments'] = []
for review in metadata['reviews']:
info['comments'].append({
'id': review.get('review_id'),
'author': review.get('reviewer'),
'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
'timestamp': unified_timestamp(review.get('createdate')),
'parent': 'root'})
return info
class YoutubeWebArchiveIE(InfoExtractor):
IE_NAME = 'web.archive:youtube'
IE_DESC = 'web.archive.org saved youtube videos'
_VALID_URL = r"""(?x)^
(?:https?://)?web\.archive\.org/
(?:web/)?
(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional
(?:https?(?::|%3[Aa])//)?
(?:
(?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
|(?:wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
)
(?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
"""
_TESTS = [
{
'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
'info_dict': {
'id': 'aYAGB11YrSs',
'ext': 'webm',
'title': 'Team Fortress 2 - Sandviches!',
'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
'upload_date': '20110926',
'uploader': 'Zeurel',
'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
'duration': 32,
'uploader_id': 'Zeurel',
'uploader_url': 'http://www.youtube.com/user/Zeurel'
}
}, {
# Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
'info_dict': {
'id': '97t7Xj_iBv0',
'ext': 'mp4',
'title': 'Why Machines That Bend Are Better',
'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
'upload_date': '20190312',
'uploader': 'Veritasium',
'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
'duration': 771,
'uploader_id': '1veritasium',
'uploader_url': 'http://www.youtube.com/user/1veritasium'
}
}, {
# Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
# Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
'info_dict': {
'id': 'AkhihxRKcrs',
'ext': 'webm',
'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
'upload_date': '20120712',
'duration': 398,
'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
'uploader_id': 'machinima',
'uploader_url': 'http://www.youtube.com/user/machinima'
}
}, {
# FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
'info_dict': {
'id': 'jNQXAC9IVRw',
'ext': 'flv',
'title': 'Me at the zoo',
'upload_date': '20050423',
'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
'duration': 19,
'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
'uploader_id': 'jawed',
'uploader_url': 'http://www.youtube.com/user/jawed'
}
}, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': {
'id': 'lTx3G6h2xyA',
'ext': 'flv',
'title': 'Madeon - Pop Culture (live mashup)',
'upload_date': '20110711',
'uploader': 'Madeon',
'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
'duration': 204,
'description': 'md5:f7535343b6eda34a314eff8b85444680',
'uploader_id': 'itsmadeon',
'uploader_url': 'http://www.youtube.com/user/itsmadeon'
}
}, {
# First capture is of dead video, second is the oldest from CDX response.
'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
'info_dict': {
'id': '1JYutPM8O6E',
'ext': 'mp4',
'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
'upload_date': '20160218',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 1236,
'description': 'md5:21032bae736421e89c2edf36d1936947',
'uploader_id': 'MachinimaETC',
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
}
}, {
# First capture of dead video, capture date in link links to dead capture.
'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
'info_dict': {
'id': '6FPhZJGvf4E',
'ext': 'mp4',
'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
'upload_date': '20160219',
'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
'duration': 798,
'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
'uploader_id': 'MachinimaETC',
'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
},
'expected_warnings': [
r'unable to download capture webpage \(it may not be archived\)'
]
}, { # Very old YouTube page, has - YouTube in title.
'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
'info_dict': {
'id': '-06-KB9XTzg',
'ext': 'flv',
'title': 'New Coin Hack!! 100% Safe!!'
}
}, {
'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
'info_dict': {
'id': 'dWW7qP423y8',
'ext': 'mp4',
'title': 'It\'s Bootleg AirPods Time.',
'upload_date': '20211021',
'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
'duration': 810,
'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
'uploader': 'DankPods',
'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
}
}, {
# player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
'info_dict': {
'id': '6Dh-RL__uN4',
'ext': 'mp4',
'title': 'bitch lasagna',
'upload_date': '20181005',
'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
'duration': 135,
'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
'uploader': 'PewDiePie',
'uploader_id': 'PewDiePie',
'uploader_url': 'http://www.youtube.com/user/PewDiePie'
}
}, {
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
'only_matching': True
}, {
# Video not archived, only capture is unavailable video page
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
'only_matching': True
}, { # Encoded url
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
'only_matching': True
}, {
'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&search=soccer',
'only_matching': True
}, {
'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
'only_matching': True
}
]
_YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
_YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE
_YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE
_YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers
_YT_ALL_THUMB_SERVERS = orderedSet(
_YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])
_WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
_OLDEST_CAPTURE_DATE = 20050214000000
_NEWEST_CAPTURE_DATE = 20500101000000
def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note='Downloading CDX API JSON'):
# CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
query = {
'url': url,
'output': 'json',
'fl': 'original,mimetype,length,timestamp',
'limit': 500,
'filter': ['statuscode:200'] + (filters or []),
'collapse': collapse or [],
**(query or {})
}
res = self._download_json('https://web.archive.org/cdx/search/cdx', item_id, note, query=query)
if isinstance(res, list) and len(res) >= 2:
# format response to make it easier to use
return list(dict(zip(res[0], v)) for v in res[1:])
elif not isinstance(res, list) or len(res) != 0:
self.report_warning('Error while parsing CDX API response' + bug_reports_message())
def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
return self._parse_json(self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _extract_webpage_title(self, webpage):
page_title = self._html_search_regex(
r'<title>([^<]*)</title>', webpage, 'title', default='')
# YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
return self._html_search_regex(
r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
page_title, 'title', default='')
def _extract_metadata(self, video_id, webpage):
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
player_response = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {}
initial_data = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial player response') or {}
initial_data_video = traverse_obj(
initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
expected_type=dict, get_all=False, default={})
video_details = traverse_obj(
player_response, 'videoDetails', expected_type=dict, get_all=False, default={})
microformats = traverse_obj(
player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})
video_title = (
video_details.get('title')
or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
or self._extract_webpage_title(webpage)
or search_meta(['og:title', 'twitter:title', 'title']))
channel_id = str_or_none(
video_details.get('channelId')
or microformats.get('externalChannelId')
or search_meta('channelId')
or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1', # @b45a9e6
webpage, 'channel id', default=None, group='id'))
channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None
duration = int_or_none(
video_details.get('lengthSeconds')
or microformats.get('lengthSeconds')
or parse_duration(search_meta('duration')))
description = (
video_details.get('shortDescription')
or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
or clean_html(get_element_by_id('eow-description', webpage)) # @9e6dd23
or search_meta(['description', 'og:description', 'twitter:description']))
uploader = video_details.get('author')
# Uploader ID and URL
uploader_mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">', # @fd05024
webpage)
if uploader_mobj is not None:
uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
else:
# @a6211d2
uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
uploader_id = self._search_regex(
r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)
upload_date = unified_strdate(
dict_get(microformats, ('uploadDate', 'publishDate'))
or search_meta(['uploadDate', 'datePublished'])
or self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'], # @7998520
webpage, 'upload date', default=None))
return {
'title': video_title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'channel_id': channel_id,
'channel_url': channel_url,
'duration': duration,
'uploader_url': uploader_url,
'uploader_id': uploader_id,
}
def _extract_thumbnails(self, video_id):
try_all = 'thumbnails' in self._configuration_arg('check_all')
thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]
thumbnails = []
for url in thumbnail_base_urls:
response = self._call_cdx_api(
video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
collapse=['urlkey'], query={'matchType': 'prefix'})
if not response:
continue
thumbnails.extend(
{
'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
'filesize': int_or_none(thumbnail_dict.get('length')),
'preference': int_or_none(thumbnail_dict.get('length'))
} for thumbnail_dict in response)
if not try_all:
break
self._remove_duplicate_formats(thumbnails)
return thumbnails
def _get_capture_dates(self, video_id, url_date):
capture_dates = []
# Note: CDX API will not find watch pages with extra params in the url.
response = self._call_cdx_api(
video_id, f'https://www.youtube.com/watch?v={video_id}',
filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])
# Prefer the new polymer UI captures as we support extracting more metadata from them
# WBM captures seem to all switch to this layout ~July 2020
modern_captures = list(filter(lambda x: x >= 20200701000000, all_captures))
if modern_captures:
capture_dates.append(modern_captures[0])
capture_dates.append(url_date)
if all_captures:
capture_dates.append(all_captures[0])
if 'captures' in self._configuration_arg('check_all'):
capture_dates.extend(modern_captures + all_captures)
# Fallbacks if any of the above fail
capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
return orderedSet(capture_dates)
def _real_extract(self, url):
url_date, video_id = self._match_valid_url(url).groups()
urlh = None
try:
urlh = self._request_webpage(
HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
video_id, note='Fetching archived video file url', expected_status=True)
except ExtractorError as e:
# HTTP Error 404 is expected if the video is not saved.
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
self.raise_no_formats(
'The requested video is not archived, indexed, or there is an issue with web.archive.org',
expected=True)
else:
raise
capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
self.write_debug('Captures to try: ' + ', '.join(str(i) for i in capture_dates if i is not None))
info = {'id': video_id}
for capture in capture_dates:
if not capture:
continue
webpage = self._download_webpage(
(self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
note='Downloading capture webpage')
current_info = self._extract_metadata(video_id, webpage or '')
            # Try to avoid picking up metadata from deleted-video capture pages
if current_info.get('title'):
info = merge_dicts(info, current_info)
if 'captures' not in self._configuration_arg('check_all'):
break
info['thumbnails'] = self._extract_thumbnails(video_id)
if urlh:
url = compat_urllib_parse_unquote(urlh.url)
video_file_url_qs = parse_qs(url)
# Attempt to recover any ext & format info from playback url & response headers
format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
if itag and itag in YoutubeIE._formats:
format.update(YoutubeIE._formats[itag])
format.update({'format_id': itag})
else:
mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
ext = (mimetype2ext(mime)
or urlhandle_detect_ext(urlh)
or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
format.update({'ext': ext})
info['formats'] = [format]
if not info.get('duration'):
info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
if not info.get('title'):
info['title'] = video_id
return info
| 46.776786 | 213 | 0.558822 | 30,704 | 0.976777 | 0 | 0 | 417 | 0.013266 | 0 | 0 | 14,283 | 0.454381 |
4680d6848613dfb9b8af98b8d4dd6e1f33bd4389 | 1,008 | py | Python | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
]
| 20 | 2020-08-19T23:27:01.000Z | 2022-02-03T12:02:17.000Z | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
]
| 1 | 2021-04-10T18:06:05.000Z | 2021-04-10T18:06:05.000Z | Interfas Grafica XI (GUI)/InterfasGraficaXI.py | BrianMarquez3/Python-Course | 2622b4ddfd687505becfd246e82a2ed0cb9b76f3 | [
"MIT"
]
| 2 | 2020-12-03T19:35:36.000Z | 2021-11-10T14:58:39.000Z | # Interfas Grafica XI
# Menu
from tkinter import *
root=Tk()
barraMenu=Menu(root)
root.config(menu=barraMenu, width=600, height=400)
archivoMenu=Menu(barraMenu, tearoff=0)
archivoMenu.add_command(label="Nuevo")
archivoMenu.add_command(label="Guardar")
archivoMenu.add_command(label="Guardar Como")
archivoMenu.add_separator()
archivoMenu.add_command(label="Cerrar")
archivoMenu.add_command(label="Salir")
archivoEdicion=Menu(barraMenu, tearoff=0)
archivoHerramientas=Menu(barraMenu)
archivoEdicion.add_command(label="Copiar")
archivoEdicion.add_command(label="Cortar")
archivoEdicion.add_command(label="Pegar")
archivoAyuda=Menu(barraMenu, tearoff=0)
barraMenu.add_cascade(label="Archivo", menu=archivoMenu)
barraMenu.add_cascade(label="Edicion", menu=archivoEdicion)
barraMenu.add_cascade(label="Herramienta", menu=archivoHerramientas)
barraMenu.add_cascade(label="Ayuda", menu=archivoAyuda)
archivoAyuda.add_command(label="Licencia")
archivoAyuda.add_command(label="Acerca de...")
root.mainloop() | 28 | 68 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.155754 |
46812ee3bdef976af898f29d2c99337fc3788ea0 | 91 | py | Python | virtual/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | marknesh/pitches | 0a480d9bc2beafaefa0121393b1502cc05edab89 | [
"MIT"
]
| null | null | null | virtual/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | marknesh/pitches | 0a480d9bc2beafaefa0121393b1502cc05edab89 | [
"MIT"
]
| 11 | 2020-06-05T20:57:31.000Z | 2021-09-22T18:35:03.000Z | flask/lib/python3.6/site-packages/macaroonbakery/tests/__init__.py | JOFLIX/grapevines | 34576e01184570d79cc140b42ffb71d322132da6 | [
"MIT",
"Unlicense"
]
| 1 | 2020-11-04T06:48:34.000Z | 2020-11-04T06:48:34.000Z | # Copyright 2017 Canonical Ltd.
# Licensed under the LGPLv3, see LICENCE file for details.
| 30.333333 | 58 | 0.78022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.978022 |
4681cb44945ca44b04cb55846c39c0d1d3ca721a | 5,589 | py | Python | 08/postgresql_demo.py | catcherwong-archive/2019 | f9672920113b1ec0a5fcd6a6bde681f62d805763 | [
"MIT"
]
| 27 | 2019-04-07T15:31:53.000Z | 2021-08-28T16:18:34.000Z | 08/postgresql_demo.py | hca1120/2019 | 09e5f49407b8239409e857f8117877eedb6b0134 | [
"MIT"
]
| 5 | 2019-08-10T08:19:28.000Z | 2022-02-11T02:38:41.000Z | 08/postgresql_demo.py | hca1120/2019 | 09e5f49407b8239409e857f8117877eedb6b0134 | [
"MIT"
]
| 31 | 2019-04-07T15:31:57.000Z | 2022-02-02T20:36:58.000Z | # -*- coding: UTF-8 -*-
import psycopg2 #postgresql
import time
import datetime
class PgDemo:
def __init__(self, host, port, db, user, pwd):
self.host = host
self.port = port
self.db = db
self.user = user
self.pwd = pwd
def getConnection(self):
conn = None
try:
conn = psycopg2.connect(
host=self.host,
port=self.port,
database=self.db,
user=self.user,
password=self.pwd,
)
except Exception as err:
print("can not connect to the database,%s" % err)
return conn
def query_all(self):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1"
try:
cur = conn.cursor()
cur.execute(sql)
res = cur.fetchall()
# print(res)
print("id\tname\tgender\ttime")
for d in res:
print("%d\t%s\t%s\t%s" % (d[0], d[1], "male" if d[2] == 1 else "female", self.timestamp2datetime(d[3], False)))
except Exception as err:
print("query all fail, %s" % err)
finally:
cur.close()
def query_lastone(self):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1 order by create_time desc limit 1"
try:
cur = conn.cursor()
cur.execute(sql)
res = cur.fetchone()
# print(res)
print("id\tname\tgender\ttime")
print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
except Exception as err:
print("query lastone fail, %s" % err)
finally:
cur.close()
def query_byname(self, name):
with self.getConnection() as conn:
sql = "select id, name, gender, create_time from t1 where name = %s"
try:
cur = conn.cursor()
cur.execute(sql, (name, ))
res = cur.fetchone()
# print(res)
print("id\tname\tgender\ttime")
print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
except Exception as err:
print("query by name fail, %s" % err)
finally:
cur.close()
def insert_one(self, name, gender):
with self.getConnection() as conn:
sql = " insert into t1(name, gender, create_time) values(%s, %s, %s) "
try:
cur = conn.cursor()
cur.execute(sql, (name, gender, self.getCurrentTimestamp()))
print("insert ok")
except Exception as err:
print("insert one fail, %s" % err)
finally:
cur.close()
def update_genderbyid(self, id, gender):
with self.getConnection() as conn:
sql = " update t1 set gender = %s where id = %s "
try:
cur = conn.cursor()
cur.execute(sql, (gender, id))
print("update ok")
except Exception as err:
print("update gender by id fail, %s" % err)
finally:
cur.close()
def delete_byname(self, name):
with self.getConnection() as conn:
sql = " delete from t1 where name = %s "
try:
cur = conn.cursor()
cur.execute(sql, (name, ))
print("delete ok")
except Exception as err:
print("delete by name fail, %s" % err)
finally:
cur.close()
def getCurrentTimestamp(self):
ts = int ( round ( time.time() * 1000 ) )
print(ts)
return ts
def timestamp2datetime(self, timestamp, issecond):
if(issecond == True):
t = datetime.datetime.fromtimestamp(timestamp)
return t.strftime("%Y-%m-%d %H:%M:%S")
else:
t = datetime.datetime.fromtimestamp(timestamp / 1000)
return t.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
if __name__ == "__main__":
pg = PgDemo("127.0.0.1", 5432, "demo", "postgres", "123456")
print("===========insert_one==============")
pg.insert_one("wong", 1)
print("===========query_all==============")
pg.query_all()
print("===========query_lastone==============")
pg.query_lastone()
print("===========query_byname==============")
pg.query_byname("catcher")
print("===========update_genderbyid==============")
pg.update_genderbyid(4, 2)
print("===========delete_byname==============")
pg.delete_byname("wong")
print("===========query_all==============")
pg.query_all()
| 35.598726 | 138 | 0.429236 | 4,822 | 0.862458 | 0 | 0 | 0 | 0 | 0 | 0 | 1,160 | 0.207476 |
46820f0e1937a8a50c1292d89054f263875a439f | 686 | py | Python | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
]
| 1 | 2019-04-22T06:08:13.000Z | 2019-04-22T06:08:13.000Z | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
]
| null | null | null | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
]
| null | null | null | #! /usr/bin/env python
import nmrglue as ng
# read in the NMRPipe data
dic,data = ng.pipe.read("../common_data/2d_pipe/test.ft2")
# Set the parameters
u = ng.pipe.guess_udic(dic,data)
# create the converter object and initialize it with the NMRPipe data
C = ng.convert.converter()
C.from_pipe(dic,data,u)
# create Sparky data and then write it out
ng.sparky.write("2d_sparky.ucsf",*C.to_sparky(),overwrite=True)
# check the conversion against the reference Sparky file
print("Conversion complete, listing differences between files:")
sdic,sdata = ng.sparky.read("2d_sparky.ucsf")
sdic2,sdata2 = ng.sparky.read("../common_data/2d_sparky/data.ucsf")
print(ng.misc.pair_similar(sdic,sdata,sdic2,sdata2,verb=True))
| 29.826087 | 67 | 0.759475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.530612 |
46821c4a15686b2fb3b7ea49bee70f910667b4c7 | 36,931 | py | Python | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
]
| null | null | null | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
]
| null | null | null | jaqs/trade/analyze/analyze.py | WayneWan413/JAQS | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | [
"Apache-2.0"
]
| null | null | null | # encoding: utf-8
from __future__ import print_function
import os
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import Formatter
from jaqs.trade.analyze.report import Report
from jaqs.data import RemoteDataService
from jaqs.data.basic.instrument import InstManager
from jaqs.trade import common
import jaqs.util as jutil
STATIC_FOLDER = jutil.join_relative_path("trade/analyze/static")
TO_PCT = 100.0
MPL_RCPARAMS = {'figure.facecolor': '#F6F6F6',
'axes.facecolor': '#F6F6F6',
'axes.edgecolor': '#D3D3D3',
'text.color': '#555555',
'grid.color': '#B1B1B1',
'grid.alpha': 0.3,
# scale
'axes.linewidth': 2.0,
'axes.titlepad': 12,
'grid.linewidth': 1.0,
'grid.linestyle': '-',
# font size
'font.size': 13,
'axes.titlesize': 18,
'axes.labelsize': 14,
'legend.fontsize': 'small',
'lines.linewidth': 2.5,
}
class TradeRecordEmptyError(Exception):
def __init__(self, *args):
super(TradeRecordEmptyError, self).__init__(*args)
class MyFormatter(Formatter):
def __init__(self, dates, fmt='%Y%m'):
self.dates = dates
self.fmt = fmt
def __call__(self, x, pos=0):
"""Return the label for time x at position pos"""
ind = int(np.round(x))
if ind >= len(self.dates) or ind < 0:
return ''
# return self.dates[ind].strftime(self.fmt)
return pd.to_datetime(self.dates[ind], format="%Y%m%d").strftime(self.fmt)
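# Usage note (added for clarity, not part of the original module): MyFormatter maps
# integer plot positions back to the yyyymmdd dates they came from, so axes plotted
# against np.arange(len(dates)) still get readable date labels, e.g.
#     ax.xaxis.set_major_formatter(MyFormatter(returns.index, '%Y-%m-%d'))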
class BaseAnalyzer(object):
"""
Attributes
----------
_trades : pd.DataFrame
_configs : dict
data_api : BaseDataServer
_universe : set
All securities that have been traded.
"""
def __init__(self):
self.file_folder = ""
self._trades = None
self._configs = None
self.data_api = None
self.dataview = None
self._universe = []
self._closes = None
self._closes_adj = None
self.daily_position = None
self.adjust_mode = None
self.inst_map = dict()
self.performance_metrics = dict()
self.risk_metrics = dict()
self.report_dic = dict()
@property
def trades(self):
"""Read-only attribute"""
return self._trades
@property
def universe(self):
"""Read-only attribute"""
return self._universe
@property
def configs(self):
"""Read-only attribute"""
return self._configs
@property
def closes(self):
"""Read-only attribute, close prices of securities in the universe"""
return self._closes
@property
def closes_adj(self):
"""Read-only attribute, close prices of securities in the universe"""
return self._closes_adj
def initialize(self, data_api=None, dataview=None, file_folder='.'):
"""
Read trades from csv file to DataFrame of given data type.
Parameters
----------
data_api : RemoteDataService
dataview : DataView
file_folder : str
Directory path where trades and configs are stored.
"""
self.data_api = data_api
self.dataview = dataview
type_map = {'task_id': str,
'entrust_no': str,
'entrust_action': str,
'symbol': str,
'fill_price': float,
'fill_size': float,
'fill_date': np.integer,
'fill_time': np.integer,
'fill_no': str,
'commission': float}
abs_path = os.path.abspath(file_folder)
self.file_folder = abs_path
trades = pd.read_csv(os.path.join(self.file_folder, 'trades.csv'), ',', dtype=type_map)
if trades.empty:
raise TradeRecordEmptyError("No trade records found in your 'trades.csv' file. Analysis stopped.")
self._init_universe(trades.loc[:, 'symbol'].values)
self._init_configs(self.file_folder)
self._init_trades(trades)
self._init_symbol_price()
self._init_inst_data()
def _init_inst_data(self):
symbol_str = ','.join(self.universe)
if self.dataview is not None:
data_inst = self.dataview.data_inst
self.inst_map = data_inst.to_dict(orient='index')
elif self.data_api is not None:
inst_mgr = InstManager(data_api=self.data_api, symbol=symbol_str)
self.inst_map = {k: v.__dict__ for k, v in inst_mgr.inst_map.items()}
del inst_mgr
else:
raise ValueError("no dataview or dataapi provided.")
def _init_trades(self, df):
"""Add datetime column. """
df.loc[:, 'fill_dt'] = jutil.combine_date_time(df.loc[:, 'fill_date'], df.loc[:, 'fill_time'])
df = df.set_index(['symbol', 'fill_dt']).sort_index(axis=0)
# self._trades = jutil.group_df_to_dict(df, by='symbol')
self._trades = df
def _init_symbol_price(self):
"""Get close price of securities in the universe from data server."""
if self.dataview is not None:
df_close = self.dataview.get_ts('close', start_date=self.start_date, end_date=self.end_date)
df_close_adj = self.dataview.get_ts('close_adj', start_date=self.start_date, end_date=self.end_date)
else:
df, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close = df.pivot(index='trade_date', columns='symbol', values='close')
df_adj, msg = self.data_api.daily(symbol=','.join(self.universe), fields='trade_date,symbol,close',
start_date=self.start_date, end_date=self.end_date)
if msg != '0,':
print(msg)
df_close_adj = df_adj.pivot(index='trade_date', columns='symbol', values='close')
self._closes = df_close
self._closes_adj = df_close_adj
def _init_universe(self, securities):
"""Return a set of securities."""
self._universe = set(securities)
def _init_configs(self, folder):
import codecs
with codecs.open(os.path.join(folder, 'configs.json'), 'r', encoding='utf-8') as f:
configs = json.load(f)
self._configs = configs
self.init_balance = self.configs['init_balance']
self.start_date = self.configs['start_date']
self.end_date = self.configs['end_date']
@staticmethod
def _process_trades(df):
"""Add various statistics to trades DataFrame."""
from jaqs.trade import common
# df = df.set_index('fill_date')
# pre-process
cols_to_drop = ['task_id', 'entrust_no', 'fill_no']
df = df.drop(cols_to_drop, axis=1)
def _apply(gp_df):
# calculation of non-cumulative fields
direction = gp_df['entrust_action'].apply(lambda s: 1 if common.ORDER_ACTION.is_positive(s) else -1)
fill_size, fill_price = gp_df['fill_size'], gp_df['fill_price']
turnover = fill_size * fill_price
gp_df.loc[:, 'BuyVolume'] = (direction + 1) / 2 * fill_size
gp_df.loc[:, 'SellVolume'] = (direction - 1) / -2 * fill_size
# Calculation of cumulative fields
gp_df.loc[:, 'CumVolume'] = fill_size.cumsum()
gp_df.loc[:, 'CumTurnOver'] = turnover.cumsum()
gp_df.loc[:, 'CumNetTurnOver'] = (turnover * -direction).cumsum()
gp_df.loc[:, 'position'] = (fill_size * direction).cumsum()
gp_df.loc[:, 'AvgPosPrice'] = calc_avg_pos_price(gp_df.loc[:, 'position'].values, fill_price.values)
gp_df.loc[:, 'CumProfit'] = (gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * fill_price)
return gp_df
gp = df.groupby(by='symbol')
res = gp.apply(_apply)
return res
def process_trades(self):
# self._trades = {k: self._process_trades(v) for k, v in self.trades.items()}
self._trades = self._process_trades(self._trades)
def get_pos_change_info(self):
trades = pd.concat(self.trades.values(), axis=0)
gp = trades.groupby(by=['fill_date'], as_index=False)
res = OrderedDict()
account = OrderedDict()
for date, df in gp:
df_mod = df.loc[:, ['symbol', 'entrust_action', 'fill_size', 'fill_price',
'position', 'AvgPosPrice']]
df_mod.columns = ['symbol', 'action', 'size', 'price',
'position', 'cost price']
res[str(date)] = df_mod
mv = sum(df_mod.loc[:, 'price'] * df.loc[:, 'position'])
current_profit = sum(df.loc[:, 'CumProfit'])
cash = self.configs['init_balance'] + current_profit - mv
account[str(date)] = {'market_value': mv, 'cash': cash}
self.position_change = res
self.account = account
def get_daily(self):
close = self.closes
trade = self.trades
        # pre-process
trade_cols = ['fill_date', 'BuyVolume', 'SellVolume', 'commission', 'position', 'AvgPosPrice', 'CumNetTurnOver']
trade = trade.loc[:, trade_cols]
gp = trade.groupby(by=['symbol', 'fill_date'])
func_last = lambda ser: ser.iat[-1]
trade = gp.agg({'BuyVolume': np.sum, 'SellVolume': np.sum, 'commission': np.sum,
'position': func_last, 'AvgPosPrice': func_last, 'CumNetTurnOver': func_last})
trade.index.names = ['symbol', 'trade_date']
# get daily position
df_position = trade['position'].unstack('symbol').fillna(method='ffill').fillna(0.0)
daily_position = df_position.reindex(close.index)
daily_position = daily_position.fillna(method='ffill').fillna(0)
self.daily_position = daily_position
# calculate statistics
close = pd.DataFrame(close.T.stack())
close.columns = ['close']
close.index.names = ['symbol', 'trade_date']
merge = pd.concat([close, trade], axis=1, join='outer')
def _apply(gp_df):
cols_nan_to_zero = ['BuyVolume', 'SellVolume', 'commission']
cols_nan_fill = ['close', 'position', 'AvgPosPrice', 'CumNetTurnOver']
# merge: pd.DataFrame
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(method='ffill')
gp_df.loc[:, cols_nan_fill] = gp_df.loc[:, cols_nan_fill].fillna(0)
gp_df.loc[:, cols_nan_to_zero] = gp_df.loc[:, cols_nan_to_zero].fillna(0)
mask = gp_df.loc[:, 'AvgPosPrice'] < 1e-5
gp_df.loc[mask, 'AvgPosPrice'] = gp_df.loc[mask, 'close']
gp_df.loc[:, 'CumProfit'] = gp_df.loc[:, 'CumNetTurnOver'] + gp_df.loc[:, 'position'] * gp_df.loc[:, 'close']
gp_df.loc[:, 'CumProfitComm'] = gp_df['CumProfit'] - gp_df['commission'].cumsum()
daily_net_turnover = gp_df['CumNetTurnOver'].diff(1).fillna(gp_df['CumNetTurnOver'].iat[0])
daily_position_change = gp_df['position'].diff(1).fillna(gp_df['position'].iat[0])
gp_df['trading_pnl'] = (daily_net_turnover + gp_df['close'] * daily_position_change)
gp_df['holding_pnl'] = (gp_df['close'].diff(1) * gp_df['position'].shift(1)).fillna(0.0)
gp_df.loc[:, 'total_pnl'] = gp_df['trading_pnl'] + gp_df['holding_pnl']
return gp_df
gp = merge.groupby(by='symbol')
res = gp.apply(_apply)
self.daily = res
'''
def get_daily(self):
"""Add various statistics to daily DataFrame."""
self.daily = self._get_daily(self.closes, self.trades)
daily_dic = dict()
for sec, df_trade in self.trades.items():
df_close = self.closes[sec].rename('close')
res = self._get_daily(df_close, df_trade)
daily_dic[sec] = res
self.daily = daily_dic
'''
def get_returns(self, compound_return=True, consider_commission=True):
cols = ['trading_pnl', 'holding_pnl', 'total_pnl', 'commission', 'CumProfitComm', 'CumProfit']
'''
dic_symbol = {sec: self.inst_map[sec]['multiplier'] * df_daily.loc[:, cols]
for sec, df_daily in self.daily.items()}
df_profit = pd.concat(dic_symbol, axis=1) # this is cumulative profit
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
df_pnl = df_profit.stack(level=1)
df_pnl = df_pnl.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
'''
daily = self.daily.loc[:, cols]
daily = daily.stack().unstack('symbol')
df_pnl = daily.sum(axis=1)
df_pnl = df_pnl.unstack(level=1)
self.df_pnl = df_pnl
        # TODO temporary solution
if consider_commission:
strategy_value = (df_pnl['total_pnl'] - df_pnl['commission']).cumsum() + self.init_balance
else:
strategy_value = df_pnl['total_pnl'].cumsum() + self.init_balance
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.performance_metrics['Annual Return (%)'] =\
100 * (np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1)
self.performance_metrics['Annual Volatility (%)'] =\
100 * (df_returns.loc[:, 'active'].std() * np.sqrt(common.CALENDAR_CONST.TRADE_DAYS_PER_YEAR))
self.performance_metrics['Sharpe Ratio'] = (self.performance_metrics['Annual Return (%)']
/ self.performance_metrics['Annual Volatility (%)'])
self.risk_metrics['Beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
def plot_pnl(self, save_folder=None):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
if save_folder is None:
save_folder = self.file_folder
fig1 = plot_portfolio_bench_pnl(self.returns.loc[:, 'strat_cum'],
self.returns.loc[:, 'bench_cum'],
self.returns.loc[:, 'active_cum'])
fig1.savefig(os.path.join(save_folder,'pnl_img.png'), facecolor=fig1.get_facecolor(), dpi=fig1.get_dpi())
fig2 = plot_daily_trading_holding_pnl(self.df_pnl['trading_pnl'],
self.df_pnl['holding_pnl'],
self.df_pnl['total_pnl'],
self.df_pnl['total_pnl'].cumsum())
fig2.savefig(os.path.join(save_folder,'pnl_img_trading_holding.png'), facecolor=fig2.get_facecolor(), dpi=fig2.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
def plot_pnl_OLD(self, save_folder=None):
if save_folder is None:
save_folder = self.file_folder
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, figsize=(21, 8), dpi=300, sharex=True)
idx0 = self.returns.index
idx = np.arange(len(idx0))
bar_width = 0.3
ax0.bar(idx-bar_width/2, self.df_pnl['trading_pnl'], width=bar_width, color='indianred', label='Trading PnL',)
ax0.bar(idx+bar_width/2, self.df_pnl['holding_pnl'], width=bar_width, color='royalblue', label='Holding PnL')
ax0.axhline(0.0, color='k', lw=1, ls='--')
# ax0.plot(idx, self.pnl['total_pnl'], lw=1.5, color='violet', label='Total PnL')
ax0.legend(loc='upper left')
ax1.plot(idx, self.returns.loc[:, 'bench_cum'], label='Benchmark')
ax1.plot(idx, self.returns.loc[:, 'strat_cum'], label='Strategy')
ax1.legend(loc='upper left')
ax2.plot(idx, self.returns.loc[:, 'active_cum'], label='Extra Return')
ax2.legend(loc='upper left')
ax2.set_xlabel("Date")
ax2.set_ylabel("Net Value")
ax1.set_ylabel("Net Value")
ax2.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'pnl_img.png'))
plt.close()
def gen_report(self, source_dir, template_fn, out_folder='.', selected=None):
"""
Generate HTML (and PDF) report of the trade analysis.
Parameters
----------
source_dir : str
path of directory where HTML template and css files are stored.
template_fn : str
File name of HTML template.
out_folder : str
Output folder of report.
selected : list of str or None
List of symbols whose detailed PnL curve and position will be plotted.
# TODO: this parameter should not belong to function
"""
dic = dict()
dic['html_title'] = "Alpha Strategy Backtest Result"
dic['selected_securities'] = selected
# we do not want to show username / password in report
dic['props'] = {k: v for k, v in self.configs.items() if ('username' not in k and 'password' not in k)}
dic['performance_metrics'] = self.performance_metrics
dic['risk_metrics'] = self.risk_metrics
dic['position_change'] = self.position_change
dic['account'] = self.account
dic['df_daily'] = jutil.group_df_to_dict(self.daily, by='symbol')
dic['daily_position'] = self.daily_position
self.report_dic.update(dic)
self.returns.to_csv(os.path.join(out_folder, 'returns.csv'))
r = Report(self.report_dic, source_dir=source_dir, template_fn=template_fn, out_folder=out_folder)
r.generate_html()
r.output_html('report.html')
def do_analyze(self, result_dir, selected_sec=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=selected_sec)
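# Typical driver (an assumed sketch for illustration; the exact wiring lives in the
# strategy examples and the names below are placeholders): once a backtest has written
# trades.csv and configs.json into `result_dir`, an analyzer is used roughly as
#     ta = EventAnalyzer()                      # or AlphaAnalyzer() for alpha strategies
#     ta.initialize(dataview=dv, file_folder=result_dir)
#     ta.do_analyze(result_dir=result_dir, selected_sec=['600030.SH'])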
class EventAnalyzer(BaseAnalyzer):
def __init__(self):
super(EventAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.data_benchmark = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
def initialize(self, data_server_=None, dataview=None, file_folder='.'):
super(EventAnalyzer, self).initialize(data_api=data_server_, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
else:
benchmark = self.configs.get('benchmark', "")
if benchmark and data_server_:
df, msg = data_server_.daily(benchmark, start_date=self.closes.index[0], end_date=self.closes.index[-1])
self.data_benchmark = df.set_index('trade_date').loc[:, ['close']]
self.data_benchmark.columns = ['bench']
else:
self.data_benchmark = pd.DataFrame(index=self.closes.index, columns=['bench'], data=np.ones(len(self.closes), dtype=float))
class AlphaAnalyzer(BaseAnalyzer):
def __init__(self):
super(AlphaAnalyzer, self).__init__()
self.metrics = dict()
self.daily = None
self.returns = None # OrderedDict
self.position_change = None # OrderedDict
self.account = None # OrderedDict
self.df_brinson = None
self.data_benchmark = None
def initialize(self, data_api=None, dataview=None, file_folder='.'):
super(AlphaAnalyzer, self).initialize(data_api=data_api, dataview=dataview,
file_folder=file_folder)
if self.dataview is not None and self.dataview.data_benchmark is not None:
self.data_benchmark = self.dataview.data_benchmark.loc[(self.dataview.data_benchmark.index >= self.start_date)
&(self.dataview.data_benchmark.index <= self.end_date)]
@staticmethod
def _to_pct_return(arr, cumulative=False):
"""Convert portfolio value to portfolio (linear) return."""
r = np.empty_like(arr)
r[0] = 0.0
if cumulative:
r[1:] = arr[1:] / arr[0] - 1
else:
r[1:] = arr[1:] / arr[:-1] - 1
return r
'''
def get_returns_OLD(self, compound_return=True, consider_commission=True):
profit_col_name = 'CumProfitComm' if consider_commission else 'CumProfit'
vp_list = {sec: df_profit.loc[:, profit_col_name] for sec, df_profit in self.daily.items()}
df_profit = pd.concat(vp_list, axis=1) # this is cumulative profit
# TODO temperary solution
df_profit = df_profit.fillna(method='ffill').fillna(0.0)
strategy_value = df_profit.sum(axis=1) + self.configs['init_balance']
market_values = pd.concat([strategy_value, self.data_benchmark], axis=1).fillna(method='ffill')
market_values.columns = ['strat', 'bench']
df_returns = market_values.pct_change(periods=1).fillna(0.0)
df_returns = df_returns.join((df_returns.loc[:, ['strat', 'bench']] + 1.0).cumprod(), rsuffix='_cum')
if compound_return:
df_returns.loc[:, 'active_cum'] = df_returns['strat_cum'] - df_returns['bench_cum'] + 1
df_returns.loc[:, 'active'] = df_returns['active_cum'].pct_change(1).fillna(0.0)
else:
df_returns.loc[:, 'active'] = df_returns['strat'] - df_returns['bench']
df_returns.loc[:, 'active_cum'] = df_returns['active'].add(1.0).cumprod(axis=0)
start = pd.to_datetime(self.configs['start_date'], format="%Y%m%d")
end = pd.to_datetime(self.configs['end_date'], format="%Y%m%d")
years = (end - start).days / 365.0
self.metrics['yearly_return'] = np.power(df_returns.loc[:, 'active_cum'].values[-1], 1. / years) - 1
self.metrics['yearly_vol'] = df_returns.loc[:, 'active'].std() * np.sqrt(225.)
self.metrics['beta'] = np.corrcoef(df_returns.loc[:, 'bench'], df_returns.loc[:, 'strat'])[0, 1]
self.metrics['sharpe'] = self.metrics['yearly_return'] / self.metrics['yearly_vol']
# bt_strat_mv = pd.read_csv('bt_strat_mv.csv').set_index('trade_date')
# df_returns = df_returns.join(bt_strat_mv, how='right')
self.returns = df_returns
'''
def _get_index_weight(self):
if self.dataview is not None:
res = self.dataview.get_ts('index_weight', start_date=self.start_date, end_date=self.end_date)
else:
res = self.data_api.get_index_weights_daily(self.universe, self.start_date, self.end_date)
return res
def _brinson(self, close, pos, index_weight, group):
"""
Brinson Attribution.
Parameters
----------
close : pd.DataFrame
Index is date, columns are symbols.
pos : pd.DataFrame
Index is date, columns are symbols.
index_weight : pd.DataFrame
Index is date, columns are symbols.
group : pd.DataFrame
Index is date, columns are symbols.
Returns
-------
dict
"""
def group_sum(df, group_daily):
groups = np.unique(group_daily.values.flatten())
mask = np.isnan(groups.astype(float))
groups = groups[np.logical_not(mask)]
res = pd.DataFrame(index=df.index, columns=groups, data=np.nan)
for g in groups:
mask = group_daily == g
tmp = df[mask]
res.loc[:, g] = tmp.sum(axis=1)
return res
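        # The steps below follow the standard Brinson scheme (summary comment added
        # for clarity): per group, allocation = (w_pf - w_idx) * r_idx and
        # selection = (r_pf - r_idx) * w_idx, with interaction taken as the remainder
        # of the total active return.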
ret = close.pct_change(1)
pos_sum = pos.sum(axis=1)
pf_weight = pos.div(pos_sum, axis=0)
pf_weight.loc[pos_sum == 0, :] = 0.0
assert pf_weight.isnull().sum().sum() == 0
pf_weight = pf_weight.reindex(index=ret.index, columns=ret.columns)
pf_weight = pf_weight.fillna(0.0)
weighted_ret_pf = ret.mul(pf_weight)
weighted_ret_index = ret.mul(index_weight)
index_group_weight = group_sum(index_weight, group)
pf_group_weight = group_sum(pf_weight, group)
pf_group_ret = group_sum(weighted_ret_pf, group).div(pf_group_weight)
index_group_ret = group_sum(weighted_ret_index, group).div(index_group_weight)
allo_ret_group = (pf_group_weight - index_group_weight).mul(index_group_ret)
allo_ret = allo_ret_group.sum(axis=1)
selection_ret_group = (pf_group_ret - index_group_ret).mul(index_group_weight)
selection_ret = selection_ret_group.sum(axis=1)
active_ret = (weighted_ret_pf.sum(axis=1) - weighted_ret_index.sum(axis=1))
inter_ret = active_ret - selection_ret - allo_ret
df_brinson = pd.DataFrame(index=allo_ret.index,
data={'allocation': allo_ret,
'selection': selection_ret,
'interaction': inter_ret,
'total_active': active_ret})
return {'df_brinson': df_brinson, 'allocation': allo_ret_group, 'selection': selection_ret_group}
def brinson(self, group):
"""
Parameters
----------
group : str or pd.DataFrame
If group is string, this function will try to fetch the corresponding DataFrame from DataView.
If group is pd.DataFrame, it will be used as-is.
Returns
-------
"""
if isinstance(group, str):
group = self.dataview.get_ts(group, start_date=self.start_date, end_date=self.end_date)
elif isinstance(group, pd.DataFrame):
pass
else:
raise ValueError("Group must be string or DataFrame. But {} is provided.".format(group))
if group is None or group.empty:
raise ValueError("group is None or group is empty")
close = self.closes_adj
pos = self.daily_position
index_weight = self._get_index_weight()
res_dic = self._brinson(close, pos, index_weight, group)
df_brinson = res_dic['df_brinson']
self.df_brinson = df_brinson
self.report_dic['df_brinson'] = df_brinson
plot_brinson(df_brinson, save_folder=self.file_folder)
def do_analyze(self, result_dir, selected_sec=None, brinson_group=None):
if selected_sec is None:
selected_sec = []
print("process trades...")
self.process_trades()
print("get daily stats...")
self.get_daily()
print("calc strategy return...")
self.get_returns(consider_commission=False)
not_none_sec = []
if len(selected_sec) > 0:
print("Plot single securities PnL")
for symbol in selected_sec:
df_daily = self.daily.loc[pd.IndexSlice[symbol, :], :]
df_daily.index = df_daily.index.droplevel(0)
if df_daily is not None:
not_none_sec.append(symbol)
plot_trades(df_daily, symbol=symbol, save_folder=self.file_folder)
print("Plot strategy PnL...")
self.plot_pnl(result_dir)
if brinson_group is not None:
print("Do brinson attribution.")
group = self.dataview.get_ts(brinson_group)
if group is None:
raise ValueError("group data is None.")
self.brinson(group)
print("generate report...")
self.gen_report(source_dir=STATIC_FOLDER, template_fn='report_template.html',
out_folder=result_dir,
selected=not_none_sec)
def plot_daily_trading_holding_pnl(trading, holding, total, total_cum):
    """
    Parameters
    ----------
    trading, holding, total, total_cum : pd.Series
        Daily trading PnL, daily holding PnL, daily total PnL and the cumulative
        total PnL, all indexed by trade date.
    """
idx0 = total.index
n = len(idx0)
idx = np.arange(n)
fig, (ax0, ax2, ax3) = plt.subplots(3, 1, figsize=(16, 13.5), sharex=True)
ax1 = ax0.twinx()
bar_width = 0.4
profit_color, lose_color = '#D63434', '#2DB635'
curve_color = '#174F67'
y_label = 'Profit / Loss ($)'
color_arr_raw = np.array([profit_color] * n)
color_arr = color_arr_raw.copy()
color_arr[total < 0] = lose_color
ax0.bar(idx, total, width=bar_width, color=color_arr)
ax0.set(title='Daily PnL', ylabel=y_label, xlim=[-2, n+2],)
ax0.xaxis.set_major_formatter(MyFormatter(idx0, '%y-%m-%d'))
ax1.plot(idx, total_cum, lw=1.5, color=curve_color)
ax1.set(ylabel='Cum. ' + y_label)
ax1.yaxis.label.set_color(curve_color)
color_arr = color_arr_raw.copy()
color_arr[trading < 0] = lose_color
ax2.bar(idx-bar_width/2, trading, width=bar_width, color=color_arr)
ax2.set(title='Daily Trading PnL', ylabel=y_label)
color_arr = color_arr_raw.copy()
color_arr[holding < 0] = lose_color
ax3.bar(idx+bar_width/2, holding, width=bar_width, color=color_arr)
ax3.set(title='Daily Holding PnL', ylabel=y_label, xticks=idx[: : n//10])
return fig
def plot_portfolio_bench_pnl(portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret):
    """
    Parameters
    ----------
    portfolio_cum_ret, benchmark_cum_ret, excess_cum_ret : pd.Series
        Cumulative net value of the strategy, the benchmark and the excess
        (active) return, all indexed by trade date.
    """
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16, 9), sharex=True)
idx_dt = portfolio_cum_ret.index
idx = np.arange(len(idx_dt))
y_label_ret = "Cumulative Return (%)"
ax1.plot(idx, (benchmark_cum_ret-1) * TO_PCT, label='Benchmark', color='#174F67')
ax1.plot(idx, (portfolio_cum_ret-1) * TO_PCT, label='Strategy', color='#198DD6')
ax1.legend(loc='upper left')
ax1.set(title="Absolute Return of Portfolio and Benchmark",
#xlabel="Date",
ylabel=y_label_ret)
ax1.grid(axis='y')
ax2.plot(idx, (excess_cum_ret-1) * TO_PCT, label='Extra Return', color='#C37051')
ax2.set(title="Excess Return Compared to Benchmark", ylabel=y_label_ret
#xlabel="Date",
)
ax2.grid(axis='y')
ax2.xaxis.set_major_formatter(MyFormatter(idx_dt, '%y-%m-%d')) # 17-09-31
fig.tight_layout()
return fig
def plot_brinson(df, save_folder):
    """
    Parameters
    ----------
    df : pd.DataFrame
        Brinson attribution result with 'allocation', 'selection', 'interaction'
        and 'total_active' columns, indexed by trade date.
    save_folder : str
        Folder in which 'brinson_attribution.png' is saved.
    """
allo, selec, inter, total = df['allocation'], df['selection'], df['interaction'], df['total_active']
fig, ax1 = plt.subplots(1, 1, figsize=(21, 8))
idx0 = df.index
idx = range(len(idx0))
ax1.plot(idx, selec, lw=1.5, color='indianred', label='Selection Return')
ax1.plot(idx, allo, lw=1.5, color='royalblue', label='Allocation Return')
ax1.plot(idx, inter, lw=1.5, color='purple', label='Interaction Return')
# ax1.plot(idx, total, lw=1.5, ls='--', color='k', label='Total Active Return')
ax1.axhline(0.0, color='k', lw=0.5, ls='--')
ax1.legend(loc='upper left')
ax1.set_xlabel("Date")
ax1.set_ylabel("Return")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m-%d'))
plt.tight_layout()
fig.savefig(os.path.join(save_folder, 'brinson_attribution.png'))
plt.close()
def calc_avg_pos_price(pos_arr, price_arr):
"""
Calculate average cost price using position and fill price.
When position = 0, cost price = symbol price.
"""
assert len(pos_arr) == len(price_arr)
avg_price = np.zeros_like(pos_arr, dtype=float)
avg_price[0] = price_arr[0]
for i in range(pos_arr.shape[0] - 1):
if pos_arr[i+1] == 0:
avg_price[i+1] = 0.0
else:
pos_diff = pos_arr[i+1] - pos_arr[i]
if pos_arr[i] == 0 or pos_diff * pos_arr[i] > 0:
count = True
else:
count = False
if count:
avg_price[i+1] = (avg_price[i] * pos_arr[i] + pos_diff * price_arr[i+1]) * 1. / pos_arr[i+1]
else:
avg_price[i+1] = avg_price[i]
return avg_price
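# Worked example (illustrative only): buying 100 shares at 10.0 and another 100
# at 12.0 gives an average cost of (100*10 + 100*12) / 200 = 11.0; when the
# position drops to zero the cost price is reset to 0 here and later replaced by
# the close price in get_daily().
#     calc_avg_pos_price(np.array([100, 200, 0]), np.array([10., 12., 11.]))
#     # -> array([10., 11., 0.])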
def plot_trades(df, symbol="", save_folder='.', marker_size_adjust_ratio=0.1):
old_mpl_rcparams = {k: v for k, v in mpl.rcParams.items()}
mpl.rcParams.update(MPL_RCPARAMS)
idx0 = df.index
idx = range(len(idx0))
price = df.loc[:, 'close']
bv, sv = df.loc[:, 'BuyVolume'].values, df.loc[:, 'SellVolume'].values
profit = df.loc[:, 'CumProfit'].values
avgpx = df.loc[:, 'AvgPosPrice']
bv_m = np.max(bv)
sv_m = np.max(sv)
if bv_m > 0:
bv = bv / bv_m * 100
if sv_m > 0:
sv = sv / sv_m * 100
fig = plt.figure(figsize=(14, 10))
ax1 = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
ax3 = plt.subplot2grid((4, 1), (3, 0), rowspan=1, sharex=ax1)
ax2 = ax1.twinx()
ax1.plot(idx, price, label='Price', linestyle='-', lw=1, marker='', color='yellow')
ax1.scatter(idx, price, label='buy', marker='o', s=bv, color='indianred')
ax1.scatter(idx, price, label='sell', marker='o', s=sv, color='forestgreen')
ax1.plot(idx, avgpx, lw=1, marker='', color='green')
ax1.legend(loc='upper left')
ax1.set(title="Price, Trades and PnL for {:s}".format(symbol), ylabel="Price ($)")
ax1.xaxis.set_major_formatter(MyFormatter(idx0, '%Y-%m'))
ax2.plot(idx, profit, label='PnL', color='k', lw=1, ls='--', alpha=.4)
ax2.legend(loc='upper right')
ax2.set(ylabel="Profit / Loss ($)")
# ax1.xaxis.set_major_formatter(MyFormatter(df.index))#, '%H:%M'))
ax3.plot(idx, df.loc[:, 'position'], marker='D', markersize=3, lw=2)
ax3.axhline(0, color='k', lw=1, ls='--', alpha=0.8)
ax3.set(title="Position of {:s}".format(symbol))
fig.tight_layout()
fig.savefig(save_folder + '/' + "{}.png".format(symbol), facecolor=fig.get_facecolor(), dpi=fig.get_dpi())
mpl.rcParams.update(old_mpl_rcparams)
| 39.583065 | 139 | 0.584198 | 29,648 | 0.802794 | 0 | 0 | 2,361 | 0.06393 | 0 | 0 | 10,724 | 0.290379 |
4682bbaf850d64b54a79c88a195b6f6e2f183e48 | 114 | py | Python | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
]
| 6 | 2019-10-10T05:42:50.000Z | 2022-02-27T04:59:29.000Z | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
]
| null | null | null | lightnet/data/transform/__init__.py | eavise-kul/lightnet | d2d5d3fff8f929c3683c34f176217649375b98e1 | [
"MIT"
]
| 4 | 2020-01-25T20:16:23.000Z | 2021-04-29T13:02:34.000Z | #
# Lightnet data transforms
# Copyright EAVISE
#
from .pre import *
from .post import *
from .util import *
| 12.666667 | 28 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.438596 |
46830865694c3242ec731476bef2c0bab11ffa36 | 420 | py | Python | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
]
| null | null | null | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
]
| 85 | 2020-07-24T00:04:28.000Z | 2022-02-10T10:35:15.000Z | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
]
| null | null | null | from rest_framework import status
from rest_framework.exceptions import APIException
class BadSource(APIException):
"""
Exception for when a lazily-loaded data source can't
be accessed for some reason
"""
status_code = status.HTTP_417_EXPECTATION_FAILED
default_code = 'bad_source'
def __init__(self, source: str, reason: str):
super().__init__(f"Bad source '{source}': {reason}")
| 28 | 60 | 0.719048 | 332 | 0.790476 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.347619 |
468313da2c3cc1e70694859bf5264667fcd82781 | 8,935 | py | Python | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
]
| 1 | 2021-03-15T13:53:30.000Z | 2021-03-15T13:53:30.000Z | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
]
| null | null | null | build_tools/docker/manage_images.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages IREE Docker image definitions.
Includes information on their dependency graph and GCR URL.
Example usage:
Rebuild the cmake image and all images that transitively depend on it,
tagging them with `latest`:
python3 build_tools/docker/manage_images.py --build --image cmake
Print the commands for rebuilding the cmake image and all images that
transitively depend on it, but don't take side-effecting actions:
python3 build_tools/docker/manage_images.py --build --image cmake --dry-run
Push all `prod` images to GCR:
python3 build_tools/docker/manage_images.py --push --tag prod --images all
Rebuild and push all images and update references to them in the repository:
python3 build_tools/docker/manage_images.py --push --images all
--update-references
"""
import argparse
import fileinput
import os
import posixpath
import re
import subprocess
import sys
IREE_GCR_URL = 'gcr.io/iree-oss/'
DOCKER_DIR = 'build_tools/docker/'
# Map from image names to images that they depend on.
IMAGES_TO_DEPENDENCIES = {
'base': [],
'bazel': ['base', 'util'],
'bazel-python': ['bazel'],
'bazel-tensorflow': ['bazel-python'],
'bazel-tensorflow-nvidia': ['bazel-tensorflow-vulkan'],
'bazel-tensorflow-swiftshader': ['bazel-tensorflow-vulkan', 'swiftshader'],
'bazel-tensorflow-vulkan': ['bazel-tensorflow'],
'cmake': ['base', 'util'],
'cmake-android': ['cmake', 'util'],
'cmake-python': ['cmake'],
'cmake-python-nvidia': ['cmake-python-vulkan'],
'cmake-python-swiftshader': ['cmake-python-vulkan', 'swiftshader'],
'cmake-python-vulkan': ['cmake-python'],
'rbe-toolchain': [],
'swiftshader': ['cmake'],
'util': [],
}
IMAGES_TO_DEPENDENT_IMAGES = {k: [] for k in IMAGES_TO_DEPENDENCIES}
for image, dependencies in IMAGES_TO_DEPENDENCIES.items():
for dependency in dependencies:
IMAGES_TO_DEPENDENT_IMAGES[dependency].append(image)
IMAGES_HELP = [f'`{name}`' for name in IMAGES_TO_DEPENDENCIES]
IMAGES_HELP = f'{", ".join(IMAGES_HELP)} or `all`'
def parse_arguments():
"""Parses command-line options."""
parser = argparse.ArgumentParser(
description="Build IREE's Docker images and optionally push them to GCR.")
parser.add_argument(
'--images',
'--image',
type=str,
required=True,
action='append',
help=f'Name of the image to build: {IMAGES_HELP}.')
parser.add_argument(
'--tag',
type=str,
default='latest',
help='Tag for the images to build. Defaults to `latest` (which is good '
'for testing changes in a PR). Use `prod` to update the images that the '
'CI caches.')
parser.add_argument(
'--pull',
action='store_true',
help='Pull the specified image before building.')
parser.add_argument(
'--build',
action='store_true',
help='Build new images from the current Dockerfiles.')
parser.add_argument(
'--push',
action='store_true',
help='Push the built images to GCR. Requires gcloud authorization.')
parser.add_argument(
'--update_references',
'--update-references',
action='store_true',
help='Update all references to the specified images to point at the new'
' digest.')
parser.add_argument(
'--dry_run',
'--dry-run',
'-n',
action='store_true',
help='Print output without building or pushing any images.')
args = parser.parse_args()
for image in args.images:
if image == 'all':
      # Sort for a deterministic order
args.images = sorted(IMAGES_TO_DEPENDENCIES.keys())
elif image not in IMAGES_TO_DEPENDENCIES:
raise parser.error('Expected --image to be one of:\n'
f' {IMAGES_HELP}\n'
f'but got `{image}`.')
return args
def get_ordered_images_to_process(images):
unmarked_images = list(images)
# Python doesn't have a builtin OrderedSet
marked_images = set()
order = []
def visit(image):
if image in marked_images:
return
for dependent_images in IMAGES_TO_DEPENDENT_IMAGES[image]:
visit(dependent_images)
marked_images.add(image)
order.append(image)
while unmarked_images:
visit(unmarked_images.pop())
order.reverse()
return order
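# Example (added for illustration; the exact ordering within a level may differ):
# get_ordered_images_to_process(['cmake']) returns 'cmake' first, followed by
# everything that transitively depends on it (e.g. 'cmake-python',
# 'cmake-python-vulkan', 'cmake-python-nvidia', 'swiftshader', ...), so each
# image is always processed before the images built on top of it.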
def stream_command(command, dry_run=False):
print(f'Running: `{" ".join(command)}`')
if dry_run:
return 0
process = subprocess.Popen(
command,
bufsize=1,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
for line in process.stdout:
print(line, end='')
if process.poll() is None:
raise RuntimeError('Unexpected end of output while process is not finished')
return process.poll()
def check_stream_command(command, dry_run=False):
exit_code = stream_command(command, dry_run=dry_run)
if exit_code != 0:
print(f'Command failed with exit code {exit_code}: `{" ".join(command)}`')
sys.exit(exit_code)
def get_repo_digest(image):
inspect_command = [
'docker',
'image',
'inspect',
f'{image}',
'-f',
'{{index .RepoDigests 0}}',
]
inspect_process = subprocess.run(
inspect_command,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=10)
if inspect_process.returncode != 0:
print(f'Computing the repository digest for {image} failed.'
' Has it been pushed to GCR?')
print(f'Output from `{" ".join(inspect_command)}`:')
print(inspect_process.stdout, end='')
print(inspect_process.stderr, end='')
sys.exit(inspect_process.returncode)
_, repo_digest = inspect_process.stdout.strip().split('@')
return repo_digest
def update_rbe_reference(digest, dry_run=False):
print('Updating WORKSPACE file for rbe-toolchain')
for line in fileinput.input(files=['WORKSPACE'], inplace=(not dry_run)):
if line.strip().startswith('digest ='):
print(re.sub('sha256:[a-zA-Z0-9]+', digest, line), end='')
else:
print(line, end='')
def update_references(image_name, digest, dry_run=False):
print(f'Updating references to {image_name}')
grep_command = ['git', 'grep', '-l', f'{image_name}@sha256']
grep_process = subprocess.run(
grep_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=5,
universal_newlines=True)
if grep_process.returncode > 1:
print(f'{" ".join(grep_command)} '
f'failed with exit code {grep_process.returncode}')
sys.exit(grep_process.returncode)
if grep_process.returncode == 1:
print(f'Found no references to {image_name}')
return
files = grep_process.stdout.split()
print(f'Updating references in {len(files)} files: {files}')
for line in fileinput.input(files=files, inplace=(not dry_run)):
print(
re.sub(f'{image_name}@sha256:[a-zA-Z0-9]+', f'{image_name}@{digest}',
line),
end='')
if __name__ == '__main__':
args = parse_arguments()
# Ensure the user has the correct authorization if they try to push to GCR.
if args.push:
if stream_command(['which', 'gcloud']) != 0:
print('gcloud not found.'
' See https://cloud.google.com/sdk/install for installation.')
sys.exit(1)
check_stream_command(['gcloud', 'auth', 'configure-docker'],
dry_run=args.dry_run)
images_to_process = get_ordered_images_to_process(args.images)
print(f'Also processing dependent images. Will process: {images_to_process}')
for image in images_to_process:
print(f'Processing image {image}')
image_name = posixpath.join(IREE_GCR_URL, image)
image_tag = f'{image_name}:{args.tag}'
image_path = os.path.join(DOCKER_DIR, image)
if args.pull:
check_stream_command(['docker', 'pull', image_tag], dry_run=args.dry_run)
if args.build:
check_stream_command(['docker', 'build', '--tag', image_tag, image_path],
dry_run=args.dry_run)
if args.push:
check_stream_command(['docker', 'push', image_tag], dry_run=args.dry_run)
if args.update_references:
digest = get_repo_digest(image_tag)
# Just hardcode this oddity
if image == 'rbe-toolchain':
update_rbe_reference(digest, dry_run=args.dry_run)
update_references(image_name, digest, dry_run=args.dry_run)
| 32.140288 | 80 | 0.675993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,097 | 0.458534 |
4684141f6543556bf465b101be71f060f3b08131 | 27,570 | py | Python | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
]
| null | null | null | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
]
| null | null | null | suncasa/pygsfit/gsutils.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
]
| null | null | null | import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import gstools
# name of the fast gyrosynchrotron codes shared library
if platform.system() == 'Linux' or platform.system() == 'Darwin':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr.so')
if platform.system() == 'Windows':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr64.dll')
def kev2k(eng):
return 11604525.00617 * eng
def ff_emission(em, T=1.e7, Z=1., mu=1.e10):
from astropy import constants as const
import astropy.units as u
T = T * u.k
mu = mu * u.Hz
esu = const.e.esu
k_B = const.k_B.cgs
m_e = const.m_e.cgs
c = const.c.cgs
bmax = (3 * k_B * T * u.k / m_e) ** 0.5 / 2.0 / np.pi / (mu * u.Hz)
bmin = Z * esu ** 2 / 3. / k_B / T
lnbb = np.log((bmax / bmin).value)
ka_mu = 1. / mu ** 2 / T ** 1.5 * (
Z ** 2 * esu ** 6 / c / np.sqrt(2. * np.pi * (m_e * k_B) ** 3)) * np.pi ** 2 / 4.0 * lnbb
# print(ka_mu, em)
opc = ka_mu * em
return T.value * (1 - np.exp(-opc.value))
def sfu2tb(freq, flux, area):
# frequency in Hz
# flux in sfu
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
Tb = flux * sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr)
return Tb
def tb2sfu(freq, tb, area):
# frequency in Hz
# brightness temperature in K
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
flux = tb / (sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr))
return flux
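# Illustrative round trip for the two converters above (added for clarity; the
# numbers are arbitrary placeholders, not reference values):
#     area = 20. * 20.                     # source area in arcsec^2
#     s = tb2sfu(5e9, 1e7, area)           # 10 MK source at 5 GHz -> flux in sfu
#     sfu2tb(5e9, s, area)                 # converts back to ~1e7 K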
def initspecplot(axes, cplts):
errobjs = []
for cpltidx, cplt in enumerate(cplts):
errobjs.append(axes.errorbar([], [], yerr=[], linestyle='', marker='o', mfc='none', mec=cplt, alpha=1.0))
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_xlim([1, 20])
axes.set_ylim([0.1, 1000])
axes.set_xticks([1, 5, 10, 20])
axes.set_xticklabels([1, 5, 10, 20])
axes.set_xticks([1, 5, 10, 20])
axes.set_yticks([])
axes.set_yticks([0.01, 0.1, 1, 10, 100, 1000])
axes.set_ylabel('T$_b$ [MK]')
axes.set_xlabel('Frequency [GHz]')
x = np.linspace(1, 20, 10)
for ll in [-1, 0, 1, 2, 3, 4]:
y = 10. ** (-2 * np.log10(x) + ll)
axes.plot(x, y, 'k--', alpha=0.1)
# y2 = 10. ** (-4 * np.log10(x) + ll)
# y3 = 10. ** (-8 * np.log10(x) + ll)
# ax_eospec.plot(x, y, 'k--', x, y2, 'k:', x, y3, 'k-.', alpha=0.1)
return errobjs
def set_errorobj(xout, yout, errobj, yerr=None):
eospec, dummy, (errbar_eospec,) = errobj
eospec.set_data(xout, yout)
if yerr is not None:
yerr_top = yout + yerr
yerr_bot = yout - yerr
new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
errbar_eospec.set_segments(new_segments_y)
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
    # params are defined by lmfit.Parameters()
    '''
    params: fit parameters defined by lmfit.Parameters()
    freqghz: frequencies in GHz
    ssz: source area in arcsec^2
    tb: reference brightness temperature in K
    tb_err: uncertainties of reference brightness temperature in K
    '''
from scipy import interpolate
GET_MW = gstools.initGET_MW(libname) # load the library
    ssz = float(params['ssz'].value)  # source area in arcsec^2
depth = float(params['depth'].value) # total source depth in arcsec
Bmag = float(params['Bmag'].value) # magnetic field strength in G
Tth = float(params['Tth'].value) # thermal temperature in MK
nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3}
nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV
delta = float(params['delta'].value) # powerlaw index
theta = float(params['theta'].value) # viewing angle in degrees
Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV
Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV
E_hi = 0.1
    nrl = nrlh * (Emin ** (1. - delta) - Emax ** (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
Nf = 100 # number of frequencies
NSteps = 1 # number of nodes along the line-of-sight
N_E = 15 # number of energy nodes
N_mu = 15 # number of pitch-angle nodes
Lparms = np.zeros(11, dtype='int32') # array of dimensions etc.
Lparms[0] = NSteps
Lparms[1] = Nf
Lparms[2] = N_E
Lparms[3] = N_mu
Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters
Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2
# Rparms[0] = 1e20 # area, cm^2
Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz
Rparms[2] = 0.02 # logarithmic step in frequency
Rparms[3] = 12 # f^C
Rparms[4] = 12 # f^WH
ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel
ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm
ParmLocal[1] = Tth * 1e6 # T_0, K
ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3}
ParmLocal[3] = Bmag # B - magnetic field, G
Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels
for i in range(NSteps):
Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels
# if NSteps > 1:
# Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS
# else:
# Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS
Parms[4, i] = theta
# parameters of the electron distribution function
n_b = nrl # n_b - nonthermal electron density, cm^{-3}
mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary
dmu_c = 0.2 # Delta_mu
E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced)
mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid
f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel
# computing the distribution function (equivalent to PLW & GLC)
A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
for i in range(N_E):
for j in range(N_mu):
amu = abs(mu_arr[j])
f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
order='F') # 3D distribution function array - for multiple voxels
for k in range(NSteps):
f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels
RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array
# calculating the emission for array distribution (array -> on)
res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
if res:
# retrieving the results
f = RL[0]
I_L = RL[5]
I_R = RL[6]
if showplt:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(f, I_L + I_R)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Total intensity (array)')
ax.set_xlabel('Frequency, GHz')
ax.set_ylabel('Intensity, sfu')
flx_model = I_L + I_R
flx_model = np.nan_to_num(flx_model) + 1e-11
logf = np.log10(f)
logflx_model = np.log10(flx_model)
logfreqghz = np.log10(freqghz)
interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
logmflx = interpfunc(logfreqghz)
mflx = 10. ** logmflx
mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
else:
print("Calculation error!")
if tb is None:
return mtb
if tb_err is None:
# return mTb - Tb
return mtb - tb
# wt = 1./flx_err
# wt = 1./(Tb_err/Tb/np.log(10.))
# residual = np.abs((logmTb - np.log10(Tb))) * wt
# residual = np.abs((mflx - flx)) * wt
residual = (mtb - tb) / tb_err
return residual
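# --- Illustrative parameter setup (an added sketch, not part of the original module) ---
# mwspec2min_1src() expects an lmfit.Parameters object containing the keys read above
# (ssz, depth, Bmag, Tth, nth, lognrlh, delta, theta, Emin, Emax). The starting values
# and bounds below are hypothetical placeholders, not recommended fitting choices.
def _example_build_fit_params():
    import lmfit
    params = lmfit.Parameters()
    params.add('ssz', value=100., vary=False)            # source area [arcsec^2]
    params.add('depth', value=10., min=1., max=100.)     # line-of-sight depth [arcsec]
    params.add('Bmag', value=200., min=10., max=2000.)   # magnetic field strength [G]
    params.add('Tth', value=10., min=1., max=60.)        # thermal temperature [MK]
    params.add('nth', value=1., min=0.01, max=100.)      # thermal density [1e10 cm^-3]
    params.add('lognrlh', value=7., min=3., max=10.)     # log10 nonthermal density above 0.1 MeV [cm^-3]
    params.add('delta', value=4., min=2., max=10.)       # nonthermal power-law index
    params.add('theta', value=45., min=20., max=87.)     # viewing angle [deg]
    params.add('Emin', value=0.1, vary=False)            # low-energy cutoff [MeV]
    params.add('Emax', value=10., vary=False)            # high-energy cutoff [MeV]
    return params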
class RegionSelector:
# def set_errorobj(self, xout, yout, errobj, yerr):
# eospec, dummy, (errbar_eospec,) = errobj
# eospec.set_data(xout, yout)
# if yerr is not None:
# yerr_top = yout + yerr
# yerr_bot = yout - yerr
# new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
# errbar_eospec.set_segments(new_segments_y)
# return 1
def subdata(self, xs, ys, rfile):
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(rfile)
ny, nx = rmap.data.shape
tr_coord = rmap.top_right_coord
bl_coord = rmap.bottom_left_coord
x0 = bl_coord.Tx.to(u.arcsec).value
y0 = bl_coord.Ty.to(u.arcsec).value
x1 = tr_coord.Tx.to(u.arcsec).value
y1 = tr_coord.Ty.to(u.arcsec).value
dx = rmap.scale.axis1.to(u.arcsec / u.pix).value
dy = rmap.scale.axis2.to(u.arcsec / u.pix).value
mapx, mapy = np.linspace(x0, x1, nx) - dx / 2.0, np.linspace(y0, y1, ny) - dy / 2.0
xsmin = np.nanmin(xs)
xsmax = np.nanmax(xs)
ysmin = np.nanmin(ys)
ysmax = np.nanmax(ys)
if np.abs(xsmax - xsmin) < dx:
xsmax = xsmin + dx
if np.abs(ysmax - ysmin) < dy:
ysmax = ysmin + dy
xmask = np.logical_and(mapx >= xsmin, mapx <= xsmax)
nxnew = np.count_nonzero(xmask)
ymask = np.logical_and(mapy >= ysmin, mapy <= ysmax)
nynew = np.count_nonzero(ymask)
xmask = np.tile(xmask, ny).reshape(ny, nx)
ymask = np.tile(ymask, nx).reshape(nx, ny).transpose()
mask = xmask & ymask
# print(np.count_nonzero(mask))
self.npix = np.count_nonzero(mask)
self.area = self.npix * dx * dy
data = rdata[:, mask]
# print(rdata[:, :, mask])
# print(mask.shape, rdata.shape, data.shape)
data = np.squeeze(data)
# print(data.shape)
return data
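    # Note (added explanatory comment): subdata() crops the image cube to the user-selected
    # box: it builds a pixel mask from the clicked coordinates, records the pixel count and
    # area on the selector, and returns the masked data squeezed to shape (n_freq, n_pixels)
    # (or (n_pixels,) when there is only a single band).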
def __init__(self, clkpnts, boxlines, eofiles, errobjs, cfreqs=None, rms=None, eofile_ref=None, errobj_ref=None,
wTmap=None, outspec_ff=None, scatter_gsfit=None,
get_peak=False, get_sum=False):
self.boxline = []
self.clkpnt = []
self.xs = list(clkpnts[0].get_xdata())
self.ys = list(clkpnts[0].get_ydata())
self.npix = None
self.area = None
self.xout = []
self.yout = []
self.xouterr = []
self.youterr = []
for errobj in errobjs:
eospec, dummy, (errbar_eospec,) = errobj
self.xout.append(eospec.get_xdata())
self.yout.append(eospec.get_ydata())
self.errobjs = errobjs
self.errobj_ref = errobj_ref
self.outspec_ff = outspec_ff
self.scatter_gsfit = scatter_gsfit
self.cfreqs = cfreqs
self.rms = rms
self.eofiles = eofiles
self.eofile_ref = eofile_ref
self.wTmap = wTmap
self.wT = None
self.em = None
self.get_peak = get_peak
self.get_sum = get_sum
self.tps = []
self.params = None
for idx, s in enumerate(clkpnts):
self.boxline.append(boxlines[idx])
self.clkpnt.append(s)
self.cid = s.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
axes = [clkpnt.axes for clkpnt in self.clkpnt]
if self.clkpnt[0].figure.canvas.toolbar.mode == '':
if event.inaxes not in axes:
return
nxs = len(self.xs)
if event.button == 1:
if nxs < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
else:
self.xs = [event.xdata]
self.ys = [event.ydata]
elif event.button == 3:
if len(self.xs) > 0:
self.xs.pop()
self.ys.pop()
self.get_flux()
def get_flux(self):
if len(self.xs) > 0:
xs = np.array(self.xs, dtype=np.float64)
ys = np.array(self.ys, dtype=np.float64)
for clkpnt in self.clkpnt:
clkpnt.set_data(xs, ys)
else:
for clkpnt in self.clkpnt:
clkpnt.set_data([], [])
nxs = len(self.xs)
if nxs <= 1:
for line in self.boxline:
line.set_data([], [])
elif nxs == 2:
datas = []
# eofile = self.eofiles[0]
# rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofile)
# data = self.subdata(xs, ys, eofile)
# datas.append(data)
for tidx, eofile in enumerate(self.eofiles):
data = self.subdata(xs, ys, eofile)
datas.append(data)
if self.eofile_ref is not None:
data_ref = self.subdata(xs, ys, self.eofile_ref)
if self.wTmap is not None:
datawT = self.subdata(xs, ys, self.wTmap)
if self.get_peak:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmax(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
                    if data_ref.ndim > 1:
                        youts_outspec_ref = np.nanmax(data_ref, axis=-1) / 1e6
                    else:
                        youts_outspec_ref = data_ref / 1e6
else:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmean(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
                    if data_ref.ndim > 1:
youts_outspec_ref = np.nanmean(data_ref, axis=-1) / 1e6
else:
youts_outspec_ref = data_ref / 1e6
self.tps = []
for data in datas:
if data.ndim > 1:
self.tps.append(np.nansum(data, axis=-1) / 1e6)
else:
self.tps.append(data / 1e6)
xout = self.cfreqs
for tidx, errobj in enumerate(self.errobjs):
set_errorobj(xout, youts_outspec[tidx], errobj, self.rms)
if self.eofile_ref is not None:
set_errorobj(xout, youts_outspec_ref, self.errobj_ref, self.rms)
if self.wTmap is not None:
print(datawT.shape)
wT = np.nanmean(datawT[..., 1]) * 1e6
em = np.nanmean(datawT[..., 0])
arcsec2cm = (self.wTmap[0].rsun_meters / self.wTmap[0].rsun_obs).to(u.cm / u.arcsec).value
# nele = 4.0e10
# depth = em / nele ** 2 / arcsec2cm
# print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, depth: {:.1f} arcsec if nele is {:.2e} cm-3'.format(wT / 1e6, em, depth, nele))
depth = 20. ## arcsec
nele = np.sqrt(em / (depth * arcsec2cm))
print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, nele: {:.2e} cm-3 if depth is {:.1f} arcsec'.format(
wT / 1e6, em, nele, depth))
self.wT = wT
self.em = em
yout_ff = np.array([ff_emission(em, T=wT, Z=1., mu=ll) for ll in xout * 1e9]) / 1.e6
self.outspec_ff.set_data(xout, yout_ff)
self.errobjs[0][0].figure.canvas.draw_idle()
for line in self.boxline:
line.set_data([xs[0], xs[1], xs[1], xs[0], xs[0]], [ys[0], ys[0], ys[1], ys[1], ys[0]])
clkpnt.figure.canvas.draw_idle()
class GStool:
# def get_showaia(self):
# return self._showaia
#
# def set_showaia(self, value):
# self._showaia = value
#
# showaia = property(fget=get_showaia, fset=set_showaia, doc="`Boolean`-like: Display AIA image or not")
def __init__(self, eofiles, aiafile=None, xycen=None, fov=None, freqghz_bound=[-1, 100], calpha=0.5,
clevels=np.array([0.3, 1.0]), opencontour=None):
self.aiafile = aiafile
self.eofiles = eofiles
self.xycen = xycen
self.fov = fov
self.calpha = calpha
self.clevels = clevels
self.freqghz_bound = freqghz_bound
self.opencontour = opencontour
self._showaia = False
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofiles[0])
self.bdinfo = bdinfo = ndfits.get_bdinfo(rfreqs, rdelts)
self.cfreqs = cfreqs = bdinfo['cfreqs']
self.cfreqs_all = cfreqs_all = bdinfo['cfreqs_all']
self.freq_dist = lambda fq: (fq - cfreqs_all[0]) / (cfreqs_all[-1] - cfreqs_all[0])
self.ntim = ntim = len(eofiles)
self.xlim = xlim = xycen[0] + np.array([-1, 1]) * 0.5 * fov[0]
self.ylim = ylim = xycen[1] + np.array([-1, 1]) * 0.5 * fov[1]
nspw = len(rfreqs)
eodate = Time(rmap.date.mjd + rmap.exposure_time.value / 2. / 24 / 3600, format='mjd')
ny, nx = rmap.data.shape
x0, x1 = (np.array([1, rmap.meta['NAXIS1']]) - rmap.meta['CRPIX1']) * rmap.meta['CDELT1'] + \
rmap.meta['CRVAL1']
y0, y1 = (np.array([1, rmap.meta['NAXIS2']]) - rmap.meta['CRPIX2']) * rmap.meta['CDELT2'] + \
rmap.meta['CRVAL2']
dx = rmap.meta['CDELT1']
dy = rmap.meta['CDELT2']
mapx, mapy = np.linspace(x0, x1, nx), np.linspace(y0, y1, ny)
fig = plt.figure(figsize=(15, 6))
self.fig = fig
grids = fig.add_gridspec(ncols=3, nrows=1, width_ratios=[1, 1, 0.6])
self.grids = grids
axs = []
axs.append(fig.add_subplot(grids[0, 0]))
axs.append(fig.add_subplot(grids[0, 1], sharex=axs[-1], sharey=axs[-1]))
axs.append(fig.add_subplot(grids[0, 2]))
if aiafile:
if os.path.exists(aiafile):
try:
aiacmap = plt.get_cmap('gray_r')
aiamap = smap.Map(aiafile)
ax = axs[0]
aiamap.plot(axes=ax, cmap=aiacmap)
ax = axs[1]
aiamap.plot(axes=ax, cmap=aiacmap)
self._showaia = True
                except Exception:
self._showaia = False
if self._showaia:
if self.opencontour is None:
self.opencontour = False
else:
if self.opencontour is None:
self.opencontour = True
## Plot EOVSA images as filled contour on top of the AIA image
icmap = plt.get_cmap('RdYlBu')
cts = []
## color map for spectra from the image series
tcmap = plt.get_cmap('turbo')
        ax = axs[1]  # draw the EOVSA contours on the second panel (also defined when no AIA map is shown)
        for s, sp in enumerate(rfreqs):
data = rdata[s, ...]
clvls = clevels * np.nanmax(data)
rcmap = [icmap(self.freq_dist(self.cfreqs[s]))] * len(clvls)
if self.opencontour:
cts.append(ax.contour(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
else:
cts.append(ax.contourf(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
for ax in axs[:2]:
ax.set_xlabel('Solar-X [arcsec]')
            ax.set_ylabel('Solar-Y [arcsec]')
ax.set_title('')
ax.text(0.02, 0.01,
' '.join(['AIA {:.0f} Å'.format(aiamap.wavelength.value),
aiamap.date.datetime.strftime('%Y-%m-%dT%H:%M:%S')]),
ha='left',
va='bottom',
color='k', transform=ax.transAxes)
ax.text(0.02, 0.05, ' '.join(['EOVSA ', eodate.datetime.strftime('%Y-%m-%dT%H:%M:%S')]), ha='left',
va='bottom',
color='k', transform=ax.transAxes)
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size="8%", pad=0.08)
cax.set_visible(False)
divider = make_axes_locatable(axs[1])
cax = divider.append_axes("right", size="8%", pad=0.08)
ticks, bounds, vmax, vmin, freqmask = ql.get_colorbar_params(bdinfo)
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=icmap,
orientation='vertical', boundaries=bounds, spacing='proportional',
ticks=ticks, format='%4.1f', alpha=calpha)
for fbd_lo, fbd_hi in freqmask:
if fbd_hi is not None:
cax.axhspan(fbd_lo, fbd_hi, hatch='//', edgecolor='k', facecolor='#BBBBBB')
plt.text(0.5, 1.05, 'MW', ha='center', va='bottom', transform=cax.transAxes, color='k', fontweight='normal')
plt.text(0.5, 1.01, '[GHz]', ha='center', va='bottom', transform=cax.transAxes, color='k',
fontweight='normal')
cax.xaxis.set_visible(False)
cax.tick_params(axis="y", pad=-20., length=0, colors='k', labelsize=7)
cax.axhline(vmin, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.axhline(vmax, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.text(1.25, 0.0, '{:.1f}'.format(vmin), fontsize=9, transform=cax.transAxes, va='center', ha='left')
cax.text(1.25, 1.0, '{:.1f}'.format(vmax), fontsize=9, transform=cax.transAxes, va='center', ha='left')
boxlines = []
clkpnts = []
for idx, ax in enumerate(axs[:2]):
if idx == 0:
c = 'g'
elif idx == 1:
c = 'b'
else:
c = 'k'
line, = ax.plot([], [], '-', c=c, alpha=1.0) # empty line
boxlines.append(line)
clkpnt, = ax.plot([], [], '+', c='white', alpha=0.7) # empty line
clkpnts.append(clkpnt)
if ntim < 2:
cplts = ['k']
else:
cplts = tcmap(np.linspace(0, 1, ntim))
self.cplts = cplts
self.ax_eospec = axs[-1]
errobjs = initspecplot(self.ax_eospec, cplts)
grids.tight_layout(fig)
self.region = RegionSelector(clkpnts, boxlines, eofiles, errobjs, cfreqs=cfreqs, rms=None, wTmap=None)
self.scatter_eospecs_fit = []
self.scatter_eospecs = []
def set_params(self, params):
ssz = self.region.area # source area in arcsec^2
        params.add('ssz', value=ssz, vary=False)  # source area in arcsec^2, held fixed during the fit
self.params = params
def plot_components(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.0
tb_err[:] = 1.e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='',
c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
def fit(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.1
# tb_err[:] = 0.2e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
mini = lmfit.Minimizer(mwspec2min_1src, self.params, fcn_args=(freqghz_ma.compressed(),),
fcn_kws={'tb': tb_ma.compressed(), 'tb_err': tb_err_ma.compressed()},
nan_policy='omit')
method = 'nelder'
# # method = 'differential_evolution'
mi = mini.minimize(method=method)
print(method + ' minimization results')
print(lmfit.fit_report(mi.params))
tb_fit = mwspec2min_1src(mi.params, freqghz)
if len(self.scatter_eospecs) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs.append(self.ax_eospec.plot(freqghz, tb_fit / 1.e6, linestyle='-', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs[ti][0].set_data(freqghz, tb_fit / 1.e6)
| 40.784024 | 145 | 0.551904 | 18,293 | 0.663487 | 0 | 0 | 0 | 0 | 0 | 0 | 5,316 | 0.192811 |
4685248be49c1c014500014593ebd58e99994652 | 1,496 | py | Python | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
]
| 10 | 2015-12-04T07:43:11.000Z | 2021-01-23T00:44:56.000Z | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
]
| 200 | 2015-02-11T05:41:57.000Z | 2015-11-13T03:47:25.000Z | msgvis/apps/questions/migrations/0001_initial.py | hds-lab/textvis-drg | bfb136b6105df84fb6c1c89cc595bf9e9f22c5fe | [
"MIT"
]
| 6 | 2015-10-02T18:01:09.000Z | 2021-01-23T00:44:58.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dimensions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('year', models.PositiveIntegerField(default=None, null=True, blank=True)),
('authors', models.CharField(default=None, max_length=250, blank=True)),
('link', models.CharField(default=None, max_length=250, blank=True)),
('title', models.CharField(default=None, max_length=250, blank=True)),
('venue', models.CharField(default=None, max_length=250, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.TextField()),
('dimensions', models.ManyToManyField(to='dimensions.Dimension')),
('source', models.ForeignKey(default=None, to='questions.Article', null=True)),
],
options={
},
bases=(models.Model,),
),
]
| 36.487805 | 114 | 0.556818 | 1,387 | 0.927139 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.124332 |
4685622823bb3cb792b6697fa854e6d940a37ece | 7,412 | py | Python | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
]
| null | null | null | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
]
| null | null | null | test/test_check_alert.py | russ-lewis/cs120-queuebot | 6b121ef9b0d7db7bbb1810f71129d995ce6a1659 | [
"MIT"
]
| 1 | 2021-05-18T02:32:54.000Z | 2021-05-18T02:32:54.000Z | import io
import sys
import unittest
import asyncio
import random
from contextlib import redirect_stdout
from .utils import *
from queuebot import QueueBot, QueueConfig, DiscordUser
config = {
"SECRET_TOKEN": "NOONEWILLEVERGUESSTHISSUPERSECRETSTRINGMWAHAHAHA",
"TA_ROLES": ["UGTA"],
"LISTEN_CHANNELS": ["join-queue"],
"CHECK_VOICE_WAITING": "False",
"VOICE_WAITING": "waiting-room",
"ALERT_ON_FIRST_JOIN": "True",
"VOICE_OFFICES": ["Office Hours Room 1", "Office Hours Room 2", "Office Hours Room 3"],
"ALERTS_CHANNEL": "queue-alerts",
}
config = QueueConfig(config, test_mode=True)
# TODO Comment each test case
class QueueTest(unittest.TestCase):
def setUp(self):
random.seed(SEED)
self.config = config.copy()
self.bot = QueueBot(self.config, None, testing=True)
# self.bot.waiting_room = MockVoice(config.VOICE_WAITING)
self.bot.logger = MockLogger()
self.bot.office_rooms = [MockVoice(name) for name in config.VOICE_OFFICES]
def reset_vc_queue(self):
# Reset queue
russ = get_rand_element(ALL_TAS)
message = MockMessage("!q clear", russ)
with io.StringIO() as buf, redirect_stdout(buf):
run(self.bot.queue_command(message))
self.assertEqual(len(self.bot._queue), 0)
# Empty voice channels
for v in self.bot.office_rooms:
v.members = []
def test_no_tas(self):
# No TAs in rooms
student = get_rand_element(ALL_STUDENTS)
self.assertEqual(len(self.bot._queue), 0)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
self.assertTrue(buf.getvalue().strip().startswith(
f"SEND: ✅ {student.get_mention()} you have been added at position #1"))
self.assertEqual(len(self.bot._queue), 1)
self.reset_vc_queue()
def test_one_ta(self):
ta = get_rand_element(ALL_TAS)
office_room = get_rand_element(self.bot.office_rooms)
office_room.members.append(ta)
student = get_rand_element(ALL_STUDENTS)
self.assertEqual(len(self.bot._queue), 0)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
self.assertTrue(buf.getvalue().strip().startswith(
f"SEND: {ta.get_mention()} The queue is no longer empty"))
self.assertEqual(len(self.bot._queue), 1)
self.reset_vc_queue()
def get_mentions_from_send(self, buf):
send_str = buf.getvalue().strip().split("\n", 1)[0]
assert send_str.startswith("SEND:")
assert "<@" in send_str
assert "The queue is no longer empty" in send_str
return send_str.lstrip("SEND: ") \
.rstrip(" The queue is no longer empty") \
.split(" ")
def test_many_tas_one_room(self):
tas = get_n_rand(ALL_TAS, 3)
office_room = get_rand_element(self.bot.office_rooms)
office_room.members.extend(tas)
mention_set = set()
student = get_rand_element(ALL_STUDENTS)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
mention_set.update(mentions)
for ta in tas:
self.assertTrue(ta.get_mention() in mention_set)
mention_set.remove(ta.get_mention())
self.assertEqual(len(mention_set), 0)
self.reset_vc_queue()
def test_many_tas_all_rooms(self):
tas = get_n_rand(ALL_TAS, 5)
tas_copy = tas.copy()
while len(tas) > 0:
for office_room in self.bot.office_rooms:
# If we run out of TAs while going through all the rooms
if len(tas) == 0:
break
office_room.add_member(tas.pop())
mention_set = set()
student = get_rand_element(ALL_STUDENTS)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
mention_set.update(mentions)
for ta in tas_copy:
self.assertTrue(ta.get_mention() in mention_set)
mention_set.remove(ta.get_mention())
self.assertEqual(len(mention_set), 0)
self.reset_vc_queue()
def test_ta_with_student(self):
busy_room, open_room = get_n_rand(self.bot.office_rooms, 2)
busy_ta, open_ta = get_n_rand(ALL_TAS, 2)
busy_student, open_student = get_n_rand(ALL_STUDENTS, 2)
busy_room.add_many_members(busy_ta, busy_student)
open_room.add_member(open_ta)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", busy_student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
self.assertEqual(mentions, [open_ta.get_mention()])
def test_ta_with_student2(self):
rooms = get_n_rand(self.bot.office_rooms, 3)
busy_rooms = rooms[:-1]
open_room = rooms[-1]
busy_ta, open_ta = get_n_rand(ALL_TAS, 2)
students = [ None ]
open_student = None
while open_student in students:
students = get_n_rand(ALL_STUDENTS, 5)
open_student = get_rand_element(ALL_STUDENTS)
busy_rooms[0].add_many_members(busy_ta, *students[:-2])
busy_rooms[1].add_many_members(busy_ta, *students[-2:])
open_room.add_member(open_ta)
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", open_student)
run(self.bot.queue_command(message))
mentions = self.get_mentions_from_send(buf)
self.assertEqual(mentions, [open_ta.get_mention()])
def test_two_tas(self):
tas = get_n_rand(ALL_TAS, 2)
rooms = get_n_rand(self.bot.office_rooms, 2)
rooms[0].add_member(tas[0])
rooms[1].add_member(tas[1])
students = get_n_rand(ALL_STUDENTS, 2)
# Check for both alerted
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", students[0])
run(self.bot.queue_command(message))
ta_list = set(self.get_mentions_from_send(buf))
for ta in tas:
ta_list.remove(ta.get_mention())
self.assertEqual(len(ta_list), 0)
# Remove first student from queue
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q next", tas[0])
run(self.bot.queue_command(message))
self.assertEqual(len(self.bot._queue), 0)
# First ta helps first student
rooms[0].add_member(students[0])
# Another student joins
with io.StringIO() as buf, redirect_stdout(buf):
message = MockMessage("!q join", students[1])
run(self.bot.queue_command(message))
ta_list = self.get_mentions_from_send(buf)
self.assertEqual(ta_list, [tas[1].get_mention()])
if __name__ == '__main__':
unittest.main()
| 34.314815 | 91 | 0.625472 | 6,716 | 0.905854 | 0 | 0 | 0 | 0 | 0 | 0 | 920 | 0.12409 |
4685d4b1728860e781a678cd76d788dda6fe260b | 236 | py | Python | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
]
| null | null | null | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
]
| null | null | null | iam/__init__.py | dataday/aws-utilities-sdk | 7b1236f27a27e2830ccbbf905bbc05f864a1aecf | [
"MIT"
]
| null | null | null | """
.. module:: aws_utilities_cli.iam
:platform: OS X
:synopsis: Small collection of utilities that
use the Amazon Web Services (AWS) SDK
.. moduleauthor:: dataday
"""
__all__ = ['generate_identity', 'generate_policy']
| 23.6 | 50 | 0.686441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.932203 |
46869ee7c4058bcc60c1e873b09ca6bac3bb10d7 | 2,556 | py | Python | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
]
| null | null | null | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
]
| null | null | null | tests/common/test_run/triangle_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
]
| null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from tests.common.test_op import triangle
from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
def triangle_execute(shape, const_value, lower, dtype, attrs):
support_type = ['float16', 'float32']
assert dtype in support_type
assert len(shape) <= 2
if attrs is None:
attrs = {'enable_pre_poly_loop_partition': False}
attrs['enable_pre_poly_loop_partition'] = False
attrs['enable_post_poly_loop_partition'] = False
attrs['enable_convert_if'] = True
attrs['enable_double_buffer'] = False
output_shape = shape
if len(shape) == 1:
output_shape = [shape[0], shape[0]]
input, bench_mark = gen_data(shape, output_shape, const_value, lower, dtype)
op_attrs = [const_value, lower]
mod = triangle_compile(shape, dtype, op_attrs, attrs)
source_code = mod.imported_modules[0].get_source()
output = np.full(output_shape, np.nan, dtype)
output = utils.mod_launch(mod, (input, output), expect=bench_mark)
# compare result
compare_result = compare_tensor(output, bench_mark, rtol=5e-3, equal_nan=True)
return input, output, bench_mark, compare_result
def triangle_compile(shape, dtype, op_attrs, attrs):
return utils.op_build_test(triangle.triangle, [shape], [dtype], op_attrs, kernel_name='triangle', attrs=attrs)
def gen_data(shape, output_shape, const_value, lower, dtype):
input = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
if len(shape) == 2:
        bench_mark = input.copy()  # work on a copy so the triangle fill below does not also overwrite the kernel input
else:
bench_mark = np.zeros(output_shape).astype(dtype)
for i in range(output_shape[0]):
bench_mark[i] = input
if lower:
for i in range(output_shape[0]):
bench_mark[i][i + 1:] = const_value
else:
for i in range(output_shape[0]):
bench_mark[i][:i] = const_value
return input, bench_mark
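# --- Illustrative numpy reference (an added sketch, not part of the original test) ---
# The benchmark built above keeps the lower (or upper) triangle, diagonal included, and
# overwrites the remaining elements with const_value; for a 2D array this is equivalent to:
def _numpy_triangle_reference(data2d, const_value, lower):
    import numpy as np
    mask = np.tril(np.ones(data2d.shape, dtype=bool)) if lower \
        else np.triu(np.ones(data2d.shape, dtype=bool))
    return np.where(mask, data2d, const_value)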
| 35.5 | 114 | 0.711659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.296166 |
4686dbf11ea2488f7a45d2ed0c1748432a5a0064 | 394 | py | Python | profiles/migrations/0018_auto_20180514_2106.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
]
| 2 | 2018-12-03T09:19:31.000Z | 2020-02-11T15:32:12.000Z | {{cookiecutter.project_slug}}/profiles/migrations/0018_auto_20180514_2106.py | brentfraser/cookiecutter-geopaparazzi-server | f9cd705991879deac67365007e9589142afc09bf | [
"BSD-3-Clause"
]
| 2 | 2019-02-20T17:50:55.000Z | 2019-02-21T15:19:51.000Z | profiles/migrations/0018_auto_20180514_2106.py | GeoAnalytic-code/geotabloid | af017d470ef4553d5fbd24d865cb22ca643fd999 | [
"MIT"
]
| 2 | 2018-10-19T17:07:01.000Z | 2021-01-13T06:54:55.000Z | # Generated by Django 2.0.3 on 2018-05-14 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0017_otherfiles_location'),
]
operations = [
migrations.AlterField(
model_name='project',
name='url',
field=models.FileField(upload_to='projects/'),
),
]
| 20.736842 | 58 | 0.598985 | 301 | 0.763959 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.274112 |
46873638ac399bb3c28bf800bc92bbfd39940934 | 15,189 | py | Python | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
]
| 24 | 2015-01-26T03:14:19.000Z | 2021-09-27T23:10:12.000Z | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
]
| 30 | 2015-04-17T18:14:27.000Z | 2021-05-30T15:01:47.000Z | orio/main/tuner/tuner.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
]
| 20 | 2015-02-11T08:20:19.000Z | 2022-01-15T17:55:00.000Z | #
# The tuner class to initiate the empirical performance tuning process
#
import re, sys, os
from orio.main.util.globals import *
import orio.main.dyn_loader, orio.main.tspec.tspec, orio.main.tuner.ptest_codegen, orio.main.tuner.ptest_driver
#--------------------------------------------------
# the name of the module containing various search algorithms
SEARCH_MOD_NAME = 'orio.main.tuner.search'
#--------------------------------------------------
class PerfTuner:
'''
The empirical performance tuner.
This class is responsible for invoking the code generators of the annotation modules,
compiling the resulting code, and interfacing with the search interface to run the
tests and collect the results.
'''
#-------------------------------------------------
def __init__(self, odriver):
'''To instantiate an empirical performance tuner object'''
self.odriver = odriver
self.dloader = orio.main.dyn_loader.DynLoader()
self.num_params=0
self.num_configs=0
self.num_bin=0
self.num_int=0
self.tinfo = None
#-------------------------------------------------
def tune(self, module_body_code, line_no, cfrags):
'''
Perform empirical performance tuning on the given annotated code. And return the best
optimized code variant.
'''
# extract the tuning information specified from the given annotation
tinfo = self.__extractTuningInfo(module_body_code, line_no)
self.tinfo = tinfo
# determine if parallel search is required
use_parallel_search = tinfo.batch_cmd != None
# create a performance-testing code generator for each distinct problem size
ptcodegens = []
#timing_code = ''
for prob_size in self.__getProblemSizes(tinfo.iparam_params, tinfo.iparam_constraints):
if self.odriver.lang == 'c':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGen(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search, tinfo.validation_file)
elif self.odriver.lang == 'cuda':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenCUDA(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
elif self.odriver.lang == 'opencl':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenOpenCL(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
elif self.odriver.lang == 'fortran':
c = orio.main.tuner.ptest_codegen.PerfTestCodeGenFortran(prob_size, tinfo.ivar_decls, tinfo.ivar_decl_file,
tinfo.ivar_init_file, tinfo.ptest_skeleton_code_file, self.odriver.lang,
tinfo.random_seed, use_parallel_search)
else:
err('main.tuner.tuner: unknown output language specified: %s' % self.odriver.lang)
ptcodegens.append(c)
# create the performance-testing driver
ptdriver = orio.main.tuner.ptest_driver.PerfTestDriver(self.tinfo, use_parallel_search,
self.odriver.lang,
c.getTimerCode(use_parallel_search))
# get the axis names and axis value ranges to represent the search space
axis_names, axis_val_ranges = self.__buildCoordSystem(tinfo.pparam_params, tinfo.cmdline_params)
info('%s' % axis_names)
info('%s' % axis_val_ranges)
# combine the performance parameter constraints
pparam_constraint = 'True'
for vname, rhs in tinfo.pparam_constraints:
pparam_constraint += ' and (%s)' % rhs
# dynamically load the search engine class and configure it
if Globals().extern:
tinfo.search_algo='Extern'
info('Running in %s mode' % tinfo.search_algo)
info('Using parameters %s' % Globals().config)
class_name = tinfo.search_algo
mod_name = '.'.join([SEARCH_MOD_NAME, class_name.lower(), class_name.lower()])
search_class = self.dloader.loadClass(mod_name, class_name)
# convert the search time limit (from minutes to seconds) and get the total number of
# search runs
search_time_limit = 60 * tinfo.search_time_limit
search_total_runs = tinfo.search_total_runs
search_use_z3 = tinfo.search_use_z3
search_resume = tinfo.search_resume
# get the search-algorithm-specific arguments
search_opts = dict(tinfo.search_opts)
# perform the performance tuning for each distinct problem size
optimized_code_seq = []
for ptcodegen in ptcodegens:
if Globals().verbose:
info('\n----- begin empirical tuning for problem size -----')
# Sort y variable name... not sure it's really necessary
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
info(' %s = %s' % (pname, pvalue))
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
Globals().metadata['size_' + pname] = pvalue
debug(ptcodegen.input_params[:])
# create the search engine
search_eng = search_class({'cfrags':cfrags, # code versions
'axis_names':axis_names, # performance parameter names
'axis_val_ranges':axis_val_ranges, # performance parameter values
'pparam_constraint':pparam_constraint,
'search_time_limit':search_time_limit,
'search_total_runs':search_total_runs,
'search_resume':search_resume,
'search_opts':search_opts,
'ptcodegen':ptcodegen,
'ptdriver':ptdriver, 'odriver':self.odriver,
'use_parallel_search':use_parallel_search,
'input_params':ptcodegen.input_params[:]})
# search for the best performance parameters
best_perf_params, best_perf_cost = search_eng.search()
# output the best performance parameters
if Globals().verbose and not Globals().extern:
info('----- the obtained best performance parameters -----')
pparams = sorted(list(best_perf_params.items()))
for pname, pvalue in pparams:
info(' %s = %s' % (pname, pvalue))
# generate the optimized code using the obtained best performance parameters
if Globals().extern:
best_perf_params=Globals().config
debug("[orio.main.tuner.tuner] Globals config: %s" % str(Globals().config), obj=self, level=6)
cur_optimized_code_seq = self.odriver.optimizeCodeFrags(cfrags, best_perf_params)
# check the optimized code sequence
if len(cur_optimized_code_seq) != 1:
err('orio.main.tuner internal error: the empirically optimized code cannot contain multiple versions')
# get the optimized code
optimized_code, _, externals = cur_optimized_code_seq[0]
# insert comments into the optimized code to include information about
# the best performance parameters and the input problem sizes
iproblem_code = ''
iparams = sorted(ptcodegen.input_params[:])
for pname, pvalue in iparams:
if pname == '__builtins__':
continue
iproblem_code += ' %s = %s \n' % (pname, pvalue)
pparam_code = ''
pparams = sorted(list(best_perf_params.items()))
for pname, pvalue in pparams:
if pname == '__builtins__':
continue
pparam_code += ' %s = %s \n' % (pname, pvalue)
info_code = '\n/**-- (Generated by Orio) \n'
if not Globals().extern:
info_code += 'Best performance cost: \n'
info_code += ' %s \n' % best_perf_cost
info_code += 'Tuned for specific problem sizes: \n'
info_code += iproblem_code
info_code += 'Best performance parameters: \n'
info_code += pparam_code
info_code += '--**/\n'
optimized_code = info_code + optimized_code
# store the optimized for this problem size
optimized_code_seq.append((optimized_code, ptcodegen.input_params[:], externals))
# return the optimized code
return optimized_code_seq
# Private methods
#-------------------------------------------------
def __extractTuningInfo(self, code, line_no):
'''Extract tuning information from the given annotation code'''
# parse the code
match_obj = re.match(r'^\s*import\s+spec\s+([/A-Za-z_]+);\s*$', code)
# if the code contains a single import statement
if match_obj:
# get the specification name
spec_name = match_obj.group(1)
spec_file = spec_name+'.spec'
try:
src_dir = '/'.join(list(Globals().src_filenames.keys())[0].split('/')[:-1])
spec_file_path = os.getcwd() + '/' + src_dir + '/' + spec_file
f = open(spec_file_path, 'r')
tspec_code = f.read()
f.close()
except:
err('%s: cannot open file for reading: %s' % (self.__class__, spec_file_path))
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseProgram(tspec_code)
# if the tuning specification is hardcoded into the given code
elif code.lstrip().startswith('spec'):
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseProgram(code)
else:
# parse the specification code to get the tuning information
tuning_spec_dict = orio.main.tspec.tspec.TSpec().parseSpec(code, line_no)
# return the tuning information
return tuning_spec_dict
#-------------------------------------------------
def __listAllCombinations(self, seqs):
'''
Enumerate all combinations of the given sequences.
e.g. input: [['a','b'],[1,2]] --> [['a',1],['a',2],['b',1],['b',2]]
'''
# the base case
if len(seqs) == 0:
return []
# the recursive step
trailing_combs = self.__listAllCombinations(seqs[1:])
if trailing_combs == []:
trailing_combs = [[]]
combs = []
for i in seqs[0]:
for c in trailing_combs:
combs.append([i] + c)
# return the combinations
return combs
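    # Note (added explanatory comment): __listAllCombinations() enumerates the Cartesian
    # product of the given value lists; it is equivalent to
    # [list(t) for t in itertools.product(*seqs)] (itertools is not imported by this module).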
#-------------------------------------------------
def __getProblemSizes(self, iparam_params, iparam_constraints):
'''Return all valid problem sizes'''
# combine the input parameter constraints
iparam_constraint = 'True'
for vname, rhs in iparam_constraints:
iparam_constraint += ' and (%s)' % rhs
# compute all possible combinations of problem sizes
prob_sizes = []
pnames, pvalss = list(zip(*iparam_params))
for pvals in self.__listAllCombinations(pvalss):
prob_sizes.append(list(zip(pnames, pvals)))
# exclude all invalid problem sizes
n_prob_sizes = []
for p in prob_sizes:
try:
is_valid = eval(iparam_constraint, dict(p))
except Exception as e:
err('orio.main.tuner.tuner:%s: failed to evaluate the input parameter constraint expression\n --> %s: %s' % (iparam_constraint,e.__class__.__name__, e))
if is_valid:
n_prob_sizes.append(p)
prob_sizes = n_prob_sizes
# check if the new problem sizes is empty
if len(prob_sizes) == 0:
err('orio.main.tuner.tuner: no valid problem sizes exist. please check the input parameter ' +
'constraints')
# return all possible combinations of problem sizes
return prob_sizes
#-------------------------------------------------
def __buildCoordSystem(self, perf_params, cmdline_params):
'''Return information about the coordinate systems that represent the search space'''
debug("BUILDING COORD SYSTEM", obj=self,level=3)
# get the axis names and axis value ranges
axis_names = []
axis_val_ranges = []
for pname, prange in perf_params:
axis_names.append(pname)
# BN: why on earth would someone do this?????
# axis_val_ranges.append(self.__sort(prange))
axis_val_ranges.append(prange)
for pname, prange in cmdline_params:
axis_names.append('__cmdline_' + pname)
axis_val_ranges.append(prange)
self.num_params=len(axis_names)
self.num_configs=1
self.num_bin=0
self.num_categorical = 0
self.num_int=self.num_params
ptype=[]
for vals in axis_val_ranges:
self.num_configs=self.num_configs*len(vals)
ptype.append('I')
if type(vals[0]) == bool:
self.num_bin=self.num_bin+1
ptype[len(ptype)-1]=('B')
if type(vals[0]) == str:
self.num_categorical = self.num_categorical+1
self.num_int -= self.num_bin
self.num_int -= self.num_categorical
info('Search_Space = %1.3e' % self.num_configs)
info('Number_of_Parameters = %02d' % self.num_params)
info('Numeric_Parameters = %02d' % self.num_int)
info('Binary_Parameters = %02d' % self.num_bin)
info('Categorical_Parameters = %02d' % self.num_categorical)
sys.stderr.write('%s\n'% Globals().configfile)
return (axis_names, axis_val_ranges)
| 43.397143 | 169 | 0.55448 | 14,728 | 0.969649 | 0 | 0 | 0 | 0 | 0 | 0 | 4,594 | 0.302456 |
46882caba8a1bc1cb57f8753adac56b5f9b622b8 | 1,862 | py | Python | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
]
| null | null | null | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
]
| null | null | null | verify_data.py | goowell/DrAdvice | d1f9d70671500599b3a36eb9c83390e39c521d25 | [
"MIT"
]
| null | null | null | from transformer import *
from logger import logger
def find_missing():
from db import paients_source, paients_info
import re
for pi in paients_info.find():
if paients_source.find({'_id': re.compile(pi['住院号'], re.IGNORECASE)}).count()>0:
pass
else:
print(pi['住院号'])
def verify_data(collection):
'verify the data format is correct or not.'
for d in collection.find():
info = d.get('d').get('info')
if len(info) <12 and info[0] != '1':
logger.error('invalid patient info:' + d['_id']+str(info))
if len(d.get('d').get('doctor_advice')) == 0:
logger.error('invalid doctor advice:' + d['_id'])
else:
has_long = False
has_short = False
for a in d.get('d').get('doctor_advice'):
if len(a) != 18:
logger.error('invalid doctor advice:' + d['_id'])
logger.error("invalid doctor advice: " + a)
if a[3] == '长':
has_long = True
else:
has_short = True
if not (has_long and has_short):
logger.error('invalid doctor advice: ' + d['_id'] + ', long/short: {}/{}'.format(has_long, has_short) )
def get_info(collection):
'count PE'
for d in collection.find():
if len(d.get('d').get('doctor_advice')) == 0:
print('invalid doctor advice:' + d['_id'])
else:
one_p = split_all_ad(d)
print(one_p)
break
def main():
'main entry'
from datetime import datetime
from db import paients_source
start = datetime.now()
print('hello..')
# verify_data(paients_source)
# get_info(collection)
find_missing()
print(datetime.now() - start)
if __name__ == '__main__':
main() | 29.09375 | 119 | 0.538131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.226013 |
46885ee1267724c58d93c90bd0626ea4f768c7c7 | 1,904 | py | Python | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
]
| 15 | 2020-06-11T02:20:26.000Z | 2022-03-09T07:18:23.000Z | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
]
| 4 | 2021-01-20T03:24:23.000Z | 2021-11-01T05:33:38.000Z | jvm-packages/cudautils.py | NVIDIA/spark-xgboost | 983159f52784e3d9dfd8e9957c5f8ca18e64f934 | [
"Apache-2.0"
]
| 6 | 2020-06-24T03:28:58.000Z | 2021-10-01T16:04:11.000Z | #!/usr/bin/env python
import os
import re
import subprocess
import sys
# version -> classifier
# '' means default classifier
cuda_vers = {
'11.2': ['cuda11', '']
}
def check_classifier(classifier):
'''
Check the mapping from cuda version to jar classifier.
Used by maven build.
'''
cu_ver = detect_cuda_ver()
classifier_list = cuda_vers[cu_ver]
if classifier not in classifier_list:
raise Exception("Jar classifier '{}' mismatches the 'nvcc' version {} !".format(classifier, cu_ver))
def get_classifier():
cu_ver = detect_cuda_ver()
classifier_list = cuda_vers[cu_ver]
return classifier_list[0]
def get_supported_vers():
'''
Get the supported cuda versions.
'''
return cuda_vers.keys()
def get_supported_vers_str():
'''
Get the supported cuda versions and join them as a string.
Used by shell script.
'''
return ' '.join(cuda_vers.keys())
def detect_cuda_ver():
'''
Detect the cuda version from current nvcc tool.
'''
nvcc_ver_bin = subprocess.check_output('nvcc --version', shell=True)
nvcc_ver = re.search('release ([.0-9]+), V([.0-9]+)', str(nvcc_ver_bin)).group(1)
if nvcc_ver in get_supported_vers():
return nvcc_ver
else:
raise Exception("Unsupported cuda version: {}, Please check your 'nvcc' version.".format(nvcc_ver))
def cudaver():
return 'cuda{}'.format(detect_cuda_ver())
if __name__ == "__main__":
num_args = len(sys.argv)
action = sys.argv[1].lower() if num_args > 1 else 'l'
if action =='c':
classifier = sys.argv[2].lower() if num_args > 2 else ''
check_classifier(classifier)
elif action == 'd':
print(detect_cuda_ver())
elif action == 'g':
print(get_classifier())
elif action == 'l':
print(get_supported_vers_str())
else:
print("Unsupported action: " + action)
| 25.386667 | 108 | 0.644958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.327206 |
4688b12d1b22b922d562bb53aed309b70230470c | 294 | py | Python | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
]
| null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
]
| null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
]
| null | null | null | # Generated by Django 3.0.2 on 2020-03-29 19:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hitchhikeapp', '0010_userdata_userid'),
]
operations = [
migrations.DeleteModel(
name='Dog',
),
]
| 17.294118 | 49 | 0.602041 | 209 | 0.710884 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.29932 |
4689c871990ae00114397708dbcc29fc3f6a6ac6 | 5,087 | py | Python | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
]
| 1 | 2020-10-16T16:37:04.000Z | 2020-10-16T16:37:04.000Z | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
]
| null | null | null | support/models.py | gurupratap-matharu/django-tickets-app | 8200af606e382f8806511c318961589f34375cdf | [
"MIT"
]
| null | null | null | import pytz
from datetime import date, time, datetime, timedelta
from django.core.exceptions import ValidationError
from django.db import models
START_HOUR = 9
END_HOUR = 18
workingHours = END_HOUR - START_HOUR
class Vendor(models.Model):
"""
    This class defines which vendors are allowed to raise tickets with our system.
"""
vendor = models.CharField(max_length=25)
def __str__(self):
return self.vendor
def no_past(value):
today = date.today()
if value < today:
raise ValidationError('Holiday Date cannot be in the past.')
class Holiday(models.Model):
"""
    Define the holidays or non-working days for each region.
"""
day = models.DateField(help_text="Enter the date of Holiday", validators=[no_past])
description = models.CharField(max_length=200, blank=True)
class Meta:
ordering = ('day',)
def __str__(self):
return "{} {}".format(self.day, self.description)
class Category(models.Model):
"""
We define the type of category to which a particular ticket belongs here.
"""
CATEGORY_CHOICES = (
('Website Down', 'Website Down'),
('Problem with WiFi', 'Problem with WiFi'),
('Server Down', 'Server Down'),
('Cannot Login', 'Cannot Login'),
('Critical Bug','Critical Bug'),
('Problem with Billing System','Problem with Billing System'),
)
category = models.CharField(max_length=50, choices=CATEGORY_CHOICES)
class Meta:
verbose_name_plural = "categories"
def __str__(self):
return self.category
class Ticket(models.Model):
"""
Our ticket models objects are created here and stored in the database
as a table with the attributes mentioned below.
"""
SEVERITY_CHOICES = (
(4, 1), # severity 1 to be resolved in 4 hours
(24, 2), # severity 2 to be resolved in 24 hours
(72, 3), # severity 3 to be resolved in 72 hours / 3 days
(168, 4), # severity 4 to be resolved in 168 hours / 7 days
(720, 5), # severity 5 to be resolved in 720 hours / 30 days
)
STATUS_CHOICES = (
('Issued', 'Issued'), # ticket raised but not assigned
('In Process', 'In Process'), # ticket assigned
('Resolved', 'Resolved'), # ticket resolved
('Cancelled', 'Cancelled'),
)
vendor = models.ForeignKey(Vendor, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
severity = models.PositiveIntegerField(choices=SEVERITY_CHOICES)
description = models.CharField(max_length=255)
status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='Issued')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expiry = models.DateTimeField(blank=True, null=True)
def save(self, *args, **kwargs):
"""
Here we over-ride the default `save` method to populate the expiry field
based on creation date, holidays and weekends.
"""
self.expiry = findExpiryDate(self.severity)
super().save(*args, **kwargs) # Call the "real" save() method.
def __str__(self):
return "{} | {} | {} ".format(self.vendor.vendor, self.category.category, self.created_at)
def findExpiryDate(sla):
"""
Finds the expiry date for a ticket based on
1. Severity of the ticket
2. Date of issue
"""
now = datetime.now()
flag = 1
# if ticket is received today between 00:00 hours to Start_Hour
# we reset the flag
if now.hour < START_HOUR:
flag = 0
# if ticket is received today between office hours then
# we simply deduct working hours left today from sla
if START_HOUR < now.hour < END_HOUR:
        hoursLeftToday = END_HOUR - now.hour
sla -= hoursLeftToday
tomorrow = date.today() + timedelta(days=flag)
shiftTime = time(START_HOUR,0,0)
dt = datetime.combine(tomorrow, shiftTime, pytz.utc)
dt = adjust_Weekends_And_Holidays(dt) # adjust incase we hit a weekend
# now we find the office days and office hours
# we would need to complete the sla
days, hours = divmod(sla, workingHours)
dt += timedelta(hours=hours)
dt = adjust_Weekends_And_Holidays(dt, days=days) # adjust incase we hit a weekend
return dt
def isWeekend(dt):
"""Finds if a date lies on a weekend or not. Returns a boolean"""
    if dt.weekday() < 5:  # Monday=0 ... Friday=4 are working days
return False
else:
return True
def isHoliday(dt):
"""Finds if a date lies on a holiday or not. Returns a boolean"""
return Holiday.objects.filter(day=dt.date()).exists()
def adjust_Weekends_And_Holidays(dt, days=0):
"""
    Adjust the datetime to a future datetime, accommodating for
    1. the number of working days needed
    2. skipping weekends and holidays
"""
while isWeekend(dt) or isHoliday(dt):
dt += timedelta(days=1)
while days:
dt += timedelta(days=1)
if isWeekend(dt) or isHoliday(dt):
continue
else:
days -= 1
return dt
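# --- Illustrative usage (an added sketch, not part of the original module) ---
# findExpiryDate() converts a severity SLA given in hours into a due datetime counted in
# office hours only, skipping weekends and Holiday entries, e.g. a hypothetical call:
#   due = findExpiryDate(24)   # severity-2 ticket: due 24 working hours from now
# Ticket.save() applies it automatically via `self.expiry = findExpiryDate(self.severity)`.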
| 30.279762 | 98 | 0.649892 | 2,960 | 0.581875 | 0 | 0 | 0 | 0 | 0 | 0 | 1,966 | 0.386475 |
4689fd0a503a48da1fc4fb1000e346ebf2f7be93 | 605 | py | Python | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
]
| 7 | 2020-05-07T08:13:44.000Z | 2021-12-17T07:33:51.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
]
| 17 | 2019-11-29T23:17:26.000Z | 2020-12-20T15:47:17.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
]
| 1 | 2020-12-17T22:44:21.000Z | 2020-12-17T22:44:21.000Z | from hypothesis import given
from tests.port_tests.hints import (PortedBoundingBox,
PortedPoint)
from tests.utils import equivalence
from . import strategies
@given(strategies.points)
def test_basic(point: PortedPoint) -> None:
assert isinstance(point.bounding_box, PortedBoundingBox)
@given(strategies.points, strategies.points)
def test_bijection(first_point: PortedPoint,
second_point: PortedPoint) -> None:
assert equivalence(first_point == second_point,
first_point.bounding_box == second_point.bounding_box)
| 31.842105 | 77 | 0.707438 | 0 | 0 | 0 | 0 | 404 | 0.667769 | 0 | 0 | 0 | 0 |
468a1af4f4a7334446b0e0152db92174a4f3295b | 424 | py | Python | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
]
| null | null | null | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
]
| null | null | null | test/conftest.py | alexandonian/lightning | 90350fd454cd7a51c35adadf5b9753868ac6dccd | [
"Apache-2.0"
]
| null | null | null | import pytest
# import station
def pytest_addoption(parser):
parser.addoption("--engine", action="store", default="local",
help="engine to run tests with")
@pytest.fixture(scope='module')
def eng(request):
engine = request.config.getoption("--engine")
if engine == 'local':
return None
if engine == 'spark':
station.start(spark=True)
return station.engine()
| 23.555556 | 66 | 0.625 | 0 | 0 | 0 | 0 | 237 | 0.558962 | 0 | 0 | 98 | 0.231132 |
468ce700bfaf82c4969d9da9bf954d79c010ee00 | 7,960 | py | Python | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
]
| 1 | 2020-07-24T10:59:17.000Z | 2020-07-24T10:59:17.000Z | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
]
| null | null | null | pytorch_translate/dual_learning/dual_learning_models.py | dzhulgakov/translate | 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
import logging
import torch.nn as nn
from fairseq import checkpoint_utils
from fairseq.models import BaseFairseqModel, register_model
from pytorch_translate import rnn
from pytorch_translate.rnn import (
LSTMSequenceEncoder,
RNNDecoder,
RNNEncoder,
RNNModel,
base_architecture,
)
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
logger = logging.getLogger(__name__)
@register_model("dual_learning")
class DualLearningModel(BaseFairseqModel):
"""
An architecture to jointly train primal model and dual model by leveraging
distribution duality, which exist for both parallel data and monolingual
data.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__()
self.args = args
self.task_keys = ["primal", "dual"]
self.models = nn.ModuleDict(
{"primal": primal_model, "dual": dual_model, "lm": lm_model}
)
def forward(self, src_tokens, src_lengths, prev_output_tokens=None):
"""
If batch is monolingual, need to run beam decoding to generate
fake prev_output_tokens.
"""
# TODO: pass to dual model too
primal_encoder_out = self.models["primal"].encoder(src_tokens, src_lengths)
primal_decoder_out = self.models["primal"].decoder(
prev_output_tokens, primal_encoder_out
)
return primal_decoder_out
def max_positions(self):
return {
"primal_source": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_source": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
"primal_parallel": (
self.models["primal"].encoder.max_positions(),
self.models["primal"].decoder.max_positions(),
),
"dual_parallel": (
self.models["dual"].encoder.max_positions(),
self.models["dual"].decoder.max_positions(),
),
}
@register_model("dual_learning_rnn")
class RNNDualLearningModel(DualLearningModel):
"""Train two models for a task and its duality jointly.
This class uses RNN arch, but can be extended to take arch as an arument.
This class takes translation as a task, but the framework is intended
to be general enough to be applied to other tasks as well.
"""
def __init__(self, args, task, primal_model, dual_model, lm_model=None):
super().__init__(args, task, primal_model, dual_model, lm_model)
@staticmethod
def add_args(parser):
rnn.RNNModel.add_args(parser)
parser.add_argument(
"--unsupervised-dual",
default=False,
action="store_true",
help="Train with dual loss from monolingual data.",
)
parser.add_argument(
"--supervised-dual",
default=False,
action="store_true",
help="Train with dual loss from parallel data.",
)
@classmethod
def build_model(cls, args, task):
""" Build both the primal and dual models.
For simplicity, both models share the same arch, i.e. the same model
params would be used to initialize both models.
Support for different models/archs would be added in further iterations.
"""
base_architecture(args)
if args.sequence_lstm:
encoder_class = LSTMSequenceEncoder
else:
encoder_class = RNNEncoder
decoder_class = RNNDecoder
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_encoder = encoder_class(
task.primal_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
primal_decoder = decoder_class(
src_dict=task.primal_src_dict,
dst_dict=task.primal_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
primal_task = PytorchTranslateTask(
args, task.primal_src_dict, task.primal_tgt_dict
)
primal_model = rnn.RNNModel(primal_task, primal_encoder, primal_decoder)
if args.pretrained_forward_checkpoint:
pretrained_forward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_forward_checkpoint
)
primal_model.load_state_dict(pretrained_forward_state["model"], strict=True)
print(
f"Loaded pretrained primal model from {args.pretrained_forward_checkpoint}"
)
encoder_embed_tokens, decoder_embed_tokens = RNNModel.build_embed_tokens(
args, task.dual_src_dict, task.dual_tgt_dict
)
dual_encoder = encoder_class(
task.dual_src_dict,
embed_dim=args.encoder_embed_dim,
embed_tokens=encoder_embed_tokens,
cell_type=args.cell_type,
num_layers=args.encoder_layers,
hidden_dim=args.encoder_hidden_dim,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
residual_level=args.residual_level,
bidirectional=bool(args.encoder_bidirectional),
)
dual_decoder = decoder_class(
src_dict=task.dual_src_dict,
dst_dict=task.dual_tgt_dict,
embed_tokens=decoder_embed_tokens,
vocab_reduction_params=args.vocab_reduction_params,
encoder_hidden_dim=args.encoder_hidden_dim,
embed_dim=args.decoder_embed_dim,
out_embed_dim=args.decoder_out_embed_dim,
cell_type=args.cell_type,
num_layers=args.decoder_layers,
hidden_dim=args.decoder_hidden_dim,
attention_type=args.attention_type,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
residual_level=args.residual_level,
averaging_encoder=args.averaging_encoder,
)
dual_task = PytorchTranslateTask(args, task.dual_src_dict, task.dual_tgt_dict)
dual_model = rnn.RNNModel(dual_task, dual_encoder, dual_decoder)
if args.pretrained_backward_checkpoint:
pretrained_backward_state = checkpoint_utils.load_checkpoint_to_cpu(
args.pretrained_backward_checkpoint
)
dual_model.load_state_dict(pretrained_backward_state["model"], strict=True)
print(
f"Loaded pretrained dual model from {args.pretrained_backward_checkpoint}"
)
        # TODO (T36875783): instantiate a language model
lm_model = None
return RNNDualLearningModel(args, task, primal_model, dual_model, lm_model)
| 38.829268 | 91 | 0.650377 | 7,440 | 0.934673 | 0 | 0 | 7,510 | 0.943467 | 0 | 0 | 1,454 | 0.182663 |
468d721e5802a550fe36c1b0efccab7310faf51c | 697 | py | Python | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
]
| null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
]
| null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
]
| null | null | null | from ._utils import construct_dia, construct_hth, construct_sampling_matrix
from .bsgda import bsgda, computing_sets, recon_bsgda, solving_set_covering
from .ess import ess, ess_sampling, recon_ess
from .fastgsss import fastgsss, recon_fastssss
from .rsbs import cheby_coeff4ideal_band_pass, estimate_lk, recon_rsbs, rsbs
__all__ = [
"ess",
"ess_sampling",
"bsgda",
"computing_sets",
"solving_set_covering",
"cheby_coeff4ideal_band_pass",
"estimate_lk",
"rsbs",
"fastgsss",
# reconstruction
"recon_fastssss",
"recon_bsgda",
"recon_ess",
"recon_rsbs",
# utils
"construct_sampling_matrix",
"construct_hth",
"construct_dia",
]
| 25.814815 | 76 | 0.71736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.364419 |
468df8250e372c77ba85fdae3eaf93df4bca1fda | 3,382 | py | Python | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
]
| 1 | 2022-03-15T06:08:14.000Z | 2022-03-15T06:08:14.000Z | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
]
| 58 | 2021-10-01T12:43:37.000Z | 2021-12-08T22:58:43.000Z | h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_glm.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
]
| null | null | null | import tempfile
import os
import sys
sys.path.insert(1,"../../")
import h2o
from h2o.estimators import H2OGeneralizedLinearEstimator, H2OGenericEstimator
from tests import pyunit_utils
from tests.testdir_generic_model import compare_output, Capturing, compare_params
def test(x, y, output_test, strip_part, algo_name, generic_algo_name, family):
# GLM
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/testng/airlines_train.csv"))
glm = H2OGeneralizedLinearEstimator(nfolds = 2, family = family, max_iterations=2) # alpha = 1, lambda_ = 1, bad values, use default
glm.train(x = x, y = y, training_frame=airlines, validation_frame=airlines, )
print(glm)
with Capturing() as original_output:
glm.show()
original_model_filename = tempfile.mkdtemp()
original_model_filename = glm.download_mojo(original_model_filename)
generic_mojo_model_from_file = H2OGenericEstimator.from_file(original_model_filename)
assert generic_mojo_model_from_file is not None
print(generic_mojo_model_from_file)
compare_params(glm, generic_mojo_model_from_file)
with Capturing() as generic_output:
generic_mojo_model_from_file.show()
output_test(str(original_output), str(generic_output), strip_part, algo_name, generic_algo_name)
predictions = generic_mojo_model_from_file.predict(airlines)
assert predictions is not None
assert predictions.nrows == 24421
assert generic_mojo_model_from_file._model_json["output"]["model_summary"] is not None
assert len(generic_mojo_model_from_file._model_json["output"]["model_summary"]._cell_values) > 0
assert generic_mojo_model_from_file._model_json["output"]["variable_importances"] is not None
assert len(generic_mojo_model_from_file._model_json["output"]["variable_importances"]._cell_values) > 0
generic_mojo_filename = tempfile.mkdtemp("zip", "genericMojo");
generic_mojo_filename = generic_mojo_model_from_file.download_mojo(path=generic_mojo_filename)
assert os.path.getsize(generic_mojo_filename) == os.path.getsize(original_model_filename)
def mojo_model_test_binomial():
test(["Origin", "Dest"], "IsDepDelayed", compare_output, 'GLM Model: summary', 'ModelMetricsBinomialGLM: glm',
'ModelMetricsBinomialGLMGeneric: generic', 'binomial')
def mojo_model_test_regression():
test(["Origin", "Dest"], "Distance", compare_output, 'GLM Model: summary', 'ModelMetricsRegressionGLM: glm',
'ModelMetricsRegressionGLMGeneric: generic', 'gaussian')
def mojo_model_test_multinomial():
test(["Origin", "Distance"], "Dest", compare_output, 'GLM Model: summary', 'ModelMetricsMultinomialGLM: glm',
'ModelMetricsMultinomialGLMGeneric: generic', 'multinomial')
def mojo_model_test_ordinal():
test(["Origin", "Distance", "IsDepDelayed"], "fDayOfWeek", compare_output, 'GLM Model: summary',
'ModelMetricsOrdinalGLM: glm',
'ModelMetricsOrdinalGLMGeneric: generic', 'ordinal')
if __name__ == "__main__":
pyunit_utils.standalone_test(mojo_model_test_binomial)
pyunit_utils.standalone_test(mojo_model_test_multinomial)
pyunit_utils.standalone_test(mojo_model_test_regression)
pyunit_utils.standalone_test(mojo_model_test_ordinal)
else:
mojo_model_test_binomial()
mojo_model_test_multinomial()
mojo_model_test_regression()
mojo_model_test_ordinal()
| 46.328767 | 136 | 0.77262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 767 | 0.226789 |
468f2faee1688669d20b891ff6fb1ee641d68824 | 9,160 | py | Python | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
]
| 35 | 2015-12-03T16:46:11.000Z | 2022-01-19T10:50:35.000Z | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
]
| 57 | 2015-12-01T00:34:39.000Z | 2022-03-25T12:00:50.000Z | test/HPE3ParClient_base.py | jyotsnalothe/python-3parclient | aa6ece402c2e3e83cae102f04ffc83f9e2421bf3 | [
"Apache-2.0"
]
| 64 | 2016-04-24T00:22:43.000Z | 2021-08-06T09:29:38.000Z | # (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test base class of 3PAR Client."""
import os
import sys
import unittest
import subprocess
import time
import inspect
from pytest_testconfig import config
import datetime
from functools import wraps
from hpe3parclient import client, file_client
TIME = datetime.datetime.now().strftime('%H%M%S')
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urllib2
from urlparse import urlparse
class HPE3ParClientBaseTestCase(unittest.TestCase):
user = config['TEST']['user']
password = config['TEST']['pass']
flask_url = config['TEST']['flask_url']
url_3par = config['TEST']['3par_url']
debug = config['TEST']['debug'].lower() == 'true'
unitTest = config['TEST']['unit'].lower() == 'true'
port = None
remote_copy = config['TEST']['run_remote_copy'].lower() == 'true'
run_remote_copy = remote_copy and not unitTest
if run_remote_copy:
secondary_user = config['TEST_REMOTE_COPY']['user']
secondary_password = config['TEST_REMOTE_COPY']['pass']
secondary_url_3par = config['TEST_REMOTE_COPY']['3par_url']
secondary_target_name = config['TEST_REMOTE_COPY']['target_name']
ssh_port = None
if 'ssh_port' in config['TEST']:
ssh_port = int(config['TEST']['ssh_port'])
elif unitTest:
ssh_port = 2200
else:
ssh_port = 22
# Don't setup SSH unless needed. It slows things down.
withSSH = False
if 'domain' in config['TEST']:
DOMAIN = config['TEST']['domain']
else:
DOMAIN = 'UNIT_TEST_DOMAIN'
if 'cpg_ldlayout_ha' in config['TEST']:
CPG_LDLAYOUT_HA = int(config['TEST']['cpg_ldlayout_ha'])
if 'disk_type' in config['TEST']:
DISK_TYPE = int(config['TEST']['disk_type'])
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA,
'diskPatterns': [{'diskType':
DISK_TYPE}]}}
else:
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA}}
else:
CPG_LDLAYOUT_HA = None
CPG_OPTIONS = {'domain': DOMAIN}
if 'known_hosts_file' in config['TEST']:
known_hosts_file = config['TEST']['known_hosts_file']
else:
known_hosts_file = None
if 'missing_key_policy' in config['TEST']:
missing_key_policy = config['TEST']['missing_key_policy']
else:
missing_key_policy = None
def setUp(self, withSSH=False, withFilePersona=False):
self.withSSH = withSSH
self.withFilePersona = withFilePersona
cwd = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
if self.unitTest:
self.printHeader('Using flask ' + self.flask_url)
parsed_url = urlparse(self.flask_url)
userArg = '-user=%s' % self.user
passwordArg = '-password=%s' % self.password
portArg = '-port=%s' % parsed_url.port
script = 'HPE3ParMockServer_flask.py'
path = "%s/%s" % (cwd, script)
try:
self.mockServer = subprocess.Popen([sys.executable,
path,
userArg,
passwordArg,
portArg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE
)
except Exception:
pass
time.sleep(1)
if self.withFilePersona:
self.cl = file_client.HPE3ParFilePersonaClient(self.flask_url)
else:
self.cl = client.HPE3ParClient(self.flask_url)
if self.withSSH:
self.printHeader('Using paramiko SSH server on port %s' %
self.ssh_port)
ssh_script = 'HPE3ParMockServer_ssh.py'
ssh_path = "%s/%s" % (cwd, ssh_script)
self.mockSshServer = subprocess.Popen([sys.executable,
ssh_path,
str(self.ssh_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
time.sleep(1)
else:
if withFilePersona:
self.printHeader('Using 3PAR %s with File Persona' %
self.url_3par)
self.cl = file_client.HPE3ParFilePersonaClient(self.url_3par)
else:
self.printHeader('Using 3PAR ' + self.url_3par)
self.cl = client.HPE3ParClient(self.url_3par)
if self.withSSH:
# This seems to slow down the test cases, so only use this when
# requested
if self.unitTest:
# The mock SSH server can be accessed at 0.0.0.0.
ip = '0.0.0.0'
else:
parsed_3par_url = urlparse(self.url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
try:
# Now that we don't do keep-alive, the conn_timeout needs to
# be set high enough to avoid sometimes slow response in
# the File Persona tests.
self.cl.setSSHOptions(
ip,
self.user,
self.password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
# Setup remote copy target
if self.run_remote_copy:
parsed_3par_url = urlparse(self.secondary_url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
self.secondary_cl = client.HPE3ParClient(self.secondary_url_3par)
try:
self.secondary_cl.setSSHOptions(
ip,
self.secondary_user,
self.secondary_password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
self.secondary_cl.login(self.secondary_user,
self.secondary_password)
if self.debug:
self.cl.debug_rest(True)
self.cl.login(self.user, self.password)
if not self.port:
ports = self.cl.getPorts()
ports = [p for p in ports['members']
if p['linkState'] == 4 and # Ready
('device' not in p or not p['device']) and
p['mode'] == self.cl.PORT_MODE_TARGET]
self.port = ports[0]['portPos']
def tearDown(self):
self.cl.logout()
if self.run_remote_copy:
self.secondary_cl.logout()
if self.unitTest:
self.mockServer.kill()
if self.withSSH:
self.mockSshServer.kill()
def print_header_and_footer(func):
"""Decorator to print header and footer for unit tests."""
@wraps(func)
def wrapper(*args, **kwargs):
test = args[0]
test.printHeader(unittest.TestCase.id(test))
result = func(*args, **kwargs)
test.printFooter(unittest.TestCase.id(test))
return result
return wrapper
def printHeader(self, name):
print("\n##Start testing '%s'" % name)
def printFooter(self, name):
print("##Completed testing '%s\n" % name)
def findInDict(self, dic, key, value):
for i in dic:
if key in i and i[key] == value:
return True
| 37.235772 | 78 | 0.533188 | 8,085 | 0.882642 | 0 | 0 | 260 | 0.028384 | 0 | 0 | 2,043 | 0.223035 |
468fc07953c8147b25a2f944027c5638901e823c | 4,452 | py | Python | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
]
| null | null | null | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
]
| null | null | null | test/drivers/second_quantization/hdf5d/test_driver_hdf5.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
]
| null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver HDF5 """
import os
import pathlib
import shutil
import tempfile
import unittest
import warnings
from test import QiskitNatureTestCase
from test.drivers.second_quantization.test_driver import TestDriver
from qiskit_nature.drivers.second_quantization import HDF5Driver
from qiskit_nature.drivers import QMolecule
from qiskit_nature.properties.second_quantization.electronic import ElectronicStructureDriverResult
class TestDriverHDF5(QiskitNatureTestCase, TestDriver):
"""HDF5 Driver tests."""
def setUp(self):
super().setUp()
driver = HDF5Driver(
hdf5_input=self.get_resource_path(
"test_driver_hdf5.hdf5", "drivers/second_quantization/hdf5d"
)
)
self.driver_result = driver.run()
def test_convert(self):
"""Test the legacy-conversion method."""
legacy_file_path = self.get_resource_path(
"test_driver_hdf5_legacy.hdf5", "drivers/second_quantization/hdf5d"
)
with self.subTest("replace=True"):
# pylint: disable=consider-using-with
tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hdf5")
tmp_file.close()
os.unlink(tmp_file.name)
shutil.copy(legacy_file_path, tmp_file.name)
try:
driver = HDF5Driver(tmp_file.name)
# replacing file won't trigger deprecation on run
driver.convert(replace=True)
driver.run()
finally:
os.unlink(tmp_file.name)
msg_mol_ref = (
"The HDF5Driver.run with legacy HDF5 file method is deprecated as of version 0.4.0 "
"and will be removed no sooner than 3 months after the release "
". Your HDF5 file contains the legacy QMolecule object! You should "
"consider converting it to the new property framework. See also HDF5Driver.convert."
)
with self.subTest("replace=False"):
# pylint: disable=consider-using-with
tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hdf5")
tmp_file.close()
new_file_name = pathlib.Path(tmp_file.name).with_name(
str(pathlib.Path(tmp_file.name).stem) + "_new.hdf5"
)
os.unlink(tmp_file.name)
shutil.copy(legacy_file_path, tmp_file.name)
try:
driver = HDF5Driver(tmp_file.name)
# not replacing file will trigger deprecation on run
driver.convert(replace=False)
with warnings.catch_warnings(record=True) as c_m:
warnings.simplefilter("always")
driver.run()
self.assertEqual(str(c_m[0].message), msg_mol_ref)
# using new file won't trigger deprecation
HDF5Driver(new_file_name).run()
finally:
os.unlink(tmp_file.name)
os.unlink(new_file_name)
class TestDriverHDF5Legacy(QiskitNatureTestCase, TestDriver):
"""HDF5 Driver legacy file-support tests."""
def setUp(self):
super().setUp()
hdf5_file = self.get_resource_path(
"test_driver_hdf5_legacy.hdf5", "drivers/second_quantization/hdf5d"
)
# Using QMolecule directly here to avoid the deprecation on HDF5Driver.run method
# to be triggered and let it be handled on the method test_convert
# Those deprecation messages are shown only once and this one could prevent
# the test_convert one to show if called first.
molecule = QMolecule(hdf5_file)
molecule.load()
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.driver_result = ElectronicStructureDriverResult.from_legacy_driver_result(molecule)
warnings.filterwarnings("default", category=DeprecationWarning)
if __name__ == "__main__":
unittest.main()
| 39.75 | 99 | 0.655885 | 3,484 | 0.78257 | 0 | 0 | 0 | 0 | 0 | 0 | 1,667 | 0.374438 |
469076a38899f95c1e7c8c2bdfb61492327f8f5d | 11,269 | py | Python | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
]
| null | null | null | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
]
| null | null | null | 01_test_pytorch.py | yokaji/dcase2021_task2_baseline_ae | 47b621e6abd0502fed2123c31c61b081bce5c223 | [
"MIT"
]
| null | null | null | ########################################################################
# import default libraries
########################################################################
import os
import csv
import sys
import gc
########################################################################
########################################################################
# import additional libraries
########################################################################
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
# from import
from tqdm import tqdm
from sklearn import metrics
try:
from sklearn.externals import joblib
except:
import joblib
# original lib
import common as com
from pytorch_model import AutoEncoder
########################################################################
########################################################################
# load parameter.yaml
########################################################################
param = com.yaml_load()
#######################################################################
########################################################################
# output csv file
########################################################################
def save_csv(save_file_path,
save_data):
with open(save_file_path, "w", newline="") as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(save_data)
########################################################################
########################################################################
# main 01_test.py
########################################################################
if __name__ == "__main__":
####################################################################
# set device
####################################################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device : {}".format(device))
####################################################################
# check mode
# "development": mode == True
# "evaluation": mode == False
mode = com.command_line_chk()
if mode is None:
sys.exit(-1)
# make output result directory
os.makedirs(param["result_directory"], exist_ok=True)
# load base directory
dirs = com.select_dirs(param=param, mode=mode)
# initialize lines in csv for AUC and pAUC
csv_lines = []
if mode:
performance_over_all = []
# loop of the base directory
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print("[{idx}/{total}] {target_dir}".format(target_dir=target_dir, idx=idx+1, total=len(dirs)))
machine_type = os.path.split(target_dir)[1]
print("============== MODEL LOAD ==============")
# load model file
model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
machine_type=machine_type)
if not os.path.exists(model_file):
com.logger.error("{} model not found ".format(machine_type))
sys.exit(-1)
input_channel = param["feature"]["n_mels"] * param["feature"]["n_frames"]
model = AutoEncoder(input_channel).to(device)
model.eval()
if device.type == "cuda":
model.load_state_dict(torch.load(model_file))
elif device.type == "cpu":
model.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
# load anomaly score distribution for determining threshold
score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(model=param["model_directory"],
machine_type=machine_type)
shape_hat, loc_hat, scale_hat = joblib.load(score_distr_file_path)
# determine threshold for decision
decision_threshold = scipy.stats.gamma.ppf(q=param["decision_threshold"], a=shape_hat, loc=loc_hat, scale=scale_hat)
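        # The threshold is the q-th percentile of the gamma distribution fitted to the
        # training reconstruction errors, with q taken from param["decision_threshold"]
        # (typically 0.9 in the baseline parameter.yaml, if unchanged).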
if mode:
# results for each machine type
csv_lines.append([machine_type])
csv_lines.append(["section", "domain", "AUC", "pAUC", "precision", "recall", "F1 score"])
performance = []
dir_names = ["source_test", "target_test"]
for dir_name in dir_names:
#list machine id
section_names = com.get_section_names(target_dir, dir_name=dir_name)
for section_name in section_names:
# load test file
files, y_true = com.file_list_generator(target_dir=target_dir,
section_name=section_name,
dir_name=dir_name,
mode=mode)
# setup anomaly score file path
anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
anomaly_score_list = []
# setup decision result file path
decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
decision_result_list = []
print("\n============== BEGIN TEST FOR A SECTION ==============")
y_pred = [0. for k in files]
for file_idx, file_path in tqdm(enumerate(files), total=len(files)):
try:
data = com.file_to_vectors(file_path,
n_mels=param["feature"]["n_mels"],
n_frames=param["feature"]["n_frames"],
n_fft=param["feature"]["n_fft"],
hop_length=param["feature"]["hop_length"],
power=param["feature"]["power"])
except:
com.logger.error("File broken!!: {}".format(file_path))
data = torch.tensor(data, dtype=torch.float32).to(device)
reconst = model(data)
mseloss = nn.functional.mse_loss(data.detach(), reconst.detach())
y_pred[file_idx] = mseloss.item()
# store anomaly scores
anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
# store decision results
if y_pred[file_idx] > decision_threshold:
decision_result_list.append([os.path.basename(file_path), 1])
else:
decision_result_list.append([os.path.basename(file_path), 0])
# output anomaly scores
save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
# output decision results
save_csv(save_file_path=decision_result_csv, save_data=decision_result_list)
com.logger.info("decision result -> {}".format(decision_result_csv))
if mode:
# append AUC and pAUC to lists
auc = metrics.roc_auc_score(y_true, y_pred)
p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
tn, fp, fn, tp = metrics.confusion_matrix(y_true, [1 if x > decision_threshold else 0 for x in y_pred]).ravel()
prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
csv_lines.append([section_name.split("_", 1)[1], dir_name.split("_", 1)[0], auc, p_auc, prec, recall, f1])
performance.append([auc, p_auc, prec, recall, f1])
performance_over_all.append([auc, p_auc, prec, recall, f1])
com.logger.info("AUC : {}".format(auc))
com.logger.info("pAUC : {}".format(p_auc))
com.logger.info("precision : {}".format(prec))
com.logger.info("recall : {}".format(recall))
com.logger.info("F1 score : {}".format(f1))
print("\n============ END OF TEST FOR A SECTION ============")
if mode:
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance, dtype=float), axis=0)
csv_lines.append(["arithmetic mean", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean", ""] + list(hmean_performance))
csv_lines.append([])
del data
del model
if mode:
csv_lines.append(["", "", "AUC", "pAUC", "precision", "recall", "F1 score"])
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance_over_all, dtype=float), axis=0)
csv_lines.append(["arithmetic mean over all machine types, sections, and domains", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance_over_all, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean over all machine types, sections, and domains", ""] + list(hmean_performance))
csv_lines.append([])
# output results
result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
com.logger.info("results -> {}".format(result_path))
save_csv(save_file_path=result_path, save_data=csv_lines)
| 50.308036 | 151 | 0.460112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,309 | 0.293637 |
4690da1c3b97e01a8795122d75752b424704a346 | 1,706 | py | Python | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
]
| null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
]
| null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
]
| 1 | 2021-12-20T11:44:51.000Z | 2021-12-20T11:44:51.000Z | #!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
ack_list = []
def set_load(packet, load):
packet[scapy.Raw].load = load
del packet[scapy.IP].len
del packet[scapy.IP].chksum
del packet[scapy.TCP].chksum
return packet
def process_packet(packet):
"""Modify downloads files on the fly while target uses HTTP/HTTPS.
Do not forget to choose the port you will be using in line 22/29.
Do not forget to modify line 24 and 35 and uncomment them afterwards."""
scapy_packet = scapy.IP (packet.get_payload())
if scapy_packet.haslayer(scapy.Raw):
        # Choose the port you are intercepting here: 80 for plain HTTP, 10000 if the
        # traffic is redirected through a local proxy.
        if scapy_packet[scapy.TCP].dport == 80:
            # print("HTTP Request")
            # Replace 10.0.2.15 with the IP of the web server hosting the replacement file.
            if ".exe" in scapy_packet[scapy.Raw].load and "10.0.2.15" not in scapy_packet[scapy.Raw].load:
print("Captured .exe file in the Request packet.")
ack_list.append(scapy_packet[scapy.TCP].ack)
# print(scapy_packet.show())
        # Use the same port as in the request check above.
        elif scapy_packet[scapy.TCP].sport == 80:
# print("HTTP Response")
if scapy_packet[scapy.TCP].seq in ack_list:
ack_list.remove(scapy_packet[scapy.TCP].seq)
print("Replacing the file.")
# print(scapy_packet.show())
                # The Location header should point at the replacement executable hosted on your web server.
                modified_packet = set_load(scapy_packet, "HTTP/1.1 301 Moved Permanently\nLocation: http://10.0.2.15/Evil%20Files/lazagne.exe\n\n")
packet.set_payload(str(modified_packet))
packet.accept()
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
| 37.086957 | 193 | 0.654162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.40211 |
46917396382ee9f0addf54bb780182338681e694 | 1,757 | py | Python | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
]
| null | null | null | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
]
| null | null | null | tmpmodels.py | firaan1/iamgrateful | 445260313ab94dfdaf310a6766e848c8df94b624 | [
"MIT"
]
| null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Text, ForeignKey, DateTime, func, Boolean
from sqlalchemy.orm import relation, sessionmaker, relationship, backref
from datetime import datetime
import os
# Database
DATABASE = 'sqlite:///db.sqlite3'
DEBUG = True
# ORM
Base = declarative_base()
# model
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True, autoincrement=True)
passcode = Column(Integer, nullable=False)
question = Column(String)
answer = Column(String)
def __init__(self, passcode):
self.passcode = passcode
def __repr__(self):
        return '<User %s>' % self.id
class Memory(Base):
__tablename__ = 'memory'
id = Column(Integer, primary_key=True, autoincrement=True)
happiness = Column(Integer)
    date = Column(DateTime, default=datetime.now)  # pass the callable so each row gets its own timestamp
things = relationship('Thing', secondary = 'memory_thing_link')
def __repr__(self):
        return '<Memory %s>' % self.date
class Thing(Base):
__tablename__ = 'thing'
id = Column(Integer, primary_key=True, autoincrement=True)
text = Column(Text)
def __repr__(self):
        return '<Item %s>' % self.text
class MemoryThingLink(Base):
__tablename__ = 'memory_thing_link'
memory_id = Column(Integer, ForeignKey('memory.id'), primary_key=True)
thing_id = Column(Integer, ForeignKey('thing.id'), primary_key=True)
# if __name__ == '__main__':
# connection
engine = create_engine(DATABASE, echo = DEBUG)
session_factory = sessionmaker(bind = engine)
session = session_factory()
# initialize database
if not os.path.exists('db.sqlite3'):
Base.metadata.create_all(engine)
| 30.824561 | 89 | 0.712009 | 1,088 | 0.619237 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.136027 |
4691f105e7b4e6d56ef7ec3a85a8060f44a867c1 | 579 | py | Python | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
]
| 6 | 2018-05-15T05:08:52.000Z | 2021-12-23T12:31:28.000Z | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
]
| 1 | 2022-01-01T15:08:26.000Z | 2022-01-01T15:08:36.000Z | doc/filters.py | CargobaseDev/openpyxl | 782ba90d26be452379fc06f45227e12bca9bade4 | [
"MIT"
]
| 6 | 2020-03-23T15:59:14.000Z | 2021-09-18T09:54:57.000Z | from openpyxl import Workbook
wb = Workbook()
ws = wb.active
data = [
["Fruit", "Quantity"],
["Kiwi", 3],
["Grape", 15],
["Apple", 3],
["Peach", 3],
["Pomegranate", 3],
["Pear", 3],
["Tangerine", 3],
["Blueberry", 3],
["Mango", 3],
["Watermelon", 3],
["Blackberry", 3],
["Orange", 3],
["Raspberry", 3],
["Banana", 3]
]
for r in data:
ws.append(r)
ws.auto_filter.ref = "A1:B15"
ws.auto_filter.add_filter_column(0, ["Kiwi", "Apple", "Mango"])
ws.auto_filter.add_sort_condition("B2:B15")
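# Note: openpyxl only stores the filter and sort definitions in the worksheet;
# rows are not actually hidden or reordered until the file is opened in Excel.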
wb.save("filtered.xlsx")
| 18.09375 | 63 | 0.544041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.33506 |
4692314ed6b0b1046dcbfa825a3d464141899b16 | 1,150 | py | Python | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
]
| null | null | null | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
]
| null | null | null | Bleak/two_devices.py | mbdev2/MIS_FindMyProfessor | fa6a8e9b787013b2eadbc021b0d74210de689872 | [
"MIT"
]
| null | null | null | from bleak import BleakClient
import asyncio
import functools
notify_uuid = "0000{0:04x}-0000-1000-8000-00805f9b34fb".format(0x2A19)  # Battery Level characteristic
def callback(sender, data, mac_address):
#data = bytearray(data)
dataint = int.from_bytes(data, byteorder='little', signed=True)
print(mac_address, dataint)
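    # 0x2A19 is the standard GATT Battery Level characteristic: the payload is a single
    # byte, so dataint is the battery percentage reported by the notifying device.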
def run(addresses):
loop = asyncio.get_event_loop()
tasks = asyncio.gather(*(connect_to_device(address) for address in addresses))
loop.run_until_complete(tasks)
async def connect_to_device(address):
print("starting", address, "loop")
async with BleakClient(address, timeout=10.0) as client:
print("connect to", address)
try:
#model_number = await client.read_gatt_char(address)
await client.start_notify(notify_uuid, functools.partial(callback, mac_address=address))
await asyncio.sleep(1000.0)
await client.stop_notify(notify_uuid)
except Exception as e:
print(e)
print("disconnect from", address)
if __name__ == "__main__":
run(
["96E8409A-F2EB-4029-B3DC-615FADE0C838","D31CB0CA-890E-476B-80D9-80ED8A3AA69A"]
)
| 27.380952 | 100 | 0.691304 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.466087 | 252 | 0.21913 |
4692c2ff5367cf7fc52d9c66cbd5187236f80e7d | 655 | py | Python | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
]
| null | null | null | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
]
| null | null | null | binary search tree insertion.py | buhuhaha/python | 4ff72ac711f0948ae5bcb0886d68e8df77fe515b | [
"MIT"
]
| null | null | null |
class Node:
left = right = None
def __init__(self, data):
self.data = data
def inorder(root):
if root is None:
return
inorder(root.left)
print(root.data, end=' ')
inorder(root.right)
def insert(root, key):
if root is None:
return Node(key)
if key < root.data:
root.left = insert(root.left, key)
else:
root.right = insert(root.right, key)
return root
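# insert() walks down the tree recursively: keys smaller than the current node go
# into the left subtree, all other keys (including duplicates) go to the right.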
def constructBST(keys):
root = None
for key in keys:
root = insert(root, key)
return root
if __name__ == '__main__':
keys = [15, 10, 20, 8, 12, 16, 25]
root = constructBST(keys)
inorder(root) | 22.586207 | 44 | 0.58626 | 90 | 0.137405 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.019847 |
469393ea6c4b1c5c7b78ca579da1a18fef848cb3 | 625 | py | Python | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
]
| null | null | null | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
]
| 1 | 2020-03-24T17:29:40.000Z | 2020-03-24T17:29:40.000Z | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
]
| 1 | 2020-03-24T16:41:31.000Z | 2020-03-24T16:41:31.000Z | from ndebug import env_helpers
def test_inspect_ops(mocker):
mocker.patch.dict('os.environ', {'DEBUG_COLORS': 'no',
'DEBUG_DEPTH': '10',
'DEBUG_SHOW_HIDDEN': 'enabled',
'DEBUG_SOMETHING': 'null'})
actual = env_helpers.options()
assert actual == {'colors': False, 'depth': 10, 'show_hidden': True, 'something': None}
def test_load_and_save():
actual = env_helpers.load()
assert actual == ''
env_helpers.save('test:data')
actual = env_helpers.load()
assert actual == 'test:data'
| 31.25 | 91 | 0.5536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.2576 |
4693fb42192c5502c57c49f8441c5cf7ba66b002 | 1,709 | py | Python | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
]
| 1 | 2021-05-07T07:40:27.000Z | 2021-05-07T07:40:27.000Z | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
]
| null | null | null | relay_lib_seeed_test_2.py | johnwargo/seeed-studio-relay-v2 | ce1b84b0f0001baa7ec4729dc2967531da3bdc22 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
'''*****************************************************************************************************************
Seeed Studio Relay Board Library V2
Test Application #2
By John M. Wargo (https://www.johnwargo.com)
********************************************************************************************************************'''
import sys
import time
from seeed_relay_v1 import Relay
def process_loop():
# turn all of the relays on
relay.all_on()
relay.print_status_all()
# wait a second
time.sleep(1)
# turn all of the relays off
relay.all_off()
relay.print_status_all()
# wait a second
time.sleep(1)
# now cycle each relay every second in an infinite loop
while True:
# test the on/off methods
print('Testing on/off methods')
for i in range(1, 5):
relay.on(i)
relay.print_status_all()
time.sleep(1)
relay.off(i)
relay.print_status_all()
time.sleep(1)
# test the toggle method
print('Testing the toggle methods')
for i in range(1, 5):
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
relay.toggle_port(i)
relay.print_status_all()
time.sleep(1)
print('Repeating loop')
# Now see what we're supposed to do next
if __name__ == "__main__":
# Create the relay object
relay = Relay()
try:
process_loop()
except KeyboardInterrupt:
print("\nExiting application")
# turn off all of the relays
relay.all_off()
# exit the application
sys.exit(0)
| 26.703125 | 119 | 0.504389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 771 | 0.451141 |
469452474a032213255cf5547c78a4dee27d7d79 | 2,087 | py | Python | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
]
| 12 | 2017-05-17T17:22:47.000Z | 2021-05-24T17:24:42.000Z | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
]
| 484 | 2015-12-02T19:24:34.000Z | 2022-02-22T16:36:47.000Z | quasar/sa_database.py | stevencyrway/quasar | db8ff733ec1becf95defc7097890232cd5944d48 | [
"MIT"
]
| 8 | 2017-04-27T20:42:05.000Z | 2022-01-11T19:43:57.000Z | import os
from sqlalchemy import bindparam, create_engine, exc
from sqlalchemy.dialects.postgresql.json import JSONB
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import text
from .utils import log, logerr
# Setup SQL Alchemy vars.
pg_opts = {
'drivername': os.getenv('PG_DRIVER'),
'username': os.getenv('PG_USER'),
'password': os.getenv('PG_PASSWORD'),
'host': os.getenv('PG_HOST'),
'port': os.getenv('PG_PORT'),
'database': os.getenv('PG_DATABASE')
}
pg_ssl = os.getenv('PG_SSL')
class Database:
def __init__(self, options={}):
pg_opts.update(options)
self.connect()
def connect(self):
# Setup SQL Alchemy postgres connection.
try:
engine = create_engine(URL(**pg_opts),
connect_args={'sslmode': pg_ssl})
self.engine = engine
self.conn = engine.connect()
except exc.InterfaceError as e:
log("Couldnt't establsh DB connection!")
log("Error is:")
logerr(e)
def disconnect(self):
self.conn.close()
return self.conn
def query(self, query):
return self.conn.execute(query)
def query_str(self, query, record):
# Run query with string substitution using ':thisvar' SQL Alchemy
# standard based formatting. e.g.
        # query = "INSERT INTO foo (bar) VALUES (:bar)", record = {'bar': 'baz'}
run_query = text(query)
return self.conn.execute(run_query, record)
def query_json(self, query, record, col_name):
# Based on the post https://stackoverflow.com/a/46031085, this
# function forces a JSONB binding to insert JSON record types
# into a table using SQL Alchemy.
# This function is tightly coupled with the log_event function
# in the cio_queue.py code. Hacky solution to get
# https://www.pivotaltracker.com/story/show/172585118 resolved.
run_query = text(query)
return self.conn.execute(
run_query.bindparams(bindparam(col_name, type_=JSONB)), record)
| 32.107692 | 75 | 0.632966 | 1,562 | 0.748443 | 0 | 0 | 0 | 0 | 0 | 0 | 730 | 0.349784 |
4694573c6edf0ff0ed4f4786ad3fb6ae431575db | 29,122 | py | Python | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
]
| 1 | 2020-06-13T13:57:11.000Z | 2020-06-13T13:57:11.000Z | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
]
| null | null | null | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
]
| null | null | null | import abc
class CommandBlock:
def __init__(self, command, conditional=True, mode='CHAIN', auto=True,
opposite=False, single_use=True):
self.command = command
self.cond = conditional
self.mode = mode
self.auto = auto
self.opposite = opposite
self.single_use = single_use
def resolve(self, scope):
return self.command.resolve(scope)
class Resolvable(metaclass=abc.ABCMeta):
@abc.abstractmethod
def resolve(self, scope):
pass
class SimpleResolve(Resolvable):
def __init__(self, *args):
self.args = args
def resolve(self, scope):
return ' '.join(map(lambda el: el.resolve(scope) \
if isinstance(el, Resolvable) \
else el, self.args))
class Command(Resolvable):
pass
class EntityRef(Resolvable):
def is_single_entity(self, scope):
raise NotImplementedError()
@property
def ref(self):
return EntityReference(self)
class ObjectiveRef(Resolvable):
def __init__(self, name):
assert type(name) == str
self.objective = name
def resolve(self, scope):
return scope.objective(self.objective)
class NameRef(EntityRef):
def __init__(self, name):
assert type(name) == str
self.name = name
    def is_single_entity(self, scope):
        return True
def resolve(self, scope):
return self.name
class ScoreRef:
def __init__(self, target, objective):
assert isinstance(target, EntityRef)
assert isinstance(objective, ObjectiveRef)
self.target = target
self.objective = objective
def resolve_pair(self, scope):
return '%s %s' % (self.target.resolve(scope),
self.objective.resolve(scope))
class Var(ScoreRef):
def __init__(self, nameref):
super().__init__(GlobalEntity, ObjectiveRef(nameref))
def make_selector(selector, **kwargs):
output = '@' + selector
if not kwargs:
return output
def str_pairs(items):
output = []
for key, value in items:
if type(value) == dict:
value = '{%s}' % str_pairs(value.items())
output.append('%s=%s' % (key, value))
return ','.join(output)
return '%s[%s]' % (output, str_pairs(kwargs.items()))
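# Illustrative output: make_selector('a', tag='foo', scores={'obj': '1..'})
# returns '@a[tag=foo,scores={obj=1..}]'.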
class Selector(EntityRef):
def __init__(self, type, args=None):
assert type in 'aespr'
self.type = type
assert args is None or isinstance(args, SelectorArgs)
self.args = args
def resolve_params(self, scope):
if not self.args:
return {}
return self.args.resolve(scope)
def is_single_entity(self, scope):
if self.type in 'spr':
return True
params = self.resolve_params(scope)
return 'limit' in params and params['limit'] == '1'
def resolve(self, scope):
return make_selector(self.type, **self.resolve_params(scope))
class _GlobalEntity(EntityRef):
def is_single_entity(self, scope):
return True
def resolve(self, scope):
return scope.global_entity()
GlobalEntity = _GlobalEntity()
class _PosUtil(EntityRef):
def is_single_entity(self, scope):
return True
def resolve(self, scope):
return scope.pos_util_entity()
PosUtil = _PosUtil()
class NbtPath(Resolvable):
def __init__(self, path):
self.path = path
def subpath(self, childpath):
# TODO path validation
return self.__class__(self.path + childpath)
def resolve(self, scope):
return self.path
def __eq__(self, other):
if type(other) != type(self):
return False
return self.path == other.path
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.path)
class Path(NbtPath):
def resolve(self, scope):
return scope.custom_nbt_path(self.path)
class ArrayPath(Path):
def __init__(self, index=None, key=None):
sub = '[%d]' % index if index is not None else ''
assert key is None or index is not None
sub += '.%s' % key if key else ''
super().__init__('%s%s' % (self.name, sub))
def subpath(self, childpath):
# Don't use our constructor
return Path(self.path).subpath(childpath)
class StackPath(ArrayPath):
name = 'stack'
def StackFrame(index):
class StackFramePath(ArrayPath):
name = 'stack[%d].stack' % (-index - 1)
return StackFramePath
StackFrameHead = StackFrame(0)
class GlobalPath(ArrayPath):
name = 'globals'
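# Illustrative resolved paths (before scope.custom_nbt_path() is applied):
# StackPath(2, 'value') -> 'stack[2].value', GlobalPath(0) -> 'globals[0]',
# StackFrameHead(0, 'x') -> 'stack[-1].stack[0].x'.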
class Cmd(Command):
def __init__(self, cmd):
self.command = cmd
def resolve(self, scope):
return self.command
class Execute(Command):
def __init__(self, chain):
self.chain = SimpleResolve(*chain._components)
def resolve(self, scope):
return 'execute %s' % self.chain.resolve(scope)
def ensure_selector(sel_arg):
assert isinstance(sel_arg, EntityRef), sel_arg
return sel_arg
class ExecuteChain:
def __init__(self):
self._components = []
self.can_terminate = False
def add(self, *args):
for arg in args:
if type(arg) in [str, int, float]:
self._components.append(str(arg))
elif isinstance(arg, Resolvable):
self._components.append(arg)
else:
assert False, type(arg)
return self
def run(self, cmd):
self.add('run', cmd)
return Execute(self)
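    # Typical usage (illustrative): ExecuteChain().as_entity(sel).at(sel)
    #   .cond('if').score_range(ref, ScoreRange(1)).run(cmd)
    # resolves to 'execute as ... at ... if score ... matches 1.. run ...'.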
def finish(self):
assert self.can_terminate
return Execute(self)
def as_entity(self, select_arg):
self.can_terminate = False
return self.add('as', ensure_selector(select_arg))
def at(self, select_arg):
self.can_terminate = False
return self.add('at', ensure_selector(select_arg))
def at_pos(self, pos):
self.can_terminate = False
return self.add('positioned', pos)
def at_entity_pos(self, select_arg):
self.can_terminate = False
return self.add('positioned', 'as', ensure_selector(select_arg))
def align(self, axes):
self.can_terminate = False
assert ''.join(axis for axis in axes if axis in 'xyz') == axes
return self.add('align', axes)
def facing(self, pos):
self.can_terminate = False
return self.add('facing', pos)
def facing_entity(self, select_arg, feature):
self.can_terminate = False
assert feature == 'eyes' or feature == 'feet'
return self.add('facing', 'entity', ensure_selector(select_arg), \
feature)
def rotated(self, y, x):
self.can_terminate = False
return self.add('rotated', y, x)
def rotated_as_entity(self, select_arg):
self.can_terminate = False
return self.add('rotated', 'as', ensure_selector(select_arg))
def anchored(self, anchor):
self.can_terminate = False
assert anchor == 'feet' or anchor == 'eyes'
return self.add('anchored', anchor)
def cond(self, cond_type):
self.can_terminate = False
assert cond_type == 'if' or cond_type == 'unless'
return ExecuteChain.Cond(self, cond_type)
class Cond:
def add(self, *args):
self.parent.can_terminate = True
return self.parent.add(*((self.cond_type,) + args))
def __init__(self, parent, cond_type):
self.parent = parent
self.cond_type = cond_type
def entity(self, entityref):
return self.add('entity', ensure_selector(entityref))
def score(self, targetref, operator, sourceref):
assert isinstance(targetref, ScoreRef)
assert isinstance(sourceref, ScoreRef)
assert operator in ['<', '<=', '=', '>=', '>']
return self.add('score', targetref.target, targetref.objective,
operator, sourceref.target, sourceref.objective)
def score_range(self, scoreref, range):
assert isinstance(scoreref, ScoreRef)
assert isinstance(range, ScoreRange)
return self.add('score', scoreref.target, scoreref.objective,
'matches', range)
def block(self, pos, block):
assert isinstance(pos, WorldPos) and pos.block_pos
return self.add('block', pos, block)
def blocks_match(self, begin, end, dest, type):
assert type in ['all', 'masked']
return self.add('blocks', begin, end, dest, type)
def store(self, store_type):
assert store_type in ['result', 'success']
self.can_terminate = False
return ExecuteChain.Store(self, store_type)
class Store:
def add(self, *args):
return self.parent.add(*(('store', self.store_type) + args))
def __init__(self, parent, store_type):
self.parent = parent
self.store_type = store_type
def score(self, scoreref):
assert isinstance(scoreref, ScoreRef)
return self.add('score', scoreref.target, scoreref.objective)
def entity(self, target, path, data_type, scale=1):
return self.add('entity', ensure_selector(target), \
path, data_type, scale)
def bossbar(self, bar, attr):
assert attr in ['value', 'max']
return self.add('bossbar', bar, attr)
class BlockOrEntityRef(Resolvable):
pass
class EntityReference(BlockOrEntityRef):
def __init__(self, target):
assert isinstance(target, EntityRef)
self.target = target
def resolve(self, scope):
assert self.target.is_single_entity(scope)
return 'entity %s' % self.target.resolve(scope)
class WorldPos(Resolvable):
def __init__(self, x, y, z, block_pos=False):
is_anchor = self._check_coord(x, True, not block_pos)
was_anchor = self._check_coord(y, is_anchor, not block_pos)
is_anchor = self._check_coord(z, was_anchor, not block_pos)
if was_anchor:
assert is_anchor
self.x, self.y, self.z = x, y, z
self.block_pos = block_pos
def _check_coord(self, val, allow_anchor, allow_float):
if isinstance(val, AnchorRelCoord):
assert allow_anchor
return True
if type(val) == float:
assert allow_float
return False
if type(val) == int:
return False
if isinstance(val, WorldRelCoord):
return False
assert False, val
@property
def ref(self):
return BlockReference(self)
def resolve(self, scope):
return '%s %s %s' % (self.x, self.y, self.z)
class RelativeCoord:
def __init__(self, val):
self.str = self.marker
if type(val) == int:
if val != 0:
self.str += '%d' % val
elif type(val) == float:
if val != 0.0:
# https://stackoverflow.com/a/2440786
self.str += ('%f' % val).rstrip('0').rstrip('.')
else:
assert False, val
self.val = val
def __str__(self):
return self.str
class WorldRelCoord(RelativeCoord):
marker = '~'
class AnchorRelCoord(RelativeCoord):
marker = '^'
class BlockReference(BlockOrEntityRef):
def __init__(self, pos):
assert isinstance(pos, WorldPos) and pos.block_pos
self.pos = pos
def resolve(self, scope):
return 'block %s' % self.pos.resolve(scope)
class _UtilBlockPos(WorldPos):
def __init__(self, is_zero_tick):
self.block_pos = True
self.is_zero_tick = is_zero_tick
def resolve(self, scope):
if self.is_zero_tick:
return scope.get_zero_tick_block()
return scope.get_util_block()
UtilBlockPos = _UtilBlockPos(False)
ZeroTickBlockPos = _UtilBlockPos(True)
class DataGet(Command):
def __init__(self, target, path, scale=1):
assert isinstance(target, BlockOrEntityRef)
assert isinstance(scale, (int, float))
self.target = target
self.path = path
self.scale = int(scale) if scale == int(scale) else scale
def resolve(self, scope):
return 'data get %s %s %s' % (self.target.resolve(scope),
self.path.resolve(scope), self.scale)
class DataMerge(Command):
def __init__(self, ref, nbt):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.nbt = nbt
def resolve(self, scope):
return 'data merge %s %s' % (self.ref.resolve(scope),
self.nbt.resolve(scope))
class DataModify(Command):
def __init__(self, ref, path, action, *rest):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.path = path
self.action = action
self.init(*rest)
def resolve(self, scope):
return 'data modify %s %s %s' % (
self.ref.resolve(scope), self.path.resolve(scope), self.action)
class DataModifyValue(DataModify):
def init(self, val):
self.val = val
def resolve(self, scope):
return '%s value %s' % (super().resolve(scope), self.val.resolve(scope))
class DataModifyFrom(DataModify):
def init(self, ref, path):
assert isinstance(ref, BlockOrEntityRef)
self.fromref = ref
self.frompath = path
def resolve(self, scope):
return '%s from %s %s' % (super().resolve(scope),
self.fromref.resolve(scope), self.frompath.resolve(scope))
class DataModifyStack(DataModifyValue):
def __init__(self, index, key, action, value, path=StackPath):
super().__init__(GlobalEntity.ref, path(index, key), action,
value)
class DataRemove(Command):
def __init__(self, ref, path):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.path = path
def resolve(self, scope):
return 'data remove %s %s' % (self.ref.resolve(scope),
self.path.resolve(scope))
class Function(Command):
def __init__(self, func_name):
self.name = func_name
def resolve(self, scope):
return 'function %s' % scope.function_name(self.name)
class Tellraw(Command):
def __init__(self, text, target):
assert isinstance(text, TextComponentHolder)
assert isinstance(target, EntityRef)
self.text = text
self.target = target
def resolve(self, scope):
return 'tellraw %s %s' % (self.target.resolve(scope),
self.text.resolve_str(scope))
class TextComponent(Resolvable):
pass
class TextComponentHolder(TextComponent):
def __init__(self, style, children):
self.style = style
self.children = children
def resolve_str(self, scope):
import json
return json.dumps(self.resolve(scope), separators=(',', ':'))
def resolve(self, scope):
text = {}
for key, value in self.style.items():
text[key] = self._resolve_style(key, value, scope)
extra = []
for child in self.children:
if isinstance(child, TextComponentHolder) and not child.style:
for child_child in child.children:
extra.append(child_child.resolve(scope))
else:
extra.append(child.resolve(scope))
if not self.style:
return extra
if extra:
if len(extra) == 1 and type(extra[0]) == dict:
text.update(extra[0])
else:
text['extra'] = extra
return text
def _resolve_style(self, key, value, scope):
if key == 'clickEvent':
assert isinstance(value, TextClickAction)
return value.resolve(scope)
return value
class TextStringComponent(TextComponent):
def __init__(self, stringval):
self.val = stringval
def resolve(self, scope):
return {'text': self.val}
class TextNBTComponent(TextComponent):
def __init__(self, entity, path):
assert isinstance(entity, EntityRef)
assert isinstance(path, Path)
self.entity = entity
self.path = path
def resolve(self, scope):
assert self.entity.is_single_entity(scope)
return {'nbt': self.path.resolve(scope),
'entity': self.entity.resolve(scope)}
class TextScoreComponent(TextComponent):
def __init__(self, ref):
assert isinstance(ref, ScoreRef)
self.ref = ref
def resolve(self, scope):
return {'score':
{'name': self.ref.target.resolve(scope),
'objective': self.ref.objective.resolve(scope)}}
class TextClickAction(Resolvable):
def __init__(self, action, value):
self.action = action
self.value = value
def resolve(self, scope):
if type(self.value) == str:
value = self.value
else:
assert self.action in ['run_command', 'suggest_command'] \
and isinstance(self.value, Command)
value = self.value.resolve(scope)
return {'action': self.action, 'value': value}
class Teleport(Command):
def __init__(self, target, *more):
assert isinstance(target, EntityRef)
self.args = [target]
self.args.extend(more)
def resolve(self, scope):
return 'tp %s' % ' '.join(a.resolve(scope) for a in self.args)
class Clone(Command):
def __init__(self, src0, src1, dest):
self.src0 = src0
self.src1 = src1
self.dest = dest
def resolve(self, scope):
return 'clone %s %s %s' % (self.src0.resolve(scope),
self.src1.resolve(scope),
self.dest.resolve(scope))
class Setblock(Command):
def __init__(self, pos, block):
assert isinstance(pos, WorldPos) and pos.block_pos
self.pos = pos
self.block = block
def resolve(self, scope):
return 'setblock %s %s' % (self.pos.resolve(scope),
self.block.resolve(scope))
class Scoreboard(Command):
allows_negative = False
def __init__(self, varref, value):
assert isinstance(varref, ScoreRef)
assert isinstance(value, int)
assert self.allows_negative or value >= 0
self.var = varref
self.value = value
def resolve(self, scope):
return 'scoreboard players %s %s %d' % (
self.op, self.var.resolve_pair(scope), self.value)
class SetConst(Scoreboard):
op = 'set'
allows_negative = True
class AddConst(Scoreboard):
op = 'add'
class RemConst(Scoreboard):
op = 'remove'
class GetValue(Command):
def __init__(self, scoreref):
assert isinstance(scoreref, ScoreRef)
self.ref = scoreref
def resolve(self, scope):
return 'scoreboard players get %s' % self.ref.resolve_pair(scope)
class Operation(Command):
def __init__(self, left, right):
assert isinstance(left, ScoreRef)
assert isinstance(right, ScoreRef)
self.left = left
self.right = right
def resolve(self, scope):
return 'scoreboard players operation %s %s %s' % (
self.left.resolve_pair(scope), self.op,
self.right.resolve_pair(scope))
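# Concrete scoreboard operations; `op` is the operator token emitted in the command.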
class OpAssign(Operation): op = '='
class OpAdd(Operation): op = '+='
class OpSub(Operation): op = '-='
class OpMul(Operation): op = '*='
class OpDiv(Operation): op = '/='
class OpMod(Operation): op = '%='
class OpIfLt(Operation): op = '<'
class OpIfGt(Operation): op = '>'
class OpSwap(Operation): op = '><'
class SelectorArgs(Resolvable):
pass
class SimpleSelectorArgs(SelectorArgs):
def __init__(self, args):
self.args = args
def resolve(self, scope):
return dict(self.args)
class ScoreRange(Resolvable):
def __init__(self, min=None, max=None):
assert min is not None or max is not None
self.min = min
self.max = max
def resolve(self, scope):
range = ''
if self.min is not None:
range = '%d' % self.min
if self.max is not None and self.max != self.min:
range += '..%d' % self.max
elif self.max is None:
range += '..'
return range
class SelRange(SelectorArgs):
def __init__(self, objective, min=None, max=None):
assert isinstance(objective, ObjectiveRef)
self.objective = objective
self.range = ScoreRange(min, max)
def resolve(self, scope):
return {'scores': { self.objective.resolve(scope):
self.range.resolve(scope) }}
class SelEquals(SelRange):
def __init__(self, objective, value):
super().__init__(objective, value, value)
class ComboSelectorArgs(SelectorArgs):
@staticmethod
def new(first, second):
if first is None: return second
if second is None: return first
return ComboSelectorArgs(first, second)
def __init__(self, first, second):
self.first = first
self.second = second
def resolve(self, scope):
sel = {}
sel.update(self.first.resolve(scope))
sel.update(self.second.resolve(scope))
return sel
class SelNbt(SelectorArgs):
def __init__(self, path, value):
self.nbt_spec = {}
if not path:
self.nbt_spec = value
else:
self.build_selector(path, self.nbt_spec, value)
def build_selector(self, path, parent, value):
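        # Walk the path, creating nested dicts (compound tags) and lists as needed;
        # purely numeric path elements are treated as list indices. The final
        # path element is assigned `value`.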
for i in range(len(path) - 1):
node = path[i]
if node.isdigit():
pos = int(node)
while len(parent) < pos + 1:
parent.append({})
parent = parent[pos]
continue
if node not in parent:
parent[node] = {}
if len(path) > i + 1:
if path[i+1].isdigit():
if not parent[node]:
parent[node] = []
else:
assert type(parent[node]) == list
parent = parent[node]
if path[-1].isdigit():
pos = int(path[-1])
while len(parent) < pos + 1:
parent.append({})
path[-1] = pos
parent[path[-1]] = value
def stringify_nbt(self, node, scope):
# TODO quoted keys
if type(node) == dict:
return '{%s}' % ','.join('%s:%s' % (k, self.stringify_nbt(v, scope))
for k,v in node.items())
if type(node) == list:
return '[%s]' % ','.join(map(lambda n:self.stringify_nbt(n, scope), node))
if isinstance(node, Resolvable):
return node.resolve(scope)
assert False, type(node)
def resolve(self, scope):
return {'nbt': self.stringify_nbt(self.nbt_spec, scope)}
class TeamName(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.team_name(self.name)
class TeamModify(Command):
def __init__(self, team, attr, value):
assert isinstance(team, TeamName)
self.team = team
assert attr in ['color', 'friendlyFire', 'seeFriendlyInvisibles',
'nametagVisibility', 'deathMessageVisibility',
'collisionRule', 'displayName', 'prefix', 'suffix']
self.attr = attr
self.value = value
def resolve(self, scope):
return 'team modify %s %s %s' % (self.team.resolve(scope), self.attr,
self.value)
class JoinTeam(Command):
def __init__(self, team, members):
assert isinstance(team, TeamName)
assert members is None or isinstance(members, EntityRef)
self.team = team
self.members = members
def resolve(self, scope):
members = (' ' + self.members.resolve(scope)) if self.members else ''
return 'team join %s%s' % (self.team.resolve(scope), members)
class Bossbar(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.bossbar(self.name)
class BossbarSet(Command):
def __init__(self, bar, prop, value):
assert isinstance(bar, Bossbar)
self.bar = bar
self.prop = prop
self.value = value
def resolve(self, scope):
value = (' ' + self.value.resolve(scope)) if self.value else ''
return 'bossbar set %s %s%s' % (self.bar.resolve(scope), self.prop,
value)
class Kill(Command):
def __init__(self, target):
assert isinstance(target, EntityRef)
self.target = target
def resolve(self, scope):
return 'kill %s' % self.target.resolve(scope)
class ReplaceItem(Command):
def __init__(self, ref, slot, item, amount=None):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.slot = slot
self.item = item
self.amount = amount
def resolve(self, scope):
amount = (' %d' % self.amount) if self.amount is not None else ''
return 'replaceitem %s %s %s%s' % (self.ref.resolve(scope), self.slot,
self.item.resolve(scope), amount)
class GiveItem(Command):
def __init__(self, targets, item, count=1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.count = count
def resolve(self, scope):
return 'give %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.count)
class ClearItem(Command):
def __init__(self, targets, item, max_count=-1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.max_count = max_count
def resolve(self, scope):
return 'clear %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.max_count)
class EffectGive(Command):
def __init__(self, target, effect, seconds=None, amp=None, hide=None):
assert isinstance(target, EntityRef)
self.target = target
self.effect = effect
self.seconds = seconds if seconds is not None else 30
self.amp = amp if amp is not None else 0
self.hide = hide if hide is not None else False
def resolve(self, scope):
return 'effect give %s %s %d %d %s' % (self.target.resolve(scope),
self.effect, self.seconds, self.amp,
'true' if self.hide else 'false')
class Particle(Command):
def __init__(self, name, pos, delta, speed, count, mode, players):
self.name = name
self.pos = pos
self.delta = delta
self.speed = speed
self.count = count
self.mode = mode
self.players = players
def resolve(self, scope):
players = (' ' + self.players.resolve(scope)) if self.players else ''
return 'particle %s %s %s %f %d %s%s' % (self.name,
self.pos.resolve(scope), self.delta.resolve(scope),
self.speed, self.count, self.mode, players)
class Title(Command):
def __init__(self, target, action, *args):
assert isinstance(target, EntityRef)
self.target = target
self.action = action
self.args = args
def resolve(self, scope):
args = (' ' + SimpleResolve(*self.args).resolve(scope)) \
if self.args else ''
return 'title %s %s%s' % (self.target.resolve(scope), self.action, args)
class Summon(Command):
def __init__(self, entity_name, pos, data=None):
assert pos is None or isinstance(pos, WorldPos)
self.name = entity_name
self.pos = pos
self.data = data
def resolve(self, scope):
pos = (' ' + self.pos.resolve(scope)) if self.pos else \
(' ~ ~ ~' if self.data else '')
data = (' ' + self.data.resolve(scope)) if self.data else ''
return 'summon %s%s%s' % (self.name, pos, data)
class Advancement(Command):
def __init__(self, action, target, range, *args):
assert action in ['grant', 'revoke']
assert isinstance(target, EntityRef)
self.action = action
self.target = target
self.range = range
self.args = args
def resolve(self, scope):
args = (' ' + SimpleResolve(*self.args).resolve(scope)) \
if self.args else ''
return 'advancement %s %s %s%s' % (self.action,
self.target.resolve(scope),
self.range, args)
class AdvancementRef(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.advancement_name(self.name)
| 29.327291 | 86 | 0.588215 | 28,211 | 0.968718 | 0 | 0 | 428 | 0.014697 | 0 | 0 | 1,518 | 0.052126 |
46950a30a497c84732798b48f44483d04a01233a | 217 | py | Python | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
]
| 1 | 2020-01-16T09:23:43.000Z | 2020-01-16T09:23:43.000Z | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
]
| null | null | null | top/clearlight/reptile/bilibili/bj_tech_mooc/example_04_360.py | ClearlightY/Python_learn | 93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232 | [
"Apache-2.0"
]
| null | null | null | import requests
keyword = "python"
try:
kv = {'q':keyword}
r = requests.get('http://www.so.com/s', params=kv)
print(r.request.url)
r.raise_for_status()
print(len(r.text))
except:
    print('Crawl failed') | 19.727273 | 54 | 0.617512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.204444 |
4695279b1ca8306d24c6c58add7de32e6798011f | 4,489 | py | Python | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
]
| null | null | null | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
]
| null | null | null | rodnet/models/backbones/cdc_deep.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
]
| null | null | null | import torch.nn as nn
class RODEncode(nn.Module):
def __init__(self, in_channels=2):
super(RODEncode, self).__init__()
self.conv1a = nn.Conv3d(
in_channels=in_channels,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1a_1 = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1a_2 = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1b = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(2, 2, 2),
padding=(4, 2, 2),
)
self.conv2a = nn.Conv3d(
in_channels=64,
out_channels=128,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv2b = nn.Conv3d(
in_channels=128,
out_channels=128,
kernel_size=(9, 5, 5),
stride=(2, 2, 2),
padding=(4, 2, 2),
)
self.conv3a = nn.Conv3d(
in_channels=128,
out_channels=256,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv3b = nn.Conv3d(
in_channels=256,
out_channels=256,
kernel_size=(9, 5, 5),
stride=(1, 2, 2),
padding=(4, 2, 2),
)
self.bn1a = nn.BatchNorm3d(num_features=64)
self.bn1a_1 = nn.BatchNorm3d(num_features=64)
self.bn1a_2 = nn.BatchNorm3d(num_features=64)
self.bn1b = nn.BatchNorm3d(num_features=64)
self.bn2a = nn.BatchNorm3d(num_features=128)
self.bn2b = nn.BatchNorm3d(num_features=128)
self.bn3a = nn.BatchNorm3d(num_features=256)
self.bn3b = nn.BatchNorm3d(num_features=256)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(
self.bn1a(self.conv1a(x))
) # (B, 2, W, 128, 128) -> (B, 64, W, 128, 128)
# additional
x = self.relu(
self.bn1a_1(self.conv1a_1(x))
) # (B, 64, W, 128, 128) -> (B, 64, W, 128, 128)
x = self.relu(
self.bn1a_2(self.conv1a_2(x))
) # (B, 64, W, 128, 128) -> (B, 64, W, 128, 128)
x = self.relu(
self.bn1b(self.conv1b(x))
) # (B, 64, W, 128, 128) -> (B, 64, W/2, 64, 64)
x = self.relu(
self.bn2a(self.conv2a(x))
) # (B, 64, W/2, 64, 64) -> (B, 128, W/2, 64, 64)
x = self.relu(
self.bn2b(self.conv2b(x))
) # (B, 128, W/2, 64, 64) -> (B, 128, W/4, 32, 32)
x = self.relu(
self.bn3a(self.conv3a(x))
) # (B, 128, W/4, 32, 32) -> (B, 256, W/4, 32, 32)
x = self.relu(
self.bn3b(self.conv3b(x))
) # (B, 256, W/4, 32, 32) -> (B, 256, W/4, 16, 16)
return x
class RODDecode(nn.Module):
def __init__(self, n_class):
super(RODDecode, self).__init__()
self.convt1 = nn.ConvTranspose3d(
in_channels=256,
out_channels=128,
kernel_size=(4, 6, 6),
stride=(2, 2, 2),
padding=(1, 2, 2),
)
self.convt2 = nn.ConvTranspose3d(
in_channels=128,
out_channels=64,
kernel_size=(4, 6, 6),
stride=(2, 2, 2),
padding=(1, 2, 2),
)
self.convt3 = nn.ConvTranspose3d(
in_channels=64,
out_channels=n_class,
kernel_size=(3, 6, 6),
stride=(1, 2, 2),
padding=(1, 2, 2),
)
self.prelu = nn.PReLU()
self.sigmoid = nn.Sigmoid()
# self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'],
# radar_configs['ramap_asize']), mode='nearest')
def forward(self, x):
x = self.prelu(self.convt1(x)) # (B, 256, W/4, 16, 16) -> (B, 128, W/2, 32, 32)
x = self.prelu(self.convt2(x)) # (B, 128, W/2, 32, 32) -> (B, 64, W, 64, 64)
x = self.convt3(x) # (B, 64, W, 64, 64) -> (B, 3, W, 128, 128)
return x
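# Shape summary (per the per-layer comments above): RODEncode maps
# (B, in_channels, W, 128, 128) -> (B, 256, W/4, 16, 16); RODDecode maps that
# back to (B, n_class, W, 128, 128).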
| 32.294964 | 101 | 0.461573 | 4,461 | 0.993763 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.155268 |
46955a61d6eda18fd04e0a7384414c8a588922bf | 85 | py | Python | File/admin.py | alstn2468/Likelion_DRF_Project | 35a359a05185f551ed2e999ab17e0108a69d6b57 | [
"MIT"
]
| 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | 15_LikeLionDRFProject/File/admin.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
]
| 1 | 2021-05-22T18:27:01.000Z | 2021-05-22T18:27:01.000Z | 15_LikeLionDRFProject/File/admin.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
]
| 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | from django.contrib import admin
from .models import File
admin.site.register(File)
| 17 | 32 | 0.811765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
469862e42b088f23b41b49c8734db4c50395bddc | 28,022 | py | Python | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
]
| 21 | 2019-10-24T04:59:52.000Z | 2021-05-11T12:47:17.000Z | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
]
| null | null | null | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
]
| 9 | 2019-10-26T16:56:08.000Z | 2021-03-15T14:10:21.000Z | import shutil
import socket
import subprocess
import threading
import json
import pickle
import tempfile
import time
import box
import os
import base64
import getpass
import urllib
import requests
import zipfile
import sys
import pprint
import platform
DEBUG = True
BPH_TEMPLATE_SERVER_IP = sys.argv[1]
BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2])
BPH_CONTROLLER_WEB_PORT = int(sys.argv[3])
running_os = platform.release()
if running_os == "7":
APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format(
current_user=getpass.getuser())
TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format(
current_user=getpass.getuser())
elif running_os == "XP":
# To avoid tool issues when dealing with white-spaced paths.
APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format(
current_user=getpass.getuser())
TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format(
current_user=getpass.getuser())
else:
print "Unsupported platform! Exiting..."
sys.exit()
class FilterSpecialVars():
def __init__(self, unfiltered_data, template=None, custom_user_vars=None):
# unfiltered_data should be a list
self.unfiltered_data = unfiltered_data
self.filtered_data = []
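        # Map of @placeholder@ tokens to concrete values; these are substituted
        # into tool parameters and AutoIt automation scripts before execution.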
self.special_vars = {
'@appdata@': APP_DATA, # os.path.expandvars('%appdata%'),
'@temp@': TMP_FOLDER,
'@toolname@': template['tool_name'], # "peid"
'@filename@': template.tool.filename, # "peid.exe"
'@rid@': template['rid'],
'@md5@': template['md5'],
'@sample@': "\"" + ExecutionManager.sample_abs_path + "\"",
'@sample_filename@': "\"" + os.path.basename(ExecutionManager.sample_abs_path) + "\"",
'@tool_drive@': template['tool_drive'],
'@tool_path@': os.path.join(template['tool_drive'], template['remote_tool_path'].replace('/','\\')),
'@tool_abs_path@': os.path.join(template['tool_drive'], template['remote_tool_path'],
template.tool.filename),
'@report_folder@': os.path.join(APP_DATA, template['rid'], template['tool_name'])
}
if custom_user_vars != None:
self.custom_user_vars_filter(custom_user_vars)
def custom_user_vars_filter(self, custom_user_vars):
if DEBUG: print "Custom User Vars Filtering: {}".format(custom_user_vars)
for k, v in custom_user_vars.items():
key = "@{}@".format(k)
self.special_vars.update({key: v})
if DEBUG: print self.special_vars
def filter_now(self):
def do_filter(unfiltered_string):
for k, v in self.special_vars.items():
if k in str(unfiltered_string):
unfiltered_string = unfiltered_string.replace(k, v)
if DEBUG: print ">> Found: {}".format(unfiltered_string)
return unfiltered_string
for unfiltered_string in self.unfiltered_data:
if len(unfiltered_string) != 0:
if DEBUG: print "### Searching Variable ###: {}".format(unfiltered_string)
self.filtered_data.append(do_filter(unfiltered_string))
if DEBUG: print self.special_vars
if DEBUG:
print"FILTERED: {}".format(self.filtered_data)
# return " ".join(self.filtered_data)
class File(object):
def __init__(self):
pass
def generate_random_file_name(self):
import string
import random
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(0, 10))
def zip_file(self, file_abs_path, seconds=5):
if not file_abs_path.endswith('.log') and not file_abs_path.endswith('.zip'):
if DEBUG: print "Creating compressed (zip) archive: {}".format(file_abs_path)
#time.sleep(5)
try:
zip_filename = "{}.zip".format(os.path.basename(file_abs_path))
if DEBUG: print zip_filename
original_filename = os.path.basename(file_abs_path)
if DEBUG: print original_filename
path_location = os.path.dirname(file_abs_path)
if DEBUG: print path_location
zip_file_abs_path = "{}\\{}".format(path_location, zip_filename)
if DEBUG: print zip_file_abs_path
zf = zipfile.ZipFile(zip_file_abs_path, 'w', zipfile.ZIP_DEFLATED)
                # When a file is being created as a compressed (zip) file, in some cases
                # the set delay time is not enough and file-access errors appear.
# To avoid such situation, several attempts are made until the access
# to the source file is ready.
try:
zf.write(file_abs_path, os.path.basename(file_abs_path))
except IOError:
if DEBUG: print "Target file is still in use... attempting in ({}) seconds".format(seconds)
time.sleep(seconds)
self.zip_file(file_abs_path)
else:
if DEBUG: print "Zip file creation - Done."
except OSError as e:
if DEBUG: print "Error when setting up info for target zip file: {}".format(e)
raise
else:
                zf.close()  # close the archive so it is fully written before checking for it
if os.path.isfile(zip_file_abs_path):
if DEBUG: print "Zip file ok: {}".format(zip_file_abs_path)
# os.remove(file_abs_path)
return zip_filename
else:
if DEBUG: print "Zip file can't be created"
return None
class AutoItScript(File):
def __init__(self, automation_data):
self.autoit_script = None
self.__base64totmp(automation_data)
def __base64totmp(self, automation_data):
if DEBUG: print "Converting from base64 file data to Auto-it Script"
tmp_au_script_abs_path = os.path.join(
APP_DATA, self.generate_random_file_name())
with open(tmp_au_script_abs_path, 'w+') as tmp_au_script:
for _ in automation_data:
if DEBUG: print "Writing: {}\n".format(_)
tmp_au_script.write(_)
self.autoit_script = tmp_au_script_abs_path
class DownloadedFile(File):
def __init__(self, download_url):
self.download_dir = APP_DATA
self.fake_file_name = self.generate_random_file_name()
self.original_file_name = os.path.basename(download_url)
self.extension = os.path.splitext(download_url)[1].replace('.', '')
#self.abs_path = os.path.join(self.download_dir, "{}.{}".format(
# self.fake_file_name, self.extension))
self.abs_path = os.path.join(self.download_dir, self.original_file_name)
if DEBUG:
print self.abs_path
class ExecutionManager(object):
report_path = ""
sample_abs_path = ""
#### Agent Command Control ######
def execute_tool(self, **cmd_data):
if DEBUG:
print cmd_data
tool_drive = cmd_data['tool_drive']
tool_path = cmd_data['tool_path'].replace('/', '\\')
tool_name = cmd_data['tool_name']
tool_abs_path = "\"{tool_drive}{tool_path}\\{tool_name}\"".format(
tool_drive=tool_drive,
tool_path=tool_path,
tool_name=tool_name,
)
if DEBUG:
print tool_abs_path
tool_args = cmd_data['tool_args']
if DEBUG:
print tool_args
cmd = "{} {}".format(tool_abs_path, tool_args)
if DEBUG:
print cmd
print "\nExecuting Cmd: {}\n".format(cmd)
subprocess.call(cmd, shell=True)
def exec_manager(self, **cmd_data):
if DEBUG:
if DEBUG: print "\nExecuting Thread with data: {}\n".format(cmd_data)
thread_name = cmd_data['tool_name']
thread = threading.Thread(target=self.execute_tool, name=thread_name, kwargs=cmd_data)
thread.start()
def write_tmp_file(self, datatowrite, sample_abs_path):
try:
if DEBUG: print "Writing Tmp file: {}".format(sample_abs_path)
with open(sample_abs_path, 'wb+') as f:
f.write(datatowrite)
except:
if DEBUG: print "Error while creating the tmp file."
else:
if DEBUG: print "Done."
if os.path.isfile(sample_abs_path):
if DEBUG: print "Temp file created correctly."
            # The destination folder is created this way because some tools show
            # weird behavior when passing arguments. For instance, CFF Explorer
            # does not work correctly when the file argument resides in a
            # directory with whitespace; the workaround is to use the DOS (8.3)
            # version of the path.
#fixed_sample_abs_path = sample_abs_path.split('\\')
#fixed_sample_abs_path[1] = "docume~1"
#fixed_sample_abs_path[3] = "applic~1"
# print fixed_sample_abs_path
# Setting up Class attribute for sample path
return sample_abs_path
return False
def download_file(self, download_url):
if DEBUG: print "Downloading: {}".format(download_url)
try:
import urllib2
filedata = urllib2.urlopen(download_url)
except urllib2.URLError:
if DEBUG: print "Can't download the target sample file. Make sure BPH Webserver is running on the host."
return False
else:
datatowrite = filedata.read()
sample_abs_path = DownloadedFile(download_url).abs_path
# Used when filtering custom variables
ExecutionManager.sample_abs_path = sample_abs_path
if DEBUG: print "Downloaded file: {}".format(sample_abs_path)
return self.write_tmp_file(datatowrite, sample_abs_path)
def execute_autoit_script(self, template, auto_it_script_abs_path):
# The previously generated AutoIT script will be executed.
if DEBUG: print "Executing Auto-It script"
self.exec_manager(
tool_drive=template.tool_drive,
tool_path='misc\\autoitv3\\',
tool_name='AutoIt3.exe',
tool_args=auto_it_script_abs_path)
def tool_execution(self, template):
def selected_execution(filtered_parameters, filtered_automation):
cascade_execution = False
if filtered_parameters is not None and filtered_automation is not None:
if DEBUG: print "Cascaded Execution Detected: parameters -> autoit"
cascade_execution = True
if filtered_parameters is not None:
if DEBUG: print "Parameter Execution Detected"
self.exec_manager(
tool_drive=template.tool_drive,
tool_path=template.remote_tool_path,
tool_name=template.tool.filename,
tool_args=filtered_parameters
)
if filtered_automation is not None:
# If cascase execution is set, then a delay between tool execution
# and automation is also set. This to allow the tool to properly
# load and the automation be able to run properly. A default value
# of 5 seconds was given.
if cascade_execution:
if DEBUG: print "Cascade Execution Delay - Running now..."
time.sleep(5)
if DEBUG: print "Automation-Only Execution Detected"
custom_user_vars = template.configuration.execution.custom_user_vars
auto_it_script_abs_path = AutoItScript(filtered_automation).autoit_script
self.execute_autoit_script(template, auto_it_script_abs_path)
def filter_custom_vars(template, filter_type=None):
# Handling template parameters custom vars
if filter_type is not None:
custom_user_vars = template.configuration.execution.custom_user_vars
if filter_type == "parameters":
parameters = template.actions[template.actions.action]['parameters']
if parameters is not None:
if DEBUG: print "Parameters: {}".format(parameters)
if len(custom_user_vars) != 0:
if DEBUG: print "Custom Parameters Vars {} - Parameters({})".format(custom_user_vars, parameters)
filtered_parameters = self.filter_variables(
parameters, template, filter_type='parameters', custom_user_vars=custom_user_vars)
else:
filtered_parameters = self.filter_variables(
parameters, template, filter_type='parameters', custom_user_vars=None)
return filtered_parameters
if filter_type == "automation":
automation = template.actions[template.actions.action]['automation']
if automation is not None:
if DEBUG: print "Automation: {}".format(automation)
if len(custom_user_vars) != 0:
if DEBUG: print "Custom Automation Vars {}".format(custom_user_vars)
filtered_automation = self.filter_variables(
automation, template, filter_type='automation', custom_user_vars=custom_user_vars)
else:
filtered_automation = self.filter_variables(
automation, template, filter_type='automation', custom_user_vars=None)
return filtered_automation
action_name = template.actions.action
if DEBUG: print "Executing: {}".format(action_name)
filtered_parameters = filter_custom_vars(template, filter_type='parameters')
filtered_automation = filter_custom_vars(template, filter_type='automation')
selected_execution(filtered_parameters, filtered_automation)
class TemplateManager(ExecutionManager):
def __init__(self, template):
# self.report_directory_check(template.vm_report_name)
if DEBUG: print "#"*50
if DEBUG: print dict(template)
if DEBUG: print "#"*50
# Each tool request must save files. Those can be either a log file
# or output files from its execution. This "report path" folder will
# be created per request.
#
# The /files/ folder will be used to store any additional files generated
# by the tool.
self.report_path_files = os.path.join(
APP_DATA, template.rid, template.tool_name, 'files')
self.report_path = os.path.join(
APP_DATA, template.rid, template.tool_name)
if not os.path.isdir(self.report_path_files):
if DEBUG: print "Creating: {}".format(self.report_path_files)
os.makedirs(self.report_path_files)
if template.configuration.execution['download_sample']:
self.download_file(template.download_url)
# Tool execution will eventually select which execution type will be run,
# either automated or manual (only based in parameters)
self.tool_execution(template)
# Delay (seconds) between tool executions.
exec_delay = template.configuration.execution.delay
if DEBUG: print "Execution Delay (in seconds): {}".format(exec_delay)
time.sleep(exec_delay)
while True:
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
threads = str(threading.enumerate()).lower()
if template.configuration.execution.background_run:
if DEBUG: print "TOOL DOES RUN IN BACKGROUND..."
if template.tool.filename.lower() in threads:
# FIXED: This allows more than one tool running in background
if threading.active_count() != 1:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK DONE"
break
else:
if DEBUG: print "TOOL DOES NOT RUN IN BACKGROUND..."
if template.tool.filename.lower() not in threads:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK - DONE"
break
time.sleep(1)
if DEBUG: print "\n###### Tool execution has ended #######\n"
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
if template.configuration.reporting.report_files:
if DEBUG: print "########## Starting COLLECTING HTTP FILES ##############"
self.report(template)
def filter_variables(self, data, template, filter_type=None, custom_user_vars=None):
if filter_type == "parameters":
# Convert into list here.
data = data.split(' ')
if filter_type == "automation":
# Decode first, then convert into a list.
data = base64.decodestring(data).split('\n')
if DEBUG: print "Filtering Variables: {}".format(data)
unfiltered_data = FilterSpecialVars(data, template=template, custom_user_vars=custom_user_vars)
unfiltered_data.filter_now()
if DEBUG: print "Filtered Args: ({})".format(unfiltered_data.filtered_data)
if filter_type == "parameters":
return " ".join(unfiltered_data.filtered_data)
if filter_type == "automation":
return unfiltered_data.filtered_data
def report_back(self, report_data):
url = "http://{}:{}/bph/report.php".format(BPH_TEMPLATE_SERVER_IP, BPH_CONTROLLER_WEB_PORT)
files = {'file': open(report_data['file_abs_path'], 'rb')}
response = requests.post(url, data={'project_name': report_data['project_name'],
'md5': report_data['md5'],
'sid': report_data['sid'],
'tool': report_data['tool_name'],
'rid': report_data['rid'],
'file': report_data['file'],
'dir': report_data['dir']}, files=files)
if DEBUG: print "Response: {}".format(response.text)
def report_files(self, base_folder, tool_name):
if DEBUG: print "Searching files in: {} - tool: {}".format(base_folder, tool_name)
while True:
if len(os.listdir(base_folder)) != 0:
if DEBUG: print "Files found.. Collecting them now..."
files_found = []
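                # Each collected entry is [absolute path, file name, directory part(s)]
                # relative to the tool's report folder; non-log files are zipped first.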
for root, dirs, files in os.walk(base_folder):
for file in files:
full_path = os.path.join(root, file)
if DEBUG: print "FullPath: {}".format(full_path)
file_name = os.path.basename(full_path)
if DEBUG: print "FileName: {}".format(file_name)
index = full_path.split('\\').index(tool_name)
if DEBUG: print "Index: {}".format(index)
path_found = "/".join([x for x in full_path.split('\\')[index+1:]])
if DEBUG: print "PathFound: {}".format(path_found)
if path_found.count('/') == 0:
# Tool log file was found (e.g. bintext.log)
if DEBUG: print "Found log file: {}".format(path_found)
if path_found.endswith('.log'):
if DEBUG: print "FullPath: {}".format(full_path)
file_and_path_found = [full_path, path_found, '/']
files_found.append(file_and_path_found)
else:
# Any file inside of the /files/ folder.
if DEBUG: print "Found non-log file: {}".format(path_found)
# For non-log files, a file version of the file will be generated
# due problems of uploading big files through HTTP. This is a temporary fix.
zip_filename = File().zip_file(full_path)
file_and_path_found = zip_filename.split() + \
path_found.split('/')[:-1]
if DEBUG: print file_and_path_found
file_and_path_found.insert(
0, full_path.replace(file_name, zip_filename))
if file_and_path_found not in files_found:
if DEBUG: print "Appending file found: {}".format(file_and_path_found)
files_found.append(file_and_path_found)
if DEBUG: print "FullPathFound: {}".format(file_and_path_found)
if DEBUG: print "Files Found: {}".format(files_found)
return list(files_found)
else:
if DEBUG: print "Waiting for files to appear..."
time.sleep(1)
def report(self, template):
def filter_dir(unfiltered_dir):
if DEBUG: print "Unfiltered dir: {}".format(unfiltered_dir)
dir_path = "/".join(unfiltered_dir)
if dir_path.startswith('/'):
return unfiltered_dir[0]
return "/{}".format(dir_path)
report_data = {}
if os.path.isdir(self.report_path):
if DEBUG: print "Sending back results to C&C server..."
# Request variables. Generate data on the server.
report_data['project_name'] = template.project_name
report_data['md5'] = template.md5
report_data['sid'] = template.sid
report_data['rid'] = template.rid
report_data['tool_name'] = template.tool_name
for file_found in self.report_files(self.report_path,
template.tool_name):
# if DEBUG: print "FileFound: {}".format(file_found)
report_data['file_abs_path'] = file_found[0]
report_data['file'] = urllib.quote(file_found[1], safe='')
report_data['dir'] = filter_dir(file_found[2:])
if DEBUG: print report_data
self.report_back(report_data)
if DEBUG: print "Done."
else:
if DEBUG: print "Report Directory ({}) does not exist".format(self.report_path)
def report_directory_check(self, vm_report_name):
report_path = os.path.join(APP_DATA, vm_report_name)
if DEBUG:
print report_path
if not os.path.isdir(report_path):
os.mkdir(report_path)
            self.report_directory_check(vm_report_name)
        else:
            # Note: this only binds a local name; it does not update any
            # module- or class-level REPORT_PATH.
            REPORT_PATH = report_path
class Agent:
RETRY_SECS = 1
BUFFER_SIZE = 16384
def __init__(self):
self.connection_status = False
#### Agent Control Functions ####
def start(self):
print "Starting Agent..."
# Connect to Server
self.connect()
def stop(self):
print "Stopping Agent..."
self.disconnect()
self.connection_status = False
def restart(self):
self.stop()
self.start()
#### Agent Connection Functions ####
def check_connection(self):
pass
# print dir(self._clientsocket)
def is_connected(self):
if self.connection_status == True:
return True
return False
def send(self, data):
print "Sending Data: {}".format(data)
try:
self._clientsocket.send(data)
except:
self.reconnect()
def listen(self):
print "Connected to C&C Template Server. Waiting for instructions..."
try:
while True:
# Keeps running receiving data. Once received
# it its automatically un-serialized and converted
# into an Python dictionary object.
serialized_data = pickle.loads(self._clientsocket.recv(self.BUFFER_SIZE))
template_data = box.Box(serialized_data)
# TemplateManager decomposes serialized data
# and take actions to execute the selected program
TemplateManager(template_data)
print "Sending back to C&C => OK status"
self.send('ok')
except socket.error as e:
print "Server disconnection: {}".format(e)
self.reconnect()
except EOFError as e:
print "Server disconnection...".format(e)
self.reconnect()
else:
# If template data was received correctly, then acknowledge.
self.send('skip')
def connect(self):
# Make the connection to the server
print "Connecting to C&C Template Server: {}:{}".format(BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT)
try:
# Initialize Socket & connect back to server.
self._clientsocket = socket.socket()
self._clientsocket.connect((BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT))
self._clientsocket.setblocking(1)
except socket.error:
self.reconnect()
except KeyboardInterrupt:
print "Interrupting execution."
sys.exit()
else:
print "Connection established. "
self.connection_status = True
self.listen()
def disconnect(self):
self._clientsocket.close()
def reconnect(self):
print "Reconnecting...."
if DEBUG: print "Connection Error. Server down? Attempting connection in: ({}) seconds".format(self.RETRY_SECS)
time.sleep(self.RETRY_SECS)
if DEBUG: print "Attempting now..."
self.connect()
if __name__ == "__main__":
agent = Agent()
try:
agent.start()
while True:
# agent.check_connection()
if not agent.is_connected():
# If agent stops. Start it again.
agent.start()
except KeyboardInterrupt:
print "Manual interruption. Bye!"
sys.exit()
| 40.552822 | 126 | 0.554386 | 26,482 | 0.945043 | 0 | 0 | 0 | 0 | 0 | 0 | 6,282 | 0.224181 |
469870ae47593eb387aa34d03ce486676acb3094 | 2,599 | py | Python | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
]
| null | null | null | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
]
| null | null | null | python/src/learn/lstmSequence.py | kakaba2009/MachineLearning | 26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33 | [
"Apache-2.0"
]
| null | null | null | # LSTM with Variable Length Input Sequences to One Character Output
import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.utils import np_utils
from keras.preprocessing.sequence import pad_sequences
from theano.tensor.shared_randomstreams import RandomStreams
# fix random seed for reproducibility
numpy.random.seed(7)
# define the raw dataset
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# create mapping of characters to integers (0-25) and the reverse
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# prepare the dataset of input to output pairs encoded as integers
num_inputs = 16
max_len = 5
dataX = []
dataY = []
for i in range(num_inputs):
start = numpy.random.randint(len(alphabet)-2)
end = numpy.random.randint(start, min(start+max_len,len(alphabet)-1))
sequence_in = alphabet[start:end+1]
sequence_out = alphabet[end + 1]
dataX.append([char_to_int[char] for char in sequence_in])
dataY.append(char_to_int[sequence_out])
print( sequence_in, '->', sequence_out )
# convert list of lists to array and pad sequences if needed
X = pad_sequences(dataX, maxlen=max_len, dtype='float32')
# reshape X to be [samples, time steps, features]
X = numpy.reshape(X, (X.shape[0], max_len, 1))
# normalize
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# create and fit the model
batch_size = 1
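# stateful=True with batch_size 1 carries the LSTM state across samples, so the
# state is reset manually after fitting and after evaluation below.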
model = Sequential()
model.add(LSTM(16, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
for i in range(1):
model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=2, shuffle=False)
model.reset_states()
# summarize performance of the model
scores = model.evaluate(X, y, batch_size=batch_size, verbose=0)
model.reset_states()
print("Model Accuracy: %.2f%%" % (scores[1]*100))
# demonstrate some model predictions
for i in range(1):
pattern_index = numpy.random.randint(len(dataX))
pattern = dataX[pattern_index]
x = pad_sequences([pattern], maxlen=max_len, dtype='float32')
x = numpy.reshape(x, (1, max_len, 1))
x = x / float(len(alphabet))
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
print( seq_in, "->", result )
| 41.919355 | 91 | 0.719123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.251635 |
4699a0bfa6dd1ddc3a1e8897780df54022543382 | 8,541 | py | Python | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
]
| null | null | null | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
]
| null | null | null | DATA/prediction/direction/pred_script.py | korcsmarosgroup/ARN2DataBase | 8931cec0387e5c8b599df40d652ac5fdb5c49a8f | [
"MIT"
]
| null | null | null | """
Direction prediction based on learning dataset from reactome
PPI direction calculated from domain interaction directions
"""
# Imports
import sqlite3, csv, os
import pandas as pd
import logging
import pickle
# # Initiating logger
# logger = logging.getLogger()
# handler = logging.FileHandler('../../workflow/SLK3.log')
# logger.setLevel(logging.DEBUG)
# handler.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
# logger.addHandler(handler)
class DirScore:
def __init__(self):
# Defining constants
self.REACTOME_DB = '../../SLKlib/mapper/protein/output/reactome_mapped.db'
self.PFAM_FILE = ['../prediction/direction/files/uniprot-pfam_human.tab',
'../prediction/direction/files/uniprot-pfam_drosi.tab',
'../prediction/direction/files/uniprot-pfam_danio.tab',
'../prediction/direction/files/uniprot-pfam_celegans.tab']
logging.basicConfig(level=logging.DEBUG)
self.pfam_dict = {}
self.dir_score_dict = {}
# Adding the two output dictionaries of test_score function to a pickle files
# so that the next function can access them inbetween script executions
# TODO: remove pickle files after each run
self.PICKLE_FILE = 'dir_score.pickle'
if os.path.isfile(self.PICKLE_FILE):
self.pfam_dict, self.dir_score_dict = pickle.load(open(self.PICKLE_FILE, 'rb'))
else:
self.test_scores()
pickle.dump((self.pfam_dict, self.dir_score_dict), open(self.PICKLE_FILE, 'wb'))
def test_scores(self):
# Setting as global so next script can access it
df_all = pd.DataFrame(columns=['a_dom', 'b_dom'])
conn = sqlite3.connect(self.REACTOME_DB)
# Setting up learning data set
logging.debug("Started connection to reactome dataset")
for inpfam in self.PFAM_FILE:
with open(inpfam) as infile:
infile.readline()
for line in infile:
line = line.strip().split('\t')
if len(line) == 4:
self.pfam_dict[line[0]] = line[3].split(';')[0:-1]
with conn:
c = conn.cursor()
counter = 0
# Getting PPI data
logging.debug('Getting PPI data')
c.execute("SELECT interactor_a_node_name, interactor_b_node_name FROM edge")
while True:
row = c.fetchone()
counter += 1
if row is None:
break
else:
a_node = row[0].split(':')[1]
b_node = row[1].split(':')[1]
if a_node not in self.pfam_dict or b_node not in self.pfam_dict:
continue
int_list = [self.pfam_dict[a_node], self.pfam_dict[b_node]]
for id1, id2 in zip(int_list[0], int_list[1]):
# Setting up dataframe for all domain-domain interactions
# len(df_all) sets the name of the line
df_all = df_all.set_value(len(df_all), col=['a_dom', 'b_dom'], value=[id1, id2])
# All domains in a dataframe, without direction
all_domain_df = df_all['a_dom'].append(df_all['b_dom']).reset_index(name='domain')
all_count = all_domain_df.groupby('domain').size().reset_index(name='counter')
# Getting probability of each domain
# Number of domain occurrence / Number of all domains
logging.debug('Getting probability of each domain')
prob_dom = {}
# Number of all domain occurrences
total_occurrence = all_count['counter'].sum()
# Iterating over domains
for index, domain in all_count['domain'].iteritems():
dom_count = all_count.loc[all_count['domain'] == domain, 'counter'].iloc[0]
P_domain = dom_count / total_occurrence
# Adding data into a dictionary
prob_dom[domain] = P_domain
#print(domain, P_domain)
# Getting directed domain-domain interaction probabilities
# Number of directed DDI / number of all DDIs
logging.debug('Getting DDI probabilities')
prob_inter = {}
# Getting the occurrences for each directed interaction
all_inter_counted = df_all.groupby(['a_dom', 'b_dom']).size().reset_index(name='counter')
all_inter_counter = all_inter_counted['counter'].sum()
# Iterating over interactions
for index2, count in all_inter_counted['counter'].iteritems():
P_inter = count / all_inter_counter
# Getting domain ids
a_dom = all_inter_counted.loc[all_inter_counted['counter'] == count, 'a_dom'].iloc[0]
b_dom = all_inter_counted.loc[all_inter_counted['counter'] == count, 'b_dom'].iloc[0]
# Adding the into a dictionary
prob_inter['->'.join((a_dom, b_dom))] = P_inter
# Calculating direction score
# (P_AtoB - P_BtoA) / P_A * P_B
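        # (evaluated left to right, i.e. ((P_AtoB - P_BtoA) / P_A) * P_B)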
logging.debug('Calculating direction scores')
for key in prob_inter.keys():
a = key.split('->')[0]
b = key.split('->')[1]
other_dir = '->'.join((b, a))
if other_dir in prob_inter.keys():
dir_score = (prob_inter[key] - prob_inter[other_dir]) / prob_dom[a] * prob_dom[b]
self.dir_score_dict[key] = dir_score
else:
dir_score = (prob_inter[key] - 0) / prob_dom[a] * prob_dom[b]
self.dir_score_dict[key] = dir_score
#print(key, dir_score)
#return self.dir_score_dict, self.pfam_dict
# LAYER 3
def apply_to_db(self):
#logger.debug(self.pfam_dict)
#logger.debug(self.dir_score_dict)
conn2 = sqlite3.connect('SLK3_layers.db')
# logger.debug("Connected to '%s" % conn2)
with conn2:
c2 = conn2.cursor()
c22 = conn2.cursor()
c2.execute("SELECT interactor_a_node_name, interactor_b_node_name FROM ATG_Reg")
while True:
row = c2.fetchone()
if row is None:
break
else:
prot_a = row[0].split(':')[1]
prot_b = row[1].split(':')[1]
dir_score_sum = 0
# Summing DDI scores
#logging.debug('Summing DDI scores')
if prot_a in self.pfam_dict.keys() and prot_b in self.pfam_dict.keys():
for dom_a, dom_b in zip(self.pfam_dict[prot_a], self.pfam_dict[prot_b]):
#print(dir_score_dict['->'.join((dom_a, dom_b))])
if '->'.join((dom_a, dom_b)) in self.dir_score_dict.keys():
dir_score_sum += self.dir_score_dict['->'.join((dom_a, dom_b))]
# To get final direction score of the unknown PPIs we calculate
# the average of each proteins' all domain interaction scores
if len(self.pfam_dict[prot_a]) * len(self.pfam_dict[prot_b]) == 0:
logging.debug(prot_a, len(self.pfam_dict[prot_a]), prot_b, len(self.pfam_dict[prot_b]))
continue
else:
dir_score_final_PPI = dir_score_sum / (len(self.pfam_dict[prot_a]) * len(self.pfam_dict[prot_b]))
#logging.debug("Updating scores")
c22.execute("UPDATE ATG_Reg SET confidence_scores = '%s' "
"WHERE ATG_Reg.interactor_a_node_name = '%s' AND ATG_Reg.interactor_b_node_name = '%s'"
% ('|dir_pred:' + str(dir_score_final_PPI), row[0], row[1]))
if __name__ == '__main__':
test = DirScore()
    # The module-level logger setup is commented out above, so log via the
    # logging module directly here.
    logging.debug('Creating test set')
    test.test_scores()
    logging.debug('Adding scores to dataset')
    test.apply_to_db()
    logging.debug('Direction prediction done')
| 45.673797 | 125 | 0.550872 | 7,764 | 0.909027 | 0 | 0 | 0 | 0 | 0 | 0 | 2,892 | 0.338602 |
4699a1827567dd51a8a50f85f10e57138d48a545 | 1,398 | py | Python | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
]
| null | null | null | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
]
| null | null | null | src/main.py | vcodrins/json_to_folder | 3e07259b1c5757587a5235b2412441d87607597f | [
"MIT"
]
| null | null | null | import json
import os.path
import sys
from exceptions import *
from create_folder_structure import create_folder_structure
def main():
try:
if len(sys.argv) != 3:
raise InvalidArgumentCount
if not os.path.exists(sys.argv[2]):
raise InvalidFilePath
if not os.path.exists(sys.argv[1]):
raise InvalidFolderPath
try:
json_object = json.load(open(sys.argv[2]))
except ValueError:
raise InvalidJsonFile
output_folder = sys.argv[1]
create_folder_structure(output_folder, json_object)
except InvalidArgumentCount:
print("""
Invalid number of arguments
Please make sure to use quotes for outputFolder and jsonFile if path includes spaces
Valid paths may be:
"file.json"
"./file.json"
"folder/file.json"
"./folder/file.json"
"absolute/path/to/file.json"
Usage:
main.py "<outputFolder>" "<jsonFile>"
""")
except InvalidFolderPath:
print("""
Output folder does not exist
""")
except InvalidFilePath:
print("""
Input json file does not exist
""")
except InvalidJsonFile:
print("""
Input json file is invalid
""")
if __name__ == "__main__":
    main()
| 26.884615 | 96 | 0.556509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.434192 |
469a1ea0ba86db2759c0a614f1ca8112b547ba08 | 277 | py | Python | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
]
| null | null | null | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
]
| 7 | 2020-06-19T15:32:07.000Z | 2021-08-23T20:49:39.000Z | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
]
| null | null | null | from datetime import timedelta
import pytest
from model_bakery import baker
@pytest.fixture()
def create_expire_user():
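    # Factory fixture: bakes a members.User whose `created` timestamp is pushed
    # 4 days into the past, i.e. a user that already counts as expired.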
def make_user(**kwargs):
        user = baker.make('members.User', **kwargs)  # forward any field overrides to model_bakery
user.created -= timedelta(days=4)
return user
return make_user
| 19.785714 | 41 | 0.693141 | 0 | 0 | 0 | 0 | 197 | 0.711191 | 0 | 0 | 14 | 0.050542 |
469ba49461fc882de80bb1d478b0aec8c3c11361 | 3,952 | py | Python | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
]
| 7 | 2020-05-18T14:20:17.000Z | 2020-07-27T17:37:38.000Z | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
]
| null | null | null | src/SecurityDecorator.py | JanCwik/SoftwarePraktikum | d4a2b196f21a5379188cb78b31c59d69f739964f | [
"RSA-MD"
]
| 2 | 2020-05-18T14:20:22.000Z | 2020-07-27T17:37:05.000Z | from flask import request
from google.auth.transport import requests
import google.oauth2.id_token
from server.ApplikationsAdministration import ApplikationsAdministration
# Benutzer.py, BenutzerMapper + user methods in ApplikationsAdministration
def secured(function):
"""Decorator zur Google Firebase-basierten Authentifizierung von Benutzern
Da es sich bei diesem System um eine basale Fallstudie zu Lehrzwecken handelt, wurde hier
bewusst auf ein ausgefeiltes Berechtigungskonzept verzichtet. Vielmehr soll dieses Decorator
einen Weg aufzeigen, wie man technisch mit vertretbarem Aufwand in eine Authentifizierung
einsteigen kann.
POLICY: Die hier demonstrierte Policy ist, dass jeder, der einen durch Firebase akzeptierten
Account besitzt, sich an diesem System anmelden kann. Bei jeder Anmeldung werden Klarname,
Mail-Adresse sowie die Google User ID in unserem System gespeichert bzw. geupdated. Auf diese
Weise könnte dann für eine Erweiterung des Systems auf jene Daten zurückgegriffen werden.
"""
firebase_request_adapter = requests.Request()
def wrapper(*args, **kwargs):
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
objects = None
if id_token:
try:
# Verify the token against the Firebase Auth API. This example
# verifies the token on each page load. For improved performance,
# some applications may wish to cache results in an encrypted
# session store (see for instance
# http://flask.pocoo.org/docs/1.0/quickstart/#sessions).
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
if claims is not None:
adm = ApplikationsAdministration()
google_user_id = claims.get("user_id")
email = claims.get("email")
name = claims.get("name")
user = adm.get_user_by_google_user_id(google_user_id)
                    # Use the method name as defined in ApplikationsAdministration
if user is not None:
"""Fall: Der Benutzer ist unserem System bereits bekannt.
Wir gehen davon aus, dass die google_user_id sich nicht ändert.
Wohl aber können sich der zugehörige Klarname (name) und die
E-Mail-Adresse ändern. Daher werden diese beiden Daten sicherheitshalber
in unserem System geupdated."""
user.set_name(name)
user.set_email(email)
adm.update_benutzer(user)
                        # Rename set_name and set_email to match Benutzer.py
                        # Rename adm.save-user to match ApplikationsAdministration.py
else:
"""Fall: Der Benutzer war bislang noch nicht eingelogged.
Wir legen daher ein neues User-Objekt an, um dieses ggf. später
nutzen zu können.
"""
user = adm.benutzer_anlegen(name, email, google_user_id)
                        # Use the method name as defined in ApplikationsAdministration
                    print(request.method, request.path, "requested by:", name, email)
objects = function(*args, **kwargs)
return objects
else:
return '', 401 # UNAUTHORIZED !!!
except ValueError as exc:
# This will be raised if the token is expired or any other
# verification checks fail.
error_message = str(exc)
return exc, 401 # UNAUTHORIZED !!!
return '', 401 # UNAUTHORIZED !!!
return wrapper
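# Illustrative usage (an assumption, not part of the original module): the decorator
# would wrap a Flask view function, e.g.
#
#   @app.route('/api/benutzer')
#   @secured
#   def get_benutzer():
#       ...
#
# so the view body only runs for requests that carry a valid Firebase ID token cookie.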
| 45.953488 | 97 | 0.603492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,147 | 0.542035 |
469bf8d9da5f7e85ca99597046844afc1b477cd9 | 213 | py | Python | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
]
| null | null | null | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
]
| null | null | null | code/django18/django18/newsletter/forms.py | dvl/celerytalk | 312de04ea24bb073357684a3a35cfd782b2b7aae | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
class NewsletterForm(forms.Form):
assunto = forms.CharField()
mensagem = forms.CharField(widget=forms.Textarea)
| 19.363636 | 53 | 0.737089 | 119 | 0.558685 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.107981 |
469d18528989ab40a67eb477eeda37c2533ddfd8 | 5,448 | py | Python | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
]
| 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal efficiency
# WP tuned to give about 90 and 80% signal efficiency for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
| 54.48 | 124 | 0.727423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,794 | 0.696402 |
469d299beef21a4f12403e1476091e6f816d16ea | 1,676 | py | Python | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
]
| null | null | null | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
]
| null | null | null | dqn_plus/notebooks/code/train_ram.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
]
| null | null | null | import numpy as np
import gym
from utils import *
from agent import *
from config import *
def train(env, agent, num_episode, eps_init, eps_decay, eps_min, max_t):
rewards_log = []
average_log = []
eps = eps_init
for i in range(1, 1 + num_episode):
episodic_reward = 0
done = False
state = env.reset()
t = 0
while not done and t < max_t:
t += 1
state = state.reshape(1, -1)
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.memory.remember(state, action, reward, next_state, done)
if t % 4 == 0 and len(agent.memory) >= agent.bs:
agent.learn()
agent.soft_update(agent.tau)
state = next_state.copy()
episodic_reward += reward
rewards_log.append(episodic_reward)
average_log.append(np.mean(rewards_log[-100:]))
print('\rEpisode {}, Reward {:.3f}, Average Reward {:.3f}'.format(i, episodic_reward, average_log[-1]), end='')
if i % 100 == 0:
print()
eps = max(eps * eps_decay, eps_min)
return rewards_log, average_log
if __name__ == '__main__':
env = gym.make(RAM_ENV_NAME)
agent = Agent(env.observation_space.shape[0], env.action_space.n, BATCH_SIZE, LEARNING_RATE, TAU, GAMMA, DEVICE, False, DUEL, DOUBLE, PRIORITIZED)
rewards_log, _ = train(env, agent, RAM_NUM_EPISODE, EPS_INIT, EPS_DECAY, EPS_MIN, MAX_T)
np.save('{}_rewards.npy'.format(RAM_ENV_NAME), rewards_log)
agent.Q_local.to('cpu')
torch.save(agent.Q_local.state_dict(), '{}_weights.pth'.format(RAM_ENV_NAME)) | 33.52 | 150 | 0.613365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.060263 |
469d44404e5e5089163e7fb2cbe8fd08587f00ec | 4,274 | py | Python | tools/parallel_launcher/parallel_launcher.py | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
]
| 2 | 2017-09-02T19:08:28.000Z | 2021-11-15T15:15:14.000Z | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
]
| null | null | null | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
]
| 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool launches several shards of a gtest-based binary
in parallel on a local machine.
Example usage:
parallel_launcher.py path/to/base_unittests
"""
import optparse
import os
import subprocess
import sys
import threading
import time
def StreamCopyWindows(stream_from, stream_to):
"""Copies stream_from to stream_to."""
while True:
buf = stream_from.read(1024)
if not buf:
break
stream_to.write(buf)
stream_to.flush()
def StreamCopyPosix(stream_from, stream_to, child_exited):
"""
Copies stream_from to stream_to, and exits if child_exited
is signaled.
"""
import fcntl
# Put the source stream in a non-blocking mode, so we can check
# child_exited when there is no data.
fd = stream_from.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
buf = os.read(fd, 1024)
except OSError, e:
if e.errno == 11:
if child_exited.isSet():
break
time.sleep(0.1)
continue
raise
if not buf:
break
stream_to.write(buf)
stream_to.flush()
class TestLauncher(object):
def __init__(self, args, executable, num_shards, shard):
self._args = args
self._executable = executable
self._num_shards = num_shards
self._shard = shard
self._test = None
def launch(self):
env = os.environ.copy()
env['CHROME_LOG_FILE'] = 'chrome_log_%d' % self._shard
if 'GTEST_TOTAL_SHARDS' in env:
# Handle the requested sharding transparently.
outer_shards = int(env['GTEST_TOTAL_SHARDS'])
outer_index = int(env['GTEST_SHARD_INDEX'])
env['GTEST_TOTAL_SHARDS'] = str(self._num_shards * outer_shards)
# Calculate the right shard index to pass to the child. This is going
# to be a shard of a shard.
env['GTEST_SHARD_INDEX'] = str((self._num_shards * outer_index) +
self._shard)
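# Example: with GTEST_TOTAL_SHARDS=2 and GTEST_SHARD_INDEX=1 already in the environment
# and --shards=10 locally, the child running local shard 3 is given shard index
# 10*1 + 3 = 13 out of 20 total shards.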
else:
env['GTEST_TOTAL_SHARDS'] = str(self._num_shards)
env['GTEST_SHARD_INDEX'] = str(self._shard)
args = self._args + ['--test-server-shard=' + str(self._shard)]
self._test = subprocess.Popen(args=args,
executable=self._executable,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
def wait(self):
if subprocess.mswindows:
stdout_thread = threading.Thread(
target=StreamCopyWindows,
args=[self._test.stdout, sys.stdout])
stdout_thread.start()
code = self._test.wait()
stdout_thread.join()
return code
else:
child_exited = threading.Event()
stdout_thread = threading.Thread(
target=StreamCopyPosix,
args=[self._test.stdout, sys.stdout, child_exited])
stdout_thread.start()
code = self._test.wait()
child_exited.set()
stdout_thread.join()
return code
def main(argv):
parser = optparse.OptionParser()
parser.add_option("--shards", type="int", dest="shards", default=10)
# Make it possible to pass options to the launched process.
# Options for parallel_launcher should be first, then the binary path,
# and finally - optional arguments for the launched binary.
parser.disable_interspersed_args()
options, args = parser.parse_args(argv)
if not args:
print 'You must provide a path to the test binary'
return 1
env = os.environ
if bool('GTEST_TOTAL_SHARDS' in env) != bool('GTEST_SHARD_INDEX' in env):
print 'Inconsistent environment. GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX'
print 'should either be both defined, or both undefined.'
return 1
launchers = []
for shard in range(options.shards):
launcher = TestLauncher(args, args[0], options.shards, shard)
launcher.launch()
launchers.append(launcher)
return_code = 0
for launcher in launchers:
if launcher.wait() != 0:
return_code = 1
return return_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 27.574194 | 78 | 0.656996 | 1,873 | 0.438231 | 0 | 0 | 0 | 0 | 0 | 0 | 1,315 | 0.307674 |
469e17c0eac34c546af54d506129856bf3802b70 | 83 | py | Python | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
]
| null | null | null | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
]
| 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 05_Practice1/Step06/yj.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
]
| null | null | null | a = int(input())
for i in range(a):
print('* '*(a-a//2))
print(' *'*(a//2)) | 20.75 | 24 | 0.433735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.096386 |
469f11caff52684e1427825fdf274cfe729dc55b | 231 | py | Python | greydot/errors.py | TralahM/greydot-api | 34982c34bf878b2728e934147ca4ea38a78523e4 | [
"MIT"
]
| null | null | null | greydot/errors.py | TralahM/greydot-api | 34982c34bf878b2728e934147ca4ea38a78523e4 | [
"MIT"
]
| null | null | null | greydot/errors.py | TralahM/greydot-api | 34982c34bf878b2728e934147ca4ea38a78523e4 | [
"MIT"
]
| null | null | null | class NoMessageRecipients(Exception):
"""
Raised when Message Recipients are not specified.
"""
pass
class InvalidAmount(Exception):
"""
Raised when an invalid currency amount is specified
"""
pass
| 19.25 | 55 | 0.666667 | 228 | 0.987013 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.571429 |
469f233747542475f293cc21f8824a73074353c6 | 7,176 | py | Python | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| 76 | 2015-01-05T14:46:14.000Z | 2022-03-23T04:10:54.000Z | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| 858 | 2015-01-17T01:55:12.000Z | 2022-03-08T20:20:31.000Z | ginga/util/dp.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| 60 | 2015-01-14T21:59:07.000Z | 2022-02-13T03:38:49.000Z | #
# dp.py -- Data pipeline and reduction routines
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
from collections import OrderedDict
from ginga import AstroImage, colors
from ginga.RGBImage import RGBImage
from ginga.util import wcs
# counter used to name anonymous images
prefixes = dict(dp=0)
def get_image_name(image, pfx='dp'):
global prefixes
name = image.get('name', None)
if name is None:
if pfx not in prefixes:
prefixes[pfx] = 0
name = '{0}{1:d}'.format(pfx, prefixes[pfx])
prefixes[pfx] += 1
image.set(name=name)
return name
def make_image(data_np, oldimage, header, pfx='dp'):
# Prepare a new image with the numpy array as data
image = AstroImage.AstroImage()
image.set_data(data_np)
# Set the header to be the old image header updated
# with items from the new header
oldhdr = oldimage.get_header()
oldhdr.update(header)
image.update_keywords(oldhdr)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], dtype=None, logger=None, pfx='dp',
mmap_path=None, mmap_mode='w+'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
if np.isscalar(px_scale):
px_wd_scale, px_ht_scale = (px_scale, px_scale)
else:
px_wd_scale, px_ht_scale = px_scale
# Create an empty image
if np.isscalar(fov_deg):
fov_wd_deg, fov_ht_deg = (fov_deg, fov_deg)
else:
fov_wd_deg, fov_ht_deg = fov_deg
width = int(round(fov_wd_deg / px_wd_scale))
height = int(round(fov_ht_deg / px_ht_scale))
# round to an even size
if width % 2 != 0:
width += 1
if height % 2 != 0:
height += 1
if dtype is None:
dtype = np.float32
if mmap_path is None:
data = np.zeros((height, width), dtype=dtype)
else:
data = np.memmap(mmap_path, dtype=dtype, mode=mmap_mode,
shape=(height, width))
crpix1 = float(width // 2)
crpix2 = float(height // 2)
header = OrderedDict((('SIMPLE', True),
('BITPIX', -32),
('EXTEND', True),
('NAXIS', 2),
('NAXIS1', width),
('NAXIS2', height),
('RA', ra_txt),
('DEC', dec_txt),
('EQUINOX', 2000.0),
('OBJECT', 'MOSAIC'),
('LONPOLE', 180.0),
))
# Add basic WCS keywords
wcshdr = wcs.simple_wcs(crpix1, crpix2, ra_deg, dec_deg,
(px_wd_scale, px_ht_scale),
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# Create image container
image = AstroImage.AstroImage(data, logger=logger)
image.update_keywords(header)
# give the image a name
get_image_name(image, pfx=pfx)
return image
def recycle_image(image, ra_deg, dec_deg, fov_deg, px_scale, rot_deg,
cdbase=[1, 1], logger=None, pfx='dp'):
# ra and dec in traditional format
ra_txt = wcs.raDegToString(ra_deg, format='%02d:%02d:%06.3f')
dec_txt = wcs.decDegToString(dec_deg, format='%s%02d:%02d:%05.2f')
header = image.get_header()
pointing = OrderedDict((('RA', ra_txt),
('DEC', dec_txt),
))
header.update(pointing)
# Update WCS keywords and internal wcs objects
wd, ht = image.get_size()
crpix1 = wd // 2
crpix2 = ht // 2
wcshdr = wcs.simple_wcs(crpix1, crpix2, ra_deg, dec_deg, px_scale,
rot_deg, cdbase=cdbase)
header.update(wcshdr)
# this should update the wcs
image.update_keywords(header)
# zero out data array
data = image.get_data()
data.fill(0)
## # Create new image container sharing same data
## new_image = AstroImage.AstroImage(data, logger=logger)
## new_image.update_keywords(header)
## # give the image a name
## get_image_name(new_image, pfx=pfx)
new_image = image
return new_image
def make_flat(imglist, bias=None):
flats = [image.get_data() for image in imglist]
flatarr = np.array(flats)
# Take the median of the individual frames
flat = np.median(flatarr, axis=0)
# Normalize flat
# mean or median?
#norm = np.mean(flat.flat)
norm = np.median(flat.flat)
flat = flat / norm
# no zero divisors
flat[flat == 0.0] = 1.0
img_flat = make_image(flat, imglist[0], {}, pfx='flat')
return img_flat
def make_bias(imglist):
biases = [image.get_data() for image in imglist]
biasarr = np.array(biases)
# Take the median of the individual frames
bias = np.median(biasarr, axis=0)
img_bias = make_image(bias, imglist[0], {}, pfx='bias')
return img_bias
def add(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np + data2_np
image = make_image(result, image1, {}, pfx='add')
return image
def subtract(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np - data2_np
image = make_image(result, image1, {}, pfx='sub')
return image
def divide(image1, image2):
data1_np = image1.get_data()
data2_np = image2.get_data()
result = data1_np / data2_np
image = make_image(result, image1, {}, pfx='div')
return image
# https://gist.github.com/stscieisenhamer/25bf6287c2c724cb9cc7
def masktorgb(mask, color='lightgreen', alpha=1.0):
"""Convert boolean mask to RGB image object for canvas overlay.
Parameters
----------
mask : ndarray
Boolean mask to overlay. 2D image only.
color : str
Color name accepted by Ginga.
alpha : float
Opacity. Unmasked data are always transparent.
Returns
-------
rgbobj : RGBImage
RGB image for canvas Image object.
Raises
------
ValueError
Invalid mask dimension.
"""
mask = np.asarray(mask)
if mask.ndim != 2:
raise ValueError('ndim={0} is not supported'.format(mask.ndim))
ht, wd = mask.shape
r, g, b = colors.lookup_color(color)
rgbobj = RGBImage(data_np=np.zeros((ht, wd, 4), dtype=np.uint8))
rc = rgbobj.get_slice('R')
gc = rgbobj.get_slice('G')
bc = rgbobj.get_slice('B')
ac = rgbobj.get_slice('A')
ac[:] = 0 # Transparent background
rc[mask] = int(r * 255)
gc[mask] = int(g * 255)
bc[mask] = int(b * 255)
ac[mask] = int(alpha * 255)
# For debugging
#rgbobj.save_as_file('ztmp_rgbobj.png')
return rgbobj
def split_n(lst, sz):
n = len(lst)
k, m = n // sz, n % sz
return [lst[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
for i in range(sz)]
# END
| 27.181818 | 72 | 0.593924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,890 | 0.263378 |
469f5e5ed924f088a814fc16a98e14d55994cdf9 | 3,294 | py | Python | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
]
| null | null | null | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
]
| null | null | null | jigsaw/datasets/datasets.py | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 | [
"MIT"
]
| null | null | null | from torch.utils.data import Dataset
from ..utils.optimal_lenght import find_optimal_lenght
class PairedDataset(Dataset):
def __init__(
self,
df,
cfg,
tokenizer,
more_toxic_col='more_toxic',
less_toxic_col='less_toxic'
):
self.df = df
self.cfg = cfg
self.tokenizer = tokenizer
self.more_toxic = df[more_toxic_col].values
self.less_toxic = df[less_toxic_col].values
self.more_toxic_max_lenght = find_optimal_lenght(
df, tokenizer, more_toxic_col, cfg.max_length
)
self.less_toxic_max_lenght = find_optimal_lenght(
df, tokenizer, less_toxic_col, cfg.max_length
)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
more_toxic = self.more_toxic[index]
less_toxic = self.less_toxic[index]
inputs_more_toxic = self.tokenizer.encode_plus(
more_toxic,
truncation=True,
max_length=self.more_toxic_max_lenght,
add_special_tokens=True,
)
inputs_less_toxic = self.tokenizer.encode_plus(
less_toxic,
truncation=True,
max_length=self.less_toxic_max_lenght,
add_special_tokens=True,
)
target = 1
more_toxic_ids = inputs_more_toxic['input_ids']
more_toxic_mask = inputs_more_toxic['attention_mask']
less_toxic_ids = inputs_less_toxic['input_ids']
less_toxic_mask = inputs_less_toxic['attention_mask']
return {
'more_toxic_ids': more_toxic_ids,
'more_toxic_mask': more_toxic_mask,
'less_toxic_ids': less_toxic_ids,
'less_toxic_mask': less_toxic_mask,
'target': target
}
class RegressionDataset(Dataset):
def __init__(self, df, cfg, tokenizer, text_col, target_col = None):
self.df = df
self.cfg = cfg
self.tokenizer = tokenizer
self.X = df[text_col].values
self.target_col = target_col
self.max_lenght = find_optimal_lenght(
df, tokenizer, text_col, cfg.max_length
)
if target_col is not None:
self.y = df[target_col].values
def __len__(self):
return len(self.df)
def __getitem__(self, index):
text = self.X[index]
if self.target_col is not None:
target = self.y[index]
inputs = self.tokenizer.encode_plus(
text,
truncation=True,
max_length=self.max_lenght,
add_special_tokens=True,
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
if self.target_col is not None:
return {
'input_ids': ids,
'attention_mask': mask,
'target': target
}
else:
return {
'input_ids': ids,
'attention_mask': mask
} | 31.673077 | 70 | 0.532483 | 3,197 | 0.970553 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.073163 |
46a0c78276633a2a5a223df91b47b5f7924ae094 | 66 | py | Python | packaging/pack1/andrew_mod1.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
]
| 3 | 2019-05-04T12:19:09.000Z | 2019-08-30T07:12:31.000Z | packaging/pack1/build/lib/mymod1.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
]
| null | null | null | packaging/pack1/build/lib/mymod1.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
]
| null | null | null |
def something() -> None:
print("Andrew says: `something`.")
| 13.2 | 38 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.409091 |
46a1c447600050372f1c46ddc6ed6f7e8c87b183 | 117 | py | Python | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
]
| null | null | null | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
]
| null | null | null | app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | d8b2c7312304df627369721e8e1821cf724431d7 | [
"MIT"
]
| null | null | null | blacklist=set()
def get_blacklist():
return blacklist
def add_to_blacklist(jti):
return blacklist.add(jti)
| 14.625 | 29 | 0.735043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
46a2b9041cbdb5a67a2d23664c83d5328cdf8c09 | 260 | py | Python | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
]
| null | null | null | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
]
| null | null | null | apps/tasks/api/views.py | dayvidemerson/django-rest-example | 85eabb1e154cfd8ebc0019080b37cd3f1302c206 | [
"MIT"
]
| null | null | null | from rest_framework import viewsets
from rest_framework import generics
from ..models import Task
from .serializers import TaskSerializer
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
queryset = Task.objects.all()
| 21.666667 | 41 | 0.796154 | 113 | 0.434615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
46a39565f7364f7f1fb4b269da3328cdcc2b0021 | 97 | py | Python | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
]
| null | null | null | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
]
| null | null | null | jarvis/__init__.py | jduncan8142/JARVIS | 387003bc00cea2ca74d7094a92e55eab593a968a | [
"MIT"
]
| null | null | null | __version__ = "0.0.3"
__author__ = "Jason Duncan"
__support__ = "[email protected]"
| 24.25 | 46 | 0.742268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.546392 |
46a4d57f3c07a504c88eba6e3644cc933118a8c3 | 1,798 | py | Python | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
]
| 36 | 2020-07-21T21:22:02.000Z | 2021-10-20T06:55:47.000Z | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
]
| 2 | 2020-10-29T12:49:15.000Z | 2021-04-29T01:12:05.000Z | src/figcli/test/cli/action.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
]
| null | null | null | from typing import Union, List
import pexpect
from figcli.utils.utils import Utils
import sys
class FiggyAction:
"""
Actions prevent cyclic dependencies, and are designed for leveraging FiggyCli for cleanup steps when running inside
of tests.
"""
def __init__(self, command, extra_args=""):
self.c = Utils.default_colors()
self.command = command
self.extra_args = extra_args
self._child = self.spawn(command)
print(f"{self.c.fg_yl}Executing action: {self._child.args}{self.c.rs}")
self._child.logfile = sys.stdout
self._child.delaybeforesend = .5
def spawn(self, command: str):
return pexpect.spawn(command, timeout=10, encoding='utf-8')
def expect_multiple(self, regexes: List[str]):
print(f'Expecting: {regexes}')
return self._child.expect(regexes)
def expect(self, regex: Union[List[str], str], retry=True):
print(f'Expecting: {regex}')
expect_list = [regex] + [pexpect.TIMEOUT] if isinstance(regex, str) else regex + [pexpect.TIMEOUT]
result = self._child.expect(expect_list)
if result == len(expect_list) - 1 and retry:
self.alert(f"EXPECT FAILED: {regex} initiating retry!")
self._child = self.spawn(self.command)
return self.expect(regex, retry=False)
else:
return result
def sendline(self, line: str):
print(f'Sending: {line}')
self._child.sendline(line)
def wait(self):
self._child.wait()
def alert(self, msg: str):
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}")
print(f"{self.c.fg_rd} ALERT: {msg}{self.c.rs}")
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}") | 34.576923 | 119 | 0.60178 | 1,700 | 0.945495 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.279199 |
46a4f53ed5b4a611b18a262f155eca68d71783fb | 7,831 | py | Python | test/python/test_elementwise_ops.py | avijit-chakroborty/ngraph-bridge | b691d57412a40582ea93c6e564d80c750b7f2e8e | [
"Apache-2.0"
]
| 142 | 2019-02-21T00:53:06.000Z | 2022-03-11T07:46:28.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
]
| 252 | 2019-03-11T19:27:59.000Z | 2021-03-19T10:58:17.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
]
| 65 | 2019-03-13T15:27:29.000Z | 2021-07-16T07:09:16.000Z | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestElementwiseOperations(NgraphTest):
@pytest.mark.parametrize(("v1", "v2", "expected"),
((1.0, -1.0, [1.0]), (100, 200, ([200],)),
([0.0, 5.0, 10.0], [6.0],
(np.array([[6.0, 6.0, 10.0]]),))))
def test_maximum(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.maximum(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [True]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, False, True]]),))))
def test_less_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [True]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, False, True]]),))))
def test_less(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [False]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, True, False]]),))))
def test_greater_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [False]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, True, False]]),))))
def test_greater(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("v1", "v2", "expected"),
((True, True, [True]), (True, False, ([False],)),
(1.0, -2.0, ([True],)), (False, 100, ([False],)),
([False, True, False], [True],
(np.array([[False, True, False]]),))))
def test_logical_and(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.bool, shape=(None))
val2 = tf.compat.v1.placeholder(tf.bool, shape=(None))
out = tf.logical_and(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("test_input", "expected"), ((False, True),
(True, False)))
def test_logicalnot_1d(self, test_input, expected):
val = tf.compat.v1.placeholder(tf.bool, shape=(1,))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: (test_input,)})[
0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
def test_logicalnot_2d(self):
test_input = ((True, False, True), (False, True, False))
expected = np.logical_not(test_input)
val = tf.compat.v1.placeholder(tf.bool, shape=(2, 3))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: test_input})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
| 47.75 | 80 | 0.477461 | 6,772 | 0.864768 | 0 | 0 | 6,221 | 0.794407 | 0 | 0 | 933 | 0.119142 |
46a58d19b627254c0cc57fea12f1310b8d2e7c37 | 7,453 | py | Python | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
]
| null | null | null | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
]
| null | null | null | qc/slips.py | mfkiwl/UREGA-qc | 989e6b59d4fa5259ce48daa6165bdab4e020ba49 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Lars Stenseng.
@mail: [email protected]
"""
# from qc.__version__ import __version__
import georinex as gr
import numpy as np
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
obs = gr.load(
'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx',
# tlim=['2021-11-03T12:00', '2021-11-03T12:30'])
tlim=['2021-11-03T05:30', '2021-11-03T07:30'])
# tlim=['2021-11-03T15:00', '2021-11-03T18:00'])
# hdr = gr.rinexheader(
# 'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx')
# rnx_version = 3
# %% Starting test
# Copying helper functions from Multipath class - later on, it could be turned
# into a separate class with helper functions
# Pick GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# %%
# 5:30 to 7:30, G08 and G21 give 2 cycle slips # [290:300]
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
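# Detection sketch: sigma_L4 captures the scatter of the L1/L2 difference and I_max allows
# for ionospheric drift; any epoch-to-epoch change of L4 larger than this criterion is
# counted as a cycle slip in the loop below.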
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
plt.title('Single-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %%
# Plot all loaded sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
for i in range(0, len(svG)):
test = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label=svG[i], linewidth=2.0)
#ax.plot(L2test.time, L2test, label='L2', linewidth=0.5)
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %%
# Plot separate sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
test = obs.sel(sv='E21').dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label='L1', linewidth=2.0)
ax.plot(L2test.time, L2test, label='L2', linewidth=1.0)
ax.grid()
# ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %% Dual-frequency Melbourne-Wuebbena testing
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
freq = [1575.42, 1227.60, 1176.45] # L1, L2, L5 for GPS
f1 = freq[0]*1e6
f2 = freq[1]*1e6
P1 = sattest['C1C']
P2 = sattest['C2W']
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L6 = (1/(f1-f2))*(f1*L1 - f2*L2) - (1/(f1+f2))*(f1*P1 + f2*P2)
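# The combination above is the Melbourne-Wuebbena linear combination (wide-lane carrier
# phase minus narrow-lane pseudorange); it should stay nearly constant along an arc, so
# epoch-to-epoch jumps beyond k*sigma_L6 are treated as cycle slips.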
sigma_L6 = np.std(L6)
k = 4 # criterion factor
criterion = k*sigma_L6
slips_nr = 0
L6_diff = []
for i in range(1, len(L6)):
L6_diff.append(np.abs(L6[i] - L6[i-1]))
if (np.abs(L6[i] - L6[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L6_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L6')
plt.title('Dual-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %% Work in Progress
class Slips:
"""
Class for cycle slip detection of RINEX files.
Provides options for different detection algorithms.
The methods take an observation dataset (as loaded with georinex) and report detected slips.
"""
def __init__(self):
pass
def slips_MW_single_freq(self, obs):
"""
Cycle slip detection algorithm 1.
Based on Melbourne-Wuebbena,
but only on carrier phase data (single-frequency)
(from Vaclavovic-Dousa 2016 article)
Parameters
----------
obs : xarray.Dataset
RINEX observation data, as loaded with georinex.
Returns
-------
None.
"""
# Select a list of GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# Melbourne-Wuebbena parameters (predetermined)
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
# For each tracked satellite
for i in range(0, len(svG)):
current_sat = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1 = current_sat['L1C']
L2 = current_sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for j in range(1, len(L4)):
L4_diff.append(np.abs(L4[j] - L4[j-1]))
if (np.abs(L4[j] - L4[j-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
print('Sat:', svG[i],
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
def plot_slips(self, obs, sat_nr: str):
"""
Plot cycle slips for one satellite vehicle.
Parameters
----------
obs : xarray.Dataset
RINEX observation data, as loaded with georinex.
sat_nr : str
Satellite identifier, e.g. 'G08'.
Returns
-------
None.
"""
sat = obs.sel(sv=sat_nr).dropna(dim='time', how='all')
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sat['L1C']
L2 = sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat_nr, linewidth=1.0)
# labelfull = 'Slip limit: ', criterion.values
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
show()
print('Sat:', sat_nr,
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
# %% Testing first algorithm
sliptest = Slips().slips_MW_single_freq(obs)
# %% Testing plot function
sliptest = Slips().plot_slips(obs, 'G08')
| 25.611684 | 79 | 0.570643 | 3,202 | 0.429626 | 0 | 0 | 0 | 0 | 0 | 0 | 3,246 | 0.435529 |
46a68217514e2d9ae5f9e06cbba236282798ed2c | 8,610 | py | Python | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
]
| null | null | null | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
]
| null | null | null | tests/test_utils.py | jga/goldfinchsong | 638e166948944a7f027d03bcf8f7c14dc2f4b6f2 | [
"MIT"
]
| null | null | null | from collections import OrderedDict
from datetime import datetime, timezone
import unittest
from os.path import join
from tinydb import TinyDB, storages
from goldfinchsong import utils
IMAGE_NAMES = ['goldfinch1.jpg', 'goldfinch2.jpg', 'goldfinch3.jpg',
'goldfinch4.jpg', 'goldfinch5.jpg']
TEST_TEXT1 = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
TEST_TEXT2 = 'This is a test of the goldfinchsong project. Tests ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
class LoadContentTests(unittest.TestCase):
def test_basic_load(self):
image_directory = 'tests/images/'
db = TinyDB(storage=storages.MemoryStorage)
content = utils.load_content(db, image_directory)
full_image_path = content[0]
image_file = full_image_path.replace(image_directory, '')
status_text = content[1]
self.assertTrue(image_file in IMAGE_NAMES)
self.assertEqual(image_file.replace('.jpg', ''), status_text)
def test_storage_in_db(self):
image_directory = 'tests/images/'
# let's load a list of tweets into the db
db = TinyDB(storage=storages.MemoryStorage)
image_names = [
'goldfinch1.jpg',
'goldfinch2.jpg',
'goldfinch3.jpg',
'goldfinch4.jpg'
]
for image_name in image_names:
delivery_timestamp = datetime.now(tz=timezone.utc).isoformat()
tweet = {'image': image_name, 'delivered_on': delivery_timestamp}
db.insert(tweet)
content = utils.load_content(db, image_directory)
self.assertEqual(content[2], 'goldfinch5.jpg')
tweets = db.all()
self.assertEqual(len(tweets), 4, msg=tweets)
class UtilitiesTests(unittest.TestCase):
def test_apply_abbreviations(self):
text_conversions = {
'abbreviations': 'abbr',
'goldfinchsong': 'gf',
'important': 'impt'
}
# exhausts all conversions before reaching limit
new_text1 = utils.apply_abbreviations(TEST_TEXT1, text_conversions)
expected_text1 = 'This is a test of the gf project. This test checks ' \
'abbr, vowel elision, length checking, and other logic. ' \
'Tests are impt!'
self.assertEqual(expected_text1, new_text1)
new_text2 = utils.apply_abbreviations(TEST_TEXT2, text_conversions)
self.assertTrue(len(new_text2) <= 117)
def test_apply_vowel_elision(self):
result_text = utils.apply_vowel_elision(TEST_TEXT1)
expected_text = 'This is a tst of the gldfnchsng prjct. Ths tst chcks ' \
'abbrvtns, vwl elsn, lngth chckng, and othr lgc. Tsts ' \
'are imprtnt!'
self.assertEqual(expected_text, result_text)
def test_assemble_elided_status(self):
complete_words = ['test', 'a', 'is', 'This']
elided_words = ['systm', 'gldfnch', 'the', 'of']
result = utils.assemble_elided_status(complete_words, elided_words)
self.assertEqual('This is a test of the gldfnch systm', result)
def test_chop_words(self):
result_text = utils.chop_words(TEST_TEXT1)
expected_text = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and'
self.assertEqual(expected_text, result_text)
def test_is_image(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertTrue(utils.is_image_file(image_file))
def test_is_not_image(self):
image_files = [
'image.docx',
'image.pdf',
'image.md',
'image.html',
'image.css',
'image.odt',
'image.sh',
'image.xlsx',
'image.txt',
'image.c',
'image.py',
'image'
]
for image_file in image_files:
self.assertFalse(utils.is_image_file(image_file))
def test_trim_file_extensions(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertEqual(utils.trim_file_extension(image_file), 'image')
def test_to_compact_text(self):
text_conversions = {
'abbreviations': 'abbrs',
'goldfinchsong': 'gfnch',
'important': 'importnt'
}
candidate_text1 = utils.to_compact_text(TEST_TEXT1, 100, text_conversions)
expected_text1 = 'Ths is a tst of the gfnch prjct. Ths tst chcks abbrs, ' \
'vwl elsn, lngth chckng, and othr lgc. Tsts are'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.to_compact_text(TEST_TEXT1, 50, text_conversions)
expected_text2 = 'Ths is a tst of the gfnch prjct. Ths tst chcks'
self.assertEqual(expected_text2, candidate_text2)
candidate_text3 = utils.to_compact_text(TEST_TEXT1, 20, text_conversions)
expected_text3 = 'Ths is a tst of the'
self.assertEqual(expected_text3, candidate_text3)
def test_extract_status_text(self):
conversion_data = (
('abbreviations', 'abbrs'),
('goldfinchsong', 'gfnch'),
('important', 'importnt'),
)
text_conversions = OrderedDict(conversion_data)
file = 'Some_goldfinchsong_image-file_with_a_very_long_set_of_' \
'characters_and_abbreviations_that_conveys_important_info.png'
candidate_text1 = utils.extract_status_text(file, text_conversions, maximum_length=100,)
expected_text1 = 'Some gfnch image-file with a very long set of characters and abbrs that conveys important info'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.extract_status_text(file, text_conversions, maximum_length=70,)
expected_text2 = 'Sme gfnch imge-fle wth a vry lng st of chrctrs and abbrs tht cnvys'
self.assertEqual(expected_text2, candidate_text2)
def test_get_unused_files(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,52):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 49)
self.assertEqual(unused_files[0], 'image52.png')
self.assertEqual(unused_files[5], 'image57.png')
self.assertEqual(unused_files[10], 'image62.png')
self.assertEqual(unused_files[15], 'image67.png')
self.assertEqual(unused_files[20], 'image72.png')
self.assertEqual(unused_files[33], 'image85.png')
self.assertEqual(unused_files[48], 'image100.png')
def test_db_purge_when_all_posted(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,106):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
self.assertEqual(len(db.all()), 105)
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 100)
self.assertEqual(unused_files[0], 'image1.png')
self.assertEqual(unused_files[5], 'image6.png')
self.assertEqual(unused_files[10], 'image11.png')
self.assertEqual(unused_files[33], 'image34.png')
self.assertEqual(unused_files[50], 'image51.png')
| 40.046512 | 121 | 0.616609 | 7,911 | 0.918815 | 0 | 0 | 0 | 0 | 0 | 0 | 2,321 | 0.26957 |
46a6977e7b919a1d64a9944a2e191bffb62c293c | 2,868 | py | Python | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
]
| 2,611 | 2018-10-16T20:14:10.000Z | 2022-03-31T14:48:41.000Z | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
]
| 249 | 2018-10-27T06:02:29.000Z | 2022-03-30T18:00:39.000Z | lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 26d3d6672d3f46d8f281c2aa9f57166ef6296738 | [
"Apache-2.0"
]
| 436 | 2018-10-25T05:31:45.000Z | 2022-03-31T07:26:03.000Z | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generator for image data."""
import os
import lingvo.compat as tf
from lingvo.core import base_input_generator
from tensorflow.python.ops import io_ops
class _MnistInputBase(base_input_generator.BaseTinyDatasetInput):
"""Base input params for MNIST."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data_dtype = tf.uint8
p.data_shape = (28, 28, 1)
p.label_dtype = tf.uint8
return p
def _Preprocess(self, raw):
data = tf.stack([
tf.image.per_image_standardization(img) for img in tf.unstack(raw)
])
data.set_shape(raw.shape)
return data
class MnistTrainInput(_MnistInputBase):
"""MNist training set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_train'
p.label = 'y_train'
p.num_samples = 60000
p.batch_size = 256
p.repeat = True
return p
class MnistTestInput(_MnistInputBase):
"""MNist test set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_test'
p.label = 'y_test'
p.num_samples = 10000
p.batch_size = 256
p.repeat = False
return p
def _GetRandomImages(batch_size):
images = tf.random.uniform((batch_size, 28, 28, 1), 0, 255, tf.int32)
return tf.cast(images, tf.uint8)
def _GetRandomLabels(batch_size):
labels = tf.random.categorical(0.1 * tf.ones((1, 10)), batch_size)
return tf.cast(labels, tf.uint8)
def FakeMnistData(tmpdir, train_size=60000, test_size=10000):
"""Fake Mnist data for unit tests."""
data_path = os.path.join(tmpdir, 'ckpt')
with tf.Graph().as_default():
tf.random.set_seed(91)
with tf.Session() as sess:
sess.run(
io_ops.save_v2(
data_path,
tensor_names=['x_train', 'y_train', 'x_test', 'y_test'],
shape_and_slices=['', '', '', ''],
tensors=[
_GetRandomImages(train_size),
_GetRandomLabels(train_size),
_GetRandomImages(test_size),
_GetRandomLabels(test_size)
]))
return data_path
| 27.84466 | 80 | 0.639121 | 1,031 | 0.359484 | 0 | 0 | 607 | 0.211646 | 0 | 0 | 995 | 0.346932 |
46a69a58e97c1c80acfb7499d4de7c5a7c1ed4bb | 875 | py | Python | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
]
| null | null | null | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
]
| null | null | null | src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 71e537180f3b371d0d2cc47b11cb68ec13a8ac68 | [
"Apache-2.0"
]
| null | null | null | from src.base.solution import Solution
from src.tests.part1.q389_test_find_diff import FindDiffTestCases
class FindDiff(Solution):
def verify_output(self, test_output, output):
return test_output[0] == output[0]
def run_test(self, input):
return self.findTheDifference(input[0], input[1])
def gen_test_cases(self):
return FindDiffTestCases()
def print_output(self, output):
super(FindDiff, self).print_output(output)
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
lkp = dict()
for ch in s:
lkp[ch] = lkp.get(ch, 0) + 1
for ch in t:
lkp[ch] = lkp.get(ch, 0) - 1
if lkp[ch] < 0:
return ch
if __name__ == '__main__':
solution = FindDiff()
solution.run_tests() | 24.305556 | 65 | 0.580571 | 689 | 0.787429 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.099429 |
46aa55bc676b909ffd23d501d1007af51f171f16 | 293 | py | Python | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
]
| null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
]
| null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
]
| null | null | null | #unit
#mydict.py
class Dict(dict):
def __init__(self,**kw):
super(Dict,self).__init__(**kw)
def __getattr__(self,key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object han no attribute'%s'" %key)
def __setattr__(self,key,value):
self[key]=value
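# Usage sketch: d = Dict(a=1); d.b = 2  ->  d.a == 1 and d['b'] == 2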
| 19.533333 | 67 | 0.68942 | 271 | 0.924915 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.177474 |
46ab99e6eb8d1fdb04330410131bcc8d1d609369 | 19,623 | py | Python | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
]
| 19 | 2019-06-06T18:23:29.000Z | 2022-01-06T15:30:20.000Z | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
]
| 524 | 2019-07-01T19:18:39.000Z | 2022-02-13T19:33:02.000Z | copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 844029ee59867c41acfa75c8ce12db6713c9960c | [
"MIT"
]
| 18 | 2019-06-06T18:23:07.000Z | 2021-07-15T06:01:17.000Z | import contextlib
import os
import tempfile
import warnings
from enum import Enum
import mip
class IISFinderAlgorithm(Enum):
DELETION_FILTER = 1
ADDITIVE_ALGORITHM = 2
class SubRelaxationInfeasible(Exception):
pass
class NonRelaxableModel(Exception):
pass
class ConflictFinder:
"""This class groups some IIS (Irreducible Infeasible Set) search algorithms"""
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
def find_iis(
self, method: IISFinderAlgorithm = IISFinderAlgorithm.DELETION_FILTER,
cbc_verbose: bool = False
) -> mip.ConstrList:
"""main method to find an IIS, this method is just a grouping of the other implementations
Args:
model (mip.Model): Infeasible model where to find the IIS
method (str, optional): name of the method to use ["deletion-filter", "additive_algorithm"]. Defaults to 'deletion-filter".
Returns:
mip.ConstrList: IIS constraint list
"""
# assert ,is not because time limit
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
if method == IISFinderAlgorithm.DELETION_FILTER:
return self.deletion_filter()
if method == IISFinderAlgorithm.ADDITIVE_ALGORITHM:
return self.additive_algorithm()
def deletion_filter(self) -> mip.ConstrList:
"""deletion filter algorithm for search an IIS
Args:
model (mip.Model): Infeasible model
Returns:
mip.ConstrList: IIS
"""
# 1. create a model with all constraints but one
aux_model = self.model.copy()
aux_model.objective = 1
aux_model.emphasis = 1 # feasibility
aux_model.preprocess = 1 # -1 automatic, 0 off, 1 on.
print("starting deletion_filter algorithm")
for inc_crt in self.model.constrs:
aux_model_inc_crt = aux_model.constr_by_name(
inc_crt.name
) # find constraint by name
aux_model.remove(aux_model_inc_crt) # temporally remove inc_crt
aux_model.optimize()
status = aux_model.status
# 2. test feasibility, if feasible, return dropped constraint to the set
# 2.1 else removed it permanently
# print('status {}'.format(status))
if status == mip.OptimizationStatus.INFEASIBLE:
# print("removing permanently {}".format(inc_crt.name))
continue
elif status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
aux_model.add_constr(
inc_crt.expr, name=inc_crt.name, priority=inc_crt.priority
)
iis = aux_model.constrs
return iis
def additive_algorithm(self) -> mip.ConstrList:
"""Additive algorithm to find an IIS
Returns:
mip.ConstrList: IIS
"""
# Create some aux models to test feasibility of the set of constraints
aux_model_testing = mip.Model()
for var in self.model.vars:
aux_model_testing.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
# obj= var.obj,
# column=var.column #!! libc++abi.dylib: terminating with uncaught exception of type CoinError
)
aux_model_testing.objective = 1
aux_model_testing.emphasis = 1 # feasibility
aux_model_testing.preprocess = 1 # -1 automatic, 0 off, 1 on.
aux_model_iis = (
aux_model_testing.copy()
) # a second aux model to test feasibility of the incumbent iis
# algorithm start
all_constraints = self.model.constrs
testing_crt_set = mip.ConstrList(model=aux_model_testing) # T
iis = mip.ConstrList(model=aux_model_iis) # I
while True:
for crt in all_constraints:
testing_crt_set.add(crt.expr, name=crt.name)
aux_model_testing.constrs = testing_crt_set
aux_model_testing.optimize()
if aux_model_testing.status == mip.OptimizationStatus.INFEASIBLE:
iis.add(crt.expr, name=crt.name)
aux_model_iis.constrs = iis
aux_model_iis.optimize()
if aux_model_iis.status == mip.OptimizationStatus.INFEASIBLE:
return iis
elif aux_model_iis.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
testing_crt_set = mip.ConstrList(model=aux_model_testing)
for (
crt
) in (
iis
): # basically this loop is for set T=I // aux_model_iis = iis.copy()
testing_crt_set.add(crt.expr, name=crt.name)
break
def deletion_filter_milp_ir_lc_bd(self) -> mip.ConstrList:
"""Integer deletion filter algorithm (milp_ir_lc_bd)
Raises:
NotImplementedError: [description]
Returns:
mip.ConstrList: [description]
"""
raise NotImplementedError("WIP")
# major constraint sets definition
t_aux_model = mip.Model(name="t_auxiliary_model")
iis_aux_model = mip.Model(name="t_auxiliary_model")
linear_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constraints
variable_bound_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constraints related specifically to the variable bounds
integer_varlist_crt = mip.VarList(
model=t_aux_model
) # the integrality constraints for vars of type Integer/Binary
# fill the above sets with the constraints
for crt in self.model.constrs:
linear_constraints.add(crt.expr, name=crt.name)
for var in self.model.vars:
if var.lb != -mip.INF:
variable_bound_constraints.add(
var >= var.lb, name="{}_lb_crt".format(var.name)
)
if var.ub != mip.INF:
variable_bound_constraints.add(
var <= var.ub, name="{}_ub_crt".format(var.name)
)
for var in self.model.vars:
if var.var_type in (mip.INTEGER, mip.BINARY):
integer_varlist_crt.add(var)
status = "IIS"
# add all LC,BD to the incumbent, T= LC + BD
for (
var
) in (
self.model.vars
): # add all variables as if they were CONTINUOUS and without bounds (because this will be separated)
iis_aux_model.add_var(
name=var.name, lb=-mip.INF, ub=mip.INF, var_type=mip.CONTINUOUS
)
for crt in linear_constraints + variable_bound_constraints:
iis_aux_model.add_constr(crt.expr, name=crt.name, priority=crt.priority)
iis_aux_model.optimize()
if iis_aux_model.status == mip.OptimizationStatus.INFEASIBLE:
# if infeasible means that this is a particular version of an LP
return self.deletion_filter() # (STEP 2)
# add all the integer constraints to the model
iis_aux_model.vars.remove(
[var for var in integer_varlist_crt]
) # remove all integer variables
for var in integer_varlist_crt:
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=var.var_type, # this will add the var with his original type
)
# filter IR constraints that create infeasibility (STEP 1)
for var in integer_varlist_crt:
iis_aux_model.vars.remove(iis_aux_model.var_by_name(var.name))
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS, # relax the integer constraint over var
)
iis_aux_model.optimize()
# if infeasible then update incumbent T = T-{ir_var_crt}
# else continue
# STEP 2 filter lc constraints
# STEP 3 filter BD constraints
# return IS o IIS
def deletion_filter_milp_lc_ir_bd(self) -> mip.ConstrList:
raise NotImplementedError # TODO
class ConflictRelaxer:
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
self.iis_num_iterations = 0
self.iis_iterations = []
self.relax_slack_iterations = []
@property
def slack_by_crt(self) -> dict:
answ = {}
for slack_dict_iter in self.relax_slack_iterations:
for crt_name in slack_dict_iter.keys():
if crt_name in answ.keys():
answ[crt_name] += slack_dict_iter[crt_name]
else:
answ[crt_name] = slack_dict_iter[crt_name]
return answ
def hierarchy_relaxer(
self,
relaxer_objective: str = "min_abs_slack_val",
default_priority: mip.constants.ConstraintPriority = mip.constants.ConstraintPriority.MANDATORY,
cbc_verbose: bool = False
) -> mip.Model:
"""hierarchy relaxer algorithm, it's gonna find a IIS and then relax it using the objective function defined (`relaxer_objective`) and then update the model
with the relaxed constraints. This process runs until there's not more IIS on the model.
Args:
relaxer_objective (str, optional): objective function of the relaxer model (IIS relaxer model). Defaults to 'min_abs_slack_val'.
default_priority (ConstraintPriority, optional): If a constraint does not have a supported substring priority in the name, it will assign a default priority.
Defaults to ConstraintPriority.MANDATORY.
Raises:
NonRelaxableModel: [description]
Returns:
mip.Model: relaxed model
"""
relaxed_model = self.model.copy()
relaxed_model._status = self.model._status # TODO solve this in a different way
        # map unmapped constraints to the default priority
for crt in relaxed_model.constrs:
if not crt.priority:
crt.priority = default_priority
iis_it = 0
iis_dict = {}
taboo_list_iis = []
cf = ConflictFinder(relaxed_model)
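        # Main loop: find an IIS, relax its lowest-priority constraints, patch the
        # model with the resulting slacks, and repeat until the model becomes feasible.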
while True:
# 1. find iis
iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
self.iis_iterations.append([crt.name for crt in iis]) # track iteration
self.iis_num_iterations += 1 # track iteration
iis_priority_set = set([crt.priority for crt in iis])
            # check whether the model is relaxable under the priority mapping
if iis_priority_set == set([mip.constants.ConstraintPriority.MANDATORY]):
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
# 2. relax iis
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
for level, relaxing_level in enumerate(sorted(iis_priority_set, key=lambda x: x.value)):
# highest case (raise exception)
if relaxing_level == mip.constants.ConstraintPriority.MANDATORY:
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
try:
slack_dict = self.relax_iis(iis, relaxer_objective=relaxer_objective, lowest_priority=relaxing_level)
except SubRelaxationInfeasible as e:
warnings.warn(f'Warning relaxing more than one level, currently on l{level} : {relaxing_level}')
continue
else:
                        # relaxable IIS; continue with the next iteration
break
self.relax_slack_iterations.append(slack_dict)
# 3. add the slack variables to the original problem
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model = self.relax_constraints(relaxed_model, slack_dict)
# 4. check if feasible
relaxed_model.emphasis = 1 # feasibility
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model.optimize()
if relaxed_model.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
print("finished relaxation process !")
break
else:
print(
"relaxed the current IIS, still infeasible, searching for a new IIS to relax"
)
print("relaxed constraints {0}".format(list(slack_dict.keys())))
iis_it += 1
# print(f'found iis_{iis_it} = {[crt.name for crt in iis]}')
iis_dict[iis_it] = {}
iis_crt = [crt.name for crt in iis]
iis_dict[iis_it]['iis'] = [{'name': crt.name, 'priority': str(crt.priority).split('.')[1]} for crt in iis]
print(f'found iis_{iis_it} : len = {len(iis_crt)} in_taboo = {(iis_crt in taboo_list_iis)}')
taboo_list_iis.append(iis_crt)
iis_dict[iis_it]['slack'] = slack_dict
return relaxed_model
@classmethod
def relax_iis(
cls, iis: mip.ConstrList, relaxer_objective: str = "min_abs_slack_val", lowest_priority: 'mip.constants.ConstraintPriority' = None
) -> dict:
"""This function is the sub module that finds the optimum relaxation for an IIS, given a crt priority mapping and a objective function
Args:
iis (mip.ConstrList): IIS constraint list
relaxer_objective (str, optional): objective function to use when relaxing. Defaults to 'min_abs_slack_val'.
Returns:
dict: a slack variable dictionary with the value of the {constraint_name:slack.value} pair to be added to each constraint in order to make the IIS feasible
"""
relax_iis_model = mip.Model()
if lowest_priority is None:
lowest_priority = min([crt.priority for crt in iis])
to_relax_crts = [crt for crt in iis if crt.priority == lowest_priority or crt.priority < lowest_priority]
# create a model that only contains the iis
slack_vars = {}
abs_slack_vars = {}
abs_slack_cod_vars = {}
for crt in iis:
# print(crt.name, crt.priority)
for var in crt._Constr__model.vars:
relax_iis_model.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
obj=var.obj,
)
if crt in to_relax_crts:
                # if this is a to-be-relaxed constraint
slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}__{1}".format(crt.name, "slack"),
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
abs_slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}_abs".format(slack_vars[crt.name].name),
lb=0,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
# add relaxed constraint to model
relax_expr = crt.expr + slack_vars[crt.name]
relax_iis_model.add_constr(
relax_expr,
name="{}_relaxed".format(crt.name),
)
# add abs(slack) variable encoding constraints
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= slack_vars[crt.name],
name="{}_positive_min_bound".format(slack_vars[crt.name].name),
)
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= -slack_vars[crt.name],
name="{}_negative_min_bound".format(slack_vars[crt.name].name),
)
else:
                # if not to be relaxed, add it directly to the model
relax_iis_model.add_constr(
crt.expr, name="{}_original".format(crt.name), priority=crt.priority
)
# find the min abs value of the slack variables
relax_iis_model.objective = mip.xsum(list(abs_slack_vars.values()))
relax_iis_model.sense = mip.MINIMIZE
relax_iis_model.optimize()
if relax_iis_model.status == mip.OptimizationStatus.INFEASIBLE:
            raise SubRelaxationInfeasible("sub-relaxation model is infeasible; this can happen when the MANDATORY constraints inside the IIS are themselves infeasible")
slack_dict = {}
for crt in to_relax_crts:
slack_dict[crt.name] = slack_vars[crt.name].x
return slack_dict
@classmethod
def relax_constraints(cls, relaxed_model: mip.Model, slack_dict: dict) -> mip.Model:
"""this method creates a modification of the model `relaxed_model` where all the constraints in the slack_dict are
modified in order to add the slack values to make the IIS disappear
Args:
relaxed_model (mip.Model): model to relax
slack_dict (dict): pairs {constraint_name: slack_var.value}
Returns:
mip.Model: a modification of the original model where all the constraints are modified with the slack values
"""
for crt_name in slack_dict.keys():
crt_original = relaxed_model.constr_by_name(crt_name)
relax_expr = crt_original.expr + slack_dict[crt_name]
relaxed_model.add_constr(
relax_expr, name=crt_original.name, priority=crt_original.priority
)
relaxed_model.remove(crt_original) # remove constraint
return relaxed_model
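# Usage sketch (editor's addition, not part of the original module): a minimal
# example of how the classes above might be driven, assuming `my_model` is an
# already-built, infeasible `mip.Model` and that `IISFinderAlgorithm` is the enum
# defined earlier in this module.
#
#   cf = ConflictFinder(my_model)
#   iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
#   print([crt.name for crt in iis])
#
#   cr = ConflictRelaxer(my_model)
#   relaxed = cr.hierarchy_relaxer(relaxer_objective="min_abs_slack_val")
#   print(cr.slack_by_crt)  # total slack applied per constraint name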
@contextlib.contextmanager
def ignore_output():
with tempfile.TemporaryFile() as f:
orig_std_out = os.dup(1)
os.dup2(f.fileno(), 1)
yield # pause the coroutine to execute the with code
os.dup2(orig_std_out, 1)
        os.close(orig_std_out)
 | 42.109442 | 169 | 0.587983 | 19,235 | 0.980227 | 250 | 0.01274 | 5,318 | 0.271009 | 0 | 0 | 6,193 | 0.315599 |
46abd7d33dffc8675b1cbcb1f61d7140668df589 | 249 | py | Python | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | null | null | null |
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_pattern_1_xsd.nistschema_sv_iv_atomic_integer_pattern_1 import NistschemaSvIvAtomicIntegerPattern1
__all__ = [
"NistschemaSvIvAtomicIntegerPattern1",
]
| 41.5 | 190 | 0.891566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.148594 |
46ad0929668e5c287bd02c9734950a08fc91328e | 3,919 | py | Python | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | ["MIT"] | null | null | null | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | ["MIT"] | null | null | null | Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 48403f13dbcaa7981f252361819f06435a75333b | ["MIT"] | null | null | null |
import re
import xlsxwriter
def parse_menu_to_excel(filename,menu_dict,days_dict,results,goal_dict,food_database,reversed_ingredient_dict,grocery_dict):
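    # Writes the planned menu to an .xlsx workbook: one sheet with the weekly menu plus
    # the nutrition results/goals, one sheet with the aggregated grocery list, and one
    # ingredient sheet per recipe group. The values in days_dict are assumed to expose
    # tkinter-style variables (date_var.get()), as used below.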
# making a temporary dict to map dates and columns in excel:
temp_dates_dict = {}
i=0
for key in days_dict.keys():
temp_dates_dict[days_dict[key]['date_var'].get()] = i
i += 1
temp_meals_dict = {}
i = 0
for meal in ['Breakfast', 'Lunch','Dinner']:
temp_meals_dict[meal] = i
i += 1
# converting the menu-dict to dates and lunches
for item in list(menu_dict.keys()):
new_key = tuple(tuple(elem.replace('M1', 'Breakfast').replace('M2', 'Lunch').replace('M3', 'Dinner').replace('D1', days_dict['D1']['date_var'].get()).replace('D2',days_dict['D2']['date_var'].get()).replace('D3',days_dict['D3']['date_var'].get()).replace('D4',days_dict['D4']['date_var'].get()).replace('D5',days_dict['D5']['date_var'].get()).replace('D6',days_dict['D6']['date_var'].get()).replace('D7',days_dict['D7']['date_var'].get())
for elem in tup) for tup in item)
menu_dict[new_key] = menu_dict[item]
menu_dict.pop(item)
# putting it into an excel file:
workbook = xlsxwriter.Workbook(filename)
separator_format = workbook.add_format({'bg_color': '#000000'})
# make worksheets
menu_worksheet = workbook.add_worksheet(f"Menu - {days_dict['D1']['date_var'].get()} to {days_dict['D7']['date_var'].get()}") # for menu
temp_worksheet_dict = {}
global_groceries_worksheet = workbook.add_worksheet("your grocery list")
for group in list(menu_dict.keys()):
temp_worksheet_dict[group] = workbook.add_worksheet(f"{list(menu_dict[group].keys())[0][:31]}")
# print the menu to menu-sheet
col = 0
for key in temp_dates_dict:
menu_worksheet.write(0, col, key)
col += 1
row = 1
for key in temp_meals_dict:
menu_worksheet.write(row, 0, key)
row += 1
for group in menu_dict.keys():
for slot in group:
menu_worksheet.write(temp_meals_dict[slot[1]] + 1,temp_dates_dict[slot[0]] + 1, str(list(menu_dict[group].keys())[0]))
for i in range(0,8):
menu_worksheet.write(4,i,"",separator_format)
menu_worksheet.write(5,0, "Results:")
row = 5
for metric in results.keys():
menu_worksheet.write(row,1,str(f"{metric}: {round(results[metric],2)}"))
row += 1
menu_worksheet.write(5,2, "Goals:")
row = 6
for metric in goal_dict.keys():
menu_worksheet.write(row,3,str(f"{metric}: {round(goal_dict[metric],2)}"))
row += 1
# writing the global grocery-list:
row = 1
col = 0
global_groceries_worksheet.write(0,0,"Your grocery list:")
for ingredient in grocery_dict.keys():
ingredient_id = reversed_ingredient_dict[ingredient]
global_groceries_worksheet.write(row, col, ingredient)
global_groceries_worksheet.write(row, col + 1, str(grocery_dict[ingredient]))
global_groceries_worksheet.write(row, col + 2, str(food_database['ingredients'][ingredient_id]['unit']))
row += 1
# writing the recipe-lists:
for group in menu_dict.keys():
temp_worksheet_dict[group].write(0,0, f"Ingredient list for {list(menu_dict[group].keys())[0]}:")
row = 1
col = 0
for recipe in menu_dict[group].keys():
for ingredient in menu_dict[group][recipe].keys():
ingredient_id = reversed_ingredient_dict[ingredient]
temp_worksheet_dict[group].write(row, col, ingredient)
temp_worksheet_dict[group].write(row, col + 1, str(menu_dict[group][recipe][ingredient]))
temp_worksheet_dict[group].write(row, col + 2, str(food_database['ingredients'][ingredient_id]['unit']))
row += 1
workbook.close()
| 48.382716 | 446 | 0.63307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 845 | 0.215616 |
46ad5a9032022b2051fa16df11181281b5f9eae8 | 1,256 | py | Python | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | ["MIT"] | 3 | 2021-01-08T09:56:46.000Z | 2021-03-02T20:47:29.000Z | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | ["MIT"] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 2217f8790820d8ec7ab076c836b2d182877d8ee8 | ["MIT"] | 4 | 2021-01-06T12:10:23.000Z | 2021-03-16T22:16:07.000Z |
#!/usr/bin/env python3
from sys import stderr, exit
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from parentheses_lib import recognize
# METADATA OF THIS TAL_SERVICE:
problem="parentheses"
service="check_one_sol_server"
args_list = [
('input_formula',str),
('n',str),
('silent',bool),
('lang',str),
('ISATTY',bool),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
n=ENV['n']
len_input = len(ENV["input_formula"])//2
if not ENV["silent"]:
TAc.print(LANG.opening_msg, "green")
def answer():
if recognize(ENV["input_formula"], TAc, LANG) and not ENV["silent"]:
TAc.OK()
TAc.print(LANG.render_feedback("ok", f' Your string is a well-formed formula with {len_input} pairs of parentheses.'), "yellow", ["bold"])
if n=='free':
answer()
else:
if len_input==int(n):
answer()
elif recognize(ENV["input_formula"], TAc, LANG) and not ENV['silent']:
TAc.print(LANG.render_feedback("different_lengths", f"No! Your string represents a valid formula of parentheses but not of {n} pairs."), "red", ["bold"])
exit(0)
| 29.904762 | 162 | 0.65207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.373408 |
46ad743d904b177a6882bac14c8a5ed867753ee6 | 2,436 | py | Python | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | ["MIT"] | null | null | null | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | ["MIT"] | null | null | null | app/validation/translator.py | codingedward/book-a-meal-api | 36756abc225bf7e8306330f2c3e223dc32af7869 | ["MIT"] | 2 | 2018-10-01T17:45:19.000Z | 2020-12-07T13:48:25.000Z |
"""Translates validation error messages for the response"""
messages = {
'accepted': 'The :field: must be accepted.',
'after': 'The :field: must be a date after :other:.',
'alpha': 'The :field: may contain only letters.',
'alpha_dash': 'The :field: may only contain letters, numbers, and dashes.',
'alpha_num': 'The :field: may contain only letters and numbers.',
'array': 'The :field: must be an array.',
'before': 'The :field: must be a date before :other:.',
'between': 'The :field: must be between :least: and :most:.',
'between_string': 'The :field: must be between :least: and :most: characters.',
'between_numeric': 'The :field: must be between :least: and :most:.',
'boolean': 'The :field: must be either true or false.',
'confirmed': 'The :field: confirmation does not match.',
'date': 'The :field: is not a valid date.',
'different': 'The :field: and :other: must be different.',
'digits': 'The :field: must be :length: digits.',
'email': 'The :field: must be a valid email address.',
'exists': 'The selected :field: is invalid.',
'found_in': 'The selected :field: is invalid.',
'integer': 'The :field: must be an integer.',
'json': 'The :field: must be valid json format.',
'most_string': 'The :field: must not be greater than :most: characters.',
'most_numeric': 'The :field: must not be greater than :most:.',
'least_string': 'The :field: must be at least :least: characters.',
'least_numeric': 'The :field: must be at least :least:.',
'not_in': 'The selected :field: is invalid.',
'numeric': 'The :field: must be a number.',
'positive': 'The :field: must be a positive number.',
'regex': 'The :field: format is invalid.',
'required': 'The :field: field is required.',
'required_with': 'The :field: field is required when :other: is present.',
    'required_without': 'The :field: field is required when :other: is not present.',
'same': 'The :field: and :other: must match.',
'size_string': 'The :field: must be :size: characters.',
'size_numeric': 'The :field: must be :size:.',
'string': 'The :field: must be a string.',
'unique': 'The :field: is already taken.',
'url': 'The :field: format is invalid.',
}
def trans(rule, fields):
message = messages[rule]
for k, v in fields.items():
message = message.replace(k, v).replace('_', ' ')
return message
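# Usage sketch (editor's addition): trans() swaps the ':field:'/':other:'-style
# placeholders for concrete names and turns underscores into spaces, e.g.
#   trans('required', {':field:': 'first_name'})
#   -> 'The first name field is required.'
#   trans('between_numeric', {':field:': 'age', ':least:': '18', ':most:': '99'})
#   -> 'The age must be between 18 and 99.'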
| 47.764706 | 85 | 0.630542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,961 | 0.805008 |
46af16319f2d029f582d103a8745545d6de7422c | 333 | py | Python | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | ["MIT"] | 12 | 2016-11-30T15:22:22.000Z | 2018-02-27T23:03:12.000Z | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | ["MIT"] | null | null | null | chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 82e54eb914fb005fcdebad1ed07cede898957733 | ["MIT"] | null | null | null |
from channels import Group
# websocket.connect
def ws_add(message):
Group("chat").add(message.reply_channel)
# websocket.receive
def ws_message(message):
Group("chat").send({
"text": message.content['text'],
})
# websocket.disconnect
def ws_disconnect(message):
Group("chat").discard(message.reply_channel) | 22.2 | 48 | 0.708709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.27027 |
46af2004cde5fbac1f953e967f4311dafcb8c8e2 | 8,154 | py | Python | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | ["MIT"] | null | null | null | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | ["MIT"] | null | null | null | env/enviroment.py | Dorebom/robot_pybullet | 21e95864da28eb5553266513b1a1a735901395b6 | ["MIT"] | null | null | null |
from copy import deepcopy
import numpy as np
import pybullet as p
import gym
from gym import spaces
from env.robot import Manipulator
from env.work import Work
class Env():
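    # Gym-style pybullet environment that couples a Manipulator (robot) with a Work
    # object, exposes reset()/step()/observe_state(), and adds uniform noise to the
    # observed poses at reset and at every step.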
def __init__(self, reward,
step_max_pos = 0.002,
step_max_orn = 0.02,
initial_pos_noise = 0.001,
initial_orn_noise = 0.001,
step_pos_noise = 0.0002,
step_orn_noise = 0.0002):
p.connect(p.GUI)
p.setPhysicsEngineParameter(enableFileCaching=0)
p.setRealTimeSimulation(False)
p.setGravity(0, 0, -9.8)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.setPhysicsEngineParameter(contactBreakingThreshold=0.001)
# Init
self._is_init_env = False
# Plane
self.plane_pos = [0, 0, -0.1]
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
self.reward = reward
self.max_initial_pos_noise = initial_pos_noise
self.max_initial_orn_noise = initial_orn_noise
self.max_step_pos_noise = step_pos_noise
self.max_step_orn_noise = step_orn_noise
# robot
self.step_max_pos = step_max_pos
self.step_max_orn = step_max_orn
self.inv_scaled_force_coef = 5000
# for learning
self.action_space = spaces.Box(
low=-1,
high=1,
shape=(6,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=-1,
high=1,
shape=(12,),
dtype=np.float32
)
self._act_rel_tcp_pose = [0, 0, 0, 0, 0, 0]
def init_env(self, mode = 'rel',
robot_tcp_pose = [0, 0, 0, 0, 0, 0],
robot_base_pose = [0, 0, 0, 0, 0, 0],
robot_tool_pose = [0, 0, 0, 0, 0, 0],
work_base_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
# Load work
self.work = Work(base_pose = work_base_pose)
self.act_abs_work_pose = work_base_pose
# Load robot
self.robot = Manipulator(tool_pose=robot_tool_pose, base_pose=robot_base_pose)
self._reset_robot_pose(mode=mode, tcp_pose=robot_tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self._is_init_env = True
return self.observe_state(mode = mode)
def _reset_robot_pose(self, mode='rel', tcp_pose=[0, 0, 0, 0, 0, 0]):
abs_tcp_pose = np.zeros(6)
if mode == 'rel':
abs_tcp_pose = np.array(self.act_abs_work_pose) + np.array(tcp_pose)
elif mode == 'abs':
abs_tcp_pose = tcp_pose
else:
print("ERROR(enviroment.py): mode is not correct.")
abs_tcp_pose = [0, 0, 0, 0, 0, 0]
self.robot.reset_pose(abs_tcp_pose=abs_tcp_pose)
def reset(self,
mode = 'rel',
tcp_pose = [0, 0, 0, 0, 0, 0],
base_pose = [0, 0, 0, 0, 0, 0],
tool_pose = [0, 0, 0, 0, 0, 0],
work_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
return self.init_env(mode = mode,
robot_tcp_pose = tcp_pose,
robot_base_pose = base_pose,
robot_tool_pose = tool_pose,
work_base_pose = work_pose)
        # For faster processing (currently disabled):
'''
if np.linalg.norm( np.array(tool_pose) - self.prev_tool_pose ) < 1e-6:
else:
'''
# Reset env
p.resetSimulation()
# Load Plane
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
# Reset work
self.work.reset(base_pose = work_pose)
# Reset Robot
self.robot.reset_base(base_pose=base_pose, tool_pose=tool_pose)
self._reset_robot_pose(mode='rel', tcp_pose=tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self.prev_tool_pose = tool_pose
return self.observe_state(mode = mode)
def destory(self):
p.disconnect()
def step(self, action, step):
        # Command values are generated here, so the true (noise-free) values should be used
cmd_abs_tcp_pose = np.zeros(6)
cmd_abs_tcp_pose[:3] = np.array(self._act_abs_tcp_pose[:3]) + np.array(action[:3])
cmd_abs_tcp_pose[3:6] = np.array(self._act_abs_tcp_pose[3:6]) + np.array(action[3:6])
print('next_pose:', cmd_abs_tcp_pose)
self.robot.move_to_pose(cmd_abs_tcp_pose, mode='direct')
pose, force, success, out_range = self.decision()
r = self.calc_reward(relative_pose = pose,
success = success,
out_range = out_range,
act_step = step)
done = success or out_range
return np.concatenate([pose, force]), r, done, success
def decision(self):
'''
observe
act_abs_tcp_pose
act_rel_tcp_pose
act_abs_work_pose
act_force
'''
act_pose_noisy, act_force = self.observe_state(mode='rel')
scaled_act_force = act_force / self.inv_scaled_force_coef
        # [Note] evaluated with the true (noise-free) values here
success_range_of_pos = 0.003
success_range_of_orn = 0.04
success = (np.linalg.norm(self._act_rel_tcp_pose[:3]) <= success_range_of_pos and \
np.linalg.norm(self._act_rel_tcp_pose[3:]) <= success_range_of_orn)
        # [Note] evaluating with the true values here may not be correct.
out_range_of_pos = 0.1
out_range_of_orn = 0.8
out_range = any([abs(pos) > out_range_of_pos for pos in act_pose_noisy[:3]]) \
or any([abs(orn) > out_range_of_orn for orn in act_pose_noisy[3:6]])
return act_pose_noisy, scaled_act_force, success, out_range
def observe_state(self, mode='rel'):
self._act_abs_tcp_pose, self.act_force, _ = self.robot.get_state()
self._act_abs_work_pose = self.work.get_state()
self._act_rel_tcp_pose = np.array(self._act_abs_tcp_pose) - np.array(self._act_abs_work_pose)
'''
        Noise handling
'''
act_rel_tcp_pose_noisy = np.zeros(6)
act_rel_tcp_pose_noisy[:3] = self._act_rel_tcp_pose[:3] + self.initial_pos_noise
act_rel_tcp_pose_noisy[3:6] = self._act_rel_tcp_pose[3:6] + self.initial_orn_noise
act_rel_tcp_pose_noisy[:3] += np.random.uniform(-self.max_step_pos_noise,
self.max_step_pos_noise, 3)
act_rel_tcp_pose_noisy[3:6] += np.random.uniform(-self.max_step_orn_noise,
self.max_step_orn_noise, 3)
if mode == 'rel':
return act_rel_tcp_pose_noisy, self.act_force
elif mode == 'abs':
act_abs_tcp_pose_noisy = np.zeros(6)
act_abs_tcp_pose_noisy[:3] = self._act_abs_tcp_pose[:3] + self.initial_pos_noise
act_abs_tcp_pose_noisy[3:6] = self._act_abs_tcp_pose[3:6] + self.initial_orn_noise
act_abs_work_pose_noisy = np.zeros(6)
act_abs_work_pose_noisy[:3] = self._act_abs_work_pose[:3] + self.initial_pos_noise
act_abs_work_pose_noisy[3:6] = self._act_abs_work_pose[3:6] + self.initial_orn_noise
return act_abs_tcp_pose_noisy, act_abs_work_pose_noisy, self.act_force
def calc_reward(self, relative_pose, success, out_range, act_step):
return self.reward.reward_function(relative_pose, success, out_range, act_step)
def scale_action(self, action):
scaled_action = deepcopy(action)
scaled_action[:3]*=self.step_max_pos
scaled_action[3:]*=self.step_max_orn
        return scaled_action
 | 38.828571 | 101 | 0.589159 | 8,102 | 0.980397 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.086399 |
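# Usage sketch (editor's addition, not part of the original file): a rough outline of
# how the Env class above might be driven. `MyReward` is a placeholder for an object
# exposing reward_function(relative_pose, success, out_range, act_step).
#
#   env = Env(reward=MyReward())
#   state = env.reset(mode='rel',
#                     tcp_pose=[0, 0, 0.05, 0, 0, 0],
#                     work_pose=[0.5, 0, 0, 0, 0, 0])
#   action = env.scale_action(agent_action)        # agent_action in [-1, 1]^6
#   obs, reward, done, success = env.step(action, step=0)
#   env.destory()                                   # method name as defined above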
46b07861f72e984eb2546daa1dab51801dd00b0a | 1,820 | py | Python | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | ["Apache-2.0"] | null | null | null | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | ["Apache-2.0"] | null | null | null | Thread/Threading.py | zxg110/PythonGrammer | 7d07648c62e3d49123688c33d09fe4bb369cf852 | ["Apache-2.0"] | null | null | null |
import _thread
import time
import threading
#
# def print_time(threadName,delay):
# count = 0;
# while count < 5:
# time.sleep(delay)
# count += 1;
# print("%s: %s" % (threadName, time.ctime(time.time())))
#
# try:
# _thread.start_new(print_time,("Thread-1",2,))
# _thread.start_new(print_time("Thread-2",4))
# except:
# print("error")
#
# while 1:
# pass
# Python 3 provides thread support through two standard libraries: _thread and threading.
# _thread offers low-level, primitive threads and a simple lock; its functionality is limited compared with the threading module.
# The threading module contains all the methods of _thread and additionally provides:
# threading.currentThread(): returns the current thread object.
# threading.enumerate(): returns a list of the running threads. "Running" means started and not yet finished; threads not yet started or already terminated are excluded.
# threading.activeCount(): returns the number of running threads, the same as len(threading.enumerate()).
# Besides these functions, the module also provides the Thread class for handling threads; it offers the following methods:
# run(): the method representing the thread's activity.
# start(): starts the thread's activity.
# join([time]): if the main thread A creates a child thread B and calls B.join(), A waits at the call site
# until B has finished before continuing. The time argument is the maximum time to wait; once it elapses,
# the caller continues whether or not the thread has finished.
# isAlive(): returns whether the thread is alive.
# getName(): returns the thread's name.
# setName(): sets the thread's name.
exitFlag = 0
class MyThread(threading.Thread):
def __init__(self,threadID,name,counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print ("开始线程:" + self.name)
print_time(self.name, 2,self.counter)
print ("退出线程:" + self.name)
def print_time(threadName, delay, counter):
while counter:
# if exitFlag:
# threadName.exit()
time.sleep(delay)
print("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
# Create new threads
thread1 = MyThread(1, "Thread-1", 5)
thread2 = MyThread(2, "Thread-2", 5)
# Start the new threads
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print("Exiting main thread")
| 26 | 73 | 0.666484 | 365 | 0.142578 | 0 | 0 | 0 | 0 | 0 | 0 | 1,821 | 0.711328 |
46b1624f4a6a70026386fb13d4c9f4cd8b816721 | 2,153 | py | Python | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | ["MIT"] | null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | ["MIT"] | null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | ["MIT"] | null | null | null |
import face_recognition
from rest_framework import generics
# Create your views here.
from petsAPI.models import Pets
from petsAPI.serializers import PetSerializer
def index(req):
return render(req, 'index.html')
class PetsListApiView(generics.ListCreateAPIView):
queryset = Pets.objects.all()
serializer_class = PetSerializer
class PetDetailsApiView(generics.RetrieveUpdateDestroyAPIView):
queryset = Pets.objects.all()
serializer_class = PetSerializer | 24.6 | 63 | 0.792683 | 255 | 0.518293 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.075203 |
46b234a2d05a51f6fb5809df7fa1df618dfd4547 | 2,153 | py | Python | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
]
| null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
]
| null | null | null | cluster_faces.py | sandhyalaxmiK/faces_clustering | e2da7e057ce5ec749e0c631f450e262f046b8e1d | [
"MIT"
]
| null | null | null | import face_recognition
import sys,os
import re,cv2
def sorted_alphanumeric(data):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(data, key=alphanum_key)
input_dir_path=sys.argv[1]
output_dir_path=sys.argv[2]
if not os.path.exists(output_dir_path):
os.mkdir(output_dir_path)
if not os.path.exists(output_dir_path+'/'+str(1)):
os.mkdir(output_dir_path+'/'+str(1))
input_images=sorted_alphanumeric(os.listdir(input_dir_path))
cv2.imwrite(output_dir_path+'/'+str(1)+'/'+input_images[0],cv2.imread(input_dir_path+'/'+input_images[0]))
if not os.path.exists(output_dir_path+'/back_imgs'):
os.mkdir(output_dir_path+'/back_imgs')
if not os.path.exists(output_dir_path+'/error'):
os.mkdir(output_dir_path+'/error')
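# For every remaining image: encode the first detected face, compare it with the
# representative (first) image of each existing cluster directory, then either join
# the best-matching cluster or start a new one. Images with no detectable face go to
# back_imgs/, and images that raise errors go to error/.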
for img_path in input_images[1:]:
try:
prev_similarity=0
img=face_recognition.load_image_file(input_dir_path+'/'+img_path)
img_encoding=face_recognition.face_encodings(img)
if img_encoding==[]:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/back_imgs/'+img_path,img)
continue
img_encoding=face_recognition.face_encodings(img)[0]
imgs_dirs=sorted_alphanumeric(os.listdir(output_dir_path))
imgs_dirs=list(set(imgs_dirs)-set(['error','back_imgs']))
for img_dir in imgs_dirs:
check_img=face_recognition.load_image_file(output_dir_path+'/'+img_dir+'/'+sorted_alphanumeric(os.listdir(output_dir_path+'/'+img_dir))[0])
check_img_encoding=face_recognition.face_encodings(check_img)[0]
			similarity=1-face_recognition.face_distance([check_img_encoding], img_encoding)[0]  # face_distance returns a float distance; compare_faces would return booleans and cannot be subtracted
if similarity>prev_similarity:
prev_similarity=similarity
result_dir=img_dir
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
if prev_similarity<0.6:
new_dir=str(len(os.listdir(output_dir_path))+1)
os.mkdir(output_dir_path+'/'+new_dir)
cv2.imwrite(output_dir_path+'/'+new_dir+'/'+img_path,img)
else:
cv2.imwrite(output_dir_path+'/'+result_dir+'/'+img_path,img)
except:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/error/'+img_path,img)
| 39.87037 | 142 | 0.75987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.06131 |
46b37f4db1428d7e3e970e352faebde87a24d82f | 7,329 | py | Python | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | ["MIT"] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | ["MIT"] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | ["MIT"] | null | null | null |
import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
class Objects:
base_path = "/objects"
auth = None
def __init__(self, auth):
self.auth = auth
def get_all(self, tags=None, search=None, page=None, per_page=None,
writable=False):
"""
Get the list of accessible objects.
GET /objects
https://bbdata.daplab.ch/api/#objects_get
"""
params = {
"tags": tags,
"search": search,
"page": page,
"perPage": per_page,
"writable": writable,
}
url = output_api_url + self.base_path
r = requests.get(url, params, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def put(self, name, unit_symbol, owner, description=None):
"""
Create a new object.
PUT /objects
https://bbdata.daplab.ch/api/#objects_put
"""
json = {
"name": name,
"description": description,
"unitSymbol": unit_symbol,
'owner': owner
}
url = output_api_url + self.base_path
r = requests.put(url, json=json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def get(self, object_id):
"""
Get an object.
        GET /objects/{objectId}
https://bbdata.daplab.ch/api/#objects__objectid__get
"""
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.get(url, headers=self.auth.headers)
# return ObjectResponse(r.json())
return handle_response(r.status_code, r.json())
def post(self, object_id, data):
"""
Edit the name and/or the description of the object.
Only the properties appearing in the body will be modified.
POST /objects/{objectId}
https://bbdata.daplab.ch/api/#objects__objectid__post
"""
        # TODO The data to send isn't defined in the API docs
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.post(url, data, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def delete(self, object_id):
"""
Delete the object with the given id
POST /objects/{objectId}
https://bbdata.daplab.ch/api/#objects__objectid__delete
"""
# TODO This method is in the Postman profile but isn't in the docs
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.delete(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def post_disable(self, object_id):
"""
Disable this object. All associated tokens will be removed.
POST /objects/{objectId}/disable
https://bbdata.daplab.ch/api/#objects__objectid__disable_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/disable"
r = requests.post(url, headers=self.auth.headers)
return handle_response(r.status_code, True)
def post_enable(self, object_id):
"""
Enable this object.
POST /objects/{objectId}/enable
https://bbdata.daplab.ch/api/#objects__objectid__enable_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/enable"
r = requests.post(url, headers=self.auth.headers)
return handle_response(r.status_code, True)
def get_tokens(self, object_id, description=None):
"""
Get the list of tokens for the object. A token is used to submit new
measures (see input-api).
An optional description can be passed in the
body (max 65 characters).
GET /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_get
"""
# TODO The API docs says it's possible to pass an optional description
# but it looks like it's a mistake for a GET request...
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
json = {
"description": description
}
r = requests.get(url, json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def put_tokens(self, object_id):
"""
Generate a new secured token.
PUT /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_put
"""
# TODO The optional description should probably be added in this
# method
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
r = requests.put(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def post_tokens(self, object_id, description):
"""
Edit the token's description.
POST /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
json = {
"description": description
}
r = requests.post(url, json=json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def delete_tokens(self, object_id, token_id):
"""
Revoke a token.
DELETE /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_delete
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
params = {
"tokenId": token_id
}
r = requests.delete(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def put_tags(self, object_id, tags):
"""
Add tags to the object.
PUT /objects/{objectId}/tags
https://bbdata.daplab.ch/api/#objects__objectid__tags_put
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tags"
params = {
"tags": tags
}
r = requests.put(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def delete_tags(self, object_id, tags):
"""
Remove tags.
DELETE /objects/{objectId}/tags
https://bbdata.daplab.ch/api/#objects__objectid__tags_delete
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tags"
params = {
"tags": tags
}
r = requests.put(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def get_comments(self, object_id):
"""
Get all comments attached to this object. Use the /comments endpoint
for more actions.
GET /objects/{objectId}/comments
https://bbdata.daplab.ch/api/#objects__objectid__comments_get
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/comments"
r = requests.get(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
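# Usage sketch (editor's addition, not part of the original module): `auth` is assumed
# to be an authenticated bbdata auth object exposing the `headers` attribute used by
# the requests above.
#
#   objects_api = Objects(auth)
#   all_objects = objects_api.get_all(tags="building-a", writable=True)
#   new_object = objects_api.put(name="room-temp", unit_symbol="°C", owner=1)
#   token = objects_api.put_tokens(object_id=42)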
| 33.619266 | 78 | 0.592714 | 7,229 | 0.986356 | 0 | 0 | 0 | 0 | 0 | 0 | 3,050 | 0.416155 |
46b3fea476ee5e207c6461dc2f22693adf1376cd | 94 | py | Python | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | ["MIT"] | null | null | null | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | ["MIT"] | null | null | null | python/tako/client/__init__.py | vyomkeshj/tako | d0906df5cdc0023ee955ad34d9eb4696b5ecec5e | ["MIT"] | null | null | null |
from .exception import TakoException, TaskFailed # noqa
from .session import connect # noqa
| 31.333333 | 56 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.12766 |
46b430ceffc244986e1b0a3ab9f0c59e0b7629b0 | 5,533 | py | Python | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | ["MIT"] | null | null | null | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | ["MIT"] | null | null | null | helpers/parser.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | ["MIT"] | null | null | null |
import argparse
def parse():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-data_dir', \
action='store', \
nargs='?', \
const=None, \
default='./dataset', \
type=str, \
choices=None, \
help='directory of datasets', \
metavar=None)
parser.add_argument('--device', \
action='store', \
nargs='?', \
const=None, \
default='cuda', \
type=str, \
choices=None, \
help='Device to run the model', \
metavar=None)
parser.add_argument('--dim', \
action='store', \
nargs='?', \
const=None, \
default=2, \
type=int, \
choices=None, \
help='Dimension of the model', \
metavar=None)
parser.add_argument('--batch_size', \
action='store', \
nargs='?', \
const=None, \
default=25, \
type=int, \
choices=None, \
help='Batch size', \
metavar=None)
parser.add_argument('--n_epochs', \
action='store', \
nargs='?', \
const=None, \
default=3, \
type=int, \
choices=None, \
help='Number of epochs', \
metavar=None)
parser.add_argument('--run_number', \
action='store', \
nargs='?', \
const=None, \
default=0, \
type=int, \
choices=None, \
help='Run number', \
metavar=None)
parser.add_argument('--padding_mode', \
action='store', \
nargs='?', \
const=None, \
default='reflect', \
type=str, \
choices=None, \
help='Padding type (default: reflect)', \
metavar=None)
parser.add_argument('--preprocess_type', \
action='store', \
nargs='?', \
const=None, \
default='normalization', \
type=str, \
choices=None, \
help='Preprocess type (default: normalization)', \
metavar=None)
parser.add_argument('--model_name', \
action='store', \
nargs='?', \
const=None, \
default='AMR_Net', \
type=str, \
choices=None, \
help='Name of the model (default: AMR_Net)', \
metavar=None)
parser.add_argument('--lr', \
action='store', \
nargs='?', \
const=None, \
default=0.0002, \
type=float, \
choices=None, \
help='Learning rate', \
metavar=None)
parser.add_argument('--beta_1', \
action='store', \
nargs='?', \
const=None, \
default=0.9, \
type=float, \
choices=None, \
help='beta_1 for Adam', \
metavar=None)
parser.add_argument('--beta_2', \
action='store', \
nargs='?', \
const=None, \
default=0.999, \
type=float, \
choices=None, \
help='beta_2 for Adam', \
metavar=None)
# Used for inference
parser.add_argument('--inference_mode', \
action='store_true', \
default=False, \
help='train or inference')
parser.add_argument('-state_file_dir', \
action='store', \
nargs='?', \
const=None, \
default='./', \
type=str, \
choices=None, \
help='directory storing torch state files', \
metavar=None)
parser.add_argument('--load_nth_state_file', \
action='store', \
nargs='?', \
const=None, \
default=0, \
type=int, \
choices=None, \
help='nth state file to load', \
metavar=None)
args = parser.parse_args()
return args
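# Usage sketch (editor's addition): typical use from a training or inference script.
#
#   args = parse()
#   print(args.data_dir, args.batch_size, args.n_epochs, args.inference_mode)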
| 39.521429 | 74 | 0.325863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 784 | 0.141695 |