id (string, length 3-8) | content (string, length 100-981k)
---|---|
494120
|
import pytest
async def test_handler(app, client):
from muffin.handler import Handler, route_method
assert Handler
@app.route('/handler', '/handler/{res}')
class Index(Handler):
async def get(self, request):
return request.path_params or 'ok'
async def post(self, request):
data = await request.data()
return dict(data)
@Handler.route('/custom1', methods=['put', 'patch'])
async def custom1(self, request):
return self.__class__.__name__
@route_method('/custom2')
async def custom2(self, request):
return 'CUSTOM2'
assert sorted(Index.methods) == ['GET', 'POST']
res = await client.get('/handler')
assert res.status_code == 200
assert await res.text() == 'ok'
res = await client.get('/handler/123')
assert res.status_code == 200
assert await res.json() == {'res': '123'}
res = await client.post('/handler', json={'test': 'passed'})
assert res.status_code == 200
assert await res.json() == {'test': 'passed'}
res = await client.put('/handler')
assert res.status_code == 405
assert await res.text() == 'Specified method is invalid for this resource'
res = await client.put('/custom1')
assert res.status_code == 200
assert await res.text() == 'Index'
res = await client.get('/custom1')
assert res.status_code == 405
res = await client.get('/custom2')
assert res.status_code == 200
assert await res.text() == 'CUSTOM2'
async def test_deferred(app, client):
from muffin.handler import Handler
class Resource(Handler):
methods = 'post'
async def get(self, request):
raise RuntimeError
async def post(self, request):
return 'Resource is here'
@Handler.route('/resource/custom')
async def custom(self, request):
return 'Resource Custom is here'
assert Resource.methods == {'POST'}
app.route('/resource')(Resource)
res = await client.get('/resource')
assert res.status_code == 405
res = await client.post('/resource')
assert res.status_code == 200
assert await res.text() == 'Resource is here'
res = await client.post('/resource/custom')
assert res.status_code == 200
assert await res.text() == 'Resource Custom is here'
|
494171
|
import os
from PyQt5.QtWidgets import QDialog
from cvstudio.util import GUIUtilities, FileUtilities
from cvstudio.vo import DatasetVO
from .base_dataset_form import Ui_Base_DatasetDialog
class DatasetForm(QDialog, Ui_Base_DatasetDialog):
def __init__(self, vo: DatasetVO = None, parent=None):
super(DatasetForm, self).__init__(parent)
self.setupUi(self)
self.setWindowTitle("Create new dataset".title())
self.setWindowIcon(GUIUtilities.get_icon("polygon.png"))
self._value = vo
self.initialize_form()
def initialize_form(self):
if self._value:
self.nameLineEdit.setText(self._value.name)
self.descriptionEditText.setPlainText(self._value.description)
# self.type.setCurrentText(self._value.data_type)
@property
def value(self) -> DatasetVO:
return self._value
def accept(self) -> None:
if not self.nameLineEdit.text():
GUIUtilities.show_info_message("The name field is required", "info")
return
if self._value is None:
usr_folder = FileUtilities.get_usr_folder()
new_folder = FileUtilities.create_new_folder(usr_folder)
vo = DatasetVO()
ds_folder = os.path.basename(new_folder)
vo.folder = ds_folder
self._value = vo
else:
vo = self._value
vo.name = self.nameLineEdit.text()
vo.description = self.descriptionEditText.toPlainText()
# dataset_vo.data_type=self.typeComboBox.currentText()
return QDialog.accept(self)
def reject(self) -> None:
return QDialog.reject(self)
|
494185
|
from .common import pred, iroot_ceil, floor_lg
from .generalized_accumulator import (
GeneralizedAccumulatorFactory,
GeneralizedAccumulator,
GeneralizedProver,
GeneralizedVerifier,
)
# Implementation of the generalized accumulator with p "evenly spaced" back-pointers.
# Cost of update: O(p)
# Proof size: O((log n)^(1 + 1/p))
def get_representatives(k: int, p: int):
if k == 1:
return []
# if k is even, we also add k - 1
result = [k - 1] if k % 2 == 0 else []
l = floor_lg(k) # k has l + 1 bits
d = iroot_ceil(p, l) # d = ceil(floor(log n)^(1/p))
# computes all the powers of d that are not bigger than l
exponents = set([1])
if d > 1:
t = d
while t <= l and d > 1:
exponents.add(t)
t *= d
t = pred(k)
c = 1 # count of how many bits are zeroed
while t > 0:
if c in exponents:
result.append(t)
t = pred(t)
c += 1
return result
class MultipointerAccumulatorFactory(GeneralizedAccumulatorFactory):
def create_accumulator(self, p: int):
def get_representatives_p(n: int):
return get_representatives(n, p)
accumulator_manager = GeneralizedAccumulator(get_representatives_p)
prover = GeneralizedProver(get_representatives_p, accumulator_manager)
verifier = GeneralizedVerifier(get_representatives_p)
return accumulator_manager, prover, verifier
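# Minimal usage sketch (run as a module within the package, since the imports above
# are relative): inspect the back-pointer sets produced for small indices. The exact
# values depend on pred/floor_lg/iroot_ceil from .common.
if __name__ == "__main__":
    p = 2
    for k in range(1, 17):
        print(k, get_representatives(k, p))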
|
494193
|
import argparse, sys
from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils, mol_utils
from rdkit import Chem
from . import calc_interactions
def execute(suppl, writer, report, group_by_field, score_field, score_descending, stats_fields):
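# Streams molecules whose group_by_field values are assumed to be consecutive; within
# each group it keeps, for every interaction fingerprint, the molecule with the best
# score (highest when score_descending is set, lowest otherwise) and accumulates
# per-field statistics for the report.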
count = 0
total = 0
errors = 0
group_count = 0
curr_gbv = None
interactions = {}
best_scores = {}
best_mols = {}
stats_data = {}
for mol in suppl:
total += 1
if not mol:
errors += 1
continue
if not mol.HasProp(group_by_field):
report.write("WARNING: molecule %s does not contain field %s\n" % (total, group_by_field))
errors += 1
continue
if not mol.HasProp(score_field):
report.write("WARNING: molecule %s does not contain field %s\n" % (total, score_field))
errors += 1
continue
gbv = mol.GetProp(group_by_field)
sco = mol.GetDoubleProp(score_field)
inters = gen_interactions(mol)
utils.log('processing', gbv, inters)
if gbv != curr_gbv:
# write summaries
if curr_gbv:
write_summary(writer, report, curr_gbv, group_count, best_mols, stats_data)
curr_gbv = gbv
group_count = 1
best_scores = {inters: sco}
best_mols = {inters: mol}
stats_data = {}
add_stats(mol, inters, stats_fields, stats_data)
else:
# add to summary
group_count += 1
curr_best_sco = best_scores.get(inters, None)
add_stats(mol, inters, stats_fields, stats_data)
if curr_best_sco is None:
best_scores[inters] = sco
best_mols[inters] = mol
else:
if score_descending:
if sco > curr_best_sco:
best_scores[inters] = sco
best_mols[inters] = mol
else:
if sco < curr_best_sco:
best_scores[inters] = sco
best_mols[inters] = mol
count += 1
write_summary(writer, report, gbv, group_count, best_mols, stats_data)
return count, total, errors
def write_summary(writer, report, gbv, count, best_mols, stats_data):
report.write("Summary for %s molecules for %s\n" % (count, gbv))
for inter, mol in best_mols.items():
report.write(" %s\n" % (str(inter)))
if inter in stats_data:
for field, values in stats_data[inter].items():
report.write(" %s = [%s, %s, %s, %s]\n" % (field, len(values), min(values), max(values), sum(values) / len(values)))
writer.write(mol)
str_interactions = 'Interactions'
def gen_interactions(mol):
interactions = []
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_hbond + str_interactions))
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_halogen + str_interactions))
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_hydrophobic + str_interactions))
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_salt_bridge + str_interactions))
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_pi_stacking + str_interactions))
interactions.append(find_canonical_interactions(mol, calc_interactions.inter_type_pi_cation + str_interactions))
return tuple(interactions)
def find_canonical_interactions(mol, prop):
if mol.HasProp(prop):
canons = []
inters = mol.GetProp(prop)
lines = inters.split('\n')
for line in lines:
tokens = line.split(' ')
canon = tokens[0]
canons.append(canon)
return tuple(sorted(canons))
else:
return None
def add_stats(mol, inters, stats_fields, stats_data):
if inters in stats_data:
d = stats_data[inters]
else:
d = {}
stats_data[inters] = d
if stats_fields:
for field in stats_fields:
if mol.HasProp(field):
v = mol.GetDoubleProp(field)
if field in d:
d[field].append(v)
else:
d[field] = [v]
### start main execution #########################################
def main():
### command line args definitions #########################################
parser = argparse.ArgumentParser(description='Filter interactions')
parameter_utils.add_default_io_args(parser)
parser.add_argument('-f', '--group-by-field', required=True, help='Field to group records by (must be sequential)')
parser.add_argument('-s', '--score-field', required=True, help='Field to use to rank records within a group')
parser.add_argument('-d', '--score-descending', action='store_true', help='Sort records in descending order')
parser.add_argument('-x', '--stats-fields', nargs='*', help='Fields to use for summary statistics')
parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')
parser.add_argument('--thin', action='store_true', help='Thin output mode')
parser.add_argument('--no-gzip', action='store_true', help='Do not compress the output (STDOUT is never compressed)')
args = parser.parse_args()
utils.log("filter_interactions: ", args)
# handle metadata
source = "filter_interactions.py"
datasetMetaProps = {"source": source, "description": "Filter by interactions"}
clsMappings = {
# "EnumChargesSrcMolUUID": "java.lang.String",
# "EnumChargesSrcMolIdx": "java.lang.Integer"
}
fieldMetaProps = [
# {"fieldName": "EnumChargesSrcMolUUID", "values": {"source": source, "description": "UUID of source molecule"}},
# {"fieldName": "EnumChargesSrcMolIdx", "values": {"source": source, "description": "Index of source molecule"}}
]
input, suppl = rdkit_utils.default_open_input(args.input, args.informat)
output, writer, output_base = rdkit_utils.default_open_output(args.output,
'filter_interactions', args.outformat,
thinOutput=False, valueClassMappings=clsMappings,
datasetMetaProps=datasetMetaProps,
fieldMetaProps=fieldMetaProps,
compress=not args.no_gzip)
report_file = open(output_base + '.report', 'wt')
count, total, errors = execute(suppl, writer, report_file, args.group_by_field, args.score_field, args.score_descending,
args.stats_fields)
utils.log(count, total, errors)
if input:
input.close()
writer.flush()
writer.close()
output.close()
report_file.close()
# re-write the metadata as we now know the size
if args.outformat == 'json':
utils.write_squonk_datasetmetadata(output_base, False, clsMappings, datasetMetaProps, fieldMetaProps, size=total)
if args.meta:
utils.write_metrics(output_base, {'__InputCount__': count, '__OutputCount__': total, '__ErrorCount__': errors,
'FilterInteractions': total})
if __name__ == "__main__":
main()
|
494203
|
lines = open('data.csv').read().split('\n')
data = [(int(x[:x.find(',')]), float(x[x.find(',')+1:])) for x in lines if x]
REPORT_THRESHOLD = 0.23
def get_error(scale, elasticity, growth):
err = 0
bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity)
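# Note: bs_fac + fee_fac == 1, so `actual` below is a weighted geometric mean of
# block size and average fee, with `elasticity` shifting the weight toward the fee term.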
for i, (block_size, avg_fee) in enumerate(data):
expected = scale * (1 + growth) ** i
actual = block_size ** bs_fac * avg_fee ** fee_fac
# if i >= len(data) - 6:
# err += ((expected / actual - 1) ** 2) * 2
err += (expected / actual - 1) ** 2
return err
best = (0, 0, 0, 9999999999999999999999999.0)
for scale in [1 * 1.05 ** x for x in range(300)]:
for elasticity in [x*0.025 for x in range(120)]:
for growth in [x*0.001 for x in range(120)]:
err = get_error(scale, elasticity, growth)
if err <= REPORT_THRESHOLD:
print('%d %.3f %.3f: %.3f' % (scale, elasticity, growth, err))
if err < best[-1]:
best = scale, elasticity, growth, err
print('Best params: %d %.3f %.3f (err %.3f)' % best)
scale, elasticity, growth, err = best
bs_fac, fee_fac = 1 / (1 + elasticity), elasticity / (1 + elasticity)
for i, (block_size, avg_fee) in enumerate(data):
expected = scale * (1 + growth) ** i
actual = block_size ** bs_fac * avg_fee ** fee_fac
print(i, actual, expected)
|
494216
|
import bpy
material = (bpy.context.material.active_node_material if bpy.context.material.active_node_material else bpy.context.material)
material.subsurface_scattering.radius = 10.899, 6.575, 2.508
material.subsurface_scattering.color = 0.947, 0.931, 0.852
|
494265
|
import numpy as np
a_1d = np.arange(4)
print(a_1d)
# [0 1 2 3]
print(a_1d[[0, 2]])
# [0 2]
print(a_1d[[0, 3, 2, 1, 2, -1, -2]])
# [0 3 2 1 2 3 2]
print(a_1d[np.array([0, 3, 2, 1, 2, -1, -2])])
# [0 3 2 1 2 3 2]
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[[0, 2]])
# [[ 0 1 2 3]
# [ 8 9 10 11]]
print(a_2d[:, [0, 2]])
# [[ 0 2]
# [ 4 6]
# [ 8 10]]
print(a_2d[[0, 2], [0, 2]])
# [ 0 10]
print(a_2d[np.ix_([0, 2], [0, 2])])
# [[ 0 2]
# [ 8 10]]
print(a_2d[np.ix_([0, 2, 1, 1, -1, -1], [0, 2, 1, 3])])
# [[ 0 2 1 3]
# [ 8 10 9 11]
# [ 4 6 5 7]
# [ 4 6 5 7]
# [ 8 10 9 11]
# [ 8 10 9 11]]
print(a_2d[:, [1]])
# [[1]
# [5]
# [9]]
print(a_2d[:, [1]].shape)
# (3, 1)
print(a_2d[:, 1])
# [1 5 9]
print(a_2d[:, 1].shape)
# (3,)
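# Addendum: selecting rows and columns with np.ix_ is equivalent to chaining a
# row selection with a column selection.
print(np.array_equal(a_2d[np.ix_([0, 2], [0, 2])], a_2d[[0, 2]][:, [0, 2]]))
# True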
|
494267
|
import sys
from pypy.interpreter.baseobjspace import W_Root, ObjSpace, Wrappable, \
Arguments
from pypy.interpreter.error import OperationError, wrap_oserror
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.rlib.libffi import *
from pypy.rpython.lltypesystem import lltype, rffi
from pypy.rlib.unroll import unrolling_iterable
from pypy.tool.sourcetools import func_with_new_name
from pypy.rlib.rarithmetic import intmask, r_uint, r_singlefloat
from pypy.module._rawffi.tracker import tracker
def _signed_type_for(TYPE):
sz = rffi.sizeof(TYPE)
if sz == 4: return ffi_type_sint32
elif sz == 8: return ffi_type_sint64
else: raise ValueError("unsupported type size for %r" % (TYPE,))
def _unsigned_type_for(TYPE):
sz = rffi.sizeof(TYPE)
if sz == 4: return ffi_type_uint32
elif sz == 8: return ffi_type_uint64
else: raise ValueError("unsupported type size for %r" % (TYPE,))
TYPEMAP = {
# XXX A mess with unsigned/signed/normal chars :-/
'c' : ffi_type_uchar,
'b' : ffi_type_schar,
'B' : ffi_type_uchar,
'h' : ffi_type_sshort,
'u' : ffi_type_uint, # XXX think deeper how to map it properly
'H' : ffi_type_ushort,
'i' : ffi_type_sint,
'I' : ffi_type_uint,
# xxx don't use ffi_type_slong and ffi_type_ulong - their meaning
# changes from a libffi version to another :-((
'l' : _signed_type_for(rffi.LONG),
'L' : _unsigned_type_for(rffi.ULONG),
'q' : _signed_type_for(rffi.LONGLONG),
'Q' : _unsigned_type_for(rffi.ULONGLONG),
'f' : ffi_type_float,
'd' : ffi_type_double,
's' : ffi_type_pointer,
'P' : ffi_type_pointer,
'z' : ffi_type_pointer,
'O' : ffi_type_pointer,
'Z' : ffi_type_pointer,
}
TYPEMAP_PTR_LETTERS = "POszZ"
UNPACKED_TYPECODES = dict([(code, (code,
intmask(field_desc.c_size),
intmask(field_desc.c_alignment)))
for code, field_desc in TYPEMAP.items()])
LL_TYPEMAP = {
'c' : rffi.CHAR,
'u' : lltype.UniChar,
'b' : rffi.SIGNEDCHAR,
'B' : rffi.UCHAR,
'h' : rffi.SHORT,
'H' : rffi.USHORT,
'i' : rffi.INT,
'I' : rffi.UINT,
'l' : rffi.LONG,
'L' : rffi.ULONG,
'q' : rffi.LONGLONG,
'Q' : rffi.ULONGLONG,
'f' : rffi.FLOAT,
'd' : rffi.DOUBLE,
's' : rffi.CCHARP,
'z' : rffi.CCHARP,
'Z' : rffi.CArrayPtr(lltype.UniChar),
'O' : rffi.VOIDP,
'P' : rffi.VOIDP,
'v' : lltype.Void,
}
def letter2tp(space, key):
try:
return UNPACKED_TYPECODES[key]
except KeyError:
raise OperationError(space.w_ValueError, space.wrap(
"Unknown type letter %s" % (key,)))
def unpack_typecode(space, w_typecode):
if space.is_true(space.isinstance(w_typecode, space.w_str)):
letter = space.str_w(w_typecode)
return letter2tp(space, letter)
else:
w_size, w_align = space.unpacktuple(w_typecode, expected_length=2)
return ('V', space.int_w(w_size), space.int_w(w_align)) # value object
def _get_type_(space, key):
try:
return TYPEMAP[key]
except KeyError:
raise OperationError(space.w_ValueError, space.wrap(
"Unknown type letter %s" % (key,)))
class W_CDLL(Wrappable):
def __init__(self, space, name):
self.cdll = CDLL(name)
self.name = name
self.w_cache = space.newdict()
self.space = space
# xxx refactor away !
def get_arg_type(self, letter, argsize, argalignment):
space = self.space
if letter == 'V': # xxx leaks
return make_struct_ffitype(argsize, argalignment)
else:
return _get_type_(space, letter)
def get_type(self, key):
space = self.space
return _get_type_(space, key)
def ptr(self, space, name, w_argtypes, w_restype):
""" Get a pointer for function name with provided argtypes
and restype
"""
# xxx refactor
if space.is_w(w_restype, space.w_None):
resshape = None
ffi_restype = ffi_type_void
elif space.is_true(space.isinstance(w_restype, space.w_str)):
tp_letter = space.str_w(w_restype)
if tp_letter == 'v':
resshape = None
ffi_restype = ffi_type_void
else:
from pypy.module._rawffi.array import get_array_cache
cache = get_array_cache(space)
resshape = cache.get_array_type(letter2tp(space, tp_letter))
ffi_restype = self.get_type(tp_letter)
else:
from pypy.module._rawffi.structure import W_Structure
resshape = space.interp_w(W_Structure, w_restype)
ffi_restype = resshape.get_ffi_type()
w = space.wrap
w_argtypes = space.newtuple(space.unpackiterable(w_argtypes))
w_key = space.newtuple([w(name), w_argtypes, w(resshape)])
try:
return space.getitem(self.w_cache, w_key)
except OperationError, e:
if e.match(space, space.w_KeyError):
pass
else:
raise
argtypes_w = space.unpackiterable(w_argtypes)
argtypes = [unpack_typecode(space, w_arg) for w_arg in argtypes_w]
ffi_argtypes = [self.get_arg_type(letter, argsize, argalignment)
for letter, argsize, argalignment
in argtypes]
try:
ptr = self.cdll.getrawpointer(name, ffi_argtypes, ffi_restype)
w_funcptr = W_FuncPtr(space, ptr, argtypes, resshape)
space.setitem(self.w_cache, w_key, w_funcptr)
return w_funcptr
except KeyError:
raise OperationError(space.w_AttributeError, space.wrap(
"No symbol %s found in library %s" % (name, self.name)))
ptr.unwrap_spec = ['self', ObjSpace, str, W_Root, W_Root]
def getprimitive(self, space, letter, name):
from pypy.module._rawffi.array import get_array_cache
cache = get_array_cache(space)
w_array = cache.get_array_type(letter2tp(space, letter))
try:
address_as_uint = rffi.cast(lltype.Unsigned,
self.cdll.getaddressindll(name))
except KeyError:
raise OperationError(space.w_ValueError,
space.wrap("Cannot find symbol %s" % (name,)))
return w_array.fromaddress(space, address_as_uint, 1)
getprimitive.unwrap_spec = ['self', ObjSpace, str, str]
def descr_new_cdll(space, w_type, name):
try:
return space.wrap(W_CDLL(space, name))
except OSError, e:
raise wrap_oserror(space, e)
descr_new_cdll.unwrap_spec = [ObjSpace, W_Root, str]
W_CDLL.typedef = TypeDef(
'CDLL',
__new__ = interp2app(descr_new_cdll),
ptr = interp2app(W_CDLL.ptr),
getprimitive= interp2app(W_CDLL.getprimitive),
__doc__ = """ C Dynamically loaded library
use CDLL(libname) to create a handle to a C library (the argument is processed
the same way as dlopen processes it). On such a library you can call:
lib.ptr(func_name, argtype_list, restype)
where argtype_list is a list of single characters and restype is a single
character. The character meanings are more or less the same as in the struct
module, except that s has trailing \x00 added, while p is considered a raw
buffer.""" # xxx fix doc
)
unroll_letters_for_numbers = unrolling_iterable("bBhHiIlLqQ")
def segfault_exception(space, reason):
w_mod = space.getbuiltinmodule("_rawffi")
w_exception = space.getattr(w_mod, space.wrap("SegfaultException"))
return OperationError(w_exception, space.wrap(reason))
class W_DataShape(Wrappable):
def allocate(self, space, length, autofree=False):
raise NotImplementedError
class W_DataInstance(Wrappable):
def __init__(self, space, size, address=r_uint(0)):
if address:
self.ll_buffer = rffi.cast(rffi.VOIDP, address)
else:
self.ll_buffer = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw',
zero=True)
if tracker.DO_TRACING:
ll_buf = rffi.cast(rffi.INT, self.ll_buffer)
tracker.trace_allocation(ll_buf, self)
def getbuffer(space, self):
return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer))
def byptr(self, space):
from pypy.module._rawffi.array import get_array_cache
array_of_ptr = get_array_cache(space).array_of_ptr
array = array_of_ptr.allocate(space, 1)
array.setitem(space, 0, space.wrap(self))
return space.wrap(array)
byptr.unwrap_spec = ['self', ObjSpace]
def free(self, space):
if not self.ll_buffer:
raise segfault_exception(space, "freeing NULL pointer")
self._free()
free.unwrap_spec = ['self', ObjSpace]
def _free(self):
if tracker.DO_TRACING:
ll_buf = rffi.cast(rffi.INT, self.ll_buffer)
tracker.trace_free(ll_buf)
lltype.free(self.ll_buffer, flavor='raw')
self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO)
def unwrap_truncate_int(TP, space, w_arg):
if space.is_true(space.isinstance(w_arg, space.w_int)):
return rffi.cast(TP, space.int_w(w_arg))
else:
return rffi.cast(TP, space.bigint_w(w_arg).ulonglongmask())
unwrap_truncate_int._annspecialcase_ = 'specialize:arg(0)'
def unwrap_value(space, push_func, add_arg, argdesc, tp, w_arg):
letter, _, _ = tp
w = space.wrap
if letter == "d":
push_func(add_arg, argdesc, space.float_w(w_arg))
elif letter == "f":
push_func(add_arg, argdesc, rffi.cast(rffi.FLOAT,
space.float_w(w_arg)))
elif letter in TYPEMAP_PTR_LETTERS:
# check for NULL ptr
datainstance = space.interpclass_w(w_arg)
if isinstance(datainstance, W_DataInstance):
ptr = datainstance.ll_buffer
else:
ptr = unwrap_truncate_int(rffi.VOIDP, space, w_arg)
push_func(add_arg, argdesc, ptr)
elif letter == "c":
s = space.str_w(w_arg)
if len(s) != 1:
raise OperationError(space.w_TypeError, w(
"Expected string of length one as character"))
val = s[0]
push_func(add_arg, argdesc, val)
elif letter == 'u':
s = space.unicode_w(w_arg)
if len(s) != 1:
raise OperationError(space.w_TypeError, w(
"Expected unicode string og length one as wide character"))
val = s[0]
push_func(add_arg, argdesc, val)
else:
for c in unroll_letters_for_numbers:
if letter == c:
TP = LL_TYPEMAP[c]
val = unwrap_truncate_int(TP, space, w_arg)
push_func(add_arg, argdesc, val)
return
else:
raise OperationError(space.w_TypeError,
space.wrap("cannot directly write value"))
unwrap_value._annspecialcase_ = 'specialize:arg(1)'
ll_typemap_iter = unrolling_iterable(LL_TYPEMAP.items())
def wrap_value(space, func, add_arg, argdesc, tp):
letter, _, _ = tp
for c, ll_type in ll_typemap_iter:
if letter == c:
if c in TYPEMAP_PTR_LETTERS:
res = func(add_arg, argdesc, rffi.VOIDP)
return space.wrap(rffi.cast(lltype.Unsigned, res))
elif c == 'v':
func(add_arg, argdesc, ll_type)
return space.w_None
elif c == 'q' or c == 'Q' or c == 'L' or c == 'c' or c == 'u':
return space.wrap(func(add_arg, argdesc, ll_type))
elif c == 'f' or c == 'd':
return space.wrap(float(func(add_arg, argdesc, ll_type)))
else:
return space.wrap(intmask(func(add_arg, argdesc, ll_type)))
raise OperationError(space.w_TypeError,
space.wrap("cannot directly read value"))
wrap_value._annspecialcase_ = 'specialize:arg(1)'
class W_FuncPtr(Wrappable):
def __init__(self, space, ptr, argtypes, resshape):
self.ptr = ptr
self.resshape = resshape
self.argtypes = argtypes
def call(self, space, args_w):
from pypy.module._rawffi.array import W_ArrayInstance
from pypy.module._rawffi.structure import W_StructureInstance
argnum = len(args_w)
if argnum != len(self.argtypes):
msg = "Wrong number of argument: expected %d, got %d" % (
len(self.argtypes), argnum)
raise OperationError(space.w_TypeError, space.wrap(msg))
args_ll = []
for i in range(argnum):
argtype_letter, argtype_size, argtype_alignment = self.argtypes[i]
w_arg = args_w[i]
if argtype_letter == 'V': # by value object
arg = space.interp_w(W_StructureInstance, w_arg)
if (arg.shape.size != argtype_size or
arg.shape.alignment != argtype_alignment):
msg = ("Argument %d should be a structure of size %d and "
"alignment %d, "
"got instead size %d and alignment %d" %
(i+1, argtype_size, argtype_alignment,
arg.shape.size, arg.shape.alignment))
raise OperationError(space.w_TypeError, space.wrap(msg))
else:
arg = space.interp_w(W_ArrayInstance, w_arg)
if arg.length != 1:
msg = ("Argument %d should be an array of length 1, "
"got length %d" % (i+1, arg.length))
raise OperationError(space.w_TypeError, space.wrap(msg))
letter = arg.shape.itemtp[0]
if letter != argtype_letter:
if not (argtype_letter in TYPEMAP_PTR_LETTERS and
letter in TYPEMAP_PTR_LETTERS):
msg = "Argument %d should be typecode %s, got %s" % (
i+1, argtype_letter, letter)
raise OperationError(space.w_TypeError, space.wrap(msg))
args_ll.append(arg.ll_buffer)
# XXX we could avoid the intermediate list args_ll
if self.resshape is not None:
result = self.resshape.allocate(space, 1)
self.ptr.call(args_ll, result.ll_buffer)
return space.wrap(result)
else:
self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO))
return space.w_None
call.unwrap_spec = ['self', ObjSpace, 'args_w']
W_FuncPtr.typedef = TypeDef(
'FuncPtr',
__call__ = interp2app(W_FuncPtr.call)
)
def _create_new_accessor(func_name, name):
def accessor(space, tp_letter):
if len(tp_letter) != 1:
raise OperationError(space.w_ValueError, space.wrap(
"Expecting string of length one"))
tp_letter = tp_letter[0] # fool annotator
try:
return space.wrap(intmask(getattr(TYPEMAP[tp_letter], name)))
except KeyError:
raise OperationError(space.w_ValueError, space.wrap(
"Unknown type specification %s" % tp_letter))
accessor.unwrap_spec = [ObjSpace, str]
return func_with_new_name(accessor, func_name)
sizeof = _create_new_accessor('sizeof', 'c_size')
alignment = _create_new_accessor('alignment', 'c_alignment')
def charp2string(space, address, maxlength=sys.maxint):
if address == 0:
return space.w_None
s = rffi.charp2strn(rffi.cast(rffi.CCHARP, address), maxlength)
return space.wrap(s)
charp2string.unwrap_spec = [ObjSpace, r_uint, int]
def charp2rawstring(space, address, maxlength=-1):
if maxlength == -1:
return charp2string(space, address)
s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, address), maxlength)
return space.wrap(s)
charp2rawstring.unwrap_spec = [ObjSpace, r_uint, int]
|
494330
|
from typing import Final, List, Optional, Union
from web3 import Web3
from web3.eth import TxReceipt
from eth_account.account import LocalAccount
from thirdweb.abi.multiwrap import ITokenBundleToken
from thirdweb.common.marketplace import is_token_approved_for_transfer
from thirdweb.common.nft import upload_or_extract_uri
from thirdweb.constants.role import Role
from thirdweb.core.classes.contract_events import ContractEvents
from thirdweb.core.classes.contract_metadata import ContractMetadata
from thirdweb.core.classes.contract_roles import ContractRoles
from thirdweb.core.classes.contract_royalty import ContractRoyalty
from thirdweb.core.classes.contract_wrapper import ContractWrapper
from thirdweb.core.classes.erc_721 import ERC721
from thirdweb.abi import Multiwrap as MultiwrapABI
from thirdweb.core.classes.ipfs_storage import IpfsStorage
from thirdweb.types.contract import ContractType
from thirdweb.types.nft import NFTMetadataInput, NFTMetadataOwner
from thirdweb.types.sdk import SDKOptions
from thirdweb.types.multiwrap import (
ERC1155Wrappable,
ERC20Wrappable,
ERC721Wrappable,
TokensToWrap,
WrappedTokens,
)
from thirdweb.types.settings.metadata import MultiwrapContractMetadata
from thirdweb.common.currency import (
fetch_currency_metadata,
format_units,
has_erc20_allowance,
normalize_price_value,
)
from thirdweb.types.tx import TxResultWithId
class Multiwrap(ERC721[MultiwrapABI]):
"""
Multiwrap lets you wrap any number of ERC20, ERC721, or ERC1155 tokens into
a single wrapped token bundle.
```python
from thirdweb import ThirdwebSDK
# You can customize this to a supported network or your own RPC URL
network = "mumbai"
# Now we can create a new instance of the SDK
sdk = ThirdwebSDK(network)
# If you want to send transactions, you can instantiate the SDK with a private key instead:
# sdk = ThirdwebSDK.from_private_key(PRIVATE_KEY, network)
contract = sdk.get_multiwrap("{{contract_address}}")
```
"""
_abi_type = MultiwrapABI
contract_type: Final[ContractType] = ContractType.MULTIWRAP
contract_roles: Final[List[Role]] = [
Role.ADMIN,
Role.MINTER,
Role.TRANSFER,
Role.UNWRAP,
]
metadata: ContractMetadata[MultiwrapABI, MultiwrapContractMetadata]
roles: ContractRoles
royalty: ContractRoyalty[MultiwrapABI]
events: ContractEvents[MultiwrapABI]
def __init__(
self,
provider: Web3,
address: str,
storage: IpfsStorage,
signer: Optional[LocalAccount] = None,
options: SDKOptions = SDKOptions(),
):
abi = MultiwrapABI(provider, address)
contract_wrapper = ContractWrapper(abi, provider, signer, options)
super().__init__(contract_wrapper, storage)
self.metadata = ContractMetadata(
contract_wrapper,
storage,
MultiwrapContractMetadata,
)
self.roles = ContractRoles(contract_wrapper, self.contract_roles)
self.royalty = ContractRoyalty(contract_wrapper, self.metadata)
self.events = ContractEvents(contract_wrapper)
"""
READ FUNCTIONS
"""
def get_wrapped_contents(self, wrapped_token_id: int) -> WrappedTokens:
"""
Get the contents of a wrapped token bundle
:param wrapped_token_id: The ID of the wrapped token to get the contents of
:returns: The contents of the wrapped token bundle
```python
token_id = 0
contents = contract.get_wrapped_contents(token_id)
print(contents.erc20_tokens)
print(contents.erc721_tokens)
print(contents.erc1155_tokens)
```
"""
wrapped_tokens = self._contract_wrapper._contract_abi.get_wrapped_contents.call(
wrapped_token_id
)
erc20_tokens = []
erc721_tokens = []
erc1155_tokens = []
for token in wrapped_tokens:
if token["tokenType"] == 0:
token_metadata = fetch_currency_metadata(
self._contract_wrapper.get_provider(), token["assetContract"]
)
erc20_tokens.append(
ERC20Wrappable(
token["assetContract"],
format_units(token["totalAmount"], token_metadata.decimals),
)
)
continue
if token["tokenType"] == 1:
erc721_tokens.append(
ERC721Wrappable(token["assetContract"], token["tokenId"])
)
continue
if token["tokenType"] == 2:
erc1155_tokens.append(
ERC1155Wrappable(
token["assetContract"], token["tokenId"], token["totalAmount"]
)
)
return WrappedTokens(erc20_tokens, erc721_tokens, erc1155_tokens)
"""
WRITE FUNCTIONS
"""
def wrap(
self,
contents: TokensToWrap,
wrapped_token_metadata: Union[str, NFTMetadataInput],
recipient_address: Optional[str] = None,
) -> TxResultWithId[NFTMetadataOwner]:
"""
Wrap any number of ERC20, ERC721, or ERC1155 tokens into a single wrapped token
:param contents: The tokens to wrap into a single wrapped token
:param wrapped_token_metadata: The metadata to use for the wrapped token
:param recipient_address: The optional address to send the wrapped token to
:returns: The transaction receipt of the token wrapping
```python
from thirdweb.types import (
TokensToWrap,
ERC20Wrappable,
ERC721Wrappable,
ERC1155Wrappable,
NFTMetadataInput,
)
# Contract setup goes here...
tx = contract.wrap(
TokensToWrap(
erc20_tokens=[
ERC20Wrappable(contract_address="0x...", quantity=0.8),
],
erc721_tokens=[
ERC721Wrappable(contract_address="0x...", token_id=0),
],
erc1155_tokens=[
ERC1155Wrappable(contract_address="0x...", token_id=0, quantity=1),
]
),
NFTMetadataInput(
name="Wrapped NFT",
description="This is a wrapped bundle of tokens and NFTs",
image="ipfs://...",
)
)
print(tx.receipt, tx.id)
```
"""
uri = upload_or_extract_uri(wrapped_token_metadata, self._storage)
if recipient_address is None:
recipient_address = self._contract_wrapper.get_signer_address()
tokens = self._to_token_struct_list(contents)
receipt = self._contract_wrapper.send_transaction(
"wrap", [tokens, uri, recipient_address]
)
events = self._contract_wrapper.get_events("TokensWrapped", receipt)
if len(events) == 0:
raise Exception("No TokensWrapped event found")
id = events[0].get("args").get("tokenIdOfWrappedToken") # type: ignore
return TxResultWithId(receipt, id=id, data=lambda: self.get(id))
def unwrap(
self, wrapped_token_id: int, recipient_address: Optional[str] = None
) -> TxReceipt:
"""
Unwrap a wrapped token bundle
:param wrapped_token_id: The ID of the wrapped token to unwrap
:param recipient_address: The optional address to send the unwrapped tokens to
:returns: The transaction receipt of the token unwrapping
```python
tx = contract.unwrap(wrapped_token_id, recipient_address)
```
"""
if recipient_address is None:
recipient_address = self._contract_wrapper.get_signer_address()
return self._contract_wrapper.send_transaction(
"unwrap", [wrapped_token_id, recipient_address]
)
"""
INTERNAL FUNCTIONS
"""
def _to_token_struct_list(self, contents: TokensToWrap) -> List[ITokenBundleToken]:
tokens: List[ITokenBundleToken] = []
provider = self._contract_wrapper.get_provider()
owner = self._contract_wrapper.get_signer_address()
if len(contents.erc20_tokens) > 0:
for erc20 in contents.erc20_tokens:
normalized_quantity = normalize_price_value(
provider,
erc20.quantity,
erc20.contract_address,
)
has_allowance = has_erc20_allowance(
self._contract_wrapper,
erc20.contract_address,
normalized_quantity,
)
if not has_allowance:
raise Exception(
(
f"ERC20 with contract address {erc20.contract_address} does not have enough allowance to transfer. "
"You can set allowance to the multiwrap contract to transfer these tokens by running:\n"
f'sdk.get_token("{erc20.contract_address}").set_allowance("{owner}", {erc20.quantity})'
)
)
tokens.append(
{
"tokenType": 0,
"tokenId": 0,
"assetContract": erc20.contract_address,
"totalAmount": normalized_quantity,
}
)
if len(contents.erc721_tokens) > 0:
for erc721 in contents.erc721_tokens:
is_approved = is_token_approved_for_transfer(
self._contract_wrapper.get_provider(),
self.get_address(),
erc721.contract_address,
erc721.token_id,
owner,
)
if not is_approved:
raise Exception(
(
f"ERC721 token {erc721.token_id} with contract address {erc721.contract_address} is not approved for transfer. "
"You can approve this token for transfer by running:\n"
f'sdk.get_nft_collection("{erc721.contract_address}")'
f'.set_approval_for_token("{self._contract_wrapper._contract_abi.contract_address}", {erc721.token_id})'
)
)
tokens.append(
{
"tokenType": 1,
"tokenId": erc721.token_id,
"assetContract": erc721.contract_address,
"totalAmount": 0,
}
)
if len(contents.erc1155_tokens) > 0:
for erc1155 in contents.erc1155_tokens:
is_approved = is_token_approved_for_transfer(
self._contract_wrapper.get_provider(),
self.get_address(),
erc1155.contract_address,
erc1155.token_id,
owner,
)
if not is_approved:
raise Exception(
(
f"ERC1155 token {erc1155.token_id} with contract address {erc1155.contract_address} is not approved for transfer. "
"You can approve this token for transfer by running:\n"
f'sdk.get_edition("{erc1155.contract_address}")'
f'.set_approval_for_all("{self._contract_wrapper._contract_abi.contract_address}", True)'
)
)
tokens.append(
{
"tokenType": 2,
"tokenId": erc1155.token_id,
"assetContract": erc1155.contract_address,
"totalAmount": erc1155.quantity,
}
)
return tokens
|
494412
|
import django
from gensim.models import KeyedVectors
import gensim
import sys
import os
# Get the directory of the current file
from FunPySearch.celery import app
pwd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Get the project root directory (this file sits in a folder under the project root, hence ../)
sys.path.append(pwd)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FunPySearch.settings")
django.setup()
from user.models import KeyWord2Vec # must stay here (import after django.setup())
def gen_word2vec_save_to_mysql_test(model_name="small", keyword=None):
if model_name == "small":
model = gensim.models.Word2Vec.load("./trained_models/zhihu.model")
word2vec_list = model.wv.most_similar(keyword, topn=5)
elif model_name == "tencent":
model = KeyedVectors.load_word2vec_format("./trained_models/45000-small.txt")
word2vec_list = model.most_similar(keyword, topn=5)
elif model_name == "zhihu":
model = KeyedVectors.load_word2vec_format("./trained_models/sgns.zhihu.bigram-char")
word2vec_list = model.most_similar(keyword, topn=5)
word2vec_list_common = []
for item in word2vec_list:
word2vec_list_common.append(item[0])
word2vec_text = ",".join(word2vec_list_common)
print(word2vec_text)
@app.task
def gen_word2vec_save_to_mysql(model_name="small", keyword=None):
if model_name == "small":
try:
model = gensim.models.Word2Vec.load("./search/trained_models/zhihu.model")
word2vec_list = model.wv.most_similar(keyword, topn=5)
except KeyError:
word2vec_list = []
elif model_name == "tencent":
model = KeyedVectors.load_word2vec_format("./search/trained_models/45000-small.txt")
word2vec_list = model.most_similar(keyword, topn=5)
elif model_name == "zhihu":
model = KeyedVectors.load_word2vec_format("./search/trained_models/sgns.zhihu.bigram-char")
word2vec_list = model.most_similar(keyword, topn=5)
word2vec_list_common = []
for item in word2vec_list:
word2vec_list_common.append(item[0])
word2vec_text = ",".join(word2vec_list_common)
print(word2vec_text)
if KeyWord2Vec.objects.filter(keyword=keyword):
pass
else:
keyword_word2vec = KeyWord2Vec()
keyword_word2vec.keyword = keyword
keyword_word2vec.keyword_word2vec = word2vec_text
keyword_word2vec.save()
def test_us_small_model():
model = gensim.models.Word2Vec.load("./trained_models/zhihu.model")
print(model.wv.most_similar('老公', topn=8))
print("*" * 20)
def test_tencent_ai_model():
model = KeyedVectors.load_word2vec_format("./trained_models/45000-small.txt")
print(model.most_similar('特朗普', topn=10))
print("*" * 20)
print(model.most_similar(positive=['女', '国王'], negative=['男'], topn=1))
print("*" * 20)
print(model.doesnt_match("上海 成都 广州 北京".split(" ")))
print("*" * 20)
print(model.similarity('女人', '男人'))
def test_zhihu_model():
model = KeyedVectors.load_word2vec_format("./trained_models/sgns.zhihu.bigram-char")
print(model.most_similar('特朗普', topn=10))
print("*" * 20)
print(model.most_similar(positive=['女', '国王'], negative=['男'], topn=1))
print("*" * 20)
print(model.doesnt_match("上海 成都 广州 北京".split(" ")))
print("*" * 20)
print(model.similarity('女人', '男人'))
if __name__ == '__main__':
# print("使用我们自己训练的微型知乎wordvec模型: ")
# test_us_small_model()
# print("使用腾讯ai实验室word2vec模型: ") # https://ai.tencent.com/ailab/nlp/embedding.html
# test_tencent_ai_model()
# print("使用中文开源知乎word2vec模型") # https://github.com/Embedding/Chinese-Word-Vectors
# test_zhihu_model()
# print("************************")
gen_word2vec_save_to_mysql_test("small", "老公")
|
494425
|
import numpy as np
import sys
sys.path.append(".")
from ai.action.movement.movements.basic import *
from ai.action.movement.movements.sit import *
import ai.actionplanner
def kneading(mars, times=6):
sit(mars)
_speed = 0.7
l = 0
for i in range(times):
l += 1
leg_num = (l % 2 + 1)
_sign = (l % 2) *2 - 1
angle_2_ges1 = 0
angle_3_ges1 = 0
angle_2_ges2 = -20
angle_3_ges2 = 60
j1_angles = -5 * _sign
mars.setLegAngle(1, 1, j1_angles, _speed)
mars.setLegAngle(2, 1, -j1_angles, _speed)
mars.setLegAngle(3, 1, j1_angles, _speed)
mars.setLegAngle(4, 1, -j1_angles, _speed)
ai.actionplanner.ActionPlanner.sleep(0.2)
move_head_tail(mars,1,0)
mars.setLegAngle(leg_num, 2, angle_2_ges1, _speed)
mars.setLegAngle(leg_num, 3, angle_3_ges1, _speed)
ai.actionplanner.ActionPlanner.sleep(0.6)
mars.setLegAngle(leg_num, 2, angle_2_ges2, _speed)
mars.setLegAngle(leg_num, 3, angle_3_ges2, _speed)
move_head_tail(mars,1,0)
ai.actionplanner.ActionPlanner.sleep(0.6)
mars.setLegAngle(leg_num, 2, angle_2_ges1, _speed*0.8)
mars.setLegAngle(leg_num, 3, angle_3_ges1, _speed*0.8)
ai.actionplanner.ActionPlanner.sleep(0.6)
|
494446
|
from verifai.features import *
from verifai.samplers import *
from collections import defaultdict
def test_grid():
space = FeatureSpace({
'weather': Feature(DiscreteBox([0,12])),
'car_positions': Feature(Box([-10,10], [0,1]))
})
sampler = FeatureSampler.gridSamplerFor(space)
i = 0
dict_samples = defaultdict(int)
while True:
try:
sample = sampler.nextSample()
dict_samples[(sample.weather[0], sample.car_positions[0],
sample.car_positions[1])] = 0
except TerminationException:
break
i+=1
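# The expected count below follows from the asserted totals: each continuous Box
# dimension appears to be discretized into 21 grid points and DiscreteBox([0,12])
# contributes 13 values, hence 21*21*13 combinations.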
assert i == len(dict_samples) and i == 21*21*13
def test_grid_iter():
space = FeatureSpace({
'weather': Feature(DiscreteBox([0,12])),
'day': Feature(DiscreteBox([1,7])),
'car_positions': Feature(Box([-10,10]))
})
sampler = FeatureSampler.gridSamplerFor(space)
samples = list(sampler)
assert len(samples) == 13*7*21
def test_grid_oper():
def f(sample):
x = sample.x[0]
return (6 * x - 2) ** 2 * np.sin(12 * x - 4)
space = FeatureSpace({'x': Feature(Box([0,1]))})
sampler = FeatureSampler.gridSamplerFor(space)
samples = []
y_samples = []
for i in range(21):
sample = sampler.nextSample()
samples.append(sample)
y_samples.append(f(sample))
min_i = np.array(y_samples).argmin()
assert min_i == 15
def test_grid_non_standardizable():
space = FeatureSpace({
'a': Feature(DiscreteBox([0,12])),
'b': Feature(FilteredDomain(Box([0,1]), lambda x: x[0] > 0.5))
})
sampler = FeatureSampler.gridSamplerFor(space)
samples = list(sampler)
assert len(samples) == 13
assert all(sample.b[0] > 0.5 for sample in samples)
|
494452
|
import torch
import numpy as np
from transformers import BertModel
from sentence_transformers import SentenceTransformer, util
from relevance_model import text_preprocessing, preprocessing_for_bert, bert_predict, BertClassifier
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import json
import time
import os
import math
import random
# Generate answer candidates (refer to Alg. in Appendix)
def generate_answer_choices(i, relevance_results, similarity, similarity_answers,
choices,
itera, thres=0.9, thres1=0.2):
candidates = []
min_score = 100000000
bucket_size = int(len(relevance_results[0])/3)
for j in range(bucket_size):
if relevance_results[i][j+itera*bucket_size] > thres:
candidates.append(j+itera*bucket_size)
score = 0
min_idx = -1
if itera == 0:
for candidate in candidates:
score = similarity[i][candidate]
if similarity[i][candidate] < thres1:
score = 10
if score < min_score:
min_score = score
min_idx = candidate
else:
for candidate in candidates:
score = similarity[i][candidate]
if similarity[i][candidate] < thres1:
score = 10
for choice in choices:
if similarity_answers[choice][candidate] < thres1:
score += 10
else:
score += similarity_answers[choice][candidate]
if score < min_score:
min_score = score
min_idx = candidate
return min_idx
# Obtain text data
def make_list(filename):
f = open(filename, "r")
answers = []
questions = []
questions_with_idx = []
answer_choices = []
answer_choices_idx = []
for line in f:
data = json.loads(line)
questions_with_idx.append(data["question"])
ans_choices = data["answer_choices"]
ans_id = data["answer_label"]
ans_right = ans_choices[ans_id]
objects = data["objects"]
final_question = text_preprocessing(data["question"], objects)
final_ans = text_preprocessing(ans_right, objects)
answers.append(' '.join(final_ans))
questions.append(' '.join(final_question))
answer_choices.append(ans_right)
answer_choices_idx.append(ans_id)
return answers, questions, answer_choices, questions_with_idx, answer_choices_idx
# Replace the object indices in answer candidates with the ones existing in the right answer
def limit_range(choices, objects_size):
new_choices = []
for choice in choices:
new_choice = []
for token in choice:
if isinstance(token, list):
idx_list = []
for idx in token:
if idx >= objects_size:
idx_list.append(objects_size-1)
else:
idx_list.append(idx)
new_choice.append(idx_list)
else:
new_choice.append(token)
new_choices.append(new_choice)
return new_choices
# Integrate the original dataset with only right answers
def make_final_list(filename, choices, choices_idx):
f = open(filename, "r")
with open("../X_VCR/val.jsonl", 'w') as f1:
i = 0
for line in f:
data = json.loads(line)
# Erroneous DP
if "west_17.jpg" in data["img_fn"]:
i += 1
continue
data["answer_choices"] = limit_range(choices[i], len(data["objects"]))
data["answer_label"] = choices_idx[i]
data["rationale_choices"] = limit_range(choices[i], len(data["objects"]))
data["rationale_label"] = choices_idx[i]
data_json = json.dumps(data)
f1.write(data_json+'\n')
i += 1
def remove_zero(arr):
arr_new = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] == 0:
arr_new.append(arr[i][:j])
break
return arr_new
# Convert answer choices into VCR form
def convert_to_VCR(x, idx):
pronouns = ["he", "she", "it", "they", "his", "her", "their", "him", "them", "its"]
arr = x.split(" ")
arr_new = []
idx_list = []
for token in arr:
if token in GENDER_NEUTRAL_NAMES:
idx_list.append(name2idx[token])
else:
if idx_list != []:
arr_new.append(idx_list)
idx_list = []
arr_new.append(token)
if idx_list != []:
arr_new.append(idx_list)
question_idx = []
for token in questions_with_idx_MC[idx]:
if isinstance(token, list):
question_idx.append(token)
orig_choices = answer_choices_MC[idx]
i = 0
j = 0
final = []
while i < len(arr_new) and j < len(orig_choices):
if isinstance(arr_new[i], list):
while not isinstance(orig_choices[j], list):
j += 1
if j == len(orig_choices):
j -= 1
break
if isinstance(orig_choices[j], list):
final.append(orig_choices[j])
j += 1
else:
if len(question_idx) != 0:
final.append(question_idx[0])
question_idx = question_idx[1:]
else:
final.append(arr_new[i])
elif arr_new[i].lower() in pronouns:
while not isinstance(orig_choices[j], list):
j += 1
if j == len(orig_choices):
j -= 1
break
if isinstance(orig_choices[j], list):
final.append(orig_choices[j])
if arr_new[i].lower() == "his" or arr_new[i].lower() == "her" or arr_new[i].lower() == "their" or arr_new[i].lower() == "its":
final.append('\'')
final.append('s')
j += 1
else:
final.append(arr_new[i])
else:
final.append(arr_new[i])
i += 1
return final
model = SentenceTransformer('stsb-roberta-base')
answers_MC, questions_MC, answer_choices_MC, questions_with_idx_MC, answer_choices_idx_MC = make_list("MC-VCR_test.jsonl")
answers, _, _, _, _ = make_list("../X_VCR/orig_val.jsonl")
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
name2idx = dict()
for i, name in enumerate(GENDER_NEUTRAL_NAMES):
name2idx[name] = i
if not os.path.exists("relevance.npy"):
relevance_model = torch.load("relevance_model.th")
relevance_model.eval()
questions_MC_tokenized, questions_attention_mask_MC = preprocessing_for_bert(questions_MC)
answers_MC_tokenized, attention_mask_MC = preprocessing_for_bert(answers_MC)
answers_tokenized, attention_mask = preprocessing_for_bert(answers)
questions_MC_tokenized = remove_zero(questions_MC_tokenized.numpy().tolist())
answers_MC_tokenized = remove_zero(answers_MC_tokenized.numpy().tolist())
answers_tokenized = remove_zero(answers_tokenized.numpy().tolist())
questions_attention_mask_MC = remove_zero(questions_attention_mask_MC.numpy().tolist())
attention_mask_MC = remove_zero(attention_mask_MC.numpy().tolist())
attention_mask = remove_zero(attention_mask.numpy().tolist())
relevance_results = np.zeros((len(answers_MC_tokenized), len(answers_tokenized)))
# Compute the relevance matrix w.r.t. all the right choices of GD-VCR and VCR dev
for i, sample_MC in enumerate(questions_MC_tokenized):
start = time.time()
batch_size = 64
val_inputs = [sample_MC + sample[1:] for sample in answers_tokenized]
val_masks = [questions_attention_mask_MC[i] + sample[1:] for sample in attention_mask]
val_inputs_1 = [val_input[:64] if len(val_input) > 64 else val_input + [0] * (64-len(val_input)) for val_input in val_inputs]
val_masks_1 = [val_mask[:64] if len(val_mask) > 64 else val_mask + [0] * (64-len(val_mask)) for val_mask in val_masks]
val_inputs = torch.tensor(val_inputs_1)
val_masks = torch.tensor(val_masks_1)
val_labels = torch.tensor(np.array([0] * len(answers)))
val_data = TensorDataset(val_inputs, val_masks, val_labels)
val_sampler = SequentialSampler(val_data)
val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)
results = bert_predict(relevance_model, val_dataloader)
relevance_results[i] = np.array([result[1] for result in results])
end = time.time()
print(i, len(results), len(results[0]), end - start)
np.save("relevance.npy", relevance_results)
else:
embeddings1 = model.encode(answers_MC, convert_to_tensor=True)
embeddings2 = model.encode(answers, convert_to_tensor=True)
similarity = util.pytorch_cos_sim(embeddings1, embeddings2)
device = torch.device("cpu")
relevance_results = np.load("relevance.npy")
embeddings_answers = model.encode(answers, convert_to_tensor=True)
similarity_answers = util.pytorch_cos_sim(embeddings_answers, embeddings_answers)
choices = []
for i in range(len(relevance_results)):
choices.append([])
for itera in range(3):
choices[i].append(generate_answer_choices(i, relevance_results, similarity, similarity_answers, choices[i], itera))
final_choices = []
final_choices_idx = []
for i in range(len(choices)):
final_choices.append([])
for j, choice in enumerate(choices[i]):
if j == answer_choices_idx_MC[i]:
final_choices[i].append(answer_choices_MC[i])
final_choices[i].append(convert_to_VCR(answers[choice], i))
if answer_choices_idx_MC[i] == 3:
final_choices[i].append(answer_choices_MC[i])
print(final_choices[i])
final_choices_idx.append(random.randint(0,3))
temp = final_choices[-1][answer_choices_idx_MC[i]]
final_choices[-1][answer_choices_idx_MC[i]] = final_choices[-1][final_choices_idx[-1]]
final_choices[-1][final_choices_idx[-1]] = temp
print(final_choices_idx[-1])
print(final_choices[i])
assert len(final_choices[i]) == 4
print("----")
make_final_list("MC-VCR_test.jsonl", final_choices, final_choices_idx)
|
494506
|
import datetime
import time
import os
from celery import Celery
celery_app = Celery('hello', broker=os.environ['REDIS_URL'])
@celery_app.task
def hello():
time.sleep(10)
with open ('hellos.txt', 'a') as hellofile:
hellofile.write('Hello {}\n'.format(datetime.datetime.now()))
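# Minimal usage sketch (assumes the Redis broker at REDIS_URL is reachable and a
# Celery worker is running for this module): enqueue the task asynchronously
# instead of calling hello() inline.
if __name__ == '__main__':
    async_result = hello.delay()
    print('queued task', async_result.id)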
|
494511
|
Import("env")
with open("version.txt") as fp:
version = fp.readline()
env.Replace(PROGNAME="firmware_%s" % version.replace(".", "_"))
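# Usage note (assumption, not from the original project): this is typically wired
# into platformio.ini via `extra_scripts = pre:<this_script>.py`, with a version.txt
# next to it containing a single version line such as `1.2.3`.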
|
494545
|
from rest_framework.generics import ListCreateAPIView
from rest_framework.generics import RetrieveUpdateDestroyAPIView
from .models import Micropost, Usr
from .serializers import MicropostSerializer
class MicropostsListView(ListCreateAPIView):
serializer_class = MicropostSerializer
def get_queryset(self):
return Micropost.objects.filter(user__username=self.kwargs['username'])
def perform_create(self, serializer):
user = Usr.objects.get(username=self.kwargs['username'])
serializer.save(user=user)
class MicropostView(RetrieveUpdateDestroyAPIView):
serializer_class = MicropostSerializer
def get_queryset(self):
return Micropost.objects.filter(user__username=self.kwargs['username'])
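# A hypothetical URLconf sketch showing how these views could be wired so that
# self.kwargs['username'] is populated (paths and module names are assumptions):
#
# from django.urls import path
# from .views import MicropostsListView, MicropostView
#
# urlpatterns = [
#     path('users/<str:username>/microposts/', MicropostsListView.as_view()),
#     path('users/<str:username>/microposts/<int:pk>/', MicropostView.as_view()),
# ]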
|
494547
|
class InvalidFileName(ValueError):
def __init__(self, filename):
self.message = 'Invalid file name: {}'.format(filename)
super(InvalidFileName, self).__init__(self.message)
|
494562
|
import os
import sys
import math
import pickle
import argparse
import time
from torch import optim
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
sys.path.append(os.getcwd())
from utils import *
from experiments.utils.config import Config
from experiments.utils.batch_gen_amass import BatchGeneratorAMASSCanonicalized
def loss_function(X, Y_r, Y, mu, logvar):
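# Reconstruction term: L1 on the predicted poses plus a lambda_tf-weighted L1 on
# frame-to-frame differences; MSE_v penalizes the discontinuity between the last
# observed frame X[-1] and the first prediction Y_r[0]; KLD is the standard Gaussian
# KL, optionally smoothed to sqrt(1 + KLD^2) - 1 when robustkl is set.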
MSE = F.l1_loss(Y_r, Y) + cfg.lambda_tf*F.l1_loss(Y_r[1:]-Y_r[:-1], Y[1:]-Y[:-1])
MSE_v = F.l1_loss(X[-1], Y_r[0])
KLD = 0.5 * torch.mean(-1 - logvar + mu.pow(2) + logvar.exp())
if robustkl:
KLD = torch.sqrt(1 + KLD**2)-1
loss_r = MSE + cfg.lambda_v * MSE_v + cfg.beta * KLD
return loss_r, np.array([loss_r.item(), MSE.item(), MSE_v.item(), KLD.item()])
def train(epoch):
t_s = time.time()
train_losses = 0
total_num_sample = 0
loss_names = ['TOTAL', 'MSE', 'MSE_v', 'KLD']
while batch_gen.has_next_rec():
traj = batch_gen.next_batch(cfg.batch_size).to(device)
if (torch.isnan(traj)).any():
print('- meet nan. Skip it')
continue
X = traj[:t_his]
Y = traj[t_his:]
Y_r, mu, logvar = model(X, Y)
loss, losses = loss_function(X, Y_r, Y, mu, logvar)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses += losses
total_num_sample += 1
batch_gen.reset()
scheduler.step()
dt = time.time() - t_s
train_losses /= total_num_sample
lr = optimizer.param_groups[0]['lr']
losses_str = ' '.join(['{}: {:.4f}'.format(x, y) for x, y in zip(loss_names, train_losses)])
logger.info('====> Epoch: {} Time: {:.2f} {} lr: {:.5f}'.format(epoch, dt, losses_str, lr))
for name, loss in zip(loss_names, train_losses):
tb_logger.add_scalar('vae_' + name, loss, epoch)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default=None)
parser.add_argument('--mode', default='train')
parser.add_argument('--iter', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu_index', type=int, default=0)
parser.add_argument('--test', action='store_true', default=False)
args = parser.parse_args()
"""load the right model"""
if 'vanilla' in args.cfg:
from models.models_vanilla import *
elif 'mojo' in args.cfg:
from models.models_mojo import *
"""setup"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
dtype = torch.float32
torch.set_default_dtype(dtype)
device = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_index)
cfg = Config(args.cfg, test=args.test)
tb_logger = SummaryWriter(cfg.tb_dir) if args.mode == 'train' else None
logger = create_logger(os.path.join(cfg.log_dir, 'log.txt'))
"""parameter"""
mode = args.mode
nz = cfg.nz
t_his = cfg.t_his
t_pred = cfg.t_pred
body_repr = cfg.body_repr
subsets = cfg.dataset
robustkl = cfg.robustkl
"""data"""
batch_gen = BatchGeneratorAMASSCanonicalized(amass_data_path=cfg.dataset_path,
amass_subset_name=subsets,
sample_rate=8,
body_repr=body_repr)
batch_gen.get_rec_list()
"""model"""
model = get_vae_model(cfg, batch_gen.get_feature_dim())
optimizer = optim.Adam(model.parameters(), lr=cfg.vae_lr)
scheduler = get_scheduler(optimizer, policy='lambda', nepoch_fix=cfg.num_vae_epoch_fix, nepoch=cfg.num_vae_epoch)
if args.iter > 0:
cp_path = cfg.vae_model_path % args.iter
print('loading model from checkpoint: %s' % cp_path)
model_cp = torch.load(cp_path)
model.load_state_dict(model_cp['model_dict'])
if mode == 'train':
model.to(device)
model.train()
for i in range(args.iter, cfg.num_vae_epoch):
train(i)
if cfg.save_model_interval > 0 and (i + 1) % cfg.save_model_interval == 0:
with to_cpu(model):
cp_path = cfg.vae_model_path % (i + 1)
model_cp = {'model_dict': model.state_dict()}
torch.save(model_cp, cp_path)
|
494642
|
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def is_leaf_node(self, root: TreeNode) -> bool:
return root.left is None and root.right is None
def binary_tree_paths(self, root: TreeNode, current_path: str, results: List[str]):
if root is None:
return
if self.is_leaf_node(root):
results.append(f'{current_path}->{root.val}'[2:])
return
self.binary_tree_paths(root.left, f'{current_path}->{root.val}', results)
self.binary_tree_paths(root.right, f'{current_path}->{root.val}', results)
def binaryTreePaths(self, root: TreeNode) -> List[str]:
result = []
self.binary_tree_paths(root, '', results=result)
return result
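# Quick usage sketch: build the tree [1, 2, 3, null, 5] and collect its
# root-to-leaf paths.
if __name__ == '__main__':
    root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3))
    print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']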
|
494667
|
import theano
import theano.tensor as T
import numpy as np
from utility.utility import *
from lstm_layer import *
def uni_lstm_init(prefix, params, layer_setting):
return lstm_init(prefix+'_forward', params, layer_setting)
def uni_lstm_calc(prefix, params, layer_setting, state_below, h_init=None, c_init=None, mask=None, training=True):
return lstm_calc(prefix+'_forward', params, layer_setting, state_below, h_init, c_init, mask, training=training)
|
494704
|
from commitizen import factory, out
from commitizen.config import BaseConfig
class Example:
"""Show an example so people understands the rules."""
def __init__(self, config: BaseConfig, *args):
self.config: BaseConfig = config
self.cz = factory.commiter_factory(self.config)
def __call__(self):
out.write(self.cz.example())
|
494717
|
import sys
import os
import matplotlib
import fnmatch
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
from scipy.ndimage.filters import maximum_filter
from skimage.feature import peak_local_max
from scipy.odr import ODR, Model, RealData
def locate_intercept(x,y,x_range):
print 'locating offset'
ix = np.arange(len(x))
p = np.where(x[ix]>2)[0]
ix = ix[p]
p = np.where(np.logical_and(x[ix] < np.percentile(x[ix],x_range[1]),x[ix] > np.percentile(x[ix],x_range[0])))[0]
ix = ix[p]
for n in range(10):
a = np.nanmedian(y[ix]-x[ix])
print a, x.shape
p = np.where(np.abs(y[ix]-a-x[ix])<2.5*np.nanstd(y[ix]-a-x[ix]))[0]
ix = ix[p]
return a, ix
def calibrate(dir,plotfile='calibration.png',magnitude_range_fraction=(0.1,8),sky_flux_cutoff_percent=0.1,ZP=25):
magfile = os.path.join(dir,'ref.mags')
fluxfile = os.path.join(dir,'ref.flux')
mag = np.loadtxt(magfile)
flux = np.loadtxt(fluxfile)
p = np.where((mag[:,3] > 0) & (flux[:,0] > 0))[0]
#sky_max_flux = np.percentile(flux[p,0],sky_flux_cutoff_percent)
#q = np.where(flux[p,0] < sky_max_flux)[0]
#sky_flux = 0.9*np.mean(flux[p[q],0])
#q = np.where(mag[p,3] > np.percentile(mag[p,3],99.9))[0]
#sky_flux = 0.95*np.median(flux[p[q],0])
#flux[:,0] -= sky_flux
x = np.linspace(np.min(mag[p,3]),np.max(mag[p,3]),3)
offset, stars = locate_intercept(mag[p,3],ZP-2.5*np.log10(flux[p,0]),magnitude_range_fraction)
# Axes definitions
nullfmt = plt.NullFormatter()
rect_scatter = [0.15, 0.15, 0.7, 0.7]
rect_histx = [0.15, 0.85, 0.7, 0.1]
rect_histy = [0.85, 0.15, 0.1, 0.7]
binsize = 0.5
bandwidth = 0.25
ymin, ymax = (offset-1,offset+1)
binsize_y = 0.05
fig = plt.figure()
ax1 = fig.add_subplot(223, position=rect_scatter)
ax1.scatter(mag[p,3],ZP-2.5*np.log10(flux[p,0])-mag[p,3],s=5,color='k')
ax1.scatter(mag[p[stars],3],ZP-2.5*np.log10(flux[p[stars],0])-mag[p[stars],3],s=5,color='c')
#ax.plot(x,x,'r--')
ax1.plot(x,x*0.0+offset,'r',label=r'$\Delta mag = %4.2f$'%offset)
ax1.grid()
ax1.legend(loc='upper left')
ax1.set_xlabel(r'$M_{\rm DAO}$',fontsize='14')
ax1.set_ylabel(r'$%5.2f-2.5*\log_{10} F_{\rm P}-M_{\rm DAO}$'%ZP,fontsize='14')
xmin, xmax = plt.xlim()
xx = np.linspace(xmin,xmax,1000)
ax2 = fig.add_subplot(221, position=rect_histx)
hval, bins, _ = ax2.hist(mag[p,3],range=(xmin,xmax),bins=int((xmax-xmin)/binsize+1),
                             density=True,alpha=0.3)
kde_skl = KernelDensity(kernel='epanechnikov',bandwidth=bandwidth)
sample = mag[p,3]
kde_skl.fit(sample[:, np.newaxis])
log_pdf = kde_skl.score_samples(xx[:, np.newaxis])
ax2.plot(xx,np.exp(log_pdf),'r')
ax2.xaxis.set_major_formatter(nullfmt)
ax2.yaxis.set_major_formatter(nullfmt)
ax2.set_title(dir)
ax3 = fig.add_subplot(221, position=rect_histy)
ax3.hist(ZP-2.5*np.log10(flux[p,0])-mag[p,3],range=(ymin,ymax),bins=int((ymax-ymin)/binsize_y+1),
                             orientation='horizontal',density=True,alpha=0.3)
ax3.xaxis.set_major_formatter(nullfmt)
ax3.yaxis.set_major_formatter(nullfmt)
ax1.set_ylim((ymin,ymax))
ax3.set_ylim((ymin,ymax))
plt.savefig(os.path.join(dir,plotfile))
ax1.set_ylim((offset-0.1,offset+0.1))
ax3.set_ylim((offset-0.1,offset+0.1))
plt.savefig(os.path.join(dir,'zoom-'+plotfile))
mag[:,3] += offset
if mag.shape[1] == 4:
np.savetxt(os.path.join(dir,'ref.mags.calibrated'),mag,fmt='%5d %8.3f %8.3f %7.4f')
else:
np.savetxt(os.path.join(dir,'ref.mags.calibrated'),mag,fmt='%5d %8.3f %8.3f %7.4f %7.4f %7.3f %7.3f %7.3f')
cflux = 10.0**(0.4*(ZP-mag[:,3]))
if mag.shape[1] == 4:
cfluxerr = 0.0*cflux
else:
cfluxerr = cflux - 10.0**(0.4*(ZP-mag[:,3]-mag[:,4]))
np.savetxt(os.path.join(dir,'ref.flux.calibrated'),np.vstack((cflux,cfluxerr)).T,fmt='%12.4f %12.4f')
def makeCMD(dirI,dirV,bandwidth = 0.2,ifile=None,vfile=None,plot_density=True,RC=None,source_colour=None,xlabel=None,ylabel=None,
IV=None,legend=True,title=True,plotfile=None,density_plotfile=None,I_limit=21,V_limit=21,RC_limit=18,optimize_bandwidth=False):
if ifile is None:
ifile = os.path.join(dirI,'ref.mags.calibrated')
if vfile is None:
vfile = os.path.join(dirV,'ref.mags.calibrated')
if xlabel is None:
xlabel = r'$(V-I)_{\rm P}$'
if ylabel is None:
ylabel = r'$I_{\rm p}$'
im = np.loadtxt(ifile)
vm = np.loadtxt(vfile)
p = np.where((im[:,4] > 0) & (im[:,3] < I_limit) & (vm[:,4] > 0) & (vm[:,3] < V_limit) )[0]
plt.figure()
plt.scatter(vm[p,3]-im[p,3],im[p,3],s=3,c='k')
plt.grid()
plt.gca().invert_yaxis()
plt.xlabel(xlabel,fontsize='14')
plt.ylabel(ylabel,fontsize='14')
if title:
plt.title(dirI+' '+dirV)
if RC is not None:
plt.scatter(RC[0],RC[1],color='r',s=60,label='Red Clump (%6.3f,%6.3f)'%RC)
# if source_pos is not None:
# plt.errorbar(source_pos[0],source_pos[1],xerr=source_pos[2],yerr=source_pos[3],color='c')
# plt.scatter(source_pos[0],source_pos[1],marker='o',s=80,facecolors='none', edgecolors='c')
if IV is not None:
plt.scatter(IV[1]-IV[0],IV[0],color='m',marker='.',s=40,label='Blended Source (%6.3f,%6.3f)'%(IV[1]-IV[0],IV[0]))
if source_colour is not None:
plt.scatter(np.nan,np.nan,color='w',marker='.',s=40,label='$(V-I)_S$ = %6.3f +/- %5.3f'%(source_colour[0],source_colour[1]))
if source_colour is not None and RC is not None:
#plt.scatter(np.nan,np.nan,color='w',marker='.',s=40,label='$\Delta (V-I)_S$ = %6.3f +/- %5.3f'%(source_colour[0]-RC[0],source_colour[1]))
plt.scatter(np.nan,np.nan,color='w',marker='.',s=40,label='$(V-I)_{S,0}$ = %6.3f +/- %5.3f'%(source_colour[0]-RC[0]+1.06,source_colour[1]))
if legend:
plt.legend(loc='upper left')
xmin, xmax = plt.xlim()
ymax, ymin = plt.ylim()
if plotfile is None:
plotfile = dirI+'-'+dirV+'-CMD.png'
plt.savefig(plotfile)
plt.close()
    print(xmin, xmax, ymin, ymax)
np.savetxt(dirI+'-'+dirV+'-CMDdata',
np.vstack((im[p,0],im[p,1],im[p,2],vm[p,3],vm[p,4],im[p,3],im[p,4])).T,
fmt='%6d %9.3f %9.3f %7.4f %7.4f %7.4f %7.4f',
header='ID xpos ypos V V_err I I_err')
red_clump = None
if plot_density:
prob = np.ones(im.shape[0])
Zmax = 1.0
p = np.where((RC_limit+2 > im[:,3]) & (im[:,3] >= RC_limit))[0]
VI_main_sequence = np.nanmedian(vm[p,3]-im[p,3])
p = np.where(vm[:,3]-im[:,3] < VI_main_sequence)[0]
prob[p] = 0.0
for iteration in range(10):
p = np.where((im[:,4] > 0) & (vm[:,4] > 0) & (im[:,3] < RC_limit) & (vm[:,3] < V_limit) & (vm[:,3]-im[:,3] > VI_main_sequence) & (prob > 0.2*Zmax))[0]
samples = np.vstack([vm[p,3]-im[p,3],im[p,3]]).T
if optimize_bandwidth:
bandwidths = np.linspace(0.1,0.6,101)
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
{'bandwidth': bandwidths},
cv=LeaveOneOut())
grid.fit(samples)
bandwidth = grid.best_params_['bandwidth']
                print('optimal bandwidth =', bandwidth)
kde_skl = KernelDensity(kernel='gaussian',bandwidth=bandwidth)
kde_skl.fit(samples)
# score_samples() returns the log-likelihood of the samples
prob = np.exp(kde_skl.score_samples(np.vstack([vm[:,3]-im[:,3],im[:,3]]).T))
xvi = np.linspace(xmin,xmax,int(40*(xmax-xmin)+1))
xi = np.linspace(ymin,ymax,int(40*(ymax-ymin)+1))
Y, X = np.meshgrid(xvi, xi[::-1])
xy = np.vstack([Y.ravel(), X.ravel()]).T
Z = np.exp(kde_skl.score_samples(xy))
Z = Z.reshape(X.shape)
levels = np.linspace(0, Z.max(), 25)
Zmax = np.max(np.max(Z))
mx = maximum_filter(Z,size=20)
#lm = (Z == mx) * (Z > 0.01*Zmax) * (Z < 0.99*Zmax)
lm = (Z == mx) * (Z > 0.01*Zmax)
if np.sum(lm) > 0:
nlm = np.nonzero(lm)
max_nlm = np.argmax(Z[nlm])
local_maxima = np.nonzero(lm)
i0 = local_maxima[0][max_nlm]
i1 = local_maxima[1][max_nlm]
red_clump = (float(Y[i0,i1]),float(X[i0,i1]))
                print(Z[local_maxima]/Zmax)
                print('Red clump detected at', red_clump)
else:
                print('Error detecting red clump')
red_clump = None
plt.figure()
plt.contourf(Y, X, Z, levels=levels, cmap=plt.cm.Reds)
if np.sum(lm) > 0:
plt.scatter(vm[:,3]-im[:,3],im[:,3],s=3,c='k')
plt.scatter(vm[p,3]-im[p,3],im[p,3],s=3,c='b')
plt.scatter(red_clump[0],red_clump[1],color='c',marker='+',s=60,label='Red Clump (%6.3f,%6.3f)'%red_clump)
if source_colour is not None:
plt.scatter(np.nan,np.nan,color='w',marker='.',s=40,label='$(V-I)_S$ = %6.3f +/- %5.3f'%(source_colour[0],source_colour[1]))
plt.scatter(np.nan,np.nan,color='w',marker='.',s=40,\
label='$(V-I)_{S,0}$ = %6.3f +/- %5.3f'%(source_colour[0]-red_clump[0]+1.06,source_colour[1]))
if legend:
plt.legend(loc='upper left')
plt.grid()
plt.gca().invert_yaxis()
plt.legend(loc='upper left')
plt.xlabel(xlabel,fontsize='14')
plt.ylabel(ylabel,fontsize='14')
plt.title(dirI+' '+dirV)
plt.xlim((xmin,xmax))
plt.ylim((ymax,ymin))
if density_plotfile is None:
density_plotfile = dirI+'-'+dirV+'-CMD-density.png'
plt.savefig(density_plotfile)
plt.close()
return red_clump
def source_colour(ifile,vfile,plotfile='source_colour.png',VIoffset=0.0):
# Define a function (quadratic in our case) to fit the data with.
def linear_func1(p, x):
m, c = p
return m*x + c
Idata = np.loadtxt(ifile)
Vdata = np.loadtxt(vfile)
qI = np.where(Idata[:,5] < 1.0)
qV = np.where(Vdata[:,5] < 1.0)
Idata = Idata[qI]
Vdata = Vdata[qV]
intervals=[0.025,0.05,0.1,0.2]
colour = []
delta_colour = []
plt.figure(figsize=(12,12))
for inter,interval in enumerate(intervals):
start = np.floor(np.min(Idata[:,0]))
end = np.ceil(np.max(Idata[:,0]))
time = np.arange(start,end,interval)
flux1 = np.zeros_like(time) + np.nan
flux2 = np.zeros_like(time) + np.nan
flux1_err = np.zeros_like(time) + np.nan
flux2_err = np.zeros_like(time) + np.nan
for i in range(len(time)):
q = np.where(np.abs(Idata[:,0] - time[i]) < interval/2.0)[0]
if q.any():
flux1[i] = np.sum(Idata[q,1]/Idata[q,2]**2) / np.sum(1.0/Idata[q,2]**2)
flux1_err[i] = np.sqrt(1.0 / np.sum(1.0/Idata[q,2]**2))
p = np.where(np.abs(Vdata[:,0] - time[i]) < interval/2.0)[0]
if p.any():
flux2[i] = np.sum(Vdata[p,1]/Vdata[p,2]**2) / np.sum(1.0/Vdata[p,2]**2)
flux2_err[i] = np.sqrt(1.0 / np.sum(1.0/Vdata[p,2]**2))
plt.subplot(2,2,inter+1)
plt.errorbar(flux1/1000.0,flux2/1000.0,xerr=flux1_err/1000.0,yerr=flux2_err/1000.0,fmt='.')
plt.xlabel(r'$\delta F_I (000)$')
plt.ylabel(r'$\delta F_V (000)$')
plt.grid()
# Create a model for fitting.
linear_model = Model(linear_func1)
good_data = np.where(np.logical_and(np.isfinite(flux1),np.isfinite(flux2)))[0]
offset = np.mean(flux2[good_data]-flux1[good_data])
# Create a RealData object using our initiated data from above.
data = RealData(flux1[good_data], flux2[good_data], sx=flux1_err[good_data], sy=flux2_err[good_data])
# Set up ODR with the model and data.
odr = ODR(data, linear_model, beta0=[1.0, offset])
# Run the regression.
out = odr.run()
# Use the in-built pprint method to give us results.
out.pprint()
x1, x2 = plt.gca().get_xlim()
x_fit = np.linspace(x1*1000,x2*1000, 1000)
y_fit = linear_func1(out.beta, x_fit)
plt.plot(x_fit/1000.0,y_fit/1000.0,'r-',label=r"$\delta F_V = %5.3f \delta F_I + %5.3f$"%(out.beta[0],out.beta[1]))
colour.append(VIoffset-2.5*np.log10(out.beta[0]))
delta_colour.append(VIoffset-2.5*np.log10(out.beta[0]-out.sd_beta[0]) - colour[inter])
plt.title(r'$\Delta t = %5.3f \quad (V-I)_S = %8.3f \pm %8.3f$'%(interval,colour[inter],delta_colour[inter]))
plt.legend()
plt.savefig(plotfile)
return colour, delta_colour
def plot_lightcurve(file, columns=(0,3,4),plotfile='lightcurve.png',grid_on=True):
data = np.loadtxt(file)
plt.figure(figsize=(8,5))
plt.errorbar(data[:,columns[0]],data[:,columns[1]],data[:,columns[2]],fmt='.')
plt.gca().invert_yaxis()
plt.xlabel(r'$HJD - 2450000$')
plt.ylabel(r'$Magnitude$')
if grid_on:
plt.grid()
plt.savefig(plotfile)
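# Hedged synthetic check for locate_intercept (illustrative only, not part of the
# original pipeline): with y = x + 0.7 plus small noise, the sigma-clipped median
# offset recovered for points inside the 5th-95th percentile range should be ~0.7.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x_demo = np.linspace(3.0, 10.0, 500)
    y_demo = x_demo + 0.7 + rng.normal(0.0, 0.05, x_demo.size)
    offset_demo, used_idx = locate_intercept(x_demo, y_demo, (5, 95))
    print('recovered offset:', offset_demo)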
|
494770
|
import uuid
from django.db import models
from validator.models.validation_run import ValidationRun
class CeleryTask(models.Model):
validation = models.ForeignKey(to=ValidationRun, on_delete=models.PROTECT, related_name='celery_tasks', null=False)
celery_task_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
def __str__(self):
return "task: {}".format(self.celery_task_id)
|
494773
|
import functools
import numpy as np
from ... import problems
class RamseyCassKoopmansModel(problems.TwoPointBVP):
"""
    Class representing a generic Ramsey-Cass-Koopmans growth model.
Attributes
----------
equilibrium_capital : function
Equilibrium value for capital (per unit effective labor).
equilibrium_consumption : function
Equilibrium value for consumption (per unit effective labor).
intensive_output : function
Output (per unit effective labor supply).
marginal_product_capital : function
Marginal product of capital (i.e., first derivative of intensive output).
params : dict(str: float)
Dictionary of model parameters.
pratt_arrow_risk_aversion : function
Pratt-Arrow relative risk aversion function.
"""
def __init__(self, ARA, f, k_star, mpk, params):
"""
Initialize an instance of the RamseyCassKoopmans class.
Parameters
----------
ARA : function
Pratt-Arrow absolute risk aversion function.
f : function
Output (per unit effective labor supply).
k_star : function
Equilibrium (i.e., steady-state) value for capital stock (per unit
effective labor supply).
mpk : function
Marginal product of capital (per unit effective labor supply).
params : dict(str: float)
Dictionary of model parameters
"""
self._equilibrium_capital = k_star
self._intensive_output = f
self._marginal_product_capital = mpk
self._pratt_arrow_risk_aversion = ARA
# construct the terminal condition
c_star = self._c_star_factory(k_star)
terminal_condition = self._terminal_condition_factory(c_star)
self._equilibrium_consumption = c_star
# construct the RHS of the system of ODEs
rhs = self._rhs_factory(ARA, f, mpk)
super(RamseyCassKoopmansModel, self).__init__(self._initial_condition,
terminal_condition, 1, 2,
params, rhs)
@property
def equilibrium_capital(self):
return self._equilibrium_capital
@property
def equilibrium_consumption(self):
return self._equilibrium_consumption
@property
def intensive_output(self):
return self._intensive_output
@property
def marginal_product_capital(self):
return self._marginal_product_capital
@property
def pratt_arrow_risk_aversion(self):
return self._pratt_arrow_risk_aversion
@staticmethod
def _actual_investment(k_tilde, c_tilde, f, **params):
return f(k_tilde, **params) - c_tilde
@staticmethod
def _breakeven_investment(k_tilde, delta, g, n, **params):
return (g + n + delta) * k_tilde
@classmethod
def _c_tilde_dot(cls, t, k_tilde, c_tilde, ARA, mpk, A0, delta, g, rho, **params):
A = cls._technology(t, A0, g)
return ((mpk(k_tilde, **params) - delta - rho) / (A * ARA(t, A * c_tilde, **params))) - g * c_tilde
@staticmethod
def _initial_condition(t, k_tilde, c_tilde, A0, K0, N0, **params):
return [k_tilde - (K0 / (A0 * N0))]
@staticmethod
def _technology(t, A0, g):
return A0 * np.exp(g * t)
@classmethod
def _k_dot(cls, t, k_tilde, c_tilde, f, delta, g, n, **params):
k_dot = (cls._actual_investment(k_tilde, c_tilde, f, **params) -
cls._breakeven_investment(k_tilde, delta, g, n))
return k_dot
@classmethod
def _ramsey_model(cls, t, k_tilde, c_tilde, ARA, f, mpk, A0, delta, g, n, rho, **params):
out = [cls._k_dot(t, k_tilde, c_tilde, f, delta, g, n, **params),
cls._c_tilde_dot(t, k_tilde, c_tilde, ARA, mpk, A0, delta, g, rho, **params)]
return out
@classmethod
def _rhs_factory(cls, ARA, f, mpk):
return functools.partial(cls._ramsey_model, ARA=ARA, f=f, mpk=mpk)
@staticmethod
def _terminal_condition(t, k_tilde, c_tilde, c_star, **params):
return [c_tilde - c_star(**params)]
@classmethod
def _terminal_condition_factory(cls, c_star):
return functools.partial(cls._terminal_condition, c_star=c_star)
def _c_star(self, k_star, **params):
k_tilde = k_star(**params)
c_star = (self.intensive_output(k_tilde, **params) -
self._breakeven_investment(k_tilde, **params))
return c_star
def _c_star_factory(self, k_star):
return functools.partial(self._c_star, k_star=k_star)
|
494787
|
import json
import os
import numpy as np
from argparse import ArgumentParser
from CLIP import clip
import clipgrams
def main():
# Args
parser = ArgumentParser()
parser.add_argument('--image_dir', type=str)
parser.add_argument('--index_dir', type=str)
parser.add_argument('--knn', type=int, default=10)
parser.add_argument('--clip_model', type=str, default='ViT-B/16')
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--num_prepro_workers', type=int, default=8)
parser.add_argument('--lower', type=bool, default=True)
parser.add_argument('--load_entries', type=bool, default=False)
args = parser.parse_args()
# Load index args and add to current args
fname = os.path.join(args.index_dir, 'args.txt')
with open(fname, 'r') as f:
index_args = json.load(f)
for key in list(index_args.keys()):
if key not in args.__dict__.keys():
args.__dict__[key] = index_args[key]
# Load clip
net, preprocess = clip.load(args.clip_model, jit=False)
net = net.eval().requires_grad_(False).to(args.device)
# Tagger
clipgrams.tagger(args, net)
if __name__ == '__main__':
main()
|
494798
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from pcdet.models.backbones_3d.sa_block import SA_block
class PointContext3D(nn.Module):
def __init__(self, model_cfg, IN_DIM, dropout=0.1):
super().__init__()
self.model_cfg = model_cfg
self.pc_range = self.model_cfg.PC_RANGE
self.voxel_size = self.model_cfg.VOXEL_SIZE
self.grid_size = self.model_cfg.GRID_SIZE
self.IN_DIM = IN_DIM
# Self-attention layers
self.self_attn1 = SA_block(inplanes=self.model_cfg.ATTN_DIM, planes=self.model_cfg.ATTN_DIM)
self.self_attn2 = SA_block(inplanes=2 * self.model_cfg.ATTN_DIM, planes=2 * self.model_cfg.ATTN_DIM)
# MLP layers
self.reduce_dim = nn.Sequential(nn.Conv1d(IN_DIM, self.model_cfg.ATTN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.ATTN_DIM),
nn.ReLU(inplace=True),
nn.Conv1d(self.model_cfg.ATTN_DIM, self.model_cfg.ATTN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.ATTN_DIM),
nn.ReLU(inplace=True)
)
self.reduce_dim_cat = nn.Sequential(nn.Conv1d(2*self.model_cfg.ATTN_DIM, self.model_cfg.ATTN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.ATTN_DIM),
nn.ReLU(inplace=True),
nn.Conv1d(self.model_cfg.ATTN_DIM, self.model_cfg.ATTN_DIM, kernel_size=1),
nn.BatchNorm1d(self.model_cfg.ATTN_DIM),
nn.ReLU(inplace=True)
)
def add_context_to_points(self, point_feats, l_1=None, l_2=None):
"""Add self-attention context across every selected and deformed point"""
global context_points
if l_1 is None and l_2 is None:
context_points = self.self_attn1(point_feats)
context_points = self.self_attn2(context_points)
if l_1 is not None and l_2 is None:
context_points = self.self_attn1(point_feats)
ms1 = torch.cat([l_1, context_points], dim=1)
context_points_ms1 = self.self_attn2(ms1)
context_points = self.reduce_dim_cat(context_points_ms1)
if l_1 is not None and l_2 is not None:
ms1 = torch.cat([l_1, point_feats], dim=1)
ms1 = self.reduce_dim_cat(ms1)
ms1_context = self.self_attn1(ms1)
ms2 = torch.cat([l_2, ms1_context], dim=1)
ms2_context = self.self_attn2(ms2)
context_points = self.reduce_dim_cat(ms2_context)
return context_points
def forward(self, batch_size, l_features, l_xyz, l_conv1=None, l_conv2=None):
"""
Args:
:param l_conv2:
:param l_conv1:
:param batch_size:
:param l_xyz:
:param l_features:
"""
# reduce dim of selected points
l_features_red = self.reduce_dim(l_features)
# get context for every deformed point features input to this module
point_context_features = self.add_context_to_points(l_features_red, l_conv1, l_conv2)
return point_context_features
|
494945
|
from ._base import validate_input
from ._optimizers import constrained_binary_solve
from ._optimizers import constrained_multiclass_solve
__all__ = [
"constrained_binary_solve",
"constrained_multiclass_solve",
"validate_input",
]
|
494991
|
import numpy as np
__all__ = ()
class H5LogTable:
def __init__(self, group):
self._group = group
def __getitem__(self, label):
return self._group[label] if label in self._group else []
def resize(self, size):
for ds in self._group.values():
ds.resize(size, axis=0)
# mimicking Pytables API
@property
def row(self):
class Appender:
def __setitem__(_, label, row): # noqa: B902, N805
if isinstance(row, np.ndarray):
shape = row.shape
elif isinstance(row, (float, int)):
shape = ()
if label not in self._group:
if isinstance(row, np.ndarray):
dtype = row.dtype
elif isinstance(row, float):
dtype = float
else:
dtype = None
self._group.create_dataset(
label, (0, *shape), maxshape=(None, *shape), dtype=dtype
)
ds = self._group[label]
ds.resize(ds.shape[0] + 1, axis=0)
ds[-1, ...] = row
return Appender()
class _EnergyOffset:
value = None
def __call__(self, offset):
assert self.value is None
self.value = offset
return self
def __enter__(self):
return self
def __exit__(self, *args):
assert self.value is not None
self.value = None
return None
def __rsub__(self, base):
return base - self.value if self.value else base
energy_offset = _EnergyOffset()
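# Hedged usage sketch (assumes h5py is installed; not part of the original module):
# each assignment through `.row[label]` appends one entry to a resizable HDF5 dataset,
# and `energy_offset` shifts values subtracted from it only while its context is active.
if __name__ == '__main__':
    import h5py
    with h5py.File('log_demo.h5', 'w') as f:
        table = H5LogTable(f.create_group('log'))
        table.row['energy'] = np.array([1.0, 2.0])
        table.row['energy'] = np.array([1.5, 2.5])
        print(f['log/energy'].shape)  # (2, 2)
    with energy_offset(10.0):
        print(25.0 - energy_offset)  # 15.0 while the offset is active
    print(25.0 - energy_offset)  # 25.0 again outside the context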
|
494992
|
from abc import ABC, abstractmethod
from msdm.core.problemclasses.mdp.mdp import MarkovDecisionProcess, State, Action
from msdm.core.distributions import Distribution
from msdm.core.algorithmclasses import Result
import random
class Policy(ABC):
@abstractmethod
def action_dist(self, s: State) -> Distribution[Action]:
pass
def action(self, s: State) -> Action:
return self.action_dist(s).sample()
def run_on(self,
mdp: MarkovDecisionProcess,
initial_state=None,
max_steps=int(2 ** 30),
rng=random):
if initial_state is None:
initial_state = mdp.initial_state_dist().sample()
traj = []
s = initial_state
for t in range(max_steps):
if mdp.is_terminal(s):
break
a = self.action_dist(s).sample(rng=rng)
ns = mdp.next_state_dist(s, a).sample(rng=rng)
r = mdp.reward(s, a, ns)
traj.append((s, a, ns, r))
s = ns
if traj:
states, actions, _, rewards = zip(*traj)
else:
states = ()
actions = ()
rewards = ()
return Result(**{
'state_traj': states,
'action_traj': actions,
'reward_traj': rewards
})
|
494996
|
import unittest
import os
from sdc11073.wsdiscovery import WSDiscoverySingleAdapter
from sdc11073 import pmtypes
from sdc11073.location import SdcLocation
from sdc11073.sdcclient import SdcClient
from tests.mockstuff import SomeDevice
loopback_adapter = 'Loopback Pseudo-Interface 1' if os.name == 'nt' else 'lo'
"""
Base test to use in all test that require device or a client. This sets up a default device and client
and has connect method.
"""
class BaseTest(unittest.TestCase):
def setUp(self):
self.wsdiscovery = WSDiscoverySingleAdapter(loopback_adapter)
self.wsdiscovery.start()
self._locValidators = [pmtypes.InstanceIdentifier('Validator', extensionString='System')]
def tearDown(self):
self.wsdiscovery.stop()
def setUpCocoDraft10(self):
self.cocoFinalLocation = SdcLocation(fac='tklx', poc='CU1', bed='cocoDraft10Bed')
self.sdcDeviceCoCoFinal = SomeDevice.fromMdibFile(self.wsdiscovery, None, '70041_MDIB_Final.xml')
self.sdcDeviceCoCoFinal.startAll()
self.sdcDeviceCoCoFinal.setLocation(self.cocoFinalLocation, self._locValidators)
xAddr = self.sdcDeviceCoCoFinal.getXAddrs()
self.sdcClientCocoFinal = SdcClient(xAddr[0],
deviceType=self.sdcDeviceCoCoFinal.mdib.sdc_definitions.MedicalDeviceType,
validate=True)
self.sdcClientCocoFinal.startAll()
def stopDraft10(self):
self.sdcClientCocoFinal.stopAll()
self.sdcDeviceCoCoFinal.stopAll()
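# Hedged illustration (not part of the original module) of how this base class is meant
# to be used by concrete tests: set up the draft-10 device/client pair, exercise it,
# and always tear it down again. Note that this does start the mock device stack.
class ExampleConnectTest(BaseTest):
    def test_client_connects_to_device(self):
        self.setUpCocoDraft10()
        try:
            self.assertIsNotNone(self.sdcClientCocoFinal)
        finally:
            self.stopDraft10()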
|
495003
|
import logging
from typing import List, Optional, Sequence
from langid.langid import LanguageIdentifier
import numpy as np
from sacred import Experiment
from tqdm import tqdm
from torch.utils.data import DataLoader
from experiments.Experiment_utils import create_logger
from experiments.data_loading import data_loading_ingredient, load_test_folds, save_probs, save_lang_to_idx
ex = Experiment('LangID_experiment', ingredients=[data_loading_ingredient])
# Attach the logger to the experiment
ex.logger = create_logger()
@ex.capture
def test_model(data_set=None, langider=None, lang_to_idx=None, ) -> np.ndarray:
"""
Tests a given langid.py model on the given data set.
:param data_set: data set to test on
:param langider: model to test
:param lang_to_idx: mapping of languages to ids
"""
import numpy as np
langs = data_set.get_tag_set()
pred_prob = np.zeros((len(data_set), len(langs) + 1))
dataloader = DataLoader(data_set)
for i, elem in enumerate(tqdm(dataloader)):
text = elem['text'][0]
label = elem['label'][0]
ranking = langider.rank(text)
for lang, prob in ranking:
pred_prob[i, lang_to_idx[lang]] = prob
pred_prob[i, len(langs)] = lang_to_idx[label]
return pred_prob
@ex.config
def config():
model_path = None # Which model to load, if none, the built-in langid.py model is used
@ex.capture
def load_langid_model(model_path: Optional[str], lang_set: Sequence[str]) -> LanguageIdentifier:
"""
Loads the provided langid.py model. If none provided, then it loads the default model.
:param model_path: path to model to load
:param lang_set: language set to which the model should be restricted. Provide empty list for
no restrictions.
:return: language identifier
"""
if model_path is None:
from langid import langid
langider = LanguageIdentifier.from_modelstring(langid.model, norm_probs=True)
else:
langider = LanguageIdentifier.from_modelpath(model_path, norm_probs=True)
if len(lang_set) > 0:
langider.set_languages(langs=lang_set)
return langider
@ex.automain
def main(_log):
_log.info("Loading test data")
test_dataset = load_test_folds()
tag_set = test_dataset.get_tag_set()
_log.info("Loading langid.py model")
langider = load_langid_model(lang_set=tag_set)
_log.info("Testing model")
lang_to_idx = test_dataset.lang_to_idx
eval_data = test_model(data_set=test_dataset, langider=langider, lang_to_idx=lang_to_idx)
_log.info("Saving predictions and lang_to_idx")
save_probs(eval_data, ex)
save_lang_to_idx(test_dataset.lang_to_idx, ex)
|
495022
|
from flask import request, url_for
from json import dumps as json_encode, loads as json_decode
from app.helpers.oauth.config import oauth_config
from base64 import b64encode, b64decode
from slugify import slugify as slugify_str
from re import sub
from urllib.parse import quote, urlencode
import config
def json_to_b64(json):
return b64encode(json_encode(json).encode('utf8')).decode('utf8')
def json_to_str(json):
return json_encode(json)
def b64_to_json(json):
return json_decode(b64decode(json))
def encode_state(provider):
return json_to_b64({'provider':provider, 'redirect':request.url})
def encode_oauth(provider):
provider_config = oauth_config.get(provider)
authorize_url = provider_config.get('authorize')
scopes = provider_config.get('scopes')
return f'{authorize_url}?' + urlencode([*{
'client_id': config.auth.get(provider).get('client-id'),
'scope': " ".join(scopes),
'state': encode_state(provider),
'redirect_uri': config.canonical_host + url_for('auth_login_oauth')
}.items()])
def oauth_data():
return {
'sites': {
site_id: {
**{k: v for k, v in site_data.items() if k != 'auth'},
'client': config.auth.get(site_id).get('client-id')
} for site_id, site_data in oauth_config.items()
},
'redirect_uri': config.canonical_host + url_for('auth_login_oauth')
}
def slugify(string):
return slugify_str(sub(r"[']", '', string))
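# Hedged round-trip sketch (illustrative only; it assumes the `config` and oauth imports
# above resolve in your environment): json_to_b64/b64_to_json are inverses, and slugify
# strips apostrophes before slugging.
if __name__ == '__main__':
    payload = {'provider': 'github', 'redirect': '/home'}
    assert b64_to_json(json_to_b64(payload)) == payload
    print(slugify("Alice's Adventures"))  # alices-adventures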
|
495036
|
import tensorflow as tf
from tensorflow.keras.regularizers import l2
class ArcNet(tf.keras.layers.Layer):
def __init__(self, num_classes, regularizer=l2(5e-4), **kwargs):
super(ArcNet, self).__init__(**kwargs)
self.regularizer = regularizer
self.num_classes = num_classes
def build(self, input_shape):
self.weight = self.add_weight(shape=[input_shape[-1], self.num_classes],
initializer=tf.keras.initializers.GlorotUniform(),
trainable=True,
regularizer=self.regularizer)
def call(self, feature):
normed_feature = tf.nn.l2_normalize(feature, axis=1)
normed_weight = tf.nn.l2_normalize(self.weight, axis=0)
cos_theta = tf.matmul(normed_feature, normed_weight)
return cos_theta
def get_config(self):
config = super().get_config()
config.update({"units": self.num_classes,
"kernel_regularizer": self.regularizer})
return config
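# Hedged shape check (assumption: ArcNet is used as the last layer producing cos(theta)
# logits for an ArcFace-style loss; batch size and dimensions below are arbitrary).
if __name__ == '__main__':
    demo_features = tf.random.normal((4, 128))
    cos_theta = ArcNet(num_classes=10)(demo_features)
    print(cos_theta.shape)  # (4, 10), values in [-1, 1]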
|
495049
|
from kivy.animation import Animation
from functools import partial
from .base import Animator
__all__ = (
"RotateOutAnimator",
"RotateOutDownLeftAnimator",
"RotateOutDownRightAnimator",
"RotateOutUpLeftAnimator",
"RotateOutUpRightAnimator",
)
# rotate out
class RotateOutAnimator(Animator):
def start_(self, tmp=None):
props = ["angle", "opacity"]
vals = [-200, 0]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateOutDownLeftAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [-90, 0]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateOutDownRightAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [90, 0]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateOutUpLeftAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x - self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [90, 0]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
class RotateOutUpRightAnimator(Animator):
def start_(self, tmp=None):
pivot = (self.widget.x + 3 * self.widget.width / 2, self.widget.y)
self.widget.origin_ = pivot
props = ["angle", "opacity"]
vals = [-90, 0]
anim = Animation(d=self.duration, **dict(zip(props, vals)))
anim.cancel_all(self.widget)
anim.start(self.widget)
anim.bind(on_complete=partial(self.anim_complete, self))
|
495075
|
import pysftp
from paramiko import sftp
import mlflow as mlf
import pandas as pd
import json
'''
model = 0 (Default), 1 (LightGBM), 2 (Multiclass), 3 (TF-Ranking)
features passed are saved as a dictionary
F1 result of the model
CSV of the trained model
'''
def mlflow_log(model, metrics, parameters, rest_params, features):
cnopts = pysftp.CnOpts()
with pysftp.Connection('192.168.3.11', username='ubuntu', private_key='~/.ssh/id_rsa', port=22, cnopts=cnopts):
mlf.set_tracking_uri('http://192.168.3.11:80')
print('SFTP connection successful')
if model == 1:
experiment_name = 'LightGBM'
elif model == 2:
experiment_name = 'Multiclass'
elif model == 3:
experiment_name = 'TF-Ranking'
    else:
        print('Model is missing!')
        return
experiment_id = model
mlf.set_experiment(experiment_name)
with mlf.start_run(experiment_id=experiment_id, run_name=experiment_name, nested=False):
for name, value in metrics.items():
mlf.log_metric(name, value)
for name, value in parameters.items():
mlf.log_param(name, value)
for name, value in rest_params.items():
mlf.log_param(name, value)
for name, value in features.items():
mlf.log_param(name, value)
print(f'Logging successful')
def mlflow_dict(df_summary):
columns = df_summary.columns.values
metrics = ['Acc_mean', 'F1_mean', 'Prec_mean']
rest_param = ['model_type']
metrics_dict = {}
parameter_dict = {}
rest_param_dict = {}
feature_dict = {}
for metric in metrics:
value = float(df_summary[metric])
if metric not in metrics_dict:
metrics_dict[metric] = value
print('Metrics successful')
if 'features' in columns:
features = df_summary['features'].values[0].split(' - ')
feature_dict = {features[i]: features[i] for i in range(0, len(features))}
print('Features successful')
if 'parameters' in columns:
parameter_dict = json.loads(df_summary.parameters.iloc[0])
print('Parameters successful')
for param in rest_param:
value = df_summary[param].values[0]
if param not in rest_param_dict:
rest_param_dict[param] = value
print('Rest_Params successful')
return (metrics_dict, parameter_dict, rest_param_dict, feature_dict)
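# Hedged end-to-end sketch (column names follow the docstring above; the summary values
# and feature/parameter names are made up for illustration):
if __name__ == '__main__':
    df_summary = pd.DataFrame([{
        'Acc_mean': 0.91, 'F1_mean': 0.88, 'Prec_mean': 0.90,
        'model_type': 'lgbm-ranker',
        'features': 'price - brand - category',
        'parameters': json.dumps({'num_leaves': 31, 'learning_rate': 0.05}),
    }])
    metrics, params, rest_params, features = mlflow_dict(df_summary)
    # mlflow_log(1, metrics, params, rest_params, features)  # requires the SFTP/MLflow hosts above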
|
495077
|
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/api')
def my_microservice():
print(request)
print(request.environ)
response = jsonify({'Hello': 'World!'})
print(response)
print(response.data)
return response
if __name__ == '__main__':
print(app.url_map)
app.run()
|
495121
|
import pytest
from nbformat import notebooknode
from nb2hugo.preprocessors import ImagesPreprocessor
@pytest.fixture
def preprocessor():
return ImagesPreprocessor()
@pytest.fixture
def resources(tmpdir):
nb_dir = tmpdir.mkdir('nb_dir')
img_dir = nb_dir.mkdir('img_dir')
img_dir.join('fake.png').write('not really an image')
resources = {
'metadata': {
'name': 'notebook',
'path': nb_dir,
},
}
return resources
def test_process_image_link(preprocessor, resources):
resources['images_path'] = {}
link = preprocessor._process_image_link('Some alt text', 'img_dir/fake.png', resources)
    assert link == ''
assert 'fake.png' in resources['images_path']
source = 'Some text with an .\nAnd more text.'
processed_source = 'Some text with an .\nAnd more text.'
raw_cell, code_cell, markdown_cell, expected_markdown_cell = [
notebooknode.from_dict({"cell_type": cell_type,
"metadata": {},
"source": source})
for cell_type, source in [('raw', source),
('code', source),
('markdown', source),
('markdown', processed_source)]
]
@pytest.mark.parametrize("input_cell, expected_cell, fake_in_images_path", [
(raw_cell, raw_cell, False),
(code_cell, code_cell, False),
(markdown_cell, expected_markdown_cell, True),
])
def test_preprocess_cell(preprocessor, resources, input_cell,
expected_cell, fake_in_images_path):
resources['images_path'] = {}
result = preprocessor.preprocess_cell(input_cell.copy(), resources.copy(), None)
assert result[0] == expected_cell
assert ('fake.png' in resources['images_path']) == fake_in_images_path
|
495140
|
from django import forms
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponseNotFound
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from ..models import *
from .settings import *
import datetime
import math
class ModifyForm(forms.Form):
    title = forms.CharField(label='Title', max_length=50,
                            widget=forms.TextInput(attrs={'class': 'form-control form-control-user mb-5'}))
    content = forms.CharField(label='Blog content', widget=forms.Textarea(attrs={'class': 'form-control form-control-user mb-5'}))
@login_required
def home(request):
user = request.user
blogs = Blog.objects.filter(user_id__exact=user.id).order_by('-modified_time')
collections = Collection.objects.filter(user_id__exact=user.id).order_by('-blog__collect_amount')
return render(request, '../templates/blog/home.html',
{
'blogs': blogs,
'collections': collections
})
@login_required
def blog(request, pk):
blog = get_object_or_404(Blog, pk=pk)
user: User = request.user
if user.collection_set.filter(blog_id__exact=blog.pk).count() == 0:
is_collected = False
else:
is_collected = True
comments = blog.comment_set.all().order_by('created_time')
if request.method == 'POST':
delete_comment = request.POST.get('delete_comment')
create_comment = request.POST.get('create_comment')
collect_blog = request.POST.get('collect_blog')
if not (delete_comment is None):
try:
comment = Comment.objects.filter(user_id__exact=user.id,
blog_id__exact=blog.id,
content__exact=delete_comment)[0]
comment.delete()
except IndexError:
pass
if not (collect_blog is None):
collection = Collection.objects.filter(user_id__exact=user.id, blog_id__exact=blog.id)
if len(collection) == 0:
new_collection = Collection(user=user, blog=blog)
blog.collect_amount += 1
blog.save()
new_collection.save()
is_collected = True
else:
collection.delete()
is_collected = False
if not (create_comment is None):
new_comment = Comment(user=user, blog=blog, content=create_comment)
new_comment.save()
else:
blog.pageview += 1
blog.save()
return render(request, '../templates/blog/blog.html',
{'blog': blog, 'comments': comments, 'user': user, 'is_collected': is_collected})
@login_required
def modify(request, pk):
blog = get_object_or_404(Blog, pk=pk)
if blog.user != request.user:
return HttpResponseForbidden
if request.method == "POST":
form = ModifyForm(request.POST)
if form.is_valid():
title = form.cleaned_data['title']
content = form.cleaned_data['content']
blog.title = title
blog.content = content
blog.save()
return HttpResponseRedirect(reverse('helper:blog_page', args=(blog.id,)))
else:
form = ModifyForm(initial={'title': blog.title, 'content': blog.content})
return render(request, '../templates/blog/modify.html', {'form': form})
@login_required
def delete(request, pk):
blog = get_object_or_404(Blog, pk=pk)
if blog.user != request.user:
return HttpResponseForbidden
blog.delete()
return HttpResponseRedirect(reverse("helper:blog_homepage"))
@login_required
def add(request):
user = request.user
if request.method == "POST":
form = ModifyForm(request.POST)
if form.is_valid():
title = form.cleaned_data['title']
content = form.cleaned_data['content']
blog = Blog(user=user, title=title, content=content)
blog.save()
return HttpResponseRedirect(reverse('helper:blog_page', args=(blog.id,)))
else:
form = ModifyForm()
return render(request, '../templates/blog/modify.html', {'form': form})
def hot(request, pg):
blogs = Blog.objects.filter(Q(user__blog__pageview__gte=HOT_BLOG_PAGEVIEW,
modified_time__gte = timezone.now() -
datetime.timedelta(days=BLOGPAGE_HOT_BLOG_DAY)) |
Q(modified_time__gte = timezone.now() -
datetime.timedelta(days=BLOGPAGE_COMMON_BLOG_DAY))).distinct().order_by('-modified_time')
page_num = math.ceil(len(blogs) / BLOGPAGE_BLOG_NUMBER)
if pg > page_num or pg < 1:
return HttpResponseNotFound()
if pg * BLOGPAGE_BLOG_NUMBER > len(blogs):
number = len(blogs)
else:
number = pg * BLOGPAGE_BLOG_NUMBER
blogs = blogs[(pg - 1) * BLOGPAGE_BLOG_NUMBER: number]
return render(request, '../templates/blog/hot.html',
{
'blogs': blogs,
'page_num': page_num,
'current_page': pg
})
@login_required
def public(request, friend_id):
user: User = request.user
friend = Friend.objects.filter(user_id__exact=user.id, friend_id__exact=friend_id)
if len(friend) == 0 and friend_id != user.id:
return HttpResponseForbidden()
blogs = Blog.objects.filter(user_id__exact=friend_id)
message = None
if friend_id == user.id:
friend_user = user
else:
friend_user = friend[0].friend
if friend[0].authority == 0:
blogs = None
message = "没有权限访问!"
return render(request, '../templates/blog/friend.html',
{
'blogs': blogs,
'friend': friend_user,
'message': message
})
|
495169
|
from django import template
register = template.Library()
cache = {}
@register.inclusion_tag('more_menu_links.html')
def more_menu_links():
from ...utils import get_menu_links
if 'links' not in cache:
cache['links'] = get_menu_links()
return {'links': cache['links']}
|
495212
|
import os
import pytest
try:
from pyscf import gto, ao2mo, scf
from pauxy.utils.from_pyscf import (
integrals_from_scf,
integrals_from_chkfile,
get_pyscf_wfn,
dump_pauxy
)
no_pyscf = False
except (ImportError, OSError):
no_pyscf = True
from pauxy.utils.io import (
write_input,
read_qmcpack_wfn_hdf,
from_qmcpack_sparse
)
@pytest.mark.unit
@pytest.mark.skipif(no_pyscf, reason="pyscf not found.")
def test_from_pyscf():
atom = gto.M(atom='Ne 0 0 0', basis='sto-3g', verbose=0, parse_arg=False)
mf = scf.RHF(atom)
mf.kernel()
h1e, chol, nelec, enuc = integrals_from_scf(mf, verbose=0, chol_cut=1e-5)
assert chol.shape[0] == 15
assert chol.shape[1] == 25
assert nelec == (5,5)
assert h1e.shape[0] == 5
@pytest.mark.unit
@pytest.mark.skipif(no_pyscf, reason="pyscf not found.")
def test_from_chkfile():
atom = gto.M(atom=[('H', 1.5*i, 0, 0) for i in range(0,10)],
basis='sto-6g', verbose=0, parse_arg=False)
mf = scf.RHF(atom)
mf.chkfile = 'scf.chk'
mf.kernel()
h1e, chol, nelec, enuc = integrals_from_chkfile('scf.chk', verbose=0, chol_cut=1e-5)
assert h1e.shape == (10,10)
assert chol.shape == (19,100)
assert nelec == (5,5)
assert enuc == pytest.approx(6.805106937254286)
@pytest.mark.unit
@pytest.mark.skipif(no_pyscf, reason="pyscf not found.")
def test_pyscf_to_pauxy():
atom = gto.M(atom=[('H', 1.5*i, 0, 0) for i in range(0,4)],
basis='sto-6g', verbose=0, parse_arg=False)
mf = scf.RHF(atom)
mf.chkfile = 'scf.chk'
mf.kernel()
dump_pauxy(chkfile='scf.chk', hamil_file='afqmc.h5', sparse=True)
wfn = read_qmcpack_wfn_hdf('afqmc.h5')
h1e, chol, ecore, nmo, na, nb = from_qmcpack_sparse('afqmc.h5')
write_input('input.json', 'afqmc.h5', 'afqmc.h5')
def teardown_module(self):
cwd = os.getcwd()
files = ['scf.chk', 'afqmc.h5', 'input.json']
for f in files:
try:
os.remove(cwd+'/'+f)
except OSError:
pass
|
495232
|
import unittest
from tests.test_utils import get_sample_pdf_with_labels, get_sample_pdf, get_sample_sdf, get_sample_pdf_with_extra_cols, get_sample_pdf_with_no_text_col ,get_sample_spark_dataframe
from nlu import *
class TestNameSpace(unittest.TestCase):
def test_tokenize(self):
df = nlu.load('en.tokenize').predict('What a wonderful day!')
print(df)
df = nlu.load('tokenize').predict('What a wonderful day!')
print(df)
def test_pos(self):
df = nlu.load('pos', verbose=True).predict('What a wonderful day!')
print(df)
#
# def test_embed(self):
# # df = nlu.load('en.embed').predict('What a wonderful day!')
# #
# # print(df)
#
# df = nlu.load('embed').predict('What a wonderful day!')
# print(df)
#
#
# def test_embed_glove(self):
# df = nlu.load('en.embed.glove').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('embed.glove').predict('What a wonderful day!')
# print(df)
# df = nlu.load('glove').predict('What a wonderful day!')
# print(df)
#
def test_sentiment_twitter_out(self):
# res=nlu.load('en.sentiment.twitter',verbose=True).predict('@elonmusk Tesla stock price is too high imo') # ifninite loop ??
res = nlu.load('en.sentiment.imdb',verbose=True).predict('The Matrix was a pretty good movie')
print(res)
print(res.columns)
def test_output_levels(self):
print('token test')
df = nlu.load('sentiment',verbose=True).predict('What a wonderful day!', output_level='token')
print(df)
print('document test')
df = nlu.load('sentiment',verbose=True).predict('What a wonderful day!', output_level='document')
print(df)
print('sentence test')
df = nlu.load('sentiment',verbose=True).predict('What a wonderful day!', output_level='sentence')
print(df)
print('chunk test')
df = nlu.load('sentiment',verbose=True).predict('I like peanut butter and jelly!', output_level='chunk')
print(df)
def test_ner_multilingual(self):
df = nlu.load('ner',verbose=True).predict('New York is a great place and America aswell')
print(df)
def test_sentiment(self):
df = nlu.load('en.sentiment').predict('What a wonderful day!')
def test_emotion(self):
df = nlu.load('en.classify.emotion').predict('What a wonderful day!')
print(df)
    def test_spell_default(self):
df = nlu.load('spell').predict('What a wonderful day!')
print(df)
#
def test_dependency(self):
df = nlu.load('dep', verbose=True).predict('What a wonderful day!')
print(df)
def test_dependency_untyped(self):
df = nlu.load('dep.untyped', verbose=True).predict('What a wonderful day!')
print(df)
def test_bert(self):
df = nlu.load('bert').predict('What a wonderful day!')
print(df)
def test_lang(self):
df = nlu.load('lang', verbose=True).predict('What a wonderful day!')
print(df)
print(df.columns)
print(df['language_de'])
print(df['language_fr'])
print(len(df['language_de'][0]))
# df = nlu.load('xx.classify.lang').predict('What a wonderful day!')
# print(df)
# df = nlu.load('classify.lang').predict('What a wonderful day!')
# print(df)
# print(df)
def test_explain(self):
df = nlu.load('en.explain').predict('What a wonderful day!')
print(df)
df = nlu.load('explain').predict('What a wonderful day!')
print(df)
def test_match(self):
df = nlu.load('match.date',verbose=True).predict('What a wonderful day!')
print(df)
# df = nlu.load('en.match.date').predict('What a wonderful day!')
# print(df)
def test_clean_stop(self):
# df = nlu.load('clean.stop').predict('What a wonderful day!')
# print(df)
df = nlu.load('en.clean.stop').predict('What a wonderful day!')
print(df)
def test_spell(self):
df = nlu.load('spell').predict('What a wonderful day!')
print(df)
df = nlu.load('en.spell').predict('What a wonderful day!')
print(df)
# def test_all_spell(self):
# df = nlu.load('en.spell.symmetric').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('en.spell.context').predict('What a wonderful day!')
# print(df)
# df = nlu.load('en.spell.norvig').predict('What a wonderful day!')
#
# print(df)
# df = nlu.load('spell').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('en.spell').predict('What a wonderful day!')
#
# print(df)
# def test_biobert(self):
# df = nlu.load('biobert').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('en.embed.biobert').predict('What a wonderful day!')
# print(df)
#
# def test_elmo(self):
# df = nlu.load('en.embed.elmo').predict('What a wonderful day!')
# print(df)
# df = nlu.load('elmo').predict('What a wonderful day!')
# print(df)
#
# def test_use(self):
# df = nlu.load('en.embed.use').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('use').predict('What a wonderful day!')
# print(df)
#
# def test_albert(self):
# df = nlu.load('en.embed.albert').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('albert').predict('What a wonderful day!')
# print(df)
#
# def test_xlnet(self):
# df = nlu.load('en.embed.xlnet').predict('What a wonderful day!')
#
# print(df)
#
# df = nlu.load('xlnet').predict('What a wonderful day!')
# print(df)
def test_lemma(self):
df = nlu.load('lemma').predict('What a wonderful day!')
print(df)
df = nlu.load('en.lemma').predict('What a wonderful day!')
print(df)
# def test_norm(self):
# df = nlu.load('lemma').predict('What a wonderful day!')
#
# print(df)
# df = nlu.load('en.lemma').predict('What a wonderful day!')
#
# print(df)
#
# def test_use(self):
# df = nlu.load('en.embed_sentence.use').predict('What a wonderful day!')
# print(df)
#
# def test_glove(self):
# df = nlu.load('nl.ner.wikiner.glove_6B_300').predict('What a wonderful day!')
#
# print(df)
def test_sentence_detector(self):
df = nlu.load('sentence_detector', verbose=True).predict('What a wonderful day! Tomorrow will be even better!')
print(df)
def test_stopwords(self):
df = nlu.load('match.chunk').predict('What a wonderful day!')
print(df)
def test_classify_lang(self):
df = nlu.load('xx.classify.wiki_7').predict('What a wonderful day!')
print(df)
def test_sentiment_on_datasets(self):
df = nlu.load('sentiment.twitter').predict('What a wonderful day!')
print(df)
# df = nlu.load('sentiment.imdb').predict('What a wonderful day!')
# print(df)
def test_multiple_nlu_references(self):
# df = nlu.load('elmo bert').predict('What a wonderful day!')
df = nlu.load('elmo').predict('What a wonderful day!')
print(df)
# df = nlu.load('sentiment.imdb').predict('What a wonderful day!')
# print(df)
def test_sentiment_output(self):
res = nlu.load('sentiment',verbose=True).predict('Your life is the sum of a remainder of an unbalanced equation inherent to the programming of the matrix. You are the eventuality of an anomaly, which despite my sincerest efforts I have been unable to eliminate from what is otherwise a harmony of mathematical precision. While it remains a burden assiduously avoided, it is not unexpected, and thus not beyond a measure of control. Which has led you, inexorably, here.', output_level='sentence')
# res = nlu.load('bert',verbose=True).predict('@Your life is the sum of a remainder of an unbalanced equation inherent to the programming of the matrix. You are the eventuality of an anomaly, which despite my sincerest efforts I have been unable to eliminate from what is otherwise a harmony of mathematical precision. While it remains a burden assiduously avoided, it is not unexpected, and thus not beyond a measure of control. Which has led you, inexorably, here.', output_level='sentence')
print(res)
print(res['sentiment'])
print(res.dtypes)
def test_stem(self):
pdf = get_sample_pdf()
res = nlu.load('stem',verbose=True).predict(pdf )
print(res)
res = nlu.load('en.stem',verbose=True).predict(pdf)
print(res)
def test_norm(self):
pdf = get_sample_pdf()
res = nlu.load('norm',verbose=True).predict(pdf, output_positions=True )
print(res)
# res = nlu.load('en.norm',verbose=True).predict(pdf)
# print(res)
def test_chunk(self):
res = nlu.load('chunk',verbose=True).predict('I like peanut butter and jelly!' )
print(res)
def test_ngram(self):
pdf = get_sample_pdf()
# res = nlu.load('ngram',verbose=True).predict(pdf )
pipe = nlu.load('ngram',verbose=True)
# print(res['ngrams'])
print("PIPE", pipe)
res = nlu.load('en.ngram',verbose=True).predict(pdf)
print(res['ngrams'])
def test_chunk_embeds(self):
pdf = get_sample_pdf()
        res = nlu.load('embed_chunk',verbose=True).predict("What a wonderful day!" )
print(res)
res = nlu.load('en.embed_chunk',verbose=True).predict(pdf)
print(res)
def test_regex_matcher(self):
pdf = get_sample_pdf()
res = nlu.load('match.regex',verbose=True).predict(pdf )
print(res)
def test_text_matcher(self):
pdf = get_sample_pdf()
res = nlu.load('match.text',verbose=True).predict(pdf )
print(res)
def test_auto_sentence_embed_bert(self): # TODO WIP
pdf = get_sample_pdf()
res = nlu.load('embed_sentence.bert',verbose=True).predict(pdf )
print(res)
def test_auto_sentence_embed_elmo(self): # TODO WIP
pdf = get_sample_pdf()
res = nlu.load('embed_sentence.elmo',verbose=True).predict(pdf )
print(res)
# def test_bad_pandas_column_datatype(self):
# sdf = get_sample_spark_dataframe()
# res = nlu.load('asdasj.asdas',verbose=True).predict(sdf, output_level='sentence')
# # res = nlu.load('bert',verbose=True).predict('@Your life is the sum of a remainder of an unbalanced equation inherent to the programming of the matrix. You are the eventuality of an anomaly, which despite my sincerest efforts I have been unable to eliminate from what is otherwise a harmony of mathematical precision. While it remains a burden assiduously avoided, it is not unexpected, and thus not beyond a measure of control. Which has led you, inexorably, here.', output_level='sentence')
#
# print(res)
#
# def test_bad_pandas_dataframe_datatype(self):
# sdf = get_sample_spark_dataframe()
# res = nlu.load('asdasj.asdas',verbose=True).predict(sdf, output_level='sentence')
# # res = nlu.load('bert',verbose=True).predict('@Your life is the sum of a remainder of an unbalanced equation inherent to the programming of the matrix. You are the eventuality of an anomaly, which despite my sincerest efforts I have been unable to eliminate from what is otherwise a harmony of mathematical precision. While it remains a burden assiduously avoided, it is not unexpected, and thus not beyond a measure of control. Which has led you, inexorably, here.', output_level='sentence')
#
# print(res)
#2.6 test
def test_electra(self):
pdf = get_sample_pdf()
res = nlu.load('en.embed.electra',verbose=True).predict(pdf )
print(res)
def test_embed_sentence_bert(self):
pdf = get_sample_pdf()
res = nlu.load('en.embed_sentence.small_bert_L2_128',verbose=True).predict(pdf )
print(res)
    def test_embed_sentence_biobert(self):
pdf = get_sample_pdf()
res = nlu.load('en.embed_sentence.biobert.pubmed_base_cased',verbose=True).predict(pdf )
print(res)
def test_toxic(self):
pdf = get_sample_pdf()
res = nlu.load('en.classify.toxic',verbose=True).predict(pdf )
print(res)
def test_e2e(self):
pdf = get_sample_pdf()
res = nlu.load('en.classify.e2e',verbose=True).predict(pdf )
print(res)
def test_labse(self):
pdf = get_sample_pdf()
res = nlu.load('xx.embed_sentence.labse',verbose=True).predict(pdf )
print(res)
def test_xx_bert(self):
pdf = get_sample_pdf()
res = nlu.load('xx.embed_sentence',verbose=True).predict(pdf )
print(res)
def test_26_bert(self):
res = nlu.load('en.ner.bert',verbose=True).predict('The NLU library is a machine learning library, simmilar to Tensorflow and Keras')
print(res)
if __name__ == '__main__':
unittest.main()
|
495257
|
import uvicorn
from nebulo.gql.sqla_to_gql import sqla_models_to_graphql_schema
from nebulo.server.exception import http_exception
from nebulo.server.routes import get_graphiql_route, get_graphql_route
from sqlalchemy import Column, DateTime, ForeignKey, Integer, Text, create_engine
from sqlalchemy import text as sql_text
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
# Config
DATABASE_URI = "postgresql://app_user:app_password@localhost:5522/nebulo_example"
#####################
# SQLAlchemy Models #
#####################
Base = declarative_base()
class Author(Base):
__tablename__ = "author"
id = Column(Integer, primary_key=True, comment="@exclude create, update")
name = Column(Text, nullable=False)
created_at = Column(
DateTime,
nullable=False,
server_default=sql_text("now()"),
comment="@exclude create, update",
)
books = relationship("Book", uselist=True, backref="Author")
class Book(Base):
__tablename__ = "book"
id = Column(Integer, primary_key=True, comment="@exclude create, update")
title = Column(Text, nullable=False)
author_id = Column(Integer, ForeignKey("author.id"), nullable=False)
created_at = Column(
DateTime,
nullable=False,
default=sql_text("now()"),
comment="@exclude create, update",
)
# backref: Author
#################################
# Starlette Application Factory #
#################################
def create_app(connection_str, sqla_models) -> Starlette:
"""Create the Starlette app"""
async_connection = connection_str.replace("postgresql://", "postgresql+asyncpg://")
engine = create_async_engine(async_connection)
# Convert sqla models to graphql schema
gql_schema = sqla_models_to_graphql_schema(sqla_models)
# Build the Starlette GraphQL Route
graphql_route = get_graphql_route(gql_schema=gql_schema, engine=engine)
# Build the Starlette GraphiQL Route and StaticFiles
graphiql_route = get_graphiql_route()
# Instantiate the Starlette app
_app = Starlette(
routes=[graphql_route, graphiql_route],
exception_handlers={HTTPException: http_exception},
on_startup=[engine.connect],
on_shutdown=[engine.dispose],
)
return _app
# Instantiate the app
APP = create_app(connection_str=DATABASE_URI, sqla_models=[Author, Book])
if __name__ == "__main__":
# Create Tables
with create_engine(DATABASE_URI).connect() as sqla_engine:
Base.metadata.create_all(bind=sqla_engine)
uvicorn.run(
"app:APP",
host="0.0.0.0",
port=5084,
log_level="info",
reload=False,
)
|
495268
|
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
def inrange(v, shape):
return 0 <= v[0] < shape[0] and 0 <= v[1] < shape[1]
def to_int(x):
return tuple(map(int, x))
def save_heatmap(prefix, image, lines):
im_rescale = (512, 512)
heatmap_scale = (128, 128)
# fy, fx = heatmap_scale[1] / image.shape[0], heatmap_scale[0] / image.shape[1]
lcmap = np.zeros(heatmap_scale, dtype=np.float32) # (128, 128)
lcoff = np.zeros((2,) + heatmap_scale, dtype=np.float32) # (2, 128, 128)
lleng = np.zeros(heatmap_scale, dtype=np.float32) # (128, 128)
angle = np.zeros(heatmap_scale, dtype=np.float32) # (128, 128)
# the coordinate of lines can not equal to 128 (less than 128).
# lines[:, :, 0] = np.clip(lines[:, :, 0] * fx, 0, heatmap_scale[0] - 1e-4)
# lines[:, :, 1] = np.clip(lines[:, :, 1] * fy, 0, heatmap_scale[1] - 1e-4)
# lines = lines[:, :, ::-1] # change position of x and y --> (r, c)
for v0, v1 in lines:
v = (v0 + v1) / 2
vint = to_int(v)
lcmap[vint] = 1
lcoff[:, vint[0], vint[1]] = v - vint - 0.5
lleng[vint] = np.sqrt(np.sum((v0 - v1) ** 2)) / 2 # L
if v0[0] <= v[0]:
vv = v0
else:
vv = v1
# the angle under the image coordinate system (r, c)
# theta means the component along the c direction on the unit vector
if np.sqrt(np.sum((vv - v) ** 2)) <= 1e-4:
continue
angle[vint] = np.sum((vv - v) * np.array([0., 1.])) / np.sqrt(np.sum((vv - v) ** 2)) # theta
# the junction coordinate(image coordinate system) of line can be recovered by follows:
# direction = [-sqrt(1-theta^2), theta]
# (-sqrt(1-theta^2) means the component along the r direction on the unit vector, it always negative.)
# center = coordinate(lcmap) + offset + 0.5
# J = center (+-) direction * lleng (+-) means two end points
# image = cv2.resize(image, im_rescale)
# plt.figure()
# plt.imshow(image)
# for v0, v1 in lines:
# plt.plot([v0[1] * 4, v1[1] * 4], [v0[0] * 4, v1[0] * 4])
# plt.savefig(f"{prefix[-8:]}_line_gt.png", dpi=200), plt.close()
# return
# coor = np.argwhere(lcmap == 1)
# for yx in coor:
# offset = lcoff[:, int(yx[0]), int(yx[1])]
# length = lleng[int(yx[0]), int(yx[1])]
# theta = angle[int(yx[0]), int(yx[1])]
#
# center = yx + offset
# d = np.array([-np.sqrt(1-theta**2), theta])
# plt.scatter(center[1]*4, center[0]*4, c="b")
#
# plt.arrow(center[1]*4, center[0]*4, d[1]*length*4, d[0]*length*4,
# length_includes_head=True,
# head_width=15, head_length=25, fc='r', ec='b')
# plt.savefig(f"{prefix}_line.png", dpi=200), plt.close()
# plt.subplot(122), \
# plt.imshow(image)
# coor = np.argwhere(lcmap == 1)
# for yx in coor:
# offset = lcoff[:, int(yx[0]), int(yx[1])]
# length = lleng[int(yx[0]), int(yx[1])]
# theta = angle[int(yx[0]), int(yx[1])]
#
# center = yx + offset
# d = np.array([-np.sqrt(1-theta**2), theta])
#
# n0 = center + d * length
# n1 = center - d * length
# plt.plot([n0[1] * 4, n1[1] * 4], [n0[0] * 4, n1[0] * 4])
# plt.savefig(f"{prefix[-8:]}_line.png", dpi=100), plt.close()
np.savez_compressed(
f"{prefix}_line.npz",
# aspect_ratio=image.shape[1] / image.shape[0],
lcmap=lcmap,
lcoff=lcoff,
lleng=lleng,
angle=angle,
)
# cv2.imwrite(f"{prefix}.png", image)
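# Hedged decoding sketch mirroring the recovery formula spelled out in the comments above
# (offsets are stored as v - vint - 0.5, so the center is index + offset + 0.5; the name
# `decode_heatmap` is not part of the original pipeline):
def decode_heatmap(lcmap, lcoff, lleng, angle):
    lines = []
    for r, c in np.argwhere(lcmap == 1):
        center = np.array([r, c], dtype=np.float32) + lcoff[:, r, c] + 0.5
        theta = angle[r, c]
        direction = np.array([-np.sqrt(max(1.0 - theta ** 2, 0.0)), theta])
        lines.append((center + direction * lleng[r, c], center - direction * lleng[r, c]))
    return lines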
if __name__ == '__main__':
root = "/home/dxl/Data/york/valid/"
filelist = glob.glob(f"{root}/*_label.npz")
for file in filelist:
with np.load(file) as npz:
lines = npz["lpos"][:, :, :2]
# image = cv2.imread(file.replace("_label.npz", ".png"))
image = None
save_heatmap(file[:-10], image, lines)
print(file)
|
495315
|
import xlwt
book = xlwt.Workbook()
for magn in (0, 60, 100, 75, 150):
for preview in (False, True):
sheet = book.add_sheet('magn%d%s' % (magn, "np"[preview]))
if preview:
sheet.preview_magn = magn
else:
sheet.normal_magn = magn
sheet.page_preview = preview
for rowx in range(100):
sheet.write(rowx, 0, "Some text")
book.save("zoom_magnification.xls")
|
495320
|
import argparse
from utils import get_logger
logger = get_logger()
arg_lists = []
parser = argparse.ArgumentParser()
def str2bool(v):
    return v.lower() == 'true'
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--network_type', type=str, choices=['seq2seq', 'classification'], default='classification')
net_arg.add_argument('--nas_type', type=str, choices=['NAS_RNN'], default='NAS_RNN')
net_arg.add_argument('--nas', type=str2bool, default=True)
# Controller
net_arg.add_argument('--num_blocks', type=int, default=6)
net_arg.add_argument('--use_highway_connections', type=str2bool, default=True)
net_arg.add_argument('--tie_weights', type=str2bool, default=True)
net_arg.add_argument('--controller_hid', type=int, default=100)
# Shared parameters for PTB
net_arg.add_argument('--model_type', type=str, default='max-pool', choices=['max-pool'])
net_arg.add_argument('--dropout', type=float, default=0.5)
net_arg.add_argument('--dropoute', type=float, default=0.1)
net_arg.add_argument('--dropouti', type=float, default=0.65)
net_arg.add_argument('--dropouth', type=float, default=0.3)
net_arg.add_argument('--use_variational_dropout', type=str2bool, default=False)
net_arg.add_argument('--weight_init', type=float, default=None)
net_arg.add_argument('--cell_type', type=str, default='lstm', choices=['lstm','gru'])
net_arg.add_argument('--birnn', type=str2bool, default=True)
net_arg.add_argument('--embed', type=int, default=256)
net_arg.add_argument('--hid', type=int, default=256)
net_arg.add_argument('--hidden_varient', type=str, default='gru', choices=['gru','simple'])
net_arg.add_argument('--use_bias', type=str2bool, default=False)
net_arg.add_argument('--num_layers', type=int, default=1)
net_arg.add_argument('--num_classes', type=int, default=2)
net_arg.add_argument('--rnn_max_length', type=int, default=35)
net_arg.add_argument('--encoder_rnn_max_length', type=int, default=50)
net_arg.add_argument('--decoder_rnn_max_length', type=int, default=20)
net_arg.add_argument('--max_vocab_size', type=int, default=10000)
net_arg.add_argument('--beam_size', type=int, default=1)
net_arg.add_argument('--rnn_activations', type=eval,
default="['tanh', 'ReLU', 'identity', 'sigmoid']")
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--dataset', type=str, default='')
data_arg.add_argument('--vocab_file', type=str, default='data/glue_tasks/qnli/vocab')
data_arg.add_argument('--use_glove_emb', type=str2bool, default=False)
data_arg.add_argument('--use_elmo', type=str2bool, default=True)
data_arg.add_argument('--use_precomputed_elmo', type=str2bool, default=True)
data_arg.add_argument('--use_bert', type=str2bool, default=False)
data_arg.add_argument('--glove_file_path', type=str, default='')
# Training / test parameters
learn_arg = add_argument_group('Learning')
learn_arg.add_argument('--mode', type=str, default='train',
choices=['train', 'derive', 'test', 'retrain', 'retest', 'val'],
help='train: Training ENAS, derive: Deriving Architectures')
learn_arg.add_argument('--batch_size', type=int, default=64)
learn_arg.add_argument('--use_cas', type=str2bool, default=False)
learn_arg.add_argument('--test_batch_size', type=int, default=1)
learn_arg.add_argument('--max_epoch', type=int, default=20)
learn_arg.add_argument('--entropy_mode', type=str, default='reward', choices=['reward', 'regularizer'])
learn_arg.add_argument('--use_l2_regularization', type=str2bool, default=False)
learn_arg.add_argument('--l2_reg_lambda', type=float, default=1e-7)
learn_arg.add_argument('--use_block_sparse_regularization', type=str2bool, default=False)
learn_arg.add_argument('--block_sparse_reg_lambda', type=float, default=1e-7)
learn_arg.add_argument('--use_alcl_condition2', type=str2bool, default=False)
learn_arg.add_argument('--alcl_l2_reg_lambda', type=float, default=1e-7)
learn_arg.add_argument('--orthogonal_reg_lambda', type=float, default=1e-7)
# Controller
learn_arg.add_argument('--ppl_square', type=str2bool, default=False)
learn_arg.add_argument('--reward_type', type=str, default='CIDEr')
learn_arg.add_argument('--num_reward_batches', type=int, default=1)
learn_arg.add_argument('--reward_c', type=int, default=80)
learn_arg.add_argument('--ema_baseline_decay', type=float, default=0.95)
learn_arg.add_argument('--discount', type=float, default=1.0)
learn_arg.add_argument('--controller_max_step', type=int, default=500,
help='step for controller parameters')
learn_arg.add_argument('--controller_optim', type=str, default='adam')
learn_arg.add_argument('--controller_lr', type=float, default=3.5e-4,
help="will be ignored if --controller_lr_cosine=True")
learn_arg.add_argument('--controller_grad_clip', type=float, default=0)
learn_arg.add_argument('--tanh_c', type=float, default=2.5)
learn_arg.add_argument('--softmax_temperature', type=float, default=5.0)
learn_arg.add_argument('--entropy_coeff', type=float, default=1e-4)
learn_arg.add_argument('--use_softmax_tanh_c_temperature', type=str2bool, default=False)
learn_arg.add_argument('--use_softmax_tanh_c', type=str2bool, default=False)
# Shared parameters
learn_arg.add_argument('--initial_step', type=int, default=0)
learn_arg.add_argument('--max_step', type=int, default=200,
help='step for model parameters')
learn_arg.add_argument('--num_sample', type=int, default=1,
help='# of Monte Carlo samples')
learn_arg.add_argument('--optim', type=str, default='adam')
learn_arg.add_argument('--lr', type=float, default=0.001)
learn_arg.add_argument('--use_decay_lr', type=str2bool, default=False)
learn_arg.add_argument('--decay', type=float, default=0.96)
learn_arg.add_argument('--decay_after', type=float, default=15)
learn_arg.add_argument('--l2_reg', type=float, default=1e-7)
learn_arg.add_argument('--grad_clip', type=float, default=0.25)
learn_arg.add_argument('--use_batchnorm', type=str2bool, default=False)
learn_arg.add_argument('--use_node_batchnorm', type=str2bool, default=False)
learn_arg.add_argument('--batchnorm_momentum', type=float, default=0.1)
# Deriving Architectures
learn_arg.add_argument('--derive_num_sample', type=int, default=100)
# Misc
misc_arg = add_argument_group('Misc')
misc_arg.add_argument('--model_name', type=str, default='')
misc_arg.add_argument('--load_path', type=str, default='')
misc_arg.add_argument('--load_dag', type=str, default='')
misc_arg.add_argument('--continue_training', type=str2bool, default=False)
misc_arg.add_argument('--use_alcl', type=str2bool, default=False)
misc_arg.add_argument('--multitask', type=str, default=None)
misc_arg.add_argument('--log_step', type=int, default=50)
misc_arg.add_argument('--save_epoch', type=int, default=1)
misc_arg.add_argument('--save_criteria', type=str, default='acc', choices=['acc','CIDEr', 'AVG', 'F1', 'invppl'])
misc_arg.add_argument('--max_save_num', type=int, default=4)
misc_arg.add_argument('--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'])
misc_arg.add_argument('--log_dir', type=str, default='logs')
misc_arg.add_argument('--data_dir', type=str, default='data')
misc_arg.add_argument('--num_gpu', type=int, default=1)
misc_arg.add_argument('--random_seed', type=int, default=1111)
misc_arg.add_argument('--use_tensorboard', type=str2bool, default=True)
def get_args():
args, unparsed = parser.parse_known_args()
#print(args.multitask)
if args.multitask is not None:
args.multitask = args.multitask.split(",")
if args.num_gpu > 0:
setattr(args, 'cuda', True)
else:
setattr(args, 'cuda', False)
if len(unparsed) > 1:
logger.info(f"Unparsed args: {unparsed}")
return args, unparsed
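# Minimal usage sketch (added for illustration; the training scripts of this repository are
# assumed to consume the parsed arguments in a similar way):
if __name__ == '__main__':
    example_args, example_unparsed = get_args()
    logger.info(f"network_type={example_args.network_type}, cuda={example_args.cuda}")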
|
495334
|
import os
from FinRL.finrl import config
from FinRL.finrl import config_tickers
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
from FinRL.finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from FinRL.finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
import pandas as pd
df = YahooDownloader(start_date = '2008-01-01',
end_date = '2022-06-02',
ticker_list = config_tickers.DOW_30_TICKER).fetch_data()
fe = FeatureEngineer(
use_technical_indicator=True,
use_turbulence=False,
user_defined_feature = False)
df = fe.preprocess_data(df)
# add covariance matrix as states
df = df.sort_values(['date', 'tic'], ignore_index=True)
df.index = df.date.factorize()[0]
cov_list = []
return_list = []
# look back is one year
lookback = 252
for i in range(lookback, len(df.index.unique())):
data_lookback = df.loc[i - lookback:i, :]
price_lookback = data_lookback.pivot_table(index='date', columns='tic', values='close')
return_lookback = price_lookback.pct_change().dropna()
return_list.append(return_lookback)
covs = return_lookback.cov().values
cov_list.append(covs)
df_cov = pd.DataFrame({'date': df.date.unique()[lookback:], 'cov_list': cov_list, 'return_list': return_list})
df = df.merge(df_cov, on='date')
df = df.sort_values(['date', 'tic']).reset_index(drop=True)
train = data_split(df, '2009-04-01','2020-03-31')
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from stable_baselines3.common.vec_env import DummyVecEnv
class StockPortfolioEnv(gym.Env):
"""A portfolio allocation environment for OpenAI gym
Attributes
----------
df: DataFrame
input data
stock_dim : int
number of unique stocks
hmax : int
maximum number of shares to trade
initial_amount : int
start money
transaction_cost_pct: float
transaction cost percentage per trade
reward_scaling: float
scaling factor for reward, good for training
state_space: int
the dimension of input features
action_space: int
equals stock dimension
tech_indicator_list: list
a list of technical indicator names
turbulence_threshold: int
a threshold to control risk aversion
day: int
an increment number to control date
Methods
-------
_sell_stock()
perform sell action based on the sign of the action
_buy_stock()
perform buy action based on the sign of the action
step()
at each step the agent will return actions, then
we will calculate the reward, and return the next observation.
reset()
reset the environment
render()
        return the current state of the environment (used for rendering)
save_asset_memory()
return account value at each time step
save_action_memory()
return actions/positions at each time step
"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
transaction_cost_pct,
reward_scaling,
state_space,
action_space,
tech_indicator_list,
turbulence_threshold=None,
lookback=252,
day=0):
# super(StockEnv, self).__init__()
# money = 10 , scope = 1
self.day = day
self.lookback = lookback
self.df = df
self.stock_dim = stock_dim
self.hmax = hmax
self.initial_amount = initial_amount
self.transaction_cost_pct = transaction_cost_pct
self.reward_scaling = reward_scaling
self.state_space = state_space
self.action_space = action_space
self.tech_indicator_list = tech_indicator_list
# action_space normalization and shape is self.stock_dim
self.action_space = spaces.Box(low=0, high=1, shape=(self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(self.state_space + len(self.tech_indicator_list), self.state_space))
# load data from a pandas dataframe
self.data = self.df.loc[self.day, :]
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
self.terminal = False
self.turbulence_threshold = turbulence_threshold
        # initialize state: initial portfolio return + individual stock return + individual weights
self.portfolio_value = self.initial_amount
# memorize portfolio value each step
self.asset_memory = [self.initial_amount]
# memorize portfolio return each step
self.portfolio_return_memory = [0]
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
self.date_memory = [self.data.date.unique()[0]]
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique()) - 1
if self.terminal:
df = pd.DataFrame(self.portfolio_return_memory)
df.columns = ['daily_return']
plt.plot(df.daily_return.cumsum(), 'r')
plt.savefig('results/cumulative_reward.png')
plt.close()
plt.plot(self.portfolio_return_memory, 'r')
plt.savefig('results/rewards.png')
plt.close()
print("=================================")
print("begin_total_asset:{}".format(self.asset_memory[0]))
print("end_total_asset:{}".format(self.portfolio_value))
df_daily_return = pd.DataFrame(self.portfolio_return_memory)
df_daily_return.columns = ['daily_return']
if df_daily_return['daily_return'].std() != 0:
sharpe = (252 ** 0.5) * df_daily_return['daily_return'].mean() / \
df_daily_return['daily_return'].std()
print("Sharpe: ", sharpe)
print("=================================")
return self.state, self.reward, self.terminal, {}
else:
weights = self.softmax_normalization(actions)
self.actions_memory.append(weights)
last_day_memory = self.data
# load next state
self.day += 1
self.data = self.df.loc[self.day, :]
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
portfolio_return = sum(((self.data.close.values / last_day_memory.close.values) - 1) * weights)
log_portfolio_return = np.log(sum((self.data.close.values / last_day_memory.close.values) * weights))
# update portfolio value
new_portfolio_value = self.portfolio_value * (1 + portfolio_return)
self.portfolio_value = new_portfolio_value
# save into memory
self.portfolio_return_memory.append(portfolio_return)
self.date_memory.append(self.data.date.unique()[0])
self.asset_memory.append(new_portfolio_value)
            # the reward is the new portfolio value or end portfolio value
self.reward = new_portfolio_value
return self.state, self.reward, self.terminal, {}
def reset(self):
self.asset_memory = [self.initial_amount]
self.day = 0
self.data = self.df.loc[self.day, :]
# load states
self.covs = self.data['cov_list'].values[0]
self.state = np.append(np.array(self.covs),
[self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)
self.portfolio_value = self.initial_amount
# self.cost = 0
# self.trades = 0
self.terminal = False
self.portfolio_return_memory = [0]
self.actions_memory = [[1 / self.stock_dim] * self.stock_dim]
self.date_memory = [self.data.date.unique()[0]]
return self.state
def render(self, mode='human'):
return self.state
def softmax_normalization(self, actions):
numerator = np.exp(actions)
denominator = np.sum(np.exp(actions))
softmax_output = numerator / denominator
return softmax_output
def save_asset_memory(self):
date_list = self.date_memory
portfolio_return = self.portfolio_return_memory
# print(len(date_list))
# print(len(asset_list))
df_account_value = pd.DataFrame({'date': date_list, 'daily_return': portfolio_return})
return df_account_value
def save_action_memory(self):
# date and close price length must match actions length
date_list = self.date_memory
df_date = pd.DataFrame(date_list)
df_date.columns = ['date']
action_list = self.actions_memory
df_actions = pd.DataFrame(action_list)
df_actions.columns = self.data.tic.values
df_actions.index = df_date.date
# df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
return df_actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
stock_dimension = len(train.tic.unique())
state_space = stock_dimension
print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
tech_indicator_list = ['macd', 'rsi_30', 'cci_30', 'dx_30']
feature_dimension = len(tech_indicator_list)
print(f"Feature Dimension: {feature_dimension}")
env_kwargs = {
"hmax": 100,
"initial_amount": 1000000,
"transaction_cost_pct": 0,
"state_space": state_space,
"stock_dim": stock_dimension,
"tech_indicator_list": tech_indicator_list,
"action_space": stock_dimension,
"reward_scaling": 1e-1
}
e_train_gym = StockPortfolioEnv(df=train, **env_kwargs)
env_train, _ = e_train_gym.get_sb_env()
print(type(env_train))
from FinRL.finrl.agents.stablebaselines3.models import DRLAgent
agent = DRLAgent(env = env_train)
A2C_PARAMS = {"n_steps": 10, "ent_coef": 0.005, "learning_rate": 0.0004}
model_a2c = agent.get_model(model_name="a2c",model_kwargs = A2C_PARAMS)
trained_a2c = agent.train_model(model=model_a2c, tb_log_name='a2c',
total_timesteps=40000)
agent = DRLAgent(env = env_train)
PPO_PARAMS = {
"n_steps": 2048,
"ent_coef": 0.005,
"learning_rate": 0.001,
"batch_size": 128,
}
model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS)
trained_ppo = agent.train_model(model=model_ppo,
tb_log_name='ppo',
total_timesteps=40000)
trade = data_split(df,'2020-04-01', '2022-05-31')
e_trade_gym = StockPortfolioEnv(df = trade, **env_kwargs)
import torch
import plotly.express as px
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
import pandas as pd
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import objective_functions
unique_tic = trade.tic.unique()
unique_trade_date = trade.date.unique()
import pyfolio
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline,convert_daily_return_to_pyfolio_ts
baseline_df = get_baseline(
ticker="^DJI",
start = '2020-07-01',
end = '2021-09-01')
baseline_df_stats = backtest_stats(baseline_df, value_col_name = 'close')
baseline_returns = get_daily_return(baseline_df, value_col_name="close")
dji_cumpod =(baseline_returns+1).cumprod()-1
from pyfolio import timeseries
df_daily_return_a2c, df_actions_a2c = DRLAgent.DRL_prediction(model=trained_a2c,
environment = e_trade_gym)
df_daily_return_ppo, df_actions_ppo = DRLAgent.DRL_prediction(model=trained_ppo,
environment = e_trade_gym)
time_ind = pd.Series(df_daily_return_a2c.date)
a2c_cumpod =(df_daily_return_a2c.daily_return+1).cumprod()-1
ppo_cumpod =(df_daily_return_ppo.daily_return+1).cumprod()-1
DRL_strat_a2c = convert_daily_return_to_pyfolio_ts(df_daily_return_a2c)
DRL_strat_ppo = convert_daily_return_to_pyfolio_ts(df_daily_return_ppo)
perf_func = timeseries.perf_stats
perf_stats_all_a2c = perf_func( returns=DRL_strat_a2c,
factor_returns=DRL_strat_a2c,
positions=None, transactions=None, turnover_denom="AGB")
perf_stats_all_ppo = perf_func( returns=DRL_strat_ppo,
factor_returns=DRL_strat_ppo,
positions=None, transactions=None, turnover_denom="AGB")
def extract_weights(drl_actions_list):
a2c_weight_df = {'date':[], 'weights':[]}
for i in range(len(drl_actions_list)):
date = drl_actions_list.index[i]
tic_list = list(drl_actions_list.columns)
weights_list = drl_actions_list.reset_index()[list(drl_actions_list.columns)].iloc[i].values
weight_dict = {'tic':[], 'weight':[]}
for j in range(len(tic_list)):
weight_dict['tic'] += [tic_list[j]]
weight_dict['weight'] += [weights_list[j]]
a2c_weight_df['date'] += [date]
a2c_weight_df['weights'] += [pd.DataFrame(weight_dict)]
a2c_weights = pd.DataFrame(a2c_weight_df)
return a2c_weights
a2c_weights = extract_weights(df_actions_a2c)
ppo_weights = extract_weights(df_actions_ppo)
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
def prepare_data(trainData):
train_date = sorted(set(trainData.date.values))
X = []
for i in range(0, len(train_date) - 1):
d = train_date[i]
d_next = train_date[i+1]
        y = trainData.loc[trainData['date'] == d_next].return_list.iloc[0].loc[d_next].reset_index()
        y.columns = ['tic', 'return']
        x = trainData.loc[trainData['date'] == d][['tic','macd','rsi_30','cci_30','dx_30']]
train_piece = pd.merge(x, y, on = 'tic')
train_piece['date'] = [d] * len(train_piece)
X += [train_piece]
trainDataML = pd.concat(X)
X = trainDataML[tech_indicator_list].values
Y = trainDataML[['return']].values
return X, Y
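# Note added for clarity: prepare_data pairs the technical indicators observed on day d with
# each ticker's realised return on day d+1, so the regressors fitted below learn to predict
# next-day returns from ['macd', 'rsi_30', 'cci_30', 'dx_30'].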
train_X, train_Y = prepare_data(train)
rf_model = RandomForestRegressor(max_depth = 35, min_samples_split = 10, random_state = 0).fit(train_X, train_Y.reshape(-1))
dt_model = DecisionTreeRegressor(random_state = 0, max_depth=35, min_samples_split = 10 ).fit(train_X, train_Y.reshape(-1))
svm_model = SVR(epsilon=0.14).fit(train_X, train_Y.reshape(-1))
lr_model = LinearRegression().fit(train_X, train_Y)
def output_predict(model, reference_model=False):
meta_coefficient = {"date": [], "weights": []}
portfolio = pd.DataFrame(index=range(1), columns=unique_trade_date)
initial_capital = 1000000
portfolio.loc[0, unique_trade_date[0]] = initial_capital
for i in range(len(unique_trade_date) - 1):
current_date = unique_trade_date[i]
next_date = unique_trade_date[i + 1]
df_current = df[df.date == current_date].reset_index(drop=True)
tics = df_current['tic'].values
features = df_current[tech_indicator_list].values
df_next = df[df.date == next_date].reset_index(drop=True)
if not reference_model:
predicted_y = model.predict(features)
mu = predicted_y
Sigma = risk_models.sample_cov(df_current.return_list[0], returns_data=True)
else:
mu = df_next.return_list[0].loc[next_date].values
Sigma = risk_models.sample_cov(df_next.return_list[0], returns_data=True)
predicted_y_df = pd.DataFrame({"tic": tics.reshape(-1, ), "predicted_y": mu.reshape(-1, )})
min_weight, max_weight = 0, 1
ef = EfficientFrontier(mu, Sigma)
weights = ef.nonconvex_objective(
objective_functions.sharpe_ratio,
objective_args=(ef.expected_returns, ef.cov_matrix),
weights_sum_to_one=True,
constraints=[
{"type": "ineq", "fun": lambda w: w - min_weight}, # greater than min_weight
{"type": "ineq", "fun": lambda w: max_weight - w}, # less than max_weight
],
)
weight_df = {"tic": [], "weight": []}
meta_coefficient["date"] += [current_date]
# it = 0
for item in weights:
weight_df['tic'] += [item]
weight_df['weight'] += [weights[item]]
weight_df = pd.DataFrame(weight_df).merge(predicted_y_df, on=['tic'])
meta_coefficient["weights"] += [weight_df]
cap = portfolio.iloc[0, i]
# current cash invested for each stock
current_cash = [element * cap for element in list(weights.values())]
# current held shares
current_shares = list(np.array(current_cash) / np.array(df_current.close))
# next time period price
next_price = np.array(df_next.close)
portfolio.iloc[0, i + 1] = np.dot(current_shares, next_price)
portfolio = portfolio.T
portfolio.columns = ['account_value']
portfolio = portfolio.reset_index()
portfolio.columns = ['date', 'account_value']
stats = backtest_stats(portfolio, value_col_name='account_value')
portfolio_cumprod = (portfolio.account_value.pct_change() + 1).cumprod() - 1
return portfolio, stats, portfolio_cumprod, pd.DataFrame(meta_coefficient)
lr_portfolio, lr_stats, lr_cumprod, lr_weights = output_predict(lr_model)
dt_portfolio, dt_stats, dt_cumprod, dt_weights = output_predict(dt_model)
svm_portfolio, svm_stats, svm_cumprod, svm_weights = output_predict(svm_model)
rf_portfolio, rf_stats, rf_cumprod, rf_weights = output_predict(rf_model)
reference_portfolio, reference_stats, reference_cumprod, reference_weights = output_predict(None, True)
def calculate_gradient(model, interpolated_input, actions, feature_idx, stock_idx, h = 1e-1):
    forward_input = interpolated_input.copy()  # copy so the perturbation below does not also modify interpolated_input
forward_input[feature_idx + stock_dimension][stock_idx] += h
forward_Q = model.policy.evaluate_actions(torch.FloatTensor(forward_input).reshape(-1,stock_dimension*(stock_dimension + feature_dimension)), torch.FloatTensor(actions).reshape(-1,stock_dimension))
interpolated_Q = model.policy.evaluate_actions(torch.FloatTensor(interpolated_input).reshape(-1,stock_dimension*(stock_dimension + feature_dimension)), torch.FloatTensor(actions).reshape(-1,stock_dimension))
forward_Q = forward_Q[0].detach().cpu().numpy()[0]
interpolated_Q = interpolated_Q[0].detach().cpu().numpy()[0]
return (forward_Q - interpolated_Q) / h
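# Note added for clarity: calculate_gradient is a forward finite-difference approximation
# dQ/dx ~= (Q(x + h) - Q(x)) / h of the critic value with respect to a single
# technical-indicator entry of one stock. The loop below appears to use it in an
# integrated-gradients-style saliency computation: the chosen feature column is interpolated
# from a zeroed baseline to its observed values in 50 steps, the gradients are averaged, and
# the result is scaled by (feature - baseline).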
import copy
meta_Q = {"date": [], "feature": [], "Saliency Map": [], "algo": []}
for algo in {"A2C", "PPO"}:
if algo == "A2C":
prec_step = 1e-2
else:
prec_step = 1e-1
model = eval("trained_" + algo.lower())
df_actions = eval("df_actions_" + algo.lower())
for i in range(len(unique_trade_date) - 1):
date = unique_trade_date[i]
covs = trade[trade['date'] == date].cov_list.iloc[0]
features = trade[trade['date'] == date][tech_indicator_list].values # N x K
actions = df_actions.loc[date].values
for feature_idx in range(len(tech_indicator_list)):
int_grad_per_feature = 0
for stock_idx in range(features.shape[0]): # N
int_grad_per_stock = 0
avg_interpolated_grad = 0
for alpha in range(1, 51):
scale = 1 / 50
baseline_features = copy.deepcopy(features)
baseline_noise = np.random.normal(0, 1, stock_dimension)
baseline_features[:, feature_idx] = [0] * stock_dimension
interpolated_features = baseline_features + scale * alpha * (features - baseline_features) # N x K
interpolated_input = np.append(covs, interpolated_features.T, axis=0)
interpolated_gradient = \
calculate_gradient(model, interpolated_input, actions, feature_idx, stock_idx, h=prec_step)[0]
avg_interpolated_grad += interpolated_gradient * scale
int_grad_per_stock = (features[stock_idx][feature_idx] - 0) * avg_interpolated_grad
int_grad_per_feature += int_grad_per_stock
meta_Q['date'] += [date]
meta_Q['algo'] += [algo]
meta_Q['feature'] += [tech_indicator_list[feature_idx]]
meta_Q['Saliency Map'] += [int_grad_per_feature]
meta_Q = pd.DataFrame(meta_Q)
import statsmodels.api as sm
meta_score_coef = {"date":[], "coef":[], "algo":[]}
for algo in ["LR", "RF", "Reference Model", "SVM", "DT", "A2C", "PPO"]:
if algo == "LR":
weights = lr_weights
elif algo == "RF":
weights = rf_weights
elif algo == "DT":
weights = dt_weights
elif algo == "SVM":
weights = svm_weights
elif algo == "A2C":
weights = a2c_weights
elif algo == "PPO":
weights = ppo_weights
else:
weights = reference_weights
for i in range(len(unique_trade_date) - 1):
date = unique_trade_date[i]
next_date = unique_trade_date[i+1]
df_temp = df[df.date==date].reset_index(drop=True)
df_temp_next = df[df.date==next_date].reset_index(drop=True)
weight_piece = weights[weights.date == date].iloc[0]['weights']
piece_return = pd.DataFrame(df_temp_next.return_list.iloc[0].loc[next_date]).reset_index()
piece_return.columns = ['tic', 'return']
X = df_temp[['macd','rsi_30', 'cci_30', 'dx_30', 'tic']]
X_next = df_temp_next[['macd','rsi_30', 'cci_30', 'dx_30', 'tic']]
piece = weight_piece.merge(X, on = 'tic').merge(piece_return, on = 'tic')
piece['Y'] = piece['return'] * piece['weight']
X = piece[['macd','rsi_30', 'cci_30', 'dx_30']]
X = sm.add_constant(X)
Y = piece[['Y']]
model = sm.OLS(Y,X)
results = model.fit()
meta_score_coef["coef"] += [(X * results.params).sum(axis = 0)]
meta_score_coef["date"] += [date]
meta_score_coef["algo"] += [algo]
meta_score_coef = pd.DataFrame(meta_score_coef)
performance_score = {"date":[], "algo":[], "score":[]}
for i in range(0, len(unique_trade_date)):
date_ = unique_trade_date[i]
if len(meta_score_coef[(meta_score_coef['date'] == date_)]) == 0:
continue
lr_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'LR')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
rf_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'RF')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
reference_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'Reference Model')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
dt_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'DT')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
svm_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'SVM')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
saliency_coef_a2c = meta_Q[(meta_Q['date'] == date_) & (meta_Q['algo'] == "A2C")]['Saliency Map'].values
saliency_coef_ppo = meta_Q[(meta_Q['date'] == date_) & (meta_Q['algo'] == "PPO")]['Saliency Map'].values
lr_score = np.corrcoef(lr_coef, reference_coef)[0][1]
rf_score = np.corrcoef(rf_coef, reference_coef)[0][1]
dt_score = np.corrcoef(dt_coef, reference_coef)[0][1]
svm_score = np.corrcoef(svm_coef, reference_coef)[0][1]
saliency_score_a2c = np.corrcoef(saliency_coef_a2c, reference_coef)[0][1]
saliency_score_ppo = np.corrcoef(saliency_coef_ppo, reference_coef)[0][1]
for algo in ["LR","A2C","PPO","RF","DT", "SVM"]:
performance_score["date"] += [date_]
performance_score["algo"] += [algo]
if algo == "LR":
score = lr_score
elif algo == "RF":
score = rf_score
elif algo == "DT":
score = dt_score
elif algo == "A2C":
score = saliency_score_a2c
elif algo == "SVM":
score = svm_score
else:
score = saliency_score_ppo
performance_score["score"] += [score]
performance_score = pd.DataFrame(performance_score)
multi_performance_score = {"date":[], "algo":[], "score":[]}
window = 20
for i in range(len(unique_trade_date) - window ):
date_ = unique_trade_date[i]
if len(meta_score_coef[(meta_score_coef['date'] == date_)]) == 0:
continue
lr_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'LR')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
rf_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'RF')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
reference_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'Reference Model')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
for w in range(1, window):
date_f = unique_trade_date[i + w]
prx_coef = meta_score_coef[(meta_score_coef['date'] == date_f) & (meta_score_coef['algo'] == 'Reference Model')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
reference_coef += prx_coef
reference_coef = reference_coef / window
dt_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'DT')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
svm_coef = meta_score_coef[(meta_score_coef['date'] == date_) & (meta_score_coef['algo'] == 'SVM')]['coef'].values[0][['macd','rsi_30','cci_30','dx_30']].values
saliency_coef_a2c = meta_Q[(meta_Q['date'] == date_) & (meta_Q['algo'] == "A2C")]['Saliency Map'].values
saliency_coef_ppo = meta_Q[(meta_Q['date'] == date_) & (meta_Q['algo'] == "PPO")]['Saliency Map'].values
lr_score = np.corrcoef(lr_coef, reference_coef)[0][1]
rf_score = np.corrcoef(rf_coef, reference_coef)[0][1]
dt_score = np.corrcoef(dt_coef, reference_coef)[0][1]
svm_score = np.corrcoef(svm_coef, reference_coef)[0][1]
saliency_score_a2c = np.corrcoef(saliency_coef_a2c, reference_coef)[0][1]
saliency_score_ppo = np.corrcoef(saliency_coef_ppo, reference_coef)[0][1]
for algo in ["LR", "A2C", "RF", "PPO", "DT", "SVM"]:
multi_performance_score["date"] += [date_]
multi_performance_score["algo"] += [algo]
if algo == "LR":
score = lr_score
elif algo == "RF":
score = rf_score
elif algo == "DT":
score = dt_score
elif algo == "A2C":
score = saliency_score_a2c
elif algo == "SVM":
score = svm_score
else:
score = saliency_score_ppo
multi_performance_score["score"] += [score]
multi_performance_score = pd.DataFrame(multi_performance_score)
from datetime import datetime as dt
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
trace1_portfolio = go.Scatter(x = time_ind, y = a2c_cumpod, mode = 'lines', name = 'A2C')
trace2_portfolio = go.Scatter(x = time_ind, y = ppo_cumpod, mode = 'lines', name = 'PPO')
trace3_portfolio = go.Scatter(x = time_ind, y = dji_cumpod, mode = 'lines', name = 'DJIA')
trace4_portfolio = go.Scatter(x = time_ind, y = lr_cumprod, mode = 'lines', name = 'LR')
trace5_portfolio = go.Scatter(x = time_ind, y = rf_cumprod, mode = 'lines', name = 'RF')
trace6_portfolio = go.Scatter(x = time_ind, y = dt_cumprod, mode = 'lines', name = 'DT')
trace7_portfolio = go.Scatter(x = time_ind, y = svm_cumprod, mode = 'lines', name = 'SVM')
fig = go.Figure()
fig.add_trace(trace1_portfolio)
fig.add_trace(trace2_portfolio)
fig.add_trace(trace3_portfolio)
fig.add_trace(trace4_portfolio)
fig.add_trace(trace5_portfolio)
fig.add_trace(trace6_portfolio)
fig.add_trace(trace7_portfolio)
fig.update_layout(
legend=dict(
x=0,
y=1,
traceorder="normal",
font=dict(
family="sans-serif",
size=15,
color="black"
),
bgcolor="White",
bordercolor="white",
borderwidth=2
),
)
fig.update_layout(title={
# 'text': "Cumulative Return using FinRL",
'y': 0.85,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.update_layout(
paper_bgcolor='rgba(1,1,0,0)',
plot_bgcolor='rgba(1, 1, 0, 0)',
xaxis_title="Date",
yaxis=dict(titlefont=dict(size=30), title="Cumulative Return"),
font=dict(
size=40,
),
)
fig.update_layout(font_size=20)
fig.update_traces(line=dict(width=2))
fig.update_xaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(zeroline=True, zerolinewidth=1, zerolinecolor='LightSteelBlue')
fig.show()
meta_score = {"Annual return":[], "Annual volatility":[], "Max drawdown":[], "Sharpe ratio":[], "Algorithm":[], "Calmar ratio":[]}
for name in ["LR", "A2C", "RF", "Reference Model", "PPO", "SVM", "DT", "DJI"]:
if name == "DT":
annualreturn = dt_stats["Annual return"]
annualvol = dt_stats["Annual volatility"]
sharpeRatio = dt_stats["Sharpe ratio"]
maxdradown = dt_stats["Max drawdown"]
calmarratio = dt_stats["Calmar ratio"]
elif name == "LR":
annualreturn = lr_stats["Annual return"]
annualvol = lr_stats["Annual volatility"]
sharpeRatio = lr_stats["Sharpe ratio"]
maxdradown = lr_stats["Max drawdown"]
calmarratio = lr_stats["Calmar ratio"]
elif name == "SVM":
annualreturn = svm_stats["Annual return"]
annualvol = svm_stats["Annual volatility"]
sharpeRatio = svm_stats["Sharpe ratio"]
maxdradown = svm_stats["Max drawdown"]
calmarratio = svm_stats["Calmar ratio"]
elif name == "RF":
annualreturn = rf_stats["Annual return"]
annualvol = rf_stats["Annual volatility"]
sharpeRatio = rf_stats["Sharpe ratio"]
maxdradown = rf_stats["Max drawdown"]
calmarratio = rf_stats["Calmar ratio"]
elif name == "Reference Model":
annualreturn = reference_stats["Annual return"]
annualvol = reference_stats["Annual volatility"]
sharpeRatio = reference_stats["Sharpe ratio"]
maxdradown = reference_stats["Max drawdown"]
calmarratio = reference_stats["Calmar ratio"]
elif name == "PPO":
annualreturn = perf_stats_all_ppo["Annual return"]
annualvol = perf_stats_all_ppo["Annual volatility"]
sharpeRatio = perf_stats_all_ppo["Sharpe ratio"]
maxdradown = perf_stats_all_ppo["Max drawdown"]
calmarratio = perf_stats_all_ppo["Calmar ratio"]
elif name == "DJI":
annualreturn = baseline_df_stats["Annual return"]
annualvol = baseline_df_stats["Annual volatility"]
sharpeRatio = baseline_df_stats["Sharpe ratio"]
maxdradown = baseline_df_stats["Max drawdown"]
calmarratio = baseline_df_stats["Calmar ratio"]
else:
annualreturn = perf_stats_all_a2c["Annual return"]
annualvol = perf_stats_all_a2c["Annual volatility"]
sharpeRatio = perf_stats_all_a2c["Sharpe ratio"]
maxdradown = perf_stats_all_a2c["Max drawdown"]
calmarratio = perf_stats_all_a2c["Calmar ratio"]
meta_score["Algorithm"] += [name]
meta_score["Annual return"] += [annualreturn]
meta_score["Annual volatility"] += [annualvol]
meta_score["Max drawdown"] += [maxdradown]
meta_score["Sharpe ratio"] += [sharpeRatio]
meta_score["Calmar ratio"] += [calmarratio]
meta_score = pd.DataFrame(meta_score).sort_values("Sharpe ratio")
postiveRatio = pd.DataFrame(performance_score.groupby("algo").apply(lambda x : np.mean(x['score'])))
postiveRatio = postiveRatio.reset_index()
postiveRatio.columns = ['algo', 'avg_correlation_coefficient']
postiveRatio['Sharpe Ratio'] = [0] * 6
# postiveRatio.plot.bar(x = 'algo', y = 'avg_correlation_coefficient')
postiveRatiom = pd.DataFrame(multi_performance_score.groupby("algo").apply(lambda x : np.mean(x['score'])))
postiveRatiom = postiveRatiom.reset_index()
postiveRatiom.columns = ['algo', 'avg_correlation_coefficient']
postiveRatiom['Sharpe Ratio'] = [0] * 6
# postiveRatiom.plot.bar(x = 'algo', y = 'avg_correlation_coefficient')
for algo in ['A2C', 'PPO', 'LR','DT', 'RF', 'SVM']:
postiveRatio.loc[postiveRatio['algo'] == algo, 'Sharpe Ratio'] = meta_score.loc[meta_score['Algorithm'] == algo,'Sharpe ratio'].values[0]
    postiveRatiom.loc[postiveRatiom['algo'] == algo, 'Sharpe Ratio'] = meta_score.loc[meta_score['Algorithm'] == algo,'Sharpe ratio'].values[0]
postiveRatio.sort_values("Sharpe Ratio", inplace= True)
postiveRatiom.sort_values("Sharpe Ratio", inplace= True)
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Scatter(x=postiveRatiom['algo'], y=postiveRatiom['Sharpe Ratio'], name="Sharpe Ratio", marker_size=15,
line_width=5),
secondary_y=True,
)
fig.add_trace(
go.Bar(x=postiveRatiom['algo'], y=postiveRatiom['avg_correlation_coefficient'],
name="Multi-Step Average Correlation Coefficient ", width
=0.38),
secondary_y=False,
)
fig.add_trace(
go.Bar(x=postiveRatio['algo'], y=postiveRatio['avg_correlation_coefficient'],
name="Single-Step Average Correlation Coefficient ", width
=0.38),
secondary_y=False,
)
fig.update_layout(
paper_bgcolor='rgba(1,1,0,0)',
plot_bgcolor='rgba(1, 1, 0, 0)',
)
fig.update_layout(legend=dict(
yanchor="top",
y=1.5,
xanchor="right",
x=0.95
))
fig.update_layout(font_size=15)
# Set x-axis title
fig.update_xaxes(title_text="Model")
fig.update_xaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(showline=True, linecolor='black', showgrid=True, secondary_y=False, gridwidth=1,
gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(zeroline=True, zerolinewidth=1, zerolinecolor='LightSteelBlue')
# Set y-axes titles
fig.update_yaxes(title_text="Average Correlation Coefficient", secondary_y=False, range=[-0.1, 0.1])
fig.update_yaxes(title_text="Sharpe Ratio", secondary_y=True, range=[-0.5, 2.5])
fig.show()
import plotly.graph_objects as go
from plotly.subplots import make_subplots
fig = make_subplots(rows=2, cols=3)
trace0 = go.Histogram(x=performance_score[performance_score['algo'] == 'A2C']['score'].values, nbinsx=25, name='A2C',
histnorm='probability')
trace1 = go.Histogram(x=performance_score[performance_score['algo'] == 'PPO']['score'].values, nbinsx=25, name='PPO',
histnorm='probability')
trace2 = go.Histogram(x=performance_score[performance_score['algo'] == 'DT']['score'].values, nbinsx=25, name='DT',
histnorm='probability')
trace3 = go.Histogram(x=performance_score[performance_score['algo'] == 'LR']['score'].values, nbinsx=25, name='LR',
histnorm='probability')
trace4 = go.Histogram(x=performance_score[performance_score['algo'] == 'SVM']['score'].values, nbinsx=25, name='SVM',
histnorm='probability')
trace5 = go.Histogram(x=performance_score[performance_score['algo'] == 'RF']['score'].values, nbinsx=25, name='RF',
histnorm='probability')
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig.append_trace(trace2, 1, 3)
fig.append_trace(trace3, 2, 1)
fig.append_trace(trace4, 2, 2)
fig.append_trace(trace5, 2, 3)
# Update xaxis properties
fig.update_xaxes(title_text="Correlation coefficient", row=2, col=2)
fig.update_yaxes(title_text="Frequency", row=1, col=1)
fig.update_yaxes(title_text="Frequency", row=2, col=1)
fig.update_layout(
paper_bgcolor='rgba(1,1,0,0)',
plot_bgcolor='rgba(1, 1, 0, 0)',
font=dict(
size=18,
),
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=1
))
fig.update_xaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(zeroline=True, zerolinewidth=1, zerolinecolor='LightSteelBlue')
fig.show()
import plotly.graph_objects as go
from plotly.subplots import make_subplots
fig = make_subplots(rows=2, cols=3)
trace0 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'A2C']['score'].values, nbinsx=25,
name='A2C', histnorm='probability')
trace1 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'PPO']['score'].values, nbinsx=25,
name='PPO', histnorm='probability')
trace2 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'DT']['score'].values, nbinsx=25,
name='DT', histnorm='probability')
trace3 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'LR']['score'].values, nbinsx=25,
name='LR', histnorm='probability')
trace4 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'SVM']['score'].values, nbinsx=25,
name='SVM', histnorm='probability')
trace5 = go.Histogram(x=multi_performance_score[multi_performance_score['algo'] == 'RF']['score'].values, nbinsx=25,
name='RF', histnorm='probability')
fig.update_layout(yaxis1=dict(range=[0, 0.2]))
fig.update_layout(yaxis2=dict(range=[0, 0.2]))
fig.update_layout(yaxis3=dict(range=[0, 0.4]))
fig.update_layout(yaxis4=dict(range=[0, 0.4]))
fig.update_layout(yaxis5=dict(range=[0, 0.4]))
fig.update_layout(yaxis6=dict(range=[0, 0.4]))
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 2)
fig.append_trace(trace2, 1, 3)
fig.append_trace(trace3, 2, 1)
fig.append_trace(trace4, 2, 2)
fig.append_trace(trace5, 2, 3)
# Update xaxis properties
fig.update_xaxes(title_text="Correlation coefficient", row=2, col=2)
fig.update_yaxes(title_text="Frequency", row=1, col=1)
fig.update_yaxes(title_text="Frequency", row=2, col=1)
fig.update_layout(
paper_bgcolor='rgba(1,1,0,0)',
plot_bgcolor='rgba(1, 1, 0, 0)',
font=dict(
size=18,
),
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=1
))
fig.update_xaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(showline=True, linecolor='black', showgrid=True, gridwidth=1, gridcolor='LightSteelBlue', mirror=True)
fig.update_yaxes(zeroline=True, zerolinewidth=1, zerolinecolor='LightSteelBlue')
fig.show()
pass
|
495380
|
import json
import os
from os import listdir
import os.path as osp
import pickle
import time
from collections import OrderedDict, Counter
import argparse
import numpy as np
import pathlib
import joblib
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from TeachMyAgent.run_utils.environment_args_handler import EnvironmentArgsHandler
from TeachMyAgent.run_utils.student_args_handler import StudentArgsHandler
from TeachMyAgent.teachers.teacher_controller import param_vec_to_param_dict
from TeachMyAgent.students.spinup.utils.test_policy import load_policy_and_env as spinup_load_policy
from TeachMyAgent.students.openai_baselines.ppo2.ppo2 import get_model as get_baselines_model
from TeachMyAgent.students.ppo_utils import create_custom_vec_normalized_envs
def get_student_type(save_path):
'''
    Returns 'spinup' or 'baselines' depending on what the log files look like.
Args:
save_path (str): Path containing logs
'''
for root, _, files in os.walk(save_path):
if 'progress.txt' in files: # Spinup
return 'spinup'
elif 'progress.csv' in files: # OpenAI Baselines
return 'baselines'
def load_training_infos(save_path):
'''
Load hyperparameters stored in config.json.
Args:
save_path (str): Path containing logs
'''
with open(osp.join(save_path, 'config.json')) as json_file:
training_config = json.load(json_file)
return training_config
def get_baselines_last_checkpoint(path):
'''
OpenAI Baselines students save multiple checkpoints of the model. This function only loads the last one.
Args:
        path (str): Path containing checkpoints
'''
last_checkpoint = -1
for f in listdir(path):
if osp.isfile(osp.join(path, f)):
try:
checkpoint = int(f)
last_checkpoint = f if checkpoint > int(last_checkpoint) else last_checkpoint
except Exception:
continue
return last_checkpoint
def load_env_params(save_path):
'''
Load book-keeped information (e.g. training and test tasks along with the obtained reward).
Args:
save_path (str): Path containing logs
'''
with open(osp.join(save_path, 'env_params_save.pkl'), "rb") as file:
teacher_dict = pickle.load(file)
return teacher_dict
def get_training_test_size(teacher_dict):
'''
Calculate size of test set used during training.
Args:
teacher_dict (dict): Dictionary of loaded logs.
'''
param_to_count = teacher_dict["env_params_test"][0]
nb_of_epochs = 0
for param in teacher_dict["env_params_test"]:
if (param_to_count == param).all():
nb_of_epochs += 1
return int(len(teacher_dict["env_params_test"]) / nb_of_epochs)
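# Worked example of the counting above (illustrative numbers): if the teacher evaluated the
# student over 5 test epochs on a fixed set of 100 tasks, "env_params_test" holds 500 entries
# and the first task reappears 5 times, so the test set size is 500 / 5 = 100.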
def load_training_test_set(save_path, order_by_best_rewards=None):
'''
Load test set used during training.
Args:
save_path (str): Path containing logs
order_by_best_rewards (str): If None => Do not order test set.
If True => Order test set using rewards obtained from greatest to lowest
If False => Order test set using rewards obtained from lowest to greatest
Returns:
list of tasks and list associated rewards
'''
### Get last training test episodes and sort them by total reward
teacher_dict = load_env_params(save_path)
test_set_size = get_training_test_size(teacher_dict)
test_params_to_use = teacher_dict["env_params_test"][-test_set_size:] # nth last
test_rewards_to_use = teacher_dict["env_test_rewards"][-test_set_size:]
if order_by_best_rewards is not None:
print("Getting test set tasks ordered by last return from {} to {} ..."
.format("greatest" if order_by_best_rewards else "lowest",
"lowest" if order_by_best_rewards else "greatest"))
sorted_indexes_of_test_episodes = sorted(range(test_set_size),
key=lambda k: test_rewards_to_use[k],
reverse=order_by_best_rewards) # Sort with best results first
else:
print("Getting test set tasks as defined...")
sorted_indexes_of_test_episodes = range(test_set_size)
teacher_param_env_bounds = OrderedDict(teacher_dict["env_param_bounds"])
env_params_list = [param_vec_to_param_dict(teacher_param_env_bounds, test_params_to_use[i])
for i in sorted_indexes_of_test_episodes]
associated_rewards_list = [test_rewards_to_use[i] for i in sorted_indexes_of_test_episodes]
return env_params_list, associated_rewards_list
def load_fixed_test_set(save_path, test_set_name):
'''
Load a test set from a file.
Args:
save_path (str): Path containing test sets
test_set_name (str): Name of the file containing the test set (do not add the extension)
Returns:
list of tasks
'''
teacher_dict = load_env_params(save_path)
teacher_param_env_bounds = OrderedDict(teacher_dict["env_param_bounds"])
test_param_vec = np.array(pickle.load(open("TeachMyAgent/teachers/test_sets/" + test_set_name + ".pkl", "rb")))
return [param_vec_to_param_dict(teacher_param_env_bounds, vec) for vec in test_param_vec]
def load_env(save_path, load_test_env=False):
'''
Load saved environment.
Args:
save_path (str): Path containing logs
        load_test_env (bool): Whether to load the saved test environment instead of the training environment
Returns:
loaded environment
'''
try:
filename = osp.join(save_path, 'vars.pkl')
state = joblib.load(filename)
if load_test_env:
env = state['test_env']
else:
env = state['env']
except Exception as err:
print("Unable to load envs : {}".format(err))
env = None
return env
def load_vectorized_env(save_path, env):
try:
filename = osp.join(save_path, 'vars.pkl')
state = joblib.load(filename)
env.__load_rms__(state["ob_rms"], state["ret_rms"])
except Exception as err:
print("Unable to load Running Mean Stds : {}".format(err))
def run_policy(env, get_action, env_params_list, max_ep_len=None, episode_id=0, record=False, recording_path=None,
no_render=False, use_baselines=False):
'''
Run an episode of a trained policy.
Args:
env: Environment
get_action: Policy function
env_params_list: List of tasks among one must be loaded
max_ep_len: Maximum number of steps allowed in the episode
episode_id: Id of the episode to load in `env_params_list`
record: Whether a video of the episode should be recorded
recording_path: Path on which the video must be saved
no_render: Whether the episode must be ran without a frame rendering it
use_baselines: Whether the policy was trained using OpenAI Baselines
'''
if record:
if os.name == "nt":
full_path = os.path.join(pathlib.Path().absolute(), recording_path)
full_path_len = len(full_path)
nb_char_to_remove = full_path_len - 245
if nb_char_to_remove > 0:
recording_path = recording_path[:-nb_char_to_remove]
video_recorder = VideoRecorder(env, recording_path + "_ep" + str(episode_id) + ".mp4", enabled=True)
if use_baselines:
env.get_raw_env().set_environment(**env_params_list[episode_id])
else:
env.set_environment(**env_params_list[episode_id])
if use_baselines:
_, o = env.reset()
else:
o = env.reset()
r, d, ep_ret, ep_len, n = 0, False, 0, 0, 0
while True:
if record and video_recorder.enabled:
video_recorder.capture_frame()
if not record and not no_render:
env.render()
time.sleep(1e-3)
a = get_action(o)
o, r, d, i = env.step(a)
if use_baselines:
ep_ret += i[0]["original_reward"][0]
else:
ep_ret += r
ep_len += 1
if d or (ep_len == max_ep_len):
print('Episode %d \t EpRet %.3f \t EpLen %d'%(episode_id, ep_ret, ep_len))
if record and video_recorder.enabled:
video_recorder.close()
video_recorder.enabled = False
break
return ep_ret
def main(args):
'''
Test a learned policy on tasks.
Args:
args: arguments defining what has to be run
'''
if args.fixed_test_set is None:
# training_config = load_training_infos(args.fpath)
# nb_test_episodes_during_training = training_config["num_test_episodes"] \
# if "num_test_episodes" in training_config \
# else training_config["nb_test_episodes"]
test_set_params, _ = load_training_test_set(args.fpath, args.bests)
else:
test_set_params = load_fixed_test_set(args.fpath, args.fixed_test_set)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
student_type = get_student_type(args.fpath)
env = None
if args.load_env:
env = load_env(args.fpath, args.use_test_env is not None)
if env is None:
env_fn, _, _, _ = EnvironmentArgsHandler.get_object_from_arguments(args)
if student_type == "spinup":
env = env_fn()
elif student_type == "baselines":
env, _ = create_custom_vec_normalized_envs(env_fn)
load_vectorized_env(args.fpath, env)
if student_type == 'spinup':
get_action = spinup_load_policy(args.fpath,
args.itr if args.itr >= 0 else 'last',
args.deterministic)
env._SET_RENDERING_VIEWPORT_SIZE(600, 400)
elif student_type == 'baselines':
ac_kwargs = dict()
ac_kwargs['hidden_sizes'] = [int(layer) for layer in args.hidden_sizes.split("/")]
nbatch_train = args.nb_env_steps * 1e6 // int(args.sample_size//args.batch_size)
model = get_baselines_model(network=args.network, nbatch_train=nbatch_train, ob_space=env.observation_space,
ac_space=env.action_space, env=env, nsteps=args.sample_size, ent_coef=args.ent_coef,
vf_coef=args.vf_coef, hidden_sizes=ac_kwargs['hidden_sizes'])
last_checkpoint = get_baselines_last_checkpoint(args.fpath + "/checkpoints/")
model.load(args.fpath + "/checkpoints/" + last_checkpoint)
# Careful : The recurrent version is not implemented here yet
get_action = lambda o: model.step(o)[0]
env.get_raw_env()._SET_RENDERING_VIEWPORT_SIZE(600, 400)
else:
raise Exception('Unknown student type.')
if args.episode_ids == "-1":
print("Testing the policy on the whole test set...")
episodes = [i for i in range(len(test_set_params))]
else:
episodes = [int(id) for id in args.episode_ids.split("/")]
rewards = []
for episode_id in episodes:
r = run_policy(env, get_action, test_set_params, args.len, episode_id, args.record, args.recording_path,
args.norender, use_baselines=student_type == 'baselines')
rewards.append(r)
env.close()
return rewards
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_parser():
'''
Define arguments that can be used when testing a policy.
'''
parser = argparse.ArgumentParser()
parser.add_argument('--fpath', type=str)
parser.add_argument('--len', '-l', type=int, default=0)
parser.add_argument('--norender', '-nr', action='store_true')
parser.add_argument('--itr', '-i', type=int, default=-1)
parser.add_argument('--deterministic', '-d', action='store_true')
parser.add_argument('--episode_ids', '-id', type=str, default="0")
parser.add_argument('--bests', type=str2bool, default=None)
parser.add_argument('--fixed_test_set', '-ts', type=str, default=None)
parser.add_argument('--load_env', action='store_true')
parser.add_argument('--use_test_env', action='store_true')
parser.add_argument('--record', type=str2bool, default=False)
parser.add_argument('--recording_path', type=str, default=None)
EnvironmentArgsHandler.set_parser_arguments(parser)
StudentArgsHandler.set_parser_arguments(parser)
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
|
495390
|
from django.contrib import admin
from .models import ExtendedBacker
admin.site.register(ExtendedBacker)
|
495409
|
import sys
if sys.version_info[0] >= 3:
TimeoutError = TimeoutError
else:
TimeoutError = OSError
class Timeout(TimeoutError):
"""Raised when the lock could not be acquired in *timeout* seconds."""
def __init__(self, lock_file):
#: The path of the file lock.
self.lock_file = lock_file
def __str__(self):
return "The file lock '{}' could not be acquired.".format(self.lock_file)
__all__ = [
"Timeout",
]
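# Minimal usage sketch (added for illustration): Timeout records the path of the lock file
# and, because it subclasses the aliased TimeoutError, can be caught either way.
if __name__ == "__main__":
    try:
        raise Timeout("/tmp/example.lock")
    except TimeoutError as err:  # also catches Timeout
        print(err)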
|
495414
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
import os.path as osp
import numpy as np
import json
import provider
from sklearn.model_selection import train_test_split
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TRAIN_FILES_MODELNET = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES_MODELNET = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
MODELNET10_TRAIN_FILE = 'data/ModelNet10/trainShuffled_Relabel.h5'
MODELNET10_TEST_FILE = 'data/ModelNet10/testShuffled_Relabel.h5'
CHAIR_PATH = 'data/Chair'
KEYPOINT_CHAIR_PATH = 'data/Chair/keypts_chair.mat'
CHAIR_FILES = os.listdir(CHAIR_PATH)
TRAIN_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'train' in f]
VAL_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'val' in f]
TEST_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'test' in f]
KEYPOINTNET_PATH = "/media/tianxing/Samsung 1T/ShapeNetCore/"
def naive_read_pcd(path):
lines = open(path, 'r').readlines()
idx = -1
for i, line in enumerate(lines):
if line.startswith('DATA ascii'):
idx = i + 1
break
lines = lines[idx:]
lines = [line.rstrip().split(' ') for line in lines]
data = np.asarray(lines)
    pc = np.array(data[:, :3], dtype=float)  # np.float was removed in recent NumPy releases
return pc
def get_pointcloud(dataset, NUM_POINT=2048, shuffle=True):
"""
Load the dataset into memory
"""
if dataset == 'modelnet':
train_file_idxs = np.arange(0, len(TRAIN_FILES_MODELNET))
data_train = []
label_train = []
for fn in range(len(TRAIN_FILES_MODELNET)):
print('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES_MODELNET[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
data_train.append(current_data)
label_train.append(current_label)
result_train = np.vstack(data_train)
label_train = np.concatenate(label_train, axis=None)
if shuffle:
X_train, y_train, _ = provider.shuffle_data(result_train, np.squeeze(label_train))
else:
X_train, y_train = result_train, np.squeeze(label_train)
data_test = []
label_test = []
for fn in range(len(TEST_FILES_MODELNET)):
print('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES_MODELNET[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
data_test.append(current_data)
label_test.append(current_label)
result_test = np.vstack(data_test)
label_test = np.concatenate(label_test, axis=None)
if shuffle:
X_test, y_test, _ = provider.shuffle_data(result_test, np.squeeze(label_test))
else:
X_test, y_test = result_test, np.squeeze(label_test)
elif dataset == 'shapenet':
shapenet_data, shapenet_label = provider.get_shapenet_data()
shapenet_data = shapenet_data[:,0:NUM_POINT,:]
X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)
elif dataset == 'shapenet_chair':
shapenet_data, shapenet_label = provider.get_shapenet_data()
shapenet_data = shapenet_data[:,0:NUM_POINT,:]
shapenet_data, shapenet_label = shapenet_data[shapenet_label==17], shapenet_label[shapenet_label==17]
X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)
elif dataset == 'modelnet10':
current_data, current_label = provider.loadDataFile(MODELNET10_TRAIN_FILE)
current_data = current_data[:,0:NUM_POINT,:]
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.loadDataFile(MODELNET10_TEST_FILE)
current_data = current_data[:,0:NUM_POINT,:]
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == 'keypoint':
current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]):
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == 'keypoint_10class':
current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
current_label[:, -10:] = np.arange(1, 11)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
current_label[:, -10:] = np.arange(1, 11)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]):
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == "keypointnet":
json_path = osp.join(KEYPOINTNET_PATH, "annotations/all.json")
annots = json.load(open(json_path))
X = []
y = []
for annot in annots:
class_id = annot["class_id"]
model_id = annot["model_id"]
kpts = []
for kpt in annot["keypoints"]:
kpts.append(kpt["xyz"])
pcd_path = osp.join(KEYPOINTNET_PATH, f"pcds/{class_id}/{model_id}.pcd")
if os.path.exists(pcd_path):
pcd = naive_read_pcd(pcd_path)
pcd = pcd[0:NUM_POINT, :]
else:
continue
if len(kpts) != 10:
continue
pcd = np.concatenate((pcd[:-10], kpts))
label = np.zeros(NUM_POINT-10)
label = np.concatenate((label, np.ones(10)))
X.append(pcd)
y.append(label)
current_data = np.array(X)
current_label = np.array(y)
if False and shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_train, X_test, y_train, y_test = train_test_split(current_data, current_label, test_size=0.2, random_state=42, shuffle=shuffle)
else:
raise NotImplementedError()
print(f'Dataset name: {dataset}')
print(f'X_train: {X_train.shape}')
print(f'X_test: {X_test.shape}')
print(f'y_train: {y_train.shape}')
print(f'y_test: {y_test.shape}')
return X_train, X_test, y_train, y_test
# debug
if __name__ == "__main__":
X_train, X_test, y_train, y_test = get_pointcloud('keypointnet')
print(X_train)
print(np.sum(y_train))
|
495423
|
from drawille import Canvas
from timeit import timeit
c = Canvas()
frames = 1000 * 10
sizes = ((0, 0),
(10, 10),
(20, 20),
(20, 40),
(40, 20),
(40, 40),
(100, 100))
for x, y in sizes:
c.set(0, 0)
for i in range(y):
c.set(x, i)
r = timeit(c.frame, number=frames)
print('{0}x{1}\t{2}'.format(x, y, r))
c.clear()
|
495452
|
from unittest import (
TestCase,
main
)
from source import (
CSTR,
CINT,
CConst
)
from common import (
def_tokens,
pypath,
ee
)
with pypath("..ply"):
from ply.lex import lex
# Defining a lexer affects `ply.yacc.LRParser.parse` invocations without
# explicit `lexer` argument value because a new lexer overwrites existing lexer
# inside `ply` (it's a global reference).
def t_ANY(t):
".+"
return t
t_error = lambda _ : None
def_tokens(globals())
lex()
TEST_CCONST_VERBOSE = ee("TEST_CCONST_VERBOSE")
class CConstTest(object):
if TEST_CCONST_VERBOSE:
def setUp(self):
print("== " + self.data + " ==")
def test(self):
self.parsed = CConst.parse(self.data)
self.back = str(self.parsed)
self.assertIsInstance(self.parsed, self.expected_type)
self.assertEqual(self.back, self.data)
if TEST_CCONST_VERBOSE:
def tearDown(self):
print(self.back, repr(self.parsed))
def test_repr(self):
parsed = CConst.parse(self.data)
representation = repr(parsed)
evaluated_repr = eval(representation)
self.assertEqual(parsed, evaluated_repr)
class AStringTest(CConstTest, TestCase):
data = """an arbitrary
string with new line and quoted "@" and Windows\r\nnewline"""
expected_type = CSTR
class CINTTest(CConstTest):
expected_type = CINT
class RegularIntTest(CINTTest):
""" `CINT` assumes decimal representation of `int` without leading zeros to
    be regular. A regular CINT is assumed to compare equal to an `int` of the
    same value, ignoring the absence of appearance information.
"""
def test(self):
super(RegularIntTest, self).test()
self.assertEqual(self.parsed, self.parsed.v)
class PositiveHexTest(CINTTest, TestCase):
data = "0x1F000"
class NegativeHexTest(CINTTest, TestCase):
data = "-0xDEADBEEF"
class PositiveDecimalTest1(RegularIntTest, TestCase):
data = "1"
class PositiveDecimalTest2(RegularIntTest, TestCase):
data = "1223235324"
class NegativeDecimalTest(RegularIntTest, TestCase):
data = "-1"
class LeadingZerosHexTest1(CINTTest, TestCase):
data = "0x0001"
class LeadingZerosHexTest2(CINTTest, TestCase):
data = "0x000"
class LeadingZerosBinTest1(CINTTest, TestCase):
data = "0b01011010101"
class LeadingZerosBinTest2(CINTTest, TestCase):
data = "0b00000"
class BinZeroTest(CINTTest, TestCase):
data = "0b0"
class DecimalZeroTest(RegularIntTest, TestCase):
data = "0"
class EmptyCINTTest(TestCase):
def test(self):
self.assertRaises(ValueError, lambda : CINT(""))
class HexZeroTest(CINTTest, TestCase):
data = "0x0"
class TestCINTParser(TestCase):
def test(self):
self.assertEqual(CINT("0"), 0)
def test_0x18(self):
self.assertEqual(CINT("0x18"), CINT(24, 16, 2))
def test_0b11100111(self):
        # If a difference in the digit count of two CINTs does not affect the
        # string form, it is negligible.
        # The right CINT requires at least 0 digits, which produces no leading
        # zeros. The left CINT requires 8 digits, matching the original string
        # form, and has no leading zeros either. So the CINTs are considered
        # equal.
self.assertEqual(CINT("0b11100111"), CINT(231, 2, 0))
        # The right CINT has 1 leading zero, so the CINTs are not equal.
self.assertNotEqual(CINT("0b11100111"), CINT(231, 2, 9))
if __name__ == "__main__":
main()
|
495508
|
import logging
# from homeassistant.const import 'serial_port', 'config_file', 'code'
# found advice in the homeassistant creating components manual
# https://home-assistant.io/developers/creating_components/
# Import the device class from the component that you want to support
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from .const import DOMAIN
# Home Assistant depends on 3rd party packages for API specific code.
_LOGGER = logging.getLogger(__name__)
SHUTTER_IDS = {"40", "41", "42", "47", "49", "4b", "4c", "4e", "70", "61"}
def is_shutter(id):
return any([id.startswith(i) for i in SHUTTER_IDS])
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Awesome Light platform."""
stick = hass.data[DOMAIN]['stick']
to_add = [DuofernShutter(device['id'], device['name'], stick, hass) for device in stick.config['devices'] if
is_shutter(device['id']) and not device['id'] in hass.data[DOMAIN]['devices'].keys()]
add_devices(to_add)
class DuofernShutter(CoverEntity):
"""Representation of Duofern cover type device."""
def __init__(self, id, desc, stick, hass):
"""Initialize the shutter."""
self._id = id
self._name = desc
self._state = None
self._stick = stick
hass.data[DOMAIN]['devices'][id] = self
@property
def name(self):
return self._name
@property
def current_cover_position(self):
"""Return the display name of this cover."""
return self._state
@property
def is_closed(self):
"""Return true if cover is close."""
return self._state == 0
@property
def should_poll(self):
"""Whether this entity should be polled or uses subscriptions"""
return False # TODO: Add config option for subscriptions over polling
@property
def unique_id(self):
return self._id
def open_cover(self):
"""roll up cover"""
self._stick.command(self._id, "up")
def close_cover(self):
"""close cover"""
self._stick.command(self._id, "down")
def stop_cover(self):
"""stop cover"""
self._stick.command(self._id, "stop")
def set_cover_position(self, **kwargs):
"""set position (100-position to make the default intuitive: 0%=closed, 100%=open"""
position = kwargs.get(ATTR_POSITION)
self._stick.command(self._id, "position", 100 - position)
def update(self):
"""Fetch new state data for this cover.
This is the only method that should fetch new data for Home Assistant.
        (no new data needs to be fetched, the stick updates itself in a thread)
        (not the best style for Home Assistant, I know. I'll port to asyncio if I find the time)
"""
try:
self._state = 100 - self._stick.duofern_parser.modules['by_code'][self._id]['position']
except KeyError:
self._state = None
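# --- Hedged illustration (not part of the integration) ---
# The class above flips positions because Home Assistant treats 0% as closed
# and 100% as open, while the Duofern stick appears to report the opposite.
# A minimal, hardware-free sketch of that inversion:
if __name__ == "__main__":
    def ha_to_stick(position):
        """Convert a Home Assistant position (0 = closed, 100 = open) for the stick."""
        return 100 - position
    def stick_to_ha(position):
        """Convert a stick-reported position back to the Home Assistant convention."""
        return 100 - position
    assert ha_to_stick(0) == 100
    assert stick_to_ha(ha_to_stick(42)) == 42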
|
495531
|
import pytest
@pytest.fixture(autouse=True)
def enable_db_access_for_all_tests(db):
pass
# https://blog.jerrycodes.com/no-http-requests/
@pytest.fixture(autouse=True)
def no_http_requests(monkeypatch):
def urlopen_mock(self, method, url, *args, **kwargs):
raise RuntimeError(
f"The test was about to {method} {self.scheme}://{self.host}{url}"
)
monkeypatch.setattr(
"urllib3.connectionpool.HTTPConnectionPool.urlopen", urlopen_mock
)
|
495597
|
import uuid
from typing import Optional, Union
import pyrsistent
from aioredis.client import Redis
from pyrsistent.typing import PVector
from .data import Todo
class Storage:
def __init__(self, *, redis: Redis, data_key: str) -> None:
self.redis = redis
self.data_key = data_key
def build_item_key(self, mixed: Union[str, Todo, uuid.UUID]) -> str:
uid = mixed.uid if isinstance(mixed, Todo) else mixed
return ":".join((self.data_key, str(uid)))
async def create_todo(self, todo: Todo) -> None:
await self.redis.rpush(self.data_key, str(todo.uid))
await self.save_todo(todo)
async def delete_todo(self, todo: Todo) -> int:
redis = self.redis
await redis.lrem(self.data_key, 0, str(todo.uid))
return await redis.delete(self.build_item_key(todo)) # type: ignore
async def delete_todos(self) -> int:
redis = self.redis
counter = 0
for key in await redis.lrange(self.data_key, 0, -1):
counter += await redis.delete(self.build_item_key(key))
await redis.delete(self.data_key)
return counter
async def get_todo(self, uid: uuid.UUID) -> Optional[Todo]:
data = await self.redis.hgetall(self.build_item_key(uid))
if not data:
return None
return Todo.from_storage(data, uid=uid)
async def list_todos(self) -> PVector[Todo]:
redis = self.redis
data = []
for key in await redis.lrange(self.data_key, 0, -1):
uid = uuid.UUID(key)
item_key = self.build_item_key(uid)
data.append(
Todo.from_storage(await redis.hgetall(item_key), uid=uid)
)
return pyrsistent.v(*data)
async def save_todo(self, todo: Todo) -> None:
await self.redis.hset(
self.build_item_key(todo), mapping=todo.to_storage()
)
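# --- Hedged sketch of the key layout (illustration only) ---
# The storage keeps an ordered list of uids at `data_key` and one Redis hash per
# item at "<data_key>:<uid>". The snippet below only exercises `build_item_key`,
# so the `redis=None` argument is a stand-in: no connection is needed here.
if __name__ == "__main__":
    example_uid = uuid.UUID("00000000-0000-0000-0000-000000000001")
    storage = Storage(redis=None, data_key="todos")  # type: ignore[arg-type]
    print(storage.build_item_key(example_uid))  # -> "todos:00000000-0000-0000-0000-000000000001"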
|
495614
|
import numpy as np
from gym.spaces import Box
from collections import deque
# Implementation of SAGG-RIAC
class SAGG_RIAC():
def __init__(self, min, max, continuous_competence=False):
assert len(min) == len(max)
self.maxlen = 200
self.regions = [[deque(maxlen=self.maxlen + 1), deque(maxlen=self.maxlen + 1)]]
self.region_bounds = [Box(min, max, dtype=np.float32)]
self.interest = [0.]
self.probas = [1.]
self.nb_dims = len(min)
self.window_cp = 100
self.temperature = 20
self.nb_split_attempts = 50
self.continuous_competence = continuous_competence
self.max_difference = 0.3
self.init_size = max - min
def update(self, goals, binary_competence, continuous_competence=None):
if len(goals) > 0:
new_split = False
all_order = None
regions = [None] * len(goals)
for i, goal in enumerate(goals):
for j, rb in enumerate(self.region_bounds):
if rb.contains(goal):
regions[i] = j
break
if self.continuous_competence:
c_or_cps = continuous_competence
else:
c_or_cps = binary_competence
# add new outcomes and goals to regions
for reg, c_or_cp, goal in zip(regions, c_or_cps, goals):
self.regions[reg][0].append(c_or_cp)
self.regions[reg][1].append(goal)
# check if need to split
ind_split = []
new_bounds = []
new_sub_regions = []
for reg in range(self.nb_regions):
if len(self.regions[reg][0]) > self.maxlen:
# try nb_split_attempts splits
best_split_score = 0
best_abs_interest_diff = 0
best_bounds = None
best_sub_regions = None
is_split = False
for i in range(self.nb_split_attempts):
sub_reg1 = [deque(), deque()]
sub_reg2 = [deque(), deque()]
                        # repeat until both sub-regions contain at least a quarter of the region capacity (maxlen / 4)
while len(sub_reg1[0]) < self.maxlen / 4 or len(sub_reg2[0]) < self.maxlen / 4:
# decide on dimension
dim = np.random.choice(range(self.nb_dims))
threshold = self.region_bounds[reg].sample()[dim]
bounds1 = Box(self.region_bounds[reg].low, self.region_bounds[reg].high, dtype=np.float32)
bounds1.high[dim] = threshold
bounds2 = Box(self.region_bounds[reg].low, self.region_bounds[reg].high, dtype=np.float32)
bounds2.low[dim] = threshold
bounds = [bounds1, bounds2]
valid_bounds = True
if np.any(bounds1.high - bounds1.low < self.init_size / 5):
valid_bounds = False
if np.any(bounds2.high - bounds2.low < self.init_size / 5):
valid_bounds = valid_bounds and False
# perform split in sub regions
sub_reg1 = [deque(), deque()]
sub_reg2 = [deque(), deque()]
for i, goal in enumerate(self.regions[reg][1]):
if bounds1.contains(goal):
sub_reg1[1].append(goal)
sub_reg1[0].append(self.regions[reg][0][i])
else:
sub_reg2[1].append(goal)
sub_reg2[0].append(self.regions[reg][0][i])
sub_regions = [sub_reg1, sub_reg2]
# compute interest
interest = np.zeros([2])
for i in range(2):
if self.continuous_competence:
cp_window = min(len(sub_regions[i][0]), self.window_cp)
cp = np.abs(np.array(sub_regions[i][0])[-cp_window].mean())
else:
cp_window = min(len(sub_regions[i][0]) // 2, self.window_cp)
cp = np.array(sub_regions[i][0])[- 2 * cp_window: - cp_window].mean() - np.array(sub_regions[i][0])[- cp_window:].mean()
interest[i] = np.abs(cp)
# compute score
split_score = len(sub_reg1) * len(sub_reg2) * np.abs(interest[0] - interest[1])
if split_score >= best_split_score and np.abs(interest[0] - interest[1]) >= self.max_difference / 2 and valid_bounds:
best_abs_interest_diff = np.abs(interest[0] - interest[1])
print(interest)
best_split_score = split_score
best_sub_regions = sub_regions
best_bounds = bounds
is_split = True
if interest[0] >= interest[1]:
order = [1, -1]
else:
order = [-1, 1]
if is_split:
ind_split.append(reg)
if best_abs_interest_diff > self.max_difference:
self.max_difference = best_abs_interest_diff
else:
self.regions[reg][0] = deque(np.array(self.regions[reg][0])[- int (3 * len(self.regions[reg][0]) / 4):], maxlen=self.maxlen + 1)
self.regions[reg][1] = deque(np.array(self.regions[reg][1])[- int(3 * len(self.regions[reg][1]) / 4):], maxlen=self.maxlen + 1)
new_bounds.append(best_bounds)
new_sub_regions.append(best_sub_regions)
# implement splits
for i, reg in enumerate(ind_split):
all_order = [0] * self.nb_regions
all_order.pop(reg)
all_order.insert(reg, order[0])
all_order.insert(reg, order[1])
new_split = True
self.region_bounds.pop(reg)
self.region_bounds.insert(reg, new_bounds[i][0])
self.region_bounds.insert(reg, new_bounds[i][1])
self.regions.pop(reg)
self.regions.insert(reg, new_sub_regions[i][0])
self.regions.insert(reg, new_sub_regions[i][1])
self.interest.pop(reg)
self.interest.insert(reg, 0)
self.interest.insert(reg, 0)
self.probas.pop(reg)
self.probas.insert(reg, 0)
self.probas.insert(reg, 0)
# recompute interest
for i in range(self.nb_regions):
if len(self.regions[i][0]) > 10:
if self.continuous_competence:
cp_window = min(len(self.regions[i][0]), self.window_cp)
cp = np.abs(np.array(self.regions[i][0])[-cp_window].mean())
else:
cp_window = min(len(self.regions[i][0]) // 2, self.window_cp)
cp = np.array(self.regions[i][0])[- 2 * cp_window: - cp_window].mean() - np.array(self.regions[i][0])[- cp_window:].mean()
else:
cp = 0
self.interest[i] = np.abs(cp)
exp_int = np.exp(self.temperature * np.array(self.interest))
probas = exp_int / exp_int.sum()
self.probas = probas.tolist()
assert len(self.probas) == len(self.regions)
return new_split, all_order
else:
return False, None
def sample_goal(self):
# sample region
if np.random.rand() < 0.2:
region_id = np.random.choice(range(self.nb_regions))
else:
region_id = np.random.choice(range(self.nb_regions), p=np.array(self.probas))
# sample goal
goal = self.region_bounds[region_id].sample()
return goal
@property
def nb_regions(self):
return len(self.regions)
@property
def get_regions(self):
return self.region_bounds
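# --- Hedged usage sketch (illustration only) ---
# One way a caller might drive the goal-sampling / region-splitting loop:
# sample goals, score them with a task-specific competence signal, then feed
# the outcomes back. `fake_competence` below is a made-up stand-in for a real
# evaluation and only serves to make the sketch runnable.
if __name__ == "__main__":
    sagg = SAGG_RIAC(np.array([-1.0, -1.0]), np.array([1.0, 1.0]))
    for _ in range(10):
        goals = [sagg.sample_goal() for _ in range(20)]
        # Pretend goals close to the origin are achieved (competence 1), others are not.
        fake_competence = [float(np.linalg.norm(g) < 0.5) for g in goals]
        sagg.update(goals, fake_competence)
    print(sagg.nb_regions, sagg.interest)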
|
495706
|
import os
import sys
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.dirname(DATA_DIR)))
from selenium import webdriver
from Screenshot import Screenshot_Clipping
from webdriver_manager.chrome import ChromeDriverManager
def test_full_screenshot():
ob = Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome(ChromeDriverManager(log_level=0).install())
url = "https://github.com/sam4u3/Selenium_Screenshot/tree/master/test"
driver.get(url)
img_url = ob.full_Screenshot(driver, save_path=r'.',image_name='Myimage.png',is_load_at_runtime=True,load_wait_time=3)
os.remove(img_url)
driver.close()
driver.quit()
def test_element_screenshot():
ob = Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome(ChromeDriverManager(log_level=0).install())
url = "https://github.com/PyWizards/Selenium_Screenshot"
driver.get(url)
element = driver.find_element_by_class_name('pagehead-actions')
img_url = ob.get_element(driver, element, r'.')
os.remove(img_url)
driver.close()
driver.quit()
def test_hide_element():
ob = Screenshot_Clipping.Screenshot()
driver = webdriver.Chrome(ChromeDriverManager(log_level=0).install())
url = "https://github.com/sam4u3"
driver.get(url)
hide_elements = ['class=avatar width-full height-full avatar-before-user-status'] # Use full class name
img_url = ob.full_Screenshot(driver, save_path=r'.', elements=hide_elements,
image_name='Myimage.png')
os.remove(img_url)
driver.close()
driver.quit()
|
495728
|
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
def convert(ss):
d = {}
return [d.setdefault(c, len(d)) for c in ss]
return convert(s) == convert(t)
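# --- Hedged illustration (not part of the original solution) ---
# `convert` replaces each character with the index at which it first appeared,
# so two strings are isomorphic exactly when these first-occurrence patterns
# match, e.g. "egg" -> [0, 1, 1] and "add" -> [0, 1, 1], while "bar" -> [0, 1, 2].
if __name__ == "__main__":
    solver = Solution()
    print(solver.isIsomorphic("egg", "add"))  # True
    print(solver.isIsomorphic("foo", "bar"))  # False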
|
495740
|
from django.urls import path
from .views import ResultListView, create_result, edit_results
urlpatterns = [
path("create/", create_result, name="create-result"),
path("edit-results/", edit_results, name="edit-results"),
path("view/all", ResultListView.as_view(), name="view-results"),
]
|
495783
|
EVENT_MEET_GUIDE = 64
EVENT_DESOLATED_CABIN = 65
EVENT_TRAINER_K1_PRE_1 = 66
EVENT_ITEMBALL_K1_PRE_1 = 67
EVENT_TRAINER_K1_PRE_2 = 68
EVENT_TRAINER_K1_PRE_3 = 69
EVENT_TRAINER_K1_PRE_4 = 70
EVENT_TUTORIAL_WILLOW = 71
EVENT_K1_PRE_COMPLETE = 72
EVENT_ITEMBALL_K1_POST_1 = 73
EVENT_SHOWED_METAPOD = 74
EVENT_TRAINER_K1_POST_1 = 75
EVENT_ITEMBALL_K1_POST_2 = 76
EVENT_ITEMBALL_K1_POST_3 = 77
EVENT_K1_POST_ANCIENT_PUZZLE_SOLVED = 78
EVENT_TRAINER_K1_POST_2 = 79
EVENT_K1_POST_CORRUPTION_CLEANSED = 80
EVENT_TRAINER_K1_POST_3 = 81
EVENT_TRAINER_K2_PRE_1 = 82
EVENT_TRAINER_K2_PRE_2 = 83
EVENT_TRAINER_K2_PRE_3 = 84
EVENT_TRAINER_K2_PRE_4 = 85
EVENT_ITEMBALL_K2_PRE_1 = 86
EVENT_K2_PRE_COMPLETE = 87
EVENT_K2_POST_ANCIENT_PUZZLE_SOLVED = 88
EVENT_ITEMBALL_K2_POST_1 = 89
EVENT_ITEMBALL_K2_POST_2 = 90
EVENT_TRAINER_K2_POST_1 = 91
EVENT_TRAINER_K2_POST_2 = 92
EVENT_SPOKE_TO_HEYV = 93
EVENT_RECEIVED_CUT = 94
EVENT_ITEMBALL_K2_POST_3 = 95
EVENT_TRAINER_K3_PRE_1 = 96
EVENT_TRAINER_K3_PRE_2 = 97
EVENT_TRAINER_K3_PRE_3 = 98
EVENT_K3_PRE_GIRL_EVENT = 99
EVENT_TRAINER_K3_PRE_4 = 100
EVENT_TRAINER_K3_PRE_5 = 101
EVENT_TRAINER_K3_PRE_6 = 102
EVENT_TRAINER_K3_PRE_7 = 103
EVENT_TRAINER_K3_PRE_8 = 104
EVENT_TRAINER_K3_PRE_9 = 105
EVENT_K3_PRE_GIRL_SAVED = 106
EVENT_K3_PRE_COMPLETE = 107
EVENT_K3_VISITED_DELIRIA = 108
EVENT_K3_DELIRIA_LEFTOVERS = 109
EVENT_ITEMBALL_K3_POST_1 = 110
EVENT_ITEMBALL_K3_POST_2 = 111
EVENT_ITEMBALL_K3_POST_3 = 112
EVENT_ITEMBALL_K3_POST_4 = 113
EVENT_ITEMBALL_K3_POST_5 = 114
EVENT_ITEMBALL_K3_POST_6 = 115
EVENT_ITEMBALL_K3_POST_7 = 116
EVENT_ITEMBALL_K3_POST_8 = 117
EVENT_ITEMBALL_K3_POST_9 = 118
EVENT_K3_LAYLAH_BLESSING = 119
EVENT_YAHAHA_FIRST = 120
EVENT_YAHAHA_INITIATED = 121
EVENT_YAHAHA_SECOND = 122
EVENT_YAHAHA_THIRD = 123
EVENT_YAHAHA_FOURTH = 124
EVENT_YAHAHA_FIFTH = 125
EVENT_K4_SECRET_PATH = 126
EVENT_TRAINER_K4_PRE_1 = 127
EVENT_TRAINER_K4_PRE_2 = 128
EVENT_ITEMBALL_K4_PRE_1 = 129
EVENT_K4_STOCK_MARKET_PROFIT = 130
EVENT_K4_BICY_POCKET_FUCK = 131
EVENT_TRAINER_K4_PRE_3 = 132
EVENT_TRAINER_K4_PRE_4 = 133
EVENT_TRAINER_K4_PRE_5 = 134
EVENT_ITEMBALL_K4_PRE_2 = 135
EVENT_ITEMBALL_K4_PRE_3 = 136
EVENT_K4_PRE_COMPLETE = 137
EVENT_K4_FUN_VALUE_CHANGED = 138
EVENT_ITEMBALL_K4_POST_1 = 139
EVENT_ITEMBALL_K4_POST_2 = 140
EVENT_K4_POST_ANCIENT_PUZZLE_SOLVED = 141
EVENT_K5_PRE_NAME_REGISTERED = 142
EVENT_TRAINER_K5_PRE_1 = 143
EVENT_TRAINER_K5_PRE_2 = 144
EVENT_K5_PRE_ROCKETS_DEFEATED = 145
EVENT_TRAINER_K5_PRE_3 = 146
EVENT_TRAINER_K5_ROCKET_1 = 147
EVENT_TRAINER_K5_ROCKET_2 = 148
EVENT_TRAINER_K5_ROCKET_3 = 149
EVENT_TRAINER_K5_ROCKET_4 = 150
EVENT_TRAINER_K5_ROCKET_5 = 151
EVENT_TRAINER_K5_ROCKET_6 = 152
EVENT_ITEMBALL_K5_PRE_1 = 153
EVENT_ITEMBALL_K5_PRE_2 = 154
EVENT_ITEMBALL_K5_PRE_3 = 155
EVENT_ITEMBALL_K5_PRE_4 = 156
EVENT_K5_PRE_ROCKETS_DEFEATED_NEG = 157
EVENT_K5_PRE_COMPLETE = 158
EVENT_ITEMBALL_K5_POST_1 = 159
EVENT_K5_POST_PENTAKILL = 160
EVENT_TRAINER_K6_PRE_1 = 161
EVENT_TRAINER_K6_PRE_2 = 162
EVENT_TRAINER_K6_PRE_3 = 163
EVENT_ITEMBALL_K6_PRE_1 = 164
EVENT_K5_PRE_FLASH_HOUSE = 165
EVENT_K5_PRE_MATH_HOMEWORK = 166
EVENT_TRAINER_K6_PRE_4 = 167
EVENT_TRAINER_K6_PRE_5 = 168
EVENT_TRAINER_K6_PRE_6 = 169
EVENT_TRAINER_K6_PRE_7 = 170
EVENT_TRAINER_K6_PRE_8 = 171
EVENT_K6_PRE_COMPLETE = 172
EVENT_ITEMBALL_K6_POST_1 = 173
EVENT_ITEMBALL_K6_POST_2 = 174
EVENT_ITEMBALL_K6_POST_3 = 175
EVENT_ITEMBALL_K6_POST_4 = 176
EVENT_ITEMBALL_K6_POST_5 = 177
EVENT_ITEMBALL_K6_POST_6 = 178
EVENT_K6_POST_LOST_ITEM = 179
EVENT_K1_RADIO = 180
EVENT_K2_RADIO = 181
EVENT_K3_RADIO = 182
EVENT_K5_RADIO = 183
EVENT_K2_LAPTOP = 184
EVENT_ITEMBALL_FINAL_1 = 185
EVENT_ITEMBALL_FINAL_2 = 186
EVENT_ITEMBALL_FINAL_3 = 187
EVENT_ITEMBALL_FINAL_4 = 188
EVENT_ITEMBALL_FINAL_5 = 189
EVENT_ITEMBALL_FINAL_6 = 190
EVENT_ITEMBALL_FINAL_7 = 191
EVENT_MISSINGNO_DEFEATED = 192
|
495785
|
import logging
import threading
from datetime import datetime, timedelta
from threading import Thread
from apiclient import discovery
logger = logging.getLogger(__name__)
DELAY_SECS = 60 * 5
MAX_FOLDER_AGE_MINS = 60 # An hour
class DriveCleanup:
SCOPES = 'https://www.googleapis.com/auth/drive'
drive = None
stopped = False
def __init__(self, credentials):
self.drive = discovery.build(serviceName='drive', version='v3', credentials=credentials)
self.stopped_event = threading.Event()
def start(self):
def job():
logger.info("Scheduled drive cleanup job")
while not self.stopped:
self._delete_old_no_raise()
self.stopped_event.wait(DELAY_SECS)
Thread(target=job).start()
return self
def stop(self):
logger.info("Stopping drive cleanup job")
self.stopped = True
self.stopped_event.set()
def _delete_old_no_raise(self):
try:
self._delete_old()
        except Exception:
            logger.exception('Failed to delete old drive files')
def _delete_old(self):
logger.info("Searching for old drive folders")
now = datetime.utcnow()
max_folder_modification = (now - timedelta(minutes=MAX_FOLDER_AGE_MINS)).isoformat("T")
query = "mimeType = 'application/vnd.google-apps.folder' and modifiedTime <= '{}'" \
.format(max_folder_modification)
results = self.drive.files().list(q=query, fields="files(id, name)").execute()
files = results.get('files', [])
for file in files:
file_id = file['id']
logger.info("Deleting old folder. id=" + file_id + ', name=' + file['name'])
self.drive.files().delete(fileId=file_id).execute()
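# --- Hedged usage sketch (illustration only) ---
# How the job might be wired up. Building `credentials` is outside this module;
# the service-account flow below (oauth2client) and the key-file path are
# assumptions made only for this example.
if __name__ == "__main__":
    from oauth2client.service_account import ServiceAccountCredentials
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        "service-account.json", scopes=[DriveCleanup.SCOPES]
    )
    cleanup = DriveCleanup(credentials).start()
    try:
        input("Drive cleanup running; press Enter to stop...")
    finally:
        cleanup.stop()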
|
495791
|
import os, sys
import os.path as osp
import argparse
import warnings
import time
import numpy as np
from utils import paired_transforms_tv04 as p_tr
from PIL import Image
from skimage.io import imsave
from skimage.util import img_as_ubyte
from skimage.transform import resize
import torch
from models.get_model import get_arch
from utils.model_saving_loading import load_model
from skimage.measure import regionprops
# argument parsing
parser = argparse.ArgumentParser()
required_named = parser.add_argument_group('required arguments')
parser.add_argument('--model_path', help='experiments/subfolder where checkpoint is', default='experiments/wnet_drive')
parser.add_argument('--im_path', help='path to image to be segmented', default=None)
parser.add_argument('--mask_path', help='path to FOV mask; will be computed if not provided', default=None)
parser.add_argument('--tta', type=str, default='from_preds', help='test-time augmentation (no/from_logits/from_preds)')
parser.add_argument('--bin_thresh', type=float, default=0.4196, help='binarizing threshold')
# im_size overrides config file
parser.add_argument('--im_size', help='delimited list input, could be 600,400', type=str, default='512')
parser.add_argument('--device', type=str, default='cpu', help='where to run the training code (e.g. "cpu" or "cuda:0") [default: %(default)s]')
parser.add_argument('--result_path', type=str, default=None, help='path to save prediction')
from skimage import measure, draw
import numpy as np
from torchvision.transforms import Resize
from scipy import optimize
from skimage.filters import threshold_minimum
from skimage.measure import regionprops
from scipy.ndimage import binary_fill_holes
from skimage.color import rgb2hsv
from skimage.exposure import equalize_adapthist
def get_circ(binary):
# https://stackoverflow.com/a/28287741
image = binary.astype(int)
regions = measure.regionprops(image)
bubble = regions[0]
y0, x0 = bubble.centroid
r = bubble.major_axis_length / 2.
def cost(params):
x0, y0, r = params
coords = draw.circle(y0, x0, r, shape=image.shape)
template = np.zeros_like(image)
template[coords] = 1
return -np.sum(template == image)
x0, y0, r = optimize.fmin(cost, (x0, y0, r))
return x0, y0, r
def create_circular_mask(sh, center=None, radius=None):
# https://stackoverflow.com/a/44874588
h, w = sh
if center is None: # use the middle of the image
center = (int(w/2), int(h/2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w-center[0], h-center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
return mask
def get_fov(img):
im_s = img.size
if max(im_s) > 500:
img = Resize(500)(img)
with np.errstate(divide='ignore'):
im_v = equalize_adapthist(np.array(img))[:, :, 1]
# im_v = equalize_adapthist(rgb2hsv(np.array(img))[:, :, 2])
thresh = threshold_minimum(im_v)
binary = binary_fill_holes(im_v > thresh)
x0, y0, r = get_circ(binary)
fov = create_circular_mask(binary.shape, center=(x0, y0), radius=r)
return Resize(im_s[ : :-1])(Image.fromarray(fov))
def crop_to_fov(img, mask):
mask = np.array(mask).astype(int)
minr, minc, maxr, maxc = regionprops(mask)[0].bbox
im_crop = Image.fromarray(np.array(img)[minr:maxr, minc:maxc])
return im_crop, [minr, minc, maxr, maxc]
def flip_ud(tens):
return torch.flip(tens, dims=[1])
def flip_lr(tens):
return torch.flip(tens, dims=[2])
def flip_lrud(tens):
return torch.flip(tens, dims=[1, 2])
def create_pred(model, tens, mask, coords_crop, original_sz, bin_thresh, tta='no'):
act = torch.sigmoid if model.n_classes == 1 else torch.nn.Softmax(dim=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
logits = model(tens.unsqueeze(dim=0).to(device)).squeeze(dim=0)
pred = act(logits)
if tta!='no':
with torch.no_grad():
logits_lr = model(tens.flip(-1).unsqueeze(dim=0).to(device)).squeeze(dim=0).flip(-1)
logits_ud = model(tens.flip(-2).unsqueeze(dim=0).to(device)).squeeze(dim=0).flip(-2)
logits_lrud = model(tens.flip(-1).flip(-2).unsqueeze(dim=0).to(device)).squeeze(dim=0).flip(-1).flip(-2)
if tta == 'from_logits':
mean_logits = torch.mean(torch.stack([logits, logits_lr, logits_ud, logits_lrud]), dim=0)
pred = act(mean_logits)
elif tta == 'from_preds':
pred_lr = act(logits_lr)
pred_ud = act(logits_ud)
pred_lrud = act(logits_lrud)
pred = torch.mean(torch.stack([pred, pred_lr, pred_ud, pred_lrud]), dim=0)
else: raise NotImplementedError
pred = pred.detach().cpu().numpy()[-1] # this takes last channel in multi-class, ok for 2-class
# Orders: 0: NN, 1: Bilinear(default), 2: Biquadratic, 3: Bicubic, 4: Biquartic, 5: Biquintic
pred = resize(pred, output_shape=original_sz, order=3)
full_pred = np.zeros_like(mask, dtype=float)
full_pred[coords_crop[0]:coords_crop[2], coords_crop[1]:coords_crop[3]] = pred
full_pred[~mask.astype(bool)] = 0
full_pred_bin = full_pred > bin_thresh
return full_pred, full_pred_bin
if __name__ == '__main__':
args = parser.parse_args()
if args.device.startswith("cuda"):
# In case one has multiple devices, we must first set the one
# we would like to use so pytorch can find it.
os.environ['CUDA_VISIBLE_DEVICES'] = args.device.split(":",1)[1]
if not torch.cuda.is_available():
raise RuntimeError("cuda is not currently available!")
print(f"* Running prediction on device '{args.device}'...")
device = torch.device("cuda")
else: #cpu
device = torch.device(args.device)
bin_thresh = args.bin_thresh
tta = args.tta
model_name = 'wnet'
model_path = args.model_path
im_path = args.im_path
im_loc = osp.dirname(im_path)
im_name = im_path.rsplit('/', 1)[-1]
mask_path = args.mask_path
result_path = args.result_path
if result_path is None:
result_path = im_loc
im_path_out = osp.join(result_path, im_name.rsplit('.', 1)[-2]+'_seg.png')
im_path_out_bin = osp.join(result_path, im_name.rsplit('.', 1)[-2]+'_bin_seg.png')
else:
os.makedirs(result_path, exist_ok=True)
im_path_out = osp.join(result_path, im_name.rsplit('.', 1)[-2]+'_seg.png')
im_path_out_bin = osp.join(result_path, im_name.rsplit('.', 1)[-2] + '_bin_seg.png')
im_size = tuple([int(item) for item in args.im_size.split(',')])
if isinstance(im_size, tuple) and len(im_size)==1:
tg_size = (im_size[0], im_size[0])
elif isinstance(im_size, tuple) and len(im_size)==2:
tg_size = (im_size[0], im_size[1])
else:
sys.exit('im_size should be a number or a tuple of two numbers')
print('* Segmenting image ' + im_path)
img = Image.open(im_path)
if mask_path is None:
print('* FOV mask not provided, generating it')
mask = get_fov(img)
print('* FOV mask generated')
else: mask = Image.open(mask_path).convert('L')
mask = np.array(mask).astype(bool)
img, coords_crop = crop_to_fov(img, mask)
original_sz = img.size[1], img.size[0] # in numpy convention
rsz = p_tr.Resize(tg_size)
tnsr = p_tr.ToTensor()
tr = p_tr.Compose([rsz, tnsr])
im_tens = tr(img) # only transform image
print('* Instantiating model = ' + str(model_name))
model = get_arch(model_name).to(device)
if model_name == 'wnet': model.mode='eval'
print('* Loading trained weights from ' + model_path)
model, stats = load_model(model, model_path, device)
model.eval()
print('* Saving prediction to ' + im_path_out)
start_time = time.perf_counter()
full_pred, full_pred_bin = create_pred(model, im_tens, mask, coords_crop, original_sz, bin_thresh=bin_thresh, tta=tta)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
imsave(im_path_out, img_as_ubyte(full_pred))
imsave(im_path_out_bin, img_as_ubyte(full_pred_bin))
print('Done, time spent = {:.3f} secs'.format(time.perf_counter() - start_time))
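# --- Hedged usage example (illustration only) ---
# A typical invocation might look like the following; the script name and the
# image/output paths are placeholders, not part of the repository layout:
#
#   python predict_one_image.py --model_path experiments/wnet_drive \
#       --im_path data/example_fundus.png --result_path results \
#       --tta from_preds --bin_thresh 0.4196 --device cpu
#
# With --result_path given, this would write results/example_fundus_seg.png
# (soft prediction) and results/example_fundus_bin_seg.png (binarized at
# --bin_thresh).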
|
495793
|
import os
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from bop_toolkit_lib import pycoco_utils
import argparse
from bop_toolkit_lib import config
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
# PARAMETERS (some can be overwritten by the command line arguments below).
################################################################################
p = {
# Minimum visible surface fraction of a valid GT pose.
# -1 == k most visible GT poses will be considered, where k is given by
# the "inst_count" item loaded from "targets_filename".
'visib_gt_min': -1,
# Names of files with detection results for which to calculate the Average Precisions
# (assumed to be stored in folder p['results_path']).
'result_filenames': [
'json/file/with/coco/results',
],
# Folder with results to be evaluated.
'results_path': config.results_path,
# Folder for the calculated pose errors and performance scores.
'eval_path': config.eval_path,
# Folder with BOP datasets.
'datasets_path': config.datasets_path,
# Annotation type that should be evaluated. Can be 'segm' or 'bbox'.
'ann_type': 'segm',
# bbox type. Options: 'modal', 'amodal'.
'bbox_type': 'amodal',
# File with a list of estimation targets to consider. The file is assumed to
# be stored in the dataset folder.
'targets_filename': 'test_targets_bop19.json',
}
################################################################################
# Command line arguments.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--result_filenames',
default=','.join(p['result_filenames']),
help='Comma-separated names of files with results.')
parser.add_argument('--results_path', default=p['results_path'])
parser.add_argument('--eval_path', default=p['eval_path'])
parser.add_argument('--targets_filename', default=p['targets_filename'])
parser.add_argument('--ann_type', default=p['ann_type'])
parser.add_argument('--bbox_type', default=p['bbox_type'])
args = parser.parse_args()
p['result_filenames'] = args.result_filenames.split(',')
p['results_path'] = str(args.results_path)
p['eval_path'] = str(args.eval_path)
p['targets_filename'] = str(args.targets_filename)
p['ann_type'] = str(args.ann_type)
p['bbox_type'] = str(args.bbox_type)
# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:
misc.log('===========')
misc.log('EVALUATING: {}'.format(result_filename))
misc.log('===========')
# Parse info about the method and the dataset from the filename.
result_name = os.path.splitext(os.path.basename(result_filename))[0]
result_info = result_name.split('_')
method = str(result_info[0])
dataset_info = result_info[1].split('-')
dataset = str(dataset_info[0])
split = str(dataset_info[1])
split_type = str(dataset_info[2]) if len(dataset_info) > 2 else None
# Load dataset parameters.
dp_split = dataset_params.get_split_params(
p['datasets_path'], dataset, split, split_type)
model_type = 'eval'
dp_model = dataset_params.get_model_params(
p['datasets_path'], dataset, model_type)
# Checking coco result file
check_passed,_ = inout.check_coco_results(os.path.join(p['results_path'], result_filename), ann_type=p['ann_type'])
if not check_passed:
misc.log('Please correct the coco result format of {}'.format(result_filename))
exit()
    # Load coco results
misc.log('Loading coco results...')
coco_results = inout.load_json(os.path.join(p['results_path'], result_filename), keys_to_int=True)
# Load the estimation targets.
targets = inout.load_json(os.path.join(dp_split['base_path'], p['targets_filename']))
# Organize the targets by scene and image.
misc.log('Organizing estimation targets...')
targets_org = {}
for target in targets:
targets_org.setdefault(target['scene_id'], {}).setdefault(target['im_id'], {})
# Organize the results by scene.
misc.log('Organizing estimation results...')
results_org = {}
for result in coco_results:
if (p['ann_type'] == 'bbox' and result['bbox']) or (p['ann_type'] == 'segm' and result['segmentation']):
results_org.setdefault(result['scene_id'], []).append(result)
if not results_org:
misc.log('No valid coco results for annotation type: {}'.format(p['ann_type']))
misc.log('Merging coco annotations and predictions...')
# Merge coco scene annotations and results
for i, scene_id in enumerate(dp_split['scene_ids']):
scene_coco_ann_path = dp_split['scene_gt_coco_tpath'].format(scene_id=scene_id)
if p['ann_type'] == 'bbox' and p['bbox_type'] == 'modal':
scene_coco_ann_path = scene_coco_ann_path.replace('scene_gt_coco', 'scene_gt_coco_modal')
scene_coco_ann = inout.load_json(scene_coco_ann_path, keys_to_int=True)
scene_coco_results = results_org[scene_id] if scene_id in results_org else []
# filter target image ids
target_img_ids = targets_org[scene_id].keys()
scene_coco_ann['images'] = [img for img in scene_coco_ann['images'] if img['id'] in target_img_ids]
scene_coco_ann['annotations'] = [ann for ann in scene_coco_ann['annotations'] if ann['image_id'] in target_img_ids]
scene_coco_results = [res for res in scene_coco_results if res["image_id"] in target_img_ids]
if i == 0:
dataset_coco_ann = scene_coco_ann
dataset_coco_results = scene_coco_results
else:
dataset_coco_ann, image_id_offset = pycoco_utils.merge_coco_annotations(dataset_coco_ann, scene_coco_ann)
dataset_coco_results = pycoco_utils.merge_coco_results(dataset_coco_results, scene_coco_results, image_id_offset)
#initialize COCO ground truth api
cocoGt=COCO(dataset_coco_ann)
cocoDt=cocoGt.loadRes(dataset_coco_results)
# running evaluation
cocoEval = COCOeval(cocoGt, cocoDt, p['ann_type'])
cocoEval.params.imgIds = sorted(cocoGt.getImgIds())
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
res_type = ['AP', 'AP50', 'AP75', 'AP_small', 'AP_medium', 'AP_large',
'AR1', 'AR10', 'AR100', 'AR_small', 'AR_medium', 'AR_large']
coco_results = {res_type[i]:stat for i, stat in enumerate(cocoEval.stats)}
# Save the final scores.
os.makedirs(os.path.join(p['eval_path'], result_name), exist_ok=True)
final_scores_path = os.path.join(p['eval_path'], result_name, 'scores_bop22_coco_{}.json'.format(p['ann_type']))
if p['ann_type'] == 'bbox' and p['bbox_type'] == 'modal':
final_scores_path = final_scores_path.replace('.json', '_modal.json')
inout.save_json(final_scores_path, coco_results)
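# --- Hedged note on the expected result-file naming (illustration only) ---
# The parsing above splits the file name on '_' and '-', so it assumes names of
# the form "<method>_<dataset>-<split>[-<split_type>].json". For example, a
# hypothetical "maskrcnn_ycbv-test.json" would yield method='maskrcnn',
# dataset='ycbv', split='test' and split_type=None.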
|
495866
|
import copy
import datetime
import contextlib
import six
import traitlets
import ipywidgets
from .. import types
# Copy the old implementation of `link` from traitlets 4.3.3.
# traitlets >= 5.0.0 introduce a `!=` comparison when updating,
# which fails on types that overload equality checking (like NumPy arrays, Images, etc.).
# https://github.com/ipython/traitlets/blob/5.0.0/traitlets/traitlets.py#L296
# For our use here, the simpler older version works fine.
# BEGIN copied from
# https://github.com/ipython/traitlets/blob/4.3.3/traitlets/traitlets.py#L243-L302
def _validate_link(*tuples):
"""Validate arguments for traitlet link functions"""
for t in tuples:
if not len(t) == 2:
raise TypeError(
"Each linked traitlet must be specified as (HasTraits, 'trait_name'), not %r"
% t
)
obj, trait_name = t
if not isinstance(obj, traitlets.HasTraits):
raise TypeError("Each object must be HasTraits, not %r" % type(obj))
if trait_name not in obj.traits():
raise TypeError("%r has no trait %r" % (obj, trait_name))
class SimpleLink:
"""Link traits from different objects together so they remain in sync.
Parameters
----------
source : (object / attribute name) pair
target : (object / attribute name) pair
"""
updating = False
def __init__(self, source, target):
_validate_link(source, target)
self.source, self.target = source, target
try:
setattr(target[0], target[1], getattr(source[0], source[1]))
finally:
source[0].observe(self._update_target, names=source[1])
target[0].observe(self._update_source, names=target[1])
@contextlib.contextmanager
def _busy_updating(self):
self.updating = True
try:
yield
finally:
self.updating = False
def _update_target(self, change):
if self.updating:
return
with self._busy_updating():
setattr(self.target[0], self.target[1], change.new)
def _update_source(self, change):
if self.updating:
return
with self._busy_updating():
setattr(self.source[0], self.source[1], change.new)
def unlink(self):
self.source[0].unobserve(self._update_target, names=self.source[1])
self.target[0].unobserve(self._update_source, names=self.target[1])
self.source, self.target = None, None
# END copy
class ReadOnlyDirectionalLink:
"Like ipywidgets.dlink, but forcibly sets the values of read-only traits on `target`"
# Copied with modifications from https://github.com/ipython/traitlets/blob/5.0.5/traitlets/traitlets.py#L317-L364
# NOTE: we don't just subclass `ipywidgets.dlink` because the internal implementations change a bit
# between 4.3.3 and 5.x
updating = False
def __init__(self, source, target, transform=None):
self._transform = transform if transform else lambda x: x
_validate_link(source, target)
self.source, self.target = source, target
src_obj, src_trait = source
try:
self._update({"new": getattr(src_obj, src_trait)})
finally:
src_obj.observe(self._update, names=src_trait)
@contextlib.contextmanager
def _busy_updating(self):
self.updating = True
try:
yield
finally:
self.updating = False
def _update(self, change):
if self.updating:
return
with self._busy_updating():
target, target_trait = self.target
target.set_trait(target_trait, self._transform(change["new"]))
def unlink(self):
self.source[0].unobserve(self._update, names=self.source[1])
class ProxytypeInstance(traitlets.Instance):
"""
Trait type that tries to promote values to a given Proxytype
Example
-------
>>> import traitlets
>>> import descarteslabs.workflows as wf
>>> from descarteslabs.workflows.interactive import ProxytypeInstance
>>> class ProxyTraits(traitlets.HasTraits): # doctest: +SKIP
... int_list = ProxytypeInstance(klass=wf.List[wf.Int]) # doctest: +SKIP
... @traitlets.observe('int_list') # doctest: +SKIP
... def int_list_changed(self, change): # doctest: +SKIP
... print(f"new int list: {change['new']}") # doctest: +SKIP
>>> pt = ProxyTraits() # doctest: +SKIP
>>> pt.int_list = [1, 2, 3] # doctest: +SKIP
new int list: <descarteslabs.workflows.types.containers.list_.List[Int] object at 0x...>
>>> pt.int_list = [1, 2, "not an int"] # doctest: +SKIP
TraitError: For parameter 'int_list', could not promote [1, 2, 'not an int']
to <class 'descarteslabs.workflows.types.containers.list_.List[Int]'>: List[Int]:
Expected iterable values of type <class 'descarteslabs.workflows.types.primitives.number.Int'>,
but for item 2, got 'not an int'
"""
def validate(self, obj, value):
if isinstance(value, self.klass):
return value
else:
try:
return self.klass._promote(value)
except types.ProxyTypeError as e:
raise traitlets.TraitError(
"For parameter {!r}, could not promote {!r} to {}: {}".format(
self.name, value, self.klass, e
)
)
py_type_to_trait = {
bool: traitlets.CBool,
int: traitlets.CInt,
float: traitlets.CFloat,
str: traitlets.CUnicode,
list: traitlets.List,
tuple: traitlets.Tuple,
dict: traitlets.Dict,
datetime.datetime: ipywidgets.trait_types.Datetime,
datetime.date: ipywidgets.trait_types.Date,
}
def obj_to_trait(obj, name):
"""
Construct a ``traitlets.TraitType`` instance suitable for holding ``obj`` as ``name``, based on its type.
If ``type(obj)`` is in the ``py_type_to_trait`` dict, it uses the associated trait type.
If ``obj`` is a `Proxytype`, it returns a `ProxytypeInstance` trait (which will take
new values and attempt to promote them to that `Proxytype`.)
Otherwise, if ``obj`` is a HasTraits instance, it will use whatever trait type that object
uses for its ``value`` trait.
Parameters
----------
obj: bool, int, float, str, list, tuple, dict, datetime.datetime, datetime.date, Proxytype, or traitlets.HasTraits
Create a trait to hold this value
name: str
Hold the trait under this name
Returns
-------
trait: traitlets.TraitType
Instantiated TraitType that could be added to a HasTraits class.
Raises
------
TypeError:
If there's no registered TraitType to hold this type of value.
If ``obj`` is a HasTraits instance without a ``value`` trait.
"""
type_ = type(obj)
try:
return py_type_to_trait[type_](name=name)
except KeyError:
if isinstance(obj, types.Proxytype):
return ProxytypeInstance(name=name, klass=type_)
elif isinstance(obj, traitlets.HasTraits):
try:
# get the descriptor (trait object) for the `value` trait
trait = type_.value
except AttributeError:
raise TypeError(
"Unsupported widget type {!r}, "
"since it has no `value` trait".format(type_.__name__)
)
else:
new_trait = copy.deepcopy(trait) # hope this actually works...
new_trait.name = name
return new_trait
raise TypeError(
"Cannot accept parameter of type {!r}. Must be a Proxytype, or one of: {}".format(
type_.__name__, ", ".join(t.__name__ for t in py_type_to_trait)
)
)
py_type_to_widget = {
bool: lambda **kwargs: ipywidgets.Checkbox(
layout=ipywidgets.Layout(width="initial"), indent=False, **kwargs
),
int: ipywidgets.IntText,
float: ipywidgets.FloatText,
str: lambda **kwargs: ipywidgets.Text(continuous_update=False, **kwargs),
datetime.datetime: ipywidgets.DatePicker,
datetime.date: ipywidgets.DatePicker,
}
def obj_to_widget(value, **kwargs):
"""
Construct an ipywidget for representing ``value``.
``type(value)`` must be in the ``py_type_to_widget`` dict.
Parameters
----------
value: bool, int, float, str, datetime.datetime, datetime.date
The value to make a widget control for
**kwargs
Additional keyword arguments to pass to the widget constructor
Returns
-------
widget: ipywidgets.Widget
Widget initialized with ``value``
Raises
------
TypeError:
If there's no registered widget type to represent this type of value.
"""
type_ = type(value)
try:
widget_type = py_type_to_widget[type_]
except KeyError:
raise TypeError("No widget to display a {!r}".format(type_.__name__))
else:
return widget_type(value=value, **kwargs)
class ParameterSet(traitlets.HasTraits):
"""
Parameters for a `WorkflowsLayer`, which updates the layer when new values are assigned.
A `ParameterSet` is constructed automatically when calling `.Image.visualize` and added to the `WorkflowsLayer`;
you shouldn't construct one manually.
You can access a widget for interactively controlling these parameters at `widget`.
Attributes
----------
widget: ipywidgets.Widget
A widget showing a table of controls, linked to this `ParameterSet`.
Updating the controls causes the map to update.
Example
-------
>>> import descarteslabs.workflows as wf
>>> imgs = wf.ImageCollection.from_id(
... "sentinel-1:GRD", start_datetime=wf.parameter("start", wf.Datetime)
... )
>>> filtered = imgs.filter(lambda img: img.properties['pass'] == wf.parameter("pass_dir", wf.Str))
>>> composite = imgs.mean(axis="images").pick_bands("vv")
>>> lyr = composite.visualize("vv mean", start=wf.Datetime(2018), pass_dir="ASCENDING") # doctest: +SKIP
>>> params = lyr.parameters # doctest: +SKIP
>>> # ^ get the ParameterSet for the layer
>>> params.pass_dir # doctest: +SKIP
"ASCENDING"
>>> params.pass_dir = "DESCENDING" # doctest: +SKIP
>>> # ^ this updates the layer on the map
>>> params.start = "2019-01-01" # doctest: +SKIP
>>> # ^ as does this
>>> params.link("start", my_ipywidget) # doctest: +SKIP
>>> # ^ this links the "start" parameter to an ipywidget's value
Notes
-----
The names and types of fields on a `ParameterSet` are fixed,
and can only be changed using `update`. This means that on
a `ParameterSet` that only has the field ``x``, which holds a float,
``params.x = "foo"`` will fail (wrong type), as will ``params.y = "bar"``
(``y`` doesn't exist).
When `.Image.visualize` creates a `ParameterSet` for you, it adds fields for
whichever parameter the imagery depends on. If ``img`` depends on
``wf.widgets.slider("slidey", 0, 1)`` and ``wf.parameter("td", wf.Timedelta)``
for example, then ``img.visualize("my layer", td=wf.Timedelta(days=2))``
will create the fields ``slidey`` and ``td``. More importantly, it infers the *type*
of those fields from their parameter types, so ``slidey`` would only accept floats,
and ``td`` would only accept Timedeltas.
    Therefore, if you experience a ``TypeError`` assigning to a `ParameterSet` field,
you may need to change types of the initial parameters ``img`` depends on.
"""
def __init__(self, notify_object, notify_name, **traits):
"""
You shouldn't need to construct a ParameterSet manually, but here's how you would:
Parameters
----------
notify_object: traitlets.HasTraits instance
The object to notify when any of the traits on this `ParameterSet` change
notify_name: str
The ``name`` to use in the change notification sent to that object
**traits: traitlets.TraitType instances
The traits to add to this `ParameterSet`
"""
self._notify_object = notify_object
self._notify_name = notify_name
self._links = {}
self.add_traits(**traits)
self._order = tuple(traits)
self.widget = self.make_widget()
@traitlets.observe(traitlets.All, type=traitlets.All)
def _on_change(self, change):
self._notify_object.notify_change(
traitlets.Bunch(change, name=self._notify_name, key=change["name"])
)
# ^ NOTE(gabe): Bunch is workaround for traitlets bug https://github.com/ipython/traitlets/pull/536
new_contents = self._make_widget_contents()
self.widget.children = new_contents
def link(self, name, target, attr="value"):
"""
Link an attribute to an ipywidget (or other object).
If a link to the attribute was previously created, it is unlinked.
Parameters
----------
name: str
The name of the parameter to link
target: ipywidgets.Widget, any traitlets.HasTraits instance, or None
The object to link to.
If None, any existing link to ``name`` is removed.
attr: str, default "value"
The name of the attribute on ``target`` to link to.
Defaults to ``"value"``, since that works for most ipywidgets.
Example
-------
>>> import descarteslabs.workflows as wf
>>> from ipywidgets import FloatSlider
>>> img = wf.Image.from_id("landsat:LC08:PRE:TOAR:meta_LC80330352016022_v1") # doctest: +SKIP
>>> img = img.pick_bands("red") # doctest: +SKIP
>>> masked_img = img.mask(img > wf.parameter("threshold", wf.Float)) # doctest: +SKIP
>>> layer = masked_img.visualize("sample", colormap="plasma", threshold=0.07) # doctest: +SKIP
>>> layer.parameters.link("threshold", my_ipywidget) # doctest: +SKIP
>>> # ^ links the "threshold" parameter to an ipywidget's value
>>> layer2 = masked_img.visualize("sample", colormap="plasma", threshold=0.3) # doctest: +SKIP
>>> layer2.parameters.link("threshold", layer.parameters, attr="threshold") # doctest: +SKIP
>>> # ^ now the `threshold` parameter is linked between `layer` and `layer2`
>>> widget = FloatSlider(min=0, max=1) # doctest: +SKIP
>>> layer2.parameters.link("threshold", widget) # doctest: +SKIP
>>> # ^ now `threshold` is linked to the widget, and the link is broken to `layer`
"""
current_link = self._links.get(name, None)
if current_link is not None:
current_link.unlink()
if target is not None:
with self.hold_trait_notifications():
link_type = (
ReadOnlyDirectionalLink
if getattr(type(self), name).read_only
else SimpleLink
)
link = link_type((target, attr), (self, name))
self._links[name] = link
else:
if current_link is not None:
del self._links[name]
def to_dict(self):
"""
Key-value pairs of the parameters, in order.
Example
-------
>>> import descarteslabs.workflows as wf
>>> img = wf.Image.from_id("landsat:LC08:PRE:TOAR:meta_LC80330352016022_v1") # doctest: +SKIP
>>> img = img.pick_bands("red") # doctest: +SKIP
>>> masked_img = img.mask(img > wf.parameter("threshold", wf.Float)) # doctest: +SKIP
>>> layer = masked_img.visualize("sample", colormap="plasma", threshold=0.07) # doctest: +SKIP
>>> layer.parameters.to_dict() # doctest: +SKIP
{'threshold': 0.07}
"""
# return {name: getattr(self, name) for name in self._order}
# since `add_traits`, `remove_traits` are still in the public API,
# and we can't atomically `update` either, we must treat `self._order`
# as just a "guideline" and handle extra names that should or shouldn't
# be in it.
names = set(self.trait_names())
dct = {}
for name in self._order:
try:
dct[name] = getattr(self, name)
except AttributeError:
pass
else:
names.remove(name)
for name in names:
dct[name] = getattr(self, name)
return dct
def _make_widget_contents(self, skip=None):
if skip is None:
skip = ()
items = tuple((k, v) for k, v in six.iteritems(self.to_dict()) if k not in skip)
if not items:
return []
names, values = zip(*items)
labels = []
widgets = []
for name, value in zip(names, values):
link = self._links.get(name, None)
if link is not None and link.source is not None:
widget = link.source[0]
if not isinstance(widget, ipywidgets.Widget):
raise TypeError(
"The parameter {!r} is already linked to the non-widget {}. "
"To auto-generate controls for a ParameterSet, existing links must only be to widgets. "
"Instead, manually construct this link (with `ipywidgets.link()`, not `self.link()`) "
"*after* auto-generating the controls.".format(name, widget)
)
label = getattr(widget, "_label", name) or name
# ^ HACK: this magic attribute comes from `interactive/widgets`, where we stick any `label`
# on the widget object as `_label`. Otherwise, here in `ParameterSet`, we wouldn't know
# the longform label for the widget. And if we had actually set the label in `.description`,
# then we'd end up with two labels for the widget (one from ParameterSet and one generated by the
# widget itself).
else:
try:
widget = obj_to_widget(value)
except TypeError:
widget = ipywidgets.Label(
"No widget for {!r}".format(type(value).__name__)
)
else:
self.link(name, widget)
label = name
widgets.append(widget)
labels.append(ipywidgets.Label(label))
labels_col = ipywidgets.VBox(labels)
widgets_col = ipywidgets.VBox(widgets)
control = ipywidgets.HBox([labels_col, widgets_col])
contents = [control]
try:
title = self._notify_object.name
except AttributeError:
pass
else:
contents.insert(0, ipywidgets.HTML("<b>{}</b>".format(title)))
return contents
def make_widget(self, skip=None):
"""
Make a widget for controlling these parameters.
Widgets that were passed in or linked are displayed.
For values that aren't already linked to a widget, an appropriate
widget type is chosen if possible.
Note that this widget can become out of date and unlinked
once `update` is called; use the `widget` property for an
always-up-to-date widget.
Parameters
----------
skip: list[str]
Sequence of parameter names to *not* include in the widget
Returns
-------
widget: ipywidgets.Widget or None
A widget showing a table of controls, linked to this `ParameterSet`.
Updating the widgets causes the map to update.
If there are no parameters to display, returns None.
Example
-------
>>> import traitlets
>>> import descarteslabs.workflows as wf
>>> img = wf.Image.from_id("landsat:LC08:PRE:TOAR:meta_LC80330352016022_v1") # doctest: +SKIP
>>> img = img.pick_bands("red") # doctest: +SKIP
>>> masked_img = img.mask(img > wf.parameter("threshold", wf.Float)) # doctest: +SKIP
>>> layer = masked_img.visualize("sample", colormap="plasma", threshold=0.07, sample_param=0.0) # doctest: +SKIP
>>> layer.parameters.make_widget(skip=["sample_param"]) # doctest: +SKIP
>>> # ^ displays a widget for modifying a layer's parameters, optionally skipping params
"""
return ipywidgets.VBox(self._make_widget_contents(skip=skip))
def update(self, **new_values):
"""
Update the `ParameterSet` with new values.
New parameters are added as fields on the `ParameterSet`,
with their trait type inferred from the value.
Current parameters that are not present in ``new_values``
are removed from the `ParameterSet`.
Passing a value of a different type to a current parameter will change
its trait type.
If a value is an ipywidgets Widget, it will be linked (via its ``"value"`` attribute)
to that parameter. If a parameter was previously linked
to a widget, and a different widget instance (or non-widget) is passed
for its new value, the old widget is automatically unlinked.
If the same widget instance is passed as is already linked, no change occurs.
The `ParameterSet` will be reordered to the order of these new values.
Parameters
----------
**new_values: JSON-serializable value, Proxytype, or ipywidgets.Widget
Parameter names to new values. Values can be Python types,
`Proxytype` instances, or ``ipywidgets.Widget`` instances.
Example
-------
>>> import descarteslabs.workflows as wf
>>> from ipywidgets import FloatSlider
>>> img = wf.Image.from_id("landsat:LC08:PRE:TOAR:meta_LC80330352016022_v1") # doctest: +SKIP
>>> img = img.pick_bands("red") # doctest: +SKIP
>>> masked_img = img.mask(img > wf.parameter("threshold", wf.Float)) # doctest: +SKIP
>>> layer = masked_img.visualize("sample", colormap="plasma", threshold=0.07) # doctest: +SKIP
>>> scaled_img = img * wf.parameter("scale", wf.Float) + wf.parameter("offset", wf.Float) # doctest: +SKIP
>>> with layer.hold_trait_notifications(): # doctest: +SKIP
... layer.imagery = scaled_img # doctest: +SKIP
... layer.parameters.update(scale=FloatSlider(min=0, max=10, value=2), offset=2.5) # doctest: +SKIP
>>> # ^ re-use the same layer instance for a new Image with different parameters
"""
old_order = self._order
self._order = tuple(new_values)
self._update_traits_for_new_values(new_values)
self._assign_new_values(new_values)
if self._order != old_order:
# in case _just_ the order changed, but no values, ensure the
# widget is still redrawn
self.widget.children = self._make_widget_contents()
# def update_values(self, **new_values):
# """
# Update the current traits of the `ParameterSet` with new values.
# Unlike `update`, this does not add, remove, or change the type of any fields.
# If a value is an ipywidgets Widget, it will be linked (via its ``"value"`` attribute)
# to that parameter. If a parameter was previously linked
# to a widget, and a different widget instance (or non-widget) is passed
# for its new value, the old widget is automatically unlinked.
# If the same widget instance is passed as is already linked, no change occurs.
# Parameters
# ----------
# **new_values: JSON-serializable value, Proxytype, or ipywidgets.Widget
# Parameter names to new values. Values can be Python types,
# `Proxytype` instances, or ``ipywidgets.Widget`` instances.
# All the names must already be traits of this `ParameterSet`,
# and all values must be compatible with those current trait types.
# Raises
# ------
# ValueError:
# If given a kwarg name that isn't already a trait
# TypeError:
# If given a value that's incompatible with the current trait type for that name
# """
# for name in new_values:
# if not self.has_trait(name):
# raise ValueError(
# f"The trait {name!r} does not exist in {self.trait_names()!r}"
# )
# self._assign_new_values(new_values)
def _update_traits_for_new_values(self, new_values):
"""
Add, remove, and change the type of traits to be compatible with new values.
Parameters
----------
new_values: dict[str, any]
Parameter names to new values. Values can be Python types,
`Proxytype` instances, or ``ipywidgets.Widget`` instances.
"""
current_values = self.to_dict()
current_traits = self.traits()
current_names = six.viewkeys(current_traits)
new_names = six.viewkeys(new_values)
add_names = new_names - current_names
remove_names = current_names - new_names
new_traits = {}
# check for same name, but trait type has changed
for changed_name in new_names - add_names - remove_names:
old_value = current_values[changed_name]
new_value = new_values[changed_name]
if new_value is old_value:
# definitely don't need to change anything
continue
old_trait_type = type(current_traits[changed_name])
new_trait = obj_to_trait(new_value, changed_name) # todo handle error?
if old_trait_type != type(new_trait) or isinstance(
new_value, traitlets.HasTraits
):
# by golly, the trait type has changed!
# NOTE: we always replace when given a HasTraits instance, because though
# the type of the trait descriptor may not have changed, all its attributes could have.
remove_names.add(changed_name)
new_traits[changed_name] = new_trait
for name in remove_names:
# unlink names we're removing
self.link(name, None)
self.remove_traits(*remove_names)
new_traits.update(
{name: obj_to_trait(new_values[name], name) for name in add_names}
)
self.add_traits(**new_traits)
def _assign_new_values(self, new_values):
"""
Assign new values to traits, directly or by linking
If given an ipywidget as a value, it's linked to that trait,
and any previous link is unlinked
Parameters
----------
new_values: dict[str, any]
Parameter names to new values. Values can be Python types,
`Proxytype` instances, or ``ipywidgets.Widget`` instances.
"""
with self.hold_trait_notifications():
for name, value in six.iteritems(new_values):
current_link = self._links.get(name, None)
if current_link is not None:
if value is not current_link.source[0]:
# a new widget (or non-widget) is being used for this name; unlink the old one
current_link.unlink()
del self._links[name]
current_link = None # we'll check this below
if isinstance(value, traitlets.HasTraits):
if current_link is None:
link_type = (
ReadOnlyDirectionalLink
if getattr(type(self), name).read_only
else SimpleLink
)
self._links[name] = link_type((value, "value"), (self, name))
self._notify_trait(name, getattr(self, name), value.value)
else:
self.set_trait(name, value)
def add_traits(self, **traits):
"""
Dynamically add trait attributes to the HasTraits instance.
If you are manipulating a ParameterSet generated from a layer, instead
use :meth:`update <descarteslabs.workflows.interactive.ParameterSet.update>`,
which handles adding and removing traits in a more declarative way.
Example
-------
>>> import traitlets
>>> import descarteslabs.workflows as wf
>>> class Listener(traitlets.HasTraits):
... @traitlets.observe("param")
... def handler(self, change):
... print(change['key'])
... print(change)
>>> listener = Listener()
>>> ps = wf.interactive.ParameterSet(listener, "param", foo=traitlets.Float()) # doctest: +SKIP
>>> ps.foo = 1.1 # doctest: +SKIP
foo
{'name': 'param',
'old': 0.0,
'new': 1.1,
'owner': ParameterSet({'foo': 1.1}),
'type': 'change',
'key': 'foo'
}
>>> ps.bar = "baz" # doctest: +SKIP
>>> # ^ nothing is printed, `bar` is not a trait
>>> ps.add_traits(bar=traitlets.Unicode()) # doctest: +SKIP
>>> ps.bar # doctest: +SKIP
''
>>> ps.bar = "quix" # doctest: +SKIP
bar
{'name': 'param',
'old': '',
'new': 'quix',
'owner': ParameterSet({'bar': 'quix', 'foo': 1.1}),
'type': 'change',
'key': 'bar'
}
"""
# Normally, `HasTraits.add_traits` dynamically constructs a new type
# that's a subclass of `type(self)`, appends *just* the new traits to that class's `__dict__`,
# and modifies `self.__class__` to point at the new subtype, using inheritance
# to make access on the existing traits fall back on the parent class.
# This makes removing traits extremely difficult, since the trait you want to remove
# may be defined some arbitrary depth up the MRO---and there's no way to know if it's safe
# to mutate that parent class, or if someone else is inheriting from it too.
# Since we'd like to be able to remove traits from a `ParameterSet`, we override `add_traits`
# to add the guarantee that instead of making a chain of subclasses for each new trait,
# *any `ParameterSet` instance with >0 traits is a direct child class of `ParameterSet`,
# and all `TraitType`s are set on that class's `__dict__`*.
# Achieving this is pretty simple: rather than having our new type inherit from our current type
# (causing chaining), we just always inherit from the base `ParameterSet`. And rather than just
# adding the new `traits` to that new type's `__dict__`, we also copy in all the traits defined
# on our current type's `__dict__`, so everything is in one place, and easy to remove.
# Copied and modified from:
# https://github.com/ipython/traitlets/blob/41551bc8b30ccc28af738e93615e3408cb94d5d3/traitlets/traitlets.py#L1405-L1415
# NOTE(gabe): there's a simpler(-ish) way we could do this, by just mutating `type(self)` directly
# and `setattr`ing the new traits in, but I was worried that if the base implementation of `add_traits`
# changed, that could break. Either way, this is rather brittle to traitlets changes, but fully overriding
# the method seems less prone to surprises (so long as the `MetaHasDescriptors` metaclass doesn't change much).
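        # Illustrative sketch of that guarantee (assumed usage, not executed here):
        #     ps.add_traits(a=traitlets.Int())
        #     ps.add_traits(b=traitlets.Int())
        #     type(ps).__bases__ == (ParameterSet,)   # always a direct child of ParameterSet
        #     {"a", "b"} <= set(vars(type(ps)))       # all traits live on one class __dict__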
cls = self.__class__
attrs = {"__module__": cls.__module__}
if hasattr(cls, "__qualname__"):
# __qualname__ introduced in Python 3.3 (see PEP 3155)
attrs["__qualname__"] = cls.__qualname__
attrs.update(self.traits())
# ^ CHANGED: add in current traits to new type's `__dict__`
attrs.update(traits)
self.__class__ = type(cls.__name__, (ParameterSet,), attrs)
# ^ CHANGED: always use ParameterSet as base class to guarantee 1-depth inheritance (no subclass chaining)
for trait in traits.values():
trait.instance_init(self)
def remove_traits(self, *names):
"""
Remove traits that were dynamically added to the HasTraits instance
If you are manipulating a ParameterSet generated from a layer, instead
use :meth:`update <descarteslabs.workflows.interactive.ParameterSet.update>`,
which handles adding and removing traits in a more declarative way.
Example
-------
>>> import traitlets
>>> import descarteslabs.workflows as wf
>>> class Listener(traitlets.HasTraits):
... @traitlets.observe("param")
... def handler(self, change):
... print(change['key'])
... print(change)
>>> listener = Listener()
>>> ps = wf.interactive.ParameterSet(listener, "param", foo=traitlets.Float()) # doctest: +SKIP
>>> ps.foo = 1.1 # doctest: +SKIP
foo
{'name': 'param',
'old': 0.0,
'new': 1.1,
'owner': ParameterSet({'foo': 1.1}),
'type': 'change',
'key': 'foo'
}
>>> ps.add_traits(bar=traitlets.Unicode()) # doctest: +SKIP
>>> ps.bar = 'quix' # doctest: +SKIP
bar
{'name': 'param',
'old': '',
'new': 'quix',
'owner': ParameterSet({'bar': 'quix', 'foo': 1.1}),
'type': 'change',
'key': 'bar'
}
>>> ps.remove_traits("foo") # doctest: +SKIP
>>> ps.foo = 2.2 # doctest: +SKIP
>>> # ^ nothing is printed, `foo` is no longer a trait
>>> ps.to_dict() # doctest: +SKIP
{'bar': 'quix'}
"""
# Thanks to our guarantee from our `add_traits` that:
# - `type(self)` is a singleton class and nobody else cares about it
# - all relevant `TraitType`s are set on `type(self).__dict__` and nowhere else
# to remove traits... we just delete them from `type(self)`. Pretty simple.
if not names:
return
cls = type(self)
for name in names:
try:
old = self._trait_values.pop(name)
except KeyError:
raise ValueError("The trait {} does not exist on {}".format(name, self))
delattr(cls, name)
# to play by the rules (and make the map update on parameter deletion),
# we notify that the trait is deleted. but `delete` is a non-standard
# event, so don't expect anyone to be listening.
self.notify_change(
traitlets.Bunch(
name=name,
old=old,
new=traitlets.Undefined,
owner=self,
type="delete",
)
)
def __repr__(self):
return "{}({})".format(type(self).__name__, self.to_dict())
|
495870
|
import logging
INSTALLED_APPS = [
'huey.contrib.djhuey',
'djangoex.test_app',
]
HUEY = {
'name': 'test-django',
'consumer': {
'blocking': True, # Use blocking list pop instead of polling Redis.
'loglevel': logging.DEBUG,
'workers': 4,
'scheduler_interval': 1,
'simple_log': True,
},
}
SECRET_KEY = 'foo'
|
495880
|
import unittest
from lib.EnumStatus import Status
class Test_Enum_Status(unittest.TestCase):
def test_successor(self):
self.assertEqual(1, Status.CREATED.value)
self.assertEqual(Status.WORK, Status.CREATED.succ())
self.assertEqual(Status.DONE, Status.CREATED.succ().succ())
with self.assertRaises(IndexError):
Status.DELETED.succ()
|
495900
|
import openslide
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--source_image_path",
type=str,
default='/data/camelyon16_original/training/normal/normal_056.tif')
parser.add_argument("--x",
type=int,
default=120832)
parser.add_argument("--y",
type=int,
default=23808)
parser.add_argument("--w",
type=int,
default=2816)
parser.add_argument("--h",
type=int,
default=2816)
parser.add_argument("--output_stain_target_image_path",
type=str,
default='/data/camelyon16/stain_target_image.jpg')
args = parser.parse_args()
LEVEL = 0
slide = openslide.OpenSlide(args.source_image_path)
image = slide.read_region((args.y, args.x), LEVEL, (args.w, args.h)).convert('RGB')
image.save(args.output_stain_target_image_path)
|
495912
|
from typing import Optional
from primehub import Helpful, cmd, Module
class Secrets(Helpful, Module):
"""
Query secrets for Image Pull Secrets
"""
@cmd(name='list', description='List secrets')
def list(self) -> list:
"""
List secrets
:rtype: list
:returns: secrets
"""
query = """
{
secrets(where: { ifDockerConfigJson: true }) {
id
name
type
}
}
"""
results = self.request({}, query)
return results['data']['secrets']
@cmd(name='get', description='Get a secret by id', return_required=True)
def get(self, id: str) -> Optional[dict]:
"""
Get the secret by id
:type id: str
:param id: the id of a secret
:rtype: Optional[dict]
:returns: a secret
"""
secret = self.list()
s = [x for x in secret if x['id'] == id]
if s:
return s[0]
return None
def help_description(self):
return "Get a secret or list secrets"
|
495926
|
from .comparesettable import CompareSetTable
import pandas as pd
class TableSetComparer:
def __init__(self):
return
def run(self, compTables: list) -> CompareSetTable:
# create data lists
nameList = []
nucF1List = []
nucKappaList = []
relF1List = []
relKappaList = []
conF1List = []
conKappaList = []
attF1List = []
attKappaList = []
averageF1List = []
averageKappaList = []
for compTable in compTables:
ratios = compTable.matchingRatios
kappas = compTable.cohensKappas
nameList.append(compTable.name)
nucF1List.append(ratios["Nuclearity"])
nucKappaList.append(kappas["Nuclearity"])
relF1List.append(ratios["Relation"])
relKappaList.append(kappas["Relation"])
conF1List.append(ratios["Constituent"])
conKappaList.append(kappas["Constituent"])
attF1List.append(ratios["Attachment point"])
attKappaList.append(kappas["Attachment point"])
averageF1List.append(ratios["Average"])
averageKappaList.append(kappas["Average"])
# generate dataframe
df = pd.DataFrame({"Name": nameList,
"Nuclearity-Ratio": nucF1List,
"Nuclearity-Kappa": nucKappaList,
"Relation-Ratio": relF1List,
"Relation-Kappa": relKappaList,
"Constituent-Ratio": conF1List,
"Constituent-Kappa": conKappaList,
"AttachmentPoint-Ratio": attF1List,
"AttachmentPoint-Kappa": attKappaList,
"Average-Ratio": averageF1List,
"Average-Kappa": averageKappaList})
return CompareSetTable(df)
|
495938
|
import os
import resource
import nltk
import torch
from graph4nlp.pytorch.data.dataset import Text2TextDataItem
from graph4nlp.pytorch.inference_wrapper.generator_inference_wrapper import (
GeneratorInferenceWrapper,
)
from graph4nlp.pytorch.models.graph2seq import Graph2Seq
from args import get_args
from dataset import IWSLT14Dataset
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
def remove_bpe(str_with_subword):
if isinstance(str_with_subword, list):
return [remove_bpe(ss) for ss in str_with_subword]
symbol = "@@ "
return str_with_subword.replace(symbol, "").strip()
if __name__ == "__main__":
opt = get_args()
if opt["use_gpu"] != 0 and torch.cuda.is_available():
print("[ Using CUDA ]")
device = torch.device("cuda" if opt["gpu"] < 0 else "cuda:%d" % opt["gpu"])
else:
print("[ Using CPU ]")
device = torch.device("cpu")
model = Graph2Seq.load_checkpoint(
os.path.join("examples/pytorch/nmt/save", opt["name"]), "best.pth"
).to(device)
wrapper = GeneratorInferenceWrapper(
cfg=opt,
model=model,
dataset=IWSLT14Dataset,
data_item=Text2TextDataItem,
beam_size=3,
lower_case=True,
tokenizer=nltk.RegexpTokenizer(" ", gaps=True).tokenize,
)
output = wrapper.predict(
raw_contents=[
"wissen sie , eines der großen vern@@ ü@@ gen beim reisen und eine der freu@@ den \
bei der eth@@ no@@ graph@@ ischen forschung ist , gemeinsam mit den menschen \
zu leben , die sich noch an die alten tage erinnern können . die ihre \
vergangenheit noch immer im wind spüren , sie auf vom regen ge@@ gl@@ ä\
@@ t@@ teten st@@ einen berü@@ hren , sie in den bit@@ teren blä@@ ttern \
der pflanzen schme@@ cken ."
],
batch_size=1,
)
output = remove_bpe(output)
print(output)
|
495946
|
import copy
from functools import reduce
import operator
def dict_diff(d1, d2):
diff = {}
for k1, v1 in d1.items():
kdiff = {}
if isinstance(v1, list):
v2 = d2.get(k1, [])
added = [v2i for v2i in v2 if v2i not in v1]
if added:
kdiff["added"] = added
removed = [v1i for v1i in v1 if v1i not in v2]
if removed:
kdiff["removed"] = removed
else:
v2 = d2.get(k1, None)
if v1 != v2:
if v1 is not None:
kdiff["removed"] = v1
if v2 is not None:
kdiff["added"] = v2
if kdiff:
diff[k1] = kdiff
for k2, v2 in d2.items():
if k2 in d1 or v2 is None:
continue
diff[k2] = {"added": v2}
return copy.deepcopy(diff)
def get_nested_val(d, key, separator="."):
try:
return reduce(operator.getitem, key.split(separator), d)
except (KeyError, TypeError):
return None
|
495955
|
import matplotlib.pyplot as plt
# NOTE: `pres` (a GeoDataFrame) and `lmos` (a local Moran statistic with `.p_sim`
# and `.q` attributes, e.g. from esda.Moran_Local) are assumed to be defined
# earlier in the original script/notebook.
f = plt.figure(figsize=(10,4))
ax = plt.gca()
ax.set_aspect('equal')
is_weird = lmos.p_sim <= (.05/len(pres))
pres.plot(color='lightgrey', ax=ax)
pres.assign(quads=lmos.q)[is_weird].plot('quads',
legend=True,
k=4, categorical=True,
cmap='bwr_r', ax=ax)
|
496013
|
import os
import pytest
import torch
import pytorch_lightning as pl
from pytorch_lightning import Callback, Trainer
import ray
from ray_lightning import RayShardedPlugin
from ray_lightning.tests.utils import BoringModel
@pytest.fixture
def ray_start_2_cpus():
address_info = ray.init(num_cpus=2)
yield address_info
ray.shutdown()
@pytest.fixture
def seed():
pl.seed_everything(0)
def test_ddp_choice_sharded(tmpdir, ray_start_2_cpus, seed):
"""Tests if sharded plugin is properly recognized."""
class CB(Callback):
def on_fit_start(self, trainer, pl_module):
assert isinstance(trainer.accelerator.training_type_plugin,
RayShardedPlugin)
raise SystemExit()
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
plugins=[RayShardedPlugin(num_workers=2)],
callbacks=[CB()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
def test_ddp_sharded_plugin_checkpoint(tmpdir, ray_start_2_cpus, seed):
"""Tests if checkpoint is saved correctly."""
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)],
fast_dev_run=True,
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
saved_model = BoringModel.load_from_checkpoint(checkpoint_path)
# Assert model parameters are identical after loading.
for ddp_param, shard_param in zip(model.parameters(),
saved_model.parameters()):
assert torch.equal(ddp_param, shard_param)
def test_ddp_sharded_plugin_finetune(tmpdir, ray_start_2_cpus, seed):
"""Tests if we can save and restart training."""
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)],
fast_dev_run=True,
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
saved_model = BoringModel.load_from_checkpoint(checkpoint_path)
trainer = Trainer(fast_dev_run=True, )
trainer.fit(saved_model)
def test_ddp_sharded_plugin_resume_from_checkpoint(tmpdir, ray_start_2_cpus,
seed):
"""Tests if resuming from checkpoint works."""
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)],
fast_dev_run=True,
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)],
fast_dev_run=True,
resume_from_checkpoint=checkpoint_path)
trainer.fit(model)
def test_ddp_sharded_plugin_test(tmpdir, ray_start_2_cpus, seed):
"""Tests if test works without fit."""
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)],
fast_dev_run=True,
)
trainer.test(model)
def test_ddp_sharded_plugin_resume_from_checkpoint_downsize(
tmpdir, ray_start_2_cpus, seed):
"""Tests if we can save and resume training with less workers."""
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=2)], fast_dev_run=True)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer.save_checkpoint(checkpoint_path)
model = BoringModel()
trainer = Trainer(
plugins=[RayShardedPlugin(num_workers=1)],
fast_dev_run=True,
resume_from_checkpoint=checkpoint_path)
trainer.fit(model)
|
496023
|
from pyradioconfig.parts.lynx.calculators.calc_modulator import CALC_Modulator_Lynx
class CALC_Modulator_Leopard(CALC_Modulator_Lynx):
#Inherit all from Lynx
pass
|
496036
|
from pipeline.runner import in_progress
def import_in_progress(request):
return {"import_in_progess": in_progress()}
|
496048
|
import abc
from rdr_service.genomic_enums import GenomicWorkflowState
class GenomicStateBase:
"""Abstract base class for genomic states"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def transition_function(self, signal):
return
class IgnoreState(GenomicStateBase):
"""
Ignore State, used to effectively remove GenomicSetMembers from
the genomics system.
"""
def transition_function(self, signal):
return GenomicWorkflowState.IGNORE
class ControlSampleState(GenomicStateBase):
"""
Control Sample State, used to mark programmatic control samples,
for example NIST samples.
"""
def transition_function(self, signal):
return GenomicWorkflowState.CONTROL_SAMPLE
class AW0ReadyState(GenomicStateBase):
"""
State representing new Genomic Set Members
ready for AW0 manifest state
"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.AW0
return GenomicWorkflowState.AW0_READY
class AW0State(GenomicStateBase):
"""State representing the AW0 manifest state"""
def transition_function(self, signal):
if signal == 'aw1-reconciled':
return GenomicWorkflowState.AW1
elif signal == 'aw1-failed':
return GenomicWorkflowState.AW1F_PRE
return GenomicWorkflowState.AW0
class AW1State(GenomicStateBase):
"""State representing the AW1 manifest state"""
def transition_function(self, signal):
if signal == 'aw1-failed':
return GenomicWorkflowState.AW1F_POST
elif signal == 'aw2':
return GenomicWorkflowState.AW2
return GenomicWorkflowState.AW1
class AW2State(GenomicStateBase):
"""State representing the AW2 manifest state"""
def transition_function(self, signal):
if signal == 'missing':
return GenomicWorkflowState.GC_DATA_FILES_MISSING
elif signal == 'fail':
return GenomicWorkflowState.AW2_FAIL
elif signal == 'cvl-ready':
return GenomicWorkflowState.CVL_READY
elif signal == 'gem-ready':
return GenomicWorkflowState.GEM_READY
return GenomicWorkflowState.AW2
class AW2MissingState(GenomicStateBase):
"""State representing the AW2 Missing Data state"""
def transition_function(self, signal):
if signal == 'missing':
return GenomicWorkflowState.GC_DATA_FILES_MISSING
elif signal == 'cvl-ready':
return GenomicWorkflowState.CVL_READY
elif signal == 'gem-ready':
return GenomicWorkflowState.GEM_READY
return GenomicWorkflowState.GC_DATA_FILES_MISSING
class GEMReadyState(GenomicStateBase):
"""State representing the GEM_READY state"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.A1
return GenomicWorkflowState.GEM_READY
class A1State(GenomicStateBase):
"""State representing the A1 manifest state"""
def transition_function(self, signal):
if signal == 'a2-gem-pass':
return GenomicWorkflowState.GEM_RPT_READY
if signal == 'a2-gem-fail':
return GenomicWorkflowState.A2F
if signal == 'unconsented':
return GenomicWorkflowState.GEM_RPT_PENDING_DELETE
return GenomicWorkflowState.A1
class A2PassState(GenomicStateBase):
"""State representing the A2 manifest state"""
def transition_function(self, signal):
if signal == 'report-ready':
return GenomicWorkflowState.GEM_RPT_READY
if signal == 'unconsented':
return GenomicWorkflowState.GEM_RPT_PENDING_DELETE
return GenomicWorkflowState.A2
class A2FailState(GenomicStateBase):
"""State representing the A2 manifest GEM failure state"""
def transition_function(self, signal):
if signal == 'report-ready':
return GenomicWorkflowState.GEM_RPT_READY
if signal == 'unconsented':
return GenomicWorkflowState.GEM_RPT_PENDING_DELETE
return GenomicWorkflowState.A2F
class A3State(GenomicStateBase):
"""State representing the A3 manifest; GEM Delete states"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.GEM_RPT_DELETED
return GenomicWorkflowState.A3
class GEMReportReady(GenomicStateBase):
"""State representing the GEM Report"""
def transition_function(self, signal):
if signal == 'unconsented':
return GenomicWorkflowState.GEM_RPT_PENDING_DELETE
return GenomicWorkflowState.GEM_RPT_READY
class GEMReportPendingDelete(GenomicStateBase):
"""State representing when Consent revoked, input to A3 Manifest"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.GEM_RPT_DELETED
if signal == 'reconsented':
return GenomicWorkflowState.GEM_READY
return GenomicWorkflowState.GEM_RPT_PENDING_DELETE
class GEMReportDeleted(GenomicStateBase):
"""State representing when Consent revoked, input to A3 Manifest"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.GEM_RPT_DELETED
if signal == 'reconsented':
return GenomicWorkflowState.GEM_READY
return GenomicWorkflowState.GEM_RPT_DELETED
class CVLReadyState(GenomicStateBase):
"""State representing the CVL_READY state"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.W1
return GenomicWorkflowState.CVL_READY
class W1State(GenomicStateBase):
"""State representing the W1 manifest state"""
def transition_function(self, signal):
if signal == 'w2-ingestion-success':
return GenomicWorkflowState.W2
class W2State(GenomicStateBase):
"""State representing the W2 manifest state"""
def transition_function(self, signal):
if signal == 'manifest-generated':
return GenomicWorkflowState.W3
class W3State(GenomicStateBase):
"""State representing the W3 manifest state"""
def transition_function(self, signal):
if signal == 'aw1c-reconciled':
return GenomicWorkflowState.AW1C
if signal == 'aw1c-failed':
# TODO: There may be a pre-accessioning state as well
return GenomicWorkflowState.AW1CF_POST
class GenomicStateHandler:
"""
Basic FSM for Genomic States. Returns call to state's transition_function()
"""
states = {
GenomicWorkflowState.IGNORE: IgnoreState(),
GenomicWorkflowState.CONTROL_SAMPLE: ControlSampleState(),
GenomicWorkflowState.AW0_READY: AW0ReadyState(),
GenomicWorkflowState.AW0: AW0State(),
GenomicWorkflowState.AW1: AW1State(),
GenomicWorkflowState.AW2: AW2State(),
GenomicWorkflowState.GC_DATA_FILES_MISSING: AW2MissingState(),
GenomicWorkflowState.CVL_READY: CVLReadyState(),
GenomicWorkflowState.W1: W1State(),
GenomicWorkflowState.W2: W2State(),
GenomicWorkflowState.W3: W3State(),
GenomicWorkflowState.GEM_READY: GEMReadyState(),
GenomicWorkflowState.A1: A1State(),
GenomicWorkflowState.A2: A2PassState(),
GenomicWorkflowState.A2F: A2FailState(),
GenomicWorkflowState.A3: A3State(),
GenomicWorkflowState.GEM_RPT_READY: GEMReportReady(),
GenomicWorkflowState.GEM_RPT_PENDING_DELETE: GEMReportPendingDelete(),
GenomicWorkflowState.GEM_RPT_DELETED: GEMReportDeleted(),
}
@classmethod
def get_new_state(cls, current_state, signal):
_state = cls.states.get(current_state, None)
if _state is not None:
return _state.transition_function(signal)
return
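# Illustrative sketch (assumed usage, not part of the original module): advancing a
# member's workflow state by feeding signals to the handler.
#     state = GenomicStateHandler.get_new_state(GenomicWorkflowState.AW0, "aw1-reconciled")
#     # -> GenomicWorkflowState.AW1
#     state = GenomicStateHandler.get_new_state(state, "aw2")
#     # -> GenomicWorkflowState.AW2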
|
496102
|
import math
def mod(x, y):
return x - y * math.trunc(x / y)
def palindrome2(pow2, n):
t = [None] * 20
for i in range(0, 20):
t[i] = mod(math.trunc(n / pow2[i]), 2) == 1
nnum = 0
for j in range(1, 20):
if t[j]:
nnum = j
for k in range(0, math.trunc(nnum / 2) + 1):
if t[k] != t[nnum - k]:
return False
return True
p = 1
pow2 = [None] * 20
for i in range(0, 20):
p *= 2
pow2[i] = math.trunc(p / 2)
sum = 0
for d in range(1, 10):
if palindrome2(pow2, d):
print("%d\n" % d, end='')
sum += d
if palindrome2(pow2, d * 10 + d):
print("%d\n" % (d * 10 + d), end='')
sum += d * 10 + d
for a0 in range(0, 5):
a = a0 * 2 + 1
for b in range(0, 10):
for c in range(0, 10):
num0 = a * 100000 + b * 10000 + c * 1000 + c * 100 + b * 10 + a
if palindrome2(pow2, num0):
print("%d\n" % num0, end='')
sum += num0
num1 = a * 10000 + b * 1000 + c * 100 + b * 10 + a
if palindrome2(pow2, num1):
print("%d\n" % num1, end='')
sum += num1
num2 = a * 100 + b * 10 + a
if palindrome2(pow2, num2):
print("%d\n" % num2, end='')
sum += num2
num3 = a * 1000 + b * 100 + b * 10 + a
if palindrome2(pow2, num3):
print("%d\n" % num3, end='')
sum += num3
print("sum=%d\n" % sum, end='')
|
496123
|
from will.utils import warn
from will.backends.storage.file_backend import FileStorage
warn(
"""Deprecation - will.storage.file_storage has been moved to will.backends.storage.file_backend,
and will be removed in version 2.2. Please update your paths accordingly!"""
)
|
496136
|
from train import *
from DCRN import DCRN
if __name__ == '__main__':
# setup
setup()
# data pre-precessing: X, y, A, A_norm, Ad
X, y, A = load_graph_data(opt.args.name, show_details=False)
A_norm = normalize_adj(A, self_loop=True, symmetry=False)
Ad = diffusion_adj(A, mode="ppr", transport_rate=opt.args.alpha_value)
# to torch tensor
X = numpy_to_torch(X).to(opt.args.device)
A_norm = numpy_to_torch(A_norm, sparse=True).to(opt.args.device)
Ad = numpy_to_torch(Ad).to(opt.args.device)
# Dual Correlation Reduction Network
model = DCRN(n_node=X.shape[0]).to(opt.args.device)
# deep graph clustering
acc, nmi, ari, f1 = train(model, X, y, A, A_norm, Ad)
print("ACC: {:.4f},".format(acc), "NMI: {:.4f},".format(nmi), "ARI: {:.4f},".format(ari), "F1: {:.4f}".format(f1))
|
496161
|
import heapq
class mcf_graph():
n=1
pos=[]
g=[[]]
def __init__(self,N):
self.n=N
self.pos=[]
self.g=[[] for i in range(N)]
def add_edge(self,From,To,cap,cost):
assert 0<=From and From<self.n
assert 0<=To and To<self.n
m=len(self.pos)
self.pos.append((From,len(self.g[From])))
self.g[From].append({"to":To,"rev":len(self.g[To]),"cap":cap,"cost":cost})
self.g[To].append({"to":From,"rev":len(self.g[From])-1,"cap":0,"cost":-cost})
def get_edge(self,i):
m=len(self.pos)
assert 0<=i and i<m
_e=self.g[self.pos[i][0]][self.pos[i][1]]
_re=self.g[_e["to"]][_e["rev"]]
return {"from":self.pos[i][0],"to":_e["to"],"cap":_e["cap"]+_re["cap"],
"flow":_re["cap"],"cost":_e["cost"]}
def edges(self):
m=len(self.pos)
result=[{} for i in range(m)]
for i in range(m):
tmp=self.get_edge(i)
result[i]["from"]=tmp["from"]
result[i]["to"]=tmp["to"]
result[i]["cap"]=tmp["cap"]
result[i]["flow"]=tmp["flow"]
result[i]["cost"]=tmp["cost"]
return result
def flow(self,s,t,flow_limit=(1<<63)-1):
return self.slope(s,t,flow_limit)[-1]
def slope(self,s,t,flow_limit=(1<<63)-1):
assert 0<=s and s<self.n
assert 0<=t and t<self.n
assert s!=t
'''
invariants (C = maxcost):
-(n-1)C <= dual[s] <= dual[i] <= dual[t] = 0
reduced cost (= e.cost + dual[e.from] - dual[e.to]) >= 0 for all edges
'''
dual=[0 for i in range(self.n)]
dist=[0 for i in range(self.n)]
pv=[0 for i in range(self.n)]
pe=[0 for i in range(self.n)]
vis=[False for i in range(self.n)]
def dual_ref():
for i in range(self.n):
dist[i]=(1<<63)-1
pv[i]=-1
pe[i]=-1
vis[i]=False
que=[]
heapq.heappush(que,(0,s))
dist[s]=0
while(que):
v=heapq.heappop(que)[1]
if vis[v]:continue
vis[v]=True
if v==t:break
'''
dist[v] = shortest(s, v) + dual[s] - dual[v]
dist[v] >= 0 (all reduced costs are non-negative)
dist[v] <= (n-1)C
'''
for i in range(len(self.g[v])):
e=self.g[v][i]
if vis[e["to"]] or (not(e["cap"])):continue
'''
|-dual[e.to]+dual[v]| <= (n-1)C
cost <= C - -(n-1)C + 0 = nC
'''
cost=e["cost"]-dual[e["to"]]+dual[v]
if dist[e["to"]]-dist[v]>cost:
dist[e["to"]]=dist[v]+cost
pv[e["to"]]=v
pe[e["to"]]=i
heapq.heappush(que,(dist[e["to"]],e["to"]))
if not(vis[t]):
return False
for v in range(self.n):
if not(vis[v]):continue
dual[v]-=dist[t]-dist[v]
return True
flow=0
cost=0
prev_cost=-1
result=[(flow,cost)]
while(flow<flow_limit):
if not(dual_ref()):
break
c=flow_limit-flow
v=t
while(v!=s):
c=min(c,self.g[pv[v]][pe[v]]["cap"])
v=pv[v]
v=t
while(v!=s):
self.g[pv[v]][pe[v]]["cap"]-=c
self.g[v][self.g[pv[v]][pe[v]]["rev"]]["cap"]+=c
v=pv[v]
d=-dual[s]
flow+=c
cost+=c*d
if(prev_cost==d):
result.pop()
result.append((flow,cost))
prev_cost=d  # remember cost per unit flow of this segment for the slope check above
return result
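if __name__ == "__main__":
    # Minimal sketch (not part of the original module): push flow from node 0 to
    # node 2 through a single chain and inspect the result.
    g = mcf_graph(3)
    g.add_edge(0, 1, 2, 1)   # capacity 2, cost 1 per unit
    g.add_edge(1, 2, 2, 1)
    print(g.flow(0, 2))      # (2, 4): 2 units of flow at total cost 4
    print(g.edges())         # both edges saturated: flow == cap == 2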
|
496165
|
from typing import List, cast, Dict
import numpy as np
from py_headless_daw.dto.waveform import Waveform
from py_headless_daw.project.content.audio_clip import AudioClip
from py_headless_daw.schema.clip_track_processing_strategy import ClipTrackProcessingStrategy
from py_headless_daw.schema.dto.time_interval import TimeInterval
from py_headless_daw.schema.events.event import Event
from py_headless_daw.services.wave_data_provider import WaveformProviderInterface
from py_headless_daw.shared.clip_intersection import ClipIntersection
class ProcessedWavData(Waveform):
pass
class Sampler(ClipTrackProcessingStrategy):
"""
Sampler is a strategy that produces audio based on a number of associated AudioClips
"""
_processed_wav_data_cache: Dict[str, ProcessedWavData] = {}
def __init__(self, clips: List[AudioClip], wave_data_provider: WaveformProviderInterface):
super().__init__(clips)
for clip in clips:
if not isinstance(clip, AudioClip):
raise Exception(f"a clips was expected to be an AudioClip, instead it is {type(clip)}")
self.clips: List[AudioClip] = clips
self.wave_data_provider: WaveformProviderInterface = wave_data_provider
def render(self, interval: TimeInterval, stream_inputs: List[np.ndarray], stream_outputs: List[np.ndarray],
event_inputs: List[List[Event]], event_outputs: List[List[Event]]):
if self.unit is None:
raise Exception('unit is not set in this sampler')
[o.fill(0.0) for o in stream_outputs]
intersections = self._find_intersections(interval)
for intersection in intersections:
self._render_one_intersection(intersection, stream_outputs, interval, self.unit.host.sample_rate)
def _render_one_intersection(self, intersection: ClipIntersection, stream_outputs: List[np.ndarray],
interval: TimeInterval,
sample_rate: int):
clip: AudioClip = cast(AudioClip, intersection.clip)
wav_data = self._get_processed_wav_data(clip)
if wav_data.num_channels != 1:
# number of channels should match if it's not a mono sample
if wav_data.num_channels != len(stream_outputs):
raise Exception(
f"""number of channels in wav_data {wav_data.num_channels}
and in output {len(stream_outputs)} does not match. Related file: {clip.source_file_path}""")
if intersection.start_clip_time is None:
raise Exception('intersection is missing start clip time prop')
patch_start_in_wav_data_in_samples: int = round(clip.cue_sample + intersection.start_clip_time * sample_rate)
if patch_start_in_wav_data_in_samples > wav_data.length_in_samples():
return
if intersection.end_clip_time is None:
raise Exception('intersection is missing end clip time prop')
patch_end_in_wav_data_in_samples: int = min(
round(clip.cue_sample + intersection.end_clip_time * sample_rate),
wav_data.length_in_samples())
if intersection.start_project_time is None:
raise Exception('intersection is missing start project time')
patch_start_in_output_in_samples: int = round(
(intersection.start_project_time - interval.start_in_seconds) * sample_rate)
patch_length_in_samples: int = patch_end_in_wav_data_in_samples - patch_start_in_wav_data_in_samples
patch_end_in_output_in_samples: int = patch_start_in_output_in_samples + patch_length_in_samples
patch_length_in_wav_data = patch_end_in_wav_data_in_samples - patch_start_in_wav_data_in_samples
patch_length_in_output = patch_end_in_output_in_samples - patch_start_in_output_in_samples
if patch_length_in_wav_data != patch_length_in_output:
raise Exception(
f"""patch lengths in sample and in interval diff. In sample: {patch_length_in_wav_data},
in interval: {patch_length_in_output}""")
for i, output in enumerate(stream_outputs):
if wav_data.num_channels > 1:
channel_in_wav = wav_data.data[i]
else:
# mono samples is a special case - we add their single channel uniformly to all outputs
channel_in_wav = wav_data.data[0]
# slicing returns a view, so it must be efficient
patch_data: np.ndarray = channel_in_wav[patch_start_in_wav_data_in_samples:patch_end_in_wav_data_in_samples]
patched_data: np.ndarray = output[patch_start_in_output_in_samples:patch_end_in_output_in_samples]
if patch_data.shape != patched_data.shape:
raise Exception(f"""patch and patched have different shapes.
patch: {str(patch_data.shape)}
patched: {str(patched_data.shape)}
""")
np.add(patch_data, patched_data, out=patched_data)
def _get_processed_wav_data(self, clip: AudioClip) -> ProcessedWavData:
cache_key: str = self._generate_cache_key(clip)
if cache_key not in self._processed_wav_data_cache:
self._processed_wav_data_cache[cache_key] = self._generate_processed_wav_data(clip)
return self._processed_wav_data_cache[cache_key]
def _generate_cache_key(self, clip: AudioClip) -> str:
return clip.source_file_path + "_" + str(clip.rate)
def _generate_processed_wav_data(self, clip: AudioClip) -> ProcessedWavData:
raw_waveform = self.wave_data_provider.get_wave_data_by_file_path(clip.source_file_path)
(sample_rate, data) = (raw_waveform.sample_rate, raw_waveform.data)
# in the future, this will involve not just reading the wav file, but also
# converting it into the host's sample rate (self.unit.host.sample_rate),
# and taking into account clip's rate property.
# Also reading non-wav files.
# that's why Sample does not have sample rate property as it is already expected
# to be in host's one
return ProcessedWavData(
data=data,
sample_rate=sample_rate
)
|
496171
|
import torch
import numpy as np
import functools
def torchify(func):
"""Extends to NumPy arrays a function written for PyTorch tensors.
Converts input arrays to tensors and output tensors back to arrays.
Supports hybrid inputs where some are arrays and others are tensors:
- in this case all tensors should have the same device and float dtype;
- the output is not converted.
No data copy: tensors and arrays share the same underlying storage.
Warning: kwargs are currently not supported when using jit.
"""
# TODO: switch to @torch.jit.unused when is_scripting will work
@torch.jit.ignore
@functools.wraps(func)
def wrapped(*args, **kwargs):
device = None
dtype = None
for arg in args:
if isinstance(arg, torch.Tensor):
device_ = arg.device
if device is not None and device != device_:
raise ValueError(
'Two input tensors have different devices: '
f'{device} and {device_}')
device = device_
if torch.is_floating_point(arg):
dtype_ = arg.dtype
if dtype is not None and dtype != dtype_:
raise ValueError(
'Two input tensors have different float dtypes: '
f'{dtype} and {dtype_}')
dtype = dtype_
args_converted = []
for arg in args:
if isinstance(arg, np.ndarray):
arg = torch.from_numpy(arg).to(device)
if torch.is_floating_point(arg):
arg = arg.to(dtype)
args_converted.append(arg)
rets = func(*args_converted, **kwargs)
def convert_back(ret):
if isinstance(ret, torch.Tensor):
if device is None: # no input was torch.Tensor
ret = ret.cpu().numpy()
return ret
# TODO: handle nested struct with map tensor
if not isinstance(rets, tuple):
rets = convert_back(rets)
else:
rets = tuple(convert_back(ret) for ret in rets)
return rets
# BUG: is_scripting does not work in 1.6 so wrapped is always called
if torch.jit.is_scripting():
return func
else:
return wrapped
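# Illustrative sketch (assumed usage, not part of the original module):
#
#     @torchify
#     def l2_normalize(x):
#         return x / x.norm()
#
#     l2_normalize(np.array([3.0, 4.0]))       # NumPy in -> NumPy out ([0.6, 0.8])
#     l2_normalize(torch.tensor([3.0, 4.0]))   # tensors pass through unconverted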
|
496192
|
from unittest import TestCase
import requests_mock
from parameterized import parameterized
from hvac import exceptions
from hvac.adapters import JSONAdapter
from hvac.api.auth_methods import AppRole
from hvac.constants.approle import DEFAULT_MOUNT_POINT
class TestAppRole(TestCase):
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT, "default", None),
("custom mount point", "approle-test", "default", None),
(
"bad token type",
DEFAULT_MOUNT_POINT,
"bad_token",
exceptions.ParamValidationError,
),
]
)
@requests_mock.Mocker()
def test_create_or_update_approle(
self, test_label, mount_point, token_type, raises, requests_mocker
):
expected_status_code = 204
role_name = "testrole"
mock_url = (
"http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
mount_point=mount_point, role_name=role_name
)
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
app_role = AppRole(adapter=JSONAdapter())
if raises is not None:
with self.assertRaises(raises) as cm:
app_role.create_or_update_approle(
role_name=role_name,
token_policies=["default"],
token_type=token_type,
mount_point=mount_point,
)
self.assertIn(member="unsupported token_type", container=str(cm.exception))
else:
response = app_role.create_or_update_approle(
role_name=role_name, token_policies=["default"], mount_point=mount_point
)
self.assertEqual(first=expected_status_code, second=response.status_code)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_list_roles(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {"keys": ["testrole"]},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role".format(
mount_point=mount_point
)
requests_mocker.register_uri(
method="LIST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.list_roles(mount_point=mount_point)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_read_role(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
mock_response = {
"auth": None,
"data": {
"bind_secret_id": True,
"local_secret_ids": False,
"secret_id_bound_cidrs": None,
"secret_id_num_uses": 0,
"secret_id_ttl": 0,
"token_bound_cidrs": None,
"token_explicit_max_ttl": 0,
"token_max_ttl": 0,
"token_no_default_poolicy": False,
"token_num_uses": 0,
"token_period": 14400,
"token_policies": None,
"token_ttl": 0,
"token_type": "default",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = (
"http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
mount_point=mount_point, role_name=role_name
)
)
requests_mocker.register_uri(
method="GET",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.read_role(role_name="testrole", mount_point=mount_point)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_delete_role(self, test_label, mount_point, requests_mocker):
expected_status_code = 204
role_name = "testrole"
mock_url = (
"http://localhost:8200/v1/auth/{mount_point}/role/{role_name}".format(
mount_point=mount_point, role_name=role_name
)
)
requests_mocker.register_uri(
method="DELETE",
url=mock_url,
status_code=expected_status_code,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.delete_role(role_name=role_name, mount_point=mount_point)
self.assertEqual(first=expected_status_code, second=response.status_code)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_read_role_id(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
mock_response = {
"auth": None,
"data": {"role_id": "e5a7b66e-5d08-da9c-7075-71984634b882"},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/role-id".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="GET",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.read_role_id(role_name=role_name, mount_point=mount_point)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_update_role_id(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
role_id = "test_role_id"
mock_response = {
"auth": None,
"data": {"role_id": role_id},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/role-id".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.update_role_id(
role_name=role_name, role_id=role_id, mount_point=mount_point
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT, None),
("custom mount point", "approle-test", exceptions.ParamValidationError),
]
)
@requests_mock.Mocker()
def test_generate_secret_id(self, test_label, mount_point, raises, requests_mocker):
expected_status_code = 200
role_name = "testrole"
mock_response = {
"auth": None,
"data": {
"secret_id": "<KEY>",
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
if raises is not None:
with self.assertRaises(raises) as cm:
app_role.generate_secret_id(
role_name=role_name,
metadata="metadata string",
mount_point=mount_point,
)
self.assertIn(
member="unsupported metadata argument", container=str(cm.exception)
)
else:
response = app_role.generate_secret_id(
role_name=role_name, cidr_list=["127.0.0.1/32"], mount_point=mount_point
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT, None),
("custom mount point", "approle-test", exceptions.ParamValidationError),
]
)
@requests_mock.Mocker()
def test_create_custom_secret_id(
self, test_label, mount_point, raises, requests_mocker
):
expected_status_code = 200
role_name = "testrole"
secret_id = "custom_secret"
mock_response = {
"auth": None,
"data": {
"secret_id": secret_id,
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/custom-secret-id".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
if raises is not None:
with self.assertRaises(raises) as cm:
app_role.create_custom_secret_id(
role_name=role_name,
secret_id=secret_id,
cidr_list=["127.0.0.1/32"],
metadata="metadata string",
mount_point=mount_point,
)
self.assertIn(
member="unsupported metadata argument", container=str(cm.exception)
)
else:
response = app_role.create_custom_secret_id(
role_name=role_name,
secret_id=secret_id,
cidr_list=["127.0.0.1/32"],
mount_point=mount_point,
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_read_secret_id(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
secret_id = "custom_secret"
mock_response = {
"auth": None,
"data": {
"secret_id": secret_id,
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id/lookup".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.read_secret_id(
role_name=role_name, secret_id=secret_id, mount_point=mount_point
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_destroy_secret_id(self, test_label, mount_point, requests_mocker):
expected_status_code = 204
role_name = "testrole"
secret_id = "custom_secret"
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id/destroy".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.destroy_secret_id(
role_name=role_name, secret_id=secret_id, mount_point=mount_point
)
self.assertEqual(first=expected_status_code, second=response.status_code)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_list_secret_id_accessors(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
mock_response = {
"auth": None,
"data": {
"keys": [
"ce102d2a-8253-c437-bf9a-aceed4241491",
"a1c8dee4-b869-e68d-3520-2040c1a0849a",
"be83b7e2-044c-7244-07e1-47560ca1c787",
"84896a0c-1347-aa90-a4f6-aca8b7558780",
"239b1328-6523-15e7-403a-a48038cdc45a",
]
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="LIST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.list_secret_id_accessors(
role_name=role_name, mount_point=mount_point
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_read_secret_id_accessor(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_name = "testrole"
secret_id = "custom_secret"
secret_id_accessor = "84896a0c-1347-aa90-a4f6-aca8b7558780"
mock_response = {
"auth": None,
"data": {
"secret_id": secret_id,
"secret_id_accessor": "84896a0c-1347-aa90-a4f6-aca8b7558780",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id-accessor/lookup".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.read_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
mount_point=mount_point,
)
self.assertEqual(first=mock_response, second=response)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_destroy_secret_id_accessor(self, test_label, mount_point, requests_mocker):
expected_status_code = 204
role_name = "testrole"
secret_id_accessor = "84896a0c-1347-aa90-a4f6-aca8b7558780"
mock_url = "http://localhost:8200/v1/auth/{mount_point}/role/{role_name}/secret-id-accessor/destroy".format(
mount_point=mount_point, role_name=role_name
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.destroy_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
mount_point=mount_point,
)
self.assertEqual(first=expected_status_code, second=response.status_code)
@parameterized.expand(
[
("default mount point", DEFAULT_MOUNT_POINT),
("custom mount point", "approle-test"),
]
)
@requests_mock.Mocker()
def test_login(self, test_label, mount_point, requests_mocker):
expected_status_code = 200
role_id = "test_role_id"
secret_id = "custom_secret"
mock_response = {
"data": None,
"auth": {
"renewable": True,
"lease_duration": 1200,
"metadata": None,
"token_policies": ["default"],
"accessor": "fd6c9a00-d2dc-3b11-0be5-af7ae0e1d374",
"client_token": "5b1a0318-679c-9c45-e5c6-d1b9a9035d49",
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "860a11a8-b835-cbab-7fce-de4edc4cf533",
"warnings": None,
"wrap_info": None,
}
mock_url = "http://localhost:8200/v1/auth/{mount_point}/login".format(
mount_point=mount_point,
)
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
app_role = AppRole(adapter=JSONAdapter())
response = app_role.login(
role_id=role_id, secret_id=secret_id, mount_point=mount_point
)
self.assertEqual(first=mock_response, second=response)
|
496202
|
import unittest
from talent_curator.apps.google import drive
class GoogleDriveAPITest(unittest.TestCase):
def setUp(self):
self.google_api = drive.GoogleDriveAPI()
def test_build_headers(self):
headers = self.google_api.build_headers(access_token="<PASSWORD>")
assert headers['Authorization'] == "Bearer hello, world"
|
496223
|
from typing import Optional
from citrine._serialization import properties
from citrine._serialization.serializable import Serializable
from citrine.informatics.constraints.constraint import Constraint
from citrine.informatics.descriptors import FormulationDescriptor
__all__ = ['IngredientCountConstraint']
class IngredientCountConstraint(Serializable['IngredientCountConstraint'], Constraint):
"""Represents a constraint on the total number of ingredients in a formulation.
Parameters
----------
formulation_descriptor: FormulationDescriptor
descriptor to constrain
min: int
minimum ingredient count
max: int
maximum ingredient count
label: Optional[str]
Optional label to constrain.
If specified, then only ingredients with the specified label will count towards the total.
Default is ``None``; all ingredients count towards the total
"""
formulation_descriptor = properties.Object(FormulationDescriptor, 'formulation_descriptor')
min = properties.Integer('min')
max = properties.Integer('max')
label = properties.Optional(properties.String, 'label')
typ = properties.String('type', default='IngredientCountConstraint')
def __init__(self, *,
formulation_descriptor: FormulationDescriptor,
min: int,
max: int,
label: Optional[str] = None):
self.formulation_descriptor: FormulationDescriptor = formulation_descriptor
self.min: int = min
self.max: int = max
self.label: Optional[str] = label
def __str__(self):
return '<IngredientCountConstraint {!r}>'.format(self.formulation_descriptor.key)
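# Example usage (illustrative sketch; `descriptor` is assumed to be a
# pre-existing FormulationDescriptor for the formulation of interest):
#
#     constraint = IngredientCountConstraint(
#         formulation_descriptor=descriptor,
#         min=2,
#         max=5,
#         label=None,  # None counts every ingredient, not only a labelled subset
#     )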
496257
import pytest
from flask import Flask
@pytest.fixture(scope="session")
def app():
app = Flask(__name__)
return app
@pytest.fixture(scope="session")
def client(app):
return app.test_client()
496266
from rasa_nlu_examples.meta.printer import print_message
from rasa.shared.nlu.training_data.message import Message
def test_can_print_empty_message():
print_message(Message())
496294
import zlib
import uuid
import time
def busy_loop():
start = time.time()
for i in range(0, 50):
text = ''.join([str(uuid.uuid4()) for _ in range(0, 10000)])
text_encoded = text.encode('utf8')
text_compressed = zlib.compress(text_encoded)
zlib.decompress(text_compressed)
end = time.time()
delta = round(end - start, 2)
print(f"Execution time: {delta}s")
if __name__ == '__main__':
busy_loop()
496308
import numpy as np
from scipy import special as sp
def jacobi_recurrence(N, alpha=0., beta=0., probability=True):
r"""
    Compute the recursion coefficients of the Jacobi polynomials which are
    orthonormal with respect to the Beta distribution.
    Parameters
    ----------
    N : integer
        The number of polynomial terms requested
    alpha : float
The first parameter of the Jacobi polynomials. For the Beta
distribution with parameters :math:`\hat{\alpha},\hat{\beta}` we have
:math:`\alpha=\hat{\beta}-1`
beta : float
The second parameter of the Jacobi polynomials
For the Beta distribution
with parameters :math:`\hat{\alpha},\hat{\beta}` we have
:math:`\beta=\hat{\alpha}-1`
Returns
-------
ab : np.ndarray (Nterms,2)
The recursion coefficients of the Nterms orthonormal polynomials
"""
if N < 1:
return np.ones((0, 2))
ab = np.ones((N, 2)) * np.array([beta**2. - alpha**2., 1.])
# Special cases
ab[0, 0] = (beta - alpha) / (alpha + beta + 2.)
ab[0, 1] = np.exp((alpha + beta + 1.) * np.log(2.) +
sp.gammaln(alpha + 1.) + sp.gammaln(beta + 1.) -
sp.gammaln(alpha + beta + 2.)
)
if N > 1:
ab[1, 0] /= (2. + alpha + beta) * (4. + alpha + beta)
ab[1, 1] = 4. * (alpha + 1.) * (beta + 1.) / (
(alpha + beta + 2.)**2 * (alpha + beta + 3.))
inds = np.arange(2., N)
ab[2:, 0] /= (2. * inds + alpha + beta) * (2 * inds + alpha + beta + 2.)
ab[2:, 1] = 4 * inds * (inds + alpha) * \
(inds + beta) * (inds + alpha + beta)
ab[2:, 1] /= (2. * inds + alpha + beta)**2 * \
(2. * inds + alpha + beta + 1.) * (2. * inds + alpha + beta - 1)
ab[:, 1] = np.sqrt(ab[:, 1])
if probability:
ab[0, 1] = 1.
return ab
def hermite_recurrence(Nterms, rho=0., probability=True):
r"""
Compute the recursion coefficients of for the Hermite
polynomial family.
.. math:: x^{2\rho}\exp(-x^2)
Parameters
----------
    Nterms : integer
        The number of polynomial terms requested
    rho : float
        The parameter of the Hermite polynomials. The special case of
        :math:`\rho=0` and probability=True returns the probabilists'
        Hermite polynomials
Returns
-------
ab : np.ndarray (Nterms,2)
The recursion coefficients of the Nterms orthonormal polynomials
"""
if Nterms < 1:
return np.ones((0, 2))
ab = np.zeros((Nterms, 2))
ab[0, 1] = sp.gamma(rho+0.5) # = np.sqrt(np.pi) for rho=0
if rho == 0 and probability:
ab[1:, 1] = np.arange(1., Nterms)
else:
ab[1:, 1] = 0.5*np.arange(1., Nterms)
ab[np.arange(Nterms) % 2 == 1, 1] += rho
ab[:, 1] = np.sqrt(ab[:, 1])
if probability:
ab[0, 1] = 1.
return ab
def get_jacobi_recursion_coefficients(poly_name, opts, num_coefs):
"""
Get the recursion coefficients of a Jacobi polynomial.
Parameters
----------
    poly_name : string or None
        The name of the polynomial family. If None, the parameters of the
        Jacobi polynomial are derived from the shape parameters of the
        Beta distribution given in opts["shapes"]
    opts : dictionary
        Dictionary with the following attributes
        alpha_poly : float
            The alpha parameter of the Jacobi polynomial. Only used and required
            if poly_name is not None
        beta_poly : float
            The beta parameter of the Jacobi polynomial. Only used and required
            if poly_name is not None
        shapes : dictionary
            Shape parameters of the Beta distribution. shapes["a"] is the
            a parameter of the Beta distribution and shapes["b"] is the
            b parameter of the Beta distribution.
            The parameters of the Jacobi polynomial are determined using the
            following relationships: alpha_poly = b-1, beta_poly = a-1.
            This option is not required and is ignored when poly_name is not None
    num_coefs : integer
        The number of recursion coefficients requested
Returns
-------
recursion_coeffs : np.ndarray (num_coefs, 2)
"""
if poly_name is not None:
alpha_poly, beta_poly = opts["alpha_poly"], opts["beta_poly"]
else:
alpha_poly, beta_poly = opts["shapes"]["b"] - \
1, opts["shapes"]["a"]-1
return jacobi_recurrence(
num_coefs, alpha=alpha_poly, beta=beta_poly, probability=True)
def charlier_recurrence(N, a):
r"""
Compute the recursion coefficients of the polynomials which are
orthonormal with respect to the Poisson distribution.
Parameters
----------
N : integer
The number of polynomial terms requested
a: float
The rate parameter of the Poisson distribution
Returns
-------
ab : np.ndarray (N,2)
The recursion coefficients of the N orthonormal polynomials
Notes
-----
Note as rate gets smaller the number of terms that can be accurately
computed will decrease because the problem gets more ill conditioned.
This is caused because the number of masses with significant weights
gets smaller as rate does
"""
if N < 1:
return np.ones((0, 2))
ab = np.zeros((N+1, 2))
ab[0, 0] = a
ab[0, 1] = 1
for i in range(1, N+1):
ab[i, 0] = a + i
ab[i, 1] = a * i
# orthonormal
ab[:, 1] = np.sqrt(ab[:, 1])
return ab
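# Example (sketch): for a Poisson rate a=2.0 the first row is [a, 1] and the
# remaining rows are [a + i, sqrt(a * i)], e.g.
# charlier_recurrence(2, 2.0) -> [[2., 1.], [3., sqrt(2.)], [4., 2.]]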
def krawtchouk_recurrence(Nterms, Ntrials, p):
r"""
Compute the recursion coefficients of the polynomials which are
orthonormal with respect to the binomial probability mass function
    .. math:: {N \choose k} p^k (1-p)^{N-k}
which is the probability of k successes from N trials.
Parameters
----------
Nterms : integer
The number of polynomial terms requested
Ntrials : integer
The number of trials
p : float
The probability of success :math:`p\in(0,1)`
Returns
-------
ab : np.ndarray (Nterms,2)
The recursion coefficients of the Nterms orthonormal polynomials
"""
assert(Nterms <= Ntrials)
assert p > 0 and p < 1
if Nterms < 1:
return np.ones((0, 2))
ab = np.array(
[[p*(Ntrials-n)+n*(1-p), p*(1-p)*n*(Ntrials-n+1)]
for n in range(Nterms)])
ab[:, 1] = np.sqrt(ab[:, 1])
ab[0, 1] = 1.0
# the probability flag does not apply here
# (beta0 comes out 0 in the three term recurrence), instead we set it
# to 1, the norm of the p0 polynomial
return ab
def hahn_recurrence(Nterms, N, alphaPoly, betaPoly):
r"""
Compute the recursion coefficients of the polynomials which are
orthonormal with respect to the hypergeometric probability mass function
.. math:: w(x)=\frac{{n \choose x}{M-n \choose N-x}}{{ M \choose N}}.
for
    .. math:: \max(0, N-(M-n)) \le x \le \min(n, N)
which describes the probability of x successes in N draws, without
replacement, from a finite population of size M that contains exactly
n successes.
Parameters
----------
Nterms : integer
The number of polynomial terms requested
N : integer
The number of draws
alphaPoly : integer
:math:`-n+1`
    betaPoly : integer
:math:`-M-1+n`
Returns
-------
ab : np.ndarray (Nterms,2)
The recursion coefficients of the Nterms orthonormal polynomials
"""
assert(Nterms <= N)
if Nterms < 1:
return np.ones((0, 2))
An = np.zeros(Nterms)
Cn = np.zeros(Nterms)
for n in range(Nterms):
numA = (alphaPoly+n+1) * (N-n) * (n+alphaPoly+betaPoly+1)
numC = n * (betaPoly+n) * (N+alphaPoly+betaPoly+n+1)
denA = (alphaPoly+betaPoly+2*n+1) * (alphaPoly+betaPoly+2*n+2)
denC = (alphaPoly+betaPoly+2*n+1) * (alphaPoly+betaPoly+2*n)
An[n] = numA / denA
Cn[n] = numC / denC
if Nterms == 1:
return np.array([[An[0]+Cn[0], 1]])
ab = np.array(
[[An[0]+Cn[0], 1]]+[[An[n]+Cn[n], An[n-1]*Cn[n]]
for n in range(1, Nterms)])
ab[:, 1] = np.sqrt(ab[:, 1])
ab[0, 1] = 1.0
return ab
def discrete_chebyshev_recurrence(N, Ntrials):
r"""
Compute the recursion coefficients of the polynomials which are
orthonormal with respect to the probability measure
.. math:: w(x) = \frac{\delta_i(x)}{M}
where :math:`\delta_i(x)` is the dirac delta function which is one when
:math:`x=i`, for :math:`i=1,\ldots,M` and zero otherwise
Parameters
----------
N : integer
The number of polynomial terms requested
Ntrials : integer
The number of probability masses (M)
Returns
-------
ab : np.ndarray (N,2)
The recursion coefficients of the N orthonormal polynomials
"""
assert(N <= Ntrials)
if N < 1:
return np.ones((0, 2))
ab = np.zeros((N, 2))
ab[:, 0] = 0.5 * Ntrials * (1. - 1./Ntrials)
ab[0, 1] = Ntrials
for i in range(1, N):
ab[i, 1] = 0.25 * Ntrials**2 * (1-(i * 1./Ntrials)**2)/(4-1./i**2)
ab[:, 1] = np.sqrt(ab[:, 1])
ab[0, 1] = 1.0
return ab
def convert_orthonormal_recurence_to_three_term_recurence(recursion_coefs):
r"""
    Convert the two term recursion coefficients
    .. math:: \sqrt{b_{n+1}} p_{n+1} = (x - a_n) p_n - \sqrt{b_n} p_{n-1}
    into the equivalent three term recursion coefficients
    .. math:: p_{n+1} = (\tilde{a}_{n+1}x - \tilde{b}_{n+1})p_n - \tilde{c}_{n+1} p_{n-1}
Parameters
----------
recursion_coefs : np.ndarray (num_recursion_coeffs,2)
The two term recursion coefficients
:math:`a_n,b_n`
Returns
-------
abc : np.ndarray (num_recursion_coeffs,3)
The three term recursion coefficients
:math:`\tilde{a}_n,\tilde{b}_n,\tilde{c}_n`
"""
num_terms = recursion_coefs.shape[0]
abc = np.zeros((num_terms, 3))
abc[:, 0] = 1./recursion_coefs[:, 1]
abc[1:, 1] = recursion_coefs[:-1, 0]/recursion_coefs[1:, 1]
abc[1:, 2] = recursion_coefs[:-1, 1]/recursion_coefs[1:, 1]
return abc
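# Minimal self-check sketch for the recurrences above, using parameter choices
# whose coefficients are known in closed form (Legendre and probabilists'
# Hermite). The block only runs when the module is executed directly.
if __name__ == "__main__":
    ab = jacobi_recurrence(5, alpha=0., beta=0., probability=True)
    n = np.arange(1, 5)
    # Legendre case: a_n = 0 and b_n = n/sqrt(4n^2-1), with b_0 set to 1
    assert np.allclose(ab[:, 0], 0.)
    assert np.allclose(ab[1:, 1], n/np.sqrt(4.*n**2-1.))
    ab = hermite_recurrence(5, rho=0., probability=True)
    # probabilists' Hermite case: a_n = 0 and b_n = sqrt(n)
    assert np.allclose(ab[:, 0], 0.)
    assert np.allclose(ab[1:, 1], np.sqrt(n))
    # the equivalent three term coefficients gain a third column
    abc = convert_orthonormal_recurence_to_three_term_recurence(ab)
    assert abc.shape == (5, 3)
    print("recursion coefficient checks passed")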
496328
import ray
import numpy as np
ray.init(num_cpus=8, object_store_memory=int(4e9))
@ray.remote
def consume(name, split):
i = 0
for row in split.iter_rows():
i += 1
def gen(i):
return {
"a": 4.0,
"b": "hello" * 1000,
"c": np.zeros(25000),
}
ds = ray.data.range(10000).map(gen)
print(ds, ds.size_bytes())
pipeline = ds.repeat(100).random_shuffle()
print(pipeline)
a, b, c, d, e = pipeline.split(5, equal=True)
x1 = consume.remote("consumer A", a)
x2 = consume.remote("consumer B", b)
x3 = consume.remote("consumer C", c)
x4 = consume.remote("consumer D", d)
x5 = consume.remote("consumer E", e)
ray.get([x1, x2, x3, x4, x5])
496407
from typing import Union, List, Dict, Any, Sequence, Iterable, Optional
import re
reCommaWhitespacePotentiallyBreaks = re.compile(r",\s+")
def dictString(d: Dict, brackets: Optional[str] = None):
s = ', '.join([f'{k}={toString(v)}' for k, v in d.items()])
if brackets is not None:
return brackets[:1] + s + brackets[-1:]
else:
return s
def listString(l: Iterable[Any], brackets="[]"):
return brackets[:1] + ", ".join((toString(x) for x in l)) + brackets[-1:]
def toString(x):
if type(x) == list:
return listString(x)
elif type(x) == tuple:
return listString(x, brackets="()")
elif type(x) == dict:
return dictString(x, brackets="{}")
else:
s = str(x)
s = reCommaWhitespacePotentiallyBreaks.sub(", ", s) # remove any unwanted line breaks and indentation after commas (as generated, for example, by sklearn objects)
return s
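# Example: toString({"a": [1, 2], "b": (3, 4)}) returns "{a=[1, 2], b=(3, 4)}"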
def objectRepr(obj, memberNamesOrDict: Union[List[str], Dict[str, Any]]):
if type(memberNamesOrDict) == dict:
membersDict = memberNamesOrDict
else:
membersDict = {m: toString(getattr(obj, m)) for m in memberNamesOrDict}
return f"{obj.__class__.__name__}[{dictString(membersDict)}]"
def orRegexGroup(allowedNames: Sequence[str]):
"""
:param allowedNames: strings to include as literals in the regex
:return: raw string of the type (<name1>| ...|<nameN>), where special characters in the names have been escaped
"""
allowedNames = [re.escape(name) for name in allowedNames]
return r"(%s)" % "|".join(allowedNames)
class ToStringMixin:
"""
Provides default implementations for __str__ and __repr__ which contain all attribute names and their values. The
latter also contains the object id.
"""
def _toStringClassName(self):
return type(self).__qualname__
def _toStringProperties(self, exclude: Optional[Union[str, Iterable[str]]] = None, include: Optional[Union[str, Iterable[str]]] = None,
excludeExceptions: Optional[List[str]] = None, includeForced: Optional[List[str]] = None,
additionalEntries: Dict[str, Any] = None) -> str:
"""
Creates a string of the class attributes, with optional exclusions/inclusions/additions.
Exclusions take precedence over inclusions.
        :param exclude: attributes to be excluded
        :param include: attributes to be included; if None/empty, include all that are not excluded
        :param excludeExceptions: attributes which are not to be excluded even if they match an exclusion rule
        :param includeForced: attributes which are always included, regardless of include/exclude semantics
        :param additionalEntries: additional key-value-pairs which are added to the string just like the other attributes
        :return: a string containing attribute names and values
"""
def mklist(x):
if x is None:
return []
if type(x) == str:
return [x]
return x
exclude = mklist(exclude)
include = mklist(include)
includeForced = mklist(includeForced)
excludeExceptions = mklist(excludeExceptions)
def isExcluded(k):
if k in includeForced or k in excludeExceptions:
return False
if k in exclude:
return True
if self._toStringExcludePrivate():
isPrivate = k.startswith("_")
return isPrivate
else:
return False
# determine relevant attribute dictionary
if len(include) == 0: # exclude semantics (include everything by default)
attributeDict = self.__dict__
else: # include semantics (include only inclusions)
attributeDict = {k: getattr(self, k) for k in set(include + includeForced) if hasattr(self, k)}
# apply exclusions and remove underscores from attribute names
d = {k.strip("_"): v for k, v in attributeDict.items() if not isExcluded(k)}
if additionalEntries is not None:
d.update(additionalEntries)
return dictString(d)
def _toStringObjectInfo(self) -> str:
"""
Creates a string containing information on the object instance which is to appear between the square brackets in the string
representation, i.e. if the class name is Foo, then it is the asterisk in "Foo[*]".
        By default, this makes use of all the exclusions/inclusions specified by the other member functions.
        This method can be overridden by sub-classes to provide a custom string.
:return: a string containing the desired content
"""
return self._toStringProperties(exclude=self._toStringExcludes(), include=self._toStringIncludes(),
excludeExceptions=self._toStringExcludeExceptions(), includeForced=self._toStringIncludesForced(),
additionalEntries=self._toStringAdditionalEntries())
def _toStringExcludes(self) -> List[str]:
"""
Makes the string representation exclude the returned attributes.
        Returns a list of attribute names to be excluded from __str__ and __repr__. This method can be overridden by
        sub-classes, which can call super and extend the returned list.
        This method has no effect only if _toStringObjectInfo is overridden not to use its result.
:return: a list of attribute names
"""
return []
def _toStringIncludes(self) -> List[str]:
"""
Makes the string representation include only the returned attributes (i.e. introduces inclusion semantics) - except
if the list is empty, in which case all attributes are included by default.
To add an included attribute in a sub-class, regardless of any super-classes using exclusion or inclusion semantics,
use _toStringIncludesForced instead.
        This method can be overridden by sub-classes, which can call super and extend the returned list.
        This method has no effect only if _toStringObjectInfo is overridden not to use its result.
:return: a list of attribute names; if empty, include all attributes (except the ones being excluded according to other methods)
"""
return []
def _toStringIncludesForced(self) -> List[str]:
"""
Defines a list of attribute names that are required to be present in the string representation, regardless of the
instance using include semantics or exclude semantics, thus facilitating added inclusions in sub-classes.
:return: a list of attribute names
"""
return []
def _toStringAdditionalEntries(self) -> Dict[str, Any]:
return {}
def _toStringExcludePrivate(self) -> bool:
"""
:return: whether to exclude properties that are private, i.e. start with an underscore; explicitly included attributes
will still be considered
"""
return False
def _toStringExcludeExceptions(self) -> List[str]:
"""
Defines attribute names which should not be excluded even though other rules (e.g. the exclusion of private members
via _toStringExcludePrivate) would otherwise exclude them.
:return: a list of attribute names
"""
return []
def __str__(self):
return f"{self._toStringClassName()}[{self._toStringObjectInfo()}]"
def __repr__(self):
info = f"id={id(self)}"
propertyInfo = self._toStringObjectInfo()
if len(propertyInfo) > 0:
info += ", " + propertyInfo
return f"{self._toStringClassName()}[{info}]"
def prettyStringRepr(s: Any, initialIndentationLevel=0, indentationString=" "):
"""
Creates a pretty string representation (using indentations) from the given object/string representation (as generated, for example, via
ToStringMixin). An indentation level is added for every opening bracket.
:param s: an object or object string representation
:param initialIndentationLevel: the initial indentation level
:param indentationString: the string which corresponds to a single indentation level
:return: a reformatted version of the input string with added indentations and line break
"""
if type(s) != str:
s = str(s)
indent = initialIndentationLevel
result = indentationString * indent
i = 0
def nl():
nonlocal result
result += "\n" + (indentationString * indent)
def take(cnt=1):
nonlocal result, i
result += s[i:i+cnt]
i += cnt
def findMatching(j):
start = j
op = s[j]
cl = {"[": "]", "(": ")", "'": "'"}[s[j]]
isBracket = cl != s[j]
stack = 0
while j < len(s):
if s[j] == op and (isBracket or j == start):
stack += 1
elif s[j] == cl:
stack -= 1
if stack == 0:
return j
j += 1
return None
brackets = "[("
quotes = "'"
while i < len(s):
isBracket = s[i] in brackets
isQuote = s[i] in quotes
if isBracket or isQuote:
iMatch = findMatching(i)
takeFullMatchWithoutBreak = False
if iMatch is not None:
k = iMatch + 1
takeFullMatchWithoutBreak = not isBracket or (k-i <= 60 and not("=" in s[i:k] and "," in s[i:k]))
if takeFullMatchWithoutBreak:
take(k-i)
if not takeFullMatchWithoutBreak:
take(1)
indent += 1
nl()
elif s[i] in "])":
take(1)
indent -= 1
elif s[i:i+2] == ", ":
take(2)
nl()
else:
take(1)
return result
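# Usage sketch for ToStringMixin and prettyStringRepr; the demo class and its
# attribute names are made up for illustration and only run when this module
# is executed directly.
if __name__ == "__main__":
    class _DemoModel(ToStringMixin):
        def __init__(self):
            self.learningRate = 0.01
            self.layers = [64, 64, 1]
            self._cache = {}  # private attribute, hidden via _toStringExcludePrivate
        def _toStringExcludePrivate(self) -> bool:
            return True
    model = _DemoModel()
    print(model)  # _DemoModel[learningRate=0.01, layers=[64, 64, 1]]
    print(prettyStringRepr(model))  # same content, reformatted with indentation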
496420
from utils.header import MagicField, Field
from load_command import LoadCommandHeader, LoadCommandCommand
class SubClientCommand(LoadCommandHeader):
ENDIAN = None
FIELDS = (
MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_SUB_CLIENT']: 'LC_SUB_CLIENT'}),
Field('cmdsize', 'I'),
Field('client_offset', 'I'),
)
def __init__(self, bytes_=None, **kwargs):
self.client_offset = None
super(SubClientCommand, self).__init__(bytes_, **kwargs)
496443
import flask_login
import logging
from flask import jsonify, request
from server import app, user_db
from server.auth import user_mediacloud_client, user_name, user_admin_mediacloud_client,\
user_is_admin
from server.util.request import form_fields_required, arguments_required, api_error_handler
logger = logging.getLogger(__name__)
@app.route('/api/topics/search', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_search():
search_str = request.args['searchStr']
mode = request.args['mode'] if 'mode' in request.args else 'list'
user_mc = user_admin_mediacloud_client()
results = user_mc.topicList(name=search_str, limit=50)
if mode == 'full':
matching_topics = results['topics']
else:
# matching_topics = [{'name': x['name'], 'id': x['topics_id']} for x in results['topics']]
matching_topics = results['topics']
return jsonify({'topics': matching_topics})
@app.route('/api/topics/admin/list', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_admin_list():
user_mc = user_admin_mediacloud_client()
    # if a non-admin user calls this, using user_mc guarantees this won't be a security hole
# but for admins this will return ALL topics
topics = user_mc.topicList(limit=500)['topics']
# we also want snapshot info
# topics = _add_snapshots_info_to_topics(topics)
topics = sorted(topics, key=lambda t: t['topics_id'], reverse=True)
return jsonify(topics)
@app.route('/api/topics/favorites', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_favorites():
user_mc = user_mediacloud_client()
favorite_topic_ids = user_db.get_users_lists(user_name(), 'favoriteTopics')
favorited_topics = [user_mc.topic(tid) for tid in favorite_topic_ids]
for t in favorited_topics:
t['isFavorite'] = True
return jsonify({'topics': favorited_topics})
@app.route('/api/topics/queued-and-running', methods=['GET'])
@flask_login.login_required
@api_error_handler
def does_user_have_a_running_topic():
# save a costly set of paging queries when the user is admin
if user_is_admin():
return jsonify([])
# non-admin, so do the real check
user_mc = user_mediacloud_client()
queued_and_running_topics = []
more_topics = True
link_id = None
while more_topics:
results = user_mc.topicList(link_id=link_id, limit=100)
topics = results['topics']
queued_and_running_topics += [t for t in topics if t['state'] in ['running', 'queued']
and t['user_permission'] in ['admin']]
more_topics = 'next' in results['link_ids']
if more_topics:
link_id = results['link_ids']['next']
return jsonify(queued_and_running_topics)
def topics_user_can_access(topics, user_email, is_admin):
# we can't see all the permissions for a topic in topicList results, so we have to use some guesses here.
# pull out just the topics this user has permissions for (ie. remove public ones they don't own)
user_topics = []
for t in topics:
user_is_owner = user_email in [o['email'] for o in t['owners']]
# admins can see all topics, so to make this more manageable only show admins ones they own
ok_to_show = user_is_owner if is_admin else user_is_owner or (not t['is_public'])
if ok_to_show:
user_topics.append(t)
return user_topics
@app.route('/api/topics/personal', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_personal():
user_mc = user_mediacloud_client()
link_id = request.args.get('linkId')
results = user_mc.topicList(link_id=link_id, limit=1000)
user_accessible_topics = topics_user_can_access(results['topics'], flask_login.current_user.profile['email'],
user_is_admin())
# update this in place so the results['link_ids'] don't change (for paging support)
results['topics'] = add_user_favorite_flag_to_topics(user_accessible_topics)
return jsonify(results)
@app.route('/api/topics/<topics_id>/favorite', methods=['PUT'])
@flask_login.login_required
@form_fields_required('favorite')
@api_error_handler
def topic_set_favorited(topics_id):
favorite = int(request.form["favorite"])
username = user_name()
if favorite == 1:
user_db.add_item_to_users_list(username, 'favoriteTopics', int(topics_id))
else:
user_db.remove_item_from_users_list(username, 'favoriteTopics', int(topics_id))
return jsonify({'isFavorite': favorite == 1})
def add_user_favorite_flag_to_topics(topics):
user_favorited = user_db.get_users_lists(user_name(), 'favoriteTopics')
for t in topics:
t['isFavorite'] = t['topics_id'] in user_favorited
return topics
496456
import vstruct
from vstruct.primitives import *
class BITMAPINFOHEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.biSize = v_uint32()
self.biWidth = v_int32()
self.biHeight = v_int32()
self.biPlanes = v_uint16()
self.biBitCount = v_uint16()
self.biCompression = v_uint32()
self.biSizeImage = v_uint32()
self.biXPelsPerMeter = v_int32()
self.biYPelsPerMeter = v_int32()
        self.biClrUsed = v_uint32()
self.biClrImportant = v_uint32()
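# Usage sketch (assumes vivisect's standard VStruct.vsParse API): parse a
# 40-byte header describing a hypothetical 2x2, 24-bit, uncompressed bitmap.
if __name__ == "__main__":
    import struct
    raw = struct.pack("<IiiHHIIiiII", 40, 2, 2, 1, 24, 0, 16, 2835, 2835, 0, 0)
    hdr = BITMAPINFOHEADER()
    hdr.vsParse(raw)
    print(hdr.biWidth, hdr.biHeight, hdr.biBitCount)  # 2 2 24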
496459
from typing import Union, Tuple, Any
import numpy as np
# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
# `np.bytes_` are already subclasses of their builtin counterpart
_CharLike_co = Union[str, bytes]
# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
# coerced into `<X>` (with the casting rule `same_kind`)
_BoolLike_co = Union[bool, np.bool_]
_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
_IntLike_co = Union[_BoolLike_co, int, np.integer]
_FloatLike_co = Union[_IntLike_co, float, np.floating]
_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
_TD64Like_co = Union[_IntLike_co, np.timedelta64]
_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
_ScalarLike_co = Union[
int,
float,
complex,
str,
bytes,
np.generic,
]
# `_VoidLike_co` is technically not a scalar, but it's close enough
_VoidLike_co = Union[Tuple[Any, ...], np.void]
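# Example: a parameter annotated with `_FloatLike_co` accepts `True`, `3`,
# `3.0` and `np.float64(3.0)`, but not `3j` (which only `_ComplexLike_co`
# and `_NumberLike_co` allow).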
496462
import ssz
from eth2.beacon.types.voluntary_exits import SignedVoluntaryExit, VoluntaryExit
def test_defaults(sample_voluntary_exit_params, sample_signature):
exit = VoluntaryExit.create(**sample_voluntary_exit_params)
signed_exit = SignedVoluntaryExit.create(message=exit, signature=sample_signature)
assert exit.epoch == sample_voluntary_exit_params["epoch"]
assert ssz.encode(exit)
assert ssz.encode(signed_exit)
496491
from setuptools import setup
from os import path
HERE = path.abspath(path.dirname(__file__))
def readme():
with open(path.join(HERE, 'README.rst')) as f:
return f.read()
setup(
name='choix',
version='0.3.3',
author='<NAME>',
author_email='<EMAIL>',
description="Inference algorithms for models based on Luce's choice axiom.",
long_description=readme(),
url='https://github.com/lucasmaystre/choix',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='statistics ml bradley terry plackett luce choice comparison ranking',
packages=['choix'],
install_requires=[
'numpy',
'scipy',
],
setup_requires=['pytest-runner'],
tests_require=[
'pytest',
'networkx',
],
include_package_data=True,
zip_safe=False,
)
496500
from rest_framework.pagination import LimitOffsetPagination
class LimitOffsetPaginationWithMaxLimit(LimitOffsetPagination):
max_limit = 10
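# Usage sketch (the module path below is hypothetical): enable this class
# globally in the Django settings, after which clients may page with
# ?limit=...&offset=... and any requested limit is capped at 10.
#
#     REST_FRAMEWORK = {
#         "DEFAULT_PAGINATION_CLASS": "api.pagination.LimitOffsetPaginationWithMaxLimit",
#         "PAGE_SIZE": 10,
#     }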
496578
def initialize_project():
print("Importing necessary methods...")
from utils.create_proto import create_proto_files
from utils.import_fixer import convert_to_relative_imports
print("Creating proto files...")
create_proto_files()
convert_to_relative_imports()
if __name__ == "__main__":
print("Initialising project...")
initialize_project()
496584
from server.user.interface import User
from server.context import Context
from django.conf import settings
from .models import Page
def frontend(request):
return {
'page': Page.get(),
'user_': (
User.get(Context.from_request(request), request.user.pk)
if request.user.is_authenticated else None
),
'groups': User.groups,
'debug': settings.DEBUG,
'no_board_groups': User.no_board_groups,
}
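# Usage sketch (the dotted path is hypothetical and depends on where this
# module lives): a context processor such as `frontend` is activated by adding
# its path to TEMPLATES[0]["OPTIONS"]["context_processors"] in settings.py,
# e.g. "server.frontend.context_processors.frontend".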