| id (string, 1–8 chars) | text (string, 6–1.05M chars) | dataset_id (1 distinct value) |
---|---|---|
312542
|
<gh_stars>0
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import common
import validate
import memberdef
class FieldDef(memberdef.MemberDef):
"""Contains the description of a structure field"""
def __init__(self, name = None, arg_type = None):
"""Initialize an instance of the FieldDef class"""
memberdef.MemberDef.__init__(self, name, arg_type)
return
def parse(self, xml, lax_naming):
"""Parse the given field xml element"""
#print("Parsing Field '{0}'".format(xml.get('name')))
self.name = xml.get('name')
self.arg_type = xml.get('type')
validate.data_signature(self.arg_type, xml)
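# Illustrative usage sketch, not part of the original AllSeen file: parse() only
# reads the 'name' and 'type' attributes, so any ElementTree element shaped like
# the (made-up) <field> below exercises it.
if __name__ == "__main__":
    import xml.etree.ElementTree as ET
    example_field = FieldDef()
    example_field.parse(ET.fromstring('<field name="count" type="i"/>'), lax_naming=False)
    print(example_field.name, example_field.arg_type)  # -> count i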
|
StarcoderdataPython
|
8072659
|
import pytest
from tri_declarative import (
class_shortcut,
evaluate,
evaluate_recursive,
evaluate_recursive_strict,
evaluate_strict,
filter_show_recursive,
matches,
Namespace,
remove_show_recursive,
Shortcut,
should_show,
)
from tri_declarative.evaluate import (
get_callable_description,
get_signature,
)
def test_evaluate_recursive():
foo = {
'foo': {'foo': lambda x: x * 2},
'bar': [{'foo': lambda x: x * 2}],
'baz': {lambda x: x * 2},
'boo': 17
}
assert evaluate_recursive(foo, x=2) == {
'foo': {'foo': 4},
'bar': [{'foo': 4}],
'baz': {4},
'boo': 17,
}
def test_remove_and_filter_show_recursive():
class Foo:
show = False
assert remove_show_recursive(filter_show_recursive({
'foo': [Foo(), {'show': False}, {'bar'}, {}, {'show': True}],
'bar': {'show': False},
'baz': Foo(),
'asd': {Foo(), 'bar'},
'qwe': {'show': True},
'quux': {'show': None},
})) == ({
'foo': [{'bar'}, {}, {}],
'asd': {'bar'},
'qwe': {},
})
def test_should_show():
class Foo:
show = False
assert should_show(Foo()) is False
assert should_show(Foo) is False
assert should_show(dict(show=False)) is False
assert should_show(dict(show=True)) is True
assert should_show(dict(show=[])) == []
with pytest.raises(AssertionError) as e:
assert should_show(dict(show=lambda: True))
assert str(e.value) == '`show` was a callable. You probably forgot to evaluate it. The callable was: lambda found at: `assert should_show(dict(show=lambda: True))`'
def test_no_evaluate_kwargs_mismatch():
def f(x):
return x * 2
assert evaluate(f) is f
assert evaluate(f, y=1) is f
def test_get_signature():
# noinspection PyUnusedLocal
def f(a, b):
pass
# noinspection PyUnusedLocal
def f2(b, a):
pass
assert get_signature(lambda a, b: None) == get_signature(f2) == get_signature(f) == 'a,b||'
# noinspection PyUnresolvedReferences
assert f.__tri_declarative_signature == 'a,b||'
def test_get_signature_fails_on_native():
# isinstance will return False for a native function. A string will also return False.
f = 'this is not a function'
assert get_signature(f) is None
def test_get_signature_on_class():
class Foo:
# noinspection PyUnusedLocal
def __init__(self, a, b):
pass
assert 'a,b,self||' == get_signature(Foo)
# noinspection PyUnresolvedReferences
assert 'a,b,self||' == Foo.__tri_declarative_signature
def test_get_signature_varargs():
assert get_signature(lambda a, b, **c: None) == "a,b||*"
def test_evaluate_subset_parameters():
def f(x, **_):
return x
assert evaluate(f, x=17, y=42) == 17
def test_match_caching():
assert matches("a,b", "a,b||")
assert matches("a,b", "a||*")
assert not matches("a,b", "c||*")
assert matches("a,b", "a||*")
assert not matches("a,b", "c||*")
def test_get_signature_description():
assert get_signature(lambda a, b: None) == 'a,b||'
assert get_signature(lambda a, b, c, d=None, e=None: None) == 'a,b,c|d,e|'
assert get_signature(lambda d, c, b=None, a=None: None) == 'c,d|a,b|'
assert get_signature(lambda a, b, c=None, d=None, **_: None) == 'a,b|c,d|*'
assert get_signature(lambda d, c, b=None, a=None, **_: None) == 'c,d|a,b|*'
assert get_signature(lambda **_: None) == '||*'
def test_match_optionals():
assert matches("a,b", "a,b||")
assert matches("a,b", "a,b|c|")
assert matches("a,b,c", "a,b|c|")
assert matches("a,b,c", "a,b|c,d|")
assert matches("a,b", "a,b|c|*")
assert not matches("a,b,d", "a,b|c|")
assert matches("a,b,d", "a,b|c|*")
assert matches("", "||")
assert not matches("a", "||")
def test_match_special_case():
assert not matches("", "||*")
assert not matches("a,b,c", "||*")
def test_evaluate_extra_kwargs_with_defaults():
# noinspection PyUnusedLocal
def f(x, y=17):
return x
assert evaluate(f, x=17) == 17
def test_evaluate_on_methods():
class Foo:
# noinspection PyMethodMayBeStatic
def bar(self, x):
return x
@staticmethod
def baz(x):
return x
assert evaluate(Foo().bar, x=17) == 17
assert evaluate(Foo().baz, x=17) == 17
f = Foo().bar
assert evaluate(f, y=17) is f
def test_early_return_from_get_signature():
# noinspection PyUnusedLocal
def foo(a, b, c):
pass
object.__setattr__(foo, '__tri_declarative_signature', 'foobar')
assert get_signature(foo) == 'foobar'
def test_evaluate_strict():
with pytest.raises(AssertionError) as e:
evaluate_strict(lambda foo: 1, bar=2, baz=4)
assert str(e.value) == "Evaluating lambda found at: `evaluate_strict(lambda foo: 1, bar=2, baz=4)` didn't resolve it into a value but strict mode was active, the signature doesn't match the given parameters. Note that you must match at least one keyword argument. We had these arguments: bar, baz"
def test_evaluate_recursive_strict():
with pytest.raises(AssertionError) as e:
evaluate_recursive_strict(dict(foo=lambda foo: 1), bar=2, baz=4)
assert str(e.value) == "Evaluating lambda found at: `evaluate_recursive_strict(dict(foo=lambda foo: 1), bar=2, baz=4)` didn't resolve it into a value but strict mode was active, the signature doesn't match the given parameters. Note that you must match at least one keyword argument. We had these arguments: bar, baz"
def test_non_strict_evaluate():
def foo(bar):
return bar
assert evaluate(foo, bar=True) is True # first the evaluated case
assert evaluate(foo, quuz=True) is foo # now we missed the signature, so we get the function unevaluated back
def test_get_callable_description():
# noinspection PyUnusedLocal
def foo(a, b, c, *, bar, **kwargs):
pass
description = get_callable_description(foo)
assert description.startswith('`<function test_get_callable_description.<locals>.foo at')
assert description.endswith('`')
def test_get_callable_description_nested_lambda():
foo = Namespace(bar=lambda x: x)
description = get_callable_description(foo)
assert description.startswith('`Namespace(bar=<function test_get_callable_description_nested_lambda.<locals>.<lambda> at')
assert description.endswith('`')
def test_get_signature_on_namespace_does_not_modify_its_contents():
foo = Namespace()
get_signature(foo)
assert str(foo) == 'Namespace()'
def test_shortcut_chaining():
def endpoint(**kwargs):
return kwargs
foo = Shortcut(
call_target=endpoint,
tag='foo',
)
bar = Shortcut(
call_target=foo,
bar=1,
# these two will get popped off by Namespace.__call__, let's make sure they are!
call_target__cls='randomcrap',
call_target__attribute='randomcrap',
)
assert bar() == dict(tag='foo', bar=1)
def test_class_shortcut__shortcut_stack():
class MyFoo:
@classmethod
@class_shortcut
def shortcut(cls, call_target):
return call_target()
@classmethod
@class_shortcut(
call_target__attribute='shortcut'
)
def shortcut2(cls, call_target, **kwargs):
return call_target(**kwargs)
middle = Shortcut(call_target=MyFoo.shortcut2)
class MyOtherFoo(MyFoo):
@classmethod
@class_shortcut(
call_target=middle
)
def shortcut3(cls, call_target, **kwargs):
return call_target(**kwargs)
assert MyOtherFoo().shortcut2().__tri_declarative_shortcut_stack == ['shortcut2', 'shortcut']
assert MyOtherFoo().shortcut3().__tri_declarative_shortcut_stack == ['shortcut3', 'shortcut2', 'shortcut']
|
StarcoderdataPython
|
9699189
|
<reponame>andrewinsoul/myDiary-python
from django.test import TestCase
class InitialTest3(TestCase):
def test_one(self):
self.assertEqual(2-2, 0)
|
StarcoderdataPython
|
313960
|
import os
datapath = os.path.join(os.path.dirname(__file__), 'plots')
if not os.path.exists(datapath):
os.makedirs(datapath)
def get_plot_path(filename):
return os.path.join(datapath, filename)
|
StarcoderdataPython
|
11223587
|
"""The BRMFlask Sitemap Blueprint."""
from flask import Blueprint
sitemap = Blueprint(
'sitemap',
__name__,
template_folder='templates',
static_folder='static'
)
from . import views
|
StarcoderdataPython
|
1749547
|
import json
import numpy as np
import os
import pytest
from numpy_encoder import NumpyEncoder, ndarray_hook
test_vector = np.array([1.0, 2.0, 3.0])
test_mat = np.eye(3)
here = os.path.dirname(os.path.realpath(__file__))
def write_encoded(obj, tdir):
fp = tdir.mkdir("sub").join("obj.json")
fp.write(obj)
return fp
def read_encoded(fp, hook=None):
with open(fp) as f:
return json.load(f, object_hook=hook)
@pytest.mark.parametrize(
"np_dtype",
[
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float_,
np.float16,
np.float32,
np.float64,
],
)
def test_number_cast(np_dtype, tmpdir):
num = 42
num_cast = np_dtype(num)
encoded = json.dumps(num_cast, cls=NumpyEncoder)
fp = write_encoded(encoded, tmpdir)
rw_num = read_encoded(fp)
rw_num = np_dtype(rw_num)
assert rw_num == num_cast, num_cast
@pytest.mark.parametrize(
"np_dtype",
[
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float_,
np.float16,
np.float32,
np.float64,
],
)
def test_array(np_dtype, tmpdir):
test_cast = np_dtype(test_mat)
encoded = json.dumps(test_cast, cls=NumpyEncoder)
fp = write_encoded(encoded, tmpdir)
mat = read_encoded(fp, hook=ndarray_hook)
assert mat.dtype == test_cast.dtype
assert mat.shape == test_cast.shape
assert np.allclose(test_cast, mat)
@pytest.mark.parametrize(
"metadata",
["D", "ms"],
)
def test_single_date(tmpdir, metadata):
date_in = np.datetime64("2020-04", metadata)
encoded = json.dumps(date_in, cls=NumpyEncoder)
fp = write_encoded(encoded, tmpdir)
date_out = np.datetime64(read_encoded(fp, hook=None))
assert date_in == date_out
assert isinstance(date_out, np.datetime64)
assert str(date_in.dtype) == str(date_out.dtype)
def test_date_array(tmpdir):
date_array = np.arange("2020-01-01", "2020-02-01", dtype=np.datetime64)
encoded = json.dumps(date_array, cls=NumpyEncoder)
fp = write_encoded(encoded, tmpdir)
dates_out = read_encoded(fp, hook=ndarray_hook)
assert (dates_out == date_array).all()
def test_array_as_list():
with open(os.path.join(here, "ndarray_as_list.json")) as fp:
float_array = np.array(json.load(fp, object_hook=ndarray_hook))
assert isinstance(float_array, np.ndarray)
|
StarcoderdataPython
|
187323
|
#!/usr/bin/python
'''
htsint
config file
'''
__author__ = "<NAME>"
import os,csv,re,ast
from .version import __version__
defaultCONFIG = {'data':'/usr/local/share/htsint',
'dbname':"",
'dbuser':"",
'dbpass':"",
'dbhost':"localhost",
'dbport':"5433",
'taxa': ['3702','4932','5476','7227','7955','8355','8364','9031','9606',\
'10090','10566','10116','28377']
}
class Configure(object):
"""
class to handle the database config
"""
def __init__(self):
"""
constructor
"""
self.logFileDir = os.path.join(os.path.expanduser('~'),".htsint")
if os.path.exists(self.logFileDir) == True and os.path.isdir(self.logFileDir) == False:
os.remove(self.logFileDir)
if os.path.isdir(self.logFileDir) == False:
os.mkdir(self.logFileDir)
self.logFilePath = os.path.join(self.logFileDir,"dbconfig.log")
if os.path.exists(self.logFilePath) == False:
fid = open(self.logFilePath,'w')
writer = csv.writer(fid)
for key,item in defaultCONFIG.items():
if item == None:
item = 'None'
elif type(item) != type('i am a string'):
item = str(item)
writer.writerow([key,item])
fid.close()
self.log = self.read_project_log(self.logFilePath)
## effectively the only action necessary to save a project in its current state
def save(self):
fid = open(self.logFilePath,'w')
writer = csv.writer(fid)
for key,item in self.log.items():
if item == None:
item = 'None'
elif type(item) != type('i am a string'):
item = str(item)
writer.writerow([key,item])
fid.close()
## reads the log file associated with the current project and returns a dict
def read_project_log(self,logPathName):
if os.path.isfile(logPathName) == False:
print("ERROR: invalid model logfile specified",logPathName)
return None
else:
logFileDict = {}
fid = open(logPathName,'r')
reader = csv.reader(fid)
for linja in reader:
if re.search("\[|\{|None",str(linja[1])):
try:
linja[1] = ast.literal_eval(str(linja[1]))
except:
print('ERROR: Logger -- string literal conversion failed', linja[1])
logFileDict[linja[0]] = linja[1]
fid.close()
return logFileDict
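# Illustrative usage sketch, not part of the original htsint module (note that
# instantiating Configure creates ~/.htsint/dbconfig.log as a side effect):
#
#   config = Configure()
#   config.log['dbname'] = 'htsint_db'   # hypothetical database name
#   config.save()                        # persists the updated key/value log
#   print(config.read_project_log(config.logFilePath)['dbname'])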
|
StarcoderdataPython
|
1749739
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Read a dicom media file"""
from pydicom.misc import size_in_bytes
from pydicom.datadict import dictionary_VR
from pydicom.tag import TupleTag
from struct import Struct, unpack
extra_length_VRs_b = (b'OB', b'OW', b'OF', b'SQ', b'UN', b'UT')
ExplicitVRLittleEndian = b'1.2.840.10008.1.2.1'
ImplicitVRLittleEndian = b'1.2.840.10008.1.2'
DeflatedExplicitVRLittleEndian = b'1.2.840.10008.1.2.1.99'
ExplicitVRBigEndian = b'1.2.840.10008.1.2.2'
ItemTag = 0xFFFEE000 # start of Sequence Item
ItemDelimiterTag = 0xFFFEE00D # end of Sequence Item
SequenceDelimiterTag = 0xFFFEE0DD # end of Sequence of undefined length
class dicomfile(object):
"""Context-manager based DICOM file object with data element iteration"""
def __init__(self, filename):
self.fobj = fobj = open(filename, "rb")
# Read the DICOM preamble, if present
self.preamble = fobj.read(0x80)
dicom_prefix = fobj.read(4)
if dicom_prefix != b"DICM":
self.preamble = None
fobj.seek(0)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.fobj.close()
def __iter__(self):
# Need the transfer_syntax later
transfer_syntax_uid = None
# Yield the file meta info elements
file_meta_gen = data_element_generator(
self.fobj,
is_implicit_VR=False,
is_little_endian=True,
stop_when=lambda gp, elem: gp != 2)
for data_elem in file_meta_gen:
if data_elem[0] == (0x0002, 0x0010):
transfer_syntax_uid = data_elem[3]
yield data_elem
# Continue to yield elements from the main data
if transfer_syntax_uid:
if transfer_syntax_uid.endswith(b' ') or \
transfer_syntax_uid.endswith(b'\0'):
transfer_syntax_uid = transfer_syntax_uid[:-1]
is_implicit_VR, is_little_endian = transfer_syntax(
transfer_syntax_uid)
# print is_implicit_VR
else:
raise NotImplementedError("No transfer syntax in file meta info")
ds_gen = data_element_generator(self.fobj, is_implicit_VR,
is_little_endian)
for data_elem in ds_gen:
yield data_elem
return  # generator is exhausted (PEP 479: do not raise StopIteration explicitly)
def transfer_syntax(uid):
"""Parse the transfer syntax
:return: is_implicit_VR, is_little_endian
"""
# Assume a transfer syntax, correct it as necessary
is_implicit_VR = True
is_little_endian = True
if uid == ImplicitVRLittleEndian:
pass
elif uid == ExplicitVRLittleEndian:
is_implicit_VR = False
elif uid == ExplicitVRBigEndian:
is_implicit_VR = False
is_little_endian = False
elif uid == DeflatedExplicitVRLittleEndian:
raise NotImplementedError("This reader does not handle deflate files")
else:
# PS 3.5-2008 A.4 (p63): other syntaxes (e.g. all compressed)
# should be Explicit VR Little Endian,
is_implicit_VR = False
return is_implicit_VR, is_little_endian
####
def data_element_generator(fp,
is_implicit_VR,
is_little_endian,
stop_when=None,
defer_size=None):
""":return: (tag, VR, length, value, value_tell,
is_implicit_VR, is_little_endian)
"""
if is_little_endian:
endian_chr = "<"
else:
endian_chr = ">"
if is_implicit_VR:
element_struct = Struct(endian_chr + "HHL")
else: # Explicit VR
# tag, VR, 2-byte length (or 0 if special VRs)
element_struct = Struct(endian_chr + "HH2sH")
extra_length_struct = Struct(endian_chr + "L") # for special VRs
extra_length_unpack = extra_length_struct.unpack # for lookup speed
# Make local variables so have faster lookup
fp_read = fp.read
fp_tell = fp.tell
element_struct_unpack = element_struct.unpack
defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
bytes_read = fp_read(8)
if len(bytes_read) < 8:
return  # at end of file (PEP 479: do not raise StopIteration in a generator)
if is_implicit_VR:
# must reset VR each time; could have set last iteration (e.g. SQ)
VR = None
group, elem, length = element_struct_unpack(bytes_read)
else: # explicit VR
group, elem, VR, length = element_struct_unpack(bytes_read)
if VR in extra_length_VRs_b:
bytes_read = fp_read(4)
length = extra_length_unpack(bytes_read)[0]
# Positioned to read the value, but may not want to -- check stop_when
value_tell = fp_tell()
if stop_when is not None:
if stop_when(group, elem):
rewind_length = 8
if not is_implicit_VR and VR in extra_length_VRs_b:
rewind_length += 4
fp.seek(value_tell - rewind_length)
return
# Reading the value
# First case (most common): reading a value with a defined length
if length != 0xFFFFFFFF:
if defer_size is not None and length > defer_size:
# Flag as deferred by setting value to None, and skip bytes
value = None
fp.seek(fp_tell() + length)
else:
value = fp_read(length)
# import pdb;pdb.set_trace()
yield ((group, elem), VR, length, value, value_tell)
# Second case: undefined length - must seek to delimiter,
# unless is SQ type, in which case is easier to parse it, because
# undefined length SQs and items of undefined lengths can be nested
# and it would be error-prone to read to the correct outer delimiter
else:
# Try to look up type to see if is a SQ
# if private tag, won't be able to look it up in dictionary,
# in which case just ignore it and read the bytes unless it is
# identified as a Sequence
if VR is None:
try:
VR = dictionary_VR(TupleTag((group, elem)))
except KeyError:
# Look ahead to see if it consists of items and
# is thus a SQ
next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
# Rewind the file
fp.seek(fp_tell() - 4)
if next_tag == ItemTag:
VR = b'SQ'
if VR == b'SQ':
yield ((group, elem), VR, length, None, value_tell)
# seq = read_sequence(fp, is_implicit_VR,
# is_little_endian, length, encoding)
# yield DataElement(tag, VR, seq, value_tell,
# is_undefined_length=True)
else:
raise NotImplementedError("This reader does not handle "
"undefined length except for SQ")
from pydicom.fileutil import read_undefined_length_value
delimiter = SequenceDelimiterTag
value = read_undefined_length_value(fp, is_little_endian,
delimiter, defer_size)
yield ((group, elem), VR, length, value, value_tell)
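# Minimal usage sketch, not part of the original pydicom module; "example.dcm"
# below is a placeholder path to any DICOM file on disk.
if __name__ == "__main__":
    with dicomfile("example.dcm") as ds:
        for (tag, VR, length, value, value_tell) in ds:
            print(tag, VR, length)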
|
StarcoderdataPython
|
9650276
|
import re
import pytest
import numpy as np
import warnings
from unittest.mock import Mock
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils._testing import MinimalClassifier
from sklearn import datasets
from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
from sklearn.datasets import make_friedman1
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier, HistGradientBoostingClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.base import BaseEstimator
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
class NaNTag(BaseEstimator):
def _more_tags(self):
return {"allow_nan": True}
class NoNaNTag(BaseEstimator):
def _more_tags(self):
return {"allow_nan": False}
class NaNTagRandomForest(RandomForestClassifier):
def _more_tags(self):
return {"allow_nan": True}
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(
alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None
)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
with pytest.raises(ValueError):
model.transform(data)
def test_input_estimator_unchanged():
# Test that SelectFromModel fits on a clone of the estimator.
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert transformer.estimator is est
@pytest.mark.parametrize(
"max_features, err_type, err_msg",
[
(-1, ValueError, "max_features =="),
(
data.shape[1] + 1,
ValueError,
"max_features ==",
),
(
lambda X: 1.5,
TypeError,
"max_features(X) must be an instance of int, not float.",
),
(
"gobbledigook",
TypeError,
"'max_features' must be either an int or a callable",
),
(
"all",
TypeError,
"'max_features' must be either an int or a callable",
),
],
)
def test_max_features_error(max_features, err_type, err_msg):
err_msg = re.escape(err_msg)
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
with pytest.raises(err_type, match=err_msg):
transformer.fit(data, y)
@pytest.mark.parametrize("max_features", [0, 2, data.shape[1]])
def test_inferred_max_features_integer(max_features):
"""Check max_features_ and output shape for integer max_features."""
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
assert transformer.max_features_ == max_features
assert X_trans.shape[1] == transformer.max_features_
@pytest.mark.parametrize(
"max_features",
[lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],
)
def test_inferred_max_features_callable(max_features):
"""Check max_features_ and output shape for callable max_features."""
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
assert transformer.max_features_ == max_features(data)
assert X_trans.shape[1] == transformer.max_features_
@pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2])
def test_max_features_array_like(max_features):
X = [
[0.87, -1.34, 0.31],
[-2.79, -0.02, -0.85],
[-1.34, -0.48, -2.55],
[1.92, 1.48, 0.65],
]
y = [0, 1, 0, 1]
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(X, y)
assert X_trans.shape[1] == transformer.max_features_
@pytest.mark.parametrize(
"max_features",
[lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1],
)
def test_max_features_callable_data(max_features):
"""Tests that the callable passed to `fit` is called on X."""
clf = RandomForestClassifier(n_estimators=50, random_state=0)
m = Mock(side_effect=max_features)
transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf)
transformer.fit_transform(data, y)
m.assert_called_with(data)
class FixedImportanceEstimator(BaseEstimator):
def __init__(self, importances):
self.importances = importances
def fit(self, X, y=None):
self.feature_importances_ = np.array(self.importances)
def test_max_features():
# Test max_features parameter using various values
X, y = datasets.make_classification(
n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
max_features = X.shape[1]
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer1 = SelectFromModel(estimator=est, threshold=-np.inf)
transformer2 = SelectFromModel(
estimator=est, max_features=max_features, threshold=-np.inf
)
X_new1 = transformer1.fit_transform(X, y)
X_new2 = transformer2.fit_transform(X, y)
assert_allclose(X_new1, X_new2)
# Test max_features against actual model.
transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42))
X_new1 = transformer1.fit_transform(X, y)
scores1 = np.abs(transformer1.estimator_.coef_)
candidate_indices1 = np.argsort(-scores1, kind="mergesort")
for n_features in range(1, X_new1.shape[1] + 1):
transformer2 = SelectFromModel(
estimator=Lasso(alpha=0.025, random_state=42),
max_features=n_features,
threshold=-np.inf,
)
X_new2 = transformer2.fit_transform(X, y)
scores2 = np.abs(transformer2.estimator_.coef_)
candidate_indices2 = np.argsort(-scores2, kind="mergesort")
assert_allclose(
X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]]
)
assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_)
def test_max_features_tiebreak():
# Test if max_features can break tie among feature importance
X, y = datasets.make_classification(
n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
max_features = X.shape[1]
feature_importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1])
for n_features in range(1, max_features + 1):
transformer = SelectFromModel(
FixedImportanceEstimator(feature_importances),
max_features=n_features,
threshold=-np.inf,
)
X_new = transformer.fit_transform(X, y)
selected_feature_indices = np.where(transformer._get_support_mask())[0]
assert_array_equal(selected_feature_indices, np.arange(n_features))
assert X_new.shape[1] == n_features
def test_threshold_and_max_features():
X, y = datasets.make_classification(
n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf)
X_new1 = transformer1.fit_transform(X, y)
transformer2 = SelectFromModel(estimator=est, threshold=0.04)
X_new2 = transformer2.fit_transform(X, y)
transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04)
X_new3 = transformer3.fit_transform(X, y)
assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1])
selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :])
assert_allclose(X_new3, X[:, selected_indices[0]])
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert hasattr(transformer.estimator_, "feature_importances_")
X_new = transformer.transform(X)
assert X_new.shape[1] < X.shape[1]
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_sample_weight():
# Ensure sample weights are passed to underlying estimator
X, y = datasets.make_classification(
n_samples=100,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = LogisticRegression(random_state=0, fit_intercept=False)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=None)
mask = transformer._get_support_mask()
transformer.fit(X, y, sample_weight=sample_weight)
weighted_mask = transformer._get_support_mask()
assert not np.all(weighted_mask == mask)
transformer.fit(X, y, sample_weight=3 * sample_weight)
reweighted_mask = transformer._get_support_mask()
assert np.all(weighted_mask == reweighted_mask)
def test_coef_default_threshold():
X, y = datasets.make_classification(
n_samples=100,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1, random_state=42))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_almost_equal(X_new, X[:, mask])
@skip_if_32bit
def test_2d_coef():
X, y = datasets.make_classification(
n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
n_classes=4,
)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel on a multi-class problem
transformer = SelectFromModel(
estimator=LogisticRegression(), threshold=threshold, norm_order=order
)
transformer.fit(X, y)
assert hasattr(transformer.estimator_, "coef_")
X_new = transformer.transform(X)
assert X_new.shape[1] < X.shape[1]
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = np.linalg.norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(
random_state=0, shuffle=False, max_iter=5, tol=None
)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y, classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y, classes=np.unique(y))
new_model = transformer.estimator_
assert old_model is new_model
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_almost_equal(X_transform, transformer.transform(data))
# check that if est doesn't have partial_fit, neither does SelectFromModel
transformer = SelectFromModel(estimator=RandomForestClassifier())
assert not hasattr(transformer, "partial_fit")
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert transformer.estimator_.C == 100
def test_prefit():
# Test all possible combinations of the prefit parameter.
# Passing a prefit parameter with the selected model
# and fitting an unfit model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_almost_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_almost_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
with pytest.raises(ValueError):
model.fit(data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_almost_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
# Test that the threshold can be set without refitting the model.
clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
model = SelectFromModel(clf, threshold="0.1 * mean")
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = "1.0 * mean"
assert X_transform.shape[1] > model.transform(data).shape[1]
def test_fit_accepts_nan_inf():
# Test that fit doesn't check for np.inf and np.nan values.
clf = HistGradientBoostingClassifier(random_state=0)
model = SelectFromModel(estimator=clf)
nan_data = data.copy()
nan_data[0] = np.NaN
nan_data[1] = np.Inf
model.fit(data, y)
def test_transform_accepts_nan_inf():
# Test that transform doesn't check for np.inf and np.nan values.
clf = NaNTagRandomForest(n_estimators=100, random_state=0)
nan_data = data.copy()
model = SelectFromModel(estimator=clf)
model.fit(nan_data, y)
nan_data[0] = np.NaN
nan_data[1] = np.Inf
model.transform(nan_data)
def test_allow_nan_tag_comes_from_estimator():
allow_nan_est = NaNTag()
model = SelectFromModel(estimator=allow_nan_est)
assert model._get_tags()["allow_nan"] is True
no_nan_est = NoNaNTag()
model = SelectFromModel(estimator=no_nan_est)
assert model._get_tags()["allow_nan"] is False
def _pca_importances(pca_estimator):
return np.abs(pca_estimator.explained_variance_)
@pytest.mark.parametrize(
"estimator, importance_getter",
[
(
make_pipeline(PCA(random_state=0), LogisticRegression()),
"named_steps.logisticregression.coef_",
),
(PCA(random_state=0), _pca_importances),
],
)
def test_importance_getter(estimator, importance_getter):
selector = SelectFromModel(
estimator, threshold="mean", importance_getter=importance_getter
)
selector.fit(data, y)
assert selector.transform(data).shape[1] == 1
@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
def test_select_from_model_pls(PLSEstimator):
"""Check the behaviour of SelectFromModel with PLS estimators.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12410
"""
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = PLSEstimator(n_components=1)
model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
assert model.score(X, y) > 0.5
def test_estimator_does_not_support_feature_names():
"""SelectFromModel works with estimators that do not support feature_names_in_.
Non-regression test for #21949.
"""
pytest.importorskip("pandas")
X, y = datasets.load_iris(as_frame=True, return_X_y=True)
all_feature_names = set(X.columns)
def importance_getter(estimator):
return np.arange(X.shape[1])
selector = SelectFromModel(
MinimalClassifier(), importance_getter=importance_getter
).fit(X, y)
# selector learns the feature names itself
assert_array_equal(selector.feature_names_in_, X.columns)
feature_names_out = set(selector.get_feature_names_out())
assert feature_names_out < all_feature_names
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
selector.transform(X.iloc[1:3])
|
StarcoderdataPython
|
9714389
|
from ..message_server import Message, ClosingMessage
from ..interaction import instantiate
from ..network_error import NetworkError
class HelloMessage(Message):
"""
Let's name ourselves!
"""
def __init__(self, name=""):
Message.__init__(self)
self.name = name
def __setstate__(self, d):
if "name" in d:
self.name = d["name"]
class LogMessage(Message):
"""
Error to be saved to log.
"""
def __init__(self, event=None):
Message.__init__(self)
self.event = event
def __setstate__(self, d):
if "event" in d:
self.event = d["event"]
if isinstance(self.event, dict):
self.event = NetworkError()
self.event.__setstate__(d["event"])
class ReportQuery(Message):
"""
Request log files encoded into specific format.
"""
def __init__(self, fmt="pure"):
Message.__init__(self)
self.fmt = fmt
def __setstate__(self, d):
if "fmt" in d:
self.fmt = d["fmt"]
class ReportReply(Message):
"""
Return log files encoded into specific format.
"""
def __init__(self, report=None, fmt="pure"):
Message.__init__(self)
self.fmt = fmt
self.report = report
def __setstate__(self, d):
if "fmt" in d:
self.fmt = d["fmt"]
if "report" in d:
self.report = d["report"]
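# Small illustrative round-trip, not part of the original module: messages are
# rebuilt from plain dicts via __setstate__, mirroring how the message server
# would deserialize them ("node-1" and the report payload are made-up values).
if __name__ == "__main__":
    hello = HelloMessage()
    hello.__setstate__({"name": "node-1"})
    reply = ReportReply()
    reply.__setstate__({"fmt": "pure", "report": "..."})
    print(hello.name, reply.fmt)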
|
StarcoderdataPython
|
5097722
|
from haversine import haversine, Unit
from statistics import mean
import ast
import urllib
import aiohttp
import asyncio
import json
import sqlite3
import googlemaps
from db_handler import DatabaseHandler
class ApiClient:
base_url_walk_score = "https://api.walkscore.com/score?"
base_url_google_geocode = "https://maps.googleapis.com/maps/api/geocode/json?"
def __init__(self, db_path="src/db.sqlite", k_walk_score="UNSET", k_google="UNSET"):
self.k_walk_score = k_walk_score
self.k_google = k_google
self.client_http = aiohttp.ClientSession() # close this
self.db = DatabaseHandler(db_path)
# query: [houseNumber, streetName, city, state, zip]
async def search_properties(self, query_id, query, params, radius: float):
query = """
SELECT * FROM property
WHERE id = ?
LIMIT 1
"""
params = query_id
query_property = self.db.read(query, params)
# Find all surrounding properties as dict
surrounding_property = self.db.read(query, params)
await self.update_property_coords(query_id)
await self.update_property_score(query_id)
# go through each entry in dict,
for property in surrounding_property:
await self.update_property_coords(property['id'])
await self.update_property_score(property['id'])
params = {'id': property['id']}
if haversine((query_property['latitude'], query_property['longitude']), (property['latitude'], property['longitude']), unit=Unit.MILES) > radius:
surrounding_property.remove(property)
# return the dict
return surrounding_property
# requests the walk score of the current address and gets rid of the excess space
async def get_score(self, address, latitude, longitude):
"""
Returns a dictionary of the walk, bike, and transit scores + descriptions, if available.
Or a `None` if an error occurred
"""
params = {'format': 'json', 'transit': '1', 'bike': '1',
'wsapikey': self.k_walk_score, 'latitude': latitude, 'longitude': longitude, 'address': address}
query = ApiClient.base_url_walk_score
result = await self.client_http.get(query, params=params)
try:
result = json.loads(result.content._buffer[0])
return {
'walk_score': result['walkscore'],
'walk_desc': result['description'],
'bike_score': result['bike']['score'],
'bike_desc': result['bike']['description'],
'transit_score': result['transit']['score'],
'transit_desc': result['transit']['description'],
'transit_summary': result['transit']['summary']}
except Exception as e:
print(e)
return None
# return the latitude and longitude of the given address
async def get_geo_coord(self, address):
params = {
'key': self.k_google,
'address': "{} {} {} {}".format(address.address_line, address.city, address.state, address.zip_code)
}
response = await self.client_http.get(ApiClient.base_url_google_geocode, params=params)
data = await response.json()
if data['status'] == 'OK':
result = data['results'][0]
location = result['geometry']['location']
return (location['lat'], location['lng'])
else:
return (None, None)
# if the current property has no walk/bike/transit scores, call the walkScore api to find them and update
async def update_property_score(self, id, force=False):
query = """
SELECT * FROM property
WHERE id = ?
LIMIT 1
"""
params = id
response = self.db.read(query, params)
try:
if not force and response['latitude'] != None and response['longitude'] != None:
return
# broad catch for now; it is unclear which errors force-mode can trigger here, so narrow this once known
except sqlite3.Error as e:
print(e)
score = await self.get_score(response['address'], response['latitude'], response['longitude'])
query = """
UPDATE property
SET walk_score = ?,
bike_score = ?,
transit_score = ?,
transit_summary = ?
WHERE id = ?
"""
params = (  # a tuple preserves the placeholder order; a set literal would not
score['walk_score'],
score['bike_score'],
score['transit_score'],
score['transit_summary'],
id
)
self.db.write(query, params)
# if the current property has no geo coordinates, call the google api to find them and update
async def update_property_coords(self, id, force=False):
query = """
SELECT * FROM property
WHERE id = ?
"""
# keeping LIMIT 1 after expecting a match on ID will mask duplicates, which need correcting
params = id
response = self.db.read(query, params)
try:
if not force and response['walk_score'] != None:
return
# generic catch for now; replace with the specific exception once it is known
except sqlite3.Error as e:
print(e)
response['latitude'], response['longitude'] = await self.get_geo_coord(response['address'])
query = """
UPDATE property
SET latitude = ?,
longitude = ?
WHERE id = ?
"""
params = (response['latitude'], response['longitude'], response['id'])  # tuple preserves placeholder order
self.db.write(query, params)
def get_most_similar(self, house):
query = """
SELECT * from property
WHERE (num_bedrooms = ?
AND num_bathrooms = ?
AND ABS(? - close_price) < (? * 0.1))
LIMIT 10
"""
# Yields list of top most similar properties
params = [house['num_bedrooms'], house['num_bathrooms'], house['list_price'], house['list_price']]
response = self.db.read(query, params)
return response
# Simplified, mechanical averaging of dict values
def get_average_close_price(self, dataList):
number_of_entries = 0
sum = 0
for dict in dataList:
for key in dict:
if key == 'close_price':
sum += dict['close_price']
number_of_entries += 1
avg = sum / float(1 + number_of_entries)
return avg
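# Illustrative usage sketch, not part of the original module; the keys, path and
# coordinates below are placeholders:
#
#   async def demo():
#       client = ApiClient(db_path="src/db.sqlite",
#                          k_walk_score="<walkscore-key>",
#                          k_google="<google-key>")
#       scores = await client.get_score("123 Main St", 47.61, -122.33)
#       print(scores)
#       await client.client_http.close()
#
#   asyncio.run(demo())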
|
StarcoderdataPython
|
6612264
|
<reponame>krlex/aws-python-examples
# lamdda_function.py
# It handles a simple AWS Lambda function that shows the content (JSON) of the call
# to the lambda function and returns a message including this content.
def lambda_handler(event, context):
message = 'Hello {} {}!'.format(event['first_name'],
event['last_name'])
return {
'message' : message
}
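# Quick local check, not part of the original example; the event values are made up.
if __name__ == "__main__":
    print(lambda_handler({'first_name': 'Jane', 'last_name': 'Doe'}, None))
    # -> {'message': 'Hello Jane Doe!'}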
|
StarcoderdataPython
|
305403
|
from github import Github, UnknownObjectException, BadCredentialsException
from gitopscli.gitops_exception import GitOpsException
from .abstract_git_util import AbstractGitUtil
class GithubGitUtil(AbstractGitUtil):
def __init__(self, tmp_dir, organisation, repository_name, username, password, git_user, git_email):
super().__init__(tmp_dir, username, password, git_user, git_email)
self._organisation = organisation
self._repository_name = repository_name
self._github = Github(self._username, self._password)
def get_clone_url(self):
return self.__get_repo().clone_url
def create_pull_request(self, from_branch, to_branch, title, description):
repo = self.__get_repo()
pull_request = repo.create_pull(title=title, body=description, head=from_branch, base=to_branch)
return pull_request
def get_pull_request_url(self, pull_request):
return pull_request.html_url
def merge_pull_request(self, pull_request):
pull_request.merge()
def add_pull_request_comment(self, pr_id, text, parent_id=None):
pull_request = self.__get_pull_request(pr_id)
pr_comment = pull_request.create_issue_comment(text)
return pr_comment
def delete_branch(self, branch):
repo = self.__get_repo()
try:
git_ref = repo.get_git_ref(f"heads/{branch}")
except UnknownObjectException as ex:
raise GitOpsException(f"Branch '{branch}' does not exist.") from ex
git_ref.delete()
def get_pull_request_branch(self, pr_id):
pull_request = self.__get_pull_request(pr_id)
return pull_request.head.ref
def __get_pull_request(self, pr_id):
repo = self.__get_repo()
try:
return repo.get_pull(pr_id)
except UnknownObjectException as ex:
raise GitOpsException(f"Pull request with ID '{pr_id}' does not exist.") from ex
def __get_repo(self):
try:
return self._github.get_repo(f"{self._organisation}/{self._repository_name}")
except BadCredentialsException as ex:
raise GitOpsException("Bad credentials") from ex
except UnknownObjectException as ex:
raise GitOpsException(f"Repository '{self._organisation}/{self._repository_name}' does not exist.") from ex
|
StarcoderdataPython
|
1757186
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# from sklearn.impute import SimpleImputer
# from sklearn.compose import ColumnTransformer
# from sklearn.preprocessing import OneHotEncoder
# from sklearn.preprocessing import LabelEncoder
Dataset = pd.read_csv('Data.csv')
X = Dataset.iloc[:,:-1].values
y = Dataset.iloc[:,-1].values
# imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# imputer.fit(X[:, 1:3])
# X[:, 1:3] = imputer.transform(X[:, 1:3])
# ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
# X = np.array(ct.fit_transform(X))
# le = LabelEncoder()
# y = le.fit_transform(y)
X_train, X_test, y_train,y_test = train_test_split(X, y, test_size=0.3, random_state=16)
|
StarcoderdataPython
|
6544613
|
<reponame>zhfeing/graduation-project<gh_stars>0
print("[info]: import get_data successful")
|
StarcoderdataPython
|
3571125
|
# Generate a nautilus shell from triangles
# This method is based in the one described in Origami4
# "Paper Nautili: A Model for Three Dimensional Planispiral Growth"
# by <NAME>
# Starting with a right triangle ABC where A is the origin,
# B is up and C is to the side. The goal is to calculate
# the next triangle BDC where r*len BA = len CD
# and another triangle CFD where F is the midpoint of BD
# the end product is to draw CD and CF for some number of
# iterations
#
# A straight line approach leads to flat origami. I've
# modified the algorithm to use bezier curves to get
# a self-shaping curved nautilus.
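# Worked example of one growth step (illustrative numbers, not from the source):
# with A=(1,0), B=(1,1), C=(2,0), beta=gamma=1 and r=1.08, len(BA)=1, so
# D=(2, 1.08) because len(CD)=r*len(BA), and F=midpoint(B,D)=(1.5, 1.04).
# The generator then shifts: A'=C=(2,0), B'=D=(2,1.08), C'=(2+r*len(BA), 0)=(3.08, 0).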
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import collections as mc
from matplotlib.patches import Ellipse, Wedge, Polygon
import itertools
class point:
def __init__(self, x, y):
self.x = x
self.y = y
def lengthTo(self, p):
x2 = (self.x-p.x)**2
y2 = (self.y-p.y)**2
len = sqrt(x2 + y2)
return len
def pts(self):
return [self.x, self.y]
def negative(self):
return point(self.x, -self.y)
def add_plot(ax, pt1, pt2, ptb, color):
bezier(ax, pt1, pt2, ptb, color)
def bezier(ax, pt1, pt2, ptb, color):
npoints = 2
numbers = [i for i in range(npoints)]
bezier_path = np.arange(0.0, 1.01, 0.01)
x1y1 = x1, y1 = pt1.x, pt1.y
x2y2 = x2, y2 = pt2.x, pt2.y
xbyb = xb, yb = ptb.x, ptb.y
# Compute and store the Bezier curve points
x = (1 - bezier_path)** 2 * x1 + 2 * (1 - bezier_path) * bezier_path * xb + bezier_path** 2 * x2
y = (1 - bezier_path)** 2 * y1 + 2 * (1 - bezier_path) * bezier_path * yb + bezier_path** 2 * y2
ax.plot(x, y, color)
def make_plot(r, N, ax, cf_fun, cd_fun, offset, double=True, beta=1.0, gamma=1.0):
a = point(gamma,0)
b = point(gamma, beta)
c = point(gamma*2, 0)
if double:
ori = offset(0, point(0,beta/r))
outerPolyVertices=[ori.negative().pts(), ori.pts()]
else:
outerPolyVertices=[a.pts(), b.pts()]
for i in range(0, N):
if double is True:
a = offset(i, a)
b = offset(i, b)
c = offset(i, c)
ba = b.lengthTo(a)
d = point(c.x, c.y + r * ba )
diff_ba_dc = (r-1)*ba
f = point(b.x + 0.5 * (c.x - b.x), b.y + 0.5 * diff_ba_dc)
# now, fold lines on cd and on cf
if double is True:
nf = f.negative()
nc = c.negative()
nd = d.negative()
add_plot(ax, c.negative(), f.negative(), color='r', ptb=cf_fun(c.negative(), f.negative()))
add_plot(ax, c.negative(), d.negative(), color='b', ptb=cd_fun(c.negative(), d.negative()))
add_plot(ax, c, f, color='r', ptb=cf_fun(c,f))
add_plot(ax, c, d, color='b', ptb=cd_fun(c,d))
# for debugging:
#ptb_cf = cf_fun(c,f)
#ax.plot(ptb_cf.x, ptb_cf.y, 'rx')
#ptb_cd = cd_fun(c,f)
#ax.plot(ptb_cd.x, ptb_cd.y, 'bx')
# move the generator over
newc = point(c.x + (gamma/beta)*r*ba, c.y)
a = c
b = d
c = newc
# finish outer polygon for cut line
if double is True:
o = offset(N, d)
o.x += gamma
outerPolyVertices.extend([o.pts(), o.negative().pts()])
else:
outerPolyVertices.extend([d.pts(), c.pts()])
poly = Polygon(outerPolyVertices, facecolor='1.0', edgecolor='k')
ax.add_patch(poly)
ax.set_aspect(1)
plt.axis('off')
def ptb_1x_avey(pt1, pt2):
xb = pt1.x
yb = (pt1.y+pt2.y)/2.0
return point(xb, yb)
def ptb_1x105_avey(pt1, pt2):
xb = 1.05*pt1.x
yb = (pt1.y+pt2.y)/2.0
return point(xb, yb)
def ptb_sumxdiv21_avey(pt1, pt2):
xb = (pt1.x+pt2.x)/2.1
yb = (pt1.y+pt2.y)/2.0
return point(xb, yb)
def ptb_sumxdiv21_y0(pt1, pt2):
xb = (pt1.x+pt2.x)/2.1
return point(xb, 0.0)
def ptb_pt1(pt1, pt2):
return pt1
show_plot = True
# when doubled, the offset function is how much the two sides are separated
def offset_mult02(i, p, mult=0.02):
offset = mult*i
return point(p.x, p.y+offset)
figure, ax = plt.subplots()
name = "shellgen_Lommel_r108_16"
make_plot(r=1.08, N=16, ax=ax, cf_fun=ptb_pt1, cd_fun=ptb_pt1, offset=offset_mult02)
plt.savefig(name + ".svg")
if show_plot: plt.title(name), plt.show()
figure, ax = plt.subplots()
name = "nautilus_halfshell"
make_plot(r=1.08, N=16, ax=ax, double=False,
cf_fun=ptb_sumxdiv21_avey, cd_fun=ptb_sumxdiv21_avey, offset=offset_mult02)
plt.savefig(name + ".svg")
if show_plot: plt.title(name), plt.show()
figure, ax = plt.subplots()
name = "shellgen_d3_r108_n10"
make_plot(r=1.08, N=10, ax=ax,
cf_fun=ptb_sumxdiv21_avey, cd_fun=ptb_sumxdiv21_avey, offset=offset_mult02)
plt.savefig(name + ".svg")
if show_plot: plt.title(name), plt.show()
figure, ax = plt.subplots()
name = "shellgen_d3_r120_n10"
make_plot(r=1.20, N=10, ax=ax,
cf_fun=ptb_sumxdiv21_avey, cd_fun=ptb_sumxdiv21_avey, offset=offset_mult02)
plt.savefig(name + ".svg")
if show_plot: plt.title(name), plt.show()
figure, ax = plt.subplots()
name = "shellgen_d3_r130_n10"
make_plot(r=1.3, N=10, ax=ax,
cf_fun=ptb_sumxdiv21_avey, cd_fun=ptb_sumxdiv21_avey, offset=offset_mult02)
plt.savefig(name + ".svg")
if show_plot: plt.title(name), plt.show()
|
StarcoderdataPython
|
5166022
|
<reponame>jdlesage/tf-yarn
"""\
To run the example
1. Download winequality-*.csv from the Wine Quality dataset at UCI
ML repository
(https://archive.ics.uci.edu/ml/datasets/Wine+Quality).
2. Upload it to HDFS.
3. Pass a full URI to either of the CSV files to the example.
For instance, if you prefer red wine::
%example.py% hdfs://path/to/winequality-red.csv
You can check the configured ``fs.defaultFS`` value by running::
$ hdfs getconf -confKey fs.defaultFS
"""
import typing
import tensorflow as tf
FEATURES = [
"fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar",
"chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density",
"pH", "sulphates", "alcohol"
]
LABEL = "quality"
def get_train_eval_datasets(
path: str,
train_fraction: float = 0.7
) -> typing.Tuple[tf.data.Dataset, tf.data.Dataset]:
def split_label(*row):
return dict(zip(FEATURES, row)), row[-1]
def in_training_set(*row):
num_buckets = 1000
key = tf.string_join(list(map(tf.as_string, row)))
bucket_id = tf.string_to_hash_bucket_fast(key, num_buckets)
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(*row):
return ~in_training_set(*row)
data = tf.contrib.data.CsvDataset(
path,
[tf.float32] * len(FEATURES) + [tf.int32],
header=True,
field_delim=";")
train = data.filter(in_training_set).map(split_label).cache()
test = data.filter(in_test_set).map(split_label).cache()
return train, test
def get_feature_columns():
return [tf.feature_column.numeric_column(name) for name in FEATURES]
def get_n_classes():
return 10
|
StarcoderdataPython
|
11216561
|
from unittest import TestCase
from httpbase.fields import BoolField
from httpbase.exceptions import SerializationError, NonNullableField
from httpbase.resources import Resource
class TestBoolField(TestCase):
def test_bool_field(self):
value = True
class Foo(Resource):
foo = BoolField(label="foo")
resource = Foo(foo=value)
self.assertEqual(resource.foo.value, value)
self.assertEqual(resource.dict(), {"foo": value})
def test_nullable(self):
with self.assertRaises(NonNullableField):
class Foo(Resource):
foo = BoolField(label="foo", nullable=True)
def test_default(self):
value = True
class Foo(Resource):
foo = BoolField(label="foo", default=value)
resource = Foo()
self.assertEqual(resource.foo.value, value)
self.assertEqual(resource.dict(), {"foo": value})
def test_printable(self):
value = True
class Foo(Resource):
foo = BoolField(label="foo", printable=False)
resource = Foo(foo=value)
self.assertEqual(resource.foo.value, value)
self.assertEqual(resource.dict(), {"foo": value})
self.assertNotIn(str(value), repr(resource.foo))
|
StarcoderdataPython
|
12859097
|
<reponame>nickcamel/IgApi<filename>lib/watchlists.py
# REF: https://labs.ig.com/rest-trading-api-reference
class Watchlists:
"""
DO NOT CHANGE
Adding is ok ... and encouraged ;)
"""
base = {
'path': 'watchlists',
'GET': {
'version': '1',
'tokens': True,
}
# Not supported yet: 'POST'
}
id = {
'path': 'watchlists/',
'GET': {
'version': '1',
'tokens': True,
}
# Not supported yet: 'PUT', 'DELETE'
}
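# Hypothetical illustration, not part of the IgApi client: a caller could map a
# descriptor onto an HTTP request roughly like this (api_root, headers and the
# request helper are assumptions):
#
#   spec = Watchlists.base
#   url = api_root + spec['path']
#   version = spec['GET']['version']        # sent as the IG "Version" header
#   needs_tokens = spec['GET']['tokens']    # attach CST/X-SECURITY-TOKEN when True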
|
StarcoderdataPython
|
391617
|
<filename>code/frameworks/pisr/utils/__init__.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .utils import *
|
StarcoderdataPython
|
82135
|
<filename>src/pytti/Image/VQGANImage.py<gh_stars>0
from pathlib import Path
from os.path import exists as path_exists
import sys
import subprocess
import shutil
from loguru import logger
from taming.models import cond_transformer, vqgan
from pytti import DEVICE, replace_grad, clamp_with_grad, vram_usage_mode
import torch
from torch.nn import functional as F
from pytti.Image import EMAImage
from torchvision.transforms import functional as TF
from PIL import Image
from omegaconf import OmegaConf
VQGAN_MODEL = None
VQGAN_NAME = None
VQGAN_IS_GUMBEL = None
# migrate these to config files
VQGAN_MODEL_NAMES = ["imagenet", "coco", "wikiart", "sflckr", "openimages"]
VQGAN_CONFIG_URLS = {
"imagenet": [
"curl -L -o imagenet.yaml -C - https://heibox.uni-heidelberg.de/f/274fb24ed38341bfa753/?dl=1"
],
"coco": ["curl -L -o coco.yaml -C - https://dl.nmkd.de/ai/clip/coco/coco.yaml"],
"wikiart": [
"curl -L -o wikiart.yaml -C - http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.yaml"
],
"sflckr": [
"curl -L -o sflckr.yaml -C - https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fconfigs%2F2020-11-09T13-31-51-project.yaml&dl=1"
],
"faceshq": [
"curl -L -o faceshq.yaml -C - https://drive.google.com/uc?export=download&id=1fHwGx_hnBtC8nsq7hesJvs-Klv-P0gzT"
],
"openimages": [
"curl -L -o openimages.yaml -C - https://heibox.uni-heidelberg.de/d/2e5662443a6b4307b470/files/?p=%2Fconfigs%2Fmodel.yaml&dl=1"
],
}
VQGAN_CHECKPOINT_URLS = {
"imagenet": [
"curl -L -o imagenet.ckpt -C - https://heibox.uni-heidelberg.de/f/867b05fc8c4841768640/?dl=1"
],
"coco": ["curl -L -o coco.ckpt -C - https://dl.nmkd.de/ai/clip/coco/coco.ckpt"],
"wikiart": [
"curl -L -o wikiart.ckpt -C - http://eaidata.bmk.sh/data/Wikiart_16384/wikiart_f16_16384_8145600.ckpt"
],
"sflckr": [
"curl -L -o sflckr.ckpt -C - https://heibox.uni-heidelberg.de/d/73487ab6e5314cb5adba/files/?p=%2Fcheckpoints%2Flast.ckpt&dl=1"
],
"faceshq": [
"curl -L -o faceshq.ckpt -C - https://app.koofr.net/content/links/a04deec9-0c59-4673-8b37-3d696fe63a5d/files/get/last.ckpt?path=%2F2020-11-13T21-41-45_faceshq_transformer%2Fcheckpoints%2Flast.ckpt"
],
"openimages": [
"curl -L -o openimages.ckpt -C - https://heibox.uni-heidelberg.de/d/2e5662443a6b4307b470/files/?p=%2Fckpts%2Flast.ckpt&dl=1"
],
}
def load_vqgan_model(config_path, checkpoint_path):
config = OmegaConf.load(config_path)
if config.model.target == "taming.models.vqgan.VQModel":
model = vqgan.VQModel(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
gumbel = False
elif config.model.target == "taming.models.cond_transformer.Net2NetTransformer":
parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
parent_model.eval().requires_grad_(False)
parent_model.init_from_ckpt(checkpoint_path)
model = parent_model.first_stage_model
gumbel = False
elif config.model.target == "taming.models.vqgan.GumbelVQ":
model = vqgan.GumbelVQ(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
gumbel = True
else:
raise ValueError(f"unknown model type: {config.model.target}")
del model.loss
return model, gumbel
def vector_quantize(x, codebook, fake_grad=True):
d = (
x.pow(2).sum(dim=-1, keepdim=True)
+ codebook.pow(2).sum(dim=1)
- 2 * x @ codebook.T
)
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return replace_grad(x_q, x)
class VQGANImage(EMAImage):
"""
VQGAN latent image representation
width: (positive integer) approximate image width in pixels (will be rounded down to nearest multiple of 16)
height: (positive integer) approximate image height in pixels (will be rounded down to nearest multiple of 16)
model: (VQGAN) vqgan model
"""
@vram_usage_mode("VQGAN Image")
def __init__(self, width, height, scale=1, model=VQGAN_MODEL, ema_val=0.99):
if model is None:
model = VQGAN_MODEL
if model is None:
raise RuntimeError(
"ERROR: model is None and VQGAN is not initialized loaded"
)
if VQGAN_IS_GUMBEL:
e_dim = 256
n_toks = model.quantize.n_embed
vqgan_quantize_embedding = model.quantize.embed.weight
else:
e_dim = model.quantize.e_dim
n_toks = model.quantize.n_e
vqgan_quantize_embedding = model.quantize.embedding.weight
f = 2 ** (model.decoder.num_resolutions - 1)
self.e_dim = e_dim
self.n_toks = n_toks
width *= scale
height *= scale
# set up parameter dimensions
toksX, toksY = width // f, height // f
sideX, sideY = toksX * f, toksY * f
self.toksX, self.toksY = toksX, toksY
# we can't use our own vqgan_quantize_embedding yet because the buffer isn't
# registered, and we can't register the buffer without the value of z
z = self.rand_latent(vqgan_quantize_embedding=vqgan_quantize_embedding)
super().__init__(sideX, sideY, z, ema_val)
self.output_axes = ("n", "s", "y", "x")
self.lr = 0.15 if VQGAN_IS_GUMBEL else 0.1
self.latent_strength = 1
# extract the parts of VQGAN we need
self.register_buffer(
"vqgan_quantize_embedding", vqgan_quantize_embedding, persistent=False
)
# self.vqgan_quantize_embedding = torch.nn.Parameter(vqgan_quantize_embedding)
self.vqgan_decode = model.decode
self.vqgan_encode = model.encode
def clone(self):
dummy = VQGANImage(*self.image_shape)
with torch.no_grad():
dummy.tensor.set_(self.tensor.clone())
dummy.accum.set_(self.accum.clone())
dummy.biased.set_(self.biased.clone())
dummy.average.set_(self.average.clone())
dummy.decay = self.decay
return dummy
def get_latent_tensor(self, detach=False, device=DEVICE):
z = self.tensor
if detach:
z = z.detach()
z_q = vector_quantize(z, self.vqgan_quantize_embedding).movedim(3, 1).to(device)
return z_q
@classmethod
def get_preferred_loss(cls):
from pytti.LossAug.LatentLossClass import LatentLoss
return LatentLoss
def decode(self, z, device=DEVICE):
z_q = vector_quantize(z, self.vqgan_quantize_embedding).movedim(3, 1).to(device)
out = self.vqgan_decode(z_q).add(1).div(2)
width, height = self.image_shape
return clamp_with_grad(out, 0, 1)
# return F.interpolate(clamp_with_grad(out, 0, 1).to(device, memory_format = torch.channels_last), (height, width), mode='nearest')
@torch.no_grad()
def encode_image(self, pil_image, device=DEVICE, **kwargs):
pil_image = pil_image.resize(self.image_shape, Image.LANCZOS)
pil_image = TF.to_tensor(pil_image)
z, *_ = self.vqgan_encode(pil_image.unsqueeze(0).to(device) * 2 - 1)
self.tensor.set_(z.movedim(1, 3))
self.reset()
@torch.no_grad()
def make_latent(self, pil_image, device=DEVICE):
pil_image = pil_image.resize(self.image_shape, Image.LANCZOS)
pil_image = TF.to_tensor(pil_image)
z, *_ = self.vqgan_encode(pil_image.unsqueeze(0).to(device) * 2 - 1)
z_q = (
vector_quantize(z.movedim(1, 3), self.vqgan_quantize_embedding)
.movedim(3, 1)
.to(device)
)
return z_q
@torch.no_grad()
def encode_random(self):
self.tensor.set_(self.rand_latent())
self.reset()
def rand_latent(self, device=DEVICE, vqgan_quantize_embedding=None):
if vqgan_quantize_embedding is None:
vqgan_quantize_embedding = self.vqgan_quantize_embedding
n_toks = self.n_toks
toksX, toksY = self.toksX, self.toksY
one_hot = F.one_hot(
torch.randint(n_toks, [toksY * toksX], device=device), n_toks
).float()
z = one_hot @ vqgan_quantize_embedding
z = z.view([-1, toksY, toksX, self.e_dim])
return z
@staticmethod
def init_vqgan(model_name, model_artifacts_path, device=DEVICE):
global VQGAN_MODEL, VQGAN_NAME, VQGAN_IS_GUMBEL
if VQGAN_NAME == model_name:
return
if model_name not in VQGAN_MODEL_NAMES:
raise ValueError(
f"VQGAN model {model_name} is not supported. Supported models are {VQGAN_MODEL_NAMES}"
)
model_artifacts_path = Path(model_artifacts_path)
logger.info(model_artifacts_path)
model_artifacts_path.mkdir(parents=True, exist_ok=True)
vqgan_config = model_artifacts_path / f"{model_name}.yaml"
vqgan_checkpoint = model_artifacts_path / f"{model_name}.ckpt"
logger.debug(vqgan_config)
logger.debug(vqgan_checkpoint)
# good lord... the nested if statements and calling curl with subprocess... so much about this needs to change.
# for now, let's just use it the way it is and copy the file where it needs to go.
if not path_exists(vqgan_config):
logger.warning(
f"WARNING: VQGAN config file {vqgan_config} not found. Initializing download."
)
command = VQGAN_CONFIG_URLS[model_name][0].split(" ", 6)
subprocess.run(command)
shutil.move(vqgan_config.name, vqgan_config)
if not path_exists(vqgan_config):
logger.critical(
f"ERROR: VQGAN model {model_name} config failed to download! Please contact model host or find a new one."
)
raise FileNotFoundError(f"VQGAN {model_name} config not found")
if not path_exists(vqgan_checkpoint):
logger.warning(
f"WARNING: VQGAN checkpoint file {vqgan_checkpoint} not found. Initializing download."
)
command = VQGAN_CHECKPOINT_URLS[model_name][0].split(" ", 6)
subprocess.run(command)
shutil.move(vqgan_checkpoint.name, vqgan_checkpoint)
if not path_exists(vqgan_checkpoint):
logger.critical(
f"ERROR: VQGAN model {model_name} checkpoint failed to download! Please contact model host or find a new one."
)
raise FileNotFoundError(f"VQGAN {model_name} checkpoint not found")
VQGAN_MODEL, VQGAN_IS_GUMBEL = load_vqgan_model(vqgan_config, vqgan_checkpoint)
with vram_usage_mode("VQGAN"):
VQGAN_MODEL = VQGAN_MODEL.to(device)
VQGAN_NAME = model_name
@staticmethod
def free_vqgan():
global VQGAN_MODEL
VQGAN_MODEL = None # should this maybe be `del VQGAN_MODEL` instead?
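# Usage sketch (added; not part of the original module): initialize the global
# VQGAN once, then create a latent image canvas. "sflckr" is assumed to be in
# VQGAN_MODEL_NAMES, and "vqgan_models" is a hypothetical cache directory --
# init_vqgan downloads the config and checkpoint there on first use.
if __name__ == "__main__":
    VQGANImage.init_vqgan("sflckr", "vqgan_models")
    img = VQGANImage(width=512, height=512)
    print(img.toksX, img.toksY)  # token-grid size implied by the model's downsampling factor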
|
StarcoderdataPython
|
11314262
|
<gh_stars>0
from pathlib import Path
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...transform_encoder import TransformerTorchEncoder
_EMBEDDING_DIM = 768
@pytest.fixture(scope='session')
def basic_encoder() -> TransformerTorchEncoder:
return TransformerTorchEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pooling_strategy == 'mean'
def test_compute_tokens(basic_encoder: TransformerTorchEncoder):
tokens = basic_encoder._generate_input_tokens(
['hello this is a test', 'and another test']
)
assert tokens['input_ids'].shape == (2, 7)
assert tokens['attention_mask'].shape == (2, 7)
@pytest.mark.parametrize('hidden_seqlen', [4, 8])
def test_compute_embeddings(hidden_seqlen: int, basic_encoder: TransformerTorchEncoder):
embedding_size = 10
tokens = basic_encoder._generate_input_tokens(['hello world'])
hidden_states = tuple(
torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7)
)
embeddings = basic_encoder._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = TransformerTorchEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name',
[
'microsoft/deberta-base',
'distilbert-base-uncased',
'mrm8488/longformer-base-4096-finetuned-squadv2',
],
)
def test_models(model_name: str):
encoder = TransformerTorchEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize('layer_index', [0, 1, -1])
def test_layer_index(layer_index: int):
encoder = TransformerTorchEncoder(layer_index=layer_index)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize('pooling_strategy', ['cls', 'mean', 'min', 'max'])
def test_pooling_strategy(pooling_strategy: str):
encoder = TransformerTorchEncoder(pooling_strategy=pooling_strategy)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TransformerTorchEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TransformerTorchEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: TransformerTorchEncoder):
docs = DocumentArray(
[
Document(id='A', text='a furry animal that with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
StarcoderdataPython
|
9694918
|
# Modified version of
# DQN implementation by <NAME> found at
# https://github.com/mrkulk/deepQN_tensorflow
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
class DQN:
def __init__(self, params):
self.params = params
self.network_name = 'qnet'
self.sess = tf.compat.v1.Session()
self.x = tf.compat.v1.placeholder('float', [None, params['width'],params['height'], 4],name=self.network_name + '_x')
self.q_t = tf.compat.v1.placeholder('float', [None], name=self.network_name + '_q_t')
self.actions = tf.compat.v1.placeholder("float", [None, 4], name=self.network_name + '_actions')
self.rewards = tf.compat.v1.placeholder("float", [None], name=self.network_name + '_rewards')
self.terminals = tf.compat.v1.placeholder("float", [None], name=self.network_name + '_terminals')
# Layer 1 (Convolutional)
layer_name = 'conv1' ; size = 3 ; channels = 4 ; filters = 16 ; stride = 1
self.w1 = tf.Variable(tf.compat.v1.random_normal([size,size,channels,filters], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights')
self.b1 = tf.Variable(tf.constant(0.1, shape=[filters]),name=self.network_name + '_'+layer_name+'_biases')
self.c1 = tf.nn.conv2d(self.x, self.w1, strides=[1, stride, stride, 1], padding='SAME',name=self.network_name + '_'+layer_name+'_convs')
self.o1 = tf.nn.relu(tf.add(self.c1,self.b1),name=self.network_name + '_'+layer_name+'_activations')
# Layer 2 (Convolutional)
layer_name = 'conv2' ; size = 3 ; channels = 16 ; filters = 32 ; stride = 1
self.w2 = tf.Variable(tf.compat.v1.random_normal([size,size,channels,filters], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights')
self.b2 = tf.Variable(tf.constant(0.1, shape=[filters]),name=self.network_name + '_'+layer_name+'_biases')
self.c2 = tf.nn.conv2d(self.o1, self.w2, strides=[1, stride, stride, 1], padding='SAME',name=self.network_name + '_'+layer_name+'_convs')
self.o2 = tf.nn.relu(tf.add(self.c2,self.b2),name=self.network_name + '_'+layer_name+'_activations')
o2_shape = self.o2.get_shape().as_list()
# Layer 3 (Fully connected)
layer_name = 'fc3' ; hiddens = 256 ; dim = o2_shape[1]*o2_shape[2]*o2_shape[3]
self.o2_flat = tf.reshape(self.o2, [-1,dim],name=self.network_name + '_'+layer_name+'_input_flat')
self.w3 = tf.Variable(tf.compat.v1.random_normal([dim,hiddens], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights')
self.b3 = tf.Variable(tf.constant(0.1, shape=[hiddens]),name=self.network_name + '_'+layer_name+'_biases')
self.ip3 = tf.add(tf.matmul(self.o2_flat,self.w3),self.b3,name=self.network_name + '_'+layer_name+'_ips')
self.o3 = tf.nn.relu(self.ip3,name=self.network_name + '_'+layer_name+'_activations')
# Layer 4
layer_name = 'fc4' ; hiddens = 4 ; dim = 256
self.w4 = tf.Variable(tf.compat.v1.random_normal([dim,hiddens], stddev=0.01),name=self.network_name + '_'+layer_name+'_weights')
self.b4 = tf.Variable(tf.constant(0.1, shape=[hiddens]),name=self.network_name + '_'+layer_name+'_biases')
self.y = tf.add(tf.matmul(self.o3,self.w4),self.b4,name=self.network_name + '_'+layer_name+'_outputs')
#Q,Cost,Optimizer
self.discount = tf.constant(self.params['discount'])
self.yj = tf.add(self.rewards, tf.multiply(1.0-self.terminals, tf.multiply(self.discount, self.q_t)))
self.Q_pred = tf.compat.v1.reduce_sum(tf.multiply(self.y,self.actions), reduction_indices=1)
self.cost = tf.compat.v1.reduce_sum(tf.pow(tf.subtract(self.yj, self.Q_pred), 2))
if self.params['load_file'] is not None:
self.global_step = tf.Variable(int(self.params['load_file'].split('_')[-1]),name='global_step', trainable=False)
else:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# self.optim = tf.train.RMSPropOptimizer(self.params['lr'],self.params['rms_decay'],0.0,self.params['rms_eps']).minimize(self.cost,global_step=self.global_step)
self.optim = tf.compat.v1.train.AdamOptimizer(self.params['lr']).minimize(self.cost, global_step=self.global_step)
self.saver = tf.compat.v1.train.Saver(max_to_keep=0)
self.sess.run(tf.compat.v1.global_variables_initializer())
if self.params['load_file'] is not None:
print('Loading checkpoint...')
self.saver.restore(self.sess,self.params['load_file'])
def train(self,bat_s,bat_a,bat_t,bat_n,bat_r):
feed_dict={self.x: bat_n, self.q_t: np.zeros(bat_n.shape[0]), self.actions: bat_a, self.terminals:bat_t, self.rewards: bat_r}
q_t = self.sess.run(self.y,feed_dict=feed_dict)
q_t = np.amax(q_t, axis=1)
feed_dict={self.x: bat_s, self.q_t: q_t, self.actions: bat_a, self.terminals:bat_t, self.rewards: bat_r}
_,cnt,cost = self.sess.run([self.optim, self.global_step,self.cost],feed_dict=feed_dict)
return cnt, cost
def save_ckpt(self,filename):
self.saver.save(self.sess, filename)
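# Usage sketch (added; not part of the original file). Shows the parameter keys
# the class reads and the batch shapes train() expects; all values below are
# illustrative only.
if __name__ == '__main__':
    params = {
        'width': 8, 'height': 8,         # grid dimensions fed to the conv input
        'discount': 0.95, 'lr': 0.0002,  # Q-learning discount and Adam learning rate
        'load_file': None,               # no checkpoint to restore
    }
    net = DQN(params)
    batch = 4
    bat_s = np.random.rand(batch, 8, 8, 4).astype(np.float32)  # states
    bat_n = np.random.rand(batch, 8, 8, 4).astype(np.float32)  # next states
    bat_a = np.eye(4)[np.random.randint(0, 4, batch)]          # one-hot actions
    bat_r = np.random.rand(batch)                               # rewards
    bat_t = np.zeros(batch)                                     # terminal flags
    step, cost = net.train(bat_s, bat_a, bat_t, bat_n, bat_r)
    print(step, cost)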
|
StarcoderdataPython
|
3301136
|
<reponame>zakharovadaria/receipts<gh_stars>1-10
from flask_jwt_extended import jwt_required
from flask_restplus import Namespace, reqparse, Resource, fields
from app.models.ingredient import Ingredient
from app.models.receipt import Receipt
from app.web.controllers.entities.basic_response import BasicResponse, BasicResponseSchema
from db import session
from schemas import ReceiptClientSchema
receipts_namespace = Namespace('receipts', description='Receipts CRUD')
receipts_fields = receipts_namespace.model('Receipt', {
'name': fields.String(example='Meat'),
'description': fields.String(example='Long description'),
'ingredients': fields.List(fields.Integer(), example=[1]),
'calories': fields.Float(example=300.0),
'steps': fields.List(fields.String(), example=['step1, step2']),
})
auth_parser = receipts_namespace.parser()
auth_parser.add_argument('Authorization', location='headers', help='Bearer token')
@receipts_namespace.route('/', strict_slashes=True)
@receipts_namespace.expect(auth_parser, validate=True)
class ReceiptsListResource(Resource):
method_decorators = [jwt_required]
def create_params(self) -> dict:
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True)
parser.add_argument('description', type=str, required=False, default='')
parser.add_argument('ingredients', type=int, action='append', required=False, default=None)
parser.add_argument('calories', type=float, required=False, default=0)
parser.add_argument('steps', type=str, action='append', required=False, default=None)
return parser.parse_args()
def get(self) -> dict:
receipts = session.query(Receipt).all()
receipts = ReceiptClientSchema().dump(receipts, many=True)
response = BasicResponse(receipts)
return BasicResponseSchema().dump(response)
@receipts_namespace.doc(model=receipts_fields)
@receipts_namespace.expect(receipts_fields, validate=True)
def post(self) -> dict:
create_params = self.create_params()
create_params["ingredients"] = session.query(Ingredient).filter(Ingredient.id.in_(create_params["ingredients"])).all()
receipt = Receipt(**create_params)
session.add(receipt)
session.commit()
receipt = ReceiptClientSchema().dump(receipt)
response = BasicResponse(receipt)
return BasicResponseSchema().dump(response)
@receipts_namespace.route('/<int:id>/', strict_slashes=True)
@receipts_namespace.doc(params={'id': 'Receipt ID'})
@receipts_namespace.expect(auth_parser, validate=True)
class ReceiptsResource(Resource):
method_decorators = [jwt_required]
def update_params(self) -> dict:
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, store_missing=False, required=False)
parser.add_argument('description', type=str, store_missing=False, required=False)
parser.add_argument('ingredients', type=int, store_missing=False, action='append', required=False)
parser.add_argument('calories', type=float, store_missing=False, required=False)
parser.add_argument('steps', type=str, store_missing=False, action='append', required=False)
return parser.parse_args()
@receipts_namespace.doc(model=receipts_fields)
def get(self, id: int) -> dict:
receipt = session.query(Receipt).get(id)
receipt = ReceiptClientSchema().dump(receipt)
response = BasicResponse(receipt)
return BasicResponseSchema().dump(response)
@receipts_namespace.doc(model=receipts_fields)
@receipts_namespace.expect(receipts_fields, validate=True)
def put(self, id: int) -> dict:
receipt = session.query(Receipt).get(id)
update_params = self.update_params()
if 'ingredients' in update_params:
update_params['ingredients'] = session.query(Ingredient).filter(Ingredient.id.in_(update_params["ingredients"])).all()
for key, value in update_params.items():
setattr(receipt, key, value)
session.commit()
receipt = ReceiptClientSchema().dump(receipt)
response = BasicResponse(receipt)
return BasicResponseSchema().dump(response)
def delete(self, id: int) -> dict:
receipt = session.query(Receipt).get(id)
session.delete(receipt)
session.commit()
response = BasicResponse(None)
return BasicResponseSchema().dump(response)
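# Example request payload (added comment): a POST to /receipts/ with a JWT in
# the Authorization header expects a body shaped like receipts_fields above, e.g.
#   {"name": "Meat", "description": "Long description", "ingredients": [1],
#    "calories": 300.0, "steps": ["step1", "step2"]}
# where "ingredients" is a list of existing Ingredient ids.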
|
StarcoderdataPython
|
9674993
|
import os
print("Creating minified javascript")
os.system("terser --compress --mangle -- tinytemplate.js > tinytemplate.min.js")
large = os.path.getsize("tinytemplate.js")
small = os.path.getsize("tinytemplate.min.js")
print("Reduced size from %d bytes to %d bytes (%.02f%%)" % (large, small, ((large - small) / large) * 100))
|
StarcoderdataPython
|
6422025
|
# coding: utf-8
import numpy as np
import pandas as pd
from scipy import interpolate
import os
import shapefile
class Grid(object):
def __init__(self, shp_file_path, region, density=100):
"""
建立网格对象, 以shp文件获取的 region 外包矩形为网格外边界
:param shp_file_path: str -> 要打开的 shapefile 文件
:param region: str or int -> 地区名 或 地区编码
会检索 shp file infos 的所有 record 进行匹配, 因此,
需保证此值唯一性, 否则将获取拥有此值的最后一个元素
:param density: float -> 网格密度, 单位:格/度
"""
self.region = region
self.shp_file_path = shp_file_path
self.density = density
self.shp_file = shapefile.Reader(shp_file_path)
for shape_record in self.shp_file.shapeRecords():
if str(region) in shape_record.record:
self.points = shape_record.shape.points
# self.points = self.shp_file.shape().points
self.lons, self.lats = zip(*self.points)
self.min_lon = min(self.lons)
self.max_lon = max(self.lons)
self.min_lat = min(self.lats)
self.max_lat = max(self.lats)
self.grid_lon_num = (self.max_lon - self.min_lon) * density
self.grid_lat_num = (self.max_lat - self.min_lat) * density
self.grid = np.mgrid[
self.min_lon:self.max_lon:complex(0, self.grid_lon_num),
self.min_lat:self.max_lat:complex(0, self.grid_lat_num)
]
def grid_data(
self, file, method='nearest',
lon_column='经度', lat_column='纬度', value_column='值',
**kwargs
):
"""
根据已有原始数据插值建立空间网格模型, 并将各参数绑定到 Grid 对象中。
:param file: str -> 数据文件的路径,(CSV 文件)
:param method: str -> 插值方法,采用 scipy.interpolate.griddata() 方法进行计算。
:param lon_column: str -> 数据 经度列的字段名
:param lat_column: str -> 数据 纬度列的字段名
:param value_column: str -> 数据 数据值列的字段名
:return: grid -> 网格值, 同 scipy.interpolate.griddata() 方法
"""
if not os.path.exists(file):
return None
data_frame = pd.read_csv(file)
self.ori_data_lons = np.array(data_frame.get(lon_column))
self.ori_data_lats = np.array(data_frame.get(lat_column))
self.ori_data_value = np.array(data_frame.get(value_column))
data = interpolate.griddata(
np.array([self.ori_data_lons, self.ori_data_lats]).T,
self.ori_data_value, self.grid.T, method=method, **kwargs
)
self.data = data
return data.T
if __name__ == '__main__':
    grid = Grid('data/region.shp', region=1)  # hypothetical shapefile path; Grid() needs both a shapefile and a region
grid_data = grid.grid_data(os.path.join('data', '0.csv'), method='cubic')
t = np.array([*grid.grid, grid_data])
print(grid.grid)
|
StarcoderdataPython
|
4935716
|
'''
Created on May 24, 2018
@author: kjnether
'''
from __future__ import unicode_literals
from django.db import models
class Destinations(models.Model):
'''
    Defines the destinations; keywords in the job definition are used to look up
    destinations, and each keyword maps to a row in this table.
'''
dest_key = models.CharField(max_length=3, primary_key=True)
dest_service_name = models.CharField(max_length=30, blank=False, null=False)
dest_host = models.CharField(max_length=30, blank=False, null=False)
dest_port = models.IntegerField(blank=True, null=True)
# for now everything will be oracle, but leaves door open for other config
dest_type = models.CharField(max_length=30)
def __str__(self):
"""Return a human readable representation of the model instance."""
return "{}".format(self.dest_key)
|
StarcoderdataPython
|
1764925
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from nose.plugins.attrib import attr
from heat.engine import properties
from heat.common import exception
@attr(tag=['unit', 'properties'])
@attr(speed='fast')
class PropertyTest(unittest.TestCase):
def test_required_default(self):
p = properties.Property({'Type': 'String'})
self.assertFalse(p.required())
def test_required_false(self):
p = properties.Property({'Type': 'String', 'Required': False})
self.assertFalse(p.required())
def test_required_true(self):
p = properties.Property({'Type': 'String', 'Required': True})
self.assertTrue(p.required())
def test_implemented_default(self):
p = properties.Property({'Type': 'String'})
self.assertTrue(p.implemented())
def test_implemented_false(self):
p = properties.Property({'Type': 'String', 'Implemented': False})
self.assertFalse(p.implemented())
def test_implemented_true(self):
p = properties.Property({'Type': 'String', 'Implemented': True})
self.assertTrue(p.implemented())
def test_no_default(self):
p = properties.Property({'Type': 'String'})
self.assertFalse(p.has_default())
def test_default(self):
p = properties.Property({'Type': 'String', 'Default': 'wibble'})
self.assertEqual(p.default(), 'wibble')
def test_type(self):
p = properties.Property({'Type': 'String'})
self.assertEqual(p.type(), 'String')
def test_bad_type(self):
self.assertRaises(AssertionError,
properties.Property, {'Type': 'Fish'})
def test_bad_key(self):
self.assertRaises(AssertionError,
properties.Property,
{'Type': 'String', 'Foo': 'Bar'})
def test_string_pattern_good(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertEqual(p.validate_data('foo'), 'foo')
def test_string_pattern_bad_prefix(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, '1foo')
def test_string_pattern_bad_suffix(self):
schema = {'Type': 'String',
'AllowedPattern': '[a-z]*'}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, 'foo1')
def test_string_value_list_good(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertEqual(p.validate_data('bar'), 'bar')
def test_string_value_list_bad(self):
schema = {'Type': 'String',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, 'blarg')
def test_int_good(self):
schema = {'Type': 'Integer',
'MinValue': 3,
'MaxValue': 3}
p = properties.Property(schema)
self.assertEqual(p.validate_data(3), 3)
def test_int_bad(self):
schema = {'Type': 'Integer'}
p = properties.Property(schema)
self.assertRaises(TypeError, p.validate_data, '3')
def test_integer_low(self):
schema = {'Type': 'Integer',
'MinValue': 4}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, 3)
def test_integer_high(self):
schema = {'Type': 'Integer',
'MaxValue': 2}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, 3)
def test_integer_value_list_good(self):
schema = {'Type': 'Integer',
'AllowedValues': [1, 3, 5]}
p = properties.Property(schema)
self.assertEqual(p.validate_data(5), 5)
def test_integer_value_list_bad(self):
schema = {'Type': 'Integer',
'AllowedValues': [1, 3, 5]}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, 2)
def test_number_good(self):
schema = {'Type': 'Number',
'MinValue': '3',
'MaxValue': '3'}
p = properties.Property(schema)
self.assertEqual(p.validate_data('3'), '3')
def test_number_value_list_good(self):
schema = {'Type': 'Number',
'AllowedValues': ['1', '3', '5']}
p = properties.Property(schema)
self.assertEqual(p.validate_data('5'), '5')
def test_number_value_list_bad(self):
schema = {'Type': 'Number',
'AllowedValues': ['1', '3', '5']}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, '2')
def test_number_low(self):
schema = {'Type': 'Number',
'MinValue': '4'}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, '3')
def test_number_high(self):
schema = {'Type': 'Number',
'MaxValue': '2'}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, '3')
def test_boolean_true(self):
p = properties.Property({'Type': 'Boolean'})
self.assertEqual(p.validate_data('True'), True)
self.assertEqual(p.validate_data('true'), True)
self.assertEqual(p.validate_data(True), True)
def test_boolean_false(self):
p = properties.Property({'Type': 'Boolean'})
self.assertEqual(p.validate_data('False'), False)
self.assertEqual(p.validate_data('false'), False)
self.assertEqual(p.validate_data(False), False)
def test_boolean_invalid(self):
p = properties.Property({'Type': 'Boolean'})
self.assertRaises(ValueError, p.validate_data, 'fish')
def test_list_string(self):
p = properties.Property({'Type': 'List'})
self.assertRaises(TypeError, p.validate_data, 'foo')
def test_list_good(self):
p = properties.Property({'Type': 'List'})
self.assertEqual(p.validate_data(['foo', 'bar']), ['foo', 'bar'])
def test_list_dict(self):
p = properties.Property({'Type': 'List'})
self.assertRaises(TypeError, p.validate_data, {'foo': 'bar'})
def test_list_value_list_bad(self):
schema = {'Type': 'List',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertRaises(ValueError, p.validate_data, ['foo', 'wibble'])
def test_list_value_list_good(self):
schema = {'Type': 'List',
'AllowedValues': ['foo', 'bar', 'baz']}
p = properties.Property(schema)
self.assertEqual(p.validate_data(['bar', 'foo']), ['bar', 'foo'])
def test_map_string(self):
p = properties.Property({'Type': 'Map'})
self.assertRaises(TypeError, p.validate_data, 'foo')
def test_map_list(self):
p = properties.Property({'Type': 'Map'})
self.assertRaises(TypeError, p.validate_data, ['foo'])
def test_map_schema_good(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertEqual(p.validate_data({'valid': 'TRUE'}), {'valid': True})
def test_map_schema_bad_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertRaises(ValueError, p.validate_data, {'valid': 'fish'})
def test_map_schema_missing_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertEqual(p.validate_data({}), {'valid': None})
def test_map_schema_missing_required_data(self):
map_schema = {'valid': {'Type': 'Boolean', 'Required': True}}
p = properties.Property({'Type': 'Map', 'Schema': map_schema})
self.assertRaises(ValueError, p.validate_data, {})
def test_list_schema_good(self):
map_schema = {'valid': {'Type': 'Boolean'}}
list_schema = {'Type': 'Map', 'Schema': map_schema}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertEqual(p.validate_data(
[{'valid': 'TRUE'}, {'valid': 'False'}]),
[{'valid': True}, {'valid': False}])
def test_list_schema_bad_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
list_schema = {'Type': 'Map', 'Schema': map_schema}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertRaises(ValueError, p.validate_data, [{'valid': 'True'},
{'valid': 'fish'}])
def test_list_schema_int_good(self):
list_schema = {'Type': 'Integer'}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertEqual(p.validate_data([1, 2, 3]), [1, 2, 3])
def test_list_schema_int_bad_data(self):
list_schema = {'Type': 'Integer'}
p = properties.Property({'Type': 'List', 'Schema': list_schema})
self.assertRaises(TypeError, p.validate_data, [42, 'fish'])
@attr(tag=['unit', 'properties'])
@attr(speed='fast')
class PropertiesTest(unittest.TestCase):
def setUp(self):
schema = {
'int': {'Type': 'Integer'},
'string': {'Type': 'String'},
'required_int': {'Type': 'Integer', 'Required': True},
'bad_int': {'Type': 'Integer'},
'missing': {'Type': 'Integer'},
'defaulted': {'Type': 'Integer', 'Default': 1},
'default_override': {'Type': 'Integer', 'Default': 1},
}
data = {
'int': 21,
'string': 'foo',
'bad_int': 'foo',
'default_override': 21,
}
double = lambda d: d * 2
self.props = properties.Properties(schema, data, double, 'wibble')
def test_integer_good(self):
self.assertEqual(self.props['int'], 42)
def test_string_good(self):
self.assertEqual(self.props['string'], 'foofoo')
def test_missing_required(self):
self.assertRaises(ValueError, self.props.get, 'required_int')
def test_integer_bad(self):
self.assertRaises(TypeError, self.props.get, 'bad_int')
def test_missing(self):
self.assertEqual(self.props['missing'], None)
def test_default(self):
self.assertEqual(self.props['defaulted'], 1)
def test_default_override(self):
self.assertEqual(self.props['default_override'], 42)
def test_bad_key(self):
self.assertEqual(self.props.get('foo', 'wibble'), 'wibble')
@attr(tag=['unit', 'properties'])
@attr(speed='fast')
class PropertiesValidationTest(unittest.TestCase):
def test_required(self):
schema = {'foo': {'Type': 'String', 'Required': True}}
props = properties.Properties(schema, {'foo': 'bar'})
self.assertEqual(props.validate(), None)
def test_missing_required(self):
schema = {'foo': {'Type': 'String', 'Required': True}}
props = properties.Properties(schema, {})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_missing_unimplemented(self):
schema = {'foo': {'Type': 'String', 'Implemented': False}}
props = properties.Properties(schema, {})
self.assertEqual(props.validate(), None)
def test_present_unimplemented(self):
schema = {'foo': {'Type': 'String', 'Implemented': False}}
props = properties.Properties(schema, {'foo': 'bar'})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_missing(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {})
self.assertEqual(props.validate(), None)
def test_bad_data(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': 42})
self.assertRaises(exception.StackValidationFailed, props.validate)
def test_unknown_typo(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'food': 42})
self.assertRaises(exception.StackValidationFailed, props.validate)
|
StarcoderdataPython
|
6598143
|
<reponame>Chris2L/gnss-ins-sim
# -*- coding: utf-8 -*-
# Filename: demo_mag_cal.py
"""
The simplest demo of soft iron and hard iron calibration.
Created on 2018-07-09
@author: dongxiaoguang
"""
import os
import math
import numpy as np
from gnss_ins_sim.sim import imu_model
from gnss_ins_sim.sim import ins_sim
# globals
D2R = math.pi/180
motion_def_path = os.path.abspath('.//demo_motion_def_files//')
fs = 100.0 # IMU sample frequency
fs_gps = 10.0 # GPS sample frequency
fs_mag = fs # magnetometer sample frequency, not used for now
def test_mag_cal():
'''
test soft iron and hard iron calibration.
'''
print("This demo only runs on Ubuntu x64.")
#### IMU model, typical for IMU381
imu_err = 'mid-accuracy'
# do not generate GPS data
imu = imu_model.IMU(accuracy=imu_err, axis=9, gps=False)
mag_error = {'si': np.eye(3) + np.random.randn(3, 3)*0.1,
'hi': np.array([10.0, 10.0, 10.0])*1.0
}
imu.set_mag_error(mag_error)
#### Algorithm
from demo_algorithms import mag_calibrate
algo = mag_calibrate.MagCal()
#### start simulation
sim = ins_sim.Sim([fs, fs_gps, fs_mag],
motion_def_path+"//motion_def_mag_cal.csv",
# motion_def_path+"//test_mag_cal//",
ref_frame=1,
imu=imu,
mode=None,
env=None,
algorithm=algo)
sim.run(1)
# save simulation data to files
sim.results()
# plot data
sim.plot(['mag', 'mag_cal'], opt={'mag': 'projection', 'mag_cal': 'projection'}, extra_opt='.')
# show calibration params:
print('true soft iron is:')
print(np.linalg.inv(mag_error['si']))
print('estimated soft iron is:')
print(sim.dmgr.soft_iron.data)
print('true hard iron is:')
print(mag_error['hi'])
print('estimated hard iron is:')
print(sim.dmgr.hard_iron.data)
if __name__ == '__main__':
test_mag_cal()
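# Note (added comment): set_mag_error distorts the simulated magnetometer
# readings with the soft-iron matrix 'si' and hard-iron offset 'hi'; the
# printout above therefore compares the estimated soft iron against
# np.linalg.inv(mag_error['si']) and the estimated hard iron against
# mag_error['hi'], i.e. the calibration is expected to recover the inverse
# soft-iron mapping together with the hard-iron offset.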
|
StarcoderdataPython
|
4826365
|
from lib.utils.common import *
from PIL import Image
import io
import h5py
from lib.data import ScanNet2DLoader
import multiprocessing as mp
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
class ScanRefer2DDataset(ScanNet2DLoader):
def __init__(self, hparams, phase, target_samples, transforms, vocab_tools):
super().__init__(
hparams=hparams,
phase=phase,
target_samples=target_samples,
transforms=transforms
)
self.transforms = transforms
self.vocab_tools = vocab_tools
self.multiprocess_dict = {}
def __getitem__(self, index):
# pid = mp.current_process().pid
# if pid not in self.multiprocess_dict:
# self.multiprocess_dict[pid] = h5py.File(self.hparams['paths.db_2d'], 'r')
# db = self.multiprocess_dict[pid]
with h5py.File(self.hparams['paths.db_2d'], 'r') as db:
scene_id = self.verified_list[index]['scene_id']
object_id = self.verified_list[index]['object_id']
ann_id = self.verified_list[index]['ann_id']
sample_id = '{}-{}_{}'.format(scene_id, object_id, ann_id)
image_id = np.array([self.get_int_id(scene_id, object_id, ann_id)], dtype=np.long)
rgba = Image.open(io.BytesIO(np.array(db['color'][sample_id])))
# rgba size in PIL mode: (1296, 968)
# rgba shape in Array mode: (968, 1296, 4)
instance_mask = Image.open(io.BytesIO(np.array(db['instancemask'][sample_id])))
if self.hparams['downsample']:
new_width = 320
new_height = 240
width_scale, height_scale = new_width / rgba.size[0], new_height / rgba.size[1]
rgba = rgba.resize(size=(new_width, new_height))
instance_mask = instance_mask.resize(size=(new_width, new_height))
color = rgba.convert('RGB')
color = torch.from_numpy(np.asarray(color).astype(np.float32)).permute(2, 0, 1)
color /= 255.0
instance_mask = np.array(instance_mask)
caption = self.vocab_tools.get_caption(sample_id=sample_id) # returns fixed size tokenized caption, added sos eos and pad tokens.
caption_len = np.array([self.vocab_tools.get_caption_len(sample_id=sample_id)], dtype=np.int16)
color_width = np.array([color.shape[1]], dtype=np.int16)
color_height = np.array([color.shape[2]], dtype=np.int16)
boxes = np.array(db['box'][sample_id], dtype=np.float32)
if self.hparams['downsample']:
boxes[:, 0] = boxes[:, 0] * width_scale
boxes[:, 2] = boxes[:, 2] * width_scale
boxes[:, 1] = boxes[:, 1] * height_scale
boxes[:, 3] = boxes[:, 3] * height_scale
            oids = np.array(db['objectid'][sample_id])  # 0 (background) excluded: oids > 0.
target_object_idx = np.where(oids == int(object_id))[0] # return scalar or numpy ndarray warning
assert target_object_idx.shape[0] >= 1
masks = instance_mask == oids[:, None, None] # convert color encoded -> binary mask
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = np.zeros(boxes.shape[0], dtype=np.int64)
# DOUBLE CHECK THE LABELS LATER
labels = np.array(db['semanticclass'][sample_id], dtype=np.long)
targets = {
'image_id': image_id,
'image_width': color_width, # <- Required for coco evaluation
'image_height': color_height, # <- Required for coco evaluation
'boxes': boxes,
'labels': labels,
'masks': masks,
'area': area,
'iscrowd': iscrowd,
'target_object_idx': target_object_idx
}
if self.transforms is not None:
color = self.transforms['image_transforms'](color)
targets = self.transforms['dltodt'](targets)
caption = self.transforms['np_to_tensor'](caption)
caption_len = self.transforms['np_to_tensor'](caption_len)
depth = None
if self.hparams['depth_channel']:
depth = Image.open(io.BytesIO(np.array(db['depth'][sample_id])))
if self.hparams['downsample']:
depth = depth.resize(size=(new_width, new_height))
# make a [1, W, H] tensor
depth = np.array(depth).astype(np.float32)
depth = np.expand_dims(depth, axis=0)
depth = self.transforms['depth_transforms'](depth)
depth = self.transforms['np_to_tensor'](depth)
return color, depth, targets, caption, caption_len, sample_id
def _collate(self, batch):
return tuple(zip(*batch))
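# Added sketch (not part of the original file): _collate transposes a list of
# per-sample tuples into per-field tuples instead of stacking tensors, which
# suits detection-style targets with a variable number of boxes per image. A
# loader would typically be built as
#   torch.utils.data.DataLoader(dataset, batch_size=4, collate_fn=dataset._collate)
# where dataset is a ScanRefer2DDataset; its constructor arguments (hparams,
# transforms, vocab_tools, ...) are project specific and omitted here. The tiny
# demo below only illustrates the zip-based transpose.
if __name__ == "__main__":
    batch = [("color_0", "depth_0"), ("color_1", "depth_1")]
    print(tuple(zip(*batch)))  # (('color_0', 'color_1'), ('depth_0', 'depth_1'))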
|
StarcoderdataPython
|
1601560
|
<filename>src/data/914.py
import sys
sys.setrecursionlimit(500005)
stdin = sys.stdin
ni = lambda: int(ns())
nm = lambda: map(int, stdin.readline().split())
nl = lambda: list(nm())
ns = lambda: stdin.readline().strip()
class LCADoubling:
"""
I used these sites as reference
- https://ikatakos.com/pot/programming_algorithm/graph_theory/lowest_common_ancestor
- https://algo-logic.info/lca/
"""
def __init__(self, graph, root=0, with_weight=False):
n = len(graph)
self.depths = [-1] * n
self.distances = [-1] * n
prev_ancestors = self._init_dfs(graph, root, with_weight)
self.ancestors = [prev_ancestors]
max_depth = max(self.depths)
d = 1
while d < max_depth:
next_ancestors = [prev_ancestors[p] for p in prev_ancestors]
self.ancestors.append(next_ancestors)
d <<= 1
prev_ancestors = next_ancestors
def _init_dfs(self, graph, root=0, with_weight=False):
q = [(root, -1, 0, 0)]
direct_ancestors = [-1] * (len(graph) + 1)
while q:
v, p, dep, dist = q.pop()
direct_ancestors[v] = p
self.depths[v] = dep
self.distances[v] = dist
if with_weight:
q.extend(
(u, v, dep + 1, dist + w) for u, w in graph[v] if u != p)
else:
q.extend((u, v, dep + 1, dist + 1) for u in graph[v] if u != p)
return direct_ancestors
def get_lca(self, u, v):
du, dv = self.depths[u], self.depths[v]
if du > dv:
u, v = v, u
du, dv = dv, du
tu = u
tv = self.upstream(v, dv - du)
if u == tv:
return u
for k in range(du.bit_length() - 1, -1, -1):
mu = self.ancestors[k][tu]
mv = self.ancestors[k][tv]
if mu != mv:
tu = mu
tv = mv
lca = self.ancestors[0][tu]
assert lca == self.ancestors[0][tv]
return lca
def upstream(self, v, k):
i = 0
while k:
if k & 1:
v = self.ancestors[i][v]
k >>= 1
i += 1
return v
def get_distance(self, u, v):
lca = self.get_lca(u, v)
return self.distances[u] + self.distances[v] - 2 * self.distances[lca]
def is_on_path(self, u, v, a):
return self.get_distance(u, a) + self.get_distance(
a, v) == self.get_distance(u, v)
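# Example (added comment), 0-indexed: for edges (0,1), (0,2), (2,3) the adjacency
# list is [[1, 2], [0], [0, 3], [2]]; LCADoubling(that_graph).get_lca(1, 3) == 0
# and get_distance(1, 3) == 3 (path 1-0-2-3), which is what the query loop below
# uses to decide between 'Town' (even distance) and 'Road' (odd distance).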
n, q = nm()
g = [[] for i in range(n)]
for _ in range(n - 1):
a, b = nm()
a -= 1
b -= 1
g[a].append(b)
g[b].append(a)
lca = LCADoubling(g)
for _ in range(q):
c, d = nm()
c -= 1
d -= 1
dis = lca.get_distance(c, d)
if dis % 2 == 0:
print('Town')
else:
print('Road')
|
StarcoderdataPython
|
5105293
|
<reponame>karlneco/kanji-test-maker<gh_stars>1-10
from flask import url_for
import requests
from hktm import db
from hktm.models import User
def login(client,username,password):
return client.post('/',data=dict(email=username,password=password), follow_redirects=True)
def logout(client):
return client.get('/users/logout',follow_redirects=True)
def test_index_page(test_client,init_database):  # needs init, otherwise it will die in a test chain
"""
GIVEN a Flask application
WHEN the '/' page is requested (GET)
THEN check the response is valid
"""
response = test_client.get('/')
assert response.status_code == 200
assert '補習校漢字テスト'.encode('utf-8') in response.data
assert 'value="ログイン"'.encode('utf-8') in response.data
assert b'<a href="/users/register"' in response.data
def test_user_home(client,auth_user,init_database,authenticated_request):
"""
GIVEN a Flask application
WHEN the '/users/login or / (index) page is posted to (POST) with valid creds
THEN login the user
"""
response = client.post(url_for('root.index'),data=dict(username='<EMAIL>',password='password'))
# try to get home
response = client.get(url_for('root.home'))
assert response.status_code == 200
#assert 0
assert '新規作成または、'.encode('utf-8') in response.data
def test_valid_registration(test_client, init_database):
"""
GIVEN a Flask application
WHEN the '/register' page is posted with valid data
THEN check the response is valid and the user is logged in
"""
response = test_client.post('/users/register',
data=dict(email='<EMAIL>',
password='password',
password_confirm='password'),
follow_redirects=True)
assert response.status_code == 200
assert '新しくアカウントが作成されました。'.encode('utf-8') in response.data
user = User.query.filter_by(email='<EMAIL>').first()
assert isinstance(user, User)
assert user.grades == 'none'
def test_duplicate_registration(test_client, init_database):
"""
GIVEN a Flask application
WHEN the '/register' page is posted to (POST)
THEN check the response is valid and the user is logged in
"""
response = test_client.post('/users/register',
data=dict(email='<EMAIL>',
password='password',
password_confirm='password'),
follow_redirects=True)
assert response.status_code == 200
assert '新しくアカウントが作成されました。'.encode('utf-8') in response.data
response = test_client.post('/users/register',
data=dict(email='<EMAIL>',
password='password',
password_confirm='password'),
follow_redirects=True)
assert response.status_code == 200
assert 'このメールアドレスは既に登録済みです。'.encode('utf-8') in response.data
def test_user_login(test_client, init_database):
"""
GIVEN a Flask application
WHEN the '/users/login or / (index) page is posted to (POST) with valid creds
THEN login the user
"""
#add a test user
user = User('<EMAIL>','password')
user.grades = '1' # a valid user needs a grade(s)
db.session.add(user)
db.session.commit()
#try to login
response = test_client.post('/',
data=dict(email='<EMAIL>',
password='password'),
follow_redirects=True)
assert response.status_code == 200
assert 'ログインに成功しました。'.encode('utf-8') in response.data
def test_user_login_fail(test_client, init_database):
"""
GIVEN a Flask application
WHEN the '/users/login or / (index) page is posted to (POST) with INVALID creds
THEN login the user
"""
#add a test user
user = User('<EMAIL>','password')
user.grades = '1' # a valid user needs a grade(s)
db.session.add(user)
db.session.commit()
#try to login wtih bad password
response = test_client.post('/',
data=dict(email='<EMAIL>',
password='<PASSWORD>'),
follow_redirects=True)
assert response.status_code == 200
assert 'メールアドレスまたはパスワードが一致しません。'.encode('utf-8') in response.data
#try to login wtih bad username
response = test_client.post('/',
data=dict(email='<EMAIL>',
password='<PASSWORD>'),
follow_redirects=True)
assert response.status_code == 200
assert 'メールアドレスまたはパスワードが一致しません。'.encode('utf-8') in response.data
###############################################################################
# to see full response use try block to intercept the assetion
# try:
# make asserts here
# except AssertionError as e:
# raise ResponseAssertionError(e, response)
#
class ResponseAssertionError(AssertionError):
def __init__(self, e, response):
response_dump = "\n + where full response was:\n" \
"HTTP/1.1 {}\n" \
"{}{}\n".format(response.status, response.headers, response.data)
self.args = (e.args[0] + response_dump,)
|
StarcoderdataPython
|
336459
|
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import terraengineer_model
@pytest.mark.vcr
@pytest.mark.parametrize(
"asset,address",
[("ust", "terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8")],
)
def test_get_history_asset_from_terra_address(asset, address, recorder):
df = terraengineer_model.get_history_asset_from_terra_address(
asset=asset,
address=address,
)
recorder.capture(df)
|
StarcoderdataPython
|
9601966
|
<filename>app/StockModelLinear.py<gh_stars>0
import numpy as np
import pandas as pd
import datetime
import logging
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.metrics import r2_score
from StockModel import StockModel
from StockDateValidator import StockDateValidator
class StockModelLinear(StockModel):
"""
A class to encapsulate a trained model that uses Linear Regression and the date range for which it was trained.
...
Attributes
----------
    modelAdjCloseNextDay, modelOpen, modelHigh, modelLow, modelClose : sklearn.pipeline.Pipeline
        the trained pipelines, one per predicted column
from_date : date
beginning date range
to_date : date
ending date range
"""
def __init__(self, symbol: str, df: pd.DataFrame, from_date: datetime.date, to_date: datetime.date, df_info: pd.DataFrame):
"""
        Constructs all the necessary attributes for the StockModelLinear object.
Parameters
----------
symbol : str
The stock ticker symbol, e.g. GOOGL, MSFT, etc.
df : Pandas.DataFrame
The underlying historical DataFrame
from_date : datetime.date
beginning date range
to_date : datetime.date
ending date range
df_info : Pandas.DataFrame
Additional symbol information dataframe, columns:
'symbol', 'name', 'close_price', 'volume', 'fifty_two_week_low', 'fifty_two_week_high', 'eps', 'div_yield', 'sector', 'industry', 'begin_train', 'end_train'
"""
self.symbol = symbol
self.df = df
self.df_info = df_info
if ('Date' in df.columns):
self.df = df.sort_values(by=['Date'])
self.modelAdjCloseNextDay = make_pipeline(StandardScaler(with_mean=False), LinearRegression())
self.modelOpen = make_pipeline(StandardScaler(with_mean=False), LinearRegression())
self.modelHigh = make_pipeline(StandardScaler(with_mean=False), LinearRegression())
self.modelLow = make_pipeline(StandardScaler(with_mean=False), LinearRegression())
self.modelClose = make_pipeline(StandardScaler(with_mean=False), LinearRegression())
self.from_date = from_date
self.to_date = to_date
# For now set to end date and empty test data frames, but this will be updated after function fit runs
self.last_trained_date = to_date
self.X_testAdjCloseNextDay = pd.DataFrame()
self.y_testAdjCloseNextDay = pd.DataFrame()
def fit(self, test_size: int, random_state: int):
"""
        Fits (trains) the StockModelLinear pipelines. We initially used train_test_split, but we do not want a
        random split: the data is split in order, training on all but the last `test_size` rows, and the last
        trained date is recorded so the model's effectiveness can be tested against the held-out tail.
Parameters
----------
        test_size : int
            Number of most recent rows held out as test data
        random_state : int
            Random seed; kept for API compatibility but unused now that the split is ordered rather than random
"""
# Set the last_trained_date based on the test_size index - 1
self.last_trained_date = self.df.loc[self.df.tail(test_size).index - 1, 'Date'].values[0]
self.last_trained_date = datetime.datetime.strptime(np.datetime_as_string(self.last_trained_date,unit='D'), '%Y-%m-%d')
#Split into explanatory and response variables for Adj Close Next Day
X2_AdjCloseNextDay = self.df[['Open', 'High', 'Low', 'Close', 'Adj Close']]
y2_AdjCloseNextDay = self.df['Adj Close Next Day']
#Split into train and test, then fit the model for Adj Close Next Day
##X_trainAdjCloseNextDay, X_testAdjCloseNextDay, y_trainAdjCloseNextDay, y_testAdjCloseNextDay = train_test_split(X2_AdjCloseNextDay, y2_AdjCloseNextDay, test_size = test_size, random_state = random_state)
X_trainAdjCloseNextDay = X2_AdjCloseNextDay.drop(X2_AdjCloseNextDay.tail(test_size).index)
y_trainAdjCloseNextDay = y2_AdjCloseNextDay.drop(y2_AdjCloseNextDay.tail(test_size).index)
# Save test data for optional call to function evaluate model
self.X_testAdjCloseNextDay = X2_AdjCloseNextDay.tail(test_size)
self.y_testAdjCloseNextDay = y2_AdjCloseNextDay.tail(test_size)
# Run the model fit for Adj Close Next Day
self.modelAdjCloseNextDay.fit(X_trainAdjCloseNextDay, y_trainAdjCloseNextDay)
#Split into explanatory and response variables for Open
X2_Open = self.df[['High', 'Low', 'Close', 'Adj Close']]
y2_Open = self.df['Open']
#Split into train and test, then fit the model for Open
##X_trainOpen, X_testOpen, y_trainOpen, y_testOpen = train_test_split(X2_Open, y2_Open, test_size = test_size, random_state = random_state)
X_trainOpen = X2_Open.drop(X2_Open.tail(test_size).index)
y_trainOpen = y2_Open.drop(y2_Open.tail(test_size).index)
self.modelOpen.fit(X_trainOpen, y_trainOpen)
#Split into explanatory and response variables for High
X2_High = self.df[['Open', 'Low', 'Close', 'Adj Close']]
y2_High = self.df['High']
#Split into train and test, then fit the model for High
##X_trainHigh, X_testHigh, y_trainHigh, y_testHigh = train_test_split(X2_High, y2_High, test_size = test_size, random_state = random_state)
X_trainHigh = X2_High.drop(X2_High.tail(test_size).index)
y_trainHigh = y2_High.drop(y2_High.tail(test_size).index)
self.modelHigh.fit(X_trainHigh, y_trainHigh)
#Split into explanatory and response variables for Low
X2_Low = self.df[['Open', 'High', 'Close', 'Adj Close']]
y2_Low = self.df['Low']
#Split into train and test, then fit the model for Low
##X_trainLow, X_testLow, y_trainLow, y_testLow = train_test_split(X2_Low, y2_Low, test_size = test_size, random_state = random_state)
X_trainLow = X2_Low.drop(X2_Low.tail(test_size).index)
y_trainLow = y2_Low.drop(y2_Low.tail(test_size).index)
self.modelLow.fit(X_trainLow, y_trainLow)
#Split into explanatory and response variables for Close
X2_Close = self.df[['Open', 'High', 'Low', 'Adj Close']]
y2_Close = self.df['Close']
#Split into train and test, then fit the model for Close
##X_trainClose, X_testClose, y_trainClose, y_testClose = train_test_split(X2_Close, y2_Close, test_size = test_size, random_state = random_state)
X_trainClose = X2_Close.drop(X2_Close.tail(test_size).index)
y_trainClose = y2_Close.drop(y2_Close.tail(test_size).index)
self.modelClose.fit(X_trainClose, y_trainClose)
def predict(self, current_date: datetime.date, df_curr: pd.DataFrame):
"""
        Make the prediction for the given current_date using the StockModelLinear object.
We account for scenarios where tests are run for data within the range of our test data,
meaning we have the reported Open, High, Low, Close, Adj Close already, *or* the case
where we are asked to predict beyond our test data.
Parameters
----------
current_date : datetime.date
The date being predicted
df_curr : Pandas.DataFrame
The DataFrame containing the inputs for the previous date used to predict current_date data
Returns
-------
df_next : Pandas.DataFrame
Resulting DataFrame containing current_date predictions
"""
# Handle the case where the date we want to predict falls outside of our test data so
# we have to predict the Open, High, Low, Close, Adj Close for the date in question
# in order to set up current_date + 1 data, otherwise, we can simply use the existing
# test data values to predict current_date + 1
if (current_date > self.to_date):
# Get the inputs we need to make our prediction
df_next = df_curr.tail(1).copy()
X_AdjCloseNextDay = df_next[['Open', 'High', 'Low', 'Close', 'Adj Close']]
y_AdjCloseNextDay = self.modelAdjCloseNextDay.predict(X_AdjCloseNextDay)
# Set previous day Adj Close Next Day
df_next.iloc[-1, df_curr.columns.get_loc('Adj Close Next Day')] = y_AdjCloseNextDay[0]
# Now predict all of our indicators which will be used when predicting since in this case
# current_date is beyond the size of our existing dataset which ends on self.to_date
X_Open = df_next[[ 'High', 'Low', 'Close', 'Adj Close']]
y_Open = self.modelOpen.predict(X_Open)
X_High = df_next[['Open', 'Low', 'Close', 'Adj Close']]
y_High = self.modelHigh.predict(X_High)
X_Low = df_next[['Open', 'High', 'Close', 'Adj Close']]
y_Low = self.modelLow.predict(X_Low)
X_Close = df_next[['Open', 'High', 'Low', 'Adj Close']]
y_Close = self.modelClose.predict(X_Close)
# Add our new record for current_date that uses all of our predicted data
# Notice we don't fill in Adj Close Next Day as that gets set when you run
# for current_date + 1
df_ret = df_next.append({'Symbol': self.symbol, 'Date': current_date, 'Open': y_Open[0], 'High': y_High[0], 'Low': y_Low[0], 'Close': y_Close[0], 'Adj Close': y_AdjCloseNextDay[0], 'Adj Close Next Day': np.nan}, ignore_index=True)
else:
df_ret = self.df[self.df['Date'] == current_date].copy()
# Get the inputs we need to make our prediction
df_next = df_curr.tail(1).copy()
X_AdjCloseNextDay = df_next[['Open', 'High', 'Low', 'Close', 'Adj Close']]
y_AdjCloseNextDay = self.modelAdjCloseNextDay.predict(X_AdjCloseNextDay)
# Set previous day Adj Close Next Day
df_ret.iloc[:, df_ret.columns.get_loc('Adj Close Next Day')] = y_AdjCloseNextDay[0]
return df_ret
def evaluateModel(self, X_test = pd.DataFrame(), Y_test = pd.DataFrame()):
"""
Evaluates the given model with the given X test data and expected Y test data
Parameters
----------
X_test: Pandas.DataFrame
Optional X input test data, if not given use the test data we have self.X_testAdjCloseNextDay
Y_test: Pandas.DataFrame
Optional Y output test data, if not given use the test data we have self.y_testAdjCloseNextDay
Returns
-------
results (dict) - Dictionary of relevant scores
Examples
--------
>>> evaluate_model()
>>> evaluate_model(X_test, Y_test)
"""
# Use our object test values if none given
if len(X_test) == 0:
X_test = self.X_testAdjCloseNextDay
if len(Y_test) == 0:
Y_test = self.y_testAdjCloseNextDay
# run the model predictions to evaluate performance
y_pred = self.modelAdjCloseNextDay.predict(X_test)
#Rsquared and y_test
length_y_test = len(Y_test)#num in Y_test
rsquared_score = -1
if (length_y_test < 2):
logging.debug(" The r-squared score of the model is NOT calculated for sample size less than 2: {} value(s).".format(length_y_test))
else:
rsquared_score = r2_score(Y_test, y_pred)#r2_score
logging.debug(" The r-squared score of the model {:.2f} on {} values.".format(rsquared_score, length_y_test))
actuals = Y_test
predicted = y_pred
##plt.scatter(actuals, predicted, color='Darkblue')
##plt.xlabel("Actual Price")
##plt.ylabel("Predicted Price")
##plt.show()
x2 = actuals.mean()
y2 = predicted.mean()
accuracy = x2/y2
results = { 'R²|R-squared Score, closer to 1.00 the better': rsquared_score
, 'Tested Values|Number of data points tested to determine model accuracy': length_y_test
, 'Accuracy|Model accuracy based on the mean ratios of actual prices over predicted prices, closer to 1.00 the better, higher than 1.00 we are under valueing': accuracy
, 'MAE|Mean Absolute Error or MAE measures the average magnitude of the errors in a set of predictions, without considering their direction, the closer to 0.00 the better': metrics.mean_absolute_error(actuals, predicted)
, 'MSE|Mean Squared Error or MSE is the quadratic scoring rule that also measures the average magnitude of the error, the closer to 0.00 the better': metrics.mean_squared_error(actuals, predicted)
, 'RMSE|Root Mean Squared Error or RMSE measures the average of the squares of the errors, the average squared difference between the estimated values and the actual value, the closer to 0.00 the better': np.sqrt(metrics.mean_squared_error(actuals, predicted))
}
return results, actuals, predicted
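# Usage sketch (added; not part of the original application). Runs the model on
# purely synthetic prices to show the expected columns and the fit /
# evaluateModel flow; the symbol, dates and values are made up.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_rows = 60
    dates = pd.date_range("2021-01-01", periods=n_rows, freq="B")
    close = 100 + np.cumsum(rng.normal(0, 1, n_rows))
    demo_df = pd.DataFrame({
        "Symbol": "DEMO",
        "Date": dates,
        "Open": close + rng.normal(0, 0.5, n_rows),
        "High": close + 1.0,
        "Low": close - 1.0,
        "Close": close,
        "Adj Close": close,
        "Adj Close Next Day": np.append(close[1:], close[-1]),
    })
    model = StockModelLinear("DEMO", demo_df, dates[0].date(), dates[-1].date(), pd.DataFrame())
    model.fit(test_size=10, random_state=42)
    results, actuals, predicted = model.evaluateModel()
    print(results)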
|
StarcoderdataPython
|
8087429
|
from math import radians, degrees, sin, cos, tan, asin, acos, atan2
import numpy as np
import matplotlib.pyplot as plt
#Define Latitude in radians
lat=radians(49.3978620896919)
#Define hoirzontal limit in altitude in degree
horizon_limit=12
def equ_to_altaz(ha,dec):
""" Transforms equatorial coordinates (hourangle, declination)
to horizontal coordinates (azimuth,altitude).
    Input: ha in hours as float, dec in degrees as float.
    Returns altitude and azimuth as floats in degrees; for scalar input
    the formatted coordinate strings are returned as well.
"""
#Check if Input arrays have same dimensions
if not np.isscalar(ha) and not np.isscalar(dec):
if (len(ha)!=len(dec) or ha.ndim!=1 or dec.ndim!=1):
return 0
#Convert hour angle to radians
#Convert hour angle to degree first and convert negative hour angles to
#positive ones (e.g. -11 to 13)
ha=ha+24*(ha<0)
ha=np.radians(ha*15.)
#Convert declination to radians
dec=np.radians(dec)
    #Calculate altitude and azimuth (formulae from the celestial mechanics script
    #of <NAME>)
    #For altitude we have the formula:
    #sin(alt)=cos(ha)*cos(lat)*cos(dec)+sin(lat)*sin(dec)
alt=np.arcsin(np.sin(lat)*np.sin(dec)+np.cos(lat)*np.cos(dec)*np.cos(ha))
#For azimuth we have the formula
#tan(az)=-sin(ha)/(cos(lat)*tan(dec)-sin(lat)*cos(ha))
az=np.arctan2(np.sin(ha),(-np.cos(lat)*np.tan(dec)+np.sin(lat)*np.cos(ha)))
#Convert alt and az to degrees
alt=np.degrees(alt)
az=np.degrees(az)
    #If the input was an array, return the float arrays directly
if not np.isscalar(alt):
return (alt,az)
    #If the input was a single value, also format the output
#In that case transform arrays to float
alt=float(alt)
az=float(az)
formated_coord_list=[]
#Also Format alt/az to +dd°mm'ss" as string
    #Get the sign of the coordinate
for coord in [alt,az]:
if coord>=0:
sign='+'
elif coord<0:
sign='-'
        #Take the absolute value of coord to convert it to dd mm ss
        coord=abs(coord)
        #Format the coordinate to dd°mm'ss"
deg=int(coord)
rest=abs(coord-deg)*60
minutes=int(rest)
rest=abs(rest-minutes)*60
        #We want to round seconds to get a more continuous updating of seconds
seconds=round(rest)
#But we have to take care of rounding up to 60. Increase minutes by one in that case.
if seconds==60:
seconds=0
minutes=minutes+1
coord='''{}{:02}°{:02}'{:02}"'''.format(sign,deg,minutes,seconds)
formated_coord_list.append(coord)
#Return altitude and azimuth
return (alt,az,formated_coord_list[0],formated_coord_list[1])
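#--- Illustrative sketch (not part of the original module) ---
#Single-object call: scalar inputs return the numeric altitude/azimuth in
#degrees plus the formatted strings. The HA/dec pair below is arbitrary.
if __name__ == '__main__':
    _ex_alt, _ex_az, _ex_alt_str, _ex_az_str = equ_to_altaz(2.5, 35.0)
    print("equ_to_altaz(2.5h, 35deg) ->", _ex_alt_str, _ex_az_str)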
def altaz_to_equ(alt,az):
""" Transforms horizontal coordinates (azimuth,altitude).
to equatorial coordinates (hourangle, declination).
Input: alt in degrees as float or array of floats,
az in degrees as float or array of floats.
Returns ha as float in hours and dec as float in degrees.
"""
#Convert alt and az to radians
alt=np.radians(alt)
az=np.radians(az)
    #Calculate hour angle and declination (formulae from celestial mechanics script
#of Genevieve Parmentier)
#For hour angle we have the formula:
#tan(ha)=(sin(az))/(cos(lat)*tan(alt)+cos(az)*sin(lat))
ha=np.arctan2(np.sin(az),np.cos(lat)*np.tan(alt)+np.cos(az)*np.sin(lat))
#For declination we have the formula:
#sin(dec)=sin(lat)*sin(alt)-cos(lat)*cos(alt)*cos(az)
dec=np.arcsin(np.sin(lat)*np.sin(alt)-np.cos(lat)*np.cos(alt)*np.cos(az))
#Convert ha to hours
ha=np.degrees(ha)/15.
#Convert dec to degrees
dec=np.degrees(dec)
return (ha, dec)
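#--- Illustrative round-trip sketch (not part of the original module) ---
#altaz_to_equ inverts equ_to_altaz, so chaining the two should reproduce the
#original hour angle and declination up to floating point rounding.
if __name__ == '__main__':
    _rt_alt, _rt_az = equ_to_altaz(np.array([2.0]), np.array([30.0]))
    _rt_ha, _rt_dec = altaz_to_equ(_rt_alt, _rt_az)
    print("Round trip HA, dec:", _rt_ha, _rt_dec)  #expected close to [2.], [30.]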
def check_coordinates(alt,az):
"""Checks if coordinates are observable and safe to slew.
    Returns True if the coordinates stay above the limits.
    Returns False if the coordinates violate the limits (or are not numbers).
"""
#Check if alt and az are set as floats
if (not isinstance(alt, (int, float))
or not isinstance(az, (int, float))):
return False
#Calculate altitude limit
alt_limit=calc_alt_limit(az)
#Check if altitude is above or below limit
if alt>=alt_limit:
return True
else:
return False
def calc_alt_limit(az):
""" Calculates altitude limits.
Returns Altitude limit in degrees.
Input: Array of az as floats between -180 and 180
"""
#Check Input: If int or float make array
if isinstance(az,(float,int)):
az=np.array([az])
elif isinstance(az,list):
az=np.array(az)
#Define limits. All limits are included.
    #We go from -180.01 to 180.01 to make sure that 180.0 is properly
    #included. We also include 360.01; it should never be needed, but it
    #keeps bad inputs from breaking the lookup.
limits=np.array([[horizon_limit, -180.01],
[horizon_limit, 97.0],
[18.047683650516614, 101.22273624588037],
[19.922540694112776, 108.09973819537765],
[19.92473999691732, 112.96653118269231],
[18.1891109125214, 115.94832778139686],
[17.26156820756814, 119.6115621873876],
[17.3079984461787, 124.02768286442313],
[17.61337050520085, 128.47376745531645],
[16.514643086444128, 131.5063030183839],
[17.105176559235456, 135.7850030762675],
[15.574353529644203, 138.2131928476609],
[15.367408374687445, 141.5357258928432],
[13.465127305224598, 143.60311637027976],
[12.635376162837199,146.34084417895636],
[horizon_limit, 150.0],
[horizon_limit, 180.01],
[horizon_limit, 360.01]])
#Create multidimensional arrays of same shape
    #The idea is to have all differences for one given az in one row.
    #So we build an az_lim_matrix where each row contains all azimuths of
    #the altitude limits. It has as many rows as given azimuths,
    #and in each row all limit azimuths are repeated.
az_lim_matrix=np.array(np.tile(limits[:,1],az.shape[0]))
az_lim_matrix=az_lim_matrix.reshape(az.shape[0],limits.shape[0])
#The az_matrix is constructed so that in each line only one value of
#input az is written
#It has as many columns as azimuth limits
az_matrix=np.array(np.repeat(az,limits.shape[0]))
az_matrix=az_matrix.reshape(az.shape[0],limits.shape[0])
#Calculate difference matrix
diff=az_lim_matrix-az_matrix
#Calculate matrices with only positive and negative values respectively
pos=(diff>=0)*diff
neg=(diff<0)*diff
#insert +/- infinity for 0, to avoid finding maxima/minima there
pos[pos==0]=np.inf
neg[neg==0]=-np.inf
#Find one limit at lowest positive value of difference in az
#The other at greatest negative value
up_az_lim=az_lim_matrix[np.where(diff==np.amin(pos,axis=1,keepdims=True))]
low_az_lim=az_lim_matrix[np.where(diff==np.amax(neg,axis=1,keepdims=True))]
#Define 1D array with az_limits from limits
az_lim=limits[:,1]
#Get indices of sorted array. Note that it normally should be sorted already.
#But it is useful if one would insert new limits in unsorted order.
#Note that array is not really sorted, so we do not lose indices.
#We only get the indices with which you could sort the array
az_lim_sorted = np.argsort(az_lim)
#Perform searchsort with sorted indices. Search for up_az_lim values
#in general az_limits
up_pos = np.searchsorted(az_lim[az_lim_sorted], up_az_lim)
#get the indices, where you found the up_az_limits
up_indices = az_lim_sorted[up_pos]
#And take the up_alt_lim at these indices
up_alt_lim=limits[up_indices,0]
#Analog for low_alt_lim
low_pos = np.searchsorted(az_lim[az_lim_sorted], low_az_lim)
low_indices = az_lim_sorted[low_pos]
low_alt_lim=limits[low_indices,0]
#Take the maximum element wise. So we always want the largest limit
#of the two limit borders.
alt_lim=np.maximum(up_alt_lim,low_alt_lim)
return alt_lim
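#--- Illustrative sketch (not part of the original module) ---
#The limit equals horizon_limit over most of the horizon and is raised
#roughly between az=97deg and az=150deg; check_coordinates compares a
#position against this limit and rejects anything below it.
if __name__ == '__main__':
    print("Altitude limits at az=-90, 110, 140:",
          calc_alt_limit([-90.0, 110.0, 140.0]))
    print("alt=50deg, az=0deg observable:", check_coordinates(50.0, 0.0))
    print("alt=5deg,  az=0deg observable:", check_coordinates(5.0, 0.0))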
def calc_obs_time(ha,dec):
"""Calculates timespan, one can still observe star until it reaches
horizon limit.
Better use approx_obs_time.
"""
#First calculate altitude and azimuth
alt,az=equ_to_altaz(ha,dec)[:2]
#Save current hour angle in hours
ha_now=ha
#Convert hour angle to radians
ha=radians(ha*15.)
#Convert declination to radians
dec=radians(dec)
horizon_limit_rad=radians(horizon_limit)
def calc_ha_set(ha,dec):
"""Calculates hour angle at which star reaches horizontal limit.
"""
#Calculate ha_Set in radian
try:
ha_set=acos((sin(horizon_limit_rad)-sin(lat)*sin(dec))/
(cos(lat)*cos(dec)))
#Calculate ha_set in hours
ha_set=degrees(ha_set)/15.
return ha_set
except ValueError:
return False
#Check if coordinates are within limits
if not check_coordinates(alt,az):
message = "Currently unobservable"
return message
else:
ha_set=calc_ha_set(ha,dec)
if not ha_set:
message="Circumpolar"
return message
else:
#Calculate observing time (in sidereal hours)
obs_time=ha_set-ha_now
#Convert to solar time units (in seconds)
obs_time=obs_time*0.9972695601852*3600
return obs_time
def approx_obs_time(star_ha,star_dec):
"""Calculates an approximate observing time for a star.
Input: Hour angle in hours as float. Declination in degrees as float.
    Output: Observable time in solar hours.
Uses hard limits of the Waltz Telescope
"""
#First check if star is already under a limit
star_alt,star_az,_,__=equ_to_altaz(star_ha,star_dec)
if not check_coordinates(star_alt,star_az):
return 0
#Create az and alt_limit array
az=np.arange(-180,180,0.01)
alt_limit=np.zeros(len(az))
#Define dec_limits and hour angle arrays
dec_limit=np.zeros(len(az))
ha=np.zeros(len(az))
#Calculate alt_limit for every az
alt_limit=calc_alt_limit(az)
#Transform altaz limits to ha,dec limits
ha,dec_limit=altaz_to_equ(alt_limit,az)
#Define star trajectory (dec stays constant, hour angle increases)
traj_ha=np.arange(-11.999,11.999,0.05)
traj_dec=np.ones(len(traj_ha))*star_dec
#Approximately calculate time until hard limit is reached
#If Star is circumpolar obs_time is 24 hours
if star_dec > np.amax(dec_limit):
sid_obs_time=24.
obs_time=24.
else:
#If not circumpolar
#Calculate the absolute differences between those dec_limits
#that are at hour angles larger than the stars hour angle
#(to prevent to get the intersection on the eastern side)
#and the stars declination
dec_diff=np.abs(dec_limit[ha>star_ha]-star_dec)
#Also cut out the hour angle values on the eastern side in ha array
#Needed to get same dimension
#Otherwise argmin wouldn't work
ha_later=ha[ha>star_ha]
        #Hour Angle at setting (reaching red limit) is at the same index as the
#minimum of dec_diff
ha_set=ha_later[np.argmin(dec_diff)]
#Calculate the sidereal time until the star sets
sid_obs_time=ha_set-star_ha
#Sidereal hours convert to solar hours (normal time)
#via 1h_sid=0.9972695601852h_sol
obs_time=sid_obs_time*0.9972695601852
return obs_time
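#--- Illustrative sketch (not part of the original module) ---
#Stars already below a limit return 0 and circumpolar stars a full day;
#anything in between gets the remaining time until the western limit.
if __name__ == '__main__':
    print("Approximate observable time [h]:", approx_obs_time(1.0, 20.0))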
def calc_tree_limit(az):
"""Calculates tree limit in altitude for given azimuth.
Returns alt_limit in degrees.
Input: azimuth in degrees.
"""
#Define Tree limits as np.array
#I put zero alt limit, where horizontal or cupboard limit is reached,
#because we do not care about tree limits below hard limits.
#Does not include all cupboard or horizontal areas, because it is zero there anyway
#Check Input: If int or float make array
if isinstance(az,(float,int)):
az=np.array([az])
elif isinstance(az,list):
az=np.array(az)
tree_lim=np.array([[0.0,-180.01],
[0.0,-165.3455807696162],
[13.926531858678072,-161.22484660697867],
[17.195599636413682,-157.44910241374103],
[0.0,-145.0],
[0.0,-148.91114359009313],
[21.58816304165209,-149.10471551491722],
[12.182100707176437,-135.7205489225959],
[17.29251687747958,-132.16310694376807],
[17.358959391076436,-128.24795814317287],
[16.554208994852967,-123.34078740486738],
[13.011626593972498,-115.7274970740633],
[0.0,-110.73615475166689],
[0.0,-100.57064217069362],
[0.0,-88.89096473867299],
[12.767315026462386,-81.11510631225428],
[13.63658755486348,-71.9033237952604],
[13.730797953998692,-62.34671848645827],
[16.517753594055026,-54.995932340824126],
[15.385051933925672,-45.40736739783195],
[14.249827605471754,-36.515993587901434],
[17.244394510206345,-29.80225310600212],
[16.786645206804543,-25.728955461859304],
[19.385806016233353,-22.649468723617428],
[17.815069758506976,-18.39429645277085],
[13.917540178597369,-13.926187167552994],
[15.800229806019255,-7.090540182345275],
[14.402137910308108,0.0],
[14.206429726562314,6.944851122938447],
[13.917540178597369,13.926187167552998],
[14.39736773524922,21.242902795231192],
[18.693114258587876,26.36363462208435],
[19.88987006933479,30.822460198427866],
[17.742310373009012,38.16015634421578],
[15.23857922621577,45.32492015300472],
[19.331358903370177,52.23568016291085],
[19.875106943741056,57.28528304962664],
[25.903731403911603,66.86068454273801],
[27.92145790178483,73.94757655442089],
[25.482784869502435,82.40122417583163],
[24.75519103243617,87.06221750457497],
[21.78914070627903,89.41320012139184],
[0.0,97.0],
[0.0,101.22273624588037],
[0.0,108.09973819537765],
[0.0,112.96653118269231],
[0.0,115.94832778139686],
[21.796949268702594,124.046616191669],
[0.0,119.6115621873876],
[20.05071675877851,126.59684863727593],
[0.0,124.02768286442313],
[0.0,146.34084417895636],
[0.0,180.01]])
#Create multidimensional arrays of same shape
    #The idea is to have all differences for one given az in one row.
    #So we build an az_lim_matrix where each row contains all azimuths of
    #the tree limits. It has as many rows as given azimuths,
    #and in each row all limit azimuths are repeated.
az_lim_matrix=np.array(np.tile(tree_lim[:,1],az.shape[0]))
az_lim_matrix=az_lim_matrix.reshape(az.shape[0],tree_lim.shape[0])
#The az_matrix is constructed so that in each line only one value of
#input az is written
#It has as many columns as azimuth limits
az_matrix=np.array(np.repeat(az,tree_lim.shape[0]))
az_matrix=az_matrix.reshape(az.shape[0],tree_lim.shape[0])
#Calculate difference matrix
diff=az_lim_matrix-az_matrix
#Calculate matrices with only positive and negative values respectively
pos=(diff>=0)*diff
neg=(diff<0)*diff
#insert +/- infinity for 0, to avoid finding maxima/minima there
pos[pos==0]=np.inf
neg[neg==0]=-np.inf
#Find one limit at lowest positive value of difference in az
#The other at greatest negative value
up_az_lim=az_lim_matrix[np.where(diff==np.amin(pos,axis=1,keepdims=True))]
low_az_lim=az_lim_matrix[np.where(diff==np.amax(neg,axis=1,keepdims=True))]
#Define 1D array with az_limits from limits
az_lim=tree_lim[:,1]
#Get indices of sorted array. Note that it normally should be sorted already.
#But it is useful if one would insert new limits in unsorted order.
#Note that array is not really sorted, so we do not lose indices.
#We only get the indices with which you could sort the array
az_lim_sorted = np.argsort(az_lim)
#Perform searchsort with sorted indices. Search for up_az_lim values
#in general az_limits
up_pos = np.searchsorted(az_lim[az_lim_sorted], up_az_lim)
#get the indices, where you found the up_az_limits
up_indices = az_lim_sorted[up_pos]
#And take the up_alt_lim at these indices
up_alt_lim=tree_lim[up_indices,0]
#Analog for low_alt_lim
low_pos = np.searchsorted(az_lim[az_lim_sorted], low_az_lim)
low_indices = az_lim_sorted[low_pos]
low_alt_lim=tree_lim[low_indices,0]
#Take the maximum element wise. So we always want the largest limit
#of the two limit borders.
tree_lim=np.maximum(up_alt_lim,low_alt_lim)
return tree_lim
def calculate_refraction_from_true_coord(ha,dec,temp=10,press=101.0):
"""Calculates refraction for given coordinates.
Input: True ha in hours, dec in degrees. Temp in degrees Celsius.
Pressure in kPa.
Output: Refraction correction in arcminutes.
"""
#Calculate altitude and azimuth
alt,az,_,__=equ_to_altaz(ha,dec)
#Calculate temperature/pressure factor
factor= press/101*283/(273+temp)
#Calculate refraction in arcminutes
R=1.02*(1/tan(radians(alt+10.3/(alt+5.11))))*factor
return R
def calculate_refraction_from_apparent_coord(ha,dec,temp=10,press=101.0):
"""Calculates refraction for given coordinates.
Input: Apparent ha in hours, dec in degrees. Temp in degrees Celsius.
Pressure in kPa.
Output: Refraction correction in arcminutes.
"""
#Calculate altitude and azimuth
alt,az,_,__=equ_to_altaz(ha,dec)
#Calculate temperature/pressure factor
factor= press/101*283/(273+temp)
#Calculate refraction in arcminutes
R=(1/tan(radians(alt+7.31/(alt+4.4))))*factor
return R
def calculate_apparent_pos_from_true_pos(ha_true,dec_true,
temp=10,press=101.0):
"""Calculates apparent position (accounting for refraction)
for given true coordinates.
Input: True ha in hours, dec in degrees. Temp in degrees Celsius.
Pressure in kPa.
Output: Apparent ha in hours, dec in degrees.
"""
#Calculate true alt and az
alt_true,az_true,_,__=equ_to_altaz(ha_true,dec_true)
#Calculate Refraction in arcminutes
R_arcmin=calculate_refraction_from_true_coord(ha_true,dec_true,
temp=temp,press=press)
#Transform to degrees
R_deg=R_arcmin/60.
    #Refraction makes stars appear higher than they actually are,
#So we need to add the Refraction degrees to the true altitude.
alt_app=alt_true+R_deg
#Azimuth stays the same
az_app=az_true
#Transform to ha and dec
ha_app,dec_app=altaz_to_equ(alt_app,az_app)
return ha_app, dec_app
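#--- Illustrative sketch (not part of the original module) ---
#Refraction lifts the apparent altitude, so the apparent HA/dec returned here
#differ slightly from the true inputs, most noticeably close to the horizon.
#The coordinates below are arbitrary example values.
if __name__ == '__main__':
    _app_ha, _app_dec = calculate_apparent_pos_from_true_pos(3.0, 10.0)
    print("Apparent HA [h], dec [deg] for true HA=3h, dec=10deg:",
          _app_ha, _app_dec)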
|
StarcoderdataPython
|
9623933
|
import torch
import torch.nn as nn
import numpy as np
from models.real_nvp.real_nvp import RealNVP
from models.resnet.resnet import ResNet
class MLP_ACVAE(nn.Module):
"""RealNVP Model
Based on the paper:
"Density estimation using Real NVP"
by <NAME>, <NAME>, and <NAME>
(https://arxiv.org/abs/1605.08803).
Args:
num_scales (int): Number of scales in the RealNVP model.
in_channels (int): Number of channels in the input.
mid_channels (int): Number of channels in the intermediate layers.
num_blocks (int): Number of residual blocks in the s and t network of
`Coupling` layers.
"""
def __init__(self, shape, num_scales=2, in_channels=3, mid_channels=64, num_blocks=8, conv_encoder=False):
super().__init__()
# Register data_constraint to pre-process images, not learnable
self.register_buffer('data_constraint', torch.tensor([0.9], dtype=torch.float32))
self.shape = shape
        self.latent_dim = np.prod(shape)
if conv_encoder:
encoder_layers = [
ResNet(in_channels=in_channels,
mid_channels=mid_channels,
out_channels=in_channels * 2,
num_blocks=num_blocks * 2,
kernel_size=3,
padding=1,
double_after_norm=False),
nn.Flatten()
]
else:
encoder_layers = [
nn.Flatten(),
nn.Linear(self.latent_dim, self.latent_dim),
nn.ReLU(),
nn.Linear(self.latent_dim, 2 * self.latent_dim),
]
self.encoder = nn.Sequential(*encoder_layers)
self.decoder = RealNVP(num_scales=num_scales,
in_channels=in_channels,
mid_channels=mid_channels,
num_blocks=num_blocks)
self.sigmoid = nn.Sigmoid()
def encode(self, x):
out = self.encoder(x)
mean = out[..., :self.latent_dim]
logvar = out[..., self.latent_dim:]
return mean, logvar
def decode(self, z):
return self.sigmoid(self.decoder(z, reverse=True)[0])
def reparameterize(self, mean, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mean
def forward(self, x, sample=False):
if sample:
return self.decode(x)
mean, logvar = self.encode(x)
z = self.reparameterize(mean, logvar).reshape([-1] + list(self.shape))
x_hat = self.decode(z)
return x_hat, mean, logvar
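# --- Hedged usage sketch (not part of the original module) ---
# Assumes the repo's RealNVP/ResNet imports above resolve and that a
# CIFAR-like 3x32x32 input is a valid shape for the flow; both are
# assumptions of this sketch, not guarantees from this file.
if __name__ == '__main__':
    model = MLP_ACVAE(shape=(3, 32, 32))
    x = torch.rand(4, 3, 32, 32)               # batch of 4 images in [0, 1]
    x_hat, mean, logvar = model(x)             # reconstruction plus posterior stats
    samples = model(torch.randn(4, 3, 32, 32), sample=True)  # decode random latents
    print(x_hat.shape, mean.shape, logvar.shape, samples.shape)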
|
StarcoderdataPython
|
9729407
|
# -*- coding: utf-8 -*-
import factory
from .models import Secret
class SecretFactory(factory.DjangoModelFactory):
"""DjangoModelFactory for object Secret."""
name = factory.Faker('name')
text = factory.Faker('bs')
class Meta:
model = Secret
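# --- Hedged usage sketch (not part of the original module) ---
# Inside a configured Django project (an assumption of this sketch),
# factory_boy fills name/text with Faker data:
# secret = SecretFactory()        # creates and saves a Secret through the ORM
# secret = SecretFactory.build()  # unsaved instance, no database access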
|
StarcoderdataPython
|
8103613
|
<filename>samples/pore_detection.py
import os
import re
import shutil
import subprocess
from os import listdir
from os.path import isfile, join
import cv2
from PIL import Image
from itertools import product
def splitImage(current_working_directory, inputImagePath, outputDirectory, resolution):
path = current_working_directory+"/PoresDetection/high_resolution_image/"
name = "my_image_resized.jpg"
im = Image.open(inputImagePath)
size = im.size[0]*8, im.size[1]*8
print("Resizing image...")
    im_resized = im.resize(size, Image.LANCZOS)  # LANCZOS is the resampling filter formerly exposed as ANTIALIAS
print("Image resized successfully.")
im_resized.save(path+name)
    print("Resized image saved to " + path)
    img = Image.open(path + name)
    w, h = img.size
    grid = product(range(0, h - h % resolution, resolution), range(0, w - w % resolution, resolution))
    numberOfImages = 0
for i, j in grid:
box = (j, i, j + resolution, i + resolution)
out = os.path.join(outputDirectory, f'{i}_{j}.jpg')
img.crop(box).save(out)
        numberOfImages = numberOfImages + 1
    print("Image split successfully into " + str(numberOfImages) + " pictures.")
return size
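# --- Hedged illustration (not part of the original script) ---
# The grid above walks the upscaled image in row-major order and silently
# drops any remainder smaller than one tile. For a hypothetical 1024x768
# image and 512 px tiles the crop origins are (0, 0) and (0, 512):
_demo_tile_origins = list(product(range(0, 768 - 768 % 512, 512),
                                  range(0, 1024 - 1024 % 512, 512)))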
def joinImages(size, current_working_directory):
inputDirectory = current_working_directory+"/PoresDetection/pores_detected/"
file_names = os.listdir(inputDirectory)
joinedImage = Image.new("RGB", (size[0], size[1]), "white")
print("Re-creating image...")
counter = 0
for name in file_names:
counter=counter+1
splittedByDot = name.split(".", )
splitted = splittedByDot[0].split("_", )
num = ""
for c in name:
if c.isdigit():
num = num + c
imagePart = Image.open(inputDirectory+name)
if(len(splitted) >= 2):
joinedImage.paste(imagePart, (int(splitted[1]), int(splitted[0])))
joinedImage.save(current_working_directory+"/PoresDetection/final_fingerprint/pores_predicted_final_image.jpg")
print("DONE!")
def detectPores(inputDirectory):
os.system('python mask_rcnn_detect.py')
def remove_content_of_folders(folder):
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
if __name__ == '__main__':
current_working_directory = os.getcwd()
print(current_working_directory)
if not os.path.exists(current_working_directory+'/PoresDetection/parts_of_image'):
os.makedirs(current_working_directory+'/PoresDetection/parts_of_image')
if not os.path.exists(current_working_directory+'/PoresDetection/pores_detected'):
os.makedirs(current_working_directory+'/PoresDetection/pores_detected')
if not os.path.exists(current_working_directory+'/PoresDetection/final_fingerprint'):
os.makedirs(current_working_directory+'/PoresDetection/final_fingerprint')
if not os.path.exists(current_working_directory+'/PoresDetection/high_resolution_image'):
os.makedirs(current_working_directory+'/PoresDetection/high_resolution_image')
fullImageInputPath = current_working_directory
print("Removing content of folders...")
remove_content_of_folders(current_working_directory+"/PoresDetection/parts_of_image/")
remove_content_of_folders(current_working_directory+"/PoresDetection/pores_detected/")
inputImageName = input("Please enter image path: ")
fullImageInputPath = fullImageInputPath + '/' + inputImageName
size = splitImage(current_working_directory, fullImageInputPath, current_working_directory+"/PoresDetection/parts_of_image/", 512)
detectPores(current_working_directory+"/PoresDetection/parts_of_image/")
joinImages(size, current_working_directory)
|
StarcoderdataPython
|
11263566
|
<reponame>thundercrawl/book-of-qna-code<filename>ch3/dependency-parser-nivre/app/features/extractors.py
# Copyright 2010 <NAME>
##
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
##
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
"""
Various feature extractors for arc-eager and arc-standard transition based parsers
Author: <NAME>
"""
from common import *
import os
# Features #{{{
__EXTRACTORS__ = {}
# extractor combinators #{{{
class ChainedFeatureExtractor: # {{{
def __init__(self, extSeq):
self.extractors = extSeq
def extract(self, stack, deps, sent, i):
fs = []
extend = fs.extend
[extend(e.extract(stack, deps, sent, i)) for e in self.extractors]
return fs
#}}}
class AppendingFeatureExtractor: # {{{
def __init__(self, ext1, ext2):
self.e1 = ext1
self.e2 = ext2
def extract(self, stack, deps, sent, i):
fs = []
fs1 = self.e1.extract(stack, deps, sent, i)
fs2 = self.e2.extract(stack, deps, sent, i)
fs = fs1[:]
for f1 in fs1:
for f2 in fs2:
fs.append("%s^%s" % (f1, f2))
return fs
#}}}
#}}} end combinators
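# --- Hedged usage sketch (not part of the original module) --- #{{{
# The combinators only delegate to their children's extract(): chaining
# concatenates the feature lists, while appending additionally emits the
# "^" cross products. The stub extractor below exists purely for illustration.
class _ConstStubExtractor:
    def __init__(self, feats):
        self.feats = feats

    def extract(self, stack, deps, sent, i):
        return list(self.feats)

if __name__ == '__main__':
    _chained = ChainedFeatureExtractor([_ConstStubExtractor(["a"]), _ConstStubExtractor(["b"])])
    _appended = AppendingFeatureExtractor(_ConstStubExtractor(["a"]), _ConstStubExtractor(["b", "c"]))
    print(_chained.extract([], None, [], 0))   # -> ['a', 'b']
    print(_appended.extract([], None, [], 0))  # -> ['a', 'a^b', 'a^c']
#}}}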
class BetterParentFeatureExtractor: # {{{ doesn't work
"""
this one doesn't really work very well..
    the features are "is there a better parent for X than Y",
for example, is there a better parent for the word on top of the
stack than the next input word?
"""
def __init__(self, parentPredictor):
self.parentPredictor = parentPredictor
self.current_sent = None
def extract(self, stack, deps, sent, i):
if self.current_sent != sent:
self.current_sent = sent
self.parentPredictor.set_sent(sent)
fs = []
add_feature = fs.append
        w = sent[i] if len(sent) > i else PAD  # guard against running past the end of the sentence
s = stack[-1] if len(stack) > 0 else PAD
s1 = stack[-2] if len(stack) > 1 else PAD
wparents = [p for p in self.parentPredictor.best_parents(w['id'], 3)]
sparents = [p for p in self.parentPredictor.best_parents(s['id'], 3)]
s1parents = [p for p in self.parentPredictor.best_parents(s1['id'], 3)]
        # is there a better parent for w than s ?
try:
idx = wparents.index(s['id'])
is_better = "Y" if wparents[:idx] else "N"
except ValueError:
is_better = "T"
add_feature("bp_ws_%s" % is_better)
# is there a better parent for w than s-1 ?
try:
idx = wparents.index(s1['id'])
is_better = "Y" if wparents[:idx] else "N"
except ValueError:
is_better = "T"
add_feature("bp_ws1_%s" % is_better)
# is there a better parent for s than s-1 ?
try:
idx = sparents.index(s1['id'])
is_better = "Y" if sparents[:idx] else "N"
except ValueError:
is_better = "T"
add_feature("bp_ss1_%s" % is_better)
# is there a better parent for s-1 than s ?
try:
idx = s1parents.index(s['id'])
is_better = "Y" if s1parents[:idx] else "N"
except ValueError:
is_better = "T"
add_feature("bp_s1s_%s" % is_better)
return fs
#}}}
# {{{ inherit for easy graph feature extractors
class GraphBasedFeatureExtractor:
def __init__(self, parentPredictor):
self.pp = parentPredictor
self.sent = None
self.parents = {}
self.childs = {}
self.toks = {}
def _init_sent(self, sent):
if self.sent != sent:
self.sent = sent
self.pp.set_sent(sent)
self.parents = {}
self.childs = defaultdict(list)
self.toks = {}
for tok in sent:
self.toks[tok['id']] = tok
for tok in sent:
id = tok['id']
self.parents[id] = self.pp.best_parents(id, 3)
for parent in self.parents[id]:
self.childs[parent].append(id)
def extract(self, stack, deps, sent, i):
self._init_sent(sent)
return self._extract(stack, deps, sent, i)
def _extract(self, stack, deps, sent, i):
assert False, "must implement in child"
#}}}
class ChildsOfNextWordFeatureExtractor(GraphBasedFeatureExtractor): # {{{
def _extract(self, stack, deps, sent, i):
fs = []
if i >= len(sent):
return fs
w = sent[i]
built_childs = deps.all_childs
for child in self.childs[w['id']]:
if child not in built_childs:
#fs.append("w_cld_%s" % self.toks[child]['tag'])
#fs.append("w_cld_%s" % self.toks[child]['form'])
fs.append(
"st_s_cld_%s_%s" %
(self.toks[child]['tag'], w['tag']))
fs.append(
"st_s_cld_%s_%s" %
(self.toks[child]['form'], w['tag']))
return fs
#}}}
class ChildsOfStackWordFeatureExtractor(GraphBasedFeatureExtractor): # {{{
def _extract(self, stack, deps, sent, i):
        #Disabled: this early return short-circuits the extractor, so the
        #feature code below is currently unreachable.
        return []
fs = []
if not stack:
return fs
w = stack[-1]
built_childs = deps.all_childs
possible_childs = set(self.childs[w['id']])
for child in possible_childs - built_childs:
#fs.append("s_cld_%s" % (self.toks[child]['tag']))
#fs.append("s_cld_%s" % (self.toks[child]['form']))
fs.append("st_s_cld_%s_%s" % (self.toks[child]['tag'], w['tag']))
fs.append("st_s_cld_%s_%s" % (self.toks[child]['form'], w['tag']))
#fs.append("sf_s_cld_%s_%s" % (self.toks[child]['tag'],w['form']))
#fs.append("sf_s_cld_%s_%s" % (self.toks[child]['form'],w['form']))
fs.append("s#pos_childs_%s" % len(possible_childs - built_childs))
# fs.append("ts#pos_childs_%s_%s" % (w['tag'],len(possible_childs -
# built_childs)))
if not len(stack) > 1:
return fs
w = stack[-2]
built_childs = deps.all_childs
possible_childs = set(self.childs[w['id']])
for child in possible_childs - built_childs:
#fs.append("s1_cld_%s" % (self.toks[child]['tag']))
#fs.append("s1_cld_%s" % (self.toks[child]['form']))
fs.append("s1t_s_cld_%s_%s" % (self.toks[child]['tag'], w['tag']))
fs.append("s1t_s_cld_%s_%s" % (self.toks[child]['form'], w['tag']))
#fs.append("s1f_s_cld_%s_%s" % (self.toks[child]['tag'],w['form']))
#fs.append("s1f_s_cld_%s_%s" % (self.toks[child]['form'],w['form']))
fs.append("s1#pos_childs_%s" % len(possible_childs - built_childs))
# fs.append("ts1#pos_childs_%s_%s" % ((w['tag']),len(possible_childs -
# built_childs)))
return fs
#}}}
# Working baselines #{{{
class WenbinFeatureExtractor: # {{{
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
features.append("s_%s" % s)
features.append("s1_%s" % s1)
features.append("w_%s" % w)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
features.append("Tss_%s_%s" % (Ts, s))
features.append("Ts1s1_%s_%s" % (Ts1, s1))
features.append("Tww_%s_%s" % (Tw, w))
# bigram
features.append("ss1_%s_%s" % (s, s1)) # @
features.append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
features.append("sTss1_%s_%s_%s" % (s, Ts, s1)) # @
features.append("TsTs1_%s_%s" % (Ts, Ts1))
features.append("ss1Ts1_%s_%s_%s" % (s, s1, Ts1)) # @
features.append("sTss1Ts1_%s_%s_%s_%s" % (s, Ts, s1, Ts1)) # @
features.append("TsTw_%s_%s" % (Ts, Tw))
features.append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# more bigrams! [look at next word FORM] # with these, 87.45 vs 87.09,
# train 15-18, test 22
features.append("ws1_%s_%s" % (w, s1)) # @
features.append("ws_%s_%s" % (w, s)) # @
features.append("wTs1_%s_%s" % (w, Ts1)) # @
features.append("wTs_%s_%s" % (w, Ts)) # @
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts1sTw1_%s_%s_%s" % (Ts1, s, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
features.append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
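# --- Hedged usage sketch (not part of the original module) --- #{{{
# Illustrates the expected inputs: tokens are dicts with at least 'id',
# 'form' and 'tag'; deps must expose left_child/right_child returning a
# token dict or None. The toy sentence and the empty-deps stub are made up,
# and the sketch assumes common.PAD is a token-like dict (as the code above
# already does).
class _NoDeps:
    def left_child(self, tok):
        return None

    def right_child(self, tok):
        return None

if __name__ == '__main__':
    _toy_sent = [{'id': n, 'form': f, 'tag': t} for n, (f, t) in
                 enumerate([('the', 'DT'), ('dog', 'NN'), ('barks', 'VBZ'), ('.', '.')])]
    _toy_feats = WenbinFeatureExtractor().extract(_toy_sent[:2], _NoDeps(), _toy_sent, 2)
    print(len(_toy_feats), _toy_feats[:3])
#}}}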
class WenbinFeatureExtractor_plus: # {{{
"""
    like WenbinFeatureExtractor but also includes the POS of sent[i-1] and sent[i-2]
"""
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
wm1 = sent[i - 1] if i - 1 > 0 else PAD
wm2 = sent[i - 2] if i - 2 > 0 else PAD
Twm1 = wm1['tag']
Twm2 = wm2['tag']
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
features.append("s_%s" % s)
features.append("s1_%s" % s1)
features.append("w_%s" % w)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
features.append("Tss_%s_%s" % (Ts, s))
features.append("Ts1s1_%s_%s" % (Ts1, s1))
features.append("Tww_%s_%s" % (Tw, w))
#@NEW 4
features.append("Twm1_%s" % Twm1)
features.append("Twm2_%s" % Twm2)
features.append("Twm1_%s_%s" % (Twm1, wm1['form']))
features.append("Twm2_%s_%s" % (Twm2, wm1['form']))
# bigram
features.append("ss1_%s_%s" % (s, s1)) # @
features.append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
features.append("sTss1_%s_%s_%s" % (s, Ts, s1)) # @
features.append("TsTs1_%s_%s" % (Ts, Ts1))
features.append("ss1Ts1_%s_%s_%s" % (s, s1, Ts1)) # @
features.append("sTss1Ts1_%s_%s_%s_%s" % (s, Ts, s1, Ts1)) # @
features.append("TsTw_%s_%s" % (Ts, Tw))
features.append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# more bigrams! [look at next word FORM] # with these, 87.45 vs 87.09,
# train 15-18, test 22
features.append("ws1_%s_%s" % (w, s1)) # @
features.append("ws_%s_%s" % (w, s)) # @
features.append("wTs1_%s_%s" % (w, Ts1)) # @
features.append("wTs_%s_%s" % (w, Ts)) # @
features.append("TwTwm1_%s_%s" % (Tw, Twm1)) # @NEW
features.append("Twm1Twm2_%s_%s" % (Twm1, Twm2)) # @NEW
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts1sTw1_%s_%s_%s" % (Ts1, s, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
features.append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
class Degree2FeatureExtractor: # {{{
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
features.append("s_%s" % s)
features.append("s1_%s" % s1)
features.append("w_%s" % w)
features.append("w1_%s" % w1)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
features.append("Tw1_%s" % Tw1)
features.append("Tlcs_%s" % Tlcs)
features.append("Tlcs1_%s" % Tlcs1)
features.append("Trcs_%s" % Trcs)
features.append("Trcs1_%s" % Trcs1)
# @@ TODO: feature expand
fs = ["%s_%s" % (f1, f2) for f1 in features for f2 in features]
return fs
#}}}
class EagerWenbinFeatureExtractor: # {{{
"""
my adaptation of WenbinFeatureExtractor to work for arc-eager
(trivial -- just shift the focus from stack-1,stack-2 to stack-1,input[0])
"""
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
if len(sent) < i + 3:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i + 1]
w1 = sent[i + 2]
s = sent[i]
s1 = stack[-1]
s2 = stack[-2]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
features.append("s_%s" % s)
features.append("s1_%s" % s1)
features.append("w_%s" % w)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
features.append("Tss_%s_%s" % (Ts, s))
features.append("Ts1s1_%s_%s" % (Ts1, s1))
features.append("Tww_%s_%s" % (Tw, w))
# bigram
features.append("ss1_%s_%s" % (s, s1)) # @
features.append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
features.append("sTss1_%s_%s_%s" % (s, Ts, s1)) # @
features.append("TsTs1_%s_%s" % (Ts, Ts1))
features.append("ss1Ts1_%s_%s_%s" % (s, s1, Ts1)) # @
features.append("sTss1Ts1_%s_%s_%s_%s" % (s, Ts, s1, Ts1)) # @
features.append("TsTw_%s_%s" % (Ts, Tw))
features.append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# more bigrams! [look at next word FORM] # with these, 87.45 vs 87.09,
# train 15-18, test 22
features.append("ws1_%s_%s" % (w, s1)) # @
features.append("ws_%s_%s" % (w, s)) # @
features.append("wTs1_%s_%s" % (w, Ts1)) # @
features.append("wTs_%s_%s" % (w, Ts)) # @
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts1sTw1_%s_%s_%s" % (Ts1, s, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
features.append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
class EagerDegree2FeatureExtractor: # {{{
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 3:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i + 1]
w1 = sent[i + 2]
s = sent[i]
s1 = stack[-1]
s2 = stack[-2]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
features.append("s_%s" % s)
features.append("s1_%s" % s1)
features.append("w_%s" % w)
features.append("w1_%s" % w1)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
features.append("Tw1_%s" % Tw1)
features.append("Tlcs_%s" % Tlcs)
features.append("Tlcs1_%s" % Tlcs1)
features.append("Trcs_%s" % Trcs)
features.append("Trcs1_%s" % Trcs1)
# @@ TODO: feature expand
fs = ["%s_%s" % (f1, f2) for f1 in features for f2 in features]
return fs
#}}}
class EagerZhangFeatureExtractor: # {{{
"""
arc-eager features from "Tale of two parsers"
http://www.aclweb.org/anthology/D/D08/D08-1059.pdf
table 3
"""
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 3:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
st0 = stack[-1]
n0 = sent[i]
n1 = sent[i + 1]
n2 = sent[i + 2]
# stack top
STw = st0['form']
STt = st0['tag']
features.append("STwt_%s%s" % (STw, STt))
features.append("STw_%s" % (STw))
features.append("STt_%s" % (STt))
# current word
N0w = n0['form']
N0t = n0['tag']
features.append("N0wt_%s%s" % (N0w, N0t))
features.append("N0t_%s" % N0t)
features.append("N0w_%s" % N0w)
# next word
N1w = n1['form']
N1t = n1['tag']
N2t = n2['tag']
features.append("N1wt_%s%s" % (N1w, N1t))
features.append("N1t_%s" % N1t)
features.append("N1w_%s" % N1w)
# ST and N0
features.append("STwtN0wt_%s_%s_%s_%s" % (STw, STt, N0w, N0t))
features.append("STwtN0w_%s_%s_%s" % (STw, STt, N0w))
features.append("STwN0wt_%s_%s_%s" % (STw, N0w, N0t))
features.append("STwtN0t_%s_%s_%s" % (STw, STt, N0t))
features.append("STtN0wt_%s_%s_%s" % (STt, N0w, N0t))
features.append("STwN0w_%s_%s" % (STw, N0w))
features.append("STtN0t_%s_%s" % (STt, N0t))
# pos bigram
features.append("N0tN1t_%s_%s" % (N0t, N1t))
# pos trigram
STPt = deps.parent(st0)
if STPt:
STPt = STPt['tag']
STRCt = deps.right_child(st0)
if STRCt:
STRCt = STRCt['tag']
STLCt = deps.left_child(st0)
if STLCt:
STLCt = STLCt['tag']
N0LCt = deps.left_child(n0)
if N0LCt:
N0LCt = N0LCt['tag']
features.append("N0tN1tN2t_%s_%s_%s" % (N0t, N1t, N2t))
features.append("STtN0tN1t_%s_%s_%s" % (STt, N0t, N1t))
features.append("STPtSTtN0t_%s_%s_%s" % (STPt, STt, N0t))
features.append("STtSTLCtN0t_%s_%s_%s" % (STt, STLCt, N0t))
features.append("STtSTRCtN0t_%s_%s_%s" % (STt, STRCt, N0t))
features.append("STtN0tN0LCt_%s_%s_%s" % (STt, N0t, N0LCt))
# N0 word
features.append("N0wN1tN2t_%s_%s_%s" % (N0w, N1t, N2t))
features.append("STtN0wN1t_%s_%s_%s" % (STt, N0w, N1t))
features.append("STPtSTtN0w_%s_%s_%s" % (STPt, STt, N0w))
features.append("STtSTLCtN0w_%s_%s_%s" % (STt, STLCt, N0w))
features.append("STtSTRCtN0w_%s_%s_%s" % (STt, STRCt, N0w))
features.append("STtN0wN0LCt_%s_%s_%s" % (STt, N0w, N0LCt))
return features
#}}}
class ExtendedEagerZhangFeatureExtractor: # {{{
"""
arc-eager features from "Tale of two parsers"
http://www.aclweb.org/anthology/D/D08/D08-1059.pdf
table 3
extended with additional features,
for the "cut trees" parsing
"""
def __init__(self, level=1):
self.level = level
pass
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 3:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
st0 = stack[-1]
n0 = sent[i]
n1 = sent[i + 1]
n2 = sent[i + 2]
st0par = deps.parent(st0)
# Extended participants
nm1 = sent[i - 1] if i - 1 > 0 else PAD
nm2 = sent[i - 2] if i - 2 > 0 else PAD
nm3 = sent[i - 3] if i - 3 > 0 else PAD
st0par2 = deps.parent(st0par) if st0par is not None else None
st0par3 = deps.parent(st0par2) if st0par2 is not None else None
# stack top
STw = st0['form']
STt = st0['tag']
features.append("STwt_%s%s" % (STw, STt))
features.append("STw_%s" % (STw))
features.append("STt_%s" % (STt))
# current word
N0w = n0['form']
N0t = n0['tag']
features.append("N0wt_%s%s" % (N0w, N0t))
features.append("N0t_%s" % N0t)
features.append("N0w_%s" % N0w)
# next word
N1w = n1['form']
N1t = n1['tag']
N2t = n2['tag']
features.append("N1wt_%s%s" % (N1w, N1t))
features.append("N1t_%s" % N1t)
features.append("N1w_%s" % N1w)
# ST and N0
features.append("STwtN0wt_%s_%s_%s_%s" % (STw, STt, N0w, N0t))
features.append("STwtN0w_%s_%s_%s" % (STw, STt, N0w))
features.append("STwN0wt_%s_%s_%s" % (STw, N0w, N0t))
features.append("STwtN0t_%s_%s_%s" % (STw, STt, N0t))
features.append("STtN0wt_%s_%s_%s" % (STt, N0w, N0t))
features.append("STwN0w_%s_%s" % (STw, N0w))
features.append("STtN0t_%s_%s" % (STt, N0t))
# pos bigram
features.append("N0tN1t_%s_%s" % (N0t, N1t))
# pos trigram
STPt = st0par
if STPt:
STPt = STPt['tag']
STRCt = deps.right_child(st0)
if STRCt:
STRCt = STRCt['tag']
STLCt = deps.left_child(st0)
if STLCt:
STLCt = STLCt['tag']
N0LCt = deps.left_child(n0)
if N0LCt:
N0LCt = N0LCt['tag']
features.append("N0tN1tN2t_%s_%s_%s" % (N0t, N1t, N2t))
features.append("STtN0tN1t_%s_%s_%s" % (STt, N0t, N1t))
features.append("STPtSTtN0t_%s_%s_%s" % (STPt, STt, N0t))
features.append("STtSTLCtN0t_%s_%s_%s" % (STt, STLCt, N0t))
features.append("STtSTRCtN0t_%s_%s_%s" % (STt, STRCt, N0t))
features.append("STtN0tN0LCt_%s_%s_%s" % (STt, N0t, N0LCt))
# N0 word
features.append("N0wN1tN2t_%s_%s_%s" % (N0w, N1t, N2t))
features.append("STtN0wN1t_%s_%s_%s" % (STt, N0w, N1t))
features.append("STPtSTtN0w_%s_%s_%s" % (STPt, STt, N0w))
features.append("STtSTLCtN0w_%s_%s_%s" % (STt, STLCt, N0w))
features.append("STtSTRCtN0w_%s_%s_%s" % (STt, STRCt, N0w))
features.append("STtN0wN0LCt_%s_%s_%s" % (STt, N0w, N0LCt))
# Extended
Nm1t = nm1['tag']
Nm2t = nm2['tag']
Nm3t = nm3['tag']
Nm1w = nm1['form']
features.append("N-1tN0t_%s_%s" % (Nm1t, N0t))
features.append("N-2tN-1t_%s_%s" % (Nm2t, Nm1t))
features.append("N-3tN-2t_%s_%s" % (Nm3t, Nm2t))
features.append("N-1tN0tN0w_%s_%s_%s" % (Nm1t, N0t, N0w))
features.append("N-1tN-1wN0w_%s_%s_%s" % (Nm1t, Nm1w, N0t))
# extended plus
if self.level > 1:
pars = []
par = deps.parent(st0)
while par is not None:
pars.append(par)
par = deps.parent(par)
for par in pars:
features.append("stppt_N0t_%s_%s" % (par['tag'], N0t))
features.append(
"stppt_N0tN0w_%s_%s_%s" %
(par['tag'], N0t, N0w))
features.append(
"stpptw_N0t_%s_%s_%s" %
(par['tag'], par['form'], N0t))
features.append(
"stpptw_N0tN0w_%s_%s_%s_%s" %
(par['tag'], par['form'], N0t, N0w))
# extended plusplus
if self.level > 2:
_top = pars[-1] if pars else st0
if _top is not NOPARENT:
_idx = stack.index(_top)
prev = stack[_idx - 1] if _idx - 1 > 0 else PAD
else:
prev = NOPARENT
features.append("stPRVt_st0t_%s_%s" % (prev['tag'], STt))
features.append("stPRVt_stTopt_%s_%s" % (prev['tag'], _top['tag']))
features.append("stPRVt_stTopt_%s_%s" % (_top['tag'], STt))
# extended plus-plus-plus: looking at all valid previous tokens
if self.level > 3:
lencurrent = 0
for _tok in sent[i - 1:0:-1]:
if deps.parent(_tok) is None:
lencurrent = sent[i]['id'] - _tok['id']
break
for x in (0, 1, 2, 3, 5, 7, 10):
if lencurrent > x:
features.append("len>%s" % x)
else:
features.append("len<=%s" % x)
# sent[i]['__par']=-99
# for t in sent[:i]:
# par = t['__par']
# if par==-99:
# par = deps.parent(t)
# t['__par']=par
# if (par is None) or par['id'] < t['id']:
# features.append("PRVt_st0t_%s_%s" % (t['tag'],STt))
# features.append("PRVt_n0t_%s_%s" % (t['tag'],N0t))
# features.append("PRVt_st0t_%s_%s_%s_%s" % (t['tag'],STt,t['form'],STw))
# features.append("PRVt_n0t_%s_%s_%s_%s" % (t['tag'],N0t,t['form'],N0w))
return features
#}}}
class EagerMaltFeatureExtractor: # {{{
"""
Arabic malt-parser features
Based on ara.par, without the morph/lem/dep features
"""
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 3:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
""" {{{
POS STACK
POS INPUT
POS INPUT 1
POS INPUT 2
POS STACK 1
POS STACK 0 0 0 -1 1
POS INPUT 0 -1
CPOS STACK
CPOS INPUT
CPOS STACK 0 0 1 -1
DEP STACK
FEATS STACK
FEATS INPUT
LEMMA STACK
LEMMA INPUT
LEX STACK
LEX INPUT
LEX INPUT 1
LEX STACK 0 0 1
LEX INPUT 0 -1
LEX STACK 0 0 0 1 -1
""" # }}}
# participants
in0 = sent[i]
in1 = sent[i + 1]
in2 = sent[i + 2]
st0 = stack[-1]
st1 = stack[-2]
st000_11 = deps.sibling(deps.left_child(st0), 1)
in0_1 = sent[i - 1]
st001 = deps.parent(st0)
st001_1 = deps.left_child(st001)
st0001_1 = deps.sibling(deps.right_child(st0), -1)
if not st001:
st001 = {'tag': None, 'ctag': None, 'form': None}
if not st000_11:
st000_11 = {'tag': None, 'ctag': None, 'form': None}
if not st001_1:
st001_1 = {'tag': None, 'ctag': None, 'form': None}
if not st0001_1:
st0001_1 = {'tag': None, 'ctag': None, 'form': None}
features.append("Tst0_%s" % st0['tag'])
features.append("Tin0_%s" % in0['tag'])
features.append("Tin1_%s" % in1['tag'])
features.append("Tin2_%s" % in2['tag'])
features.append("Tst1_%s" % st1['tag'])
features.append("Tst000-11_%s" % st000_11['tag'])
features.append("Tin0-1_%s" % in0_1['tag'])
features.append("CTst0_%s" % st0['ctag'])
features.append("CTin0_%s" % in0['ctag'])
features.append("CTst001_1_%s" % st001_1['ctag'])
# dep_st0 -- skipped
# feats, lemmas: skipped
features.append("Lst0_%s" % st0['form'])
features.append("Lin0_%s" % in0['form'])
features.append("Lin1_%s" % in1['form'])
features.append("Lst001_%s" % st001['form'])
features.append("Lin0-1_%s" % in0_1['form'])
features.append("Lst0001-1_%s" % st0001_1['form'])
if 'morph' in st0:
for f in st0['morph']:
features.append("Mst0_%s" % f)
if 'morph' in in0:
for f in in0['morph']:
features.append("Min0_%s" % f)
if 'lem' in in0:
features.append("LMin0_%s" % in0['lem'])
if 'lem' in st0:
features.append("LMst0_%s" % st0['lem'])
# @@ TODO: feature expand
fs = ["%s_%s" % (f1, f2) for f1 in features for f2 in features]
return fs
#}}}
class EagerMaltEnglishFeatureExtractor: # {{{
"""
English malt-parser features
Based on eng.par, without the dep features
"""
def __init__(self, allpairs=False, words=None):
self.allpairs = allpairs
pass
def extract(self, stack, deps, sent, i):
features = []
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 4:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
""" {{{
POS STACK
POS INPUT
POS INPUT 1
POS INPUT 2
POS INPUT 3
POS STACK 1
POS STACK 0 0 0 -1
POS INPUT 0 0 0 -1
CPOS STACK
CPOS INPUT
CPOS STACK 0 -1
DEP STACK
DEP STACK 0 0 0 -1
DEP STACK 0 0 0 1
DEP INPUT 0 0 0 -1
LEX STACK
LEX INPUT
LEX INPUT 1
LEX STACK 0 0 1
""" # }}}
# participants
in0 = sent[i]
in1 = sent[i + 1]
in2 = sent[i + 2]
in3 = sent[i + 3]
st0 = stack[-1]
st1 = stack[-2]
st000_1 = deps.left_child(st0) # left child of stack
in000_1 = deps.left_child(in0)
if st0['id'] == 0 or st0 == PAD:
st0_1 = PAD
else:
# token just before top-of-stack in input string
st0_1 = sent[st0['id'] - 1]
assert(st0_1['id'] == st0['id'] -
1), "%s %s" % (st0_1['id'], st0['id'] - 1)
st0001 = deps.right_child(st0) # right child of stack
st001 = deps.parent(st0)
if not st001:
st001 = {'tag': None, 'ctag': None, 'form': None}
if not st000_1:
st000_1 = {'tag': None, 'ctag': None, 'form': None}
if not in000_1:
in000_1 = {'tag': None, 'ctag': None, 'form': None}
if not st0_1:
st0_1 = {'tag': None, 'ctag': None, 'form': None}
if not st0001:
st0001 = {'tag': None, 'ctag': None, 'form': None}
f = features.append
f("ps_%s" % st0['tag'])
f("pi_%s" % in0['tag'])
f("pi1_%s" % in1['tag'])
f("pi2_%s" % in2['tag'])
f("pi3_%s" % in3['tag'])
f("ps1_%s" % st1['tag'])
f("ps000-1_%s" % st000_1['tag'])
f("pi000-1_%s" % in000_1['tag'])
f("cps_%s" % st0['ctag'])
f("cpi_%s" % in0['ctag'])
f("cps0-1_%s" % st0_1['ctag'])
# dep_st... -- skipped
f("ls_%s" % st0['form'])
f("li_%s" % in0['form'])
f("li1_%s" % in1['form'])
f("ls001_%s" % st001['form'])
# @@ TODO: feature expand
if self.allpairs:
fs = ["%s_%s" % (f1, f2) for f1 in features for f2 in features]
return fs
return features
#}}}
class WenbinFeatureExtractor2: # {{{
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
append = features.append
# participants
w = sent[i] if len(sent) > i else PAD
w1 = sent[i + 1] if len(sent) > i + 1 else PAD
s = stack[-1] if len(stack) > 0 else PAD
s1 = stack[-2] if len(stack) > 1 else PAD
s2 = stack[-3] if len(stack) > 2 else PAD
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram
append("s_%s" % s)
append("s1_%s" % s1)
append("w_%s" % w)
append("Ts_%s" % Ts)
append("Ts1_%s" % Ts1)
append("Tw_%s" % Tw)
append("Tss_%s_%s" % (Ts, s))
append("Ts1s1_%s_%s" % (Ts1, s1))
append("Tww_%s_%s" % (Tw, w))
# bigram
append("ss1_%s_%s" % (s, s1))
append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
append("sTss1_%s_%s_%s" % (s, Ts, s1))
append("TsTs1_%s_%s" % (Ts, Ts1))
append("ss1Ts1_%s_%s_%s" % (s, s1, Ts1))
append("sTss1Ts1_%s_%s_%s_%s" % (s, Ts, s1, Ts1))
append("TsTw_%s_%s" % (Ts, Tw))
append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# trigram
append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
append("Ts1sTw1_%s_%s_%s" % (Ts1, s, Tw))
append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
#}}}
class UnlexFeatureExtractor: # {{{ 83.81 train 15-18 test 22
def __init__(self):
self.last_sent = None
def extract(self, stack, deps, sent, i):
features = []
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
Tw1 = w1['tag']
Ts = s['tag']
Ts1 = s1['tag']
Ts2 = s2['tag']
# unigram
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
# bigram
features.append("TsTs1_%s_%s" % (Ts, Ts1))
features.append("TsTw_%s_%s" % (Ts, Tw))
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
return features
#}}}
from collections import defaultdict
class UnlexWenbinPlusFeatureExtractor: # {{{ Good one!
def __init__(self):
self.dat = {}
datafile = open(os.path.join(os.path.dirname(__file__), "data"), "r")
for line in datafile.readlines():
line = line.strip().split()
self.dat[(line[0], line[1])] = int(line[2])
self.dat = defaultdict(int, [(k, v)
for k, v in self.dat.items() if v > 5])
def extract(self, stack, deps, sent, i):
features = []
import math
# new features, which I think helps..
#features.append("toend_%s" % round(math.log(len(sent)+3-i)))
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# if Ts1=='IN':
# features.append("1par_s1_s:%s" % (self.dat[(s1,s)],))
# features.append("1par_s_s1:%s" % (self.dat[(s,s1)],))
# features.append("1par_s1_w:%s" % (self.dat[(s1,w)],))
# features.append("1par_w_s1:%s" % (self.dat[(w,s1)],))
# if Ts == 'IN':
# features.append("2par_s1_s:%s" % (self.dat[(s1,s)],))
# features.append("2par_s_s1:%s" % (self.dat[(s,s1)],))
# features.append("2par_s_w:%s" % (self.dat[(s,w)],))
# features.append("2par_w_s:%s" % (self.dat[(w,s)],))
# if Tw=='IN':
# features.append("3par_w_s1:%s" % (self.dat[(w,s1)],))
# features.append("3par_s1_w:%s" % (self.dat[(s1,w)],))
# features.append("3par_w_s:%s" % (self.dat[(w,s)],))
# features.append("3par_s_w:%s" % (self.dat[(s,w)],))
if Tw == 'IN':
if (s1, w) in self.dat or (s, w) in self.dat:
features.append("m1_%s" %
(self.dat[(s1, w)] > self.dat[(s, w)]))
else:
features.append("m1_NA")
# if Ts=='IN':
# features.append("m2_%s" % (self.dat[(s1,s)]-self.dat[(w,s)]))
# if Ts1=='IN':
# features.append("m3_%s" % (self.dat[(s,s1)]-self.dat[(w,s1)]))
# unigram 87,71 (conditioning) vs 87,5 (removing)
if Ts[0] not in 'JNV':
features.append("s_%s" % s)
if Ts1[0] not in 'JNV':
features.append("s1_%s" % s1)
if Tw[0] not in 'JNV':
features.append("w_%s" % w)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
#features.append("Tss_%s_%s" % (Ts,s))
#features.append("Ts1s1_%s_%s" % (Ts1,s1))
#features.append("Tww_%s_%s" % (Tw,w))
# bigram
# features.append("ss1_%s_%s" % (s,s1)) #@
features.append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
# features.append("sTss1_%s_%s_%s" % (s,Ts,s1)) #@
features.append("TsTs1_%s_%s" % (Ts, Ts1))
# features.append("ss1Ts1_%s_%s_%s" % (s,s1,Ts1)) #@
# features.append("sTss1Ts1_%s_%s_%s_%s" % (s,Ts,s1,Ts1)) #@
features.append("TsTw_%s_%s" % (Ts, Tw))
features.append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# more bigrams! [look at next word FORM] # with these, 87.45 vs 87.09, train 15-18, test 22
# features.append("ws1_%s_%s" % (w,s1)) #@
# features.append("ws_%s_%s" % (w,s)) #@
features.append("wTs1_%s_%s" % (w, Ts1)) # @
features.append("wTs_%s_%s" % (w, Ts)) # @
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts1sTw_%s_%s_%s" % (Ts1, s, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
features.append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
class BestSoFarFeatureExtractor: # {{{ 87.71 train 15-18 test 22
def __init__(self):
pass
def extract(self, stack, deps, sent, i):
features = []
import math
if len(sent) < i + 2:
sent = sent[:]
sent.append(PAD)
sent.append(PAD)
if len(stack) < 3:
stack = [PAD, PAD, PAD] + stack
# participants
w = sent[i]
w1 = sent[i + 1]
s = stack[-1]
s1 = stack[-2]
s2 = stack[-3]
Tlcs1 = deps.left_child(s1)
if Tlcs1:
Tlcs1 = Tlcs1['tag']
Tlcs = deps.left_child(s)
if Tlcs:
Tlcs = Tlcs['tag']
Trcs = deps.right_child(s)
if Trcs:
Trcs = Trcs['tag']
Trcs1 = deps.right_child(s1)
if Trcs1:
Trcs1 = Trcs1['tag']
Tw = w['tag']
w = w['form']
Tw1 = w1['tag']
w1 = w1['form']
Ts = s['tag']
s = s['form']
Ts1 = s1['tag']
s1 = s1['form']
Ts2 = s2['tag']
s2 = s2['form']
# unigram 87,71 (conditioning) vs 87,5 (removing)
if Ts[0] not in 'JNV':
features.append("s_%s" % s)
if Ts1[0] not in 'JNV':
features.append("s1_%s" % s1)
if Tw[0] not in 'JNV':
features.append("w_%s" % w)
features.append("Ts_%s" % Ts)
features.append("Ts1_%s" % Ts1)
features.append("Tw_%s" % Tw)
#features.append("Tss_%s_%s" % (Ts,s))
#features.append("Ts1s1_%s_%s" % (Ts1,s1))
#features.append("Tww_%s_%s" % (Tw,w))
# bigram
# features.append("ss1_%s_%s" % (s,s1)) #@
features.append("Tss1Ts1_%s_%s_%s" % (Ts, s1, Ts1))
# features.append("sTss1_%s_%s_%s" % (s,Ts,s1)) #@
features.append("TsTs1_%s_%s" % (Ts, Ts1))
# features.append("ss1Ts1_%s_%s_%s" % (s,s1,Ts1)) #@
# features.append("sTss1Ts1_%s_%s_%s_%s" % (s,Ts,s1,Ts1)) #@
features.append("TsTw_%s_%s" % (Ts, Tw))
features.append("sTsTs1_%s_%s_%s" % (s, Ts, Ts1))
# more bigrams! [look at next word FORM] # with these, 87.45 vs 87.09, train 15-18, test 22
# features.append("ws1_%s_%s" % (w,s1)) #@
# features.append("ws_%s_%s" % (w,s)) #@
features.append("wTs1_%s_%s" % (w, Ts1)) # @
features.append("wTs_%s_%s" % (w, Ts)) # @
# trigram
features.append("TsTwTw1_%s_%s_%s" % (Ts, Tw, Tw1))
features.append("sTwTw1_%s_%s_%s" % (s, Tw, Tw1))
features.append("Ts1TsTw_%s_%s_%s" % (Ts1, Ts, Tw))
features.append("Ts1sTw_%s_%s_%s" % (Ts1, s, Tw))
features.append("Ts2Ts1Ts_%s_%s_%s" % (Ts2, Ts1, Ts))
# modifier
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTrcs_%s_%s_%s" % (Ts1, Ts, Trcs))
features.append("Ts1sTlcs_%s_%s_%s" % (Ts1, s, Tlcs))
features.append("Ts1Trcs1Ts_%s_%s_%s" % (Ts1, Trcs1, Ts))
features.append("Ts1Tlcs1Ts_%s_%s_%s" % (Ts1, Tlcs1, Ts))
features.append("Ts1TsTlcs_%s_%s_%s" % (Ts1, Ts, Tlcs))
features.append("Ts1Trcs1s_%s_%s_%s" % (Ts1, Trcs1, s))
return features
#}}}
#}}}
__EXTRACTORS__['eager.zhang'] = EagerZhangFeatureExtractor()
__EXTRACTORS__['eager.zhang.ext'] = ExtendedEagerZhangFeatureExtractor()
__EXTRACTORS__['eager.zhang.ext2'] = ExtendedEagerZhangFeatureExtractor(2)
__EXTRACTORS__['eager.zhang.ext3'] = ExtendedEagerZhangFeatureExtractor(3)
__EXTRACTORS__['eager.zhang.ext4'] = ExtendedEagerZhangFeatureExtractor(4)
__EXTRACTORS__['eager.malt.eng'] = EagerMaltEnglishFeatureExtractor(
allpairs=True)
__EXTRACTORS__['standard.wenbin'] = WenbinFeatureExtractor() # Good one
__EXTRACTORS__['standard.wenbinplus'] = WenbinFeatureExtractor_plus() # Good one
__EXTRACTORS__['standard.deg2'] = Degree2FeatureExtractor()
__EXTRACTORS__['standard.unlex.wb'] = UnlexWenbinPlusFeatureExtractor()
__EXTRACTORS__['standard.unlex'] = UnlexFeatureExtractor()
def get(name):
try:
return __EXTRACTORS__[name]
except KeyError:
import sys
sys.stderr.write(
"invalid feature extactor %s. possible values: %s\n" %
(name, __EXTRACTORS__.keys()))
sys.exit()
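# Illustrative usage sketch (not part of the original module; `stack`, `deps`,
# `sent` and `i` stand for the parser state passed to every extractor above):
#   extractor = get('standard.wenbin')
#   feats = extractor.extract(stack, deps, sent, i)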
|
StarcoderdataPython
|
8077024
|
<reponame>CityU-AIM-Group/GFBS
from .GateVGG import *
from .GateResNet import *
from .GateResNet50 import *
from .GateMobileNetv2 import *
from .GateDenseNet40 import *
|
StarcoderdataPython
|
11353250
|
<gh_stars>0
import pygame
import pygame_textinput
from scene.scene_base import SceneBase
#from scene.lobby_scene import LobbyScene
from scene.waiting_scene import WaitingScene
class InputScene(SceneBase):
def __init__(self):
SceneBase.__init__(self)
self.textinput = pygame_textinput.TextInput()
def ProcessInput(self, events, pressed_keys):
if self.textinput.update(events):
self.SwitchToScene(WaitingScene(self.textinput.get_text()))
#for event in events:
# if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
# # Move to the next scene when the user pressed Enter
# self.SwitchToScene(LobbyScene())
def Update(self):
pass
def Render(self, screen):
# For the sake of brevity, the title scene is a blank red screen
screen.fill((0, 255, 255))
#print("Render Title Scene")
screen.blit(self.textinput.get_surface(), (10, 10))
|
StarcoderdataPython
|
34718
|
<reponame>garysb/dismantle
import os
from pathlib import Path
import pytest
from dismantle.package import DirectoryPackageFormat, PackageFormat
def test_inherits() -> None:
assert issubclass(DirectoryPackageFormat, PackageFormat) is True
def test_grasp_exists(datadir: Path) -> None:
src = datadir.join('directory_src')
assert DirectoryPackageFormat.grasps(src) is True
def test_grasp_non_existant(datadir: Path) -> None:
src = datadir.join('directory_non_existant')
assert DirectoryPackageFormat.grasps(src) is False
def test_grasp_not_supported(datadir: Path) -> None:
src = datadir.join('package.zip')
assert DirectoryPackageFormat.grasps(src) is False
def test_extract_not_supported(datadir: Path) -> None:
src = datadir.join('package.zip')
dest = datadir.join(f'{src}_output')
message = 'formatter only supports directories'
with pytest.raises(ValueError, match=message):
DirectoryPackageFormat.extract(src, dest)
def test_extract_non_existant(datadir: Path) -> None:
src = datadir.join('directory_non_existant')
dest = datadir.join(f'{src}_output')
message = 'formatter only supports directories'
with pytest.raises(ValueError, match=message):
DirectoryPackageFormat.extract(src, dest)
def test_extract_already_exists(datadir: Path) -> None:
src = datadir.join('directory_src')
dest = datadir.join('directory_exists')
DirectoryPackageFormat.extract(src, dest)
assert os.path.exists(dest) is True
assert os.path.exists(dest / 'package.json') is True
def test_extract_create(datadir: Path) -> None:
src = datadir.join('directory_src')
dest = datadir.join('directory_created')
DirectoryPackageFormat.extract(src, dest)
assert os.path.exists(dest) is True
assert os.path.exists(dest / 'package.json') is True
|
StarcoderdataPython
|
6658476
|
<reponame>pkusensei/adventofcode2017
from collections import deque
def p1(step: int):
nums = [0]
idx = 0
current = 1
while current <= 2017:
idx = (idx + step) % len(nums) + 1
nums.insert(idx, current)
current += 1
return nums[idx + 1]
def p2(step: int):
nums = deque([0])
for num in range(1, 50_000_001):
nums.rotate(-step)
nums.append(num)
return nums[nums.index(0) + 1]
assert p1(3) == 638
assert p1(348) == 417
assert p2(348) == 34334221
|
StarcoderdataPython
|
11317935
|
import csv
import json
import os
import iomb.dqi as dqi
import iomb.matio as matio
import numpy
class Sector(object):
def __init__(self):
self.id = ''
self.index = 0
self.name = ''
self.code = ''
self.location = ''
self.description = ''
def as_json_dict(self):
return {
'id': self.id,
'index': self.index,
'name': self.name,
'code': self.code,
'location': self.location,
'description': self.description
}
class Indicator(object):
def __init__(self):
self.id = ''
self.index = 0
self.group = ''
self.code = ''
self.unit = ''
self.name = ''
def as_json_dict(self):
return {
'id': self.id,
'index': self.index,
'group': self.group,
'code': self.code,
'unit': self.unit,
'name': self.name
}
class Model(object):
def __init__(self, folder: str):
self.folder = folder # type: str
self.sectors = read_sectors(folder) # type: Dict[str, Sector]
sorted_sectors = [s for s in self.sectors.values()]
sorted_sectors.sort(key=lambda s: s.index)
self.sector_ids = [s.id for s in sorted_sectors]
self.indicators = read_indicators(folder) # type: List[Indicator]
self.indicators.sort(key=lambda i: i.index)
self.indicator_ids = [i.id for i in self.indicators]
self.matrix_cache = {}
def get_matrix(self, name: str):
m = self.matrix_cache.get(name)
if m is not None:
return m
path = '%s/%s.bin' % (self.folder, name)
if not os.path.isfile(path):
return None
m = matio.read_matrix(path)
self.matrix_cache[name] = m
return m
def get_dqi_matrix(self, name: str):
m = self.matrix_cache.get(name)
if m is not None:
return m
path = '%s/%s.csv' % (self.folder, name)
if not os.path.isfile(path):
return None
dm = dqi.Matrix.from_csv(path)
m = dm.to_string_list()
self.matrix_cache[name] = m
return m
def calculate(self, demand):
if demand is None:
return
perspective = demand.get('perspective')
d = self.demand_vector(demand)
data = None
if perspective == 'direct':
s = self.scaling_vector(d)
D = self.get_matrix('D')
data = scale_columns(D, s)
elif perspective == 'intermediate':
s = self.scaling_vector(d)
U = self.get_matrix('U')
data = scale_columns(U, s)
elif perspective == 'final':
U = self.get_matrix('U')
data = scale_columns(U, d)
else:
print('ERROR: unknown perspective %s' % perspective)
if data is None:
print('ERROR: no data')
return None
result = {
'indicators': self.indicator_ids,
'sectors': self.sector_ids,
'data': data.tolist()
}
return result
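# Illustrative demand payload accepted by calculate() (a sketch inferred from
# demand_vector() below; the sector id is a placeholder taken from sectors.csv):
#   {"perspective": "direct",
#    "demand": [{"sector": "<sector id>", "amount": 1.0}]}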
def demand_vector(self, demand):
L = self.get_matrix('L')
d = numpy.zeros(L.shape[0], dtype=numpy.float64)
entries = demand.get('demand') # type: dict
if entries is None:
return d
for e in entries:
sector_key = e.get('sector')
amount = e.get('amount')
if sector_key is None or amount is None:
continue
amount = float(amount)
sector = self.sectors.get(sector_key)
if sector is None:
continue
d[sector.index] = amount
return d
def scaling_vector(self, demand_vector: numpy.ndarray) -> numpy.ndarray:
s = numpy.zeros(demand_vector.shape[0], dtype=numpy.float64)
L = self.get_matrix('L')
for i in range(0, demand_vector.shape[0]):
d = demand_vector[i]
if d == 0:
continue
col = L[:, i]
s += d * col
return s
def read_sectors(folder: str):
m = {}
path = '%s/sectors.csv' % folder
with open(path, 'r', encoding='utf-8', newline='\n') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
s = Sector()
s.index = int(row[0])
s.id = row[1]
s.name = row[2]
s.code = row[3]
s.location = row[4]
s.description = row[5]
m[s.id] = s
return m
def read_indicators(folder: str):
indicators = []
path = '%s/indicators.csv' % folder
with open(path, 'r', encoding='utf-8', newline='\n') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
i = Indicator()
i.index = int(row[0])
i.id = row[3]
i.name = row[2]
i.code = row[3]
i.unit = row[4]
i.group = row[5]
indicators.append(i)
return indicators
def scale_columns(matrix: numpy.ndarray, v: numpy.ndarray) -> numpy.ndarray:
result = numpy.zeros(matrix.shape, dtype=numpy.float64)
for i in range(0, v.shape[0]):
s = v[i]
if s == 0:
continue
result[:, i] = s * matrix[:, i]
return result
|
StarcoderdataPython
|
6695894
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
from pystrings.pangram import Pangram
class PangramTests(unittest.TestCase):
def test_empty_string(self):
pangram = Pangram("")
self.assertFalse(pangram.is_pangram())
def test_valid_pangram(self):
pangram = Pangram('the quick brown fox jumps over the lazy dog')
self.assertTrue(
pangram.is_pangram())
def test_invalid_pangram(self):
pangram = Pangram('the quick brown fish jumps over the lazy dog')
self.assertFalse(pangram.is_pangram())
def test_missing_x(self):
pangram = Pangram('a quick movement of the enemy will jeopardize five gunboats')
self.assertFalse(pangram.is_pangram())
def test_mixedcase_and_punctuation(self):
pangram = Pangram('"Five quacking Zephyrs jolt my wax bed."')
self.assertTrue(pangram.is_pangram())
def test_unchecked_german_umlaute(self):
pangram = Pangram('<NAME> zwölf Boxkämpfer quer über den großen Sylter Deich.')
self.assertTrue(pangram.is_pangram())
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
8043352
|
<filename>src/__init__.py<gh_stars>0
# /dust/src/__init__.py
import os
import sys
if not '__file__' in globals():
__file__ = os.path.join(os.path.abspath('.'), '__init__.py')
def __setpaths__(level):
PATHS = [os.path.abspath('.')]
for i in range(level):
PATHS.append(os.path.split(PATHS[-1])[0])
for PATH in PATHS:
if(PATH not in sys.path):
sys.path.append(PATH)
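# Note: __setpaths__(2) below appends the current directory and its two parent
# directories to sys.path so that the `import dust.src` line can resolve.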
__setpaths__(2)
import dust.src as dust
|
StarcoderdataPython
|
9630013
|
import csv
from time import sleep
import requests
STATES = [
"Sachsen-Anhalt",
"Niedersachsen",
"Sachsen",
"Bayern",
"Mecklenburg-Vorpommern",
"Hamburg",
"Schleswig-Holstein",
"Rheinland-Pfalz",
"Hessen",
"Baden-Württemberg",
"Thüringen",
"Saarland",
"Bremen",
"Brandenburg",
"Nordrhein-Westfalen",
"Berlin"
]
INPUT_PATH = "raw/original_data.csv"
OUTPUT_PATH = "intermediate/ort_bundesland.csv"
GEOCODE_API = "https://nominatim.openstreetmap.org/search?format=json&country=germany&accept-language=de&q="
def geocode_city(city):
res = requests.get(GEOCODE_API + city)
res_json = res.json()
for result in res_json:
# The display name string contains the Bundesland but the position in
# the string varies among the data points. Thus, go over each
# possible candidate.
candidates = result["display_name"].split(",")
i = 0
while i < len(candidates):
trimmed = candidates[i].strip()
if trimmed in STATES: # exact, case-sensitive match
return trimmed
i += 1
return None
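# Illustrative call (a sketch; the actual value depends on the live Nominatim
# response): geocode_city("München") would be expected to return "Bayern".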
# we only need to look up each city once
city_names = set()
# read in data
with open(INPUT_PATH, newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
city_names.add(row[6])
print(city_names)
# main work happens here
data = []
for city in city_names:
sleep(0.1)
state = geocode_city(city)
print(city)
print(state)
data.append({'Ort': city, 'Bundesland': state})
# persist to CSV
with open(OUTPUT_PATH, 'w', newline='') as csvfile:
fieldnames = ['Ort', 'Bundesland']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(data)
|
StarcoderdataPython
|
3372297
|
<reponame>WPoelman/thesis
import concurrent.futures
import logging
from argparse import ArgumentParser, Namespace
from pathlib import Path
import pandas as pd
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from synse.config import Config
from synse.grew_rewrite import Grew
from synse.helpers import PMB, smatch_score
from synse.sbn_spec import get_doc_id
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
GREW = Grew()
def get_args() -> Namespace:
parser = ArgumentParser()
parser.add_argument(
"-p",
"--starting_path",
type=str,
required=True,
help="Path to start recursively search for sbn / ud files.",
)
parser.add_argument(
"-l",
"--language",
default=Config.SUPPORTED_LANGUAGES.EN.value,
choices=Config.SUPPORTED_LANGUAGES.all_values(),
type=str,
help="Language to use for ud pipelines.",
)
parser.add_argument(
"-s",
"--ud_system",
default=Config.UD_SYSTEM.STANZA.value,
type=str,
choices=Config.UD_SYSTEM.all_values(),
help="System pipeline to use for generating UD parses.",
)
parser.add_argument(
"--data_split",
default=Config.DATA_SPLIT.TRAIN.value,
choices=Config.DATA_SPLIT.all_values(),
type=str,
help="Data split to run inference on.",
)
parser.add_argument(
"-r",
"--results_file",
type=str,
help="CSV file to write results and scores to.",
)
parser.add_argument(
"-w",
"--max_workers",
default=16,
help="Max concurrent workers used to run inference with. Be careful "
"with setting this too high since mtool might error (segfault) if hit "
"too hard by too many concurrent tasks.",
)
# Main options
parser.add_argument(
"--clear_previous",
action="store_true",
help="When visiting a directory, clear the previously predicted "
"output if it's there.",
)
parser.add_argument(
"--store_visualizations",
action="store_true",
help="Store png of prediction in 'predicted' directory.",
)
parser.add_argument(
"--store_sbn",
action="store_true",
help="Store SBN of prediction in 'predicted' directory.",
)
return parser.parse_args()
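# Illustrative invocation (a sketch; the script name and paths are placeholders,
# and the -l / -s values must come from Config.SUPPORTED_LANGUAGES / Config.UD_SYSTEM):
#   python run_inference.py -p /path/to/pmb -r results.csv --store_sbn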
def generate_result(args, ud_filepath):
current_dir = ud_filepath.parent
predicted_dir = Path(current_dir / "predicted")
predicted_dir.mkdir(exist_ok=True)
if args.clear_previous:
for item in predicted_dir.iterdir():
if item.is_file():
item.unlink()
raw_sent = Path(current_dir / f"{args.language}.raw").read_text().rstrip()
res = GREW.run(ud_filepath)
if args.store_visualizations:
res.to_png(Path(predicted_dir / f"output.png"))
if args.store_sbn:
res.to_sbn(Path(predicted_dir / f"output.sbn"))
penman_path = res.to_penman(Path(predicted_dir / f"output.penman"))
scores = smatch_score(
current_dir / f"{args.language}.drs.penman",
penman_path,
)
penman_lenient_path = res.to_penman(
Path(predicted_dir / f"output.lenient.penman"),
split_sense=True,
)
lenient_scores = smatch_score(
current_dir / f"{args.language}.drs.lenient.penman",
penman_lenient_path,
)
result_record = {
"pmb_id": get_doc_id(args.language, ud_filepath),
"raw_sent": raw_sent,
**scores,
**{f"{k}_lenient": v for k, v in lenient_scores.items()},
}
return result_record
def full_run(args, ud_filepath):
try:
return generate_result(args, ud_filepath), str(ud_filepath)
except Exception as e:
logger.error(e)
return None, str(ud_filepath)
def main():
args = get_args()
results_records = []
ud_file_format = f"{args.language}.ud.{args.ud_system}.conll"
failed = 0
files_with_errors = []
pmb = PMB(args.data_split)
with concurrent.futures.ThreadPoolExecutor(
max_workers=args.max_workers
) as executor:
futures = []
for filepath in pmb.generator(
args.starting_path,
f"**/{args.language}.drs.penman",
desc_tqdm="Gathering data",
):
ud_filepath = Path(filepath.parent / ud_file_format)
if not ud_filepath.exists():
continue
futures.append(executor.submit(full_run, args, ud_filepath))
for res in tqdm(
concurrent.futures.as_completed(futures), desc="Running inference"
):
result, path = res.result()
if result:
results_records.append(result)
else:
files_with_errors.append(path)
failed += 1
df = pd.DataFrame().from_records(results_records)
if args.results_file:
df.to_csv(args.results_file, index=False)
if files_with_errors:
Path("paths_with_errors.txt").write_text("\n".join(files_with_errors))
print(
f"""
PARSED DOCS: {len(df)}
FAILED DOCS: {failed}
TOTAL DOCS: {len(df) + failed}
AVERAGE F1 (strict): {df["f1"].mean():.3} ({df["f1"].min():.3} - {df["f1"].max():.3})
AVERAGE F1 (lenient): {df["f1_lenient"].mean():.3} ({df["f1_lenient"].min():.3} - {df["f1_lenient"].max():.3})
"""
)
if __name__ == "__main__":
with logging_redirect_tqdm():
main()
|
StarcoderdataPython
|
8097541
|
<filename>tests/unit/classification/test_classification_widget.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import numpy as np
from utils_cv.classification.widget import AnnotationWidget, ResultsWidget
def test_annotation_widget(tiny_ic_data_path, tmp):
ANNO_PATH = os.path.join(tmp, "cvbp_ic_annotation.txt")
w_anno_ui = AnnotationWidget(
labels=["can", "carton", "milk_bottle", "water_bottle"],
im_dir=os.path.join(tiny_ic_data_path, "can"),
anno_path=ANNO_PATH,
im_filenames=None, # Set to None to annotate all images in IM_DIR
)
w_anno_ui.update_ui()
def test_results_widget(model_pred_scores):
learn, pred_scores = model_pred_scores
w_results = ResultsWidget(
dataset=learn.data.valid_ds,
y_score=pred_scores,
y_label=[
learn.data.classes[x] for x in np.argmax(pred_scores, axis=1)
],
)
w_results.update()
|
StarcoderdataPython
|
12802281
|
import cv2
import sys
import logging as log
import datetime as dt
from time import sleep
video_capture = cv2.VideoCapture(0)
picCount = 0
while True:
if not video_capture.isOpened():
print('Unable to load camera.')
sleep(5)
pass
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('Video', frame)
inputKey = cv2.waitKey(1)
if inputKey == ord('c'):
status = cv2.imwrite('/home/user/Project/faceRag/my-NewData/pic'+str(picCount)+'.jpg',frame)
print('/home/user/Project/faceRag/my-NewData/pic'+str(picCount)+'.jpg have been saved.')
picCount += 1
inputKey = -2
elif inputKey == ord('q'):
break
# Display the resulting frame
cv2.imshow('Video', frame)
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
6661672
|
from oldowan.mtconvert.sites2seq import sites2seq
from oldowan.mtdna import rCRS
from oldowan.polymorphism import Polymorphism
def test_single_site_as_string():
sites = '16129-'
seq = sites2seq(sites)
assert '-' in seq
def test_single_site_as_string_in_list():
sites = ['16129-']
seq = sites2seq(sites)
assert '-' in seq
def test_single_site_as_Polymorphism_in_list():
sites = [Polymorphism(16129,0,'-')]
seq = sites2seq(sites)
assert '-' in seq
def test_two_sites_in_string():
sites = '316C 316.1A'
seq = sites2seq(sites, region=range(314,319))
assert seq == 'CCCACT'
def test_two_sites_as_string_in_list():
sites = ['316C', '316.1A']
seq = sites2seq(sites, region=(314,318))
assert seq == 'CCCACT'
def test_two_sites_as_Polymorphisms_in_list():
sites = [Polymorphism(316,0,'C'), Polymorphism(316,1,'A')]
seq = sites2seq(sites, region=(314,318))
assert seq == 'CCCACT'
def test_mix_of_string_and_Polymorphisms_in_list():
sites = [Polymorphism(316,0,'C'), '316.1A']
seq = sites2seq(sites, region=(314,318))
assert seq == 'CCCACT'
# reverse the order
sites = ['316.1A', Polymorphism(316,0,'C')]
seq = sites2seq(sites, region=(314,318))
assert seq == 'CCCACT'
def test_default_sequence_range_is_HVR1():
assert sites2seq('') == rCRS[16023:16365]
assert sites2seq('', region=(16024,16365)) == rCRS[16023:16365]
def test_rCRS_argument_returns_rCRS():
assert sites2seq('rCRS') == rCRS[16023:16365]
def test_HVR2_range_argument():
assert sites2seq('', region='HVR2') == rCRS[72:340]
def test_HVR1andHVR2_range_argument():
assert sites2seq('', region='HVR1and2') == rCRS[16023:16365]+rCRS[72:340]
def test_HVR1toHVR2_range_argument():
assert sites2seq('', region='HVR1to2') == rCRS[16023:]+rCRS[:340]
def test_coding_range_argument():
assert sites2seq('', region='coding') == rCRS[576:15992]
def test_all_range_argument():
assert sites2seq('', region='all') == rCRS
def test_single_substition_in_correct_place():
# rCRS string is 0-based count
# rCRS as DNA sequence is 1-based
# hence the numbering discrepancy
assert 'A' == sites2seq('16129A', region='all')[16128]
def test_custom_range():
assert 'GAG' == sites2seq('', region=[1,5,9])
def test_insertion_in_custom_range():
assert 'GCAT' == sites2seq('1.1C', region=[1,2,3])
def test_two_insertions_in_custom_range():
assert 'GCAGT' == sites2seq('1.1C 2.1G', region=[1,2,3])
def test_deletion_at_start_of_custom_range():
assert '-AT' == sites2seq('1-', region=[1,2,3])
def test_deletion_in_middle_of_custom_range():
assert 'G-T' == sites2seq('2-', region=[1,2,3])
def test_sites_outside_region_snp():
assert sites2seq('73G') == rCRS[16023:16365]
def test_sites_outside_region_del():
assert sites2seq('489d') == rCRS[16023:16365]
def test_sites_outside_region_ins():
assert sites2seq('315.1C') == rCRS[16023:16365]
|
StarcoderdataPython
|
6409375
|
from pipeline.elastic import Elastic
from pipeline.elastic.documents import Webpage, Service, Port
from utils.config.ini import Ini
from utils.config.env import Env
ini = Ini(Env.read('CONFIG_FILE'))
def test_start_connection():
with Elastic(ini=ini) as conn:
assert conn
def test_add_new_documents():
with Elastic(ini=ini):
webpage = Webpage(
meta={'id': 1},
url='https://www.test.onion',
domain='www.test.onion',
title='test title',
screenshot='https://screenshot.link.dummy/test.jpg',
language='ko',
)
webpage.source = """<html>
<test>is it test?</test>
</html>
"""
webpage.save()
assert Webpage.get(id=1)
services = [
Service(number=80, status=True),
Service(number=443, status=False),
Service(number=8080, status=False),
]
Port(meta={'id': 1}, services=services).save()
assert Port.get(id=1)
# remove index after test
Webpage._index.delete()
Port._index.delete()
|
StarcoderdataPython
|
5010504
|
#! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='bert4torch',
version='0.1.5',
description='an elegant bert4torch',
long_description='bert4torch: https://github.com/Tongjilibo/bert4torch',
license='MIT Licence',
url='https://github.com/Tongjilibo/bert4torch',
author='Tongjilibo',
install_requires=['torch>1.0'],
packages=find_packages()
)
|
StarcoderdataPython
|
3575092
|
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This tool build tar files from a list of inputs."""
from contextlib import contextmanager
from datetime import datetime
import os
import shutil
import subprocess
import sys
from third_party.py import gflags
gflags.DEFINE_string('dest', None, 'The absolute path of file to copy the file to')
gflags.DEFINE_string('src', None, 'The absolute path of file to copy the file from')
gflags.DEFINE_string('key', None, 'The path to local file system file relative to store_location')
gflags.DEFINE_string('store_location', None, 'The location of the store relative to git root')
gflags.DEFINE_string('git_root', None, 'The absolute path to local git root')
gflags.DEFINE_string('method', 'get', 'FileSystemStore method either get, put')
gflags.DEFINE_string('status_file', None, 'The status file to record success or failure of the operation')
gflags.DEFINE_boolean('suppress_error', False, 'Suppress Error')
gflags.MarkFlagAsRequired('key')
gflags.MarkFlagAsRequired('store_location')
FLAGS = gflags.FLAGS
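# Illustrative invocations (sketches; every path below is a placeholder):
#   put a workspace file into the store:
#     ... --method=put --src=/abs/ws/out.bin --key=cache/out.bin \
#         --store_location=store --git_root=/abs/repo
#   copy it back into the workspace:
#     ... --method=get --dest=/abs/ws/out.bin --key=cache/out.bin \
#         --store_location=store --git_root=/abs/repo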
class LocalGitStore(object):
"""A class to get and put file in local GIT File System"""
class LocalGitStoreError(Exception):
pass
def __init__(self, store_location, key, git_root, suppress_error, status_file=None):
try:
self.git_root = git_root or os.environ['GIT_ROOT']
self.store_location = os.path.join(self.git_root, store_location)
self.key = key
self.status_file = status_file
self.suppress_error = suppress_error
except KeyError:
raise LocalGitStore.LocalGitStoreError("Git root not found. Either use --git_root or bazel command line flag --action_env=GIT_ROOT=`pwd`")
def __enter__(self):
self.status_code = 0
return self
def __exit__(self, t, v, traceback):
if self.status_file:
with open(self.status_file, "w") as f:
f.write("{0}".format(self.status_code))
@contextmanager
def _execute(self, commands):
try:
for command in commands:
self.status_code = subprocess.check_call(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.status_code = e.returncode
print(e)
if self.suppress_error:
return
raise LocalGitStore.LocalGitStoreError(e)
def get(self, get):
"""Get file and copy it to bazel workspace"""
file_location = os.path.join(self.store_location, self.key)
if os.path.exists(get) and self.suppress_error:
os.remove(get)
# Make sure the directory exists.
dirname = os.path.dirname(get)
if not os.path.exists(dirname):
os.makedirs(dirname)
self._execute(commands = [
['cp', file_location, get]
])
def put_if_not_exists(self, src):
file_location = os.path.join(self.store_location, self.key)
if not os.path.exists(file_location):
self.put(src)
def put(self, src):
"""Put file from Bazel workspace to local file system"""
file_location = os.path.join(self.store_location, self.key)
self._execute(commands = [
['mkdir', '-p', os.path.dirname(file_location)],
['cp', src, file_location]
])
def main(unused_argv):
if FLAGS.method == "get" and not FLAGS.dest:
raise LocalGitStore.LocalGitStoreError(
"Please specify the destination using --dest to store the file"
)
elif FLAGS.method == "put" and not FLAGS.src:
raise LocalGitStore.LocalGitStoreError(
"Please specify the file to put into store using --src"
)
elif FLAGS.method not in ["put", "get"]:
raise LocalGitStore.LocalGitStoreError("Method {0} not found".format(FLAGS.method))
with LocalGitStore(store_location=FLAGS.store_location,
key=FLAGS.key,
git_root = FLAGS.git_root,
suppress_error=FLAGS.suppress_error,
status_file=FLAGS.status_file) as git_store:
if FLAGS.method == "get":
git_store.get(FLAGS.dest)
elif FLAGS.method == "put":
git_store.put_if_not_exists(FLAGS.src)
if __name__ == '__main__':
main(FLAGS(sys.argv))
|
StarcoderdataPython
|
4838593
|
<filename>Lib/symbol.py<gh_stars>1-10
#! /usr/bin/env python
#
# Non-terminal symbols of Python grammar (from "graminit.h")
#
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/symbol.py
#--start constants--
single_input = 256
file_input = 257
eval_input = 258
funcdef = 259
parameters = 260
varargslist = 261
fpdef = 262
fplist = 263
stmt = 264
simple_stmt = 265
small_stmt = 266
expr_stmt = 267
print_stmt = 268
del_stmt = 269
pass_stmt = 270
flow_stmt = 271
break_stmt = 272
continue_stmt = 273
return_stmt = 274
raise_stmt = 275
import_stmt = 276
dotted_name = 277
global_stmt = 278
exec_stmt = 279
assert_stmt = 280
compound_stmt = 281
if_stmt = 282
while_stmt = 283
for_stmt = 284
try_stmt = 285
except_clause = 286
suite = 287
test = 288
and_test = 289
not_test = 290
comparison = 291
comp_op = 292
expr = 293
xor_expr = 294
and_expr = 295
shift_expr = 296
arith_expr = 297
term = 298
factor = 299
power = 300
atom = 301
lambdef = 302
trailer = 303
subscriptlist = 304
subscript = 305
sliceop = 306
exprlist = 307
testlist = 308
dictmaker = 309
classdef = 310
arglist = 311
argument = 312
#--end constants--
sym_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
sym_name[_value] = _name
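# sym_name maps each numeric grammar symbol back to its name,
# e.g. sym_name[257] == 'file_input'.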
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token.main()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1934126
|
# coding: utf8
import os
import pathlib
import shutil
from os import system
import pytest
from clinicadl import MapsManager
@pytest.fixture(
params=[
"data/stopped_jobs/stopped_1",
"data/stopped_jobs/stopped_2",
"data/stopped_jobs/stopped_3",
"data/stopped_jobs/stopped_4",
]
)
def input_directory(request):
return request.param
def test_resume(input_directory):
flag_error = not system(f"clinicadl -vv train resume {input_directory}")
assert flag_error
maps_manager = MapsManager(input_directory)
split_manager = maps_manager._init_split_manager()
for split in split_manager.split_iterator():
performances_flag = pathlib.Path(
input_directory, f"split-{split}", "best-loss", "train"
).exists()
assert performances_flag
shutil.rmtree(input_directory)
|
StarcoderdataPython
|
5069061
|
from .literals import TEST_RECEIVE_KEY, TEST_SEARCH_FINGERPRINT
def mock_recv_keys(self, keyserver, *keyids):
class ImportResult:
count = 1
fingerprints = [TEST_SEARCH_FINGERPRINT]
self.import_keys(TEST_RECEIVE_KEY)
return ImportResult()
|
StarcoderdataPython
|
3581036
|
<filename>deeppavlov/models/ranking/ranking_network.py
from keras.layers import Input, LSTM, Embedding, GlobalMaxPooling1D, Lambda, subtract, Conv2D, Dense, Activation
from keras.layers.merge import Dot, Subtract, Add, Multiply
from keras.models import Model
from keras.layers.wrappers import Bidirectional
from keras.optimizers import Adam
from keras.initializers import glorot_uniform, Orthogonal
from keras import losses
from keras import backend as K
import tensorflow as tf
import numpy as np
from deeppavlov.core.models.tf_backend import TfModelMeta
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.layers import keras_layers
from pathlib import Path
from deeppavlov.models.ranking.emb_dict import EmbDict
log = get_logger(__name__)
class RankingNetwork(metaclass=TfModelMeta):
"""Class to perform context-response matching with neural networks.
Args:
toks_num: A size of `tok2int` vocabulary to build embedding layer.
chars_num: A size of `char2int` vocabulary to build character-level embedding layer.
learning_rate: Learning rate.
device_num: A number of a device to perform model training on if several devices are available in a system.
seed: Random seed.
shared_weights: Whether to use shared weights in the model to encode contexts and responses.
triplet_mode: Whether to use a model with triplet loss.
If ``False``, a model with crossentropy loss will be used.
margin: A margin parameter for triplet loss. Only required if ``triplet_mode`` is set to ``True``.
distance: Distance metric (similarity measure) to compare context and response representations in the model.
Possible values are ``cos_similarity`` (cosine similarity), ``euclidian`` (Euclidean distance),
``sigmoid`` (1 minus sigmoid).
token_embeddings: Whether to use token (word) embeddings in the model.
use_matrix: Whether to use trainable matrix with token (word) embeddings.
max_sequence_length: A maximum length of a sequence in tokens.
Longer sequences will be truncated and shorter ones will be padded.
tok_dynamic_batch: Whether to use dynamic batching. If ``True``, a maximum length of a sequence for a batch
will be equal to the maximum of all sequences lengths from this batch,
but not higher than ``max_sequence_length``.
embedding_dim: Dimensionality of token (word) embeddings.
char_embeddings: Whether to use character-level token (word) embeddings in the model.
max_token_length: A maximum length of a token for representing it by a character-level embedding.
char_dynamic_batch: Whether to use dynamic batching for character-level embeddings.
If ``True``, a maximum length of a token for a batch
will be equal to the maximum of all tokens lengths from this batch,
but not higher than ``max_token_length``.
char_emb_dim: Dimensionality of character-level embeddings.
reccurent: A type of the RNN cell. Possible values are ``lstm`` and ``bilstm``.
hidden_dim: Dimensionality of the hidden state of the RNN cell. If ``reccurent`` equals ``bilstm``
to get the actual dimensionality ``hidden_dim`` should be doubled.
max_pooling: Whether to use max-pooling operation to get context (response) vector representation.
If ``False``, the last hidden state of the RNN will be used.
"""
def __init__(self,
toks_num: int,
chars_num: int,
emb_dict: EmbDict,
max_sequence_length: int,
max_token_length: int = None,
learning_rate: float = 1e-3,
device_num: int = 0,
seed: int = None,
shared_weights: bool = True,
triplet_mode: bool = True,
margin: float = 0.1,
distance: str = "cos_similarity",
token_embeddings: bool = True,
use_matrix: bool = False,
tok_dynamic_batch: bool = False,
embedding_dim: int = 300,
char_embeddings: bool = False,
char_dynamic_batch: bool = False,
char_emb_dim: int = 32,
highway_on_top: bool = False,
reccurent: str = "bilstm",
hidden_dim: int = 300,
max_pooling: bool = True):
self.distance = distance
self.toks_num = toks_num
self.emb_dict = emb_dict
self.use_matrix = use_matrix
self.seed = seed
self.hidden_dim = hidden_dim
self.learning_rate = learning_rate
self.margin = margin
self.embedding_dim = embedding_dim
self.device_num = device_num
self.shared_weights = shared_weights
self.pooling = max_pooling
self.recurrent = reccurent
self.token_embeddings = token_embeddings
self.char_embeddings = char_embeddings
self.chars_num = chars_num
self.char_emb_dim = char_emb_dim
self.highway_on_top = highway_on_top
self.triplet_mode = triplet_mode
if tok_dynamic_batch:
self.max_sequence_length = None
else:
self.max_sequence_length = max_sequence_length
if char_dynamic_batch:
self.max_token_length = None
else:
self.max_token_length = max_token_length
self.sess = self._config_session()
K.set_session(self.sess)
self.optimizer = Adam(lr=self.learning_rate)
self.duplet = self.duplet()
if self.triplet_mode:
self.loss = self.triplet_loss
self.obj_model = self.triplet_model()
else:
self.loss = losses.binary_crossentropy
self.obj_model = self.duplet_model()
self.obj_model.compile(loss=self.loss, optimizer=self.optimizer)
self.score_model = self.duplet
self.context_embedding = Model(inputs=self.duplet.inputs,
outputs=self.duplet.get_layer(name="pooling").get_output_at(0))
self.response_embedding = Model(inputs=self.duplet.inputs,
outputs=self.duplet.get_layer(name="pooling").get_output_at(1))
# self.score_model = Model(inputs=[self.obj_model.inputs[0], self.obj_model.inputs[1]],
# outputs=self.obj_model.get_layer(name="score_model").get_output_at(0))
# self.context_embedding = Model(inputs=[self.obj_model.inputs[0], self.obj_model.inputs[1]],
# outputs=self.obj_model.get_layer(name="pooling").get_output_at(0))
# self.response_embedding = Model(inputs=[self.obj_model.inputs[2], self.obj_model.inputs[3]],
# outputs=self.obj_model.get_layer(name="pooling").get_output_at(1))
def _config_session(self):
"""
Configure session for particular device
Returns:
tensorflow.Session
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(self.device_num)
return tf.Session(config=config)
def load(self, path):
log.info("[initializing `{}` from saved]".format(self.__class__.__name__))
self.obj_model.load_weights(path)
def save(self, path):
log.info("[saving `{}`]".format(self.__class__.__name__))
self.obj_model.save_weights(path)
self.context_embedding.save(str(Path(path).parent / 'sen_emb_model.h5'))
def init_from_scratch(self, emb_matrix):
log.info("[initializing new `{}`]".format(self.__class__.__name__))
if self.token_embeddings and not self.char_embeddings:
if self.use_matrix:
if self.shared_weights:
self.duplet.get_layer(name="embedding").set_weights([emb_matrix])
else:
self.duplet.get_layer(name="embedding_a").set_weights([emb_matrix])
self.duplet.get_layer(name="embedding_b").set_weights([emb_matrix])
def embedding_layer(self):
if self.shared_weights:
out_a = Embedding(self.toks_num,
self.embedding_dim,
input_length=self.max_sequence_length,
trainable=True, name="embedding")
return out_a, out_a
else:
out_a = Embedding(self.toks_num,
self.embedding_dim,
input_length=self.max_sequence_length,
trainable=True, name="embedding_a")
out_b = Embedding(self.toks_num,
self.embedding_dim,
input_length=self.max_sequence_length,
trainable=True, name="embedding_b")
return out_a, out_b
def lstm_layer(self):
"""Create a LSTM layer of a model."""
if self.pooling:
ret_seq = True
else:
ret_seq = False
ker_in = glorot_uniform(seed=self.seed)
rec_in = Orthogonal(seed=self.seed)
if self.shared_weights:
if self.recurrent == "bilstm" or self.recurrent is None:
out_a = Bidirectional(LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq), merge_mode='concat')
elif self.recurrent == "lstm":
out_a = LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq)
return out_a, out_a
else:
if self.recurrent == "bilstm" or self.recurrent is None:
out_a = Bidirectional(LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq), merge_mode='concat')
out_b = Bidirectional(LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq), merge_mode='concat')
elif self.recurrent == "lstm":
out_a = LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq)
out_b = LSTM(self.hidden_dim,
input_shape=(self.max_sequence_length, self.embedding_dim,),
kernel_initializer=ker_in,
recurrent_initializer=rec_in,
return_sequences=ret_seq)
return out_a, out_b
def triplet_loss(self, y_true, y_pred):
"""Triplet loss function"""
return K.mean(K.maximum(self.margin - y_pred, 0.), axis=-1)
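# Derived from the model structure: the triplet model (defined below) outputs the
# difference between the negative-pair and positive-pair distances, so this hinge
# term penalises cases where that gap falls below self.margin.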
def duplet(self):
if self.token_embeddings and not self.char_embeddings:
if self.use_matrix:
context = Input(shape=(self.max_sequence_length,))
response = Input(shape=(self.max_sequence_length,))
emb_layer_a, emb_layer_b = self.embedding_layer()
emb_c = emb_layer_a(context)
emb_r = emb_layer_b(response)
else:
context = Input(shape=(self.max_sequence_length, self.embedding_dim,))
response = Input(shape=(self.max_sequence_length, self.embedding_dim,))
emb_c = context
emb_r = response
elif not self.token_embeddings and self.char_embeddings:
context = Input(shape=(self.max_sequence_length, self.max_token_length,))
response = Input(shape=(self.max_sequence_length, self.max_token_length,))
char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
char_embedding_dim=self.char_emb_dim)
emb_c = char_cnn_layer(context)
emb_r = char_cnn_layer(response)
elif self.token_embeddings and self.char_embeddings:
context = Input(shape=(self.max_sequence_length, self.max_token_length,))
response = Input(shape=(self.max_sequence_length, self.max_token_length,))
if self.use_matrix:
c_tok = Lambda(lambda x: x[:,:,0])(context)
r_tok = Lambda(lambda x: x[:,:,0])(response)
emb_layer_a, emb_layer_b = self.embedding_layer()
emb_c = emb_layer_a(c_tok)
emb_rp = emb_layer_b(r_tok)
c_char = Lambda(lambda x: x[:,:,1:])(context)
r_char = Lambda(lambda x: x[:,:,1:])(response)
else:
c_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(context)
r_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(response)
emb_c = c_tok
emb_rp = r_tok
c_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(context)
r_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(response)
char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
char_embedding_dim=self.char_emb_dim)
emb_c_char = char_cnn_layer(c_char)
emb_r_char = char_cnn_layer(r_char)
emb_c = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_c, emb_c_char])
emb_r = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_rp, emb_r_char])
lstm_layer_a, lstm_layer_b = self.lstm_layer()
lstm_c = lstm_layer_a(emb_c)
lstm_r = lstm_layer_b(emb_r)
if self.pooling:
pooling_layer = GlobalMaxPooling1D(name="pooling")
lstm_c = pooling_layer(lstm_c)
lstm_r = pooling_layer(lstm_r)
if self.distance == "cos_similarity":
cosine_layer = Dot(normalize=True, axes=-1, name="score_model")
score = cosine_layer([lstm_c, lstm_r])
score = Lambda(lambda x: 1. - x)(score)
elif self.distance == "euclidian":
dist_score = Lambda(lambda x: K.expand_dims(self.euclidian_dist(x)), name="score_model")
score = dist_score([lstm_c, lstm_r])
elif self.distance == "sigmoid":
dist = Lambda(self.diff_mult_dist)([lstm_c, lstm_r])
score = Dense(1, activation='sigmoid', name="score_model")(dist)
score = Lambda(lambda x: 1. - x)(score)
model = Model([context, response], score)
return model
def duplet_model(self):
duplet = self.duplet
c_shape = K.int_shape(duplet.inputs[0])
r_shape = K.int_shape(duplet.inputs[1])
c = Input(batch_shape=c_shape)
r = Input(batch_shape=r_shape)
score = duplet([c, r])
score = Lambda(lambda x: 1. - x)(score)
model = Model([c, r], score)
return model
def triplet_model(self):
duplet = self.duplet
c_shape = K.int_shape(duplet.inputs[0])
r_shape = K.int_shape(duplet.inputs[1])
c1 = Input(batch_shape=c_shape)
r1 = Input(batch_shape=r_shape)
c2 = Input(batch_shape=c_shape)
r2 = Input(batch_shape=r_shape)
score1 = duplet([c1, r1])
score2 = duplet([c2, r2])
score_diff = Subtract()([score2, score1])
model = Model([c1, r1, c2, r2], score_diff)
return model
def diff_mult_dist(self, inputs):
input1, input2 = inputs
a = K.abs(input1-input2)
b = Multiply()(inputs)
return K.concatenate([input1, input2, a, b])
def euclidian_dist(self, x_pair):
x1_norm = K.l2_normalize(x_pair[0], axis=1)
x2_norm = K.l2_normalize(x_pair[1], axis=1)
diff = x1_norm - x2_norm
square = K.square(diff)
sum = K.sum(square, axis=1)
sum = K.clip(sum, min_value=1e-12, max_value=None)
dist = K.sqrt(sum)
return dist
def train_on_batch(self, batch, y):
batch = [x for el in batch for x in el]
if self.token_embeddings and not self.char_embeddings:
if self.use_matrix:
self.obj_model.train_on_batch(x=[np.asarray(x) for x in batch], y=np.asarray(y))
else:
b = batch
for i in range(len(b)):
b[i] = self.emb_dict.get_embs(b[i])
self.obj_model.train_on_batch(x=b, y=np.asarray(y))
elif not self.token_embeddings and self.char_embeddings:
self.obj_model.train_on_batch(x=[np.asarray(x) for x in batch], y=np.asarray(y))
elif self.token_embeddings and self.char_embeddings:
if self.use_matrix:
self.obj_model.train_on_batch(x=[np.asarray(x) for x in batch], y=np.asarray(y))
else:
b = [x[0] for x in batch]
for i in range(len(b)):
b[i] = self.emb_dict.get_embs(b[i])
self.obj_model.train_on_batch(x=b, y=np.asarray(y))
def predict_score_on_batch(self, batch):
if self.token_embeddings and not self.char_embeddings:
if self.use_matrix:
return self.score_model.predict_on_batch(x=batch)
else:
b = batch
for i in range(len(b)):
b[i] = self.emb_dict.get_embs(b[i])
return self.score_model.predict_on_batch(x=b)
elif not self.token_embeddings and self.char_embeddings:
return self.score_model.predict_on_batch(x=batch)
elif self.token_embeddings and self.char_embeddings:
if self.use_matrix:
return self.score_model.predict_on_batch(x=batch)
else:
b = [batch[i][:,:,0] for i in range(len(batch))]
b = [np.concatenate([b[i], batch[i][:,:,1:]], axis=2) for i in range(len(batch))]
return self.score_model.predict_on_batch(x=b)
def predict_embedding_on_batch(self, batch, type='context'):
if type == 'context':
embedding = self.context_embedding
elif type == 'response':
embedding = self.response_embedding
if self.token_embeddings and not self.char_embeddings:
if self.use_matrix:
return embedding.predict_on_batch(x=batch)
else:
b = batch
b = [self.emb_dict.get_embs(el) for el in b]
return embedding.predict_on_batch(x=b)
elif not self.token_embeddings and self.char_embeddings:
return embedding.predict_on_batch(x=batch)
elif self.token_embeddings and self.char_embeddings:
if self.use_matrix:
return embedding.predict_on_batch(x=batch)
else:
b = [self.emb_dict.get_embs(batch[i][:,:,0]) for i in range(len(batch))]
b = [np.concatenate([b[i], batch[i][:,:,1:]], axis=2) for i in range(len(batch))]
return embedding.predict_on_batch(x=b)
def predict_embedding(self, batch, bs, type='context'):
num_batches = len(batch[0]) // bs
embs = []
for i in range(num_batches):
b = [batch[j][i * bs:(i + 1) * bs] for j in range(len(batch))]
embs.append(self.predict_embedding_on_batch(b, type=type))
if len(batch[0]) % bs != 0:
b = [batch[j][num_batches * bs:] for j in range(len(batch))]
embs.append(self.predict_embedding_on_batch(b, type=type))
embs = np.vstack(embs)
return embs
# def triplet_model(self):
# if self.embedding_level is None or self.embedding_level == 'token':
# if self.use_matrix:
# context1 = Input(shape=(self.max_sequence_length,))
# response_positive = Input(shape=(self.max_sequence_length,))
# context2 = Input(shape=(self.max_sequence_length,))
# response_negative = Input(shape=(self.max_sequence_length,))
# emb_layer_a, emb_layer_b = self.embedding_layer()
# emb_c1 = emb_layer_a(context1)
# emb_c2 = emb_layer_a(context2)
# emb_rp = emb_layer_b(response_positive)
# emb_rn = emb_layer_b(response_negative)
# else:
# context1 = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# response_positive = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# context2 = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# response_negative = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# emb_c1 = context1
# emb_c2 = context2
# emb_rp = response_positive
# emb_rn = response_negative
# elif self.embedding_level == 'char':
# context1 = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_positive = Input(shape=(self.max_sequence_length, self.max_token_length,))
# context2 = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_negative = Input(shape=(self.max_sequence_length, self.max_token_length,))
#
# char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
# char_embedding_dim=self.char_emb_dim)
# emb_c1 = char_cnn_layer(context1)
# emb_c2 = char_cnn_layer(context2)
# emb_rp = char_cnn_layer(response_positive)
# emb_rn = char_cnn_layer(response_negative)
#
# elif self.embedding_level == 'token_and_char':
# context1 = Input(shape=(self.max_sequence_length, self.max_token_length,))
# context2 = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_positive = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_negative = Input(shape=(self.max_sequence_length, self.max_token_length,))
#
# if self.use_matrix:
# c_tok1 = Lambda(lambda x: x[:,:,0])(context1)
# c_tok2 = Lambda(lambda x: x[:,:,0])(context2)
# rp_tok = Lambda(lambda x: x[:,:,0])(response_positive)
# rn_tok = Lambda(lambda x: x[:,:,0])(response_negative)
# emb_layer_a, emb_layer_b = self.embedding_layer()
# emb_c1 = emb_layer_a(c_tok1)
# emb_c2 = emb_layer_a(c_tok2)
# emb_rp = emb_layer_b(rp_tok)
# emb_rn = emb_layer_b(rn_tok)
# c_char1 = Lambda(lambda x: x[:,:,1:])(context1)
# c_char2 = Lambda(lambda x: x[:,:,1:])(context2)
# rp_char = Lambda(lambda x: x[:,:,1:])(response_positive)
# rn_char = Lambda(lambda x: x[:,:,1:])(response_negative)
# else:
# c_tok1 = Lambda(lambda x: x[:,:,:self.embedding_dim])(context1)
# c_tok2 = Lambda(lambda x: x[:,:,:self.embedding_dim])(context2)
# rp_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(response_positive)
# rn_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(response_negative)
# emb_c1 = c_tok1
# emb_c2 = c_tok2
# emb_rp = rp_tok
# emb_rn = rn_tok
# c_char1 = Lambda(lambda x: x[:,:,self.embedding_dim:])(context1)
# c_char2 = Lambda(lambda x: x[:,:,self.embedding_dim:])(context2)
# rp_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(response_positive)
# rn_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(response_negative)
#
# char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
# char_embedding_dim=self.char_emb_dim)
#
# emb_c_char1 = char_cnn_layer(c_char1)
# emb_c_char2 = char_cnn_layer(c_char2)
# emb_rp_char = char_cnn_layer(rp_char)
# emb_rn_char = char_cnn_layer(rn_char)
#
# emb_c1 = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_c1, emb_c_char1])
# emb_c2 = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_c2, emb_c_char2])
# emb_rp = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_rp, emb_rp_char])
# emb_rn = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_rn, emb_rn_char])
#
# lstm_layer_a, lstm_layer_b = self.lstm_layer()
# lstm_c1 = lstm_layer_a(emb_c1)
# lstm_c2 = lstm_layer_a(emb_c2)
# lstm_rp = lstm_layer_b(emb_rp)
# lstm_rn = lstm_layer_b(emb_rn)
# if self.pooling:
# pooling_layer = GlobalMaxPooling1D(name="pooling")
# lstm_c1 = pooling_layer(lstm_c1)
# lstm_c2 = pooling_layer(lstm_c2)
# lstm_rp = pooling_layer(lstm_rp)
# lstm_rn = pooling_layer(lstm_rn)
# if self.distance == "euclidian":
# dist_score = Lambda(lambda x: K.expand_dims(self.euclidian_dist(x)), name="score_model")
# dist_pos = dist_score([lstm_c1, lstm_rp])
# dist_neg = dist_score([lstm_c2, lstm_rn])
# elif self.distance == "cos_similarity":
# cosine_layer = Dot(normalize=True, axes=-1, name="score_model")
# dist_pos = cosine_layer([lstm_c1, lstm_rp])
# dist_pos = Lambda(lambda x: 1. - x)(dist_pos)
# dist_neg = cosine_layer([lstm_c2, lstm_rn])
# dist_neg = Lambda(lambda x: 1. - x)(dist_neg)
# score_diff = Subtract()([dist_neg, dist_pos])
# model = Model([context1, response_positive, context2, response_negative], score_diff)
# return model
# def triplet_hinge_loss_model(self):
# if self.embedding_level is None or self.embedding_level == 'token':
# if self.use_matrix:
# context = Input(shape=(self.max_sequence_length,))
# response_positive = Input(shape=(self.max_sequence_length,))
# response_negative = Input(shape=(self.max_sequence_length,))
# emb_layer_a, emb_layer_b = self.embedding_layer()
# emb_c = emb_layer_a(context)
# emb_rp = emb_layer_b(response_positive)
# emb_rn = emb_layer_b(response_negative)
# else:
# context = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# response_positive = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# response_negative = Input(shape=(self.max_sequence_length, self.embedding_dim,))
# emb_c = context
# emb_rp = response_positive
# emb_rn = response_negative
# elif self.embedding_level == 'char':
# context = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_positive = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_negative = Input(shape=(self.max_sequence_length, self.max_token_length,))
#
# char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
# char_embedding_dim=self.char_emb_dim)
# emb_c = char_cnn_layer(context)
# emb_rp = char_cnn_layer(response_positive)
# emb_rn = char_cnn_layer(response_negative)
#
# elif self.embedding_level == 'token_and_char':
# context = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_positive = Input(shape=(self.max_sequence_length, self.max_token_length,))
# response_negative = Input(shape=(self.max_sequence_length, self.max_token_length,))
#
# if self.use_matrix:
# c_tok = Lambda(lambda x: x[:,:,0])(context)
# rp_tok = Lambda(lambda x: x[:,:,0])(response_positive)
# rn_tok = Lambda(lambda x: x[:,:,0])(response_negative)
# emb_layer_a, emb_layer_b = self.embedding_layer()
# emb_c = emb_layer_a(c_tok)
# emb_rp = emb_layer_b(rp_tok)
# emb_rn = emb_layer_b(rn_tok)
# c_char = Lambda(lambda x: x[:,:,1:])(context)
# rp_char = Lambda(lambda x: x[:,:,1:])(response_positive)
# rn_char = Lambda(lambda x: x[:,:,1:])(response_negative)
# else:
# c_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(context)
# rp_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(response_positive)
# rn_tok = Lambda(lambda x: x[:,:,:self.embedding_dim])(response_negative)
# emb_c = c_tok
# emb_rp = rp_tok
# emb_rn = rn_tok
# c_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(context)
# rp_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(response_positive)
# rn_char = Lambda(lambda x: x[:,:,self.embedding_dim:])(response_negative)
#
# char_cnn_layer = keras_layers.char_emb_cnn_func(n_characters=self.chars_num,
# char_embedding_dim=self.char_emb_dim)
#
# emb_c_char = char_cnn_layer(c_char)
# emb_rp_char = char_cnn_layer(rp_char)
# emb_rn_char = char_cnn_layer(rn_char)
#
# emb_c = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_c, emb_c_char])
# emb_rp = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_rp, emb_rp_char])
# emb_rn = Lambda(lambda x: K.concatenate(x, axis=-1))([emb_rn, emb_rn_char])
#
# lstm_layer_a, lstm_layer_b = self.lstm_layer()
# lstm_c = lstm_layer_a(emb_c)
# lstm_rp = lstm_layer_b(emb_rp)
# lstm_rn = lstm_layer_b(emb_rn)
# if self.pooling:
# pooling_layer = GlobalMaxPooling1D(name="pooling")
# lstm_c = pooling_layer(lstm_c)
# lstm_rp = pooling_layer(lstm_rp)
# lstm_rn = pooling_layer(lstm_rn)
# if self.distance == "euclidian":
# dist_score = Lambda(self.euclidian_dist,
# output_shape=self.euclidian_dist_output_shape,
# name="score_model")
# dist_pos = dist_score([lstm_c, lstm_rp])
# dist_neg = dist_score([lstm_c, lstm_rn])
# score_diff = Subtract()([dist_neg, dist_pos])
# elif self.distance == "cos_similarity":
# cosine_layer = Dot(normalize=True, axes=-1, name="score_model")
# dist_pos = cosine_layer([lstm_c, lstm_rp])
# dist_neg = cosine_layer([lstm_c, lstm_rn])
# score_diff = Subtract()([dist_pos, dist_neg])
# model = Model([context, response_positive, response_negative], score_diff)
# return model
|
StarcoderdataPython
|
1621756
|
import test_interface
from difficulty import Difficulty
from question import Question
class MultiplyTest(test_interface.TestI):
def get_description(self) -> str:
return "Positive multiply"
def get_question(self, difficulty: Difficulty) -> Question:
numbers = self.generate_numbers(difficulty)
question = f"What is {numbers[0]} * {numbers[1]}?"
answer = numbers[0] * numbers[1]
return Question(question, answer)
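# A minimal usage sketch (hedged): generate_numbers() and the concrete Difficulty members come
# from the surrounding project, so the names below are assumptions for illustration only.
#
#   test = MultiplyTest()
#   q = test.get_question(Difficulty.EASY)  # Difficulty.EASY is assumed to exist
#   print(test.get_description(), q.question, q.answer)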
|
StarcoderdataPython
|
3304133
|
<reponame>jeffvswanson/LeetCode<filename>0019_RemoveNthNodeFromEndOfList/python/test_solution.py
import pytest
import solution
def create_linked_list(raw_list) -> solution.ListNode:
for i, val in enumerate(raw_list):
if i == 0:
node = solution.ListNode(val=val)
head = node
if i + 1 < len(raw_list):
node.next = solution.ListNode(val=raw_list[i+1])
node = node.next
return head
@pytest.mark.parametrize(
"raw_list,n,expected",
[
([1, 2, 3, 4, 5], 2, [1, 2, 3, 5]),
([1], 1, None),
([1, 2], 1, [1]),
]
)
def test_initial_pass(raw_list, n, expected):
head = create_linked_list(raw_list)
got = solution.initial_pass(head, n)
if expected:
expected = create_linked_list(expected)
if got and expected:
while got.val and expected.val:
assert got.val == expected.val
got = got.next
expected = expected.next
if got is None:
assert expected is None
break
else:
assert got is None
|
StarcoderdataPython
|
5010746
|
__version__ = 'v3.0.1-dev'
|
StarcoderdataPython
|
6697670
|
import rosbag
import rospy
from tqdm import tqdm
import sys
import numpy as np
import matplotlib.pyplot as plt
from math import atan2, sin, cos
from relative_nav.msg import NodeInfo
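# This script copies every message from the input bag to the output bag and, for each message
# on the /keyframe topic, additionally writes a NodeInfo message (identity rotation, zero
# translation, node_id taken from the keyframe id) to the /node topic.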
inbag = rosbag.Bag('/home/superjax/rosbag/small_loop.bag', mode='r')
outbag = rosbag.Bag('/home/superjax/rosbag/small_loop.new.bag', mode='w')
for topic, msg, t in tqdm(inbag.read_messages(), total=inbag.get_message_count()):
if topic == '/keyframe':
node_msg = NodeInfo()
node_msg.header = msg.header
node_msg.node_id = node_msg.keyframe_id = msg.keyframe_id
node_msg.node_to_body.w = 1.0
node_msg.node_to_body.x = 0.0
node_msg.node_to_body.y = 0.0
node_msg.node_to_body.z = 0.0
node_msg.camera_to_body.rotation = node_msg.node_to_body
node_msg.camera_to_body.translation.x = 0.0
node_msg.camera_to_body.translation.y = 0.0
node_msg.camera_to_body.translation.z = 0.0
outbag.write('/node', node_msg, t)
outbag.write(topic, msg, t)
outbag.close()
outbag.reindex()
|
StarcoderdataPython
|
5187655
|
import pandas as pd
"""
This script concatenates a summary of the parameters/results for the cryptic phenotype models fit within each dataset. The table is used downstream by CollectResults_FilterFinalDiseases.py
"""
UCSFTable=pd.read_pickle('../UCSF/SummaryTable-7/UCSFModelingResults.pth')
UKBBTable=pd.read_pickle('../UKBB/FinalModels-4/ConvergenceResultsTable.pth')
converged_in_both=UKBBTable[UKBBTable['Converged']==True].index.intersection(UCSFTable.index[UCSFTable['Inference Converged']==True])
combined_table=UCSFTable.loc[converged_in_both][['Annotated HPO Terms', 'Annotated HPO Terms UKBB','Covariate Set']]
combined_table['UCSF Max. Model Rank']=UCSFTable.loc[converged_in_both]['Max. Model Rank']
combined_table['UKBB Max. Model Rank']=UKBBTable.loc[converged_in_both]['Rank']
combined_table['UCSF Inference Parameters']=UCSFTable.loc[converged_in_both]['Inference Parameters']
combined_table['UKBB Inference Parameters']=UKBBTable.loc[converged_in_both]['Inference Parameters']
combined_table.to_pickle('ModelInferenceCombinedResults.pth')
with open('ConvergedInBoth.txt','w') as f:
f.write('\n'.join(list(combined_table.index))+'\n')
|
StarcoderdataPython
|
1987714
|
<gh_stars>0
class Meerk40tError(Exception):
"""
This root Meerk40t exception is provided in case we ever want to provide common functionality
across all Meerk40t exceptions.
"""
class BadFileError(Meerk40tError):
"""Abort loading a malformed file"""
|
StarcoderdataPython
|
4982258
|
# Example of sampling from a normal probability density function
import scipy.stats
from pylab import *; ion()
import probayes as pb
norm_range = {-2., 2.}
set_size = {-10000} # size negation denotes random sampling
x = pb.RV("x", norm_range, prob=scipy.stats.norm, loc=0, scale=1)
rx = x.evaluate(set_size)
hist(rx['x'], 100)
|
StarcoderdataPython
|
1600176
|
# String for add/edit.component.html
user_add_edit_string = """ <div class="form-group">
<input type="{type}" class="form-control input-underline input-lg" id="{field}"
required formControlName = "{field}" placeholder=" {field}">
<div *ngIf="{field}.invalid && ({field}.dirty || {field}.touched)"
class="alert alert-danger">
<div *ngIf="{field}.errors.required">
{field} is required.
</div>
</div>
</div> """
boolean_form_string = """
<fieldset class="form-group">
<label>{Field}</label>
<label class="radio-inline">
<input type="radio" id="{field}" value="true" formControlName = "{field}" checked> yes</label>
<label class="radio-inline">
<input type="radio" id="{field}" value="true" formControlName = "{field}" checked> no</label>
</fieldset>
"""
#Add to module-add.component.ts
#Common for edit and add
FormControl_string = """
{field}: new FormControl('', [ Validators.required,]),
"""
getter_string = """
get {field}() {{ return this.{Resource}AddForm.get('{field}'); }}
"""
attribute_string = """
"{field}" : this.{Resource}AddForm.value.{field},
"""
#Add to module-edit.component.ts
edit_FormControl_value_string = """
"{field}" : res.data.attributes.{field},
"""
edit_attribute_string = """
"{field}" : this.{Resource}EditForm.value.{field},
"""
edit_getter_string = """
get {field}() {{ return this.{Resource}EditForm.get('{field}'); }}
"""
########### module.component.html Fields ##########
table_header_field = """
<th>{Field}</th>"""
table_row_field = """
<td>{{{{ {resource}.attributes.{field} }}}}</td>"""
table_date_row_field = """
<td>{{{{ {resource}.attributes.{field} | date:'long' }}}}</td>"""
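# A hedged usage sketch: these templates are plain Python format strings, so a code generator
# can fill them with str.format(). The field/resource values below are made up for illustration.
if __name__ == "__main__":
    snippet = user_add_edit_string.format(type="text", field="email")
    row = table_row_field.format(resource="user", field="email")
    print(snippet)
    print(row)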
|
StarcoderdataPython
|
3471703
|
# Attempt to import proto file
import a.b.demo_pb2
|
StarcoderdataPython
|
6476268
|
# Save the project (step two) by posting to the backend API
import requests
import json
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
'Content-Type': 'application/json; charset=UTF-8'
}
data = {
"projectDeclare": {
"id": "f4001645-c27e-4e3b-92ee-1574191aa69e",
"xmjhztz": "35.3",
"xmdwzje": "22",
"xmwczje": "22",
"xmgqzje": "22",
"cjqshte": "22",
"xmzqzje": "34",
"lwyyqkje": "35",
"lwyyqknhll": "",
"lwyyqkqx": "35",
"yhckmfxdje": "",
"zddbjkje": "",
"yhckmfxdqx": "",
"yhckmfxdnhll": "",
"lwyyqk": "无息贷款",
"chinaGqzb": "22",
"projectGqzb": "34",
"otherGqzb": "34",
"chinaZqzb": "34",
"projectZqzb": "45",
"otherZqzb": "45",
"lwyyqkValue": [
"无息贷款"
],
"zddbjkqk": 2,
"yhckmfxd": 2
},
"projectGqgcs": [
{
"type": 1,
"gdmc": "",
"name": "股东名称"
},
{
"type": 2,
"gdmc": "",
"name": "股东名称"
},
{
"type": 3,
"gdmc": "",
"name": "股东名称"
}
],
"projectZqgcs": [
{
"type": 1,
"gdmc": "",
"name": "股东名称"
},
{
"type": 2,
"gdmc": "",
"name": "股东名称"
},
{
"type": 3,
"gdmc": "",
"name": "股东名称"
}
]
}
cookies = {}
with open("cookies.txt", 'r') as file:
for line in file.read().split(';'):
name, value = line.strip().split('=', 1)
cookies[name] = value
response = requests.post('http://localhost:8081/ydyl/submit/saveProjectTwo', data=json.dumps(data),
                         headers=headers,
                         cookies=cookies)
print(response.text)
|
StarcoderdataPython
|
4935636
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Version: 0.2
# This module mainly performs VAD segmentation; the core function is segmentVoiceByZero.
import librosa
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
import os
matrix_size = 500
def smooth_filter(data):
'''
filter audio data by smooth
Parameters
----------
data: numpy array of float32
audio PCM data
Returns
----------
smooth_data: numpy array of float32
audio PCM data
'''
return np.append((data[:-1] + data[1:]) / 2, data[-1])
def audioFrameZeroCrossingRate_m(segment, threshold=0.01):
x = segment
x[x > threshold] = 1
x[x < - threshold] = -1
    x = x.astype(int)
y = x[:-1] * x[1:]
z = y[y == -1]
return - np.sum(z)/ len(segment)
def audioFrameZeroCrossingRate_matrix(data, windowSize=320, length=matrix_size, threshold=0.01):
'''
    Stack the framed data into a matrix and use numpy matrix operations to compute the zero-crossing rate of every frame.
    Parameters
    ----------
    data: windowed frame data, shape nFrame*windowSize
    length: defaults to matrix_size; each pass processes a length*windowSize matrix (roughly one second of data)
    Returns
    ----------
    zs_lst: zero-crossing rate of each frame
'''
n = len(data) // length
zs_lst = []
for i in range(n):
data_matrix = data[i*length:(i+1)*length]
data_matrix[data_matrix > threshold] = 1
data_matrix[data_matrix < - threshold] = -1
        data_matrix = data_matrix.astype(int)
data_matrix = data_matrix[:, :-1]*data_matrix[:, 1:]
data_matrix[data_matrix > -1] = 0
data_matrix = -np.sum(data_matrix, axis=1) / windowSize
zs_lst.append(data_matrix)
zs_lst = np.array(zs_lst).flatten()
if len(data) - n*length > 0:
data_matrix = data[n*length:]
data_matrix[data_matrix > threshold] = 1
data_matrix[data_matrix < - threshold] = -1
        data_matrix = data_matrix.astype(int)
data_matrix = data_matrix[:, :-1]*data_matrix[:, 1:]
data_matrix[data_matrix > -1] = 0
data_matrix = -np.sum(data_matrix, axis=1) / windowSize
zs_lst = np.append(zs_lst, data_matrix)
zs_lst = smooth_filter(zs_lst)
return zs_lst
def audioFrameRMS_m(segment):
segment = segment*segment
return np.sqrt(np.sum(segment)/len(segment))
def audioFrameRMS_matrix(data, windowSize=320, length=matrix_size):
'''
    Stack the framed data into a matrix and use numpy matrix operations to compute the energy feature of every frame.
    Parameters
    ----------
    data: windowed frame data, shape nFrame*windowSize
    length: defaults to matrix_size; each pass processes a length*windowSize matrix (roughly one second of data)
    Returns
    ----------
    rms_lst: RMS energy of each frame
'''
n = len(data) // length
rms_lst = []
for i in range(n):
data_matrix = data[i*length:(i+1)*length]
data_matrix = data_matrix*data_matrix
data_sum = np.sum(data_matrix, axis=1) / windowSize
data_sqart = np.sqrt(data_sum)
rms_lst.append(data_sqart)
rms_lst = np.array(rms_lst).flatten()
if len(data) - n*length > 0:
data_matrix = data[n*length:]
data_matrix = data_matrix*data_matrix
data_sum = np.sum(data_matrix, axis=1) / windowSize
data_sqart = np.sqrt(data_sum)
rms_lst = np.append(rms_lst, data_sqart)
rms_lst = smooth_filter(rms_lst)
return rms_lst
def selectWindow(windowSize):
''' window selection '''
# Window 3 - Hann Window
windowHann = 0.5 * (1 - np.cos(2 * np.pi / (windowSize - 1) * np.arange(windowSize)))
windowName = 'Hanning'
return windowHann, windowName
def data_to_frame(data, windowHann, hopSize=160, windowSize=320):
data_matrix = []
    nFrame = data.size // hopSize - 1  # discard trailing samples that do not fill a whole frame
for i in range(nFrame):
frameStart = i * hopSize
frame_x = data[frameStart:(frameStart + windowSize)]
# Multiply frame data with window
frame_x = frame_x * windowHann
data_matrix.append(frame_x)
return np.array(data_matrix)
def plot_piece(data, runningFrameFeature1, runningFrameFeature2, segmentPosition,
start=0, label=None, hopSize=160, samplingRate=16000):
'''
    Plot the segmented pieces on top of the raw waveform, the energy curve and the zero-crossing-rate curve.
    Parameters
    ----------
    data: numpy array of float32
        audio PCM data
    runningFrameFeature1: per-frame energy
    runningFrameFeature2: per-frame zero-crossing rate
    segmentPosition: start and end position of each segment (in samples)
    nFrame: total number of frames
Returns
----------
'''
    nFrame = data.size // hopSize - 1  # discard trailing samples that do not fill a whole frame
# Plot audio waveform
segStart = np.array(segmentPosition[:,0]) / samplingRate
segEnd = np.array(segmentPosition[:,1]) / samplingRate
fig1 = plt.figure(1, figsize=(18, 9))
time = np.arange(0, data.size) * 1.0 / samplingRate
time_x = np.arange(nFrame) * hopSize * 1.0 / samplingRate
numWord = len(segStart)
plt.subplot(311)
plt.plot(time, data, 'b', label='Waveform')
plt.legend()
label_max = np.max(data)
for i in range(numWord):
plt.plot([segStart[i], segStart[i]], [-label_max, label_max], 'r')
for i in range(numWord):
plt.plot([segEnd[i], segEnd[i]], [-label_max*0.5, label_max*0.5], 'r')
# Plot energy in RMS
plt.subplot(312)
plt.plot(time_x, runningFrameFeature1, 'g', label='RMS')
plt.legend()
label_max = np.max(runningFrameFeature1)
for i in range(numWord):
plt.plot([segStart[i], segStart[i]], [0, label_max], 'r')
for i in range(numWord):
plt.plot([segEnd[i], segEnd[i]], [0, label_max*0.5], 'r')
energy_mean = np.mean(runningFrameFeature1) * 0.6
plt.plot([0, data.size / samplingRate], [energy_mean, energy_mean], "y")
# Plot Zero-crossing rate
plt.subplot(313)
plt.plot(time_x, runningFrameFeature2, 'y', label='Zero-Crossing')
plt.xlabel('Time (sec)')
plt.legend()
label_max = np.max(runningFrameFeature2)
# plt.plot([0, nFrame*hopLength/1000], [ambientZeroCrossingRateLevel, ambientZeroCrossingRateLevel], 'r')
for i in range(numWord):
plt.plot([segStart[i], segStart[i]], [0, label_max], 'r')
for i in range(numWord):
plt.plot([segEnd[i], segEnd[i]], [0, label_max * 0.5], 'r')
energy_mean = np.mean(runningFrameFeature2) * 2
plt.plot([0, data.size / samplingRate], [energy_mean, energy_mean], "b")
plt.plot([0, data.size / samplingRate], [energy_mean * 0.1, energy_mean * 0.1], "g")
# label
if label is not None:
n = len(label)
for i in range(n):
if label[i].start - start / samplingRate > segEnd[numWord - 1]:
break
if label[i].start >= start / samplingRate:
plt.plot([label[i].start - start / samplingRate, label[i].start - start / samplingRate], [0, 0.15], 'g')
for i in range(n):
if label[i].start - start / samplingRate > segEnd[numWord - 1]:
break
if label[i].start >= start / samplingRate:
plt.plot([label[i].end - start / samplingRate, label[i].end - start / samplingRate], [0, 0.1], 'g')
    # fig1.canvas.manager.window.move(-1900, 850)  # adjust where the window pops up on the screen
plt.show()
# plt.close(fig1)
def running_feature(data, nFrame, window, hopSize=160, windowSize=320):
'''
    Compute the per-frame features.
    Parameters
    ----------
    data: numpy array of float32
        audio PCM data
    window: Hann window
    nFrame: total number of frames
    Returns
    ----------
    runningFrameFeature1: per-frame RMS energy
    runningFrameFeature2: per-frame zero-crossing rate
'''
# mean value of data
mean_value = np.mean(np.abs(data))
runningFrameFeature1 = [] # Short-time RMS
runningFrameFeature2 = [] # Short-time zero-crossing rate
for i in range(nFrame):
frameStart = i * hopSize
frame_x = data[frameStart:(frameStart + windowSize)]
# Multiply frame data with window
frame_x = frame_x * window
# Compute frame feature 1 energy
frameRMS = audioFrameRMS_m(frame_x)
runningFrameFeature1.append(frameRMS)
# runningFrameFeature1[i] = (runningFrameFeature1[i - 1] + runningFrameFeature1[i]) / 2
# Compute frame feature 2 zero-crossing rate
frameZeroCrossingRate = audioFrameZeroCrossingRate_m(frame_x, mean_value)
runningFrameFeature2.append(frameZeroCrossingRate)
# if i > 2:
# runningFrameFeature1[i] = (runningFrameFeature1[i - 2] + runningFrameFeature1[i - 1] + runningFrameFeature1[
# i]) / 3
    # smooth the feature curves
runningFrameFeature1 = smooth_filter(np.array(runningFrameFeature1))
runningFrameFeature2 = smooth_filter(np.array(runningFrameFeature2))
return runningFrameFeature1, runningFrameFeature2
def double_gate_zs(runningFrameFeature1, runningFrameFeature2, Zs, ZL, ML, thresholdLength, min_frame=0.05,
top_limit=0.8, hopSize=160, sr=16000):
'''
    Segment the features with the double-threshold method.
    Parameters
    ----------
    runningFrameFeature2: per-frame zero-crossing rate
    runningFrameFeature1: per-frame energy
    Zs: upper zero-crossing-rate threshold
    ZL: lower zero-crossing-rate threshold
    ML: energy threshold
    thresholdLength: limit on segment length
    min_frame: after extending by zero-crossing rate, discard segments shorter than 0.05 s
    top_limit: limits how far a segment start may be extended, to at most top_limit (0.8) of the maximum length
    Returns
    ----------
    segStart: start of each segment
    segEnd: end of each segment
'''
    min_frame = min_frame * (sr / hopSize)  # 100 frames per second at the default hop size
segStart = []
segEnd = []
# Step 1: Zs leveling
isLookForStart = True
for i in range(1, len(runningFrameFeature2)):
if isLookForStart:
if (runningFrameFeature2[i] >= Zs) & (runningFrameFeature2[i - 1] < Zs):
                # merge segments: if this start is within half the length limit of the previous start, drop the previous end and search again
if len(segStart) > 0 and i - segStart[-1] < thresholdLength * 0.5:
segEnd = segEnd[:-1]
isLookForStart = False
else:
segStart.append(i)
isLookForStart = False
else:
if i - segStart[-1] <= thresholdLength:
if (runningFrameFeature2[i] < Zs):
segEnd.append(i)
isLookForStart = True
elif i - segStart[-1] == thresholdLength:
segEnd.append(i)
isLookForStart = True
if isLookForStart == False:
segEnd.append(i)
# Step 2: ZL leveling
# Adjust end boundary
numWord = len(segStart)
for i in range(numWord):
index = segEnd[i]
if i == (numWord - 1):
search = len(runningFrameFeature2)
else:
search = segStart[i + 1]
while index < search:
if (runningFrameFeature2[int(index)] < ZL):
segEnd[i] = index
break
elif index - segStart[i] == thresholdLength:
segEnd[i] = index
break
else:
index += 1
# Adjust start boundary
for i in range(numWord):
index = segStart[i]
if i == 0:
search = 0
else:
search = segEnd[i - 1]
while index > search:
if (runningFrameFeature2[int(index)] < ZL):
segStart[i] = index
break
elif segEnd[i] - index >= thresholdLength*top_limit:
segStart[i] = index
break
else:
index -= 1
    # drop tiny segments that only span a few frames
segStart = np.array(segStart)
segEnd = np.array(segEnd)
segLengthMask = (segEnd - segStart) > min_frame
segStart = segStart[segLengthMask]
segEnd = segEnd[segLengthMask]
# Step 3: ML leveling
# Adjust end boundary
numWord = len(segStart)
for i in range(numWord):
index = segEnd[i]
if i == (numWord - 1):
search = len(runningFrameFeature2)
else:
search = segStart[i+1]
while index < search:
if runningFrameFeature1[int(index)] < ML:
segEnd[i] = index
break
elif index - segStart[i] == thresholdLength:
segEnd[i] = index
break
else:
index += 1
# Adjust start boundary
for i in range(numWord):
index = segStart[i]
if i == 0:
search = 0
else:
search = segEnd[i - 1]
while index > search:
if (runningFrameFeature1[int(index)] < ML):
segStart[i] = index
break
elif segEnd[i] - index >= thresholdLength*top_limit:
segStart[i] = index
break
else:
index -= 1
return segStart, segEnd
def piece_choice(segStart, segEnd, thresholdLength_min, hopSize=160):
'''
    Filter the segments so that only those above the minimum length are kept.
    Parameters
    ----------
    segStart: start position of each segment
    segEnd: end position of each segment
    thresholdLength_min: minimum segment length
    Returns
    ----------
    numWord: number of segments kept
    segmentPosition: original (sample) position of each segment
'''
segStart = np.array(segStart)
segEnd = np.array(segEnd)
assert segStart.shape[0] == segEnd.shape[0]
segLengthMask = (segEnd - segStart) > thresholdLength_min
segStartMerge = segStart[segLengthMask]
segEndMerge = segEnd[segLengthMask]
numWord = len(segStartMerge)
segmentPosition = np.vstack([segStartMerge, segEndMerge]).T * hopSize
return numWord, segmentPosition
def segmentVoiceByZero(samplingRate, data, frameLength=20, hopLength=10, label=None, verbose=False):
'''
    Segment audio data by zero-crossing rate, processing data from start to end.
    Ref
    https://blog.csdn.net/rocketeerLi/article/details/83307435
    Parameters
    ----------
    samplingRate: float32
    data: numpy array of float32
        audio PCM data
    frameLength: frame length in milliseconds
    hopLength: hop length (sliding step) in milliseconds
    label: labels read from the accompanying text file
Returns
----------
numWord: int
number of words in data
segmentPosition: numpy array of size (numWord, 2)
position information for each segmented word
'''
# 0.Frame configuration
frameSize = int(frameLength / 1000 * samplingRate)
hopSize = int(hopLength / 1000 * samplingRate)
# 1.adjustable parameter
thresholdEnergyGain = 0.6 # threshold value of Energy
    thresholdZSGain = 2.0  # multiple of the average zero-crossing rate, used as the upper ZCR threshold
    thresholdZSGainLow = 0.1  # fraction of the upper ZCR threshold, used as the extension (lower) threshold
thresholdLength = samplingRate * 0.4 // hopSize # limit the max length of vad pieces
thresholdLength_min = samplingRate * 0.12 // hopSize # limit the min length of vad pieces
    # 2. data preprocessing
data = smooth_filter(data)
mean_value = np.mean(np.abs(data))
    # apply a Hann window
windowSize = frameSize
window, windowName = selectWindow(windowSize)
# Total number of frames
data_matrix = data_to_frame(data, window)
    # 3. feature computation
runningFrameFeature1 = audioFrameRMS_matrix(data_matrix)
runningFrameFeature2 = audioFrameZeroCrossingRate_matrix(data_matrix, threshold=mean_value)
ambientZeroCrossingRateLevel = np.mean(runningFrameFeature2)
    Zs = ambientZeroCrossingRateLevel * thresholdZSGain  # upper zero-crossing-rate threshold
    ZL = Zs * thresholdZSGainLow  # lower zero-crossing-rate threshold
    ambientRMSLevel = np.mean(runningFrameFeature1)
    ML = ambientRMSLevel * thresholdEnergyGain  # lower energy threshold
    # 4. compute segment start/end boundaries
segStart, segEnd = double_gate_zs(runningFrameFeature1, runningFrameFeature2, Zs, ZL, ML, thresholdLength)
    # 5. filter segments
numWord, segmentPosition = piece_choice(segStart, segEnd, thresholdLength_min, hopSize=hopSize)
if verbose:
print('---------------------------------------------------------')
print('Voice Word Segmentation')
print('|\tnumWord\t\t=\t{0}'.format(numWord))
for i in range(numWord):
print('|\t#{0}\t{1}'.format(i, segmentPosition[i] / samplingRate),
"\t len: ", (segmentPosition[i][1] - segmentPosition[i][0]) / samplingRate, "s")
print('---------------------------------------------------------')
    # 6. plot
plot_piece(data, runningFrameFeature1, runningFrameFeature2, segmentPosition,
label=label, hopSize=hopSize, samplingRate=samplingRate)
return numWord, segmentPosition
if __name__ == "__main__":
print("ok")
t0 = time.time()
path = "C:/Users/44379/Desktop/b041/0002572461213034484205d7ff31_01_000a_2020_08_03_07_28_09_00048.wav"
data, samplingRate = librosa.load(path, sr=16000, res_type="kaiser_fast")
print("test start! read wav cost time:", time.time()-t0)
t1 = time.time()
numWord, segmentPosition = segmentVoiceByZero(samplingRate, data, verbose=1)
print("end seg time:", time.time()-t1)
|
StarcoderdataPython
|
5065359
|
<reponame>NeCTAR-RC/bumblebee
import collections.abc
from django import template
from django.template.defaultfilters import safe
import six
register = template.Library()
def iterable(arg):
return (
        isinstance(arg, collections.abc.Iterable)
and not isinstance(arg, six.string_types)
)
@register.filter(name='print_data_as_html_table')
def print_data_as_html_table(data) -> str:
table = '<table>'
if hasattr(data, 'items'):
for key, value in data.items():
if iterable(value):
value = print_data_as_html_table(value)
table += f"<tr><td>{key}</td><td>{value}</td></tr>"
else:
for value in data:
if iterable(value):
value = print_data_as_html_table(value)
table += f"<tr><td>{value}</td></tr>"
return safe(table + '</table>')
@register.filter(name='print_2d_list_in_table_body')
def print_2d_list_in_table_body(data) -> str:
table = '<tbody>'
for item in data:
table += "<tr>"
for value in item:
if iterable(value):
value = print_data_as_html_table(value)
table += f"<td>{value}</td>"
table += "</tr>"
return safe(table + '</tbody>')
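# A hedged usage note (illustrative only): in a Django template these filters are applied as,
# e.g., {{ my_dict|print_data_as_html_table }} or {{ rows|print_2d_list_in_table_body }}.
# Called directly in Python, print_data_as_html_table({'name': 'vm-1', 'tags': ['a', 'b']})
# returns a nested <table> string marked safe for rendering.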
|
StarcoderdataPython
|
1808982
|
<gh_stars>0
'''Interview Question 6: Print a linked list from tail to head
Given the head node of a linked list, print the values of its nodes in reverse order, from tail to head.
----------------
Example
input: 1->5->3->4
output: 4 3 5 1
----------------------
If the input data would need to be modified (e.g. by reversing the list), ask the interviewer first. The reversed
input/output order suggests an auxiliary structure such as a stack, and the relationship between stacks, recursion and loops.
'''
class Node(object):
def __init__(self, value=None, next_node = None):
self.value = value
self.next_node = next_node
def print_list_reversely(head):
if head is None:
return
cur = head
temp = []
while cur is not None:
temp.append(cur.value)
cur = cur.next_node
for i in range(len(temp)):
print(temp.pop(),end=' ')
print()
def __init_list(arr):
cur = head = None
for x in arr:
if cur is None:
cur = head = Node(x)
else:
cur.next_node = Node(x)
cur = cur.next_node
return head
if __name__ == "__main__":
data1 = [1,2,3,4,5]
data2 = [1]
link_list1 = __init_list(data1)
link_list2 = __init_list(data2)
print_list_reversely(None)
print_list_reversely(link_list1)
print_list_reversely(link_list2)
|
StarcoderdataPython
|
201650
|
<filename>enki/modeltokenverify.py<gh_stars>1-10
from google.appengine.ext.ndb import model
class EnkiModelTokenVerify( model.Model ):
token = model.StringProperty()
email = model.StringProperty()
user_id = model.IntegerProperty() # ndb user ID
time_created = model.DateTimeProperty( auto_now_add = True )
type = model.StringProperty( choices = [ 'register',
'passwordchange',
'emailchange',
'accountdelete',
'accountandpostsdelete',
'preventmultipost',
] )
auth_ids_provider = model.StringProperty() # store auth Id info for registration
|
StarcoderdataPython
|
355702
|
<reponame>ndjuric93/MusicOrganizer
""" Flask application """
from micro_player import create_app
from micro_player.config import SERVER_CONFIG
SERVICE_NAME = 'MicroPlayer'
if __name__ == '__main__':
app = create_app(name=SERVICE_NAME, **SERVER_CONFIG)
app.run(
host=SERVER_CONFIG['host'],
port=SERVER_CONFIG['port'],
debug=True
)
|
StarcoderdataPython
|
6606176
|
<reponame>uon-language/uon-parser
import struct
from validation.schema import Schema
from validation.validator import Validator
from validation.types.number.uint_type_validation import UintTypeValidation
from validation.properties.number.number_max_property import MaxNumberValidation
from validation.properties.number.number_min_property import MinNumberValidation
from validation.properties.number.quantity_validation_property import (
LengthQuantityValidation
)
from binary.utils import EOL, encode_string
# We use < for little-endian.
float32_struct = struct.Struct("<f")
float64_struct = struct.Struct("<d")
int32_struct = struct.Struct("<i")
int64_struct = struct.Struct("<q")
uint32_struct = struct.Struct("<I")
uint64_struct = struct.Struct("<Q")
char_struct = struct.Struct("<b")
# ============================== ENCODING ==============================
class TestValidatorToBinary:
def test_simple_validator_to_binary(self):
v = Validator(UintTypeValidation(),
[MinNumberValidation(0.0), MaxNumberValidation(125.0)],
{"description": "An unsigned integer validator"})
assert (b"\x1f\x19\x30\x0f\x15\x07" + struct.pack("<d", 0)
+ b"\x0f\x15\x08" + struct.pack("<d", 125)
+ b"\x1e" + b"\x04" + encode_string("An unsigned integer"
" validator")
) == v.to_binary()
def test_simple_validator_number_quantity_to_binary(self):
v = Validator(UintTypeValidation(),
[MinNumberValidation(0.0), MaxNumberValidation(125.0),
LengthQuantityValidation()])
assert (b"\x1f\x19\x30\x0f\x15\x07" + struct.pack("<d", 0)
+ b"\x0f\x15\x08" + struct.pack("<d", 125)
+ b"\x0f" + b"\x20"
) == v.to_binary()
class TestSchemaToBinary:
def test_simple_schema_to_binary(self):
v = Validator(UintTypeValidation(),
[MinNumberValidation(0.0), MaxNumberValidation(125.0)],
{"description": "An unsigned integer validator"})
s = Schema("person", {"age": v}, name="person",
description="a person schema")
assert (b"\x18" + encode_string("person")
+ b"\x11" + encode_string("person")
+ b"\x11" + encode_string("a person schema")
+ EOL
+ b"\x12" + encode_string("age")
+ v.to_binary()
+ EOL) == s.to_binary()
|
StarcoderdataPython
|
11209869
|
<gh_stars>0
# Collection data type that is ordered and immutable: like a list, but it cannot be changed after creation
# It allows duplicates
my_tuple = ("Max", 123, "Hello")
print(my_tuple)
print(type(my_tuple))
# parentheses are optional
my_tuple_1 = "World", "XYZ"
print(my_tuple_1)
# tuple with single element
my_single_1 = ("Hello")
print(type(my_single_1)) # this prints string
my_single_fix = ("Hello",)
print(type(my_single_fix))
# Create using tuple
my_tuple_2 = tuple("Hello")
print(type(my_tuple_2))
my_tuple_3 = tuple(["Hello", "world"])
print(type(my_tuple_3))
# Operations on tuples
print("Operation")
my_index_tuple = (1, 2, 3)
print(my_index_tuple[2])
# my_index_tuple[1] = 4 # This will give error
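# Demonstrating the error without stopping the script:
try:
    my_index_tuple[1] = 4
except TypeError as err:
    print("Tuples are immutable:", err)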
for i in my_index_tuple:
print(i)
my_tuple_4 = ('a', 'p', 'p', 'l', 'e')
if 'a' in my_tuple_4:
print("a present")
# Functions
print("Functions")
# Counts how many times p appeared
print(my_tuple_4.count('p'))
# Returns first index
print(my_tuple_4.index('e'))
# set removes duplicates and is not ordered
print(set(my_tuple_4))
# tuple slicing is same as list slicing
print("Slicing")
slice_1 = my_tuple_4[1:4]
print(slice_1)
# Reverse a list
slice_2 = my_tuple_4[::-1]
print(slice_2)
# Unpacking, also known as destructuring in JS
print("Unpack")
unpack_tuple = (11, 22, 33)
i1, i2, i3 = unpack_tuple
print(i1)
print(i2)
print(i3)
# Unpacking into fewer variables will give an error
# i1, i2 = unpack_tuple
# Destructuring with a starred variable to collect the middle values
unpack_tuple_2 = (11, 22, 33, 44, 55)
i1, *i, il = unpack_tuple_2
print(i) # this will be a list
my_list = unpack_tuple_2
print(my_list)
# Tuples are efficient to create and take less memory
|
StarcoderdataPython
|
1686646
|
<reponame>bwmichael/jccc-cis142-python<filename>labs/unit07/unit07Lab.py
##
# @author <NAME>
# This program asks the user for a year that we can check to see if it is a
# leap year or not.
## Determines if a year is a leap year
# @param year The year to test (Integer)
# @return true or false where the year is a leap year
def isLeapYear(year):
    # A year is a leap year if it is divisible by 4 and either divisible
    # by 400 or not divisible by 100.
if (year%400==0 or year%100!=0) and (year%4==0):
return True
else:
return False
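# Quick sanity checks: century years are leap years only when divisible by 400,
# e.g. isLeapYear(2000) -> True, isLeapYear(1900) -> False, isLeapYear(2024) -> True.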
## Displays the welcome message to the program
# @return string message describing the program
def welcomeMessage():
return "Enter a year to validate if it is a leap year!"
## This is the main entry point to the program
def main():
# Set the input to y for the first checking of the leap year
userInput = "y"
# Always loop until a break occurs
while True:
# Check if the user wants to continue
if userInput == "y" or userInput == "Y":
# Get the year and convert it to an integer
inputYear = int(input("Please enter a year to test: "))
# Check to make sure the user entered a valid year
if inputYear >= 0:
# Check to see if the leap year function is returning true
# or false. Print the result with the year.
if isLeapYear(inputYear):
print(str(inputYear) + " is a leap year!")
else:
print(str(inputYear) + " is not a leap year.")
# Ask the user if they want to continue
userInput = input("Enter another leap year? (y or n): ")
else:
# Set the userInput to yes so we can check the year instead of
# prompting the user
userInput = "y"
# Check to see if the user wants to continue
elif userInput == "n" or userInput == "N":
# Exit the loop and exit the program.
break
# Describe to the user what the program does
print(welcomeMessage())
# Call the main function to start up the program
main()
|
StarcoderdataPython
|
121731
|
<reponame>accelerationa/DistributedSpider<gh_stars>0
import logging
from task_status import TaskStatus
import time
import pymongo
from init_mongo_client import init_mongo_client
class TaskDBMongoDao:
def __init__(self, database_name, collection_name, stack):
self.client = init_mongo_client(stack=stack)
self.database = self.client[database_name]
self.collection = self.database[collection_name]
# Returns task URL. Returns None if there's no unprocessed task.
def findAndReturnAnUnprocessedTask(self):
task = self.collection.find_one_and_update(
{ "status" : TaskStatus.new.value },
{ "$set": { "status" : TaskStatus.downloading.value}})
if not task:
print("Empty Entry...")
return None
task_url = task['url']
print("Task url is: {}.".format(task_url))
return task_url
def removeTask(self, task_url):
self.collection.delete_one({'url': task_url})
def newTask(self, url):
self.collection.insert_one({'status': 'New', 'url': url, 'createdOn': time.time()})
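# A hedged usage sketch (the argument values below are assumptions, not part of this module):
#   dao = TaskDBMongoDao(database_name="spider", collection_name="tasks", stack="dev")
#   url = dao.findAndReturnAnUnprocessedTask()
#   if url is not None:
#       # ... download the page, then ...
#       dao.removeTask(url)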
|
StarcoderdataPython
|
8175494
|
<filename>src/api/datahub/databus/tests/modules/hdfs_import_views_test.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from datahub.access.tests.utils import get, post
from datahub.databus.models import DatabusHdfsImportTask
@pytest.mark.httpretty
@pytest.mark.django_db
def test_create_success(mocker):
"""业务ID不为数字"""
params = {
"result_table_id": "103_xxx_test",
"data_dir": "test_data_dir3",
"description": "test",
}
mocker.patch(
"datahub.databus.rt.get_databus_rt_info",
return_value={
"result_table_id": "103_xxx_test",
"hdfs.data_type": "hdfs",
"channel_name": "test_channel",
"channel_type": "kafka",
"geog_area": "inland",
"hdfs": {
"connection_info": '{"flush_size": 1000000, "servicerpc_port": 53310, "hdfs_url": "hdfs://hdfsTest", '
'"rpc_port": 9000, "interval": 60000, "ids": "nn1", "hdfs_cluster_name": "hdfsTest",'
' "topic_dir": "/kafka/data/", "log_dir": "/kafka/logs", "hdfs_default_params": '
'{"dfs.replication": 2}, "hosts": "test-master-01", "port": 8081}'
},
"bootstrap.servers": "test_kafka_bs",
},
)
res = post("/v3/databus/import_hdfs/", params)
assert res["result"]
assert res["data"]["result_table_id"] == "103_xxx_test"
assert res["data"]["data_dir"] == "test_data_dir3"
assert res["data"]["finished"] == 0
assert res["data"]["status"] == ""
assert res["data"]["kafka_bs"] == "test_kafka_bs"
assert res["data"]["hdfs_conf_dir"] == "hdfsTest"
@pytest.mark.httpretty
@pytest.mark.django_db
def test_create_no_rt(mocker):
"""业务ID不为数字"""
# mock_startShipperTask.return_val = True
# result_table.get_ok()
params = {
"result_table_id": "103_xxx_test",
"data_dir": "test_data_dir3",
"description": "test",
}
mocker.patch("datahub.databus.rt.get_databus_rt_info", return_value=None)
res = post("/v3/databus/import_hdfs/", params)
assert not res["result"]
assert res["data"] is None
assert res["message"] == u"离线导入任务创建失败,result_table_id:103_xxx_test 对应的结果表不存在! "
assert res["code"] == "1570008"
@pytest.mark.httpretty
@pytest.mark.django_db
def test_create_integrityerror(mocker):
params = {
"result_table_id": "103_xxx_test",
"data_dir": "test_data_dir3",
"description": None,
}
mocker.patch(
"datahub.databus.rt.get_databus_rt_info",
return_value={
"result_table_id": "103_xxx_test",
"hdfs": {
"connection_info": '{"flush_size": 1000000, "servicerpc_port": 53310, "hdfs_url": "hdfs://hdfsTest"'
', "rpc_port": 9000, "interval": 60000, "ids": "nn1", "hdfs_cluster_name": '
'"hdfsTest", "topic_dir": "/kafka/data/", "log_dir": "/kafka/logs", '
'"hdfs_default_params": {"dfs.replication": 2}, "hosts": "test-master-01", '
'"port": 8081}'
},
"bootstrap.servers": "test_kafka_bs",
},
)
res = post("/v3/databus/import_hdfs/", params)
assert res["result"] is False
assert res["data"] is None
assert res["message"] == u"离线导入任务创建失败!"
assert res["code"] == "1570008"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_retrieve_success():
res = get("/v3/databus/import_hdfs/1/")
assert res["result"]
assert res["data"]["status"] == "t"
assert res["data"]["data_dir"] == "test_data_dir"
assert res["data"]["result_table_id"] == "101_xxx_test"
assert res["data"]["finished"] == 0
assert res["data"]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"]["kafka_bs"] == "test_kafka_bs"
@pytest.mark.httpretty
@pytest.mark.django_db
def test_retrieve_failed():
res = get("/v3/databus/import_hdfs/1/")
assert not res["result"]
assert res["message"] == u"查询离线导入任务失败,任务Id:1"
assert res["code"] == "1570004"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_udpate_task_finished():
params = {
"result_table_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finished": "1",
"status": "t",
}
res = post("/v3/databus/import_hdfs/1/update/", params)
assert res["result"]
assert res["data"]["status"] == "t"
assert res["data"]["data_dir"] == "test_data_dir"
assert res["data"]["result_table_id"] == "101_xxx_test"
assert res["data"]["finished"] == 1
assert res["data"]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"]["kafka_bs"] == "test_kafka_bs"
assert res["data"]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_udpate_task_not_finish():
params = {
"result_table_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finished": "0",
"status": "t",
}
res = post("/v3/databus/import_hdfs/1/update/", params)
assert res["result"]
assert res["data"]["status"] == "t"
assert res["data"]["data_dir"] == "test_data_dir"
assert res["data"]["result_table_id"] == "101_xxx_test"
assert res["data"]["finished"] == 0
assert res["data"]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"]["kafka_bs"] == "test_kafka_bs"
assert res["data"]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_udpate_task_failed(mocker):
params = {
"result_table_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finished": "0",
"status": "t",
}
mocker.patch(
"datahub.databus.models.DatabusHdfsImportTask.objects.get",
side_effect=DatabusHdfsImportTask.DoesNotExist,
)
res = post("/v3/databus/import_hdfs/1/update/", params)
assert not res["result"]
assert res["data"] is None
assert res["message"] == u"更新离线导入任务失败,任务Id:1, result_table_id:101_xxx_test, data_dir:test_data_dir, " u"finished:0"
assert res["code"] == "1570007"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_update_hdfs_import_finished():
params = {
"id": 1,
"rt_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finish": "true",
"status_update": "t",
}
res = post("/v3/databus/import_hdfs/update_hdfs_import_task/", params)
assert res["result"]
assert res["data"]["status"] == "t"
assert res["data"]["data_dir"] == "test_data_dir"
assert res["data"]["result_table_id"] == "101_xxx_test"
assert res["data"]["finished"] == 1
assert res["data"]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"]["kafka_bs"] == "test_kafka_bs"
assert res["data"]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_import_task_not_finish():
params = {
"id": 1,
"rt_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finish": "false",
"status_update": "t",
}
res = post("/v3/databus/import_hdfs/update_hdfs_import_task/", params)
assert res["result"]
assert res["data"]["status"] == "t"
assert res["data"]["data_dir"] == "test_data_dir"
assert res["data"]["result_table_id"] == "101_xxx_test"
assert res["data"]["finished"] == 0
assert res["data"]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"]["kafka_bs"] == "test_kafka_bs"
assert res["data"]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_update_hdfs_import_task_failed(mocker):
params = {
"id": 1,
"rt_id": "101_xxx_test",
"data_dir": "test_data_dir",
"finish": "false",
"status_update": "t",
}
mocker.patch(
"datahub.databus.models.DatabusHdfsImportTask.objects.get",
side_effect=DatabusHdfsImportTask.DoesNotExist,
)
res = post("/v3/databus/import_hdfs/update_hdfs_import_task/", params)
assert not res["result"]
assert res["data"] is None
assert (
res["message"] == u"更新离线导入任务失败,任务Id:1, result_table_id:101_xxx_test, data_dir:test_data_dir, " u"finished:false"
)
assert res["code"] == "1570007"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_get_earliest_success():
res = get("/v3/databus/import_hdfs/get_earliest/?limit=1")
assert res["result"]
assert res["data"][0]["status"] == "t"
assert res["data"][0]["data_dir"] == "test_data_dir"
assert res["data"][0]["result_table_id"] == "101_xxx_test"
assert res["data"][0]["finished"] == 0
assert res["data"][0]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"][0]["kafka_bs"] == "test_kafka_bs"
assert res["data"][0]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_get_earliest_noparam():
res = get("/v3/databus/import_hdfs/get_earliest/")
assert res["result"]
assert res["data"][0]["status"] == "t"
assert res["data"][0]["data_dir"] == "test_data_dir"
assert res["data"][0]["result_table_id"] == "101_xxx_test"
assert res["data"][0]["finished"] == 0
assert res["data"][0]["hdfs_conf_dir"] == "test_hdfs_conf_dir"
assert res["data"][0]["kafka_bs"] == "test_kafka_bs"
assert res["data"][0]["hdfs_custom_property"] == ""
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add")
def test_get_earliest_failed(mocker):
mocker.patch(
"datahub.databus.models.DatabusHdfsImportTask.objects.filter",
side_effect=Exception,
)
res = get("/v3/databus/import_hdfs/get_earliest/")
assert not res["result"]
assert res["data"] is None
assert res["message"] == u"查询最近离线导入任务列表失败!"
assert res["code"] == "1570006"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add_finished")
def test_clean_history_success():
params = {"days": 1}
res = post("/v3/databus/import_hdfs/clean_history/", params)
assert res["result"]
assert res["data"]["days"] == 1
assert res["data"]["del_count"] == 1
@pytest.mark.httpretty
@pytest.mark.django_db
def test_clean_history_no_data():
params = {"days": 1}
res = post("/v3/databus/import_hdfs/clean_history/", params)
assert res["result"]
assert res["data"]["days"] == 1
assert res["data"]["del_count"] == 0
@pytest.mark.httpretty
@pytest.mark.django_db
def test_clean_history_failed(mocker):
params = {"days": 1}
mocker.patch(
"datahub.databus.models.DatabusHdfsImportTask.objects.filter",
side_effect=Exception,
)
res = post("/v3/databus/import_hdfs/clean_history/", params)
assert not res["result"]
assert res["code"] == "1570005"
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add_multi")
def test_check_tasks_success():
res = get("/v3/databus/import_hdfs/check/?interval_min=60")
assert res["result"]
assert res["data"]["BAD_DATA_FILE"] == 1
assert res["data"]["EMPTY_DATA_DIR"] == 1
assert res["data"]["FILE_TOO_BIG"] == 1
assert res["data"]["UNFINISHED_TASKS"] == 2
@pytest.mark.httpretty
@pytest.mark.django_db
@pytest.mark.usefixtures("task_add_multi")
def test_check_tasks_failed(mocker):
mocker.patch("datahub.databus.import_hdfs.check_tasks", side_effect=Exception)
res = get("/v3/databus/import_hdfs/check/?interval_min=60")
assert res["result"] is False
assert res["data"] is None
assert res["message"] == u"离线任务状态检查失败!"
assert res["code"] == "1570020"
|
StarcoderdataPython
|
9737603
|
from . import sampling, sa
|
StarcoderdataPython
|
11361462
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .user import User
from .asset import Asset
class AppInfo:
"""Represents the application info for the bot provided by Discord.
Attributes
-------------
id: :class:`int`
The application ID.
name: :class:`str`
The application name.
owner: :class:`User`
The application owner.
icon: Optional[:class:`str`]
The icon hash, if it exists.
description: Optional[:class:`str`]
The application description.
bot_public: :class:`bool`
Whether the bot can be invited by anyone or if it is locked
to the application owner.
bot_require_code_grant: :class:`bool`
Whether the bot requires the completion of the full oauth2 code
grant flow to join.
rpc_origins: Optional[List[:class:`str`]]
A list of RPC origin URLs, if RPC is enabled.
"""
__slots__ = ('_state', 'description', 'id', 'name', 'rpc_origins',
'bot_public', 'bot_require_code_grant', 'owner', 'icon')
def __init__(self, state, data):
self._state = state
self.id = int(data['id'])
self.name = data['name']
self.description = data['description']
self.icon = data['icon']
self.rpc_origins = data['rpc_origins']
self.bot_public = data['bot_public']
self.bot_require_code_grant = data['bot_require_code_grant']
self.owner = User(state=self._state, data=data['owner'])
def __repr__(self):
return '<{0.__class__.__name__} id={0.id} name={0.name!r} description={0.description!r} public={0.bot_public} ' \
'owner={0.owner!r}>'.format(self)
@property
def icon_url(self):
""":class:`.Asset`: Retrieves the application's icon asset."""
return Asset._from_icon(self._state, self, 'app')
|
StarcoderdataPython
|
11252191
|
from pathlib import Path
from string import punctuation
from nltk.corpus import stopwords
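# Note: the NLTK stopwords corpus must be present locally; if it is missing,
# download it once with nltk.download('stopwords').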
ROOT = Path(__file__).resolve().parent
DATA_FOLDER = ROOT.joinpath("data")
STOP_WORDS = set(stopwords.words('english') + list(punctuation) + ['AT_USER', 'URL'])
|
StarcoderdataPython
|
9694873
|
from core.data.ans_punct import prep_ans
from core.data.save_glove_embeds import StoredEmbeds
import numpy as np
import random, re, json
from torch.utils.data._utils.collate import default_collate
try:
import en_vectors_web_lg
except ImportError:
import spacy
def shuffle_list(ans_list):
random.shuffle(ans_list)
def save_json(obj, fname):
with open(fname, 'w') as f:
json.dump(obj, f)
def load_json(fname):
with open(fname, 'r') as f:
data_ = json.load(f)
return data_
# ------------------------------
# ---- Initialization Utils ----
# ------------------------------
def img_feat_path_load(path_list):
iid_to_path = {}
for ix, path in enumerate(path_list):
iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
iid_to_path[iid] = path
return iid_to_path
def img_feat_load(path_list):
iid_to_feat = {}
for ix, path in enumerate(path_list):
iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
img_feat = np.load(path)
img_feat_x = img_feat['x'].transpose((1, 0))
iid_to_feat[iid] = img_feat_x
print('\rPre-Loading: [{} | {}] '.format(ix, path_list.__len__()), end=' ')
return iid_to_feat
def ques_load(ques_list):
qid_to_ques = {}
for ques in ques_list:
qid = str(ques['question_id'])
qid_to_ques[qid] = ques
return qid_to_ques
def get_words(question_str):
return re.sub(
r"([.,'!?\"()*#:;])",
'',
question_str.lower()
).replace('-', ' ').replace('/', ' ').split()
def tokenize(stat_ques_list, use_glove, save_embeds=False):
# This function basically requires use_glove to be true in order to work correctly.
# Otherwise, the indicies in token_to_ix don't match the actual embedding matrix.
token_to_ix = {
'PAD': 0,
'UNK': 1,
'[MASK]': 2,
'[CLS]': 3
}
spacy_tool = None
pretrained_emb = []
stored_embeds = StoredEmbeds(embed_fname='./ckpts/glove_embeds.pkl')
if use_glove:
try:
spacy_tool = en_vectors_web_lg.load()
except NameError:
try:
spacy_tool = spacy.load('en_vectors_web_lg')
except OSError:
if not stored_embeds.has_embeds():
raise ValueError('Spacy could not be loaded and no stored glove embeddings were found.')
return stored_embeds.get_embeds()
known_vec = spacy_tool('the').vector
mu = 0.
sigma = np.sqrt(1. / known_vec.shape[0])
pretrained_emb.append(spacy_tool('PAD').vector)
pretrained_emb.append(spacy_tool('UNK').vector)
pretrained_emb.append(
sigma * np.random.randn(*known_vec.shape).astype(dtype=known_vec.dtype) + mu
) # Embedding for [MASK]
pretrained_emb.append(
sigma * np.random.randn(*known_vec.shape).astype(dtype=known_vec.dtype) + mu
) # Embedding for [CLS]
for ques in stat_ques_list:
words = get_words(ques['question'])
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
if save_embeds:
# Embeddings will not be overwritten if file already exists.
stored_embeds.set_embeds(token_to_ix, pretrained_emb)
stored_embeds.save()
pretrained_emb = np.array(pretrained_emb)
return token_to_ix, pretrained_emb
# def ans_stat(stat_ans_list, ans_freq):
# ans_to_ix = {}
# ix_to_ans = {}
# ans_freq_dict = {}
#
# for ans in stat_ans_list:
# ans_proc = prep_ans(ans['multiple_choice_answer'])
# if ans_proc not in ans_freq_dict:
# ans_freq_dict[ans_proc] = 1
# else:
# ans_freq_dict[ans_proc] += 1
#
# ans_freq_filter = ans_freq_dict.copy()
# for ans in ans_freq_dict:
# if ans_freq_dict[ans] <= ans_freq:
# ans_freq_filter.pop(ans)
#
# for ans in ans_freq_filter:
# ix_to_ans[ans_to_ix.__len__()] = ans
# ans_to_ix[ans] = ans_to_ix.__len__()
#
# return ans_to_ix, ix_to_ans
def ans_stat(json_file):
ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))
return ans_to_ix, ix_to_ans
# ------------------------------------
# ---- Real-Time Processing Utils ----
# ------------------------------------
def proc_img_feat(img_feat, img_feat_pad_size):
if img_feat.shape[0] > img_feat_pad_size:
img_feat = img_feat[:img_feat_pad_size]
img_feat = np.pad(
img_feat,
((0, img_feat_pad_size - img_feat.shape[0]), (0, 0)),
mode='constant',
constant_values=0
)
return img_feat
def proc_ques(ques, token_to_ix, max_token, add_cls=False):
if not add_cls:
ques_ix = np.zeros(max_token, np.int64)
start_ix = 0
max_len = max_token
else:
ques_ix = np.zeros(max_token + 1, np.int64)
ques_ix[0] = token_to_ix['[CLS]']
start_ix = 1
max_len = max_token + 1
words = get_words(ques['question'])
for ix, word in enumerate(words, start=start_ix):
if word in token_to_ix:
ques_ix[ix] = token_to_ix[word]
else:
ques_ix[ix] = token_to_ix['UNK']
if ix + 1 == max_len:
break
return ques_ix
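# get_score maps annotator agreement to a soft answer score
# (0 -> 0.0, 1 -> 0.3, 2 -> 0.6, 3 -> 0.9, 4+ -> 1.0), in the spirit of the VQA soft-accuracy metric.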
def get_score(occur):
if occur == 0:
return .0
elif occur == 1:
return .3
elif occur == 2:
return .6
elif occur == 3:
return .9
else:
return 1.
def proc_ans(ans, ans_to_ix):
ans_score = np.zeros(ans_to_ix.__len__(), np.float32)
ans_prob_dict = {}
for ans_ in ans['answers']:
ans_proc = prep_ans(ans_['answer'])
if ans_proc not in ans_prob_dict:
ans_prob_dict[ans_proc] = 1
else:
ans_prob_dict[ans_proc] += 1
for ans_ in ans_prob_dict:
if ans_ in ans_to_ix:
ans_score[ans_to_ix[ans_]] = get_score(ans_prob_dict[ans_])
return ans_score
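# Editor's note: illustrative sketch only, not part of the original code; the answer
# vocabulary and the 10 toy annotations are assumptions, and it assumes prep_ans()
# leaves simple lowercase answers like 'yes'/'no' unchanged. It shows how proc_ans()
# turns repeated human answers into the soft scores produced by get_score().
def _proc_ans_usage_sketch():
    toy_ans_to_ix = {'yes': 0, 'no': 1, '2': 2}
    toy_annotation = {'answers': [{'answer': 'yes'}] * 7 + [{'answer': 'no'}] * 3}
    print(proc_ans(toy_annotation, toy_ans_to_ix))  # -> [1.0, 0.9, 0.0]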
def refset_collate(batch):
tgt, refs, label, pos, qid_data = zip(*batch)
tgt, label, pos = default_collate(tgt), default_collate(label), default_collate(pos)
refs = list(refs)
n_refs = len(refs[0]) # number of reference examples
batched_refs = []
for i in range(n_refs):
ref_i_all = [per_row[i] for per_row in refs]
ref_i_batched = default_collate(ref_i_all)
batched_refs.append(ref_i_batched)
return tgt, batched_refs, label, pos, qid_data
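# Editor's note: hedged sketch, not from the original repo. refset_collate has the
# signature of a torch DataLoader collate_fn; the toy dataset below is a made-up
# stand-in whose items follow the (tgt, refs, label, pos, qid) layout used above.
def _refset_collate_usage_sketch():
    import torch
    from torch.utils.data import DataLoader
    def toy_item(qid):
        tgt = (torch.zeros(8, dtype=torch.long), torch.zeros(4), torch.zeros(4))
        refs = [tgt, tgt]  # two reference examples per target
        return tgt, refs, torch.tensor(1), torch.tensor(0), qid
    loader = DataLoader([toy_item('q1'), toy_item('q2')], batch_size=2,
                        collate_fn=refset_collate)
    tgt, batched_refs, label, pos, qid_data = next(iter(loader))
    # tgt and every entry of batched_refs are now batched tuples of tensors,
    # ready to be moved to the GPU with refset_tocuda() on a CUDA machine.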
def refset_tocuda(refset_data):
tgt, batched_refs, label, pos, qid_data = refset_data
label, pos = label.cuda(), pos.cuda()
tgt = (tgt[0].cuda(),tgt[1].cuda(), tgt[2].cuda())
if all(len(x) for x in batched_refs):
batched_refs = [(x[0].cuda(), x[1].cuda(), x[2].cuda()) for x in batched_refs]
return tgt, batched_refs, label, pos, qid_data
def refset_point_refset_index(question_list, max_token, novel_indices=None, aug_factor=1):
# This assumes that each concept only appears once in the question.
# If this is a bad assumption, then we need to iterate over question['concepts']
n_questions = len(question_list)
is_novel = [False for _ in range(n_questions)]
if novel_indices:
for x in novel_indices:
is_novel[x] = True
rs_idx = []
count_novel = 0
for qidx, question in enumerate(question_list):
if question.get('refsets', None):
for c, crefs in question['refsets'].items():
has_refs = True
for dkey, vals in crefs.items():
if not len(vals['index']) or not len(vals['question_id']):
has_refs = False
break
                # Assumes each concept appears only once.
if get_concept_position(question, c) < max_token and has_refs:
rs_idx.append((qidx, c))
if is_novel[qidx]:
for _ in range(aug_factor-1):
rs_idx.append((qidx, c))
count_novel += 1
    print('Added {x} novel questions to the refset'.format(x=count_novel))
return rs_idx
def prune_refsets(question_list, refset_sizes, max_token):
for question in question_list:
if question.get('refsets', None):
for c, crefs in question['refsets'].items():
for dkey, _ in refset_sizes:
for i, idx in reversed(list(enumerate(crefs[dkey]['index']))):
if len(get_words(question_list[idx]['question'])) > max_token:
crefs[dkey]['index'].pop(i)
crefs[dkey]['question_id'].pop(i)
return question_list
def get_concept_position(question, concept):
# This assumes that concepts are only 1 word. Also, as in refset_index(), we assume each concept appears once.
return question['concepts'].get(concept, [[-1]])[0][0]
def do_token_masking(token_id, token_to_ix, mask_mode):
    # target = always replace the target token with [MASK]
    # bert   = follow the BERT scheme below (80% [MASK], 10% keep, 10% random word)
    # even   = 50/50 chance of [MASK] or keeping the token unchanged
# https://github.com/google-research/bert/blob/0fce551b55caabcfba52c61e18f34b541aef186a/create_pretraining_data.py#L342
masked_token_id = None
if mask_mode == 'target':
masked_token_id = token_to_ix['[MASK]']
elif mask_mode == 'bert':
# 80% of the time, replace with [MASK]
if random.random() < 0.8:
masked_token_id = token_to_ix['[MASK]']
else:
# 10% of the time, keep original
if random.random() <= 0.5:
masked_token_id = token_id
# 10% of the time, replace with random word
else:
masked_token_id = random.randint(4, len(token_to_ix) - 1) # start at 4 to account for PAD, UNK, MASK, CLS
elif mask_mode == 'even':
if random.random() <= 0.5:
masked_token_id = token_to_ix['[MASK]']
else:
masked_token_id = token_id
elif mask_mode is None or mask_mode.lower() == 'none':
masked_token_id = token_id
else:
raise ValueError('mask_mode must be in [target, bert, even, none/None]')
return masked_token_id
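# Editor's note: illustrative sketch, not part of the original code; the toy vocabulary
# is an assumption. It compares the masking modes documented above ('bert' and 'even'
# are stochastic, so their outputs vary between runs).
def _do_token_masking_usage_sketch():
    toy_token_to_ix = {'PAD': 0, 'UNK': 1, '[MASK]': 2, '[CLS]': 3, 'cat': 4, 'dog': 5}
    token_id = toy_token_to_ix['cat']
    print(do_token_masking(token_id, toy_token_to_ix, 'target'))  # always 2 ([MASK])
    print(do_token_masking(token_id, toy_token_to_ix, 'bert'))    # ~80% [MASK], ~10% kept, ~10% random word
    print(do_token_masking(token_id, toy_token_to_ix, 'even'))    # ~50% [MASK], ~50% kept
    print(do_token_masking(token_id, toy_token_to_ix, None))      # always the original id (4)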
def filter_concept_skill(ques_list, ans_list, concept, skill):
N, N_ans = len(ques_list), len(ans_list)
assert N == N_ans
novel_ques_ids, novel_indices = get_novel_ids(ques_list, concept, skill)
count = 0
    for idx in reversed(novel_indices):  # go back to front so earlier indices stay valid
        del ques_list[idx]
        del ans_list[idx]
        count += 1
    print('Removed {x} novel questions from the current split'.format(x=count))
print('New dataset size is {x}'.format(x=len(ques_list)))
def get_novel_ids(ques_list, concept, skill):
novel_ids, novel_indices = [], []
if not concept: return novel_ids, novel_indices
if isinstance(concept, str):
concept = concept.split(',')
concept_set = set(concept)
N = len(ques_list)
for i in range(N):
ques = ques_list[i]
if 'all_concepts' not in ques:
curr_concepts = set(ques['concepts'])
else:
curr_concepts = set(ques['all_concepts'])
found_concept = bool(len(concept_set & curr_concepts))
if not found_concept:
continue
if (skill is None or skill.lower() == 'none') or ques['skill'] == skill:
# Found a match, add question id
novel_ids.append(ques['question_id'])
novel_indices.append(i)
    print('Found {x} novel question ids'.format(x=len(novel_ids)))
return novel_ids, novel_indices
def sample_references(question, concept, reftype_key, n_samples=1):
return random.sample(question['refsets'][concept][reftype_key]['index'], k=n_samples)
def sample_refset(question, concept, refset_sizes):
sampled_rs = []
for dkey, n_samples in refset_sizes:
sampled_rs.append(sample_references(question, concept, dkey, n_samples))
return sampled_rs
def build_skill_references(question_list):
skill_refs = []
for i, ques in enumerate(question_list):
if ques.get('skill_refset', None):
if len(ques['skill_refset']['pos']) and len(ques['skill_refset']['neg']) > 1:
skill_refs.append((i, 'none'))
return skill_refs
def sample_contrasting_skills(question, n_pos_samples, n_neg_samples):
pos_samples_ = random.sample(question['skill_refset']['pos'], k=n_pos_samples)
neg_samples_ = random.sample(question['skill_refset']['neg'], k=n_neg_samples)
return pos_samples_, neg_samples_
|
StarcoderdataPython
|
9799161
|
<filename>api/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('', views.simple_request, name='send_comment'),
path('image/', views.image_request, name='send_image')
]
|
StarcoderdataPython
|
3367238
|
<filename>util/detector.py
from detectron2.data.detection_utils import read_image
from detectron2.config import get_cfg
from detectron2.engine.defaults import DefaultPredictor
import numpy as np
import cv2
import os
import glob
from tqdm import tqdm
import pickle
class Args():
def __init__(self):
self.config_file = 'util/detector_configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml'
self.opts = ['MODEL.WEIGHTS', './models/model_detector/model_final_68b088.pkl']
self.confidence_threshold = 0.5
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def makedir(path):
if not os.path.exists(path):
os.makedirs(path)
args = Args()
cfg = setup_cfg(args)
predictor = DefaultPredictor(cfg)
def detect(imgname):
img = read_image(imgname, format="BGR")
predictions = predictor(img)['instances'].get_fields()
bboxes, scrs, classes = predictions['pred_boxes'].tensor.cpu().numpy(), predictions['scores'].cpu().numpy(), predictions['pred_classes'].cpu().numpy()
bboxes, scrs, classes = list(bboxes), list(scrs), list(classes)
boxes = []
for bbox,scr,c in zip(bboxes,scrs,classes):
if c==0:
# select the person class
boxes.append(bbox)
return img, boxes
def crop(img, bboxes):
results = []
resboxes = []
for b in bboxes:
wh = max(b[2]-b[0], b[3]-b[1])
center = [0.5*(b[2]+b[0]), 0.5*(b[3]+b[1])]
wh = wh * 1.4
corner = [ center[0] - wh*0.5, center[1] - wh*0.5 ]
H = np.float32([[1,0,-corner[0]], [0,1,-corner[1]]])
cropped = cv2.warpAffine(img, H, (int(wh),int(wh)))
results.append(cropped)
newbox = [corner[0], corner[1], corner[0]+wh, corner[1]+wh]
resboxes.append(newbox)
return results, resboxes
def run_frames(path):
print('Detecting: ', path)
makedir(os.path.join(path, 'bboxes/'))
for i in tqdm(glob.glob(os.path.join(path,'imgs/*.jpg'))):
i = i.replace('\\','/')
img, bboxes = detect(i)
cropped, bboxes = crop(img, bboxes)
foldername = os.path.basename(i)
foldername = os.path.splitext(foldername)[0]
makedir( os.path.join(path, 'cropped/%s/'%(foldername)) )
        for j, c in enumerate(cropped):  # use j here: the outer loop already uses i for the image path
            cv2.imwrite( os.path.join(path, 'cropped/%s/%04d.png'%(foldername,j)), c)
pickle.dump(bboxes, open( os.path.join(path, 'bboxes/%s.pkl'%(foldername)), 'wb'))
if __name__=='__main__':
run_frames('./data/')
|
StarcoderdataPython
|
212414
|
<gh_stars>1-10
#!/usr/bin/env python
import hglib
from hglib.util import b
import os
import sys
def main():
if 'BUILD_URL' not in os.environ:
print('Warning: This script should be called on jenkins only')
return -1
if len(sys.argv) > 2:
print('Unknown parameter: {}'.format(sys.argv))
return -1
patch = b(sys.argv[1]) if len(sys.argv) == 2 else b('../patch.diff')
if not os.path.isfile(patch):
        print('Patch file "{}" does not exist'.format(patch))
return -1
cfg = ['extensions.hgext.purge=',
'extensions.hgext.strip=',
'phases.new-commit=draft']
client = hglib.open(configs=cfg)
print('Revert workspace...')
client.revert(b('.'), all=True, nobackup=True)
print('Purge workspace...')
client.rawcommand([b('purge'), b('--all')])
revs = len(client.log(revrange='secret() or draft()'))
print('Found secret/draft changesets: {}'.format(revs))
if revs > 0:
print('Strip secret and draft changesets...')
client.rawcommand([b('strip'),
b('-r'),
b('secret() or draft()'),
b('--no-backup'),
b('--force')])
print('Import patch: {}'.format(patch))
client.import_([patch],
user='jenkins@review',
date='today',
message='jenkins patch review')
return 0
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
9708665
|
import logging
from app.config_common import *
# DEBUG should only be set to True in a development environment, for security reasons
DEBUG = True
# Secret key for generating tokens
SECRET_KEY = 'ishant'
# Admin credentials
ADMIN_CREDENTIALS = ('admin', 'admin')
# Database choice
SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# Configuration of a Gmail account for sending mails
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = 'ishantbansal162'
MAIL_PASSWORD = '<PASSWORD>'
ADMINS = ['<EMAIL>']
# Number of times a password is hashed
BCRYPT_LOG_ROUNDS = 12
LOG_LEVEL = logging.DEBUG
LOG_FILENAME = 'activity.log'
LOG_MAXBYTES = 1024
LOG_BACKUPS = 2
UPLOAD_FOLDER = '/home/ishant/Desktop/flask/app/static'
|
StarcoderdataPython
|
5196767
|
# See LICENSE file for full copyright and licensing details.
from odoo.tests import common
from datetime import datetime
from dateutil.relativedelta import relativedelta as rd
class TestAttendance(common.TransactionCase):
def setUp(self):
super(TestAttendance, self).setUp()
self.daily_attendance_obj = self.env['daily.attendance']
self.student_leave_request = self.env['studentleave.request']
self.teacher = self.env.ref('school.demo_school_teacher_2')
self.school_std = self.env.ref('school.demo_school_standard_2')
self.academic_year = self.env.ref('school.demo_academic_year_2')
self.month = self.env.ref('school.demo_academic_month_current_6')
self.stud_id = self.env.ref('school.demo_student_student_5')
self.daily_attendance_line_obj = self.env['daily.attendance.line']
self.monthly_attendance_obj = self.env['monthly.attendance.sheet']
self.sheet_line = self.env['attendance.sheet.line']
self.attendance_sheet_obj = self.env['attendance.sheet']
self.attend_report_obj = self.env['student.attendance.by.month']
current_date = datetime.now()
old_date = current_date - rd(days=27)
attend_date = datetime.strftime(old_date, '%m/%d/%Y')
leave_start_date = current_date + rd(days=10)
leave_start = datetime.strftime(leave_start_date, '%m/%d/%Y')
leave_end_date = current_date + rd(days=20)
leave_end = datetime.strftime(leave_end_date, '%m/%d/%Y')
# create daily attendance
self.daily_attendance = self.daily_attendance_obj.\
create({'user_id': self.teacher.id,
'standard_id': self.school_std.id,
'date': attend_date,
})
self.daily_attendance._compute_total()
self.daily_attendance._compute_present()
self.daily_attendance._compute_absent()
self.daily_attendance.onchange_standard_id()
self.daily_attendance.attendance_draft()
self.daily_attendance.attendance_validate()
self.daily_attendance_line = self.daily_attendance_line_obj.\
search([('standard_id', '=', self.daily_attendance.id)])
for rec in self.daily_attendance_line:
rec.onchange_attendance()
rec.onchange_absent()
# Monthly attendance
self.monthly_attendance = self.monthly_attendance_obj.\
create({'standard_id': self.school_std.id,
'year_id': self.academic_year.id,
'month_id': self.month.id
})
self.monthly_attendance.monthly_attendance_sheet_open_window()
# Attendance sheet
self.attendance_sheet = self.attendance_sheet_obj.\
search([('standard_id', '=',
self.monthly_attendance.standard_id.id),
('year_id', '=', self.monthly_attendance.year_id.id),
('month_id', '=', self.monthly_attendance.month_id.id)])
self.attendance_sheet_obj.onchange_class_info()
self.sheet = self.sheet_line.search([('standard_id', '=',
self.attendance_sheet.id)])
for rec in self.sheet:
rec._compute_percentage()
self.studentleave_create = self.student_leave_request.\
create({'name': '<NAME>',
'student_id': self.stud_id.id,
'start_date': leave_start,
'end_date': leave_end,
'reason': 'Trip'})
self.studentleave_create.onchange_student()
self.studentleave_create._compute_days()
self.studentleave_create.toapprove_state()
self.studentleave_create.approve_state()
self.studentleave_create.reject_state()
def test_attendance(self):
self.assertEqual(self.daily_attendance.user_id,
self.daily_attendance.standard_id.user_id)
self.assertEqual(self.monthly_attendance.year_id,
self.monthly_attendance.month_id.year_id)
self.assertEqual(self.studentleave_create.student_id.state, 'done')
|
StarcoderdataPython
|
1944303
|
# -*- encoding: utf-8 -*-
import sys
sys.path.append("..")
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from ..grid import getClassificationGridDict
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from ..base import basePredict
import logging
import time
class autoClassify(basePredict):
"""
    Automate classification prediction.
    Fitting an autoClassify instance runs the training data through the supported
    supervised classification models (optionally with grid-search parameter tuning)
    to help find a well-performing model.
"""
def __init__(self,cv=3,verbosity="INFO",models=None,
encoder='label',scaler=None
,useGridtuning=False,gridDict = None
,score='roc_auc',random_state=None):
"""
        :param cv: number of cross-validation folds
        :param verbosity: logging level (default 'INFO'); must be one of the supported values
        :param models: list of model objects to run the training data through.
               A sample input is shown below. By default this is None, in which case the
               training data is run through all models supported by autopredict with their
               default parameters. If this argument is passed in, useGridtuning is
               overridden to False, i.e. you cannot grid search for parameter tuning.
               [LogisticRegression(random_state=100)
                ,DecisionTreeClassifier(random_state=100,max_depth=3)]
        :param encoder: decides how string/object columns in the input data are converted.
               autopredict currently supports only label encoding; if you prefer another
               scheme such as one-hot encoding, transform the input array before passing
               it in.
        :param scaler: decides the scaling strategy. Some prediction models perform better
               with scaled features, while others such as trees can handle unscaled values.
               Default is None; supported values are 'minmax' for sklearn's MinMaxScaler
               and 'standard' for sklearn's StandardScaler.
        :param useGridtuning: set this to True to run a grid search over the supported
               classifiers; the grid is selected from the configuration saved in
               ./grid/_bases.py in the dictionary gridDict.
        :param gridDict: relevant only when grid tuning is enabled with useGridtuning=True.
               In that case autopredict has ready-made parameter ranges that it tests the
               model performance on; to override them, pass your own values here, e.g.
               gridDict={'LogisticRegression': {'penalty': ['l2'], 'C': [0.001, 0.1, 1, 10]}, 'DecisionTreeClassifier': {'max_depth': [4, 5, 6, 7, 8, 9, 10]}}
               To see the default config used by autopredict, call
               grid.getClassificationGridDict() from autopredict.grid; you can take its
               output, tweak the values and pass it back in. You cannot add new keys to
               the dict (the existing keys are the only supported ones), but you can edit
               the values to change the grid-tuning parameters.
        :param score: scoring parameter to be used for grid search
        :param random_state: seed parameter
"""
## add checks here
if verbosity not in self._logSupport:
            raise Exception(f'verbosity parameter can only have the values {self._logSupport}')
logging.basicConfig(level=verbosity)
self.multiClass = False
super().__init__(objective=self._classify, cv=cv, verbosity=verbosity, models=models,
encoder=encoder, scaler=scaler
,useGridtuning=useGridtuning, gridDict=gridDict
, score=score, random_state=random_state)
def fit(self, X, y):
startTime = time.time()
if X.isna().any().sum() > 0:
            logging.warning('Input DataFrame passed has null values - autopredict '
                            'is going to replace the NaN entries with the most frequently '
                            'occurring value in each column')
try:
            # X.mode().iloc[0] holds the most frequent value of every column at once
            X = X.fillna(X.mode().iloc[0])
except Exception as e:
raise Exception(f'Failed to replace NAN values in the dataframe {str(e)}')
if self.scaler:
if self.scaler not in self._scaler_dict.keys():
raise ValueError(f'Scaler key not defined, look at the scaler parameter '
f'that is being passed in {self.scaler}')
X_scaled = self._scaleData(X, self._scaler_dict[self.scaler])
## check if any data to be converted from str/object
X = self._encodeData(X, self._encode_dict[self.encoder])
if self.scaler:
X_scaled = self._encodeData(X_scaled, self._encode_dict[self.encoder])
## check if binary classification
if len(y.value_counts()) > 2:
            logging.warning('More than 2 target classes detected; scoring '
                            "for grid search will be overridden to 'accuracy'")
self.score = 'accuracy'
self.multiClass = True
X_scaled = X_scaled if self.scaler else None
super().fit(X,X_scaled, y,self.multiClass)
## get scores
logging.info('Training of models complete')
logging.info(f'Total training time {round(time.time()-startTime,1)} seconds')
return self
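# Editor's note: hedged usage sketch, not part of the library source. It assumes the
# class is importable as documented and relies on the basePredict internals not shown
# here; the toy churn data below is entirely made up, and a dataset this small may be
# too tiny for some of the underlying models or CV splits.
def _autoclassify_usage_sketch():
    toy = pd.DataFrame({'age':  [25, 32, 47, 51, 62, 23, 36, 58, 44, 29],
                        'plan': ['a', 'b', 'a', 'c', 'b', 'a', 'c', 'b', 'a', 'c'],
                        'churn': [0, 0, 1, 1, 1, 0, 1, 1, 0, 0]})
    X, y = toy.drop(columns='churn'), toy['churn']
    clf = autoClassify(cv=2, scaler='minmax', useGridtuning=False, random_state=100)
    clf.fit(X, y)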
|
StarcoderdataPython
|
1618510
|
<gh_stars>10-100
import pandas as pd
from IPython import display
def side_by_side(df1, df2, name1='', name2=''):
if isinstance(df1, pd.Series):
df1 = df1.to_frame(name=df1.name)
if isinstance(df2, pd.Series):
df2 = df2.to_frame(name=df2.name)
inline = 'style="display: float; max-width:50%" class="table"'
q = '''
<div class="table-responsive col-md-6">{}</div>
<div class="table-responsive col-md-6">{}</div>
'''.format(df1.style.set_table_attributes(inline)
.set_caption(name1).render(),
df2.style.set_table_attributes(inline)
.set_caption(name2).render())
return display.HTML(q)
def join_date_time(df):
def convert(col):
s = (df[col].astype(str)
.str.replace('\.0', '')
.str.rjust(4, '0'))
td = pd.to_timedelta(s.str.slice(0, 2) + 'H' +
s.str.slice(-2) + 'm',
errors='coerce')
return df['fl_date'] + td
return df.assign(dep=convert('dep_time'),
arr=convert('arr_time'))
def show(df, max_rows=75, max_cols=50):
row = pd.option_context('display.max_rows', max_rows)
col = pd.option_context('display.max_columns', max_cols)
with row, col:
display.display(df)
|
StarcoderdataPython
|
3451209
|
<filename>src/core/fields.py
import uuid
from django.db import models
class UUIDPrimaryKey(models.UUIDField):
def __init__(self, **kwargs):
kwargs['primary_key'] = True
kwargs.setdefault('editable', False)
kwargs.setdefault('default', uuid.uuid4)
super().__init__(**kwargs)
|
StarcoderdataPython
|
196576
|
import numpy as np
import pandas as pd
from scipy import ndimage
import json
import h5py
import keras
def preprocess_input(x):
x /= 255.
x -= 0.5
x *= 2.
    return x  # normalize the input to the range [-1, 1]
def extract_lable(path):
with open(path,'rb') as f:
data=json.load(f)
data=pd.DataFrame.from_dict(data)
del data['image_url']
data.sort_values(by='image_id', inplace=True)
data = data.reset_index(drop=True)
image_file=data['image_id']
label= np.array( list(data['label_id'])).astype(np.int32)
    label = keras.utils.to_categorical(label, 80)  # one-hot encode the 80 class labels
    return image_file, label  # return the image file names and their labels
def main():
image_file, label = extract_lable('image/ai_challenger_scene_train_20170904/scene_train_annotations_20170904.json')
image_path = 'image/resize_image_train/' + image_file
for times in range(539):
if times == 0:
h5f = h5py.File('data/train_data.h5', 'w')
x = h5f.create_dataset("x_train", (100, 299, 299,3),maxshape=(None, 299, 299,3),
# chunks=(1, 1000, 1000),
dtype=np.float32)
            y = h5f.create_dataset('y_train', (100, 80), maxshape=(None, 80), dtype=np.int32)  # h5py lets us read/write datasets that are larger than memory
else:
h5f = h5py.File('data/train_data.h5', 'a')
x = h5f['x_train']
y = h5f['y_train']
        # Key point: h5f and the dataset objects do not hold the actual data,
        # only metadata about it, so they take up almost no memory.
        # Data is only read into memory when the dataset is indexed (e.g. dataset[0:10])
        # or its value is accessed (e.g. dataset.value or dataset[()]).
image = np.array(list(map(lambda x: ndimage.imread(x, mode='RGB'), image_path[times*100:(times+1)*100]))).astype(np.float32)
        # resize the datasets to reserve more storage (this could also be grown in larger steps at once)
image = preprocess_input(image)
ytem = label[times*100:(times+1)*100]
if times != 538:
x.resize([times * 100 + 100, 299, 299,3])
y.resize([times * 100 + 100, 80])
            # the data is loaded into memory and written to the file here
x[times * 100:times * 100 + 100] = image
y[times * 100:times * 100 + 100] = ytem
# print(sys.getsizeof(h5f))
print('%d images are dealed with' %(times))
else:
x.resize([times * 100 + 79, 299, 299, 3])
y.resize([times * 100 + 79, 80])
            # the data is loaded into memory and written to the file here
x[times * 100:times * 100 + 79] = image
y[times * 100:times * 100 + 79] = ytem
# print(sys.getsizeof(h5f))
print('%d images are dealed with' % (times))
h5f.close()
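# Editor's note: hedged sketch, not part of the original script; the slice size is an
# arbitrary assumption. Once train_data.h5 has been written by main(), the datasets can
# be opened lazily and read back slice by slice, which is the point of the chunked
# writing above.
def _read_back_sketch():
    with h5py.File('data/train_data.h5', 'r') as h5f:
        x, y = h5f['x_train'], h5f['y_train']  # lazy handles; nothing is loaded yet
        print(x.shape, y.shape)
        batch_x = x[:32]  # only this slice is read into memory
        batch_y = y[:32]
        print(batch_x.shape, batch_y.shape)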
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
4968796
|
import numpy as np
import tensorflow as tf
from tasks import Task
class CopyTask(Task):
epsilon = 1e-2
def __init__(self, vector_size, min_seq, train_max_seq, n_copies):
self.vector_size = vector_size
self.min_seq = min_seq
self.train_max_seq = train_max_seq
self.n_copies = n_copies
self.max_seq_curriculum = self.min_seq + 1
self.max_copies = 5
self.x_shape = [None, None, self.vector_size]
self.y_shape = [None, None, self.vector_size]
self.mask = [None, None, self.vector_size]
# Used for curriculum training
self.state = 0
self.consecutive_thresh = 100
def update_training_state(self, cost):
if cost <= CopyTask.epsilon:
self.state += 1
else:
self.state = 0
def check_lesson_learned(self):
if self.state < self.consecutive_thresh:
return False
else:
return True
def next_lesson(self):
self.state = 0
# if self.max_seq_curriculum < self.train_max_seq:
# self.max_seq_curriculum += 1
# print("Increased max_seq to", self.max_seq_curriculum)
if self.n_copies < 5:
self.n_copies += 1
print("Increased n_copies to", self.n_copies)
else:
print("Done with the training!!!")
def update_state(self, cost):
self.update_training_state(cost)
if self.check_lesson_learned():
self.next_lesson()
def cost(self, outputs, correct_output, mask=None):
sigmoid_cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=outputs,
labels=correct_output)
return tf.reduce_mean(sigmoid_cross_entropy)
def generate_data(self, batch_size=16, train=True, cost=9999):
if train:
# Update curriculum training state
self.update_state(cost)
self.max_seq_curriculum = self.train_max_seq
# self.n_copies = self.max_copies
data_batch = CopyTask.generate_n_copies(batch_size, self.vector_size, self.min_seq,
self.max_seq_curriculum,
self.n_copies)
else:
data_batch = CopyTask.generate_n_copies(batch_size, self.vector_size, self.train_max_seq,
self.train_max_seq,
self.n_copies)
return data_batch
def display_output(self, prediction, data_batch, mask):
pass
def test(self, sess, output, pl, batch_size):
pass
@staticmethod
def generate_n_copies(batch_size, inp_vector_size, min_seq, max_seq, n_copies):
copies_list = [
CopyTask.generate_copy_pair(batch_size, inp_vector_size, min_seq, max_seq)
for _ in range(n_copies)]
output = np.concatenate([i[0] for i in copies_list], axis=2)
total_length = np.sum([i[1] for i in copies_list])
mask = np.ones((batch_size, total_length, inp_vector_size))
return output, [total_length] * batch_size, mask
@staticmethod
def generate_copy_pair(batch_size, vector_size, min_s, max_s):
"""
:param batch_size:
:param vector_size:
:param min_s:
:param max_s:
        :return: tuple of (np array of shape [2, batch_size, total_length, vector_size]
                 stacking the input and target sequences, total_length)
"""
sequence_length = np.random.randint(min_s, max_s + 1)
total_length = 2 * sequence_length + 2
shape = (batch_size, total_length, vector_size)
inp_sequence = np.zeros(shape, dtype=np.float32)
out_sequence = np.zeros(shape, dtype=np.float32)
for i in range(batch_size):
ones = np.random.binomial(1, 0.5, (1, sequence_length, vector_size - 1))
inp_sequence[i, :sequence_length, :-1] = ones
out_sequence[i, sequence_length + 1:2 * sequence_length + 1, :-1] = ones
inp_sequence[i, sequence_length, -1] = 1 # adding the marker, so the network knows when to start copying
return np.array([inp_sequence, out_sequence]), total_length
if __name__ == "__main__":
b = 5
v = 3
total = 12
min_s = 1
max_s = int((total - 2) / 2)
n_copies = 2
val = CopyTask.generate_n_copies(b, v, min_s, max_s, n_copies)
print(val)
|
StarcoderdataPython
|
3470129
|
<filename>HW2 - State Estimation/ZHAO_FRANKLIN_HW2.py
Rank of Observability Matrix for four-state system: 3
|
StarcoderdataPython
|
9719288
|
<filename>cjsite/__init__.py
default_app_config = "cjsite.apps.AppConfig"
|
StarcoderdataPython
|
6465350
|
<gh_stars>100-1000
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple datapath control framework for POX datapaths
"""
from pox.core import core
from pox.lib.ioworker.workers import *
from pox.lib.ioworker import *
from pox.lib.revent import *
# IOLoop for our IO workers
_ioloop = None
# Log
log = None
class CommandEvent (Event):
"""
Event fired whenever a command is received
"""
def __init__ (self, worker, cmd):
super(CommandEvent,self).__init__()
self.worker = worker
self.cmd = cmd
@property
def first (self):
return self.cmd.strip().split()[0]
@property
def args (self):
return self.cmd.strip().split()[1:]
def __str__ (self):
return "<%s: %s>" % (self.worker, self.cmd)
class ServerWorker (TCPServerWorker, RecocoIOWorker):
"""
Worker to accept connections
"""
pass
#TODO: Really should just add this to the ioworker package.
class Worker (RecocoIOWorker):
"""
Worker to receive POX dpctl commands
"""
def __init__ (self, *args, **kw):
super(Worker, self).__init__(*args, **kw)
self._connecting = True
self._buf = b''
def _process (self, data):
self._buf += data
while '\n' in self._buf:
fore,self._buf = self._buf.split('\n', 1)
core.ctld.raiseEventNoErrors(CommandEvent, self, fore)
def _handle_rx (self):
    # read() drains this worker's receive buffer; hand everything to the line parser once
    self._process(self.read())
def _exec (self, msg):
msg.split()
class Server (EventMixin):
"""
Listens on a TCP socket for control
"""
_eventMixin_events = set([CommandEvent])
def __init__ (self, port = 7791):
self.port = port
w = ServerWorker(child_worker_type=Worker, port = port)
self.server_worker = w
_ioloop.register_worker(w)
def create_server (port = 7791):
# Set up logging
global log
if not log:
log = core.getLogger()
# Set up IO loop
global _ioloop
if not _ioloop:
_ioloop = RecocoIOLoop()
#_ioloop.more_debugging = True
_ioloop.start()
c = Server(port = int(port))
return c
def server (port = 7791):
c = create_server(int(port))
core.register("ctld", c)
def launch (cmd, address = None, port = 7791):
core.quit()
if not address:
address = "127.0.0.1"
import socket
core.getLogger('core').setLevel(100)
log = core.getLogger('ctl')
try:
s = socket.create_connection((address,port), timeout=2)
except:
log.error("Couldn't connect")
return
try:
s.settimeout(2)
s.send(cmd + "\n")
d = s.recv(4096).strip()
core.getLogger("ctl").info(d)
except socket.timeout:
log.warn("No response")
except:
log.exception("While communicating")
|
StarcoderdataPython
|
8027186
|
#!/bin/python
"""
@author <NAME> (renegat0x0)
"""
import base64
import os
import re
import argparse
import logging
import shutil
import traceback
import sys
from io import BytesIO
import ftpshutil.dircrc as dircrc
logging.basicConfig(level=logging.INFO)
from ftplib import *
def ftp_path_join(*paths):
""" FTP paths should have Linux OS separator? """
joined = os.path.join(*paths)
return joined.replace("\\", "/")
def normpath(path):
""" FTP paths should have Linux OS separator? """
path = os.path.normpath(path)
path = path.replace("\\", "/")
return path
def safe_root(path):
""" os path join will not work on a directory that starts with root """
if len(path) > 0:
if path[0] == '/' or path[0] == '\\':
path = path[1:]
return path
def make_root(path):
if path[0] != "/":
path = "/"+path
return path
def listdir_ex(ftplib, path):
lines = []
files = []
dirs = []
ftplib.cwd(path)
ftplib.retrlines('LIST', lines.append)
for line in lines:
m = re.search("([a-z-]*\s*\d*\s*\w*\s*\w*\s*\d*\s*\w*\s*\d*\s*\d*[:]*\d*\s*)", line)
fname = line[m.end(1):]
if line.find("d") == 0:
ftype = "dir"
elif line.find('-') == 0:
ftype = "file"
else:
ftype = "none"
if ftype == "dir":
dirs.append(fname)
if ftype=="file":
files.append(fname)
return dirs, files
def walk_ftp_dir(ftp_shutil_obj, root_dir, topdown=True):
ftp = ftp_shutil_obj.get_ftplib_handle()
dirs, files = listdir_ex(ftp, root_dir)
if not topdown:
for inner_dir in dirs:
for inner_root, inner_dirs, inner_files in walk_ftp_dir( ftp_shutil_obj, ftp_path_join(root_dir, inner_dir), topdown):
yield ftp_path_join(root_dir,inner_root), inner_dirs, inner_files
yield root_dir, dirs, files
if topdown:
for inner_dir in dirs:
for inner_root, inner_dirs, inner_files in walk_ftp_dir( ftp_shutil_obj, ftp_path_join(root_dir, inner_dir), topdown):
yield ftp_path_join(root_dir,inner_root), inner_dirs, inner_files
class FTPShutil(object):
def __init__(self, host='', user='', passwd=''):
self._ftp = FTP(host, user=user, passwd=<PASSWORD> ) # connect to host, default port
def login(self, user, passwd):
return self._ftp.login(user, passwd)
def quit(self):
self._ftp.quit()
def get_ftplib_handle(self):
return self._ftp
def listdir(self, root_dir):
dir_list = self._ftp.nlst(root_dir)
dir_list = [ os.path.split(x)[1] for x in dir_list]
return dir_list
def listdir_ex(self, root_dir):
ftp = self.get_ftplib_handle()
return listdir_ex(ftp, root_dir)
def read(self, file_path):
r = BytesIO()
self._ftp.retrbinary('RETR {0}'.format(file_path), r.write)
return r.getvalue()
def write(self, file_path, data):
r = BytesIO(data)
self._ftp.storbinary('STOR {0}'.format(file_path), r)
def isfile(self, file_path):
path, file_name = os.path.split(file_path)
dirs, files = self.listdir_ex(path)
return file_name in files
def isdir(self, dir_path):
path, dir_name = os.path.split(dir_path)
dirs, files = self.listdir_ex(path)
return dir_name in dirs
def exists(self, path):
        '''
        @brief Check whether the given path exists on the FTP server.
        '''
logging.info("Checking if path exists {0}".format(path))
split_name = os.path.split(path)
try:
dir_list = self.listdir(split_name[0])
except Exception as E:
print(E)
return False
if not split_name[1] in dir_list:
return False
return True
def safe_remove(self, path):
try:
if self.isfile(path):
self._ftp.delete(path)
elif self.isdir(path):
self.rmtree(path)
else:
raise IOError("FTP: Specified path does not exist: {0}".format(path))
except Exception as E:
print("Problem with removing: {0}".format(path))
#raise E
def remove_file(self, path):
try:
path = normpath(path)
self._ftp.delete(path)
except Exception as E:
print("Problem with removing: {0}".format(path))
raise E
def remove_dir(self, path):
try:
path = normpath(path)
self._ftp.rmd(path)
except Exception as E:
print("Problem with removing: {0}".format(path))
raise E
def rmtree(self, directory):
logging.info("Removing directory: {0}".format(directory))
for root, dirs, files in walk_ftp_dir(self, directory, False):
for afile in files:
root_file = ftp_path_join(root, afile)
self.remove_file(root_file)
for adir in dirs:
self.rmtree( ftp_path_join(root, adir))
self.remove_dir(directory)
def mkdir(self, path):
logging.info("Creating remote directory {0}".format(path))
split_name = os.path.split(path)
self._ftp.cwd(split_name[0])
self._ftp.mkd(split_name[1])
def makedirs(self, path):
        '''
        @brief Recursively create directories on the FTP server (FTP counterpart of os.makedirs).
        '''
split_name = os.path.split(path)
        # stop the recursion once no parent component is left (cannot go above root)
        if split_name[0] == path:
            raise IOError("Could not create directories")
try:
self._ftp.cwd(split_name[0])
except error_perm as E:
self.makedirs(split_name[0])
self._ftp.cwd(split_name[0])
logging.info("Creating remote directories {0}".format(path))
self._ftp.mkd(split_name[1])
def rename(self, fromname, toname):
logging.info("Rename file: {0} -> {1}".format(fromname, toname))
self._ftp.rename(fromname, toname)
def uploadfile(self, local_file, remote_file):
'''
@brief Better supply with absolute FTP paths
'''
logging.info("Uploading file: {0} -> {1}".format(local_file, remote_file))
split_name = os.path.split(remote_file)
self._ftp.cwd(split_name[0])
self._ftp.storbinary('STOR {0}'.format(split_name[1]), open(local_file, 'rb'))
def downloadfile(self, remote_file, local_file):
logging.info("Downloading file: {0} -> {0}".format(remote_file, local_file) )
split_name = os.path.split(remote_file)
path = split_name[0]
path = make_root(path)
self._ftp.cwd(path)
with open( local_file, 'wb') as fp:
self._ftp.retrbinary('RETR {0}'.format(split_name[1]), fp.write)
def make_local(self, local_root_dir, remote_root_dir, rem_dirs, rem_files, crc_data = None):
"""
@brief Adapts the local site according to the remote files.
@TODO remove local directories that are not present on the remote site.
@TODO remove local files that are not present on the remote site.
"""
if crc_data is not None:
rem_files = crc_data.get_1st_more_files()
rem_dirs = crc_data.get_1st_more_dirs()
rem_files.extend(crc_data.get_modified_files())
redundant_things = crc_data.get_2nd_more_files()
redundant_things.extend( crc_data.get_2nd_more_dirs())
rem_files = [ os.path.split(item)[1] for item in rem_files ]
rem_dirs = [ os.path.split(item)[1] for item in rem_dirs ]
redundant_things = [ os.path.split(item)[1] for item in redundant_things ]
for redundant in redundant_things:
redundant = os.path.join(local_root_dir, redundant)
if os.path.isfile(redundant):
os.remove(redundant)
else:
shutil.rmtree(redundant)
for adir in rem_dirs:
full_dir = os.path.join(local_root_dir, adir)
if not os.path.isdir(full_dir):
os.makedirs(full_dir)
for afile in rem_files:
remote_file = ftp_path_join(remote_root_dir, afile)
local_file = os.path.join(local_root_dir, afile)
if not os.path.isdir(local_root_dir):
os.makedirs(local_root_dir)
self.downloadfile(remote_file, local_file)
def make_remote(self, remote_root_dir, local_root_dir, loc_dirs, loc_files, crc_data = None):
"""
@brief Adapts the local site according to the remote files.
@TODO remove local directories that are not present on the remote site.
@TODO remove local files that are not present on the remote site.
"""
if crc_data is not None:
loc_files = crc_data.get_2nd_more_files()
loc_dirs = crc_data.get_2nd_more_dirs()
loc_files.extend(crc_data.get_modified_files())
redundant_things = crc_data.get_1st_more_files()
redundant_things.extend( crc_data.get_1st_more_dirs())
loc_files = [ os.path.split(item)[1] for item in loc_files ]
loc_dirs = [ os.path.split(item)[1] for item in loc_dirs ]
redundant_things = [ os.path.split(item)[1] for item in redundant_things ]
for redundant in redundant_things:
redundant = ftp_path_join(remote_root_dir, redundant)
self.safe_remove(redundant)
for adir in loc_dirs:
full_dir = ftp_path_join(remote_root_dir, adir)
if not self.exists(full_dir):
self.makedirs(full_dir)
for afile in loc_files:
remote_file = ftp_path_join(remote_root_dir, afile)
local_file = os.path.join(local_root_dir, afile)
if not self.isdir(remote_root_dir):
self.makedirs(remote_root_dir)
self.uploadfile(local_file, remote_file)
def downloadtree(self, directory, destination):
'''
@brief Probably best to supply directory as an absolute FTP path.
'''
logging.info("Downloading tree: {0} -> {1}".format(directory, destination))
try:
for root, dirs, files in walk_ftp_dir(self, directory):
remote_root_dir = safe_root(root)
local_root_dir = os.path.join(destination, remote_root_dir)
self.make_local(local_root_dir, remote_root_dir, dirs, files)
except Exception as e:
print("Could not obtain directory {0}\n{1}".format(directory, str(e) ))
def uploadtree(self, directory, destination):
logging.info("Uploading tree: {0} -> {1}".format(directory, destination))
lastdir = os.path.split(directory)[1]
for root, dirs, files in os.walk(directory, topdown=True):
inner_root = root.replace(directory, "")
inner_root = safe_root(inner_root)
remote_root = ftp_path_join(destination, lastdir, inner_root)
if remote_root.endswith("/"):
remote_root = remote_root[:-1]
self.make_remote(remote_root, root, dirs, files)
def diff_dircrc(self, local_crc_file, remote_crc_file):
"""
@returns True if files are different, or one of the files is missing
False if both files are present and equal.
"""
if os.path.isfile(local_crc_file):
if self.exists(remote_crc_file):
remote_crc = self.read(remote_crc_file)
with open(local_crc_file, 'rb') as fh:
local_crc = fh.read()
if remote_crc == local_crc:
return False
return True
def downloadtree_sync(self, directory, destination):
'''
@brief Probably best to supply directory as an absolute FTP path.
'''
logging.info("Downloading tree: {0} -> {1}".format(directory, destination))
dircrc.create_dircrcs(destination)
try:
for root, dirs, files in walk_ftp_dir(self, directory, topdown=True):
remote_root_dir = safe_root(root)
local_root_dir = os.path.join(destination, remote_root_dir)
local_crc_file = os.path.join(local_root_dir, dircrc.CRC_FILE_NAME)
remote_crc_file = ftp_path_join(root, dircrc.CRC_FILE_NAME)
if not self.exists(remote_crc_file) or not os.path.isfile(local_crc_file):
logging.info("Processing directory: {0}".format(root))
if os.path.isdir(local_root_dir):
shutil.rmtree(local_root_dir)
self.make_local(local_root_dir, remote_root_dir, dirs, files)
else:
remote_crc_data = self.read(remote_crc_file).decode("utf-8")
with open(local_crc_file, 'r') as fh:
local_crc_data = fh.read()
comp = dircrc.Comparator(remote_crc_data, local_crc_data)
if comp.is_same():
logging.info("Skipping directory: {0}".format(root))
continue
logging.info("Processing directory: {0}".format(root))
self.make_local(local_root_dir, remote_root_dir, dirs, files, comp)
except Exception as e:
print("Could not obtain directory {0}\n{1}".format(directory, str(e) ))
def uploadtree_sync(self, directory, destination):
logging.info("Uploading tree: {0} -> {1}".format(directory, destination))
dircrc.create_dircrcs(directory)
lastdir = os.path.split(directory)[1]
for root, dirs, files in os.walk(directory, topdown=True):
inner_root = root.replace(directory, "")
inner_root = safe_root(inner_root)
remote_root = ftp_path_join(destination, lastdir, inner_root)
if remote_root.endswith("/"):
remote_root = remote_root[:-1]
remote_crc_file = ftp_path_join(remote_root, dircrc.CRC_FILE_NAME)
local_crc_file = os.path.join(root, dircrc.CRC_FILE_NAME)
if not self.exists(remote_crc_file) or not os.path.isfile(local_crc_file):
logging.info("Processing directory: {0}".format(root))
if self.isdir(remote_root):
self.rmtree(remote_root)
self.make_remote(remote_root, root, dirs, files)
else:
remote_crc_data = self.read(remote_crc_file).decode("utf-8")
with open(local_crc_file, 'r') as fh:
local_crc_data = fh.read()
comp = dircrc.Comparator(remote_crc_data, local_crc_data)
if comp.is_same():
logging.info("Skipping directory: {0}".format(root))
continue
logging.info("Processing directory: {0}".format(root))
self.make_remote(remote_root, root, dirs, files, comp)
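# Editor's note: hedged usage sketch, not part of the module; the host, credentials and
# paths below are placeholders. It shows the shutil-like surface of FTPShutil, using the
# *_sync variants that skip unchanged directories via the dircrc checksum files.
def _ftpshutil_usage_sketch():
    ftp = FTPShutil('ftp.example.com', user='demo', passwd='demo-password')
    if not ftp.exists('/backups'):
        ftp.makedirs('/backups')
    ftp.uploadtree_sync('local_site', '/backups')  # uploads to /backups/local_site
    ftp.downloadtree_sync('/backups/local_site', 'restored_copy')
    ftp.quit()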
|
StarcoderdataPython
|
5070473
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import time
if raw_input("restart swift or not (y/n):")=='y':
for k in os.popen('sudo python setup.py install').readlines():
pass
for j in os.popen('sudo swift-init main restart').readlines():
pass
# print j,
# time.sleep(0.02)
name = raw_input("Please input the name(for token):")
while not name:
name = raw_input("Please input the name(for token):")
if name == "Admin" or name == "sandy":
content = os.popen("curl -D- -H 'X-Storage-User:%s' -H 'X-Storage-Pass:<PASSWORD>' http://127.0.0.1:8080/auth/v1.0" %name).readlines()
else:
content = os.popen("curl -D- -H 'X-Storage-User:%s' http://127.0.0.1:8080/auth/v1.0" %name).readlines()
token = content[2].strip()
url = content[1].split(':',1)[-1].strip()
#for i in content:
# print i,
# time.sleep(0.3)
#print token
#getmethod = os.popen("curl -k -X GET -H '%s' %s" %(token,url)).readlines()
#for dd in getmethod:
# print dd,
# time.sleep(0.3)
geturl = '/'.join([url,'ytf'])
print "curl -X GET -H '%s' %s"%(token,geturl)
print "curl -X PUT -T ./1.txt -D- -H 'object_name:小酒窝' -H 'parent_secl_id:7' -H 'obj_seclevel:4' -H 'Content-Type:audio/mp3' -H '%s' %s" %(token,url)
|
StarcoderdataPython
|
265005
|
import boto3
import json
import datetime
import os
ecs_client = boto3.client('ecs')
alb = boto3.client('elbv2')
sts = boto3.client('sts')
sns = boto3.client('sns')
#CROSSACCOUNT DYNAMO ACCESS TO SHARED ACCOUNT
stsrolearn = os.environ['STSROLEARN']
response = sts.assume_role(RoleArn=stsrolearn, RoleSessionName='CrossAccountECSDynamoTableAccess')
aws_access_key_id=response['Credentials']['AccessKeyId']
aws_secret_access_key=response['Credentials']['SecretAccessKey']
aws_session_token=response['Credentials']['SessionToken']
dynamo_client = boto3.client('dynamodb', aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,aws_session_token=aws_session_token)
resource = boto3.resource('dynamodb', aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,aws_session_token=aws_session_token)
table = resource.Table('ECS_Inventory_TrainingDA')
#RETURN BODY FOR API
def return_body(status_code, message):
body = {
'statusCode': str(status_code),
'body': json.dumps(message),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
}
return body
def getstageenv_image(appname, task_env):
taskdef = appname + '-' + task_env
response = ecs_client.describe_task_definition(taskDefinition=taskdef)
image_uri = response['taskDefinition']['containerDefinitions'][0]['image']
print image_uri
return image_uri
### SEND SNS NOTIFICATION ###
def sendnotification(appname, env, prod_acc, version):
env_name = appname + "-" + env
sns_arn = "arn:aws:sns:us-east-1:" + prod_acc + ":ECSNotifications-" + env_name
sns_message = "New Version " + version + " is getting deployed to Blue Environment " + env_name
subject = "Deploying Version " + version + " to Environment " + env_name
response = sns.publish(TopicArn=sns_arn, Message=sns_message, Subject=subject)
print response
# CREATING ECS TASK DEFINITION
def create_taskdefinitions(apiname, repo_uri, env):
try:
container = {
'name': apiname + '-' + env,
'image': repo_uri,
'portMappings': [
{
"containerPort": 80,
"hostPort": 80,
"protocol": "tcp"
}
],
'essential': True
}
resp = ecs_client.register_task_definition(
family = apiname + '-' + env,
taskRoleArn = 'ecsTaskExecutionRole',
executionRoleArn = 'ecsTaskExecutionRole',
networkMode = 'awsvpc',
containerDefinitions=[container],
requiresCompatibilities=['FARGATE'],
cpu= "256",
memory= "512"
)
print resp
message = "Created Task Definiton " + apiname + '-' + env
print message
except Exception as e:
message = e
print message
# CREATING ECS SERVICE
def update_service(apiname, env, techteam):
try:
ecs_resp = ecs_client.update_service(
cluster=apiname,
service=apiname + '-' + env,
taskDefinition=apiname + '-' + env,
desiredCount=2,
forceNewDeployment=True,
healthCheckGracePeriodSeconds=30
)
print ecs_resp
except Exception as e:
message = e
print message
def lambda_handler(event, context):
body = json.loads(event['body'])
try:
appname = body['applicationName']
if appname == "":
status_code = 400
message = {"errorMessage": "applicationName cannot be empty"}
return_message = return_body(status_code, message)
return return_message
except KeyError:
status_code = 400
message = {"errorMessage": "applicationName needs to be mentioned"}
return_message = return_body(status_code, message)
return return_message
try:
dynamo_response = dynamo_client.get_item(TableName="ECS_Inventory_NonProduction", Key={'ApplicationName': {'S': appname}, 'Environment': {'S': 'STAGE'}}, AttributesToGet=['TechnicalTeam', 'Version'])
techteam = dynamo_response['Item']['TechnicalTeam']['S']
version = dynamo_response['Item']['Version']['S']
task_env = 'stage'
except Exception as e:
try:
dynamo_response = dynamo_client.get_item(TableName="ECS_Inventory_NonProduction", Key={'ApplicationName': {'S': appname}, 'Environment': {'S': 'QA'}}, AttributesToGet=['TechnicalTeam', 'Version'])
techteam = dynamo_response['Item']['TechnicalTeam']['S']
version = dynamo_response['Item']['Version']['S']
task_env = 'qa'
except Exception as e:
try:
dynamo_response = dynamo_client.get_item(TableName="ECS_Inventory_NonProduction", Key={'ApplicationName': {'S': appname}, 'Environment': {'S': 'DEV'}}, AttributesToGet=['TechnicalTeam', 'Version'])
techteam = dynamo_response['Item']['TechnicalTeam']['S']
version = dynamo_response['Item']['Version']['S']
task_env = 'dev'
except Exception as e:
print e
status_code = 409
message = {'errorMessage': appname + " does not exist. You must create the Stack first"}
return_message = return_body(status_code, message)
return return_message
devqa_vpc = os.environ['DEVQA_VPC']
devqa_subnet = os.environ['DEVQA_SUBNET']
devqa_sg = os.environ['DEVQA_SG']
nonprod_acc = os.environ['NONPROD_ACC']
repo_uri = getstageenv_image(appname, task_env)
env = ["training", "da"]
for eachenv in env:
create_taskdefinitions(appname, repo_uri, eachenv)
update_service(appname, eachenv, techteam)
sendnotification(appname, eachenv, nonprod_acc, version)
time = datetime.datetime.now()
resp = dynamo_client.update_item(TableName="ECS_Inventory_TrainingDA", Key={'ApplicationName': {'S': appname}, 'Environment': {'S': eachenv.upper()}}, AttributeUpdates={'Version':{'Value':{'S': version}}, 'Time':{'Value':{'S': str(time)}}})
print resp
status_code = 200
message = {'message': "Deployed version " + version + " to " + appname + " in Training and DA Environment"}
return_message = return_body(status_code, message)
return return_message
|
StarcoderdataPython
|
3355760
|
#-*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
TIMEZONE_CHOICES = [
('Etc/GMT+12', _("(GMT -12:00) Eniwetok, Kwajalein")),
('Etc/GMT+11', _("(GMT -11:00) Midway Island, Samoa")),
('Etc/GMT+10', _("(GMT -10:00) Hawaii")),
('Pacific/Marquesas', _("(GMT -9:30) Marquesas Islands")),
('Etc/GMT+9', _("(GMT -9:00) Alaska")),
('Etc/GMT+8', _("(GMT -8:00) Pacific Time (US & Canada)")),
('Etc/GMT+7', _("(GMT -7:00) Mountain Time (US & Canada)")),
('Etc/GMT+6', _("(GMT -6:00) Central Time (US & Canada), Mexico City")),
('Etc/GMT+5', _("(GMT -5:00) Eastern Time (US & Canada), Bogota, Lima")),
('America/Caracas', _("(GMT -4:30) Venezuela")),
('Etc/GMT+4', _("(GMT -4:00) Atlantic Time (Canada), Caracas, La Paz")),
('Etc/GMT+3', _("(GMT -3:00) Brazil, Buenos Aires, Georgetown")),
('Etc/GMT+2', _("(GMT -2:00) Mid-Atlantic")),
('Etc/GMT+1', _("(GMT -1:00) Azores, Cape Verde Islands")),
('UTC', _("(GMT) Western Europe Time, London, Lisbon, Casablanca")),
('Etc/GMT-1', _("(GMT +1:00) Brussels, Copenhagen, Madrid, Paris")),
('Etc/GMT-2', _("(GMT +2:00) Kaliningrad, South Africa")),
('Etc/GMT-3', _("(GMT +3:00) Baghdad, Riyadh, Moscow, St. Petersburg")),
('Etc/GMT-4', _("(GMT +4:00) Abu Dhabi, Muscat, Baku, Tbilisi")),
('Asia/Kabul', _("(GMT +4:30) Afghanistan")),
('Etc/GMT-5', _("(GMT +5:00) Ekaterinburg, Islamabad, Karachi, Tashkent")),
('Asia/Kolkata', _("(GMT +5:30) India, Sri Lanka")),
('Asia/Kathmandu', _("(GMT +5:45) Nepal")),
('Etc/GMT-6', _("(GMT +6:00) Almaty, Dhaka, Colombo")),
('Indian/Cocos', _("(GMT +6:30) Cocos Islands, Myanmar")),
('Etc/GMT-7', _("(GMT +7:00) Bangkok, Hanoi, Jakarta")),
('Etc/GMT-8', _("(GMT +8:00) Beijing, Perth, Singapore, Hong Kong")),
('Australia/Eucla', _("(GMT +8:45) Australia (Eucla)")),
('Etc/GMT-9', _("(GMT +9:00) Tokyo, Seoul, Osaka, Sapporo, Yakutsk")),
('Australia/North', _("(GMT +9:30) Australia (Northern Territory)")),
('Etc/GMT-10', _("(GMT +10:00) Eastern Australia, Guam, Vladivostok")),
('Etc/GMT-11', _("(GMT +11:00) Magadan, Solomon Islands, New Caledonia")),
('Pacific/Norfolk', _("(GMT +11:30) Norfolk Island")),
('Etc/GMT-12', _("(GMT +12:00) Auckland, Wellington, Fiji, Kamchatka")),
]
|
StarcoderdataPython
|
4937731
|
import os
HOME = os.environ['HOME']
TEST_DATA_DIR = '{}/data/CastorClientTestData'.format(HOME)
TEST_DATA_EXCEL_FILE = 'ESPRESSO_v2.0_DHBA_excel_export_20201112094203.xlsx'
TEST_DATA_HOSPITAL_ID = 'dhba_verrichting_upn'
TEST_DATA_SURGERY_DATE = 'dhba_datok1'
|
StarcoderdataPython
|