Dataset columns: `metadata` (dict) and `text` (string, lengths 60 to 3.49M characters).
{
"source": "JiajianZeng/caffe-face-attributes",
"score": 2
}
#### File: caffe-face-attributes/tools/train_plain_zf_for_face_attributes.py
```python
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
import os.path as osp
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a plain ZF network for face attributes')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
    # set the proposal method to 'gt' so this imdb loads annotations
imdb.set_proposal_method('gt')
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
n = 'plain_zf'
# Solvers
solvers = [[net_name, n, 'plain_zf_for_face_attributes_solver.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations
max_iters = [100000]
return solvers, max_iters
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(gpu_id):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
def train_plain_zf(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a plain ZF for face attributes prediction.
"""
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
# initialize caffe
_init_caffe(cfg.GPU_ID)
roidb, imdb = get_roidb(imdb_name)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train plain ZF
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
#for i in model_paths[:-1]:
#os.remove(i)
plain_zf_model_path = model_paths[-1]
# Send plain ZF model path over the multiprocessing queue
queue.put({'model_path': plain_zf_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
cfg.MODELS_DIR = osp.abspath(osp.join(cfg.ROOT_DIR, 'models', 'celeba'))
#cfg.MODELS_DIR = osp.abspath(osp.join(cfg.ROOT_DIR, 'models', 'lfwa'))
    # queue for communicating results between processes
mp_queue = mp.Queue()
    # solvers, iters, etc.
solvers, max_iters = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Plain ZF, for face attributes prediction using CelebA dataset'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_plain_zf, kwargs=mp_kwargs)
p.start()
plain_zf_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(plain_zf_out['model_path']),
'celeba_plain_zf_final.caffemodel')
#final_path = os.path.join(
#os.path.dirname(plain_zf_out['model_path']),
#'lfwa_plain_zf_final.caffemodel')
print 'cp {} -> {}'.format(
plain_zf_out['model_path'], final_path)
shutil.copy(plain_zf_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
```
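The comment block in the script above explains why training runs in a child process: pycaffe does not reliably release GPU memory when a net is discarded, so each stage is isolated in its own `multiprocessing.Process` and its result is passed back through a queue. A minimal sketch of that pattern, with a placeholder `train_stage` function standing in for the real pycaffe training call:

```python
import multiprocessing as mp

def train_stage(queue, stage_name, max_iters):
    # Placeholder for a real pycaffe training stage. Because it runs in a
    # child process, all GPU memory the stage allocates is reclaimed when
    # the process exits, even if pycaffe never frees it explicitly.
    model_path = '{}_iter_{}.caffemodel'.format(stage_name, max_iters)
    queue.put({'model_path': model_path})

if __name__ == '__main__':
    queue = mp.Queue()
    p = mp.Process(target=train_stage, args=(queue, 'plain_zf', 100000))
    p.start()
    out = queue.get()   # read the result before join(), as the script above does
    p.join()
    print('trained model: {}'.format(out['model_path']))
```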
{
"source": "jiajiaxu123/Orca",
"score": 2
}
#### File: orca/core/datetimes.py
```python
import itertools
import pandas as pd
from .common import default_session
from .internal import _ConstantSP
def _orca_unary_op(func, infered_ddb_dtypestr=None):
# infered_ddb_dtypestr is used to convert Timestamp's data type
# to make the category of two objects compatible
@property
def ufunc(self):
return self._unary_op(func, infered_ddb_dtypestr)
return ufunc
def _orca_logical_unary_op(func):
@property
def lufunc(self):
return self._logical_unary_op(func)
return lufunc
class Timestamp(object):
def __init__(self, ts_input, freq=None, tz=None, unit=None, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, nanosecond=None, tzinfo=None, session=default_session()):
if isinstance(ts_input, pd.Timestamp):
data = ts_input
else:
data = pd.Timestamp(ts_input, freq, tz, unit, year, month, day, hour, minute, second, microsecond, nanosecond, tzinfo)
self._session = session
self._internal = data.to_numpy()
self._var = _ConstantSP.upload_obj(session, self._internal)
@property
def _var_name(self):
return self._var._var_name
def __repr__(self):
return self._internal.__repr__()
def __str__(self):
return self._internal.__str__()
class DatetimeProperties(object):
date = _orca_unary_op("date", "date")
time = _orca_unary_op("second", "second")
year = _orca_unary_op("year")
month = _orca_unary_op("monthOfYear")
day = _orca_unary_op("dayOfMonth")
hour = _orca_unary_op("hour")
minute = _orca_unary_op("minuteOfHour")
hourminute = _orca_unary_op("minute", "minute")
second = _orca_unary_op("secondOfMinute")
microsecond = _orca_unary_op("(t->nanotimestamp(t).microsecond())")
nanosecond = _orca_unary_op("(t->nanotimestamp(t).nanosecond()%1000)") # TODO: nanosecond support other dtypes
dayofyear = _orca_unary_op("dayOfYear")
weekofyear = _orca_unary_op("weekOfYear")
week = weekofyear
dayofweek = _orca_unary_op("weekday{,false}")
weekday = dayofweek
quarter = _orca_unary_op("quarterOfYear")
daysinmonth = _orca_unary_op("daysInMonth")
days_in_month = daysinmonth
is_month_start = _orca_logical_unary_op("isMonthStart")
is_month_end = _orca_logical_unary_op("isMonthEnd")
is_quarter_start = _orca_logical_unary_op("isQuarterStart")
is_quarter_end = _orca_logical_unary_op("isQuarterEnd")
is_year_start = _orca_logical_unary_op("isYearStart")
is_year_end = _orca_logical_unary_op("isYearEnd")
is_leap_year = _orca_logical_unary_op("isLeapYear")
class DatetimeMethods(DatetimeProperties):
def __init__(self, s):
self._s = s
def _logical_unary_op(self, func):
from .operator import BooleanExpression
return BooleanExpression(self._s, None, func, 1)
def _unary_op(self, func, infered_ddb_dtypestr):
from .operator import ArithExpression
return ArithExpression(self._s, None, func, 0,
infered_ddb_dtypestr=infered_ddb_dtypestr)
```
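`DatetimeProperties` above is built with a small property factory: `_orca_unary_op` captures the name of a DolphinDB function in a closure and returns a read-only `property` whose getter defers to the instance's `_unary_op`. A self-contained sketch of the same pattern (the `DemoProperties` class and the returned script strings are illustrative, not orca APIs):

```python
def unary_op_property(func_name):
    # Capture the server-side function name and expose it as a read-only attribute.
    @property
    def accessor(self):
        return self._unary_op(func_name)
    return accessor

class DemoProperties(object):
    year = unary_op_property("year")
    month = unary_op_property("monthOfYear")

    def __init__(self, column):
        self._column = column

    def _unary_op(self, func_name):
        # orca would build an ArithExpression here; this sketch just returns
        # the script fragment that would be sent to DolphinDB.
        return f"{func_name}({self._column})"

props = DemoProperties("trade_time")
print(props.year)    # -> year(trade_time)
print(props.month)   # -> monthOfYear(trade_time)
```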
#### File: orca/core/groupby.py
```python
import abc
import itertools
from typing import Iterable
from .indexes import Index
from .internal import _InternalAccessor
from .merge import MergeExpression
from .operator import (ArithExpression, BooleanExpression, DataFrameLike,
SeriesLike, StatOpsMixin)
from .series import Series
from .utils import (ORCA_INDEX_NAME_FORMAT, _infer_level,
_unsupport_columns_axis, check_key_existence,
dolphindb_numeric_types, get_orca_obj_from_script,
sql_select, to_dolphindb_literal)
def _orca_groupby_op(func, numeric_only):
def gfunc(self):
return self._groupby_op(func, numeric_only)
return gfunc
def _orca_contextby_op(func, numeric_only):
def cfunc(self):
return self._contextby_op(func, numeric_only)
return cfunc
class GroupByOpsMixin(metaclass=abc.ABCMeta):
all = _orca_groupby_op("all", numeric_only=False)
any = _orca_groupby_op("any", numeric_only=False)
count = _orca_groupby_op("count", numeric_only=False)
size = _orca_groupby_op("size", numeric_only=False)
sum = _orca_groupby_op("sum", numeric_only=True)
sum2 = _orca_groupby_op("sum2", numeric_only=True)
prod = _orca_groupby_op("prod", numeric_only=True)
mean = _orca_groupby_op("mean", numeric_only=True)
median = _orca_groupby_op("median", numeric_only=True)
min = _orca_groupby_op("min", numeric_only=False)
max = _orca_groupby_op("max", numeric_only=False)
std = _orca_groupby_op("std", numeric_only=True)
var = _orca_groupby_op("var", numeric_only=True)
sem = _orca_groupby_op("sem", numeric_only=True)
mad = _orca_groupby_op("mad", numeric_only=True)
skew = _orca_groupby_op("skew", numeric_only=True)
kurtosis = _orca_groupby_op("kurtosis", numeric_only=True)
first = _orca_groupby_op("first", numeric_only=False)
last = _orca_groupby_op("last", numeric_only=False)
ohlc = _orca_groupby_op("ohlc", numeric_only=True)
ffill = _orca_contextby_op("ffill", numeric_only=False)
pad = ffill
bfill = _orca_contextby_op("bfill", numeric_only=False)
backfill = bfill
cumcount = _orca_contextby_op("cumcount", numeric_only=False)
cummax = _orca_contextby_op("cummax", numeric_only=False)
cummin = _orca_contextby_op("cummin", numeric_only=False)
cumprod = _orca_contextby_op("cumprod", numeric_only=True)
cumsum = _orca_contextby_op("cumsum", numeric_only=True)
pct_change = _orca_contextby_op("percentChange", numeric_only=True)
def diff(self, periods=1, axis=0):
_unsupport_columns_axis(self, axis)
if periods != 1:
raise ValueError("periods must be 1")
return self._contextby_op("deltas", numeric_only=True)
_STRING_TO_NUMERIC_ONLY = {
"all": False,
"any": False,
"count": False,
"size": False,
"sum": True,
"sum2": True,
"prod": True,
"mean": True,
"median": True,
"min": False,
"max": False,
"std": True,
"var": True,
"sem": True,
"med": True,
"skew": True,
"kurtosis": True,
"first": False,
"last": False,
"ohlc": True,
"bfill": False,
"ffill": False,
"cumcount": False,
"cummax": False,
"cummin": False,
"cumprod": True,
"cumsum": True,
"pct_change": True,
"diff": True,
}
def rank(self, axis=0, method='min', na_option='top', ascending=True, pct=False, rank_from_zero=False, group_num=None):
from .operator import _check_rank_arguments
func = _check_rank_arguments(axis, method, na_option, ascending, pct, rank_from_zero, group_num)
return self._contextby_op(func, numeric_only=False)
def ols(self, y, x, column_names, intercept=True):
y, _ = check_key_existence(y, self._data_columns)
x, _ = check_key_existence(x, self._data_columns)
if len(y) != 1:
raise ValueError("y must be a single column")
y_script = y[0]
x_script = ",".join(x)
intercept = "true" if intercept else "false"
column_names_literal = to_dolphindb_literal(column_names)
script = f"ols({y_script}, ({x_script}), {intercept}) as {column_names_literal}"
orderby_list = self._orderby_list if self._sort else None
script = sql_select([script], self._var_name, self._where_expr,
groupby_list=self._groupby_list, orderby_list=orderby_list,
asc=self._ascending)
return self._run_groupby_script("ols", script, self._result_index_map)
def aggregate(self, func, *args, **kwargs):
return self._groupby_op(func, False)
agg = aggregate
def apply(self, func, *args, **kwargs):
if not isinstance(func, str):
raise ValueError("Orca does not support callable func; func must be a string representing a DolphinDB function")
select_list = [func]
orderby_list = self._orderby_list if self._sort else None
if isinstance(self._internal, MergeExpression):
var_name = self._internal._from_clause
else:
var_name = self._var_name
script = sql_select(select_list, var_name, self._where_expr,
groupby_list=self._groupby_list, orderby_list=orderby_list,
asc=self._ascending)
return self._run_groupby_script(func, script, self._result_index_map)
def transform(self, func="", *args, **kwargs):
if not isinstance(func, str):
raise ValueError("Orca does not support callable func; func must be a string representing a DolphinDB function")
return self._contextby_op(func, False)
@staticmethod
def _get_groupby_list_orderby_list_and_index_map(groupby_columns, index_names, sort, resample):
index_columns = [ORCA_INDEX_NAME_FORMAT(i) for i in range(len(index_names))]
groupby_list = [f"{groupby_column} as {index_column}"
for groupby_column, index_column in zip(groupby_columns, index_columns)]
if sort:
orderby_list = index_columns
elif resample:
orderby_list = index_columns[-1:]
else:
orderby_list = None
index_map = [(index_column, None) if index_name is None
else (index_column, (index_name,))
for index_name, index_column in zip(index_names, index_columns)]
contextby_index_map = [(index_column, None) if index_name is None
else (index_name, (index_name,))
for index_name, index_column in zip(index_names, index_columns)]
return groupby_list, orderby_list, index_map, contextby_index_map
def _generate_groupby_select_list_and_value_list(self, func, groupkeys, numeric_only):
def check_func_existance(func):
return self._STRING_TO_NUMERIC_ONLY.get(func, False)
def ohlc_select_list(select_col, col):
return [f"first({select_col}) as {col}_open",
f"max({select_col}) as {col}_high",
f"min({select_col}) as {col}_low",
f"last({select_col}) as {col}_close"]
def funcname_alias(func):
ALIAS = {"pad": "ffill", "backfill": "bfill", "pct_change": "percentChange", "diff": "deltas"}
return ALIAS.get(func, func)
select_columns = self._get_data_select_list()
data_columns = self._data_columns
# special functions
if func == "size":
return ["count(*)"], []
if func == "ohlc":
column_ohlcs = (ohlc_select_list(select_col, col)
for select_col, col in zip(select_columns, data_columns))
return list(itertools.chain(*column_ohlcs)), []
if isinstance(func, str):
func = funcname_alias(func)
numeric_only = check_func_existance(func)
elif isinstance(func, list):
select_list = []
func_names = []
for func_name in func:
if not isinstance(func_name, str):
raise TypeError(f"Only strings are supported to be used as function names")
func_names.append(funcname_alias(func_name))
select_list= ([f"{func_name}({col}) as {col}_{func_name}" for func_name in func_names]
for col in select_columns if col not in groupkeys)
select_list = list(itertools.chain(*select_list))
return select_list, []
elif isinstance(func, dict):
select_list = []
for col, func_name in func.items():
if not isinstance(func_name, str):
raise TypeError(f"Only strings are supported to be used as function names")
try:
col_idx = data_columns.index(col)
except ValueError:
raise KeyError(col)
func_name = funcname_alias(func_name)
# check_func_existance(func_name)
select_col = select_columns[col_idx]
if func_name == "ohlc":
select_list.extend(ohlc_select_list(select_col, col))
else:
select_list.append(f"{func_name}({select_col}) as {col}")
return select_list, []
else:
raise TypeError(f"Only strings are supported to be used as function names")
# is_op_on_different_columns = False
if isinstance(self._internal, (ArithExpression, BooleanExpression)):
numeric_only = False
ddb_dtypes = self._ddb_dtypes
select_list = []
value_list = []
for select_col, col in zip(select_columns, data_columns):
if (col not in groupkeys
and (not numeric_only
or ddb_dtypes[col] in dolphindb_numeric_types)):
select_list.append(f"{func}({select_col}) as {col}")
value_list.append(f"{func}({select_col})")
return select_list, value_list
def _run_groupby_script(self, func, script, groupkeys, is_apply=False):
groupby_size = (func == "size")
groupby_having = (func == "")
session = self._session
index = groupkeys if self._as_index or groupby_size or groupby_having else []
if isinstance(func, list):
column_index = ([(col, func_name) for func_name in func]
for col in self._data_columns if col not in self._groupkeys)
column_index = list(itertools.chain(*column_index))
return get_orca_obj_from_script(session, script, index, column_index=column_index)
if func == "ohlc":
column_index = ([(col, "open"), (col, "high"), (col, "low"), (col, "close")] for col in self._data_columns)
column_index = list(itertools.chain(*column_index))
return get_orca_obj_from_script(session, script, index, column_index=column_index)
data = get_orca_obj_from_script(session, script, index)
if groupby_size:
s = data["count"]
s.rename(None, inplace=True)
return s
elif is_apply:
s = data[data._data_columns[0]]
s.rename(None, inplace=True)
return s
elif self._is_series_like:
s = data[data._data_columns[0]]
s.rename(self._name, inplace=True)
return s
else:
return data
def _get_data_select_list(self):
internal = self._internal
if isinstance(internal, (ArithExpression, BooleanExpression, MergeExpression)):
return internal._get_data_select_list()
else:
return self._data_columns
@abc.abstractmethod
def _groupby_op(self, func, numeric_only):
select_list, _ = \
self._generate_groupby_select_list_and_value_list(func, self._groupkeys, numeric_only)
if len(select_list) == 0: # TODO: handle
raise NotImplementedError()
orderby_list = self._orderby_list if self._sort else None
if isinstance(self._internal, MergeExpression):
var_name = self._internal._from_clause
else:
var_name = self._var_name
script = sql_select(select_list, var_name, self._where_expr,
groupby_list=self._groupby_list, orderby_list=orderby_list,
asc=self._ascending)
return self._run_groupby_script(func, script, self._result_index_map)
@abc.abstractmethod
def _contextby_op(self, func, numeric_only): # TODO: context by order
select_list, value_list = \
self._generate_groupby_select_list_and_value_list(func, self._groupkeys, numeric_only)
klass = SeriesContextByExpression if self._is_series_like else DataFrameContextByExpression
return klass(self._session, self._internal, func, self._where_expr, self._name,
select_list, value_list, self._groupby_list)
class ContextByExpression(_InternalAccessor):
"""
Expression related to DolphinDB context by expressions.
"""
def __init__(self, session, internal, func, where_expr, name,
select_list, value_list, groupby_list):
self._session = session
self._internal = internal
self._func = func
self._where_expr = where_expr
self._name = name
self._select_list = select_list
self._value_list = value_list
self._groupby_list = groupby_list
self._as_index = True
def compute(self):
select_list = self._select_list
if len(select_list) == 0:
raise NotImplementedError()
select_list = itertools.chain(self._index_columns, select_list)
script = sql_select(select_list, self._var_name, self._where_expr,
groupby_list=self._groupby_list, is_groupby=False, hint=128)
return GroupByOpsMixin._run_groupby_script(self, self._func, script, self._index_map)
def to_pandas(self):
return self.compute().to_pandas()
def _get_data_select_list(self):
return self._value_list
def _get_contextby_list(self):
return self._groupby_list
class DataFrameContextByExpression(DataFrameLike, ContextByExpression):
pass
class SeriesContextByExpression(SeriesLike, ContextByExpression):
pass
class GroupBy(_InternalAccessor, GroupByOpsMixin, metaclass=abc.ABCMeta):
def __init__(self, session, internal, index, by, level, as_index, sort, ascending, where_expr, name,
groupkeys=None, groupby_list=None, orderby_list=None, result_index_map=None,
contextby_result_index_map=None):
self._session = session
self._internal = internal
self._index = index
self._as_index = as_index
self._sort = sort
self._ascending = ascending
self._where_expr = where_expr
self._name = name
if (groupkeys is not None and groupby_list is not None
and orderby_list is not None and result_index_map is not None
and contextby_result_index_map is not None):
self._groupkeys = groupkeys
self._groupby_list = groupby_list
self._orderby_list = orderby_list
self._result_index_map = result_index_map
self._contextby_result_index_map = contextby_result_index_map
return
index_names = []
groupkeys = []
if by is None and level is None:
raise TypeError("You have to supply one of 'by' and 'level'")
if level is not None:
groupkeys, _, index_names, _ = _infer_level(level, self._index_map)
else:
for column in by:
if isinstance(column, str):
groupkeys.append(column)
index_names.append(column)
elif isinstance(column, Series):
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Series")
groupkeys.append(column._data_columns[0])
index_names.append(column._name)
elif isinstance(column, Index):
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Index")
groupkeys += column._index_columns
index_names += column._index_columns
elif isinstance(column, (ArithExpression, BooleanExpression)):
if not column._is_series_like:
raise ValueError("Grouper is not 1-dimensional")
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Index")
groupkeys.append(column._get_data_select_list()[0])
index_names.append(column._name)
else:
raise ValueError("Each element in by must be a label")
self._groupkeys = groupkeys
self._groupby_list, self._orderby_list, \
self._result_index_map, self._contextby_result_index_map = \
GroupByOpsMixin._get_groupby_list_orderby_list_and_index_map(
groupkeys, index_names, sort, resample=False)
@property
@abc.abstractmethod
def _is_series_like(self):
pass
@property
@abc.abstractmethod
def _is_dataframe_like(self):
pass
def __getitem__(self, key):
if isinstance(key, str):
klass = SeriesGroupBy
name = key
elif isinstance(key, Iterable):
klass = DataFrameGroupBy
name = self._name
else:
raise KeyError(key)
new_odf = self._internal[key]
return klass(self._session, new_odf, self._index, None, None,
self._as_index, self._sort, self._ascending, self._where_expr, name,
self._groupkeys, self._groupby_list, self._orderby_list, self._result_index_map, self._contextby_result_index_map)
def _groupby_op(self, *args, **kwargs):
return GroupByOpsMixin._groupby_op(self, *args, **kwargs)
def _contextby_op(self, *args, **kwargs):
return GroupByOpsMixin._contextby_op(self, *args, **kwargs)
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None, lazy=False, **kwargs):
from .resample import SeriesResampler, DataFrameResampler
klass = SeriesResampler if self._is_series_like else DataFrameResampler
StatOpsMixin._validate_resample_arguments(how=how, axis=axis, fill_method=fill_method, closed=closed,
label=label, convention=convention, kind=kind, loffset=loffset,
limit=limit, base=base, on=on, level=level)
return klass(self._session, self._internal, self._index, rule, on=on, level=level,
where_expr=self._where_expr, name=self._name, groupkeys=self._groupkeys, sort=self._sort)
def filter(self, func, dropna=True, *args, **kwargs):
if not dropna:
raise NotImplementedError()
if not isinstance(func, str):
raise ValueError("Orca does not support callable func; func must be a string representing a HAVING condition")
index_columns = self._index_columns
select_list = index_columns + self._get_data_select_list()
script = sql_select(select_list, self._var_name, self._where_expr,
groupby_list=self._groupby_list, is_groupby=False,
having_list=[func])
return self._run_groupby_script("", script, self._index_map)
class DataFrameGroupBy(DataFrameLike, GroupBy):
pass
class SeriesGroupBy(SeriesLike, GroupBy):
pass
class HavingGroupBy(_InternalAccessor, GroupByOpsMixin, metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def _is_series_like(self):
pass
@property
@abc.abstractmethod
def _is_dataframe_like(self):
pass
class DataFrameHavingGroupBy(DataFrameLike, HavingGroupBy):
pass
class SeriesHavingGroupBy(SeriesLike, HavingGroupBy):
pass
```
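Every aggregation in `GroupByOpsMixin` is compiled to a DolphinDB SQL script: `_generate_groupby_select_list_and_value_list` emits one `func(col) as col` item per data column (with special cases for `size` and `ohlc`), and `_groupby_op` wraps the list in a `select ... group by ...` statement via `sql_select`. A rough standalone sketch of that translation (the `build_groupby_script` helper is illustrative, not part of orca):

```python
def build_groupby_script(table, func, data_columns, groupkeys):
    # Mirrors the select-list generation above: one aggregated expression per
    # data column, special-cased for "size" and "ohlc".
    if func == "size":
        select_list = ["count(*)"]
    elif func == "ohlc":
        select_list = []
        for col in data_columns:
            select_list += [f"first({col}) as {col}_open", f"max({col}) as {col}_high",
                            f"min({col}) as {col}_low", f"last({col}) as {col}_close"]
    else:
        select_list = [f"{func}({col}) as {col}"
                       for col in data_columns if col not in groupkeys]
    return f"select {', '.join(select_list)} from {table} group by {', '.join(groupkeys)}"

print(build_groupby_script("t", "sum", ["price", "qty"], ["sym"]))
# select sum(price) as price, sum(qty) as qty from t group by sym
```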
#### File: orca/core/indexes.py
```python
import abc
import itertools
from typing import Iterable
import dolphindb as ddb
import numpy as np
import pandas as pd
from .common import default_session
from .datetimes import DatetimeProperties
from .internal import _ConstantSP, _InternalFrame, _InternalAccessor
from .operator import IndexLike, SeriesLike, ArithOpsMixin, StatOpsMixin, LogicalOpsMixin, IOOpsMixin
from .utils import (
_to_freq, dolphindb_temporal_types, _to_numpy_dtype,
is_dolphindb_uploadable, sql_select,
get_orca_obj_from_script)
class IndexOpsMixin(ArithOpsMixin, LogicalOpsMixin, metaclass=abc.ABCMeta):
def __init__(self, internal, session):
self._internal = internal
self._session = session
if isinstance(internal, _ConstantSP):
self._name = None
else:
names = [name[0] if name is not None else None
for _, name in internal.index_map]
if len(names) == 0:
self._name = None
self._names = None
elif len(names) == 1:
self._name = names[0]
self._names = None
elif len(names) > 1:
self._name = None
self._names = names
# self._dtype = internal.dtype
def __len__(self):
return len(self._internal)
# @property TODO: name getter and setter
# def name(self):
# return self.name
# @name.setter
# def name(self, name):
# self.rename(inplace=True)
@property
def _ddb_dtype(self):
if isinstance(self._internal, _ConstantSP):
return self._type
else:
index_column = self._index_column
return self._ddb_dtypes[index_column]
@property
def ndim(self):
return 1
@property
def size(self):
return len(self)
@property
def dtype(self):
return _to_numpy_dtype(self._ddb_dtype)
@abc.abstractmethod
def to_numpy(self):
pass
@abc.abstractmethod
def to_pandas(self):
pass
@property
def is_monotonic(self):
return self._unary_agg_op("isSorted", axis=None, level=None, numeric_only=False)
@property
def is_monotonic_increasing(self):
return self._unary_agg_op("isSorted", axis=None, level=None, numeric_only=False)
@property
def is_monotonic_decreasing(self):
return self._unary_agg_op("isSorted{,false}", axis=None, level=None, numeric_only=False)
@property
def is_unique(self):
len_self = len(self)
        return len_self == 1 or self.nunique() == len_self
@property
def hasnans(self):
return self._unary_agg_op("hasNull", axis=None, level=None, numeric_only=False)
def _unary_op(self, *args, **kwargs):
return ArithOpsMixin._unary_op(self, *args, **kwargs)
def _binary_op(self, *args, **kwargs):
return ArithOpsMixin._binary_op(self, *args, **kwargs)
def _extended_binary_op(self, *args, **kwargs):
return ArithOpsMixin._extended_binary_op(self, *args, **kwargs)
def _logical_op(self, *args, **kwargs):
return LogicalOpsMixin._logical_op(self, *args, **kwargs)
def _logical_unary_op(self, *args, **kwargs):
return LogicalOpsMixin._logical_unary_op(self, *args, **kwargs)
def _to_script(self):
odf = self._internal
if isinstance(odf, _ConstantSP):
return self._var_name
select_list = self._index_columns
return sql_select(select_list, self._var_name)
# elif self._segmented:
# select_list = self._index_columns
# return sql_select(select_list, self._var_name, is_exec=True)
# else:
# assert len(self._index_columns) == 1
# var_name, column_name = self._var_name, self._index_column
# return f"{var_name}.{column_name}"
def _binary_op_on_different_indices(self, other, func, axis):
"""
        Implementation of a binary operator between two Series on different
        indices. A new Series representing an in-memory DolphinDB table
        is returned. It is guaranteed that neither Series has a where_expr.
Parameters
----------
other : _Frame
Right hand side of the operator.
func : str
            Function name.
Returns
-------
orca.DataFrame
The result of the operation.
Raises
------
NotImplementedError
To be implemented.
"""
from .merge import _generate_joiner
_COLUMN_NAME = "ORCA_DIFFERENT_INDICES_COLUMN"
if other._is_series_like:
session = self._session
self_var_name, other_var_name = self._var_name, other._var_name
self_column_name = self._data_columns[0]
other_column_name = other._data_columns[0]
select_list = [f"{func}({self_var_name}.{self_column_name}, {other_var_name}.{other_column_name}) as {_COLUMN_NAME}"]
index_list, from_clause = _generate_joiner(
self_var_name, other_var_name, self._index_columns, other._index_columns)
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
index_map = [(s_map[0], None if s_map[1] != o_map[1] else s_map[1])
for s_map, o_map
in zip(self._internal.index_map, other._internal.index_map)]
return self._get_from_script(
session, script, data_columns=[_COLUMN_NAME], index_map=index_map)
elif other._is_dataframe_like:
raise NotImplementedError()
class Index(IndexLike, _InternalAccessor, IndexOpsMixin, IOOpsMixin):
"""
Accessor for DataFrame and Series.
When calling get_select_list, a specific identifier is added before the
column.
When names are not given, a specific identifier is used instead.
"""
def __init__(self, data, dtype=None, copy=False, name=None, tupleize_cols=None, session=default_session()):
if isinstance(data, _ConstantSP):
assert dtype is None
assert not copy
assert tupleize_cols is None
IndexOpsMixin.__init__(self, data, session)
self._name = name
elif isinstance(data, _InternalFrame):
assert dtype is None
assert name is None
assert not copy
assert tupleize_cols is None
IndexOpsMixin.__init__(self, data, session)
else:
if isinstance(data, (pd.Index, pd.Series)):
idx = (data if dtype is None and name is None and tupleize_cols is None
else pd.Index(data, dtype=dtype, name=name, tupleize_cols=tupleize_cols))
else:
idx = pd.Index(data=data, dtype=dtype, copy=False, name=name,
tupleize_cols=tupleize_cols) # TODO: copy = True or False ?, freq?
# var = _ConstantSP.upload_obj(session, idx.to_numpy())
# var._framize(name=idx.name)
# IndexOpsMixin.__init__(self, var, session)
# self._name = idx.name
odf = _InternalFrame.from_pandas(session, idx)
IndexOpsMixin.__init__(self, odf, session)
self._where_expr = None
def __repr__(self):
if self._segmented:
return "<.index.Index object representing a column in a DolphinDB segmented table>"
else:
return self.to_pandas().__repr__()
def __eq__(self, other):
if type(self) != type(other):
return False
else:
return (self._var_name == other._var_name
and self._index_columns == other._index_columns)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _from_internal(cls, odf, index=None):
"""
Create an orca Index indicated by an _InternalFrame and another
pandas or orca Index.
Parameters
----------
odf : _InternalFrame
odf provides the metadata of the represented DolphinDB table
            and serves as the _internal attribute of the Index
index : pd.Index or orca.Index, optional
index provides the metadata such as name, frequency, etc. of
the Index, by default None
"""
session = odf._session
if index is None or not isinstance(index, pd.DatetimeIndex):
if odf.is_any_vector:
index = Index(index, session=session)
elif len(odf.index_map) == 1:
if odf._ddb_dtypes[odf._index_columns[0]] in dolphindb_temporal_types:
index = DatetimeIndex._from_internal(odf, index)
else:
index = Index(odf, session=session)
elif len(odf.index_map) == 0:
index = Index([], session=session)
else:
index = MultiIndex(odf, session=session)
elif isinstance(index, pd.DatetimeIndex):
index = DatetimeIndex._from_internal(odf, index)
else:
raise TypeError("Unsupported index type")
return index
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value is not None and not isinstance(value, str):
raise TypeError("Index.name must be a string")
self._name = value
@property
def names(self):
return self._names
def rename(self, value, inplace=False):
raise NotImplementedError()
@property
def _index_column(self):
assert isinstance(self._internal, _InternalFrame)
return self._index_columns[0]
@property
def _index(self):
return self
def _to_script_list(self):
if isinstance(self._internal, _ConstantSP):
assert(self._form != ddb.settings.DF_TABLE)
return [self._var_name]
else:
return [sql_select([col], self._var_name, is_exec=True)
for col in self._index_columns]
def to_pandas(self):
if isinstance(self._internal, _ConstantSP):
df = self._session.run(self._to_script())
return pd.Index(df).rename(self._name)
elif len(self._index_columns) == 0:
raise ValueError("Frame has no default index if it is not in memory")
else:
df = self._session.run(self._to_script())
return pd.Index(df.iloc[:,0]).rename(self._name)
def to_numpy(self):
return self.to_pandas().to_numpy()
def _get_data_select_list(self):
if isinstance(self._internal, _ConstantSP):
return [self._var_name]
else:
return [f"{self._var_name}.{self._index_column}"]
def _unary_agg_op(self, func, *args, **kwargs):
if isinstance(self._internal, _ConstantSP):
script = f"{func}({self._var_name})"
else:
index_column = self._index_column
select_list = [f"{func}({index_column})"]
script = sql_select(select_list, self._var_name, is_exec=True)
return get_orca_obj_from_script(self._session, script, [], as_index=True)
def min(self, axis=None, skipna=True, *args, **kwargs):
return self._unary_agg_op("min")
def max(self, axis=None, skipna=True, *args, **kwargs):
return self._unary_agg_op("max")
def unique(self, level=None):
pass
def nunique(self, dropna=True):
pass
isna = LogicalOpsMixin.isna
notna = LogicalOpsMixin.notna
isnull = LogicalOpsMixin.isnull
notnull = LogicalOpsMixin.notnull
fillna = StatOpsMixin.fillna
dropna = StatOpsMixin.dropna
# def _binary_op(self, other, func):
# from .frame import DataFrame
# from .series import Series
# if is_dolphindb_uploadable(self):
# raise NotImplementedError()
# elif not isinstance(self, Index):
# raise TypeError("Operand must be a Series")
# elif is_dolphindb_uploadable(other):
# raise NotImplementedError()
# elif isinstance(other, DataFrame):
# raise NotImplementedError()
# elif isinstance(other, Series):
# raise NotImplementedError()
# else:
# raise TypeError("Operand must be a Series or DataFrame")
# def _logical_op(self, other, func):
# raise NotImplementedError()
# def _logical_unary_op(self, func):
# raise NotImplementedError()
@property
def values(self):
        #warnings.warn("orca objects do not store data in numpy arrays. Accessing values will retrieve the whole data from the remote node.", Warning)
return self.to_numpy()
@property
def shape(self):
return (len(self),)
@property
def nbytes(self):
session = self._session
script = sql_select(["bytes"], "objs()", where_expr=f"name='{self._var_name}'", is_exec=True)
script += "[0]"
return session.run(script)
@property
def ndim(self):
return 1
@property
def T(self):
return self
@property
def is_all_dates(self):
return False
class MultiIndex(Index):
def __init__(self, data, names=None, session=default_session()):
if isinstance(data, _InternalFrame):
assert names is None
Index.__init__(self, data, session=session)
elif isinstance(data, pd.MultiIndex):
assert names is None
frame = data.to_frame()
var = _ConstantSP.upload_obj(session, frame)
Index.__init__(self, var, session=session)
self._names = list(data.names)
@staticmethod
def _from_pandas_multiindex(session, index):
from .frame import DataFrame
return DataFrame(data=None, session=session, index=index).index
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_arrays(arrays, sortorder, names))
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_tuples(tuples, sortorder, names))
@classmethod
def from_product(cls, iterables, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_product(iterables, sortorder, names))
@classmethod
def from_frame(cls, df, sortorder=None, names=None, session=default_session()): # TODO: directly upload frame
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_frame(df, sortorder, names))
@property
def names(self):
return self._names
@names.setter
def names(self, value):
raise NotImplementedError()
# def _to_script(self):
# select_list = self._index_columns
# return sql_select(select_list, self._var_name)
def to_pandas(self): # TODO: dealing with where clause
df = self._session.run(self._to_script())
return pd.MultiIndex.from_frame(df).rename(self.names)
    def _unary_op(self, func):
        raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
    def _binary_op(self, other, func):
        raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
    def _logical_op(self, other, func):
        raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
    def _logical_unary_op(self, func):
        raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
class RangeIndex(Index):
def __init__(self, start=None, stop=None, step=1, name=None, session=default_session()):
self._start = start
self._stop = stop
self._step = step
self._name = name
self._session = session
self._internal = None
@classmethod
def _from_internal(cls, frame):
odf = frame._internal
session = frame._session
names = odf.index_map[0][1]
name = names if names is None else names[0]
obj = cls(start=0, stop=len(odf), name=name, session=session)
obj._internal = odf
return obj
@classmethod
def _from_internal_frame_and_range(cls, session, internal, index):
assert isinstance(index, RangeIndex)
assert isinstance(internal, _InternalFrame)
start, stop, step, name = index.start, index.stop, index.step, index.name
range_index = RangeIndex(start, stop, step, name, session)
range_index._internal = internal
return range_index
def __len__(self):
# TODO: step
return max(self.stop - self.start, 0)
def __eq__(self, other):
if isinstance(other, RangeIndex):
return (self.start == other.start
and self.stop == other.stop
and self.step == other.step)
else:
return False
@property
def start(self):
return self._start
@property
def stop(self):
return self._stop
@property
def step(self):
return self._step
@property
def dtype(self):
return np.dtype(np.int64)
@property
def id(self): # Pseudo id to be used for reference
return f"_orca_range_index_{self.start}_{self.stop}"
def _to_script(self):
return f"({self.start}..{self.stop - 1})"
def __repr__(self):
if self.name is None:
return f"RangeIndex(start={self.start}, stop={self.stop}, step={self.step})"
else:
return f"RangeIndex(start={self.start}, stop={self.stop}, step={self.step}, name='{self.name}')"
def to_pandas(self):
return pd.RangeIndex(self.start, self.stop, self.step, name=self.name)
def _unary_func(self, func):
script = f"{func}({self.start}..{self.stop - 1})"
return self._session.run(script)
def _binary_func(self, other, func):
session = self._session
script = f"{func}({self.start}..{self.stop - 1}, {other})"
data = session.run(script)
return Index(data, session=session)
# def _get_select_list(self, sql=False):
# if self._internal is None and sql:
# return []
# elif self._internal is None:
# name = self._real_column_names[0]
# script = f"{self._to_script()} as {name}"
# return [script]
# else:
# return Index._get_select_list(self)
# def _get_update_list(self):
# if self._internal is None:
# name = self._real_column_names[0]
# script = f"{name} = {self._to_script()}"
# return (script,)
# else:
# return Index._get_update_list(self)
# def append(self, other):
# session = self._session
# if isinstance(other, RangeIndex) and other.start == self.stop:
# return RangeIndex(self.start, other.end, session=session)
# script = f"join({self._to_script()}, {other._getter_script})"
# data = run_script(session, script)
# return Index(data, session=session)
def head(self, n=5):
if n == 0:
            return RangeIndex(0, 0, name=self.name)
elif n > 0:
if len(self) <= n:
return RangeIndex(self.start, self.stop, name=self.name)
else:
return RangeIndex(self.start, self.start + n, name=self.name)
else:
if len(self) <= abs(n):
return RangeIndex(self.start, self.start, name=self.name)
else:
return RangeIndex(self.start, self.stop + n, name=self.name)
def tail(self, n=5):
if n == 0:
            return RangeIndex(0, 0, name=self.name)
elif n > 0:
if len(self) <= n:
return RangeIndex(self.start, self.stop, name=self.name)
else:
return RangeIndex(self.stop - n, self.stop, name=self.name)
else:
if len(self) <= abs(n):
return RangeIndex(self.stop, self.stop, name=self.name)
else:
return RangeIndex(self.start - n, self.stop, name=self.name)
class DatetimeIndex(DatetimeProperties, Index):
def __init__(self, data, freq=None, dtype=None, tz=None, session=default_session()):
if isinstance(data, _InternalFrame):
assert freq is None
assert dtype is None
assert tz is None
index_columns = data._index_columns
ddb_dtype = data._ddb_dtypes[index_columns[0]]
assert len(index_columns) == 1
assert ddb_dtype in dolphindb_temporal_types
Index.__init__(self, data, session=session)
self._freq = _to_freq(ddb_dtype)
self._dtype = _to_numpy_dtype(ddb_dtype)
self._tz = None
elif isinstance(data, pd.DatetimeIndex):
data = (data if freq is None and dtype is None and tz is None
else pd.DatetimeIndex(data, freq=freq, dtype=dtype, tz=tz))
Index.__init__(self, data, session=session)
self._freq = data.freq
self._dtype = data.dtype
self._tz = data.tz
else:
raise NotImplementedError()
# def __eq__(self, other):
# if isinstance(other, DatetimeIndex):
# raise NotImplementedError()
# else:
# return False
@classmethod
def _from_internal(cls, odf, index):
obj = cls(odf, session=odf._session)
if index is None:
# sample_script = sql_select(odf._index_columns, odf._var_name, is_exec=True, limit=3)
# sample_data = odf._session.run(sample_script)
# try:
# obj._freq = pd.infer_freq(pd.DatetimeIndex(sample_data))
# except ValueError:
# obj._freq = None
obj._freq = None
obj._dtype = None
obj._tz = None
else:
try:
# FIXME:
# oidx = orca.to_datetime(['20130101 09:00:00','20130101 09:00:02','20130101 09:00:03','20130101 09:00:05','20130101 09:00:06'])
# odf = orca.DataFrame({'A': ["a", "c", "w", "f", "f"], 'B': [0, 1, 2, np.nan, 4]}, index=orca.Index(data=oidx,name='time'))
obj._freq = index.freq
except ValueError:
obj._freq = None
obj._dtype = index.dtype
obj._tz = index.tz
return obj
@property
def freq(self):
return self._freq
@property
def tz(self):
return self._tz
@property
def is_all_dates(self):
return True
# @property
# def dtype(self):
# return self._dtype # TODO: sophisticated datetime
def to_pandas(self):
odf = self._internal
if isinstance(odf, _ConstantSP):
data = self._session.run(self._to_script())
else:
pdf = self._session.run(self._to_script())
data = pdf[pdf.columns[0]]
return pd.DatetimeIndex(data=data, freq=self._freq, tz=self._tz).set_names(self._name)
# return pd.DatetimeIndex(data=pdf, freq=self._freq, dtype=self._dtype)
def _logical_unary_op(self, func):
from .operator import BooleanExpression
return BooleanExpression(self, None, func, 1)
def _unary_op(self, func, infered_ddb_dtypestr):
from .operator import ArithExpression
return ArithExpression(self, None, func, 0,
infered_ddb_dtypestr=infered_ddb_dtypestr)
class PeriodIndex(Index):
def __init__(self):
pass
```
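`RangeIndex` above never materializes its values on the server: `_to_script` renders it as the DolphinDB expression `(start..stop-1)`, and `head`/`tail` are pure arithmetic on the bounds. A quick sanity check of that arithmetic against Python's own `range` semantics (illustrative helper functions, not orca APIs):

```python
def head_bounds(start, stop, n=5):
    # Same arithmetic as RangeIndex.head: keep the first n items, or drop the
    # last |n| items when n is negative, clamped to the original bounds.
    length = max(stop - start, 0)
    if n == 0:
        return (0, 0)
    if n > 0:
        return (start, stop) if length <= n else (start, start + n)
    return (start, start) if length <= abs(n) else (start, stop + n)

def tail_bounds(start, stop, n=5):
    # Same arithmetic as RangeIndex.tail.
    length = max(stop - start, 0)
    if n == 0:
        return (0, 0)
    if n > 0:
        return (start, stop) if length <= n else (stop - n, stop)
    return (stop, stop) if length <= abs(n) else (start - n, stop)

assert list(range(*head_bounds(3, 10, 4))) == list(range(3, 10))[:4]
assert list(range(*head_bounds(3, 10, -2))) == list(range(3, 10))[:-2]
assert list(range(*tail_bounds(3, 10, 4))) == list(range(3, 10))[-4:]
assert list(range(*tail_bounds(3, 10, -2))) == list(range(3, 10))[2:]
```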
#### File: orca/core/internal.py
```python
import itertools
import warnings
from typing import Iterable, List, Optional, Tuple, Union
import dolphindb as ddb
import pandas as pd
from pandas.api.types import is_list_like
from .common import (AttachDefaultIndexWarning, _get_verbose,
_warn_not_dolphindb_identifier)
from .utils import (ORCA_COLUMN_NAME_FORMAT, ORCA_INDEX_NAME_FORMAT,
_new_orca_identifier, _to_column_index, _to_index_map,
check_key_existence, is_dolphindb_identifier, sql_select,
sql_update, to_dolphindb_literal)
IndexMap = Tuple[str, Optional[Tuple[str, ...]]]
class _ConstantSP(object):
"""
The internal object which represents DolphinDB objects and works like
a smart pointer. When its reference count is reduced to zero (which
is automatically controlled by Python's memory management system),
the represented object's memory is released.
Scripts which modify DolphinDB variables (i.e. assignments and updates)
should be encapsulated in methods of this class and called from the
_InternalFrame which owns the instance of this class.
.. note:: this is an internal class. It is not supposed to be exposed to
    users and users should not directly access it.
"""
ORCA_IDENTIFIER = "ORCA_"
def __init__(self, session, id):
self._id = id
self._session = session
self._form, self._type, self._segmented, self._in_memory = None, None, False, False
self._update_metadata()
def __del__(self):
var_name = self.var_name
script = f"{var_name} = NULL; undef('{var_name}', VAR)"
self._session.run(script)
def __len__(self):
if not self.segmented:
script = f"size({self.var_name})"
else:
script = f"exec count(*) from {self.var_name}"
return self._session.run(script)
@property
def form(self):
return self._form
@property
def type(self):
return self._type
@property
def schema(self):
return self._schema
@property
def in_memory(self):
return self._in_memory
@property
def segmented(self):
return self._segmented
@property
def id(self):
return self._id
@property
def var_name(self): # TODO: lazyproperty?
return _ConstantSP.ORCA_IDENTIFIER + self._id
@property
def _var_name(self): # TODO: lazyproperty?
return _ConstantSP.ORCA_IDENTIFIER + self._id
@classmethod
def upload_obj(cls, session, obj):
obj_id = _new_orca_identifier()
var_name = _ConstantSP.ORCA_IDENTIFIER + obj_id
session.upload({var_name: obj})
return cls(session, obj_id)
@classmethod
def run_script(cls, session, script):
obj_id = _new_orca_identifier()
var_name = _ConstantSP.ORCA_IDENTIFIER + obj_id
if _get_verbose():
print(script)
session.run(f"&{var_name} = ({script})")
return cls(session, obj_id)
def _to_script(self):
return self.var_name
def _update_metadata(self):
var_name, session = self.var_name, self._session
form, dtype, typestr = session.run(f"[form({var_name}), type({var_name}), typestr({var_name})]")
form, dtype, typestr = int(form), int(dtype), str(typestr)
self._form, self._type = form, dtype
self._segmented = typestr.find("SEGMENTED") >= 0
self._in_memory = typestr.find("IN-MEMORY") >= 0 or not self._segmented
self._schema = (session.run(f"schema({self.var_name}).colDefs").set_index("name")
if form == ddb.settings.DF_TABLE else None)
def _sql_update(self, column_names: List[str],
new_values: List[str],
from_table_joiner: Optional[str] = None,
where_expr=None,
contextby_list: Optional[List[str]] = None):
session = self._session
table_name = self.var_name
script = sql_update(table_name, column_names, new_values,
from_table_joiner, where_expr, contextby_list)
if _get_verbose():
print(script)
session.run(script)
self._update_metadata()
def squeeze(self, index_columns=[], data_columns=None, name=None, as_index=False, squeeze_axis=None):
"""
Reduce the dimension of a DataFrame or Series if possible.
Parameters
----------
index_columns : list[str], optional
The index columns of the input DataFrame or Series, used as
name of the result
data_columns : list[str], optional
The data columns of the input DataFrame or Series. Only
these columns will be used, by default None, that is, to use
all columns
name : str, optional
The name of the returned Series if squeezed to a Series,
by default None
as_index : bool, optional
            Whether to return an Index instead of a Series, by default
False
squeeze_axis : 0, 1 or None
A specific axis to squeeze. 0 for index, 1 for column, None
for both. By default None
"""
from .indexes import Index
from .series import Series
session = self._session
var_name, form = self.var_name, self.form
if form == ddb.settings.DF_SCALAR:
return session.run(var_name)
elif form == ddb.settings.DF_VECTOR:
return session.run(f"{var_name}[0]")
elif form == ddb.settings.DF_TABLE:
all_columns = self.schema.index
if index_columns: # TODO: MultiIndex
index_column = index_columns[0]
name = session.run(f"(exec {index_column} from {var_name})[0]")
data_columns = data_columns or [col for col in all_columns if col not in index_columns]
assert all(col in self.schema.index for col in data_columns)
script = sql_select(data_columns, var_name)
if squeeze_axis is None and len(data_columns) == 1:
return session.run(f"values({script})[0][0]")
index = _ConstantSP.upload_obj(session, data_columns)
if len(set(self.schema.typeInt[data_columns])) > 1: # has mixed type
self.reset_with_script(f"loop(first, values({script}))")
if as_index:
return Index(self, name=name, session=session)
else:
return Series(self, index=index, name=name, session=session)
else:
if as_index:
self.reset_with_script(f"each(first, values({script}))")
return Index(self, name=name, session=session)
else:
self.reset_with_script(
f"table({index.var_name} as {ORCA_INDEX_NAME_FORMAT(0)}, "
f"each(first, values({script})) as ORCA_EXPRESSION_COLUMN)")
index_map = [(ORCA_INDEX_NAME_FORMAT(0), None)]
data_columns = ["ORCA_EXPRESSION_COLUMN"]
odf = _InternalFrame(session, self, index_map, data_columns)
return Series(odf, name=name, session=session)
else:
raise ValueError(f"Unsupported form: {form}")
def rename(self, columns):
old_names, new_names = zip(*columns.items())
old_names_literal = to_dolphindb_literal(old_names)
new_names_literal = to_dolphindb_literal(new_names)
session, var_name = self._session, self.var_name
if _get_verbose():
print(f"rename!({var_name}, {old_names_literal}, {new_names_literal})")
session.run(f"rename!({var_name}, {old_names_literal}, {new_names_literal})")
self._update_metadata()
def reset_with_script(self, script):
session, var_name = self._session, self.var_name
session.run(f"&{var_name} = ({script})")
self._update_metadata()
def append(self, script):
session, var_name = self._session, self.var_name
if _get_verbose():
print(f"append!({var_name}, {script})")
session.run(f"append!({var_name}, {script})")
self._update_metadata()
def attach_index(self, index, index_map):
script_list = index._to_script_list()
replace_column_scripts = []
update_column_list = []
update_column_scripts = []
for (col, _), script in zip(index_map, script_list):
if col in self.schema:
replace_column_scripts.append(f"replaceColumn!({self.var_name}, {to_dolphindb_literal(col)}, {script})")
else:
update_column_list.append(col)
update_column_scripts.append(script)
self._sql_update(update_column_list, update_column_scripts)
self._session.run(";".join(replace_column_scripts))
self._update_metadata()
def drop_columns(self, column_names):
session, var_name = self._session, self.var_name
column_names_literal = to_dolphindb_literal(column_names)
script = f"{var_name}.drop!({column_names_literal})"
if _get_verbose():
print(script)
session.run(script)
self._update_metadata()
def attach_default_index(self):
if self.segmented:
warnings.warn("Unable to attach an default index to segmented table.", AttachDefaultIndexWarning)
return False
form, var_name = self._form, self.var_name
size = len(self)
if size <= 0:
return False
if form == ddb.settings.DF_TABLE:
column_names = [ORCA_INDEX_NAME_FORMAT(0)]
new_values = [f"0..{size-1}"]
try:
self._sql_update(column_names, new_values)
except RuntimeError as ex:
ex_msg = str(ex)
if (ex_msg.startswith("The table is not allowed to update")
or ex_msg.startswith("<Server Exception> in run: The table is not allowed to update")
or ex_msg.startswith("<Server Exception> in run: The category")
or ex_msg.startswith("<Server Exception> in run: The data type")
or ex_msg.endswith("the table shouldn't be shared and the size of the new column must equal to the size of the table.")):
warnings.warn("Unable to attach an default index to the table.", AttachDefaultIndexWarning)
return False
else:
raise
elif form == ddb.settings.DF_VECTOR:
script = f"table({var_name}, 0..{size-1} as {ORCA_INDEX_NAME_FORMAT(0)})"
self.reset_with_script(script)
return True
def as_frame(self, name):
form, var_name = self._form, self.var_name
assert form == ddb.settings.DF_VECTOR
if name is None or not is_dolphindb_identifier(name):
_warn_not_dolphindb_identifier()
name = ORCA_INDEX_NAME_FORMAT(0)
script = f"table({var_name} as {name})"
self.reset_with_script(script)
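# A minimal usage sketch of the lifecycle described in the _ConstantSP docstring
# above (hypothetical calls, kept as comments):
#
#     var = _ConstantSP.upload_obj(session, some_pandas_object)
#     var.var_name      # "ORCA_" + generated id of the server-side variable
#     len(var)          # size()/count(*) evaluated on the DolphinDB server
#     del var           # __del__ sets the variable to NULL and undefs it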
class _InternalAccessor(object):
def __init__(self):
self._internal = None
@property
def _var_name(self):
"""
The variable name of the DolphinDB object represented by this
DataFrame or Series.
"""
return self._internal._var_name
@property
def _in_memory(self):
"""
Whether the DolphinDB object represented by the orca object is in memory.
If in_memory is True, modifications to the object are allowed.
"""
return self._internal.in_memory
@property
def _segmented(self):
"""
Whether the DolphinDB object represented by the orca object is segmented.
If segmented is True, direct access to the object is not allowed. A SQL
query is used instead.
"""
return self._internal.segmented
@property
def _data_columns(self):
"""
The real data column names in the DolphinDB table.
"""
return self._internal._data_columns
@property
def _index_columns(self):
"""
The real index column names in the DolphinDB table.
"""
return self._internal._index_columns
@property
def _column_index(self):
return self._internal._column_index
@property
def _column_index_names(self):
return self._internal._column_index_names
@property
def _column_index_level(self):
return self._internal._column_index_level
def _column_name_for(self, column_name_or_index):
return self._internal.column_name_for(column_name_or_index)
@property
def _index_map(self):
return self._internal._index_map
@property
def _index_names(self):
return self._internal._index_names
@property
def _index_name(self):
return self._internal._index_name
@property
def _schema(self):
"""
The schema of the DolphinDB table with the column names as the index.
"""
return self._internal.schema
@property
def _form(self):
return self._internal.form
@property
def _ddb_dtypes(self):
return self._internal._ddb_dtypes
@property
def _ddb_dtypestr(self):
return self._internal._ddb_dtypestr
@property
def _type(self):
"""
The type of the DolphinDB object.
"""
return self._internal.type
@property
def _var(self):
return self._internal.var
class _InternalFrame(object):
"""
The internal DataFrame which represents DolphinDB objects (a DolphinDB
    table or vector) and manages indices.
.. note:: this is an internal class. It is not supposed to be exposed to
    users and users should not directly access it.
"""
def __init__(self, session: ddb.session,
var: _ConstantSP,
index_map: Optional[List[IndexMap]] = None,
data_columns: Optional[List[str]] = None,
column_index: Optional[List[Tuple[str, ...]]] = None,
column_index_names: Optional[List[str]] = None, index_of_any_vector=None):
# index=None, is_any_vector=False):
from .indexes import Index
self._session = session
self._var = var
if index_of_any_vector:
self._is_any_vector = True
self._index = index_of_any_vector
return
else:
self._is_any_vector = False
if index_map is None or index_map == []: # TODO: create RangeIndex
index_map = [(ORCA_INDEX_NAME_FORMAT(0), None)] if var.attach_default_index() else []
else:
self.check_index_map_validity(index_map)
assert data_columns is None or all(isinstance(col, str) for col in data_columns)
self._index_map = index_map
if data_columns is None:
index_columns = {index_column for index_column, _ in index_map}
self._data_columns = [col for col in var.schema.index if col not in index_columns]
else:
self._data_columns = data_columns
if column_index is None:
self._column_index = _to_column_index(self._data_columns)
else:
assert len(column_index) == len(self._data_columns)
assert all(isinstance(i, tuple) for i in column_index), column_index
assert len({len(i) for i in column_index}) <= 1, column_index
self._column_index = column_index
if len(self._column_index) != len(set(self._column_index)):
raise ValueError("DolphinDB does not support duplicated column names")
if column_index_names is not None and not is_list_like(column_index_names):
raise ValueError('column_index_names should be list-like or None for a MultiIndex')
if (isinstance(column_index_names, list)
and all(name is None for name in column_index_names)):
self._column_index_names = None
else:
self._column_index_names = column_index_names
self._update_data_select_list()
def _update_data_select_list(self):
self._data_select_list = [f"{self.var_name}.{col}" for col in self.data_columns]
def __len__(self):
return len(self.var)
def __getitem__(self, key):
keys, _ = check_key_existence(key, self.data_columns)
return _InternalFrame(self._session, self.var, index_map=self.index_map, data_columns=keys)
# data_columns=keys, index=self.index)
@classmethod
def create_any_vector(cls, var, index):
return cls(var._session, var, index_of_any_vector=index)
@classmethod
def from_upload_obj(cls, session, obj):
var = _ConstantSP.upload_obj(session, obj)
return cls(session, var)
@classmethod
def from_run_script(cls, session, script):
var = _ConstantSP.run_script(session, script)
return cls(session, var)
@classmethod
def from_pandas(cls, session, pdf: Union[pd.DataFrame, pd.Index]):
if isinstance(pdf, pd.Index):
var = _ConstantSP.upload_obj(session, pdf.to_numpy())
var.as_frame(name=pdf.name)
index_map = _to_index_map(pdf)
return cls(session, var, index_map)
columns = pdf.columns
if len(columns) == 0 and len(pdf) == 0: # trivial case
pdf.index = pd.RangeIndex(0)
if isinstance(columns, pd.RangeIndex):
_warn_not_dolphindb_identifier()
data_columns = [f"{ORCA_COLUMN_NAME_FORMAT(i)}" for i, _ in enumerate(columns)]
else:
data_columns = ["_".join(column) if isinstance(column, tuple)
else column if is_dolphindb_identifier(column)
else f"{ORCA_COLUMN_NAME_FORMAT(i)}"
for i, column in enumerate(columns)]
column_index = _to_column_index(columns)
column_index_names = columns.names
index_map = _to_index_map(pdf.index)
index_columns = [index_column for index_column, _ in index_map]
reset_index = pdf.reset_index()
reset_index.columns = index_columns + data_columns
        # TODO: koalas check is datetime
try:
var = _ConstantSP.upload_obj(session, reset_index)
except RuntimeError as e:
ex_msg = str(e)
if ex_msg.startswith("Unable to cast Python instance of type"):
                raise RuntimeError(ex_msg + "; You might have created a table with non-string column names")
elif ex_msg.startswith("All columns must have the same size"):
raise RuntimeError(ex_msg + "; You might have passed duplicated column names")
else:
raise
return cls(session, var, index_map, data_columns, column_index, column_index_names)
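    # Minimal usage sketch (assumes a live DolphinDB session `s`):
    #     pdf = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
    #     internal = _InternalFrame.from_pandas(s, pdf)
    #     internal.data_columns   # -> ["a", "b"]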
@staticmethod
def check_index_map_validity(index_map):
if all(isinstance(index_field, str)
and (index_name is None or isinstance(index_name, tuple))
for index_field, index_name in index_map):
return
else:
            raise ValueError(f"Invalid index map: '{index_map}'")
@property
def _column_index_level(self):
""" Return the level of the column index. """
column_index = self._column_index
if len(column_index) == 0:
return 1
else:
levels = set(0 if idx is None else len(idx) for idx in column_index)
assert len(levels) == 1, levels
return list(levels)[0]
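        # Example: column_index == [("a",), ("b",)] has level 1, while a
        # two-level MultiIndex such as [("x", "a"), ("x", "b")] has level 2.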
@property # TODO: @lazyproperty
def _column_index_to_name(self):
return dict(zip(self.column_index, self.data_columns))
def column_name_for(self, column_name_or_index):
if column_name_or_index in self._column_index_to_name:
return self._column_index_to_name[column_name_or_index]
else:
if not isinstance(column_name_or_index, str):
raise KeyError(column_name_or_index)
return column_name_or_index
@property
def data_columns(self):
return self._data_columns
@property # TODO: @lazyproperty
def _index_columns(self):
return [col for col, _ in self.index_map]
@property
def column_index(self):
return self._column_index
@property
def column_index_names(self):
return self._column_index_names
@property
def index_map(self):
return self._index_map
@property
def index_names(self):
return [name[0] if isinstance(name, tuple) else name
for _, name in self._index_map]
# name = self.index_map[0][1]
# if name is not None:
# return name[0]
# else:
# return name
@property
def index_name(self):
name = self.index_map[0][1]
if name is not None:
return name[0]
else:
return name
@property
def _index_names(self):
return self.index_names
@property
def _index_name(self):
return self.index_name
# @property
# def use_range_index(self):
# return self._use_range_index
@property
def is_any_vector(self):
return self._is_any_vector
@property
def in_memory(self):
return self.var.in_memory
@property
def segmented(self):
return self.var.segmented
@property
def id(self):
return self.var.id
@property
def _var_name(self): # TODO: lazyproperty
return self.var.var_name
@property
def var_name(self):
return self._var_name
@property
def var(self):
return self._var
@property
def form(self):
return self.var.form
@property
def type(self):
return self.var.type
@property
def schema(self):
return self.var.schema
@property
def _ddb_dtypes(self):
return self.schema.typeInt
@property
def _ddb_dtypestr(self):
class _Typestr(object):
def __getitem__(this, key):
return self.schema.typeString[key].lower()
return _Typestr()
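        # _ddb_dtypestr behaves like a read-only mapping from column name to
        # the lower-cased DolphinDB type string, e.g. self._ddb_dtypestr[col]
        # might return "double" for a floating-point column (illustrative).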
# @property
# def index(self):
# return self._index
# @property
# def dtype(self):
# return self._dtype
@property
def data_select_list(self):
return self._data_select_list
def _to_script(self, ignore_index=False):
index_columns = [] if ignore_index else self._index_columns
select_list = itertools.chain(index_columns, self.data_columns)
return sql_select(select_list, self.var_name)
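        # sql_select presumably renders a script along the lines of
        #     select <index columns..., data columns...> from <var_name>
        # so ignore_index=True simply drops the index columns from the projection.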
def set_columns(self, columns):
assert len(columns) == len(self.data_columns)
column_index = _to_column_index(columns)
if len(column_index) != len(set(column_index)):
raise ValueError("DolphinDB does not support duplicated column names")
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
self._column_index = column_index
self._column_index_names = column_index_names
def copy_as_in_memory_table(self, inplace=False):
session = self._session
data_columns = self.data_columns
column_index = self.column_index
column_index_names = self.column_index_names
select_list = itertools.chain(self._index_columns, data_columns)
script = sql_select(select_list, self._var_name)
if inplace:
if self.segmented:
script = f"loadTableBySQL(<{script}>)"
self._var.reset_with_script(script)
return
else:
return
if self.segmented:
script = f"loadTableBySQL(<{script}>)"
var = _ConstantSP.run_script(session, script)
return _InternalFrame(session, var, self.index_map, data_columns,
column_index, column_index_names)
def attach_index(self, index):
from .indexes import Index
assert isinstance(index, Index)
var = self.var
all_columns = self._data_columns + self._index_columns
index_map = _to_index_map(index, all_columns)
var.drop_columns(self._index_columns)
var.attach_index(index, index_map)
self._index_map = index_map
self._index = Index._from_internal(self, index)
def append(self, other, ignore_index, sort):
# TODO: align columns and sort
select_list = self.schema.index
other_script = sql_select(select_list, other._var_name)
self.var.append(other_script)
if ignore_index:
index_map = [(ORCA_INDEX_NAME_FORMAT(0), None)] if self.var.attach_default_index() else []
self._index_map = index_map
def rename(self, columns, level):
self._var.rename(columns=columns)
column_index_level = self._column_index_level
new_data_columns = []
new_column_index = []
# new_column_index_names = [] # TODO: check correctness
# for data_column, col_idx, name in \
# zip(self._data_columns, self._column_index, self._column_index_names):
for data_column, col_idx in zip(self._data_columns, self._column_index):
new_col = columns.get(data_column)
if new_col is not None:
if level is None:
new_col_idx = tuple([new_col] * column_index_level)
else:
new_col_idx = list(col_idx)
new_col_idx[level] = new_col
new_col_idx = tuple(new_col_idx)
new_data_columns.append(new_col)
new_column_index.append(new_col_idx)
# new_column_index_names.append(name)
else:
new_data_columns.append(data_column)
new_column_index.append(col_idx)
# new_column_index_names.append(name)
self._data_columns = new_data_columns
self._column_index = new_column_index
self._update_data_select_list()
def get_script_with_unary_op(self, func):
data_columns = self.data_columns
select_list = (f"{func}({col}) as {col}"
for col in data_columns)
return sql_select(select_list, self._var_name, is_exec=True)
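        # For func="abs" and data columns ["x", "y"] this builds something like
        #     exec abs(x) as x, abs(y) as y from <var_name>
        # (the exact text depends on sql_select; "abs" is only an example).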
# def get_select_clause(self):
# return ",".join(self.column_names)
# def get_to_pandas_script(self, select=True):
# if not self.segmented and self.is_table_like:
# return "{}.{}".format(self.var_name, self.column_names[0])
# elif self.is_table_like:
# select_or_exec = "select" if select else "exec"
# return "{select_or_exec} {select_clause} from {var_name}".format(
# select_or_exec=select_or_exec,
# select_clause=self.get_select_clause(),
# var_name=self.var_name
# )
# else:
# return self.var_name
```
#### File: benchmark/testPerformance/driver.py
```python
import argparse
import json
import sys
import csv
import time
import os.path as path
from contexttimer import Timer
from setup.settings import *
from pandas_driver import PandasDriver
from orca_driver import OrcaDriver
from orca_partition_driver import OrcaPartitionDriver
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pandas operations')
parser.add_argument('n', help='rows of records in a file')
parser.add_argument('program', help='one of pandas, orca or orcapartition')
args = parser.parse_args()
csvfile = open(path.abspath(path.join(__file__, "../../../reports/benchmark_report_" +
time.strftime('%Y-%m-%d', time.localtime(time.time())))) + ".csv", 'a')
writer = csv.writer(csvfile)
writer.writerow([time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))])
writer.writerow(["'program':" + args.program, "'n':" + args.n])
results = {'program': args.program, 'n': args.n}
def reportToCsv(operation, timecost):
lines = [operation, timecost]
writer.writerow(lines)
dataDir = path.abspath(path.join(__file__, "../setup/data"))
    ticker_file = dataDir + "/ticker_" + args.n + ".csv"
    value_file = dataDir + "/value_" + args.n + ".csv"
# functions = [s for s in dir(OrcaDriver) if not s.startswith("__") and s!='join']
functions = ('a_load', 'filter', 'groupby', 'select', 'sort', 'resample_M', 'resample_3M',
'resample_A', 'resample_3A', 'resample_Q', 'resample_3Q')
# functions = ['a_load', 'resample_D']
if args.program == "pandas":
        driver = PandasDriver(ticker_file, value_file)
elif args.program == "orca":
        driver = OrcaDriver(ticker_file, value_file)
elif args.program == "orcapartition":
        driver = OrcaPartitionDriver(ticker_file, value_file)
functions = ('a_load', 'filter', 'groupby', 'select', 'sort', 'resample_M', 'resample_3M',
'resample_A', 'resample_3A', 'resample_Q', 'resample_3Q')
# functions = [s for s in dir(OrcaDriver) if not s.startswith("__") and s != 'join']
else:
raise ValueError("bad value for program")
for task in functions:
with Timer() as timer:
getattr(driver, task)()
results[task] = timer.elapsed
reportToCsv(task, results[task])
csvfile.close()
# json.dump(results, sys.stdout)
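    # Example invocation (the row count is illustrative):
    #     python driver.py 1000000 orca
    # where the first argument is the row count encoded in the data file names
    # and the second is one of pandas, orca or orcapartition.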
```
#### File: benchmark/testPerformance/orca_partition_driver.py
```python
from columns import value_columns, ticker_columns
from setup.settings import *
import orca
orca.connect(HOST, PORT, "admin", "123456")
class OrcaPartitionDriver(object):
def __init__(self, ticker_file, value_file):
self.ticker_file = ticker_file
self.value_file = value_file
self.df_ticker = None
self.df_value = None
script = """
login('admin', '123456')
if(existsDatabase('dfs://testOrcaTicker'))
dropDatabase('dfs://testOrcaTicker')
schema=extractTextSchema('{data1}')
db=database('dfs://testOrcaTicker', HASH, [DATE,20])
loadTextEx(db,`tickers,`date, '{data1}')
if(existsDatabase('dfs://testOrcaValue'))
dropDatabase('dfs://testOrcaValue')
schema=extractTextSchema('{data2}')
db=database('dfs://testOrcaValue', HASH, [INT, 4])
loadTextEx(db,`values,`id, '{data2}')
""".format(data1=ticker_file, data2=value_file)
s = orca.default_session()
s.run(script)
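        # The script above (re)creates two DFS databases: tickers is HASH-
        # partitioned on the date column (20 buckets) and values is HASH-
        # partitioned on the id column (4 buckets); both CSVs are bulk-loaded
        # with loadTextEx.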
def a_load(self):
self.df_ticker = orca.read_table("dfs://testOrcaTicker", 'tickers')
self.df_ticker.columns = ticker_columns
self.df_value = orca.read_table("dfs://testOrcaValue", 'values')
self.df_value.columns = value_columns
def groupby(self):
self.df_ticker.groupby("type").agg({'svalue': 'mean', 'price': 'sum'})
def filter(self):
_ = self.df_ticker[self.df_ticker['type'] == 'a'].compute()
def select(self):
_ = self.df_ticker[["ticker", "type"]].compute()
def sort(self):
self.df_ticker.sort_values(by='ticker')
def join(self):
joined = self.df_ticker.merge(self.df_value, on='type')
joined['total'] = joined['value'] + joined['svalue']
def resample_D(self):
self.df_ticker.resample('D', on='date')['svalue'].mean()
def resample_3D(self):
self.df_ticker.resample('3D', on='date')['svalue'].mean()
def resample_Q(self):
self.df_ticker.resample('Q', on='date')['svalue'].mean()
def resample_3Q(self):
self.df_ticker.resample('3Q', on='date')['svalue'].mean()
def resample_A(self):
self.df_ticker.resample('A', on='date')['svalue'].mean()
def resample_3A(self):
self.df_ticker.resample('3A', on='date')['svalue'].mean()
```
#### File: tests/numpy_unit_testing/test_array_attributes.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class ArrayAttributesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_array_attributes_itemsize(self):
npa = np.array([1, 2, 3, 4, 5])
dnpa = dnp.array([1, 2, 3, 4, 5])
self.assertEqual(dnpa.itemsize, npa.itemsize)
# TODO: dtype bug
# npa = np.array([1, 2, 3, 4, 5], dtype=np.int8)
# dnpa = dnp.array([1, 2, 3, 4, 5], dtype=np.int8)
# self.assertEqual(dnpa.itemsize, npa.itemsize)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/numpy_unit_testing/test_function_binary_operator_true_divide.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionTruedivideTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_true_divide_scalar(self):
self.assertEqual(dnp.true_divide(1.2 + 1j, 1.2 - 1j), np.true_divide(1.2 + 1j, 1.2 - 1j))
self.assertEqual(dnp.true_divide(0.5, 9), np.true_divide(0.5, 9))
self.assertEqual(dnp.true_divide(-1, 8.5), np.true_divide(-1, 8.5))
self.assertEqual(dnp.true_divide(1, 4), np.true_divide(1, 4))
self.assertEqual(dnp.true_divide(1, -5), np.true_divide(1, -5))
self.assertEqual(dnp.true_divide(0, 9), np.true_divide(0, 9))
self.assertEqual(dnp.isnan(dnp.true_divide(dnp.nan, -5)), True)
self.assertEqual(np.isnan(np.true_divide(dnp.nan, -5)), True)
def test_function_math_binary_true_divide_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.true_divide(lst1, lst2), np.true_divide(lst1, lst2))
def test_function_math_binary_true_divide_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.true_divide(dnpa, 1), np.true_divide(npa, 1))
assert_array_equal(dnp.true_divide(dnpa, dnp.nan), np.true_divide(npa, np.nan))
assert_array_equal(dnp.true_divide(1, dnpa), np.true_divide(1, npa))
def test_function_math_binary_true_divide_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.true_divide(dnpa1, dnpa2), np.true_divide(npa1, npa2))
def test_function_math_binary_true_divide_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.true_divide(npa1, npa2, out=npa)
dnp.true_divide(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_true_divide_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps = pd.Series([4, 6, 9])
os = orca.Series([4, 6, 9])
assert_series_equal(dnp.true_divide(dnpa, os).to_pandas(), np.true_divide(npa, ps))
assert_series_equal(dnp.true_divide(os, dnpa).to_pandas(), np.true_divide(ps, npa))
def test_function_math_binary_true_divide_array_with_dataframe(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
pdf = pd.DataFrame({'A': [4, 6, 9]})
odf = orca.DataFrame({'A': [4, 6, 9]})
# TODO: orca true_divide bug
# assert_frame_equal(odf.true_divide(dnpa, axis=0).to_pandas(), pdf.true_divide(npa, axis=0))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/numpy_unit_testing/test_function_creation_linspace.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionLinspaceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_linspace(self):
npa = np.linspace(2.0, 3.0, num=5)
# TODO: NOT IMPLEMENTED
# dnpa = dnp.linspace(2.0, 3.0, num=5)
# assert_equal(dnpa, npa)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/numpy_unit_testing/test_function_statistical_bincount.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionBincountTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_bincount_list(self):
npa = np.bincount([1, 8, 27, 0, 5])
dnpa = dnp.bincount([1, 8, 27, 0, 5,])
assert_array_equal(dnpa, npa)
def test_function_math_bincount_array(self):
npa = np.bincount(np.array([1, 8, 27, 0, 5]))
dnpa = dnp.bincount(dnp.array([1, 8, 27, 0, 5,]))
assert_array_equal(dnpa, npa)
def test_function_math_bincount_series(self):
ps = pd.Series([8, 27, 0, 5])
os = orca.Series(ps)
assert_array_equal(dnp.bincount(os), np.bincount(ps))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/numpy_unit_testing/test_function_statistical_median.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionMedianTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_median_scalar(self):
self.assertEqual(dnp.median(0.5), np.median(0.5))
self.assertEqual(dnp.median(1), np.median(1))
self.assertEqual(dnp.median(-1), np.median(-1))
self.assertEqual(dnp.median(0), np.median(0))
self.assertEqual(dnp.isnan(dnp.median(dnp.nan)), True)
self.assertEqual(np.isnan(np.median(np.nan)), True)
def test_function_math_median_list(self):
npa = np.median([1, 8, 27, -27, 0, 5, np.nan])
dnpa = dnp.median([1, 8, 27, -27, 0, 5, dnp.nan])
assert_array_equal(dnpa, npa)
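        # np.median propagates NaN, so both results are NaN here;
        # assert_array_equal treats NaNs at matching positions as equal.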
def test_function_math_median_array(self):
npa = np.median(np.array([1, 8, 27, -27, 0, 5, np.nan]))
dnpa = dnp.median(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]))
assert_array_equal(dnpa, npa)
def test_function_math_median_series(self):
ps = pd.Series([-1, 8, 27, -27, 0, 5, np.nan])
os = orca.Series(ps)
self.assertEqual(dnp.isnan(dnp.median(os)), True)
self.assertEqual(np.isnan(np.median(ps)), True)
ps = pd.Series([-1, 8, 27, -27, 0, 5])
os = orca.Series(ps)
self.assertEqual(dnp.median(os), np.median(ps))
def test_function_math_median_dataframe(self):
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],
"colb": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.isnan(dnp.median(odf)), True)
self.assertEqual(np.isnan(np.median(pdf)), True)
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0],
"colb": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.median(odf), np.median(pdf))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/numpy_unit_testing/test_function_statistical_percentile.py
```python
import unittest
from setup.settings import *
from numpy.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionPercentileTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_percentile_scalar(self):
self.assertEqual(dnp.percentile(0.5, 30), np.percentile(0.5, 30))
self.assertEqual(dnp.percentile(1, 30), np.percentile(1, 30))
self.assertEqual(dnp.percentile(-1, 30), np.percentile(-1, 30))
self.assertEqual(dnp.percentile(0, 30), np.percentile(0, 30))
self.assertEqual(dnp.isnan(dnp.percentile(dnp.nan, 30)), True)
self.assertEqual(np.isnan(np.percentile(np.nan, 30)), True)
def test_function_math_percentile_list(self):
npa = np.percentile([1, 8, 27, -27, 0, 5, np.nan], 60)
dnpa = dnp.percentile([1, 8, 27, -27, 0, 5, dnp.nan], 60)
assert_array_equal(dnpa, npa)
def test_function_math_percentile_array(self):
npa = np.percentile(np.array([1, 8, 27, -27, 0, 5, np.nan]), 60)
dnpa = dnp.percentile(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]), 60)
assert_array_equal(dnpa, npa)
def test_function_math_percentile_series(self):
ps = pd.Series([-1, 8, 27, -27, 0, 5, np.nan])
os = orca.Series(ps)
self.assertEqual(dnp.isnan(dnp.percentile(os, 30)), True)
self.assertEqual(np.isnan(np.percentile(ps, 30)), True)
ps = pd.Series([-1, 8, 27, -27, 0, 5])
os = orca.Series(ps)
self.assertEqual(dnp.percentile(os, 60), np.percentile(ps, 60))
def test_function_math_percentile_dataframe(self):
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],
"colb": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.isnan(dnp.percentile(odf, 30)), True)
self.assertEqual(np.isnan(np.percentile(pdf, 30)), True)
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0],
"colb": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0]})
odf = orca.DataFrame(pdf)
self.assertEqual(dnp.percentile(odf, 60), np.percentile(pdf, 60))
def test_function_math_percentile_param_q_list_X_scalar(self):
assert_array_equal(dnp.percentile(0.5, [60, 45, 20]), np.percentile(0.5, [60, 45, 20]))
assert_array_equal(dnp.percentile(1, [60, 45, 20]), np.percentile(1, [60, 45, 20]))
assert_array_equal(dnp.percentile(-1, [60, 45, 20]), np.percentile(-1, [60, 45, 20]))
assert_array_equal(dnp.percentile(0, [60, 45, 20]), np.percentile(0, [60, 45, 20]))
assert_array_equal(dnp.isnan(dnp.percentile(dnp.nan, [60, 45, 20])), True)
assert_array_equal(np.isnan(np.percentile(np.nan, [60, 45, 20])), True)
def test_function_math_percentile_param_q_list_X_list(self):
npa = np.percentile([1, 8, 27, -27, 0, 5, np.nan], [60, 45, 20])
dnpa = dnp.percentile([1, 8, 27, -27, 0, 5, dnp.nan], [60, 45, 20])
assert_array_equal(dnpa, npa)
def test_function_math_percentile_param_q_list_X_array(self):
npa = np.percentile(np.array([1, 8, 27, -27, 0, 5, np.nan]), [60, 45, 20])
dnpa = dnp.percentile(dnp.array([1, 8, 27, -27, 0, 5, dnp.nan]), [60, 45, 20])
assert_array_equal(dnpa, npa)
def test_function_math_percentile_param_q_list_X_series(self):
ps = pd.Series([-1, 8, 27, -27, 0, 5, np.nan])
os = orca.Series(ps)
assert_array_equal(dnp.isnan(dnp.percentile(os, [60, 45, 20])), True)
assert_array_equal(np.isnan(np.percentile(ps, [60, 45, 20])), True)
ps = pd.Series([-1, 8, 27, -27, 0, 5])
os = orca.Series(ps)
assert_array_equal(dnp.percentile(os, [60, 45, 20]), np.percentile(ps, [60, 45, 20]))
def test_function_math_percentile_param_q_list_X_dataframe(self):
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, 2.0, np.nan],
"colb": [-1, 8, 27, -27, 0, 5, np.nan, 1.5, 1.7, np.nan, 2.0]})
odf = orca.DataFrame(pdf)
assert_array_equal(dnp.isnan(dnp.percentile(odf, [60, 45, 20])), True)
assert_array_equal(np.isnan(np.percentile(pdf, [60, 45, 20])), True)
pdf = pd.DataFrame({"cola": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0],
"colb": [-1, 8, 27, -27, 0, 5, 1.5, 1.7, 2.0]})
odf = orca.DataFrame(pdf)
assert_array_equal(dnp.percentile(odf, [60, 45, 20]), np.percentile(pdf, [60, 45, 20]))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_combining_merge_asof_partition.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
def _create_odf_csv(datal, datar):
dfsDatabase = "dfs://testMergeAsofDB"
s = orca.default_session()
dolphindb_script = """
    login('admin', '123456')
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
db=database('{dbPath}', VALUE, 2010.01M..2010.05M)
stb1=extractTextSchema('{data1}')
update stb1 set type="SYMBOL" where name="type"
stb2=extractTextSchema('{data2}')
update stb2 set type="SYMBOL" where name="ticker"
loadTextEx(db,`tickers,`date, '{data1}',,stb1)
loadTextEx(db,`values,`date, '{data2}',,stb2)
""".format(dbPath=dfsDatabase, data1=datal, data2=datar)
s.run(dolphindb_script)
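    # The script (re)creates a DFS database VALUE-partitioned by month
    # (2010.01M..2010.05M), coerces the left table's `type` column and the
    # right table's `ticker` column to SYMBOL in the extracted schemas, and
    # bulk-loads both CSVs partitioned on `date` via loadTextEx.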
class Csv:
odfs_csv_left = None
odfs_csv_right = None
pdf_csv_left = None
pdf_csv_right = None
class DfsMergeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
left_fileName = 'test_merge_asof_left_table.csv'
right_fileName = 'test_merge_asof_right_table.csv'
datal = os.path.join(DATA_DIR, left_fileName)
        datal = datal.replace('\\', '/')
datar = os.path.join(DATA_DIR, right_fileName)
datar = datar.replace('\\', '/')
dfsDatabase = "dfs://testMergeAsofDB"
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
_create_odf_csv(datal, datar)
# import
Csv.odfs_csv_left = orca.read_table(dfsDatabase, 'tickers')
Csv.pdf_csv_left = pd.read_csv(datal, parse_dates=[0])
Csv.odfs_csv_right = orca.read_table(dfsDatabase, 'values')
Csv.pdf_csv_right = pd.read_csv(datar, parse_dates=[0])
@property
def odfs_csv_left(self):
return Csv.odfs_csv_left
@property
def odfs_csv_right(self):
return Csv.odfs_csv_right
@property
def pdf_csv_left(self):
return Csv.pdf_csv_left
@property
def pdf_csv_right(self):
return Csv.pdf_csv_right
@property
def odfs_csv_left_index(self):
return Csv.odfs_csv_left.set_index("date")
@property
def odfs_csv_right_index(self):
return Csv.odfs_csv_right.set_index("date")
@property
def pdf_csv_left_index(self):
return Csv.pdf_csv_left.set_index("date")
@property
def pdf_csv_right_index(self):
return Csv.pdf_csv_right.set_index("date")
@property
def odfs_bid_csv_left(self):
return self.odfs_csv_left.sort_values(by=['bid', 'date']).reset_index(drop=True)
@property
def odfs_bid_csv_right(self):
return self.odfs_csv_right.sort_values(by=['bid', 'date']).reset_index(drop=True)
@property
def pdf_bid_csv_left(self):
return self.pdf_csv_left.sort_values(by=['bid', 'date']).reset_index(drop=True)
@property
def pdf_bid_csv_right(self):
return self.pdf_csv_right.sort_values(by=['bid', 'date']).reset_index(drop=True)
@property
def odfs_bid_csv_left_index(self):
return self.odfs_csv_left.sort_values(by=['bid', 'date']).set_index('bid')
@property
def odfs_bid_csv_right_index(self):
return self.odfs_csv_right.sort_values(by=['bid', 'date']).set_index('bid')
@property
def pdf_bid_csv_left_index(self):
return self.pdf_csv_left.sort_values(by=['bid', 'date']).set_index('bid')
@property
def pdf_bid_csv_right_index(self):
return self.pdf_csv_right.sort_values(by=['bid', 'date']).set_index('bid')
def test_assert_original_dataframe_equal(self):
assert_frame_equal(self.odfs_csv_left.to_pandas(), self.pdf_csv_left, check_dtype=False)
assert_frame_equal(self.odfs_csv_right.to_pandas(), self.pdf_csv_right, check_dtype=False)
assert_frame_equal(self.odfs_csv_left_index.to_pandas(), self.pdf_csv_left_index, check_dtype=False)
assert_frame_equal(self.odfs_csv_right_index.to_pandas(), self.pdf_csv_right_index, check_dtype=False)
assert_frame_equal(self.odfs_bid_csv_left.to_pandas(), self.pdf_bid_csv_left, check_dtype=False)
assert_frame_equal(self.odfs_bid_csv_right.to_pandas(), self.pdf_bid_csv_right, check_dtype=False)
assert_frame_equal(self.odfs_bid_csv_left_index.to_pandas(), self.pdf_bid_csv_left_index, check_dtype=False)
assert_frame_equal(self.odfs_bid_csv_right_index.to_pandas(), self.pdf_bid_csv_right_index, check_dtype=False)
def test_merge_asof_from_dfs_param_on(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date')
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date')
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid')
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid')
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date')
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date')
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid')
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid')
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True)
odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True, right_index=True)
odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_by(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', by='ticker')
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', by='ticker')
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftbyrightby(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', left_by='ticker', right_by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', left_by='ticker', right_by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
right_by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton_param_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid',
suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid',
suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton_param_by(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date', by='ticker')
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date', by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid', by='ticker')
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid', by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton_param_leftbyrightby(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', left_by='ticker', right_by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', left_by='ticker', right_by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
right_by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index_param_by(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
by='ticker')
odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
right_index=True, by='ticker')
odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
right_index=True, by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index_param_leftbyrightby(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
left_by='ticker', right_by='ticker')
odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
left_by='ticker', right_by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
right_index=True, left_by='ticker',
right_by='ticker')
odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
right_index=True, left_by='ticker',
right_by='ticker')
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index_param_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
right_index=True, suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_on_param_by_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', by='ticker', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', by='ticker',
suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', by='ticker',
suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', by='ticker',
suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_on_param_leftbyrightby_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date',
left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date',
# left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton_param_by_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
by='ticker', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid', by='ticker',
suffixes=('_left', '_right'))
pdf.fillna("", inplace=True)
odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid', by='ticker',
suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_leftonrighton_param_leftbyrightby_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
# left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid',
left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid',
# left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index_param_by_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
by='ticker', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
right_index=True,
by='ticker', suffixes=('_left', '_right'))
odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
right_index=True,
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_index_param_leftbyrightby_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
# left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
right_index=True,
left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True, right_index=True,
# left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_on_param_index(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date')
# TODO:ORCA error left_index, right_on not supported
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date')
# TODO:ORCA error left_index, right_on not supported
# odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right_index, right_index=True, left_on='date')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid')
# TODO:ORCA error left_index, right_on not supported
# odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid')
# TODO:ORCA error left_index, right_on not supported
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_on_param_index_param_by(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date', by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date', by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date', by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, right_index=True, left_on='date', by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid',
by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid', by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid',
by='ticker')
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid', by='ticker')
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
def test_merge_asof_from_dfs_param_on_param_index_param_by_param_suffixes(self):
pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date',
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date',
# by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date',
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, right_index=True, left_on='date',
# by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid',
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid',
# by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid',
by='ticker', suffixes=('_left', '_right'))
# TODO:ORCA by bug
# odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid',
# by='ticker', suffixes=('_left', '_right'))
# assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_combining_merge.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
pdf_csv_left = None
pdf_csv_right = None
odf_csv_left = None
odf_csv_right = None
class MergeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
left_fileName = 'test_merge_left_table.csv'
right_fileName = 'test_merge_right_table.csv'
data_left = os.path.join(DATA_DIR, left_fileName)
data_left = data_left.replace('\\', '/')
data_right = os.path.join(DATA_DIR, right_fileName)
data_right = data_right.replace('\\', '/')
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
# import
Csv.odf_csv_left = orca.read_csv(data_left)
Csv.pdf_csv_left = pd.read_csv(data_left, parse_dates=[0, 1])
Csv.odf_csv_right = orca.read_csv(data_right)
Csv.pdf_csv_right = pd.read_csv(data_right)
@property
def odf_csv_left(self):
return Csv.odf_csv_left
@property
def odf_csv_right(self):
return Csv.odf_csv_right
@property
def pdf_csv_left(self):
return Csv.pdf_csv_left
@property
def pdf_csv_right(self):
return Csv.pdf_csv_right
@property
def odf_csv_left_index(self):
return Csv.odf_csv_left.set_index("type")
@property
def odf_csv_right_index(self):
return Csv.odf_csv_right.set_index("type")
@property
def pdf_csv_left_index(self):
return Csv.pdf_csv_left.set_index("type")
@property
def pdf_csv_right_index(self):
return Csv.pdf_csv_right.set_index("type")
@property
def pdf_series_right(self):
return Csv.pdf_series_left
@property
def odf_left_small(self):
return orca.DataFrame({"type": [1, 2], "val": [1, 2]}, index=[1, 2])
@property
def pdf_left_small(self):
return pd.DataFrame({"type": [1, 2], "val": [1, 2]}, index=[1, 2])
@property
def odf_left_small_index(self):
return self.odf_left_small.set_index("type")
@property
def pdf_left_small_index(self):
return self.pdf_left_small.set_index("type")
@property
def odf_right_small(self):
return orca.DataFrame({"type": [2, 3], "vol": [3, 4]}, index=[1, 2])
@property
def pdf_right_small(self):
return pd.DataFrame({"type": [2, 3], "vol": [3, 4]}, index=[1, 2])
@property
def odf_right_small_index(self):
return self.odf_right_small.set_index("type")
@property
def pdf_right_small_index(self):
return self.pdf_right_small.set_index("type")
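    # The two small frames only share the key value 2, so an inner merge on
    # "type" yields a single row (type=2, val=2, vol=3); the small-frame tests
    # below rely on this overlap.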
def test_assert_original_dataframe_equal(self):
assert_frame_equal(self.odf_csv_left.to_pandas(), self.pdf_csv_left, check_dtype=False)
assert_frame_equal(self.odf_csv_right.to_pandas(), self.pdf_csv_right, check_dtype=False)
assert_frame_equal(self.odf_csv_left_index.to_pandas(), self.pdf_csv_left_index, check_dtype=False)
assert_frame_equal(self.odf_csv_right_index.to_pandas(), self.pdf_csv_right_index, check_dtype=False)
def test_merge_from_csv_param_suffix(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.sort_values("date").to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_how(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", on="type")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="inner", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="inner", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_on(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_leftonrighton(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_index(self):
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_index_param_suffix(self):
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_index_param_on(self):
odf_merge = self.odf_csv_left.merge(self.odf_csv_right_index, left_on="type", right_index=True)
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right_index, left_on="type", right_index=True)
assert_frame_equal(odf_merge.sort_values(by=["id", "value"]).to_pandas(),
pdf_merge.sort_values(by=["id", "value"]), check_dtype=False, check_like=False)
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right, right_on="type", left_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right, right_on="type", left_index=True)
assert_frame_equal(odf_merge.sort_values(by=["id", "value"]).to_pandas(),
pdf_merge.sort_values(by=["id", "value"]), check_dtype=False, check_like=False)
def test_merge_small_param_on(self):
odf_merge = self.odf_left_small.merge(self.odf_right_small_index, left_on="type", right_index=True)
pdf_merge = self.pdf_left_small.merge(self.pdf_right_small_index, left_on="type", right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# odf_merge = self.odf_left_small.merge(self.odf_right_small, left_on="type", right_index=True)
# pdf_merge = self.pdf_left_small.merge(self.pdf_right_small, left_on="type", right_index=True)
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
odf_merge = self.odf_left_small_index.merge(self.odf_right_small, right_on="type", left_index=True)
pdf_merge = self.pdf_left_small_index.merge(self.pdf_right_small, right_on="type", left_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_index_param_how(self):
# how = left
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="left", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="left", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="right", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="right", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# default how = inner
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="outer", left_index=True,
right_index=True)
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="outer", left_index=True,
right_index=True)
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_csv_param_suffix_param_how(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", on="type", suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", on="type", suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", on="type", suffixes=('_left', '_right'))
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_param_how_param_leftonrighton(self):
# how = left
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="left", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="left", left_on="type", right_on="type")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = right
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="right", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="right", left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left.merge(self.odf_csv_right, how="outer", left_on="type", right_on="type")
pdf_merge = self.pdf_csv_left.merge(self.pdf_csv_right, how="outer", left_on="type", right_on="type")
# TODO: PARTITIONED TABLE IS IN RANDOM ORDER
# assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_csv_index_param_suffix_param_how(self):
# how = left
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="left", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="left", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=True)
# how = right
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="right", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="right", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# default how = inner
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, left_index=True, right_index=True,
suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
# how = outer
odf_merge = self.odf_csv_left_index.merge(self.odf_csv_right_index, how="outer", left_index=True,
right_index=True, suffixes=('_left', '_right'))
pdf_merge = self.pdf_csv_left_index.merge(self.pdf_csv_right_index, how="outer", left_index=True,
right_index=True, suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.sort_index().to_pandas(), pdf_merge, check_dtype=False, check_like=False)
def test_merge_from_dataframe_param_suffix(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
odf_merge = odf.merge(odf_other, on="key", suffixes=('_left', '_right'))
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf_merge = pdf.merge(pdf_other, on="key", suffixes=('_left', '_right'))
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
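    # A minimal pandas-only sketch (not part of the original suite) of why the
    # suffixes matter: they only take effect when both frames share a non-key
    # column. Orca is assumed to mirror this behaviour, so only pandas is asserted.
    def test_merge_from_dataframe_param_suffix_overlap_sketch(self):
        pdf_l = pd.DataFrame({'key': ['K0', 'K1'], 'val': [1, 2]})
        pdf_r = pd.DataFrame({'key': ['K0', 'K1'], 'val': [10, 20]})
        pdf_m = pdf_l.merge(pdf_r, on="key", suffixes=('_left', '_right'))
        # the overlapping column 'val' is disambiguated by the suffixes
        self.assertListEqual(list(pdf_m.columns), ['key', 'val_left', 'val_right'])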
def test_merge_from_dataframe_param_leftonrighton(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
odf_merge = odf.merge(odf_other, left_on="key", right_on="key")
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf_merge = pdf.merge(pdf_other, left_on="key", right_on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
def test_merge_from_dataframe_how(self):
odf = orca.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
odf_other = orca.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
# how = left
odf_merge = odf.merge(odf_other, how="left", on='key')
pdf_merge = pdf.merge(pdf_other, how="left", on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = right
odf_merge = odf.merge(odf_other, how="right", on='key')
pdf_merge = pdf.merge(pdf_other, how="right", on="key")
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = inner
odf_merge = odf.merge(odf_other, how="inner", on='key')
pdf_merge = pdf.merge(pdf_other, how="inner", on='key')
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = odf.merge(odf_other, how="outer", on='key')
pdf_merge = pdf.merge(pdf_other, how="outer", on='key')
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
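    # A pandas-only sketch (not part of the original suite) of the row counts each
    # how value should produce for the 6-key left frame and 3-key right frame used
    # above; orca is assumed to agree, since the merged frames above compare equal.
    def test_merge_from_dataframe_how_row_counts_sketch(self):
        pdf = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], 'A': [1, 2, 3, 4, 5, 6]})
        pdf_other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], 'B': [11, 22, 33]})
        self.assertEqual(len(pdf.merge(pdf_other, how="left", on="key")), 6)
        self.assertEqual(len(pdf.merge(pdf_other, how="right", on="key")), 3)
        self.assertEqual(len(pdf.merge(pdf_other, how="inner", on="key")), 3)
        self.assertEqual(len(pdf.merge(pdf_other, how="outer", on="key")), 6)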
def test_merge_from_dataframe_index(self):
orca_left = orca.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
orca_right = orca.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
odf_merge = orca_left.merge(orca_right, left_index=True, right_index=True)
pd_left = pd.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
pd_right = pd.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
pdf_merge = pd_left.merge(pd_right, left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
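    # A pandas-only sketch (not part of the original suite): with left_index and
    # right_index set and the default how="inner", only index labels present in
    # both frames ('K0' and 'K2' above) survive the merge.
    def test_merge_from_dataframe_index_inner_labels_sketch(self):
        pd_left = pd.DataFrame({'A': [1, 2, 3]}, index=['K0', 'K1', 'K2'])
        pd_right = pd.DataFrame({'C': [111, 222, 333]}, index=['K0', 'K2', 'K3'])
        pdf_merge = pd_left.merge(pd_right, left_index=True, right_index=True)
        self.assertListEqual(list(pdf_merge.index), ['K0', 'K2'])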
def test_merge_from_dataframe_index_param_how(self):
orca_left = orca.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
orca_right = orca.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
pd_left = pd.DataFrame({'A': [1, 2, 3], 'B': [11, 22, 33]}, index=['K0', 'K1', 'K2'])
pd_right = pd.DataFrame({'C': [111, 222, 333], 'D': [1111, 2222, 3333]}, index=['K0', 'K2', 'K3'])
        # the how="left" case is omitted here; note that merge defaults to how="inner"
# how = right
odf_merge = orca_left.merge(orca_right, how="right", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="right", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = inner
odf_merge = orca_left.merge(orca_right, how="inner", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="inner", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
# how = outer
odf_merge = orca_left.merge(orca_right, how="outer", left_index=True, right_index=True)
pdf_merge = pd_left.merge(pd_right, how="outer", left_index=True, right_index=True)
assert_frame_equal(odf_merge.to_pandas(), pdf_merge, check_dtype=False)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_dataframe_function_application.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
pdf_csv = None
odf_csv = None
class DataFrameFuncApplicationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'USPricesSample.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
# Csv.odf_csv = orca.read_csv(data, dtype={"DLSTCD": np.float32, "DLPRC": np.float32})
Csv.odf_csv = orca.read_csv(data, dtype={"PERMNO": np.int32, "date": 'DATE', "TRDSTAT": 'SYMBOL',
"DLSTCD": np.float32, "DLPRC": np.float32, "VOL": np.float32,
"SHROUT": np.float32, "CFACPR":np.float32, "CFACSHR": np.float32})
# pdf from import
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1], dtype={"PERMNO": np.int32, "SHRCD": np.int32, "HEXCD": np.int32,
"DLSTCD": np.float32, "DLPRC": np.float32,
"VOL": np.float32, "SHROUT": np.float32})
Csv.odf_csv = Csv.odf_csv.drop(columns=['DLRET'])
Csv.pdf_csv.drop(columns=['DLRET'], inplace=True)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odf_csv(self):
return Csv.odf_csv
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def odf(self):
return orca.DataFrame(self.pdf)
def test_dataframe_function_application_apply(self):
pdf = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
odf = orca.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
assert_frame_equal(odf.apply(np.sqrt).to_pandas(), pdf.apply(np.sqrt))
assert_frame_equal(odf.apply(np.sum, axis=0).to_pandas(), pdf.apply(np.sum, axis=0).to_frame())
def test_dataframe_function_application_applymap(self):
pdf = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
odf = orca.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
assert_frame_equal(odf.applymap(np.sqrt).to_pandas(), pdf.applymap(np.sqrt))
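    # A pandas-only sketch (not part of the original suite) of the difference in
    # scope between apply and applymap: apply works column- or row-wise, applymap
    # element-wise. Orca is assumed to follow the same semantics.
    def test_dataframe_function_application_apply_lambda_sketch(self):
        pdf = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
        # column-wise apply returns one value per column
        self.assertListEqual(list(pdf.apply(lambda col: col.max())), [4, 9])
        # element-wise applymap touches every cell
        self.assertEqual(pdf.applymap(lambda x: x + 1).iloc[0, 0], 5)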
def test_dataframe_function_application_pipe(self):
# TODO NOT IMPLEMENTED ERROR
pass
def test_dataframe_function_application_agg(self):
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
odf = orca.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
assert_frame_equal(odf.agg(['sum', 'min']).to_pandas(), pdf.agg(['sum', 'min']))
def test_dataframe_function_application_aggregate(self):
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
odf = orca.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
assert_frame_equal(odf.aggregate(['sum', 'min']).to_pandas(), pdf.aggregate(['sum', 'min']))
assert_frame_equal(odf.aggregate({'A': ['sum', 'min'], 'B': ['min', 'max']}).to_pandas(),
pdf.aggregate({'A': ['sum', 'min'], 'B': ['min', 'max']}))
# TODO:DIFFS
# assert_frame_equal(odf.aggregate("mean", axis="columns").to_pandas(), pdf.aggregate("mean", axis="columns"))
def test_dataframe_function_application_transform(self):
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
odf = orca.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=['A', 'B', 'C'])
assert_frame_equal(odf.transform([np.sqrt, np.exp]).to_pandas(), pdf.transform([np.sqrt, np.exp]))
def test_dataframe_function_application_expanding(self):
# pdf = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
# odf = orca.DataFrame({'B': [0, 1, 2, np.nan, 4]})
# assert_frame_equal(pdf.expanding(2).sum(), odf.expanding(2).sum().to_pandas())
# TODO NOT IMPLEMENTED ERROR
pass
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_dataframe_reindexing_selection.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
pdf_csv = None
odf_csv = None
class DataFrameReindexingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'USPricesSample.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.odf_csv = orca.read_csv(data, dtype={"DLSTCD": np.float32, "DLPRC": np.float32})
# pdf from import
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1], dtype={"DLSTCD": np.float32, "DLPRC": np.float32})
Csv.odf_csv = Csv.odf_csv.drop(columns=['DLRET'])
Csv.pdf_csv.drop(columns=['DLRET'], inplace=True)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odf_csv(self):
return Csv.odf_csv
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def odf(self):
return orca.DataFrame(self.pdf)
    def test_dataframe_reindexing_selection_label_manipulation_between_time(self):
idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
pdf = pd.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
odf = orca.DataFrame(pdf)
assert_frame_equal(odf.between_time('0:15', '0:45').to_pandas(), pdf.between_time('0:15', '0:45'))
assert_frame_equal(odf.between_time('0:45', '0:15').to_pandas(), pdf.between_time('0:45', '0:15'))
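        # note: when the start time is later than the end time (the second call above),
        # pandas between_time returns the rows *outside* the interval, i.e. the
        # complement within each day; orca is expected to match that behaviour.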
    def test_dataframe_reindexing_selection_label_manipulation_take(self):
n = np.array([0, 1, 4])
assert_frame_equal(self.odf.take(n).to_pandas(), self.pdf.take(n))
assert_frame_equal(self.odf.take([]).to_pandas(), self.pdf.take([]))
assert_frame_equal(self.odf.take([0, 1], axis=1).to_pandas(), self.pdf.take([0, 1], axis=1))
assert_frame_equal(self.odf.take([-1, -2], axis=0).to_pandas(), self.pdf.take([-1, -2], axis=0))
n = np.random.randint(0, 2999, 100)
assert_frame_equal(self.odf_csv.take(n).to_pandas(), self.pdf_csv.take(n), check_dtype=False)
assert_frame_equal(self.odf_csv.take([0, 1, 5, 7, 11, 15], axis=1).to_pandas(), self.pdf_csv.take([0, 1, 5, 7, 11, 15], axis=1), check_dtype=False)
    def test_dataframe_reindexing_selection_label_manipulation_equals(self):
pdf = pd.DataFrame({1: [10], 2: [20]})
p_exactly_equal = pd.DataFrame({1: [10], 2: [20]})
p_different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
p_different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
odf = orca.DataFrame(pdf)
o_exactly_equal = orca.DataFrame(p_exactly_equal)
o_different_column_type = orca.DataFrame(p_different_column_type)
o_different_data_type = orca.DataFrame(p_different_data_type)
self.assertEqual(odf.equals(o_exactly_equal), pdf.equals(p_exactly_equal))
self.assertEqual(odf.equals(o_different_column_type), pdf.equals(p_different_column_type))
self.assertEqual(odf.equals(o_different_data_type), pdf.equals(p_different_data_type))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_filtering.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class Csv:
pdf_csv = None
odf_csv = None
pdf_csv_re = None
odf_csv_re = None
class FilteringTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'USPricesSample.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
# odf from import
Csv.odf_csv = orca.read_csv(data, dtype={"PERMNO": np.int32, "date": 'DATE', "TRDSTAT": 'SYMBOL',
"DLSTCD": np.float32,
"DLPRC": np.float32, "VOL": np.float32, "SHROUT": np.float32})
Csv.odf_csv_re = Csv.odf_csv.set_index("PERMNO")
Csv.odf_csv.set_index("date", inplace=True)
# pdf from import
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1],
dtype={"PERMNO": np.int32, "SHRCD": np.int32, "HEXCD": np.int32, "DLSTCD": np.float32,
"DLPRC": np.float32, "VOL": np.float32, "SHROUT": np.float32})
Csv.pdf_csv_re = Csv.pdf_csv.set_index("PERMNO")
Csv.pdf_csv.set_index("date", inplace=True)
# set column 'date' as index
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odf_csv(self):
return Csv.odf_csv
# set column 'PERMNO' as index
@property
def pdf_re(self):
return Csv.pdf_csv_re
@property
def odf_re(self):
return Csv.odf_csv_re
# drop literal columns (temporal column 'date' has been set as index)
@property
def pdf_d(self):
return self.pdf_csv.drop(columns=['TICKER', 'CUSIP', 'TRDSTAT', 'DLRET'])
@property
def odf_d(self):
return self.odf_csv.drop(columns=['TICKER', 'CUSIP', 'TRDSTAT', 'DLRET'])
@property
def pdf(self):
n = 100 # note that n should be a multiple of 10
        re = n // 10  # integer division: np.repeat expects an integer repeat count
return pd.DataFrame({
'date': np.repeat(pd.date_range('2019.08.01', periods=10, freq='D'), re),
'tsymbol': np.repeat(['a', 'b', 'c', 'd', 'e', 'QWW', 'FEA', 'FFW', 'DER', 'POD'], re),
'tbool': np.repeat(np.repeat(np.arange(2, dtype='bool'), 5), re),
'tchar': np.repeat(np.arange(1, 11, 1, dtype='int8'), re),
'tshort': np.repeat(np.arange(1, 11, 1, dtype='int16'), re),
'tint': np.repeat(np.arange(1, 11, 1, dtype='int32'), re),
'tlong': np.repeat(np.arange(1, 11, 1, dtype='int64'), re),
'tfloat': np.repeat(np.arange(1, 11, 1, dtype='float32'), re),
'tdouble': np.repeat(np.arange(1, 11, 1, dtype='float64'), re)
}, index=pd.Index(np.arange(1, n + 1, 1, dtype='int32'), name="id"))
@property
def odf(self):
return orca.DataFrame(self.pdf)
# drop temporal, literal and logical columns
@property
def pdf_dr(self):
return self.pdf.drop(columns=['date', 'tsymbol', 'tbool'])
@property
def odf_dr(self):
return self.odf.drop(columns=['date', 'tsymbol', 'tbool'])
def test_from_pandas_filtering_param_cond_equal(self):
# filtering ==
a = self.odf[self.odf["tsymbol"] == 'a'].to_pandas()
b = self.pdf[self.pdf["tsymbol"] == 'a']
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_param_cond_not_equal(self):
# filtering !=
a = self.odf[self.odf["tsymbol"] != 'a'].to_pandas()
b = self.pdf[self.pdf["tsymbol"] != 'a']
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_param_cond_less_than_or_equal(self):
# filtering <=
a = self.odf[self.odf["tfloat"] <= 3.0].to_pandas()
b = self.pdf[self.pdf["tfloat"] <= 3.0]
assert_frame_equal(a, b, check_dtype=False)
a = self.odf[(self.odf["date"] < orca.Timestamp("2019.08.05"))].to_pandas()
b = self.pdf[(self.pdf["date"] < pd.Timestamp("2019.08.05"))]
assert_frame_equal(a, b, check_dtype=False)
a = self.odf[(self.odf["date"] < "2019.08.05")].to_pandas()
b = self.pdf[(self.pdf["date"] < "2019.08.05")]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_param_cond_greater_than_or_equal(self):
# filtering >=
a = self.odf[self.odf["tfloat"] >= 3.0].to_pandas()
b = self.pdf[self.pdf["tfloat"] >= 3.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_param_cond_less_than(self):
# filtering <
a = self.odf[self.odf["tfloat"] < 3.0].to_pandas()
b = self.pdf[self.pdf["tfloat"] < 3.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_param_cond_greater_than(self):
# filtering >
a = self.odf[self.odf["tfloat"] > 3.0].to_pandas()
b = self.pdf[self.pdf["tfloat"] > 3.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_and(self):
# filtering &
odf = self.odf
pdf = self.pdf
a = odf[(odf["tfloat"] < 3.0) & (odf["tfloat"] > 1.0)].to_pandas()
b = pdf[(pdf["tfloat"] < 3.0) & (pdf["tfloat"] > 1.0)]
ad = odf[(odf["tfloat"] < 3.0), (odf["tfloat"] > 1.0)].to_pandas()
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, b, check_dtype=False)
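    # note on the ad (and bd) variables in this and the following tests: indexing an
    # orca DataFrame with a comma-separated tuple of conditions (odf[cond1, cond2])
    # is an orca-specific shorthand that is expected to behave like cond1 & cond2;
    # pandas has no such form, so the pandas side always uses the explicit & operator.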
def test_from_pandas_filtering_and_and(self):
# filtering & &
odf = self.odf
pdf = self.pdf
a = odf[(odf["tfloat"] < 3.0) & (odf["tfloat"] > 1.0) & (odf["tint"] > 2)].to_pandas()
b = pdf[(pdf["tfloat"] < 3.0) & (pdf["tfloat"] > 1.0) & (pdf["tint"] > 2)]
ad = odf[(odf["tfloat"] < 3.0), (odf["tfloat"] > 1.0), (odf["tint"] > 2)].to_pandas()
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, b, check_dtype=False)
def test_from_pandas_filtering_and_or(self):
# filtering & |
odf = self.odf
pdf = self.pdf
a = odf[(odf["tfloat"] < 3.0) & (odf["tfloat"] > 1.0) | (odf["tfloat"] == 4.0)].to_pandas()
b = pdf[(pdf["tfloat"] < 3.0) & (pdf["tfloat"] > 1.0) | (pdf["tfloat"] == 4.0)]
ad = odf[(odf["tfloat"] < 3.0), (odf["tfloat"] > 1.0) | (odf["tfloat"] == 4.0)].to_pandas()
bd = pdf[(pdf["tfloat"] < 3.0) & ((pdf["tfloat"] > 1.0) | (pdf["tfloat"] == 4.0))]
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, bd, check_dtype=False)
def test_from_pandas_filtering_or(self):
# filtering |
odf = self.odf
pdf = self.pdf
b = pdf[(pdf["tfloat"] < 3.0) | (pdf["tfloat"] > 4.0)]
a = odf[(odf["tfloat"] < 3.0) | (odf["tfloat"] > 4.0)].to_pandas()
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_or_and(self):
# filtering | &
odf = self.odf
pdf = self.pdf
a = odf[(odf["tfloat"] == 3.0) | (odf["tfloat"] > 1.0) & (odf["tint"] > 2)].to_pandas()
b = pdf[(pdf["tfloat"] == 3.0) | (pdf["tfloat"] > 1.0) & (pdf["tint"] > 2)]
ad = odf[(odf["tfloat"] == 3.0) | (odf["tfloat"] > 1.0), (odf["tint"] > 2)].to_pandas()
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, b, check_dtype=False)
def test_from_pandas_filtering_or_or(self):
# filtering | |
odf = self.odf
pdf = self.pdf
a = odf[(odf["tfloat"] < 3.0) | (odf["tfloat"] > 1.0) | (odf["tfloat"] == 4.0)].to_pandas()
b = pdf[(pdf["tfloat"] < 3.0) | (pdf["tfloat"] > 1.0) | (pdf["tfloat"] == 4.0)]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_add(self):
# filtering +
a = (self.odf_dr[self.odf_dr["tfloat"] < 3.0] + self.odf_dr[self.odf_dr["tfloat"] > 1.0]).to_pandas()
b = self.pdf_dr[self.pdf_dr["tfloat"] < 3.0] + self.pdf_dr[self.pdf_dr["tfloat"] > 1.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_sub(self):
# filtering -
a = (self.odf_dr[self.odf_dr["tfloat"] < 3.0] - self.odf_dr[self.odf_dr["tfloat"] > 1.0]).to_pandas()
b = self.pdf_dr[self.pdf_dr["tfloat"] < 3.0] - self.pdf_dr[self.pdf_dr["tfloat"] > 1.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_mul(self):
# filtering *
a = (self.odf_dr[self.odf_dr["tfloat"] < 3.0] * self.odf_dr[self.odf_dr["tfloat"] > 1.0]).to_pandas()
b = self.pdf_dr[self.pdf_dr["tfloat"] < 3.0] * self.pdf_dr[self.pdf_dr["tfloat"] > 1.0]
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_div(self):
# filtering /
# note that operation div is only allowed for numbers, exclusive of temporal, literal and logical
# columns in Orca.
a = (self.odf_dr[self.odf_dr["tfloat"] < 3.0] / self.odf_dr[self.odf_dr["tfloat"] > 1.0]).to_pandas()
b = self.pdf_dr[self.pdf_dr["tfloat"] < 3.0] / self.pdf_dr[self.pdf_dr["tfloat"] > 1.0]
assert_frame_equal(a, b, check_dtype=False)
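    # A pandas-only sketch (not part of the original suite): arithmetic between two
    # filtered frames aligns on the index, so rows selected by only one of the two
    # masks come out as NaN. Orca is assumed to align the same way.
    def test_from_pandas_filtering_operation_alignment_sketch(self):
        pdf = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
        summed = pdf[pdf['x'] < 3.0] + pdf[pdf['x'] > 1.0]
        # only the row with x == 2.0 is selected by both masks
        self.assertEqual(summed['x'].notna().sum(), 1)
        self.assertEqual(summed.loc[1, 'x'], 4.0)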
def test_from_pandas_filtering_operation_groupby(self):
# filtering groupby sum
a = self.odf[self.odf["tfloat"] < 3.0].groupby("tsymbol").sum().to_pandas()
b = self.pdf[self.pdf["tfloat"] < 3.0].groupby("tsymbol").sum()
b['tbool'] = b['tbool'].astype(int)
assert_frame_equal(a, b, check_dtype=False)
# filtering groupby count
a = self.odf[self.odf["tfloat"] < 3.0].groupby("tsymbol").count().to_pandas()
b = self.pdf[self.pdf["tfloat"] < 3.0].groupby("tsymbol").count()
b['tbool'] = b['tbool'].astype(int)
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_resample(self):
        # filtering resample sum
a = self.odf[self.odf["tfloat"] < 5.0].resample("d", on="date").sum().to_pandas()
b = self.pdf[self.pdf["tfloat"] < 5.0].resample("d", on="date").sum()
b['tbool'] = b['tbool'].astype(int)
assert_frame_equal(a, b, check_dtype=False)
def test_from_pandas_filtering_operation_rolling(self):
        # filtering rolling sum
a = self.odf[self.odf["tfloat"] < 5.0].rolling(window=2, on="date").sum().to_pandas()
b = self.pdf[self.pdf["tfloat"] < 5.0].rolling(window=2, on="date").sum()
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_equal(self):
# filtering ==
a = self.odf_csv[self.odf_csv["TICKER"] == "EGAS"].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["TICKER"] == "EGAS"].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_not_equal(self):
# filtering !=
a = self.odf_csv[self.odf_csv["TICKER"] != "EGAS"].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["TICKER"] != "EGAS"].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_less_than_or_equal(self):
# filtering <=
a = self.odf_csv[self.odf_csv["PRC"] <= 10.25].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["PRC"] <= 10.25].fillna(value="")
# DIFFS
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_greater_than_or_equal(self):
# filtering >=
a = self.odf_csv[self.odf_csv["PRC"] >= 10.25].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["PRC"] >= 10.25].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_less_than(self):
# filtering <
a = self.odf_csv[self.odf_csv["PRC"] < 10.25].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["PRC"] < 10.25].fillna(value="")
# DIFFS
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_param_cond_greater_than(self):
# filtering >
a = self.odf_csv[self.odf_csv["PRC"] > 10.25].to_pandas().fillna(value="")
b = self.pdf_csv[self.pdf_csv["PRC"] > 10.25].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_and(self):
# filtering &
a = self.odf_csv[(self.odf_csv["CFACPR"] > 1.4) & (self.odf_csv["SHROUT"] > 5000)].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["CFACPR"] > 1.4) & (self.pdf_csv["SHROUT"] > 5000)].fillna(value="")
ad = self.odf_csv[(self.odf_csv["CFACPR"] > 1.4), (self.odf_csv["SHROUT"] > 5000)].to_pandas().fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, b, check_dtype=False)
def test_from_import_filtering_or(self):
# filtering |
a = self.odf_csv[(self.odf_csv["SHROUT"] > 16600) | (self.odf_csv["CFACPR"] > 0.1)].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["SHROUT"] > 16600) | (self.pdf_csv["CFACPR"] > 0.1)].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_and_and(self):
# filtering & &
a = self.odf_csv[(self.odf_csv["OPENPRC"] > 40) & (self.odf_csv["SHROUT"] > 15000) & (self.odf_csv["CFACPR"] > 1)].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["OPENPRC"] > 40) & (self.pdf_csv["SHROUT"] > 15000) & (self.pdf_csv["CFACPR"] > 1)].fillna(value="")
ad = self.odf_csv[(self.odf_csv["OPENPRC"] > 40), (self.odf_csv["SHROUT"] > 15000), (self.odf_csv["CFACPR"] > 1)].to_pandas().fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
assert_frame_equal(ad, b, check_dtype=False)
def test_from_import_filtering_and_or(self):
# filtering & |
a = self.odf_csv[(self.odf_csv["OPENPRC"] > 40) & (self.odf_csv["SHROUT"] > 15000) | (self.odf_csv["TRDSTAT"] == "A")].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["OPENPRC"] > 40) & (self.pdf_csv["SHROUT"] > 15000) | (self.pdf_csv["TRDSTAT"] == "A")].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
ad = self.odf_csv[(self.odf_csv["OPENPRC"] > 40), (self.odf_csv["SHROUT"] > 15000) | (self.odf_csv["TRDSTAT"] == "A")].to_pandas().fillna(value="")
bd = self.pdf_csv[(self.pdf_csv["OPENPRC"] > 40) & ((self.pdf_csv["SHROUT"] > 15000) | (self.pdf_csv["TRDSTAT"] == "A"))].fillna(value="")
assert_frame_equal(ad, bd, check_dtype=False)
def test_from_import_filtering_or_or(self):
# filtering | |
a = self.odf_csv[(self.odf_csv["OPENPRC"] > 40) | (self.odf_csv["SHROUT"] > 15000) | (self.odf_csv["TRDSTAT"] == "A")].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["OPENPRC"] > 40) | (self.pdf_csv["SHROUT"] > 15000) | (self.pdf_csv["TRDSTAT"] == "A")].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_or_and(self):
# filtering | &
a = self.odf_csv[(self.odf_csv["OPENPRC"] > 40) | (self.odf_csv["SHROUT"] > 15000) & (self.odf_csv["TRDSTAT"] == "A")].to_pandas().fillna(value="")
b = self.pdf_csv[(self.pdf_csv["OPENPRC"] > 40) | (self.pdf_csv["SHROUT"] > 15000) & (self.pdf_csv["TRDSTAT"] == "A")].fillna(value="")
assert_frame_equal(a, b, check_dtype=False)
ad = self.odf_csv[(self.odf_csv["OPENPRC"] > 40) | (self.odf_csv["SHROUT"] > 15000), (self.odf_csv["TRDSTAT"] == "A")].to_pandas().fillna(value="")
assert_frame_equal(ad, b, check_dtype=False)
def test_from_import_filtering_operation_add(self):
# filtering +
a = (self.odf_d[self.odf_d["OPENPRC"] > 40] + self.odf_d[self.odf_d["SHROUT"] > 15000]).to_pandas()
b = self.pdf_d[self.pdf_d["OPENPRC"] > 40] + self.pdf_d[self.pdf_d["SHROUT"] > 15000]
# TODO: ASSERT RANDOM ORDER
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_operation_sub(self):
# filtering -
a = (self.odf_d[self.odf_d["OPENPRC"] > 40] - self.odf_d[self.odf_d["SHROUT"] > 15000]).to_pandas()
b = self.pdf_d[self.pdf_d["OPENPRC"] > 40] - self.pdf_d[self.pdf_d["SHROUT"] > 15000]
# TODO: ASSERT RANDOM ORDER
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_operation_mul(self):
# filtering *
a = (self.odf_d[self.odf_d["OPENPRC"] > 40] * self.odf_d[self.odf_d["SHROUT"] > 15000]).to_pandas()
b = self.pdf_d[self.pdf_d["OPENPRC"] > 40] * self.pdf_d[self.pdf_d["SHROUT"] > 15000]
# TODO: ASSERT RANDOM ORDER
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_operation_div(self):
# filtering /
# note that operation div is only allowed for numbers, exclusive of temporal, literal and logical
# columns in Orca.
a = (self.odf_d[self.odf_d["OPENPRC"] > 40] / self.odf_d[self.odf_d["SHROUT"] > 15000]).to_pandas()
b = self.pdf_d[self.pdf_d["OPENPRC"] > 40] / self.pdf_d[self.pdf_d["SHROUT"] > 15000]
# TODO: ASSERT RANDOM ORDER
# assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_operation_groupby(self):
# filtering groupby sum
a = self.odf_csv[self.odf_csv["OPENPRC"] > 40].groupby("date").sum().to_pandas()
b = self.pdf_csv[self.pdf_csv["OPENPRC"] > 40].groupby("date").sum()
assert_frame_equal(a, b, check_dtype=False)
def test_from_import_filtering_operation_resample(self):
        # filtering resample sum
a = self.odf_re[self.odf_re["OPENPRC"] > 40]
b = self.pdf_re[self.pdf_re["OPENPRC"] > 40]
assert_frame_equal(a.to_pandas().iloc[:, 0:8], b.iloc[:, 0:8], check_dtype=False, check_index_type=False)
assert_frame_equal(a.to_pandas().iloc[:, 9:], b.iloc[:, 9:], check_dtype=False, check_index_type=False)
a = self.odf_re[self.odf_re["OPENPRC"] > 40].resample("d", on="date").sum().to_pandas().sort_index()
b = self.pdf_re[self.pdf_re["OPENPRC"] > 40].resample("d", on="date").sum().sort_index()
# TODO: FILTER.RESAMPLE
# assert_frame_equal(a, b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_from_import_filtering_operation_rolling(self):
        # filtering rolling sum
a = self.odf_re[self.odf_re["OPENPRC"] > 40].rolling(window=2, on="date").sum().to_pandas()
b = self.pdf_re[self.pdf_re["OPENPRC"] > 40].rolling(window=2, on="date").sum()
# TODO: ASSERT RANDOM ORDER
# assert_frame_equal(a, b, check_dtype=False, check_index_type=False, check_less_precise=1)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_func_application_groupby_partition.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
def _create_odf_csv(data, dfsDatabase):
# call function default_session() to get session object
s = orca.default_session()
dolphindb_script = """
login("admin", "<PASSWORD>")
dbPath="dfs://groupbyDateDB"
if(existsDatabase(dbPath))
dropDatabase(dbPath)
schema = extractTextSchema('{data}')
cols = exec name from schema
types = ["INT", "DATE", "SYMBOL", "BOOL", "SHORT", "INT", "LONG", "FLOAT", "DOUBLE"]
schema = table(50000:0, cols, types)
tt=schema(schema).colDefs
tt.drop!(`typeInt)
tt.rename!(`name`type)
db = database(dbPath, RANGE, 1 501 1001 1501 2001 2501 3001)
tb = db.createPartitionedTable(schema, `tb, `id)
db.loadTextEx(`tb,`id, '{data}' ,, tt)""".format(data=data)
s.run(dolphindb_script)
return orca.read_table(dfsDatabase, 'tb')
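# Example usage (a sketch; the real call with the actual csv path is made in
# setUpClass below):
#   odfs = _create_odf_csv('/path/to/groupbyDate.csv', 'dfs://groupbyDateDB')
# The returned object is an orca DataFrame backed by the partitioned table 'tb'.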
class Csv:
pdf_csv = None
odfs_csv = None
class DfsGroupByTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'groupbyDate.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
dfsDatabase = "dfs://groupbyDateDB"
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1], dtype={"id": np.int32, "tbool": np.bool, "tshort": np.int16,
"tint": np.int32, "tlong": np.int64, "tfloat": np.float32,
"tdouble": np.float64})
Csv.pdf_csv['tbool'] = Csv.pdf_csv["tbool"].astype(np.bool)
Csv.odfs_csv = _create_odf_csv(data, dfsDatabase)
Csv.odfs_csv.set_index("id", inplace=True)
Csv.pdf_csv.set_index("id", inplace=True)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odfs_csv(self):
return Csv.odfs_csv
def test_dfs_groupby_param_by_date_all(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').all()
# b = self.pdf_csv.groupby('date').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_any(self):
pass
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
# a = self.odfs_csv.groupby('date').any()
# b = self.pdf_csv.groupby('date').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_bfill(self):
a = self.odfs_csv.groupby('date').bfill()
b = self.pdf_csv.groupby('date').bfill()
# TODO: bfill for strings is not allowed in Orca
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_count(self):
a = self.odfs_csv.groupby('date').count()
b = self.pdf_csv.groupby('date').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumcount(self):
a = self.odfs_csv.groupby('date').cumcount()
b = self.pdf_csv.groupby('date').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cummax(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummax()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_date_cummin(self):
a = self.odfs_csv.drop(columns=['tsymbol']).groupby('date').cummin()
b = self.pdf_csv.drop(columns=['tsymbol']).groupby('date').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumprod(self):
a = self.odfs_csv.groupby('date').cumprod()
b = self.pdf_csv.groupby('date').cumprod()
        # TODO: TOO MANY DIFFS
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False,
check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_cumsum(self):
a = self.odfs_csv.groupby('date').cumsum()
b = self.pdf_csv.groupby('date').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ffill(self):
a = self.odfs_csv.groupby('date').ffill()
b = self.pdf_csv.groupby('date').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_first(self):
a = self.odfs_csv.groupby('date').first()
b = self.pdf_csv.groupby('date').first()
b['tbool'] = b['tbool'].astype(np.bool, errors="ignore")
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').head()
# b = self.pdf_csv.groupby('date').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_last(self):
a = self.odfs_csv.groupby('date').last()
b = self.pdf_csv.groupby('date').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_max(self):
a = self.odfs_csv.groupby('date').max()
b = self.pdf_csv.groupby('date').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_mean(self):
a = self.odfs_csv.groupby('date').mean()
b = self.pdf_csv.groupby('date').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').median()
# b = self.pdf_csv.groupby('date').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_min(self):
a = self.odfs_csv.groupby('date').min()
b = self.pdf_csv.groupby('date').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').ngroup()
# b = self.pdf_csv.groupby('date').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('date').nth(0)
# b = self.pdf_csv.groupby('date').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby(['tint', 'tbool']).ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_prod(self):
a = self.odfs_csv.groupby('date').prod()
b = self.pdf_csv.groupby('date').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_rank(self):
a = self.odfs_csv.groupby('date').rank()
# TODO: pandas doesn't support
# b = self.pdf_csv.groupby('date').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol"]).groupby('date').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
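        # note: replace(np.inf, np.nan) above normalises the inf values pandas
        # produces when the previous value in a group is 0; orca appears to return
        # NaN in that case, so the two sides are only comparable after this step.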
def test_dfs_groupby_param_by_date_size(self):
a = self.odfs_csv.groupby('date').size()
b = self.pdf_csv.groupby('date').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('date').sem()
# b = self.pdf_csv.groupby('date').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_std(self):
a = self.odfs_csv.groupby('date').std()
b = self.pdf_csv.groupby('date').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_sum(self):
a = self.odfs_csv.groupby('date').sum()
b = self.pdf_csv.groupby('date').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_var(self):
a = self.odfs_csv.groupby('date').var()
b = self.pdf_csv.groupby('date').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_date_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('date').tail()
# b = self.pdf_csv.groupby('date').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').all()
# b = self.pdf_csv.groupby('tsymbol').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').any()
# b = self.pdf_csv.groupby('tsymbol').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_bfill(self):
a = self.odfs_csv.groupby('tsymbol').bfill()
b = self.pdf_csv.groupby('tsymbol').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_count(self):
a = self.odfs_csv.groupby('tsymbol').count()
b = self.pdf_csv.groupby('tsymbol').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumcount(self):
a = self.odfs_csv.groupby('tsymbol').cumcount()
b = self.pdf_csv.groupby('tsymbol').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cummax(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummax()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cummin(self):
a = self.odfs_csv.drop(columns=['date']).groupby('tsymbol').cummin()
b = self.pdf_csv.drop(columns=['date']).groupby('tsymbol').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_symbol_cumprod(self):
a = self.odfs_csv.groupby('tsymbol').cumprod()
b = self.pdf_csv.groupby('tsymbol').cumprod()
assert_frame_equal(a.to_pandas().iloc[0:5].reset_index(drop=True), b.iloc[0:5].reset_index(drop=True), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_cumsum(self):
a = self.odfs_csv.groupby('tsymbol').cumsum()
b = self.pdf_csv.groupby('tsymbol').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ffill(self):
a = self.odfs_csv.groupby('tsymbol').ffill()
b = self.pdf_csv.groupby('tsymbol').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_first(self):
a = self.odfs_csv.groupby('tsymbol').first()
b = self.pdf_csv.groupby('tsymbol').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').head()
# b = self.pdf_csv.groupby('tsymbol').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_last(self):
a = self.odfs_csv.groupby('tsymbol').last()
b = self.pdf_csv.groupby('tsymbol').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_max(self):
a = self.odfs_csv.groupby('tsymbol').max()
b = self.pdf_csv.groupby('tsymbol').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_mean(self):
a = self.odfs_csv.groupby('tsymbol').mean()
b = self.pdf_csv.groupby('tsymbol').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').median()
# b = self.pdf_csv.groupby('tsymbol').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_min(self):
a = self.odfs_csv.groupby('tsymbol').min()
b = self.pdf_csv.groupby('tsymbol').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').ngroup()
# b = self.pdf_csv.groupby('tsymbol').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tsymbol').nth(0)
# b = self.pdf_csv.groupby('tsymbol').nth(0)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_ohlc(self):
a = self.odfs_csv.groupby('tsymbol').ohlc()
# pandas doesn't support
# b = self.pdf_csv.groupby('tsymbol').ohlc()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_prod(self):
a = self.odfs_csv.groupby('tsymbol').prod()
b = self.pdf_csv.groupby('tsymbol').prod()
assert_frame_equal(a.to_pandas(), b.fillna(0), check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_rank(self):
a = self.odfs_csv.groupby('tsymbol').rank()
b = self.pdf_csv.groupby('tsymbol').rank()
# TODO: DIFFERENT METHOD
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "date"]).groupby('tsymbol').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False,
check_less_precise=2)
def test_dfs_groupby_param_by_symbol_size(self):
a = self.odfs_csv.groupby('tsymbol').size()
b = self.pdf_csv.groupby('tsymbol').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tsymbol').sem()
# b = self.pdf_csv.groupby('tsymbol').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_std(self):
a = self.odfs_csv.groupby('tsymbol').std()
b = self.pdf_csv.groupby('tsymbol').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_sum(self):
a = self.odfs_csv.groupby('tsymbol').sum()
b = self.pdf_csv.groupby('tsymbol').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_var(self):
a = self.odfs_csv.groupby('tsymbol').var()
b = self.pdf_csv.groupby('tsymbol').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_symbol_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tsymbol').tail()
# b = self.pdf_csv.groupby('tsymbol').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_all(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').all()
# b = self.pdf_csv.groupby('tlong').all()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_any(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').any()
# b = self.pdf_csv.groupby('tlong').any()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_bfill(self):
a = self.odfs_csv.groupby('tlong').bfill()
b = self.pdf_csv.groupby('tlong').bfill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_count(self):
a = self.odfs_csv.groupby('tlong').count()
b = self.pdf_csv.groupby('tlong').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumcount(self):
a = self.odfs_csv.groupby('tlong').cumcount()
b = self.pdf_csv.groupby('tlong').cumcount()
        # TODO: TOO MANY DIFFS
self.assertIsInstance(a.to_pandas(), DataFrame)
self.assertIsInstance(b, Series)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cummax(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummax()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cummin(self):
a = self.odfs_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
b = self.pdf_csv.drop(columns=['date', 'tsymbol']).groupby('tlong').cummin()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_dfs_groupby_param_by_long_cumprod(self):
a = self.odfs_csv.groupby('tlong').cumprod()
b = self.pdf_csv.groupby('tlong').cumprod()
        # TODO: TOO MANY DIFFS
assert_frame_equal(a.to_pandas().iloc[0:50].reset_index(drop=True), b.iloc[0:50].reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_cumsum(self):
a = self.odfs_csv.groupby('tlong').cumsum()
b = self.pdf_csv.groupby('tlong').cumsum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True),
check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ffill(self):
a = self.odfs_csv.groupby('tlong').ffill()
b = self.pdf_csv.groupby('tlong').ffill()
assert_frame_equal(a.to_pandas().sort_index().reset_index(drop=True).iloc[:, 1:],
b.sort_index().reset_index(drop=True).iloc[:, 1:], check_dtype=False,
check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_first(self):
a = self.odfs_csv.groupby('tlong').first()
b = self.pdf_csv.groupby('tlong').first()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_head(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').head()
# b = self.pdf_csv.groupby('tlong').head()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_last(self):
a = self.odfs_csv.groupby('tlong').last()
b = self.pdf_csv.groupby('tlong').last()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_max(self):
a = self.odfs_csv.groupby('tlong').max()
b = self.pdf_csv.groupby('tlong').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_mean(self):
a = self.odfs_csv.groupby('tlong').mean()
b = self.pdf_csv.groupby('tlong').mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_median(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').median()
# b = self.pdf_csv.groupby('tlong').median()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_min(self):
a = self.odfs_csv.groupby('tlong').min()
b = self.pdf_csv.groupby('tlong').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ngroup(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').ngroup()
# b = self.pdf_csv.groupby('tlong').ngroup()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_nth(self):
# TODO: NOT IMPLEMENTED
pass
# a = self.odfs_csv.groupby('tlong').nth()
# b = self.pdf_csv.groupby('tlong').nth()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_ohlc(self):
a = self.odfs_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
b = self.pdf_csv.drop(columns=['tsymbol', "date"]).groupby('tlong').ohlc()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_prod(self):
a = self.odfs_csv.groupby('tlong').prod()
b = self.pdf_csv.groupby('tlong').prod()
# TODO:DIFFS
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_rank(self):
a = self.odfs_csv.groupby('tlong').rank()
# b = self.pdf_csv.groupby('tlong').rank()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_pct_change(self):
a = self.odfs_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
b = self.pdf_csv.drop(columns=["tbool", "tsymbol", "date"]).groupby('tlong').pct_change()
assert_frame_equal(a.to_pandas(), b.replace(np.inf, np.nan), check_dtype=False, check_index_type=False, check_less_precise=2)
def test_dfs_groupby_param_by_long_size(self):
a = self.odfs_csv.groupby('tlong').size().loc[0:]
b = self.pdf_csv.groupby('tlong').size()
assert_series_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sem(self):
# TODO: NOT SUPPORTED FOR PARTITIONED TABLE
pass
# a = self.odfs_csv.groupby('tlong').sem()
# b = self.pdf_csv.groupby('tlong').sem()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_std(self):
a = self.odfs_csv.groupby('tlong').std()
b = self.pdf_csv.groupby('tlong').std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_sum(self):
a = self.odfs_csv.groupby('tlong').sum()
b = self.pdf_csv.groupby('tlong').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_var(self):
a = self.odfs_csv.groupby('tlong').var()
b = self.pdf_csv.groupby('tlong').var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
def test_dfs_groupby_param_by_long_tail(self):
# TODO: NOT SUPPORTED FOR groupby
pass
# a = self.odfs_csv.groupby('tlong').tail()
# b = self.pdf_csv.groupby('tlong').tail()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False, check_index_type=False, check_less_precise=1)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_func_application_rolling.py
```python
import unittest
import os
import os.path as path
import numpy as np
import pandas as pd
import orca
from setup.settings import *
from pandas.util.testing import *
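# Csv is a simple module-level holder: setUpClass reads the CSV once and stores the
# pandas and orca copies here so all tests can share them without reloading the file.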
class Csv:
pdf_csv = None
odf_csv = None
class RollingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'onlyNumericalColumns.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.pdf_csv = pd.read_csv(data)
Csv.odf_csv = orca.read_csv(data)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odf_csv(self):
return Csv.odf_csv
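    # The pdf fixture below builds an in-memory pandas frame of n rows in which each of
    # the 10 dates repeats n // 10 times; odf wraps it as an orca DataFrame so the two
    # can be compared via to_pandas() in the integer-window (count-based) tests.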
@property
def pdf(self):
n = 100 # note that n should be a multiple of 10
        re = n // 10  # use integer division so np.repeat receives an int repeat count
pdf_da = pd.DataFrame({'id': np.arange(1, n + 1, 1, dtype='int32'),
'date': np.repeat(pd.date_range('2019.08.01', periods=10, freq='D'), re),
'tchar': np.repeat(np.arange(1, 11, 1, dtype='int8'), re),
'tshort': np.repeat(np.arange(1, 11, 1, dtype='int16'), re),
'tint': np.repeat(np.arange(1, 11, 1, dtype='int32'), re),
'tlong': np.repeat(np.arange(1, 11, 1, dtype='int64'), re),
'tfloat': np.repeat(np.arange(1, 11, 1, dtype='float32'), re),
'tdouble': np.repeat(np.arange(1, 11, 1, dtype='float64'), re)
})
return pdf_da.set_index("id")
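    # The pdf_da fixture below holds 9 rows with irregular, increasing timestamps; it is
    # wrapped by odf_da and used by the offset-based (time rule) window tests, e.g. window='d'.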
@property
def pdf_da(self):
        n = 9  # one row per timestamp in ps below
ps = pd.to_datetime(
["20170101", "20170103", "20170105", "20170106", "20171231", "20180615", "20181031", "20190501",
"20190517"]).to_series()
pdf_da = pd.DataFrame({'id': np.arange(1, n + 1, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
return pdf_da.set_index("id")
@property
def odf(self):
return orca.DataFrame(self.pdf)
@property
def odf_da(self):
return orca.DataFrame(self.pdf_da)
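    # The first test checks column selection on a rolling object: a valid column yields a
    # Series, while unknown labels, integers, or arrays are expected to raise KeyError.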
def test_rolling_allocation_verification(self):
self.assertIsInstance(self.odf.rolling(window=5, on="date")['date'].count().to_pandas(), Series)
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")['hello'].count()
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")[['dare', 5, 0]].count()
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")[['hello', 'world']].count()
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")[np.array([1, 2, 3])].count()
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")[5].count()
with self.assertRaises(KeyError):
self.odf.rolling(window=5, on="date")[[16.5, 5]].count()
def test_rolling_from_pandas_param_window_sum(self):
a = self.odf.rolling(window=5, on="date").sum()
b = self.pdf.rolling(window=5, on="date").sum()
assert_frame_equal(a.to_pandas(), b)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).sum()
b = pdf_dai.rolling(window=5).sum()
assert_frame_equal(a.to_pandas(), b)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_sum(self):
a = self.odf_da.rolling(window='d', on="date").sum()
b = self.pdf_da.rolling(window='d', on="date").sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').sum()
b = pdf_dai.rolling(window='d').sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
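    # The remaining time-rule tests (hour, minute, second, milli, micro, nano) build their own
    # timestamp fixtures inline; their comparisons against pandas are currently commented out
    # (see the TODO markers), so they mainly verify that the orca calls run without error.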
def test_rolling_from_pandas_param_window_rule_hour_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00", "20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h.set_index("id", inplace=True)
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").sum()
b = pdf_h.rolling(window='h', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").sum()
b = pdf_h.rolling(window='2h', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
        # a = odf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').sum()
# b = pdf_dai.rolling(window='h').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t.set_index("id", inplace=True)
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").sum()
b = pdf_t.rolling(window='t', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').sum()
# b = pdf_dai.rolling(window='t').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s.set_index("id", inplace=True)
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").sum()
b = pdf_s.rolling(window='s', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").sum()
b = pdf_s.rolling(window='2s', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').sum()
# b = pdf_dai.rolling(window='s').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015", "20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l.set_index("id", inplace=True)
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
# a = odf_l.rolling(window='l', on="date").sum()
# b = pdf_l.rolling(window='l', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
#
# a = odf_l.rolling(window='2l', on="date").sum()
# b = pdf_l.rolling(window='2l', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').sum()
# b = pdf_dai.rolling(window='l').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").sum()
b = pdf_u.rolling(window='u', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").sum()
b = pdf_u.rolling(window='2u', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').sum()
# b = pdf_dai.rolling(window='u').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_sum(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001", "20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERT FAIL
a = odf_n.rolling(window='n', on="date").sum()
b = pdf_n.rolling(window='n', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").sum()
b = pdf_n.rolling(window='2n', on="date").sum()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').sum()
# b = pdf_dai.rolling(window='n').sum()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_count(self):
a = self.odf.rolling(window=5, on="date").count()
b = self.pdf.rolling(window=5, on="date").count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).count()
b = pdf_dai.rolling(window=5).count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_count(self):
a = self.odf_da.rolling(window='d', on="date").count()
b = self.pdf_da.rolling(window='d', on="date").count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].count()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').count()
b = pdf_dai.rolling(window='d').count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_count(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").count()
b = pdf_h.rolling(window='h', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").count()
b = pdf_h.rolling(window='2h', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
        # a = odf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].count()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').count()
# b = pdf_dai.rolling(window='h').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_count(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").count()
b = pdf_t.rolling(window='t', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').count()
# b = pdf_dai.rolling(window='t').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_count(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").count()
b = pdf_s.rolling(window='s', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").count()
b = pdf_s.rolling(window='2s', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').count()
# b = pdf_dai.rolling(window='s').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_count(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
a = odf_l.rolling(window='l', on="date").count()
b = pdf_l.rolling(window='l', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").count()
b = pdf_l.rolling(window='2l', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').count()
# b = pdf_dai.rolling(window='l').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_count(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").count()
b = pdf_u.rolling(window='u', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").count()
b = pdf_u.rolling(window='2u', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').count()
# b = pdf_dai.rolling(window='u').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_count(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERT FAIL
a = odf_n.rolling(window='n', on="date").count()
b = pdf_n.rolling(window='n', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").count()
b = pdf_n.rolling(window='2n', on="date").count()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').count()
# b = pdf_dai.rolling(window='n').count()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_mean(self):
a = self.odf.rolling(window=5, on="date").mean()
b = self.pdf.rolling(window=5, on="date").mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).mean()
b = pdf_dai.rolling(window=5).mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_mean(self):
a = self.odf_da.rolling(window='d', on="date").mean()
b = self.pdf_da.rolling(window='d', on="date").mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].mean()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').mean()
b = pdf_dai.rolling(window='d').mean()
assert_frame_equal(a.to_pandas(), b)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_mean(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").mean()
b = pdf_h.rolling(window='h', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").mean()
b = pdf_h.rolling(window='2h', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
        # a = odf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].mean()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tdouble','tchar', 'tint', 'tlong', 'tfloat','tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').mean()
# b = pdf_dai.rolling(window='h').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_mean(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").mean()
b = pdf_t.rolling(window='t', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').mean()
# b = pdf_dai.rolling(window='t').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_mean(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").mean()
b = pdf_s.rolling(window='s', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").mean()
b = pdf_s.rolling(window='2s', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').mean()
# b = pdf_dai.rolling(window='s').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_mean(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
a = odf_l.rolling(window='l', on="date").mean()
b = pdf_l.rolling(window='l', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").mean()
b = pdf_l.rolling(window='2l', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').mean()
# b = pdf_dai.rolling(window='l').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_mean(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").mean()
b = pdf_u.rolling(window='u', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").mean()
b = pdf_u.rolling(window='2u', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').mean()
# b = pdf_dai.rolling(window='u').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_mean(self):
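        # 'n' is the pandas offset alias for nanoseconds; the fixture timestamps include
        # nanosecond-level differences so the 'n' / '2n' windows are meaningful.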
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERT FAIL
a = odf_n.rolling(window='n', on="date").mean()
b = pdf_n.rolling(window='n', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").mean()
b = pdf_n.rolling(window='2n', on="date").mean()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').mean()
# b = pdf_dai.rolling(window='n').mean()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_max(self):
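        # Count-based window: the max over the current and previous 4 rows (window=5),
        # compared against pandas after converting the orca result with to_pandas().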
a = self.odf.rolling(window=5, on="date").max()
b = self.pdf.rolling(window=5, on="date").max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).max()
b = pdf_dai.rolling(window=5).max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_max(self):
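        # 'd' and '3d' request time-based windows of one and three days on the 'date'
        # column, i.e. all rows whose timestamp falls within that trailing span.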
a = self.odf_da.rolling(window='d', on="date").max()
b = self.pdf_da.rolling(window='d', on="date").max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').max()
b = pdf_dai.rolling(window='d').max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_max(self):
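        # 'h' / '2h' are hour-based offset windows; the fixture spaces the first few
        # timestamps roughly an hour apart so the window boundaries are exercised.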
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").max()
b = pdf_h.rolling(window='h', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").max()
b = pdf_h.rolling(window='2h', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
        # a = odf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
        # b = pdf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').max()
# b = pdf_dai.rolling(window='h').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_max(self):
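        # 't' is the pandas offset alias for minutes (same as 'min'), so window='t'
        # is a one-minute time-based window on the 'date' column.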
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").max()
b = pdf_t.rolling(window='t', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').max()
# b = pdf_dai.rolling(window='t').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_max(self):
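        # 's' / '2s' are second-based offset windows; several fixture timestamps share
        # the same second, so multiple rows can fall into one window.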
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").max()
b = pdf_s.rolling(window='s', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").max()
b = pdf_s.rolling(window='2s', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').max()
# b = pdf_dai.rolling(window='s').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_max(self):
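        # 'l' is the pandas offset alias for milliseconds (same as 'ms'), so 'l' / '2l'
        # define 1- and 2-millisecond windows on the 'date' column.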
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
a = odf_l.rolling(window='l', on="date").max()
b = pdf_l.rolling(window='l', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").max()
b = pdf_l.rolling(window='2l', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').max()
# b = pdf_dai.rolling(window='l').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_max(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").max()
b = pdf_u.rolling(window='u', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").max()
b = pdf_u.rolling(window='2u', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').max()
# b = pdf_dai.rolling(window='u').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_max(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERT FAIL
a = odf_n.rolling(window='n', on="date").max()
b = pdf_n.rolling(window='n', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").max()
b = pdf_n.rolling(window='2n', on="date").max()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').max()
# b = pdf_dai.rolling(window='n').max()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_min(self):
a = self.odf.rolling(window=5, on="date").min()
b = self.pdf.rolling(window=5, on="date").min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).min()
b = pdf_dai.rolling(window=5).min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_min(self):
a = self.odf_da.rolling(window='d', on="date").min()
b = self.pdf_da.rolling(window='d', on="date").min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').min()
b = pdf_dai.rolling(window='d').min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_min(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").min()
b = pdf_h.rolling(window='h', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").min()
b = pdf_h.rolling(window='2h', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
        # a = odf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
        # b = pdf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').min()
# b = pdf_dai.rolling(window='h').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_min(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").min()
b = pdf_t.rolling(window='t', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').min()
# b = pdf_dai.rolling(window='t').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_min(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").min()
b = pdf_s.rolling(window='s', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").min()
b = pdf_s.rolling(window='2s', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').min()
# b = pdf_dai.rolling(window='s').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_min(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
a = odf_l.rolling(window='l', on="date").min()
b = pdf_l.rolling(window='l', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").min()
b = pdf_l.rolling(window='2l', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').min()
# b = pdf_dai.rolling(window='l').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_min(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").min()
b = pdf_u.rolling(window='u', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").min()
b = pdf_u.rolling(window='2u', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').min()
# b = pdf_dai.rolling(window='u').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_min(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERT FAIL
a = odf_n.rolling(window='n', on="date").min()
b = pdf_n.rolling(window='n', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").min()
b = pdf_n.rolling(window='2n', on="date").min()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').min()
# b = pdf_dai.rolling(window='n').min()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_std(self):
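        # Rolling standard deviation over a 5-row window; pandas uses the sample
        # statistic (ddof=1) by default, which the orca result is expected to match.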
a = self.odf.rolling(window=5, on="date").std()
b = self.pdf.rolling(window=5, on="date").std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).std()
b = pdf_dai.rolling(window=5).std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_std(self):
a = self.odf_da.rolling(window='d', on="date").std()
b = self.pdf_da.rolling(window='d', on="date").std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').std()
b = pdf_dai.rolling(window='d').std()
assert_frame_equal(a.to_pandas(), b)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_std(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERT FAIL
a = odf_h.rolling(window='h', on="date").std()
b = pdf_h.rolling(window='h', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").std()
b = pdf_h.rolling(window='2h', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
        # a = odf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
        # b = pdf_h.rolling(window='h', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').std()
# b = pdf_dai.rolling(window='h').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_std(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERT FAIL
a = odf_t.rolling(window='t', on="date").std()
b = pdf_t.rolling(window='t', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').std()
# b = pdf_dai.rolling(window='t').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_std(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERT FAIL
a = odf_s.rolling(window='s', on="date").std()
b = pdf_s.rolling(window='s', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").std()
b = pdf_s.rolling(window='2s', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').std()
# b = pdf_dai.rolling(window='s').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_std(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERT FAIL
a = odf_l.rolling(window='l', on="date").std()
b = pdf_l.rolling(window='l', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").std()
b = pdf_l.rolling(window='2l', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
        # b = pdf_l.rolling(window='l', on="date")[
        #     'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').std()
# b = pdf_dai.rolling(window='l').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_std(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERT FAIL
a = odf_u.rolling(window='u', on="date").std()
b = pdf_u.rolling(window='u', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").std()
b = pdf_u.rolling(window='2u', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').std()
# b = pdf_dai.rolling(window='u').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_std(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERTIONS FAIL
a = odf_n.rolling(window='n', on="date").std()
b = pdf_n.rolling(window='n', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").std()
b = pdf_n.rolling(window='2n', on="date").std()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').std()
# b = pdf_dai.rolling(window='n').std()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_var(self):
a = self.odf.rolling(window=5, on="date").var()
b = self.pdf.rolling(window=5, on="date").var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
b = self.pdf.rolling(window=5, on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.reset_index()
pdf_dai = self.pdf.reset_index()
a = odf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window=5, on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf.set_index('date')
pdf_dai = self.pdf.set_index('date')
a = odf_dai.rolling(window=5).var()
b = pdf_dai.rolling(window=5).var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window=5)[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_day_var(self):
a = self.odf_da.rolling(window='d', on="date").var()
b = self.pdf_da.rolling(window='d', on="date").var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
b = self.pdf_da.rolling(window='3d', on="date")[
'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
b = self.pdf_da.rolling(window='d', on="date")[
'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.reset_index()
pdf_dai = self.pdf_da.reset_index()
a = odf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window='d', on="date")[
'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_da.set_index('date')
pdf_dai = self.pdf_da.set_index('date')
a = odf_dai.rolling(window='d').var()
b = pdf_dai.rolling(window='d').var()
assert_frame_equal(a.to_pandas(), b)
a = odf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window='d')[
'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_hour_var(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 10:10:15", "20170101 11:10:15", "20170101 11:20:15", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_h = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_h = pdf_h.set_index("id")
odf_h = orca.DataFrame(pdf_h)
# TODO: ALL ASSERTIONS FAIL
a = odf_h.rolling(window='h', on="date").var()
b = pdf_h.rolling(window='h', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_h.rolling(window='2h', on="date").var()
b = pdf_h.rolling(window='2h', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_h.rolling(window='h', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_h.rolling(window='h', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.reset_index()
# pdf_dai = pdf_h.reset_index()
# a = odf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='h', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_h.set_index('date')
# pdf_dai = pdf_h.set_index('date')
# a = odf_dai.rolling(window='h').var()
# b = pdf_dai.rolling(window='h').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='h')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_minute_var(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:11:10", "20170101 9:11:17", "20170101 11:21:00",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_t = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_t = pdf_t.set_index("id")
odf_t = orca.DataFrame(pdf_t)
# TODO: ALL ASSERTIONS FAIL
a = odf_t.rolling(window='t', on="date").var()
b = pdf_t.rolling(window='t', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_t.rolling(window='t', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.reset_index()
# pdf_dai = pdf_t.reset_index()
# a = odf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='t', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_t.set_index('date')
# pdf_dai = pdf_t.set_index('date')
# a = odf_dai.rolling(window='t').var()
# b = pdf_dai.rolling(window='t').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='t')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_second_var(self):
ps = pd.to_datetime(
["20170101 9:10:15", "20170101 9:10:16", "20170101 9:10:16", "20170101 9:11:17", "20170101 9:11:17",
"20180615 9:10:15", "20181031 9:10:15", "20190501 9:10:15",
"20190517 9:10:15"]).to_series()
pdf_s = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_s = pdf_s.set_index("id")
odf_s = orca.DataFrame(pdf_s)
# TODO: ALL ASSERTIONS FAIL
a = odf_s.rolling(window='s', on="date").var()
b = pdf_s.rolling(window='s', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_s.rolling(window='2s', on="date").var()
b = pdf_s.rolling(window='2s', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_s.rolling(window='s', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.reset_index()
# pdf_dai = pdf_s.reset_index()
# a = odf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='s', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_s.set_index('date')
# pdf_dai = pdf_s.set_index('date')
# a = odf_dai.rolling(window='s').var()
# b = pdf_dai.rolling(window='s').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='s')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_milli_var(self):
ps = pd.to_datetime(
["20170101 9:10:15.000", "20170101 9:10:15.000", "20170101 9:10:15.001", "20170101 9:11:17.015",
"20170101 9:11:17.015",
"20180615 9:10:15.015", "20181031 9:10:15.015", "20190501 9:10:15.015",
"20190517 9:10:15.015"]).to_series()
pdf_l = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_l = pdf_l.set_index("id")
odf_l = orca.DataFrame(pdf_l)
# TODO: ALL ASSERTIONS FAIL
a = odf_l.rolling(window='l', on="date").var()
b = pdf_l.rolling(window='l', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_l.rolling(window='2l', on="date").var()
b = pdf_l.rolling(window='2l', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_l.rolling(window='l', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_l.rolling(window='l', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.reset_index()
# pdf_dai = pdf_l.reset_index()
# a = odf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='l', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_l.set_index('date')
# pdf_dai = pdf_l.set_index('date')
# a = odf_dai.rolling(window='l').var()
# b = pdf_dai.rolling(window='l').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='l')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_micro_var(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000", "20170101 9:10:15.000000", "20170101 9:10:15.000001", "20170101 9:11:17.015001",
"20170101 9:11:17.015002",
"20180615 9:10:15.015000", "20181031 9:10:15.015000", "20190501 9:10:15.015000",
"20190517 9:10:15.015000"]).to_series()
pdf_u = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_u = pdf_u.set_index("id")
odf_u = orca.DataFrame(pdf_u)
# TODO: ALL ASSERTIONS FAIL
a = odf_u.rolling(window='u', on="date").var()
b = pdf_u.rolling(window='u', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_u.rolling(window='2u', on="date").var()
b = pdf_u.rolling(window='2u', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_u.rolling(window='u', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.reset_index()
# pdf_dai = pdf_u.reset_index()
# a = odf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='u', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_u.set_index('date')
# pdf_dai = pdf_u.set_index('date')
# a = odf_dai.rolling(window='u').var()
# b = pdf_dai.rolling(window='u').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='u')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_pandas_param_window_rule_nano_var(self):
ps = pd.to_datetime(
["20170101 9:10:15.000000000", "20170101 9:10:15.000000000", "20170101 9:10:15.000000001",
"20170101 9:11:17.015000001",
"20170101 9:11:17.015002001",
"20180615 9:10:15.015000001", "20181031 9:10:15.015000001", "20190501 9:10:15.015000001",
"20190517 9:10:15.015000001"]).to_series()
pdf_n = pd.DataFrame({'id': np.arange(1, 10, 1, dtype='int32'),
'date': ps,
'tchar': np.arange(1, 10, 1, dtype='int8'),
'tshort': np.arange(1, 10, 1, dtype='int16'),
'tint': np.arange(1, 10, 1, dtype='int32'),
'tlong': np.arange(1, 10, 1, dtype='int64'),
'tfloat': np.arange(1, 10, 1, dtype='float32'),
'tdouble': np.arange(1, 10, 1, dtype='float64')
})
pdf_n = pdf_n.set_index("id")
odf_n = orca.DataFrame(pdf_n)
# TODO: ALL ASSERTIONS FAIL
a = odf_n.rolling(window='n', on="date").var()
b = pdf_n.rolling(window='n', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = odf_n.rolling(window='2n', on="date").var()
b = pdf_n.rolling(window='2n', on="date").var()
# assert_frame_equal(a.to_pandas(), b, check_dtype=False)
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tchar', 'tint', 'tlong', 'tfloat'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# a = odf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# b = pdf_n.rolling(window='n', on="date")[
# 'date', 'tdouble', 'tchar', 'tint', 'tlong', 'tfloat', 'tshort'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.reset_index()
# pdf_dai = pdf_n.reset_index()
# a = odf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='n', on="date")[
# 'date', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
#
# odf_dai = odf_n.set_index('date')
# pdf_dai = pdf_n.set_index('date')
# a = odf_dai.rolling(window='n').var()
# b = pdf_dai.rolling(window='n').var()
# assert_frame_equal(a.to_pandas(), b)
#
# a = odf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# b = pdf_dai.rolling(window='n')[
# 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
# assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_sum(self):
a = self.odf_csv.rolling(window=5).sum()
b = self.pdf_csv.rolling(window=5).sum()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].sum()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].sum()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].sum()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_count(self):
a = self.odf_csv.rolling(window=5).count()
b = self.pdf_csv.rolling(window=5).count()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].count()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].count()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].count()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_mean(self):
a = self.odf_csv.rolling(window=5).mean()
b = self.pdf_csv.rolling(window=5).mean()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].mean()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].mean()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].mean()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_max(self):
a = self.odf_csv.rolling(window=5).max()
b = self.pdf_csv.rolling(window=5).max()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].max()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].max()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].max()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_min(self):
a = self.odf_csv.rolling(window=5).min()
b = self.pdf_csv.rolling(window=5).min()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].min()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].min()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].min()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_std(self):
a = self.odf_csv.rolling(window=5).std()
b = self.pdf_csv.rolling(window=5).std()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].std()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].std()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].std()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
def test_rolling_from_import_param_window_var(self):
a = self.odf_csv.rolling(window=5).var()
b = self.pdf_csv.rolling(window=5).var()
assert_frame_equal(a.to_pandas(), b, check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].var()
b = self.pdf_csv.rolling(window=5)[
'id', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
a = self.odf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].var()
b = self.pdf_csv.rolling(window=5)[
'id', 'tdouble', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.reset_index()
pdf_dai = self.pdf_csv.reset_index()
a = odf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window=5, on="id")[
'id', 'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
odf_dai = self.odf_csv.set_index('id')
pdf_dai = self.pdf_csv.set_index('id')
a = odf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
b = pdf_dai.rolling(window=5)[
'tbool', 'tchar', 'tshort', 'tint', 'tlong', 'tfloat', 'tdouble'].var()
assert_frame_equal(a.to_pandas().reset_index(drop=True), b.reset_index(drop=True), check_dtype=False)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_io.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
class InputOutputTest(unittest.TestCase):
def setUp(self):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def loadData(self, filename):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
data = os.path.join(DATA_DIR, filename)
data = data.replace('\\', '/')
return data
def test_read_csv_param_sep(self):
data = self.loadData('USPricesSample.csv')
pdf = pd.read_csv(data, parse_dates=[1])
pdf.iloc[:, 9].fillna("", inplace=True)
odf = orca.read_csv(data, dtype={"DLSTCD": "DOUBLE", "DLPRC": "DOUBLE"}).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# test white space
data = self.loadData('test_io.csv')
pdf = pd.read_csv(data, parse_dates=[1], sep=" ")
odf = orca.read_csv(data, sep=" ").to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# test delimiter
odf = orca.read_csv(data, delimiter=" ").to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
def test_read_csv_param_names(self):
# without header
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'])
odf = orca.read_csv(data, names=['A', 'B', 'C']).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# with header
# pandas parses the header row as data, while orca drops it.
data = self.loadData('test_io.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], sep=' ')
odf = orca.read_csv(data, names=['A', 'B', 'C'], sep=' ').to_pandas()
# assert_frame_equal(pdf, odf, check_dtype=False)
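# Editorial sketch (not part of the original test suite): the mismatch noted
# above can be reproduced on the pandas side alone. Passing header=0 together
# with names makes pandas discard the file's own header row, which matches the
# orca behavior described in the comment; assuming that alignment is what was
# intended, the comparison could read:
# pdf_aligned = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], sep=' ', header=0)
# odf_aligned = orca.read_csv(data, names=['A', 'B', 'C'], sep=' ').to_pandas()
# assert_frame_equal(pdf_aligned, odf_aligned, check_dtype=False)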
def test_read_csv_param_index_col(self):
# test white space
data = self.loadData('test_io.csv')
pdf = pd.read_csv(data, parse_dates=[1], sep=" ", index_col=[1])
odf = orca.read_csv(data, sep=" ", index_col=1).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
data = self.loadData('test_io.csv')
pdf = pd.read_csv(data, parse_dates=[1], sep=" ", index_col=['date'])
odf = orca.read_csv(data, sep=" ", index_col=['date']).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# without header
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], index_col=1)
odf = orca.read_csv(data, names=['A', 'B', 'C'], index_col=1).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# without header, using explicitly set names
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], index_col=['A'])
odf = orca.read_csv(data, names=['A', 'B', 'C'], index_col=['A']).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
def test_read_csv_param_index_engine(self):
# just to test which engine is used
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], index_col=['A'])
odf = orca.read_csv(data, names=['A', 'B', 'C'], index_col=['A'], engine='c', parse_dates=[1]).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
def test_read_csv_param_usecols(self):
# tip: parse_dates selects columns by the order determined by usecols
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[0], names=['A', 'B', 'C'], usecols=[1, 2])
odf = orca.read_csv(data, names=['A', 'B', 'C'], usecols=[1, 2]).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# without the header, using the given names
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[0], names=['A', 'B', 'C'], usecols=['B'])
odf = orca.read_csv(data, names=['A', 'B', 'C'], usecols=['B']).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
data = self.loadData('test_io.csv')
pdf = pd.read_csv(data, parse_dates=[0], sep=" ", usecols=['date'])
odf = orca.read_csv(data, sep=" ", usecols=['date']).to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
def test_read_csv_param_squeeze(self):
# TODO: a pandas Series has a name, but the orca Series does not
data = self.loadData('test_io_squeeze.csv')
pdf = pd.read_csv(data, squeeze=True)
odf = orca.read_csv(data, squeeze=True).to_pandas()
assert_series_equal(pdf, odf, check_dtype=False, check_names=False)
# without the header, using the given names
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[0], names=['A', 'B', 'C'], usecols=['B'], squeeze=True)
odf = orca.read_csv(data, names=['A', 'B', 'C'], usecols=['B'], squeeze=True).to_pandas()
assert_series_equal(pdf, odf, check_dtype=False, check_names=False)
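# Editorial note on the TODO above (assumption about the cause): pandas names
# the squeezed Series after the CSV column header, while the orca result comes
# back unnamed, hence check_names=False. A hypothetical way to compare names
# as well would be to copy the name over after conversion:
# os_named = orca.read_csv(data, names=['A', 'B', 'C'], usecols=['B'], squeeze=True).to_pandas()
# os_named.name = pdf.name  # hypothetical alignment step, not in the original
# assert_series_equal(pdf, os_named, check_dtype=False)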
def test_read_csv_param_prefix(self):
# without header and with names set, the prefix is not used
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], names=['A', 'B', 'C'], prefix='X')
odf = orca.read_csv(data, names=['A', 'B', 'C'], prefix='X').to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
# without header and names
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1], header=None, prefix='X')
odf = orca.read_csv(data, prefix='X').to_pandas()
assert_frame_equal(pdf, odf, check_dtype=False)
def test_read(self):
data = self.loadData('test_io_names.csv')
pdf = pd.read_csv(data, parse_dates=[1])
# feather/to_parquet round trips need the pyarrow dependency
file = pdf.to_html()
# assert_equal(pd.read_html(file)[0], orca.read_html(file)[0])
# pdf.to_pickle("test.pickle")
data = self.loadData('test.pickle')
# assert_frame_equal(pd.read_pickle(data), orca.read_pickle(data))
file = pdf.to_msgpack()
# assert_frame_equal(pd.read_msgpack(file), orca.read_msgpack(file))
'''
pdf.to_parquet("test.parquet")
data = self.loadData('test.parquet')
assert_frame_equal(pd.read_parquet(data), orca.read_parquet(data))
pdf.to_feather("test.feather")
data = self.loadData('test.feather')
assert_frame_equal(pd.read_feather(data), orca.read_feather(data))
'''
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_orca_general_functions.py
```python
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
WORK_DIR = WORK_DIR.replace('\\', '/')
class Csv:
pdf_csv = None
odf_csv = None
class DBNames:
disk = WORK_DIR + "onDiskUnpartitionedDB"
diskPRange = WORK_DIR + "onDiskPartitionedRangeDB"
diskPValue = WORK_DIR + "onDiskPartitionedValueDB"
dfsRange = "dfs://RangeDB"
dfsValue = "dfs://ValueDB"
class TBNames:
shared = "tshared"
streamShared = "tStreamShared"
diskTB = "tb1"
dfsTB = "tb1"
def _clear(dbName):
s = orca.default_session()
s.run("""
login("admin", "<PASSWORD>")
dbPath='{dir}'
if(exists(dbPath))
dropDatabase(dbPath)
""".format(dir=dbName))
def _create_tables(DATA_DIR):
s = orca.default_session()
dolphindb_script = """
login("admin", "<PASSWORD>")
names=`id`date`month`time`minute`second`datetime`timestamp`nanotime`nanotimestamp`tstring`tsymbol`tbool`tchar`tshort`tint`tlong`tfloat`tdouble
types=`INT`DATE`DATE`TIME`SECOND`SECOND`DATETIME`TIMESTAMP`NANOTIME`NANOTIMESTAMP`STRING`SYMBOL`BOOL`CHAR`SHORT`INT`LONG`FLOAT`DOUBLE
schema=table(names as name, types as typeString)
// in-memory
t=loadText('{datadir}',,schema)
// shared
share t as {shared}
//stream shared
share streamTable(100:0, names, types) as {streamShared}
{streamShared}.append!(t)
//disk
dbPath='{disk}'
if(exists(dbPath))
dropDatabase(dbPath)
//saveTable(dbPath, t, `{disktb})
//disk range partition
dbPath='{diskPR}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath, RANGE, 1 21 41 61 81 101)
tb=db.createPartitionedTable(t,`{disktb},`id)
tb.append!(t)
//disk value partition
dbPath='{diskPV}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath, VALUE, `A`B`C`D`E`F`G)
tb=db.createPartitionedTable(t,`{disktb},`tstring)
tb.append!(t)
//dfs range partition
dbPath='{dfsR}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath, RANGE, 1 21 41 61 81 101)
tb=db.createPartitionedTable(t,`{dfstb},`id)
tb.append!(t)
//dfs value partition
dbPath='{dfsV}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath, VALUE, `A`B`C`D`E`F`G)
tb=db.createPartitionedTable(t,`{dfstb},`tstring)
tb.append!(t)""".format(datadir=DATA_DIR, shared=TBNames.shared, streamShared=TBNames.streamShared,
disk=DBNames.disk, diskPR=DBNames.diskPRange, diskPV=DBNames.diskPValue,
dfsR=DBNames.dfsRange, dfsV=DBNames.dfsValue,
disktb=TBNames.diskTB, dfstb=TBNames.dfsTB)
s.run(dolphindb_script)
class GeneralFunctionsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
DATA_DIR = DATA_DIR.replace('\\', '/')
fileName = 'testTables.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
Csv.odf_csv = orca.read_csv(data, dtype={"date": 'DATE', "tstring": "STRING", "tsymbol": "SYMBOL",
"tbool": "BOOL", "tchar": np.int8, "tshort": np.int16,
"tlong": np.int64, "tfloat": np.float32})
Csv.pdf_csv = pd.read_csv(data, parse_dates=[1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype={"id": np.int32, "tbool": np.bool, "tchar": np.int8, "tshort": np.int16,
"tint": np.int32, "tfloat": np.float32})
_create_tables(data)
@property
def pdf_csv(self):
return Csv.pdf_csv
@property
def odf_csv(self):
return Csv.odf_csv
@property
def odf_disk(self):
return orca.read_table(DBNames.disk, 'tb1')
@property
def odf_disk_partitioned_range(self):
return orca.read_table(DBNames.diskPRange, 'tb1')
@property
def odf_disk_partitioned_value(self):
return orca.read_table(DBNames.diskPValue, 'tb1')
@property
def odfs_range(self):
return orca.read_table(DBNames.dfsRange, 'tb1')
@property
def odfs_value(self):
return orca.read_table(DBNames.dfsValue, 'tb1')
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def odf(self):
return orca.DataFrame(self.pdf)
def test_orca_to_datetime(self):
lt = ['3/11/2000', '3/12/2000', '3/13/2000'] * 100
assert_index_equal(orca.to_datetime(lt), pd.to_datetime(lt))
# ps = pd.Series(lt)
# os = orca.Series(ps)
# self.assertEqual(pd.to_datetime(ps, infer_datetime_format=True),
# orca.to_datetime(os, infer_datetime_format=True))
def test_orca_concat_series(self):
s1 = pd.Series(['a', 'b'])
s2 = pd.Series(['c', 'd'])
o1 = orca.Series(['a', 'b'])
o2 = orca.Series(['c', 'd'])
assert_series_equal(pd.concat([s1, s2]), orca.concat([o1, o2]).to_pandas())
assert_series_equal(pd.concat([s1, s2], ignore_index=True),
orca.concat([o1, o2], ignore_index=True).to_pandas())
def test_orca_concat_dataframe(self):
pdf1 = pd.DataFrame([['a', 1], ['b', 2]], columns=['letter', 'number'])
pdf2 = pd.DataFrame([['c', 3], ['d', 4]], columns=['letter', 'number'])
odf1 = orca.DataFrame([['a', 1], ['b', 2]], columns=['letter', 'number'])
odf2 = orca.DataFrame([['c', 3], ['d', 4]], columns=['letter', 'number'])
assert_frame_equal(pd.concat([pdf1, pdf2]), orca.concat([odf1, odf2]).to_pandas())
# assert_frame_equal(pd.concat([pdf1, pdf1]), orca.concat([odf1, odf1]).to_pandas())
assert_frame_equal(pd.concat([pdf1, pdf2], join="inner"), orca.concat([odf1, odf2], join="inner").to_pandas())
assert_frame_equal(pd.concat([pdf1, pdf2], ignore_index=True),
orca.concat([odf1, odf2], ignore_index=True).to_pandas())
pdf1 = pd.DataFrame([[3, 1], [6, 2]], columns=['letter', 'number'])
odf1 = orca.DataFrame([[3, 1], [6, 2]], columns=['letter', 'number'])
pdf3 = pd.DataFrame([[100, 3, 16], [90, 4, 7]], columns=['letter', 'number', 'animal'])
odf3 = orca.DataFrame([[100, 3, 16], [90, 4, 7]], columns=['letter', 'number', 'animal'])
assert_frame_equal(pd.concat([pdf1, pdf3], join="inner"), orca.concat([odf1, odf3], join="inner").to_pandas())
assert_frame_equal(pd.concat([pdf1, pdf3], join="outer", sort=False),
orca.concat([odf1, odf3], join="outer", sort=False).to_pandas())
assert_frame_equal(pd.concat([pdf1, pdf3], ignore_index=True, sort=False),
orca.concat([odf1, odf3], ignore_index=True, sort=False).to_pandas())
tuples = [('cobra', 'mark i'), ('cobra', 'mark ii'), ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
('viper', 'mark ii'), ('viper', 'mark iii')]
index = pd.MultiIndex.from_tuples(tuples)
values = [[12, 2], [0, 4], [10, 20], [1, 4], [7, 1], [16, 36]]
pdf = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
index = orca.MultiIndex.from_tuples(tuples)
odf = orca.DataFrame(values, columns=['max_speed', 'shield'], index=index)
assert_frame_equal(pd.concat([pdf, pdf1], ignore_index=True, sort=False),
orca.concat([odf, odf1], ignore_index=True, sort=False).to_pandas())
def test_orca_read_shared_table(self):
odf = orca.read_shared_table(TBNames.shared)
assert_frame_equal(odf.to_pandas(), self.odf_csv.to_pandas())
orca.default_session().run("undef(`{sh},SHARED)".format(sh=TBNames.shared))
def test_orca_read_shared_streamtable(self):
odf = orca.read_shared_table(TBNames.streamShared)
assert_frame_equal(odf.to_pandas(), self.odf_csv.to_pandas())
orca.default_session().run("undef(`{sh},SHARED)".format(sh=TBNames.streamShared))
def test_orca_save_table_disk(self):
orca.save_table(DBNames.disk, TBNames.diskTB, self.odf)
odf_disk = orca.read_table(DBNames.disk, TBNames.diskTB)
# index will be reset
assert_frame_equal(self.pdf.reset_index(drop=True), odf_disk.to_pandas())
_clear(DBNames.disk)
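# Editorial sketch (assumption, not verified against orca's API): if the
# original index had to survive the save_table/read_table round trip, it could
# be materialized as an ordinary column before saving, e.g.:
# odf_with_idx = self.odf.reset_index()  # index becomes a regular column
# orca.save_table(DBNames.disk, TBNames.diskTB, odf_with_idx)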
def test_orca_save_table_disk_partition(self):
odf = self.odf_csv
# range
orca.save_table(DBNames.diskPRange, TBNames.diskTB, self.odf_csv)
x = orca.read_table(DBNames.diskPRange, TBNames.diskTB)
assert_frame_equal(odf.to_pandas(), x.to_pandas())
_clear(DBNames.diskPRange)
# value
orca.save_table(DBNames.diskPValue, TBNames.diskTB, self.odf_csv)
x = orca.read_table(DBNames.diskPValue, TBNames.diskTB)
assert_frame_equal(odf.to_pandas(), x.to_pandas())
_clear(DBNames.diskPValue)
def test_orca_save_table_dfs(self):
odf = self.odf_csv
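# Editorial comment (inferred from the assertions below): the dfs tables were
# already populated once by _create_tables, and save_table presumably appends
# to them, so the expected frame is the original data appended to itself.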
# range
orca.save_table(DBNames.dfsRange, TBNames.dfsTB, self.odf_csv)
x = orca.read_table(DBNames.dfsRange, TBNames.dfsTB)
assert_frame_equal(odf.append(odf).to_pandas().sort_values("id").reset_index(drop=True),
x.to_pandas().sort_values("id").reset_index(drop=True), check_index_type=False)
# value
orca.save_table(DBNames.dfsValue, TBNames.dfsTB, self.odf_csv)
x = orca.read_table(DBNames.dfsValue, TBNames.dfsTB)
assert_frame_equal(odf.append(odf).to_pandas().sort_values("id").reset_index(drop=True),
x.to_pandas().sort_values("id").reset_index(drop=True), check_index_type=False)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_plotting.py
```python
import unittest
import os
import orca
import pandas as pd
import matplotlib.pyplot as plt
import os.path as path
import base64
from io import BytesIO
from setup.settings import *
class Csv(object):
pdf_csv = None
odf_csv = None
class PlottingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# configure data directory
DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
fileName = 'USPricesSample.csv'
data = os.path.join(DATA_DIR, fileName)
data = data.replace('\\', '/')
# Orca connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
# import
Csv.odf_csv = orca.read_csv(data)
Csv.odf_csv.set_index('date', inplace=True)
Csv.pdf_csv = pd.read_csv(data)
Csv.pdf_csv.set_index('date', inplace=True)
@property
def odf_series_plot(self):
return Csv.odf_csv.groupby('date').sum()['BID']
@property
def odf_frame_plot(self):
return Csv.odf_csv.groupby('date').sum()
@property
def pdf_series_plot(self):
return Csv.pdf_csv.groupby('date').sum()['BID']
@property
def pdf_frame_plot(self):
return Csv.pdf_csv.groupby('date').sum()
# def test_plot_area(self):
# self.odf_series_plot.plot.area(x='date', y='BID')
# # plt.show()
# self.pdf_series_plot.plot.area(x='date', y='BID')
# # plt.show()
def test_plot_bar(self):
self.odf_series_plot.plot.bar(x='date', y='BID')
# plt.show()
self.pdf_series_plot.plot.bar(x='date', y='BID')
# plt.show()
def test_plot_barh(self):
self.odf_series_plot.plot.barh(x='date', y='BID')
# plt.show()
self.pdf_series_plot.plot.barh(x='date', y='BID')
# plt.show()
def test_plot_box(self):
self.odf_series_plot.plot.box(by='date')
# plt.show()
self.pdf_series_plot.plot.box(by='date')
# plt.show()
def test_plot_density(self):
self.odf_series_plot.plot.density(bw_method=0.3)
# plt.show()
self.pdf_series_plot.plot.density(bw_method=0.3)
# plt.show()
def test_plot_hexbin(self):
self.odf_frame_plot.plot.hexbin(x='SHRCD', y='BID', gridsize=20)
# plt.show()
self.pdf_frame_plot.plot.hexbin(x='SHRCD', y='BID', gridsize=20)
# plt.show()
def test_plot_hist(self):
self.odf_series_plot.plot.hist(by='date', bins=10)
# plt.show()
self.pdf_series_plot.plot.hist(by='date', bins=10)
# plt.show()
def test_series_hist(self):
# TODO: NOT IMPLEMENTED
pass
# pdf = pd.DataFrame({
# 'a': [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
# }, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10])
#
# odf = orca.DataFrame(pdf)
#
# def plot_to_base64(ax):
# bytes_data = BytesIO()
# ax.figure.savefig(bytes_data, format='png')
# bytes_data.seek(0)
# b64_data = base64.b64encode(bytes_data.read())
# plt.close(ax.figure)
# return b64_data
#
# _, ax1 = plt.subplots(1, 1)
# # Using plot.hist() because pandas changes tick props when hist() is called
# ax1 = pdf['a'].plot.hist()
# _, ax2 = plt.subplots(1, 1)
# ax2 = odf['a'].hist()
# self.assertEqual(plot_to_base64(ax1), plot_to_base64(ax2))
def test_plot_kde(self):
self.odf_series_plot.plot.kde(bw_method=0.3)
# plt.show()
self.pdf_series_plot.plot.kde(bw_method=0.3)
# plt.show()
# def test_plot_line(self):
# self.odf_series_plot.plot.line(x='date', y='BID')
# # plt.show()
# self.pdf_series_plot.plot.line(x='date', y='BID')
# # plt.show()
def test_plot_pie(self):
self.odf_series_plot.plot.pie(y='date', figsize=(6, 3))
# plt.show()
self.pdf_series_plot.plot.pie(y='date', figsize=(6, 3))
# plt.show()
def test_plot_scatter(self):
self.odf_frame_plot.plot.scatter(x='SHRCD', y='BID')
# plt.show()
self.pdf_frame_plot.plot.scatter(x='SHRCD', y='BID')
# plt.show()
def test_boxplot(self):
self.odf_frame_plot.boxplot()
# plt.show()
self.pdf_frame_plot.boxplot()
# plt.show()
def test_hist(self):
self.pdf_series_plot.hist()
# plt.show()
self.odf_series_plot.hist()
# plt.show()
if __name__ == '__main__':
unittest.main()
```
#### File: tests/orca_unit_testing/test_series_str.py
```python
import unittest
import orca
from setup.settings import *
from pandas.util.testing import *
class SeriesStrTest(unittest.TestCase):
def setUp(self):
self.PRECISION = 5
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
@property
def ps(self):
return pd.Series(['Foo', 'ss ', 'sW', 'qa'], name='x')
@property
def os(self):
return orca.Series(self.ps)
@property
def psa(self):
return pd.Series([10, 1, 19, np.nan], index=['a', 'b', 'c', 'd'])
@property
def psb(self):
return pd.Series([-1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])
def test_series_str_count(self):
assert_series_equal(self.ps.str.count('a'), self.os.str.count("a").to_pandas(),check_dtype=False)
def test_series_str_startsWith(self):
assert_series_equal(self.ps.str.startswith('Fo'), self.os.str.startswith('Fo').to_pandas(), check_dtype=False)
def test_series_str_endswith(self):
assert_series_equal(self.ps.str.endswith('W'), self.os.str.endswith('W').to_pandas(), check_dtype=False)
def test_series_str_find(self):
assert_series_equal(self.ps.str.find('Fo'), self.os.str.find('Fo').to_pandas(), check_dtype=False)
def test_series_str_get(self):
assert_series_equal(self.ps.str.get(1), self.os.str.get(1).to_pandas(), check_dtype=False)
def test_series_str_just(self):
# TODO: pandas does not truncate the string when the width is smaller than the string length
# assert_series_equal(self.ps.str.ljust(1), self.os.str.ljust(1).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.ljust(10), self.os.str.ljust(10).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.ljust(10,'A'), self.os.str.ljust(10,'A').to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.rjust(10), self.os.str.rjust(10).to_pandas(), check_dtype=False)
assert_series_equal(self.ps.str.rjust(10, 'A'), self.os.str.rjust(10, 'A').to_pandas(), check_dtype=False)
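# Editorial illustration of the TODO above (plain pandas behavior): pandas
# pads up to the requested width but never truncates, e.g.
# pd.Series(['Foo']).str.ljust(1) still returns 'Foo', which is why the
# ljust(1) comparison is left commented out.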
def test_series_str_is(self):
assert_series_equal(self.ps.str.isalnum(),self.os.str.isalnum().to_pandas())
assert_series_equal(self.ps.str.isalpha(), self.os.str.isalpha().to_pandas())
assert_series_equal(self.ps.str.isdigit(), self.os.str.isdigit().to_pandas())
assert_series_equal(self.ps.str.isspace(), self.os.str.isspace().to_pandas())
assert_series_equal(self.ps.str.islower(), self.os.str.islower().to_pandas())
assert_series_equal(self.ps.str.isupper(), self.os.str.isupper().to_pandas())
assert_series_equal(self.ps.str.istitle(), self.os.str.istitle().to_pandas())
assert_series_equal(self.ps.str.isnumeric(), self.os.str.isnumeric().to_pandas())
assert_series_equal(self.ps.str.isdecimal(), self.os.str.isdecimal().to_pandas())
``` |
{
"source": "jiajic/Giotto_site_suite",
"score": 2
} |
#### File: inst/python/silhouette_rank_wrapper.py
```python
import sys
import os
import re
import numpy as np
import subprocess
import math
import scipy
import silhouetteRank.spatial_genes as spatial_genes
from shutil import copyfile
from operator import itemgetter
from scipy.spatial.distance import squareform, pdist
from scipy.stats import percentileofscore
from sklearn.metrics import roc_auc_score
import pandas as pd
import argparse
import silhouetteRank
import silhouetteRank.prep as prep
import silhouetteRank.evaluate_exact_one_2b as evaluate_exact_one_2b
import silhouetteRank.use_previous_cluster as use_previous_cluster
import silhouetteRank.combine as combine
import logging
def silhouette_rank(expr="expression.txt", centroid="Xcen.good", overwrite_input_bin=True, rbp_ps=[0.95, 0.99], examine_tops=[0.005, 0.010, 0.050, 0.100, 0.300], matrix_type="dissim", num_core=4, parallel_path="/usr/bin", output=".", query_sizes=10, verbose=True):
args = argparse.Namespace(expr=expr, centroid=centroid, rbp_ps=rbp_ps, examine_tops=examine_tops, matrix_type=matrix_type, output=output, query_sizes=query_sizes, overwrite_input_bin=overwrite_input_bin, parallel_path=parallel_path, num_core=num_core, verbose=verbose)
if not os.path.isdir(args.output):
os.mkdir(args.output)
logdir = "%s/logs" % args.output
if not os.path.isdir(logdir):
os.mkdir(logdir)
verbose = args.verbose
log_file = "%s/master.log" % args.output
logger = logging.getLogger("master")
logger.setLevel(logging.DEBUG)
if not logger.hasHandlers():
handler = logging.FileHandler(log_file)
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(handler)
if verbose:
logger.addHandler(logging.StreamHandler())
args1 = argparse.Namespace(expr=args.expr, centroid=args.centroid, rbp_ps=args.rbp_ps, examine_tops=args.examine_tops, matrix_type=args.matrix_type, output=args.output, query_sizes=args.query_sizes, overwrite_input_bin=args.overwrite_input_bin, verbose=verbose, log_file="master.prep.log")
prep.do_one(args1)
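# Editorial comment describing the loop below: build the argument file
# consumed by the GNU parallel randomization step, one
# (rbp_p, examine_top, query_size_index) triple per job.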
fw = open("%s/args" % args.output, "w")
for rbp_p in args.rbp_ps:
for examine_top in args.examine_tops:
freq_file = "%s/result_5000_%.2f_%.3f/gene.freq.good.txt" % (args.output, rbp_p, examine_top)
if args.matrix_type=="sim":
freq_file = "%s/result_sim_5000_%.2f_%.3f/gene.freq.good.txt" % (args.output, rbp_p, examine_top)
uniq_freq = 0
f = open(freq_file)
for l in f:
l = l.rstrip("\n")
uniq_freq+=1
f.close()
num_query_sizes = args.query_sizes
if uniq_freq<=num_query_sizes:
num_query_sizes = uniq_freq
for i in range(num_query_sizes):
fw.write("%.2f\n" % rbp_p)
fw.write("%.3f\n" % examine_top)
fw.write("%d\n" % i)
fw.close()
fw = open("%s/args.basic" % args.output, "w")
for rbp_p in args.rbp_ps:
for examine_top in args.examine_tops:
fw.write("%.2f\n" % rbp_p)
fw.write("%.3f\n" % examine_top)
fw.close()
bin_path = os.path.dirname(silhouetteRank.__file__)
for i in range(4):
bin_path = os.path.dirname(bin_path)
bin_path = os.path.join(bin_path, "bin")
logger.info("Start calculating silhouette rank, saving logs to log directory (check progress here)...")
cmd = "cat '%s'/args.basic | '%s'/parallel --jobs %d --max-args=2 \\''%s'\\'''/silhouette_rank_main -x \\''%s'\\''' -c \\''%s'\\''' -r {1} -e {2} -m %s -o \\''%s'\\'''" % (args.output, args.parallel_path, args.num_core, bin_path, args.expr, args.centroid, args.matrix_type, args.output)
os.system(cmd)
logger.info("Start randomization, saving logs to log directory (check progress here)...")
cmd="cat '%s'/args | '%s'/parallel --jobs %d --max-args=3 \\''%s'\\'''/silhouette_rank_random -r {1} -e {2} -m %s -o \\''%s'\\''' -q {3}" % (args.output, args.parallel_path, args.num_core, bin_path, args.matrix_type, args.output)
os.system(cmd)
logger.info("Start computing P-values...")
for rbp_p in args.rbp_ps:
for examine_top in args.examine_tops:
random_dir = "%s/result_sim_5000_%.2f_%.3f" % (args.output, rbp_p, examine_top)
score_file = "%s/silhouette.sim.exact.rbp.%.2f.top.%.3f.txt" % (args.output, rbp_p, examine_top)
output_score_file = "%s/silhouette.sim.exact.rbp.%.2f.top.%.3f.pval.txt" % (args.output, rbp_p, examine_top)
if args.matrix_type=="dissim":
random_dir = "%s/result_5000_%.2f_%.3f" % (args.output, rbp_p, examine_top)
score_file = "%s/silhouette.exact.rbp.%.2f.top.%.3f.txt" % (args.output, rbp_p, examine_top)
output_score_file = "%s/silhouette.exact.rbp.%.2f.top.%.3f.pval.txt" % (args.output, rbp_p, examine_top)
args1 = argparse.Namespace(expr=args.expr, centroid=args.centroid, examine_top=examine_top, input=score_file, input_random=random_dir, output=output_score_file, outdir=args.output, query_sizes=args.query_sizes, overwrite_input_bin=False, verbose=verbose, log_file="master.pvalue.log")
use_previous_cluster.do_one(args1)
combined_file = "%s/silhouette.overall.pval.txt" % args.output
if args.matrix_type=="sim":
combined_file = "%s/silhouette.sim.overall.pval.txt" % args.output
args1 = argparse.Namespace(rbp_ps=args.rbp_ps, examine_tops=args.examine_tops, matrix_type=args.matrix_type, input=args.output, output=combined_file)
combine.do_one(args1)
res = {"gene":[], "chisq":[], "pval":[], "qval":[]}
f = open(combined_file)
for l in f:
l = l.rstrip("\n")
ll = l.split()
res["gene"].append(ll[0])
res["chisq"].append(float(ll[1]))
res["pval"].append(float(ll[2]))
res["qval"].append(float(ll[3]))
f.close()
df = pd.DataFrame(res, columns=["gene", "chisq", "pval", "qval"])
return df
``` |
{
"source": "jiajlin/TrickHLA",
"score": 2
} |
#### File: Modified_data/SpaceFOM/SpaceFOMRefFrameObject.py
```python
import trick
from ..TrickHLA.TrickHLAObjectConfig import *
from ..TrickHLA.TrickHLAAttributeConfig import *
class SpaceFOMRefFrameObject(TrickHLAObjectConfig):
trick_frame_sim_obj_name = None
def __init__( self,
create_frame_object,
frame_instance_name,
frame_S_define_instance,
frame_S_define_instance_name,
frame_lag_comp = None,
frame_ownership = None,
frame_thla_manager_object = None ):
# The Reference Frame FOM name is fixed for the SpaceFOM.
frame_FOM_name = 'ReferenceFrame'
# Copy the frame federation execution instance name.
frame_federation_instance_name = str( frame_instance_name )
# Save the frame name to use for trick_data_name generation.
self.trick_frame_sim_obj_name = str( frame_S_define_instance_name )
# By SpaceFOM rule 5-1 the Reference Frame instance name must exactly
# match the Reference Frame name in the data.
if ( create_frame_object ) :
frame_S_define_instance.set_name( frame_instance_name )
else:
frame_S_define_instance.set_name( '' )
# Call the base class constructor.
TrickHLAObjectConfig.__init__( self,
create_frame_object,
frame_instance_name,
frame_FOM_name,
frame_lag_comp,
frame_ownership,
frame_S_define_instance,
frame_thla_manager_object )
# Build the object attribute list.
self.add_attributes()
return
def initialize( self, thla_object ):
# Call the base class initialization utility function.
TrickHLAObjectConfig.initialize( self, thla_object )
return
def add_attributes( self ):
# Short cut the sim_object name for the frame data.
frame_instance_name = self.trick_frame_sim_obj_name
## Set up the map to the reference frame's name.
trick_data_name = str(frame_instance_name) + '.name'
attribute = TrickHLAAttributeConfig( 'name',
trick_data_name,
self.hla_create,
not self.hla_create,
self.hla_create,
trick.TrickHLA.CONFIG_INITIALIZE + trick.TrickHLA.CONFIG_CYCLIC,
trick.TrickHLA.ENCODING_UNICODE_STRING )
self.add_attribute( attribute )
## Set up the map to the name of the reference frame's parent frame.
trick_data_name = str(frame_instance_name) + '.parent_name'
attribute = TrickHLAAttributeConfig( 'parent_name',
trick_data_name,
self.hla_create,
not self.hla_create,
self.hla_create,
trick.TrickHLA.CONFIG_INITIALIZE + trick.TrickHLA.CONFIG_CYCLIC,
trick.TrickHLA.ENCODING_UNICODE_STRING )
self.add_attribute( attribute )
## Set up the map to the reference frame's space/time coordinate state.
trick_data_name = str(frame_instance_name) + '.stc_encoder.buffer'
attribute = TrickHLAAttributeConfig( 'state',
trick_data_name,
self.hla_create,
not self.hla_create,
self.hla_create,
trick.TrickHLA.CONFIG_INITIALIZE + trick.TrickHLA.CONFIG_CYCLIC,
trick.TrickHLA.ENCODING_OPAQUE_DATA )
self.add_attribute( attribute )
return
```
#### File: Modified_data/TrickHLA/TrickHLAAttributeConfig.py
```python
import trick
class TrickHLAAttributeConfig( object ):
FOM_name = None
trick_name = None
publish = True
subscribe = False
locally_owned = True
config = trick.TrickHLA.CONFIG_CYCLIC
rti_encoding = trick.TrickHLA.ENCODING_UNICODE_STRING
def __init__( self,
FOM_name,
trick_name,
publish = True,
subscribe = True,
locally_owned = True,
config = trick.TrickHLA.CONFIG_CYCLIC,
rti_encoding = trick.TrickHLA.ENCODING_UNICODE_STRING ):
self.FOM_name = FOM_name
self.trick_name = trick_name
self.publish = publish
self.subscribe = subscribe
self.locally_owned = locally_owned
self.config = config
self.rti_encoding = rti_encoding
return
def initialize( self, attribute ):
attribute.FOM_name = self.FOM_name
attribute.trick_name = self.trick_name
attribute.publish = self.publish
attribute.subscribe = self.subscribe
attribute.locally_owned = self.locally_owned
attribute.config = self.config
attribute.rti_encoding = self.rti_encoding
return
``` |
{
"source": "jiajudu/qa2sent",
"score": 2
} |
#### File: jiajudu/qa2sent/qa2sent.py
```python
import json
import os
from pattern import en as patten
from stanza.server import CoreNLPClient
from tqdm import tqdm
# Map to pattern.en aliases
# http://www.clips.ua.ac.be/pages/pattern-en#conjugation
POS_TO_PATTERN = {
'vb': 'inf', # Infinitive
'vbp': '1sg', # non-3rd-person singular present
'vbz': '3sg', # 3rd-person singular present
'vbg': 'part', # gerund or present participle
'vbd': 'p', # past
'vbn': 'ppart', # past participle
}
# Tenses prioritized by likelihood of arising
PATTERN_TENSES = ['inf', '3sg', 'p', 'part', 'ppart', '1sg']
class ConstituencyParse(object):
"""A CoreNLP constituency parse (or a node in a parse tree).
Word-level constituents have |word| and |index| set and no children.
Phrase-level constituents have no |word| or |index| and have at least one child.
"""
def __init__(self, tag, children=None, word=None, index=None):
self.tag = tag
if children:
self.children = children
else:
self.children = None
self.word = word
self.index = index
@classmethod
def _recursive_parse_corenlp(cls, tokens, i, j):
orig_i = i
if tokens[i] == '(':
tag = tokens[i + 1]
children = []
i = i + 2
while True:
child, i, j = cls._recursive_parse_corenlp(tokens, i, j)
if isinstance(child, cls):
children.append(child)
if tokens[i] == ')':
return cls(tag, children), i + 1, j
else:
if tokens[i] != ')':
raise ValueError('Expected ")" following leaf')
return cls(tag, word=child, index=j), i + 1, j + 1
else:
# Only other possibility is it's a word
return tokens[i], i + 1, j
@classmethod
def from_corenlp(cls, s):
"""Parses the "parse" attribute returned by CoreNLP parse annotator."""
# "parse": "(ROOT\n (SBARQ\n (WHNP (WDT What)\n (NP (NN portion)\n (PP (IN of)\n (NP\n (NP (NNS households))\n (PP (IN in)\n (NP (NNP Jacksonville)))))))\n (SQ\n (VP (VBP have)\n (NP (RB only) (CD one) (NN person))))\n (. ? )))",
s_spaced = s.replace('\n', ' ').replace('(', ' ( ').replace(')', ' ) ')
tokens = [t for t in s_spaced.split(' ') if t]
tree, index, num_words = cls._recursive_parse_corenlp(tokens, 0, 0)
if index != len(tokens):
raise ValueError('Only parsed %d of %d tokens' % (index, len(tokens)))
return tree
def is_singleton(self):
if self.word: return True
if len(self.children) > 1: return False
return self.children[0].is_singleton()
def print_tree(self, indent=0):
spaces = ' ' * indent
if self.word:
print(('%s%s: %s (%d)' % (spaces, self.tag, self.word, self.index)).encode('utf-8'))
else:
print('%s%s:' % (spaces, self.tag))
for c in self.children:
c.print_tree(indent=indent + 1)
def get_phrase(self):
if self.word: return self.word
toks = []
for i, c in enumerate(self.children):
p = c.get_phrase()
if i == 0 or p.startswith("'"):
toks.append(p)
else:
toks.append(' ' + p)
return ''.join(toks)
def get_start_index(self):
if self.index is not None: return self.index
return self.children[0].get_start_index()
def get_end_index(self):
if self.index is not None: return self.index + 1
return self.children[-1].get_end_index()
@classmethod
def _recursive_replace_words(cls, tree, new_words, i):
if tree.word:
new_word = new_words[i]
return (cls(tree.tag, word=new_word, index=tree.index), i + 1)
new_children = []
for c in tree.children:
new_child, i = cls._recursive_replace_words(c, new_words, i)
new_children.append(new_child)
return cls(tree.tag, children=new_children), i
@classmethod
def replace_words(cls, tree, new_words):
"""Return a new tree, with new words replacing old ones."""
new_tree, i = cls._recursive_replace_words(tree, new_words, 0)
if i != len(new_words):
raise ValueError('len(new_words) == %d != i == %d' % (len(new_words), i))
return new_tree
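# Illustrative usage (not part of the original script; a minimal sketch using a toy parse string):
def _example_constituency_parse():
    toy_parse = '(ROOT (SBARQ (WHNP (WP Who)) (SQ (VBD wrote) (NP (NNP Hamlet))) (. ?)))'
    tree = ConstituencyParse.from_corenlp(toy_parse)
    print(tree.get_phrase())                             # expected: "Who wrote Hamlet ?"
    print(tree.get_start_index(), tree.get_end_index())  # expected: 0 4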
def read_const_parse(parse_str):
tree = ConstituencyParse.from_corenlp(parse_str)
new_tree = compress_whnp(tree)
return new_tree
def compress_whnp(tree, inside_whnp=False):
if not tree.children:
return tree
for i, c in enumerate(tree.children):
tree.children[i] = compress_whnp(c, inside_whnp=inside_whnp or tree.tag == 'WHNP')
if tree.tag != 'WHNP':
if inside_whnp:
return ConstituencyParse('NP', children=[tree])
return tree
wh_word = None
new_np_children = []
new_siblings = []
for i, c in enumerate(tree.children):
if i == 0:
if c.tag in ('WHNP', 'WHADJP', 'WHAVP', 'WHPP'):
wh_word = c.children[0]
new_np_children.extend(c.children[1:])
elif c.tag in ('WDT', 'WP', 'WP$', 'WRB'):
wh_word = c
else:
return tree
else:
if c.tag == 'SQ':
new_siblings = tree.children[i:]
break
new_np_children.append(ConstituencyParse('NP', children=[c]))
if new_np_children:
new_np = ConstituencyParse('NP', children=new_np_children)
new_tree = ConstituencyParse('WHNP', children=[wh_word, new_np])
else:
new_tree = tree
if new_siblings:
new_tree = ConstituencyParse('SBARQ', children=[new_tree] + new_siblings)
return new_tree
class ConversionRule(object):
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
raise NotImplementedError
class ConstituencyRule(ConversionRule):
"""A rule for converting question to sentence based on constituency parse."""
def __init__(self, in_pattern, out_pattern, postproc=None):
self.in_pattern = in_pattern
self.out_pattern = str(out_pattern)
self.name = in_pattern
if postproc:
self.postproc = postproc
else:
self.postproc = {}
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
pattern_toks = self.in_pattern.split(' ')
match = match_pattern(self.in_pattern, const_parse)
appended_clause = False
if not match:
appended_clause = True
new_pattern = '$PP , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match:
new_pattern = '$SBAR , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match:
return None
appended_clause_match = None
fmt_args = [a]
for t, m in zip(pattern_toks, match):
if t.startswith('$') or '/' in t:
phrase = convert_whp(m, q, a, tokens)
if not phrase:
phrase = m.get_phrase()
fmt_args.append(phrase)
if appended_clause:
appended_clause_match = fmt_args[1]
fmt_args = [a] + fmt_args[2:]
for i in range(len(fmt_args)):
if i in self.postproc:
fmt_args[i] = run_postprocessing(fmt_args[i], self.postproc[i], fmt_args)
output = self.gen_output(fmt_args)
if appended_clause:
output = appended_clause_match + ', ' + output
if run_fix_style:
output = fix_style(output)
return output
def gen_output(self, fmt_args):
"""By default, use self.out_pattern. Can be overridden."""
return self.out_pattern.format(*fmt_args)
def run_postprocessing(s, rules, all_args):
rule_list = rules.split(',')
for rule in rule_list:
if rule == 'lower':
s = s.lower()
elif rule.startswith('tense-'):
ind = int(rule[6:])
orig_vb = all_args[ind]
tenses = patten.tenses(orig_vb)
for tense in PATTERN_TENSES: # Prioritize by PATTERN_TENSES
if tense in tenses:
break
else: # Default to first tense
tense = PATTERN_TENSES[0]
s = patten.conjugate(s, tense)
elif rule in POS_TO_PATTERN:
s = patten.conjugate(s, POS_TO_PATTERN[rule])
return s
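# Illustrative usage (a sketch; assumes pattern.en is installed and conjugates as documented):
def _example_run_postprocessing():
    # 'tense-1' re-conjugates the string to match the tense of all_args[1] ('wrote' is past tense).
    print(run_postprocessing('write', 'tense-1', ['unused', 'wrote']))  # expected: 'wrote'
    # A plain POS rule maps through POS_TO_PATTERN, e.g. 'vbz' -> 3rd-person singular present.
    print(run_postprocessing('write', 'vbz', []))                       # expected: 'writes'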
class FindWHPRule(ConversionRule):
"""A rule that looks for $WHP's from right to left and does replacements."""
name = 'FindWHP'
def _recursive_convert(self, node, q, a, tokens, found_whp):
if node.word:
return node.word, found_whp
if not found_whp:
whp_phrase = convert_whp(node, q, a, tokens)
if whp_phrase:
return whp_phrase, True
child_phrases = []
for c in node.children[::-1]:
c_phrase, found_whp = self._recursive_convert(c, q, a, tokens, found_whp)
child_phrases.append(c_phrase)
out_toks = []
for i, p in enumerate(child_phrases[::-1]):
if i == 0 or p.startswith("'"):
out_toks.append(p)
else:
out_toks.append(' ' + p)
return ''.join(out_toks), found_whp
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
out_phrase, found_whp = self._recursive_convert(const_parse, q, a, tokens, False)
if found_whp:
if run_fix_style:
out_phrase = fix_style(out_phrase)
return out_phrase
return None
CONVERSION_RULES = [
ConstituencyRule('$WHP:what $Be $NP called that $VP', '{2} that {3} {1} called {1}'),
ConstituencyRule('how $JJ $Be $NP $IN $NP', '{3} {2} {0} {1} {4} {5}'),
ConstituencyRule('how $JJ $Be $NP $SBAR', '{3} {2} {0} {1} {4}'),
ConstituencyRule('how $JJ $Be $NP', '{3} {2} {0} {1}'),
ConstituencyRule('$WHP:when/where $Do $NP', '{3} occurred in {1}'),
ConstituencyRule('$WHP:when/where $Do $NP $Verb', '{3} {4} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP/$PP', '{3} {4} {5} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP $PP', '{3} {4} {5} {6} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Be $NP', '{3} {2} in {1}'),
ConstituencyRule('$WHP:when/where $Verb $NP $VP/$ADJP', '{3} {2} {4} in {1}'),
ConstituencyRule("$WHP:what/which/who $Do $NP do", '{3} {1}', {0: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb", '{3} {4} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $IN/$NP", '{3} {4} {5} {1}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $PP", '{3} {4} {1} {5}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $NP $VP", '{3} {4} {5} {6} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB", '{3} {4} to {5} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB $VP", '{3} {4} to {5} {1} {6}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $NP $IN $VP", '{3} {4} {5} {6} {1} {7}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP/$S/$VP/$SBAR/$SQ", '{3} {4} {1} {5}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP $PP/$S/$VP/$SBAR", '{3} {4} {1} {5} {6}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP of $NP $Verb/$Part $IN", '{3} of {4} {2} {5} {6} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $NP $IN", '{3} {2} {4} {5} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $VP/$IN", '{3} {2} {4} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $IN $NP/$VP", '{1} {2} {3} {4} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP $Verb $PP', '{3} {2} {4} {1} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP/$VP/$PP', '{1} {2} {3}'),
ConstituencyRule("$WHP:how $Be/$MD $NP $VP", '{3} {2} {4} by {1}'),
ConstituencyRule("$WHP:what/which/who $VP", '{1} {2}'),
ConstituencyRule('$IN what/which $NP $Do $NP $Verb $NP', '{5} {6} {7} {1} the {3} of {0}',
{1: 'lower', 6: 'tense-4'}),
ConstituencyRule('$IN what/which $NP $Be $NP $VP/$ADJP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
ConstituencyRule('$IN what/which $NP $Verb $NP/$ADJP $VP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
FindWHPRule(),
]
def fix_style(s):
"""Minor, general style fixes for questions."""
s = s.replace('?', '')
s = s.strip(' .')
if s[0] == s[0].lower():
s = s[0].upper() + s[1:]
return s + '.'
def match_pattern(pattern, const_parse):
pattern_toks = pattern.split(' ')
whole_phrase = const_parse.get_phrase()
if whole_phrase.endswith('?') or whole_phrase.endswith('.'):
pattern_toks.append(whole_phrase[-1])
matches = []
success = _recursive_match_pattern(pattern_toks, [const_parse], matches)
if success:
return matches
else:
return None
def _check_match(node, pattern_tok):
if pattern_tok in CONST_PARSE_MACROS:
pattern_tok = CONST_PARSE_MACROS[pattern_tok]
if ':' in pattern_tok:
lhs, rhs = pattern_tok.split(':')
match_lhs = _check_match(node, lhs)
if not match_lhs:
return False
phrase = node.get_phrase().lower()
retval = any(phrase.startswith(w) for w in rhs.split('/'))
return retval
elif '/' in pattern_tok:
return any(_check_match(node, t) for t in pattern_tok.split('/'))
return ((pattern_tok.startswith('$') and pattern_tok[1:] == node.tag) or
(node.word and pattern_tok.lower() == node.word.lower()))
CONST_PARSE_MACROS = {
'$Noun': '$NP/$NN/$NNS/$NNP/$NNPS',
'$Verb': '$VB/$VBD/$VBP/$VBZ',
'$Part': '$VBN/$VG',
'$Be': 'is/are/was/were',
'$Do': "do/did/does/don't/didn't/doesn't",
'$WHP': '$WHADJP/$WHADVP/$WHNP/$WHPP',
}
def convert_whp(node, q, a, tokens):
if node.tag in ('WHNP', 'WHADJP', 'WHADVP', 'WHPP'):
cur_phrase = node.get_phrase()
cur_tokens = tokens[node.get_start_index():node.get_end_index()]
for r in WHP_RULES:
phrase = r.convert(cur_phrase, a, cur_tokens, node, run_fix_style=False)
if phrase:
return phrase
return None
def _recursive_match_pattern(pattern_toks, stack, matches):
"""Recursively try to match a pattern, greedily."""
if len(matches) == len(pattern_toks):
return len(stack) == 0
if len(stack) == 0:
return False
cur_tok = pattern_toks[len(matches)]
node = stack.pop()
is_match = _check_match(node, cur_tok)
if is_match:
cur_num_matches = len(matches)
matches.append(node)
new_stack = list(stack)
success = _recursive_match_pattern(pattern_toks, new_stack, matches)
if success:
return True
while len(matches) > cur_num_matches:
matches.pop()
if not node.children:
return False
stack.extend(node.children[::-1])
return _recursive_match_pattern(pattern_toks, stack, matches)
class ReplaceRule(ConversionRule):
"""A simple rule that replaces some tokens with the answer."""
def __init__(self, target, replacement='{}', start=False):
self.target = target
self.replacement = str(replacement)
self.name = 'replace(%s)' % target
self.start = start
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
t_toks = self.target.split(' ')
q_toks = q.rstrip('?.').split(' ')
replacement_text = self.replacement.format(a)
for i in range(len(q_toks)):
if self.start and i != 0:
continue
if ' '.join(q_toks[i:i + len(t_toks)]).rstrip(',').lower() == self.target:
begin = q_toks[:i]
end = q_toks[i + len(t_toks):]
output = ' '.join(begin + [replacement_text] + end)
if run_fix_style:
output = fix_style(output)
return output
return None
class AnswerRule(ConversionRule):
"""Just return the answer."""
name = 'AnswerRule'
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
return a
WHP_RULES = [
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun', '{1} {0} {4}'),
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun $PP', '{1} {0} {4} {5}'),
ConstituencyRule('$IN what/which $NP', '{1} the {3} of {0}'),
ConstituencyRule('$IN $WP/$WDT', '{1} {0}'),
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun', '{0} {3}'),
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun $PP', '{0} {3} {4}'),
ConstituencyRule('what/which $NP', 'the {2} of {0}'),
ConstituencyRule('how many/much $NP', '{0} {2}'),
ReplaceRule('what'),
ReplaceRule('who'),
ReplaceRule('how many'),
ReplaceRule('how much'),
ReplaceRule('which'),
ReplaceRule('where'),
ReplaceRule('when'),
ReplaceRule('why'),
ReplaceRule('how'),
AnswerRule(),
]
def run_conversion(qas, corenlp_home):
    os.environ['CORENLP_HOME'] = corenlp_home
ret = list()
with CoreNLPClient(annotators=['tokenize','ssplit','pos','lemma','ner', 'parse'], timeout=30000, memory='16G', properties={'ssplit.eolonly': True, 'ssplit.newlineIsSentenceBreak': 'always', 'outputFormat':'json'}, endpoint='http://localhost:9001') as client:
for question, answer in tqdm(qas):
parse = client.annotate(question)['sentences'][0]
tokens = parse['tokens']
const_parse = read_const_parse(parse['parse'])
for rule in CONVERSION_RULES:
sent = rule.convert(question, answer, tokens, const_parse)
if sent:
ret.append([question, answer, sent])
break
else:
ret.append([question, answer, None])
return ret
def main():
qas = [["What is the current series where the new series began in June 2011?", "CB\u00b706\u00b7ZZ"], ["What is the format for South Australia?", "Snnn\u00b7aaa"]]
sents = run_conversion(qas, 'stanford-corenlp-full-2018-10-05')
print(sents)
if __name__ == '__main__':
main()
``` |
{
"source": "JiajunBao/neural-dimension-reduction",
"score": 2
} |
#### File: neural-dimension-reduction/examples/train.py
```python
from src.models.DenseNetwork.models import Net, Solver
from pathlib import Path
def train():
args = Solver.get_solver_arguments()
dim_in = 200
dim_out = 20
hidden_dims_list = [int(x) for x in args.hidden_dims_list.split('-')]
model = Net.from_scratch(dim_in, hidden_dims_list, dim_out, args.add_shortcut)
solver = Solver.from_scratch(model,
input_dir=args.input_dir,
output_dir=args.output_dir,
learning_rate=args.learning_rate,
n_epoch=args.n_epoch,
per_gpu_batch_size=args.per_gpu_batch_size,
weight_decay=args.weight_decay,
seed=args.seed,
top_k=args.top_k)
solver.fit(num_eval_per_epoch=args.num_eval_per_epoch)
if __name__ == '__main__':
train()
```
#### File: src/data/make_sample.py
```python
from argparse import ArgumentParser
import logging
from pathlib import Path
from src.data.utils import import_raw_data, export_processed_data
def downsample_data(df, num_rows, seed):
sampled_train_df = df.sample(n=num_rows, random_state=seed)
sampled_test_df = df.sample(n=num_rows // 3, random_state=(seed + 10))
return sampled_train_df, sampled_test_df
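# Illustrative usage (not part of the original module; a minimal sketch on a toy DataFrame):
def _example_downsample():
    import pandas as pd
    toy_df = pd.DataFrame({'a': range(100), 'b': range(100)})
    train_df, dev_df = downsample_data(toy_df, num_rows=30, seed=42)
    print(len(train_df), len(dev_df))  # expected: 30 10 (dev gets num_rows // 3 rows)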
def main():
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
# get arguments
parser = ArgumentParser(description='Arguments for dataset processing')
parser.add_argument('--input_path', type=Path, required=True, default=None,
help='the input path to the input data')
parser.add_argument('--output_dir', type=Path, required=True, default=None,
help='the output directory to save sampled data')
parser.add_argument('--num_rows', type=int, required=True, default=1000,
help='the number of rows to sample')
parser.add_argument('--seed', type=int, default=42,
help='the random seed of the whole process')
args = parser.parse_args()
logger.info(f'reading data from {args.input_path}')
df = import_raw_data(args.input_path)
sampled_train_df, sampled_test_df = downsample_data(df, args.num_rows, args.seed)
export_processed_data(sampled_train_df, args.output_dir / 'sample' / 'train.csv')
export_processed_data(sampled_test_df, args.output_dir / 'sample' / 'dev.csv')
logger.info(f'saved data at {args.output_dir / "sample"}')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
```
#### File: models/DenseNetwork/models.py
```python
import argparse
import os
import random
from collections import OrderedDict
from pathlib import Path
import numpy
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from runx.logx import logx
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from src.models.DenseNetwork.loss import kl_div_add_mse_loss, input_inverse_similarity, output_inverse_similarity, \
nearest_neighbors
# TOP_K = 20
class VecDataSet(Dataset):
def __init__(self, x, top_k):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# device = 'cpu'
self.anchor_idx, self.q, self.ground_min_dist_square, self.topk_dists = \
self.precomputing(x, top_k=top_k, device=device)
self.top_k = top_k
self.x = x.cpu()
@classmethod
def from_df(cls, path_to_dataframe, top_k):
print(f'Generate dataset top_k = {top_k}')
x = torch.from_numpy(pd.read_csv(path_to_dataframe).to_numpy()).to(torch.float32)
return cls(x, top_k)
@classmethod
def from_dataset(cls, path_to_tensor):
return torch.load(path_to_tensor)
@staticmethod
def precomputing(x, top_k, device):
"""
compute ground true nearest neighbors
:param x:
:param top_k: top-k neighbors that are considered
:param device: device used during computation
        :return: anchor_idx: each point has m points as anchors (in this case, we pick the m nearest neighbors of x as anchors)
q: input_similarity
"""
ground_min_dist_square, anchor_idx, topk_dists = nearest_neighbors(x, top_k, device)
q = input_inverse_similarity(x.to(device),
anchor_idx=anchor_idx, # (n, n - 1)
min_dist_square=ground_min_dist_square.to(device)).cpu()
return anchor_idx, q, ground_min_dist_square, topk_dists
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
return self.x[idx]
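# Illustrative usage (a sketch with random data; not part of the original training pipeline):
def _example_vec_dataset():
    x = torch.randn(100, 50)           # 100 points in 50 dimensions
    dataset = VecDataSet(x, top_k=10)  # precomputes anchors and input similarities (GPU if available)
    print(len(dataset), tuple(dataset.anchor_idx.shape), tuple(dataset.q.shape))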
class Net(nn.Module):
def __init__(self, hidden_layers: nn.ModuleList, model_construct_dict: dict,
shortcut_layers: nn.ModuleList, block_size: int):
super(Net, self).__init__()
self.hidden_layers = hidden_layers
self.model_construct_dict = model_construct_dict
self.shortcut_layers = shortcut_layers
self.block_size = block_size
@classmethod
def from_scratch(cls, dim_in, hidden_dims_list, dim_out, add_shortcut: bool):
"""
        Build one nn.Linear per step of [dim_in] + hidden_dims_list -> hidden_dims_list + [dim_out],
        optionally adding a linear shortcut connection for every `block_size`-layer block.
"""
in_dims = [dim_in] + hidden_dims_list
out_dims = hidden_dims_list + [dim_out]
hidden_layers = nn.ModuleList(
[nn.Linear(in_features=i, out_features=o) for i, o in zip(in_dims, out_dims)])
model_construct_dict = {
'dim_in': dim_in,
'hidden_dims_list': hidden_dims_list,
'dim_out': dim_out,
}
shortcut_layers = None
block_size = 4
if add_shortcut:
tmp = list()
for i in range(len(in_dims) // block_size):
tmp.append(nn.Linear(in_features=in_dims[i * block_size], out_features=out_dims[(i + 1) * block_size - 1]))
if len(tmp) > 0:
shortcut_layers = nn.ModuleList(tmp)
return cls(hidden_layers, model_construct_dict, shortcut_layers, block_size)
@classmethod
def from_pretrained(cls, path_to_checkpoints):
        checkpoints = torch.load(path_to_checkpoints)
        # model_construct_dict stores the from_scratch arguments (dim_in, hidden_dims_list, dim_out);
        # whether shortcut connections were used is inferred from the saved parameter names.
        add_shortcut = any(k.startswith('shortcut_layers') for k in checkpoints['model_state_dict'])
        model = cls.from_scratch(add_shortcut=add_shortcut, **checkpoints['model_construct_dict'])
model.load_state_dict(checkpoints['model_state_dict'])
model.eval()
return model
def forward(self, x):
"""
        Run the input through the hidden layers. Without shortcuts, each layer is
        Linear followed by ReLU; with shortcuts, each block of `block_size` Linear
        layers is summed with a linear projection of the block's input.
"""
out = x
if self.shortcut_layers is None:
for layer in self.hidden_layers:
out = F.relu(layer.forward(out))
else:
block_out = x
for block_idx in range(len(self.hidden_layers) // self.block_size):
for layer_idx in range(block_idx * self.block_size, (block_idx + 1) * self.block_size):
out = self.hidden_layers[layer_idx].forward(out)
out = out + self.shortcut_layers[block_idx].forward(block_out)
block_out = out
return out
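# Illustrative usage (a sketch): build a small plain MLP and check the output shape.
def _example_net_forward():
    net = Net.from_scratch(dim_in=200, hidden_dims_list=[128, 64], dim_out=20, add_shortcut=False)
    x = torch.randn(8, 200)
    print(net(x).shape)  # expected: torch.Size([8, 20])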
class Solver(object):
def __init__(self, input_dir, output_dir, model, device, per_gpu_batch_size, n_gpu, batch_size, learning_rate,
weight_decay, n_epoch, seed, top_k, **kwargs):
# construct param dict
self.construct_param_dict = OrderedDict({
"input_dir": str(input_dir),
"output_dir": str(output_dir),
"learning_rate": learning_rate,
"n_epoch": n_epoch,
"per_gpu_batch_size": per_gpu_batch_size,
"weight_decay": weight_decay,
"seed": seed,
"top_k": top_k,
})
# build log
logx.initialize(logdir=output_dir,
coolname=True,
tensorboard=True,
no_timestamp=False,
hparams={"solver_construct_dict": self.construct_param_dict,
"model_construct_dict": model.model_construct_dict},
eager_flush=True)
# arguments
self.record_training_loss_per_epoch = kwargs.pop("record_training_loss_per_epoch", False)
self.input_dir = input_dir
self.output_dir = output_dir
self.top_k = top_k
# training utilities
self.model = model
# data utilities
self.train_dataloader = kwargs.pop("train_dataloader", None)
self.dev_dataloader = kwargs.pop("dev_dataloader", None)
self.batch_size = batch_size
self.n_epoch = n_epoch
self.seed = seed
# device
self.device = device
self.n_gpu = n_gpu
logx.msg(f'Number of GPU: {self.n_gpu}.')
self.criterion = kl_div_add_mse_loss
# optimizer and scheduler
if self.train_dataloader:
self.optimizer, self.scheduler = self.get_optimizer(named_parameters=self.model.named_parameters(),
learning_rate=learning_rate,
weight_decay=weight_decay,
train_dataloader=self.train_dataloader,
n_epoch=n_epoch)
# set up random seeds and model location
self.setup()
@classmethod
def from_scratch(cls, model, input_dir, output_dir, learning_rate, n_epoch,
per_gpu_batch_size, weight_decay, seed, top_k):
# check the validity of the directory
if os.path.exists(output_dir) and os.listdir(output_dir):
raise ValueError(f"Output directory ({output_dir}) already exists "
"and is not empty")
output_dir.mkdir(parents=True, exist_ok=True)
# data utilities
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
n_gpu = torch.cuda.device_count()
batch_size = per_gpu_batch_size * max(1, n_gpu)
# dataloader
train_dataloader = cls.get_train_dataloader(input_dir, batch_size, top_k)
dev_dataloader = cls.get_dev_dataloader(input_dir, batch_size, top_k)
return cls(input_dir, output_dir, model, device, per_gpu_batch_size, n_gpu, batch_size, learning_rate,
weight_decay, n_epoch, seed, top_k, train_dataloader=train_dataloader, dev_dataloader=dev_dataloader)
@classmethod
def from_pretrained(cls, model_constructor, pretrained_system_name_or_path, resume_training=False,
input_dir=None, output_dir=None, top_k=None, **kwargs):
# load checkpoints
checkpoint = torch.load(pretrained_system_name_or_path)
        meta = {k: v for k, v in checkpoint.items() if k != 'model_state_dict'}
# load model
model = model_constructor.from_pretrained(pretrained_system_name_or_path) #
# load arguments
solver_args = meta["solver_construct_params_dict"]
solver_args["model"] = model
# update some parameters
solver_args["device"] = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
solver_args["n_gpu"] = torch.cuda.device_count()
old_batch_size = solver_args["per_gpu_batch_size"] * max(1, solver_args["n_gpu"])
solver_args["batch_size"] = kwargs.pop("batch_size", old_batch_size)
# load dataset
if resume_training:
if input_dir is None or output_dir is None or top_k is None:
raise AssertionError("Either input_dir and output_dir (for resuming) is None!")
solver_args["input_dir"] = input_dir
solver_args["output_dir"] = output_dir
solver_args["train_dataloader"] = cls.get_train_dataloader(input_dir, solver_args["batch_size"], top_k)
solver_args["dev_dataloader"] = cls.get_dev_dataloader(input_dir, solver_args["batch_size"], top_k)
solver_args["top_k"] = top_k
return cls(**solver_args)
def fit(self, num_eval_per_epoch=5):
steps_per_eval = len(self.train_dataloader) // num_eval_per_epoch
steps_per_eval = steps_per_eval if steps_per_eval > 0 else 1
self.train(steps_per_eval)
# test_dataloader = self.get_test_dataloader(self.input_dir, self.batch_size)
# mean_loss, metrics_scores = self.validate(test_dataloader)
# logx.msg("Scores on test set: ")
# logx.msg(str(metrics_scores))
def setup(self):
# put onto cuda
self.model = self.model.to(self.device)
if self.n_gpu > 1:
self.model = torch.nn.DataParallel(self.model)
# fix random seed
self.fix_random_seed()
def fix_random_seed(self):
# Set seed
random.seed(self.seed)
numpy.random.seed(self.seed)
torch.manual_seed(self.seed)
if self.n_gpu > 0:
torch.cuda.manual_seed_all(self.seed)
def train(self, steps_per_eval):
# TensorBoard
for epoch_idx in tqdm(range(self.n_epoch)):
self.__train_per_epoch(epoch_idx, steps_per_eval)
def validate(self, dataloader):
outputs = self.__forward_batch_plus(dataloader)
metrics_scores, p = self.get_scores(q=dataloader.dataset.q,
output_embeddings=outputs,
anchor_idx=dataloader.dataset.anchor_idx,
ground_min_dist_square=dataloader.dataset.ground_min_dist_square)
return outputs, metrics_scores, p
def __train_per_epoch(self, epoch_idx, steps_per_eval):
# with tqdm(total=len(self.train_dataloader), desc=f"Epoch {epoch_idx}") as pbar:
for batch_idx, batch in enumerate(self.train_dataloader):
# assume that the whole input matrix fits the GPU memory
global_step = epoch_idx * len(self.train_dataloader) + batch_idx
training_set_loss, training_set_outputs, training_set_p = self.__training_step(batch)
if batch_idx + 1 == len(self.train_dataloader):
# validate and save checkpoints
developing_set_outputs, developing_set_metrics_scores, developing_set_p = \
self.validate(self.dev_dataloader)
# TODO: this part can be optimized to batchwise computing
if self.record_training_loss_per_epoch:
training_set_metrics_scores, training_set_p = \
self.get_scores(q=self.train_dataloader.dataset.q,
output_embeddings=training_set_outputs,
anchor_idx=self.train_dataloader.dataset.anchor_idx,
ground_min_dist_square=self.train_dataloader.dataset.ground_min_dist_square)
                    training_set_metrics_scores['train_p'] = training_set_p.cpu()
else:
training_set_metrics_scores = dict()
training_set_metrics_scores['tr_loss'] = training_set_loss.item()
if self.scheduler:
training_set_metrics_scores['learning_rate'] = self.scheduler.get_last_lr()[0]
logx.metric('train', training_set_metrics_scores, global_step)
logx.metric('val', developing_set_metrics_scores, global_step)
if self.n_gpu > 1:
save_dict = {"model_construct_dict": self.model.model_construct_dict,
"model_state_dict": self.model.module.state_dict(),
"solver_construct_params_dict": self.construct_param_dict,
"optimizer": self.optimizer.state_dict(),
"train_metrics_scores": training_set_metrics_scores,
"train_output_embeddings": training_set_outputs.cpu(),
"train_q": self.train_dataloader.dataset.q.cpu(),
"train_anchor_idx": self.train_dataloader.dataset.anchor_idx.cpu(),
"dev_metrics_scores": developing_set_metrics_scores,
"dev_output_embeddings": developing_set_outputs.cpu(),
"dev_q": self.dev_dataloader.dataset.q.cpu(),
"dev_p": developing_set_p.cpu(),
"dev_anchor_idx": self.dev_dataloader.dataset.anchor_idx.cpu()}
else:
save_dict = {"model_construct_dict": self.model.model_construct_dict,
"model_state_dict": self.model.state_dict(),
"solver_construct_params_dict": self.construct_param_dict,
"optimizer": self.optimizer.state_dict(),
"train_metrics_scores": training_set_metrics_scores,
"train_output_embeddings": training_set_outputs.cpu(),
"train_q": self.train_dataloader.dataset.q.cpu(),
"train_anchor_idx": self.train_dataloader.dataset.anchor_idx.cpu(),
"dev_metrics_scores": developing_set_metrics_scores,
"dev_output_embeddings": developing_set_outputs.cpu(),
"dev_q": self.dev_dataloader.dataset.q.cpu(),
"dev_p": developing_set_p.cpu(),
"dev_anchor_idx": self.dev_dataloader.dataset.anchor_idx.cpu()}
logx.save_model(save_dict,
metric=developing_set_metrics_scores['Recall@1'],
epoch=global_step,
higher_better=True)
# pbar.update(1)
def batch_to_device(self, batch):
return batch.to(self.device)
def __training_step(self, batch):
"""
a single forwarding step for training
:param self: a solver
:param batch: a batch of input for model
:return: training loss for this batch
"""
self.model.zero_grad() # reset gradient
self.model.train()
outputs = self.__forwarding_step(batch)
# p = input_inverse_similarity(x=outputs.to(self.device),
# anchor_idx=self.train_dataloader.dataset.anchor_idx.to(self.device),
# min_dist_square=self.train_dataloader.dataset.ground_min_dist_square.to(self.device),
# approximate_min_dist=False).cpu()
p = output_inverse_similarity(y=outputs.to(self.device),
anchor_idx=self.train_dataloader.dataset.anchor_idx.to(self.device)).cpu()
loss = self.criterion(p.to(self.device),
self.train_dataloader.dataset.q.to(self.device), lam=1)
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
loss.backward()
# pbar.set_postfix_str(f"tr_loss: {loss.item():.5f}")
# update weights
self.optimizer.step()
# self.scheduler.step() # Update learning rate schedule
return loss.cpu().detach(), outputs.cpu().detach(), p.cpu().detach()
def __forwarding_step(self, batch):
"""
a single forwarding pass
e.g.
meta_features, input_ids, input_mask, segment_ids, labels = batch
batch_input = {'meta_features': meta_features.to(self.device),
'input_ids': input_ids.to(self.device),
'attention_mask': input_mask.to(self.device),
'token_type_ids': segment_ids.to(self.device),
'labels': labels}
logits = self.model(**batch_input)
return logits, labels
:param self: a solver
:param batch: a batch of input for model
:return: logits and ground true label for this batch
"""
batch_inputs = self.batch_to_device(batch)
outputs = self.model(batch_inputs)
return outputs.cpu()
@staticmethod
def static_get_scores(q, output_embeddings, anchor_idx, device, criterion, top_k, ground_min_dist_square):
"""
:param q: torch.tensor (n, ) input similarity
:param output_embeddings: torch.tensor (n, d2) output embeddings from the network
:param anchor_idx: (n, m) each point has m points as anchors
:param device: device for computation
:param criterion: evaluation criterion
:param top_k: the top-k considered
:return:
"""
scores = dict()
# calculate loss
# p = input_inverse_similarity(x=output_embeddings.to(device),
# anchor_idx=anchor_idx,
# min_dist_square=ground_min_dist_square.to(device),
# approximate_min_dist=False).cpu()
p = output_inverse_similarity(y=output_embeddings.to(device),
anchor_idx=anchor_idx).cpu()
scores['loss'] = criterion(p.to(device), q.to(device), lam=1).cpu().detach().item()
# recalls
_, topk_neighbors, _ = nearest_neighbors(x=output_embeddings, top_k=max(20, top_k), device=device)
ground_nn = anchor_idx[:, 0].unsqueeze(dim=1)
for r in [1, 5, 10, 20]: # r should < 20
top_predictions = topk_neighbors[:, :r] # (n, r)
scores[f'Recall@{r}'] = \
torch.sum(top_predictions == ground_nn, dtype=torch.float).item() / ground_nn.shape[0]
return scores, p
def get_scores(self, q, output_embeddings, anchor_idx, ground_min_dist_square=None):
"""
:param q: torch.tensor (n, ) input similarity
:param output_embeddings: torch.tensor (n, d2) output embeddings from the network
:param anchor_idx: (n, m) each point has m points as anchors
:return:
"""
return self.static_get_scores(q, output_embeddings, anchor_idx, self.device, self.criterion,
self.top_k, ground_min_dist_square)
def __forward_batch_plus(self, dataloader, verbose=False):
preds_list = list()
if verbose:
with tqdm(total=len(dataloader), desc=f"Evaluating: ") as pbar:
with torch.no_grad():
for batch_idx, batch in enumerate(dataloader):
outputs = self.__forwarding_step(batch)
preds_list.append(outputs)
pbar.update(1)
else:
with torch.no_grad():
for batch_idx, batch in enumerate(dataloader):
outputs = self.__forwarding_step(batch)
preds_list.append(outputs)
# collect the whole chunk
reduced_embeddings = torch.cat(preds_list, dim=0)
return reduced_embeddings
@classmethod
def get_train_dataloader(cls, input_dir, batch_size, top_k):
return cls.__set_dataset(input_dir, 'train', batch_size, top_k)
@classmethod
def get_dev_dataloader(cls, input_dir, batch_size, top_k):
return cls.__set_dataset(input_dir, 'dev', batch_size, top_k)
@classmethod
def get_test_dataloader(cls, input_dir, batch_size, top_k):
return cls.__set_dataset(input_dir, 'test', batch_size, top_k)
@classmethod
def __set_dataset(cls, input_dir, split_name, batch_size, top_k):
encoded_data_path = input_dir / f'{split_name}.pth.tar'
if encoded_data_path.is_file():
dataset = torch.load(encoded_data_path)
print(f'load dataset from {encoded_data_path}')
if dataset.top_k >= top_k and dataset.top_k >= 20:
return DataLoader(dataset, shuffle=False, batch_size=batch_size, pin_memory=True)
else:
print(f'inconsistent top_k: {dataset.top_k} vs {top_k}')
dataset = VecDataSet.from_df(input_dir / f'{split_name}.csv', max(top_k, 20))
torch.save(dataset, encoded_data_path)
print(f'construct dataset from dataframe and save dataset at ({encoded_data_path})')
return DataLoader(dataset, shuffle=False, batch_size=batch_size, pin_memory=True)
# def infer(self, data_path):
# data_path = Path(data_path)
# dataset = cls.__set_dataset(data_, 'test', batch_size)
# dataloader = DataLoader(dataset, shuffle=False, batch_size=self.batch_size)
# preds, golds = self.__forward_batch_plus(dataloader, verbose=True)
# return preds, golds
@staticmethod
def get_optimizer(named_parameters, learning_rate, weight_decay, train_dataloader, n_epoch):
"""
get the optimizer and the learning rate scheduler
:param named_parameters:
:param learning_rate:
:param weight_decay:
:param train_dataloader:
:param n_epoch:
:return:
"""
# Prepare optimizer and schedule (linear warm-up and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in named_parameters if not any(
nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': weight_decay},
{'params': [p for n, p in named_parameters if any(
nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}
]
optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=learning_rate, weight_decay=weight_decay)
'''
# get a linear scheduler
num_steps_epoch = len(train_dataloader)
ReduceLROnPlateau(self.optimizer, 'min')
num_train_optimization_steps = int(num_steps_epoch * n_epoch) + 1
warmup_steps = 100
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=num_train_optimization_steps)
'''
return optimizer, None
@staticmethod
def get_solver_arguments():
parser = argparse.ArgumentParser(description='Arguments for Eigenmetric Regression')
# model parameters
# solver parameters
parser.add_argument('--input_dir', type=Path, default=None,
help='the input directory to the input data')
parser.add_argument('--output_dir', type=Path, default=None,
help='the output directory for saving the regressor')
parser.add_argument('--learning_rate', type=float, default=1e-5,
help='learning rate for training')
parser.add_argument('--n_epoch', type=int, default=3,
help='the number of epochs for training')
parser.add_argument('--num_eval_per_epoch', type=int, default=5,
help='number of evaluation per epoch')
parser.add_argument('--per_gpu_batch_size', type=int, default=32,
help='the batch size per gpu')
parser.add_argument('--weight_decay', type=float, default=1e-6,
help='weight_decay for the optimizer (l2 regularization)')
parser.add_argument('--seed', type=int, default=42,
help='the random seed of the whole process')
parser.add_argument('--top_k', type=int, default=20,
help='the top-k nearest neighbors that are considered.')
parser.add_argument('--hidden_dims_list', type=str,
help='list of hidden dimensions')
parser.add_argument('--add_shortcut', type=bool, default=False,
help='whether to add shortcut connections')
args = parser.parse_args()
return args
```
#### File: src/models/distance_modeling.py
```python
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
import pandas as pd
from torch.utils.data import Dataset
from tqdm.auto import tqdm
STABLE_FACTOR = 1e-8
def far_func(sorted_dist: torch.tensor, indices: torch.tensor):
return sorted_dist[:, -1].view(-1, 1), indices[:, -1].view(-1, 1)
def close_func(sorted_dist: torch.tensor, indices: torch.tensor):
return sorted_dist[:, 1].view(-1, 1), indices[:, 1].view(-1, 1)
def calculate_distance(x, close_fn, far_fn):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
batch_size = 512
x_device = x.to(device)
if x.shape[0] * x.shape[1] < batch_size * 200: # direct computes the whole matrix
        # TODO: we assume here that the full pairwise-distance matrix fits in memory; otherwise the batched branch below is used.
dist = torch.cdist(x1=x_device, x2=x_device, p=2) # (n, n)
sorted_dist, indices = torch.sort(dist, dim=1, descending=False)
sorted_dist, indices = sorted_dist.cpu(), indices.cpu()
anchor_idx = torch.arange(x.shape[0]) # (n,)
# the 0-th column is the distance to oneself
close_distance, close_idx = close_fn(sorted_dist, indices) # (n, r)
far_distance, far_idx = far_fn(sorted_dist, indices) # (n, r)
else:
num_iter = x.shape[0] // batch_size + 1
anchor_idx_list, close_idx_list, far_idx_list = list(), list(), list()
close_distance_list, far_distance_list = list(), list()
for i in tqdm(torch.arange(num_iter), desc='create triplets'):
batch_x = x[i * batch_size: (i + 1) * batch_size, :].to(device)
dist = torch.cdist(x1=batch_x, x2=x_device, p=2) # (n, n)
sorted_dist, indices = torch.sort(dist, dim=1, descending=False)
sorted_dist, indices = sorted_dist, indices
anchor_idx = torch.arange(i * batch_size, i * batch_size + batch_x.shape[0]) # (n,)
# assert torch.equal(anchor_idx, indices[:, 0].cpu())
# the 0-th column is the distance to oneself
close_distance, close_idx = close_fn(sorted_dist, indices) # (n,)
far_distance, far_idx = far_fn(sorted_dist, indices) # (n, r)
anchor_idx_list.append(anchor_idx.cpu())
close_idx_list.append(close_idx.cpu())
far_idx_list.append(far_idx.cpu())
close_distance_list.append(close_distance.cpu())
far_distance_list.append(far_distance.cpu())
anchor_idx = torch.cat(anchor_idx_list, dim=0)
close_idx = torch.cat(close_idx_list, dim=0)
far_idx = torch.cat(far_idx_list, dim=0)
close_distance = torch.cat(close_distance_list, dim=0)
far_distance = torch.cat(far_distance_list, dim=0)
return anchor_idx, close_idx, far_idx, close_distance, far_distance
def make_pairs(x, close_fn, far_fn):
anchor_idx, close_idx, far_idx, close_distance, far_distance = calculate_distance(x, close_fn, far_fn)
n, r = close_idx.shape
anchor_idx_flatten = anchor_idx.reshape(-1, 1).expand(-1, r).reshape(-1, 1) # (n * r, )
close_idx = close_idx.reshape(-1, 1) # (n * r, 1)
positive_pairs = torch.cat((anchor_idx_flatten, close_idx), dim=1) # (n, 2)
positive_labels = torch.ones(n * r, dtype=torch.int64) # (n, )
n, r = far_idx.shape
far_idx = far_idx.reshape(-1, 1) # (n * r, )
anchor_idx_flatten = anchor_idx.reshape(-1, 1).expand(-1, r).reshape(-1, 1) # (n * r, )
negative_pairs = torch.cat((anchor_idx_flatten, far_idx), dim=1) # (n * r, 2)
negative_labels = torch.zeros(n * r, dtype=torch.int64) # (n * r, )
pairs = torch.cat((positive_pairs, negative_pairs), dim=0)
labels = torch.cat((positive_labels, negative_labels), dim=0)
return pairs, labels, close_distance, far_distance
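# Illustrative usage (a sketch with random data): one positive and one negative pair per point.
def _example_make_pairs():
    x = torch.randn(30, 8)
    pairs, labels, close_d, far_d = make_pairs(x, close_func, far_func)
    print(pairs.shape, labels.shape)  # expected: torch.Size([60, 2]) torch.Size([60])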
class SurveyorDataSet(Dataset):
def __init__(self, data, pairs, labels, q):
self.data = data
self.pairs = pairs
self.labels = labels
self.q = q
@classmethod
def from_df(cls, path_to_dataframe, close_fn=close_func, far_fn=far_func):
data = torch.from_numpy(pd.read_csv(path_to_dataframe, header=None).to_numpy()).to(torch.float32)
pairs, labels, close_distance, far_distance = make_pairs(data, close_fn, far_fn)
if close_distance[pairs[:, 0]].shape[1] == 1:
q = thesis_input_inverse_similarity(data[pairs[:, 0]],
data[pairs[:, 1]],
close_distance[pairs[:, 0]].reshape(-1),
close_distance[pairs[:, 1]].reshape(-1))
else:
q = thesis_input_inverse_similarity(data[pairs[:, 0]],
data[pairs[:, 1]],
close_distance[pairs[:, 0], 0].reshape(-1),
close_distance[pairs[:, 1], 0].reshape(-1))
return cls(data, pairs, labels, q)
@classmethod
def from_dataset(cls, path_to_tensor):
return torch.load(path_to_tensor)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
indexs = self.pairs[idx]
left = self.data[indexs[0]]
right = self.data[indexs[1]]
return left, right, self.labels[idx], self.q[idx]
class Surveyor(nn.Module):
def __init__(self, dim_in=200, dim_out=20):
super(Surveyor, self).__init__()
self.encoder = nn.Sequential(
OrderedDict([
('bn0', nn.BatchNorm1d(dim_in)),
('relu0', nn.ReLU(inplace=True)),
('fc0', nn.Linear(dim_in, 500)),
('bn1', nn.BatchNorm1d(500)),
('relu1', nn.ReLU(inplace=True)),
('fc1', nn.Linear(500, 100)),
('bn2', nn.BatchNorm1d(100)),
('relu2', nn.ReLU(inplace=True)),
('fc2', nn.Linear(100, 20)),
('bn3', nn.BatchNorm1d(20)),
('relu3', nn.ReLU(inplace=True)),
('fc3', nn.Linear(20, 20)),
('bn4', nn.BatchNorm1d(20)),
('relu4', nn.ReLU(inplace=True)),
('fc4', nn.Linear(20, 20)),
('bn5', nn.BatchNorm1d(20)),
('relu5', nn.ReLU(inplace=True)),
('fc5', nn.Linear(20, dim_out)),
])
)
self.decoder = nn.Sequential(
OrderedDict([
('bn1', nn.BatchNorm1d(2 * dim_out)),
('relu1', nn.ReLU()),
                ('fc1', nn.Linear(2 * dim_out, 20)),
('bn2', nn.BatchNorm1d(20)),
('relu2', nn.ReLU()),
('fc2', nn.Linear(20, 2)),
]))
def encode_batch(self, x):
return self.encoder(x)
def decode_batch(self, out1, out2):
p = thesis_output_inverse_similarity(out1, out2)
x = torch.cat((out1, out2), dim=1)
out = self.decoder(x)
logits = F.softmax(out, dim=1)
return logits, p
def forward(self, x1, x2, q, labels=None, lam=1):
out1 = self.encode_batch(x1)
out2 = self.encode_batch(x2)
logits, p = self.decode_batch(out1, out2)
if labels is not None:
loss_fn = nn.CrossEntropyLoss()
loss = loss_fn(logits, labels) + lam * thesis_kl_div_add_mse_loss(p, q)
return logits, p, out1, out2, loss
return logits, p, out1, out2
def thesis_output_inverse_similarity(y1, y2):
dout = torch.sum((y1 - y2) ** 2, dim=1)
return 1 / (dout + 1)
def thesis_input_inverse_similarity(x1, x2, x1_min_dist, x2_min_dist):
din = torch.sum((x1 - x2) ** 2, dim=1)
q1 = 1 / ((din / (x1_min_dist ** 2)) + STABLE_FACTOR)
q2 = 1 / ((din / (x2_min_dist ** 2)) + STABLE_FACTOR)
return (q1 + q2) / 2
def thesis_kl_div_add_mse_loss(p, q, lam=1):
"""
calculate the sum of kl divergence and mse loss
:param p: p in the formula (P20-2) output similarities
:param q: q in the formula (P20-2) input similarities
:param lam: the constant that balances the influence of two losses
:return: torch.tensor of the shape (,)
"""
return torch.sum(p * torch.log(p / q)) + lam * torch.sum((p - q) ** 2)
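# Illustrative usage (a sketch on toy tensors; the minimum-distance values below are hypothetical):
def _example_thesis_loss():
    x1, x2 = torch.randn(5, 200), torch.randn(5, 200)   # input-space pairs
    y1, y2 = torch.randn(5, 20), torch.randn(5, 20)     # output-space (embedded) pairs
    min1, min2 = torch.rand(5) + 0.1, torch.rand(5) + 0.1
    q = thesis_input_inverse_similarity(x1, x2, min1, min2)
    p = thesis_output_inverse_similarity(y1, y2)
    print(thesis_kl_div_add_mse_loss(p, q, lam=1))       # scalar loss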
class RetrieveSystem(object):
def __init__(self, distance_measure):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
distance_measure = distance_measure.to(self.device)
self.distance_measure = distance_measure
def retrieve_query(self, query, ignore_idx, x_embedded, x_idx, topk=20):
query_device = query.reshape(1, -1).to(self.device)
cls_distances = list()
p_distances = list()
with torch.no_grad():
for i, x in zip(x_idx, x_embedded):
if ignore_idx is not None and i == ignore_idx:
continue
x_device = x.reshape(1, -1).to(self.device)
logits, p = self.distance_measure.decode_batch(query_device, x_device)
cls_distances.append(logits[:, 1].item())
p_distances.append(p.item())
cls_distances = torch.tensor(cls_distances)
p_distances = torch.tensor(p_distances)
_, cls_nn_idx = cls_distances.sort(descending=True)
_, p_nn_idx = p_distances.sort(descending=True)
return cls_nn_idx[:topk], p_nn_idx[:topk]
def retrieve_corpus(self, corpus, block_list, database):
cls_pred_nn_top, p_distances_nn_top = list(), list()
x_idx = range(database.shape[0])
for ignore_idx, query in tqdm(zip(block_list, corpus), total=len(block_list), desc='retrieve each query'):
cls_distances, p_distances = self.retrieve_query(query, ignore_idx, database, x_idx, 20)
cls_pred_nn_top.append(cls_distances.reshape(1, -1))
p_distances_nn_top.append(p_distances.reshape(1, -1))
cls_pred_nn_top = torch.cat(cls_pred_nn_top, dim=0)
p_distances_nn_top = torch.cat(p_distances_nn_top, dim=0)
return cls_pred_nn_top, p_distances_nn_top
def recall(self, pred, gold, at_n=None):
results = dict()
if at_n is None:
at_n = [1, 5, 10, 20]
for n in at_n:
recall = float((pred[:, :n] == gold.reshape(-1, 1)).sum().item()) / len(gold)
results[f'recall@{n}'] = recall
return results
```
#### File: models/level_kv_div/network.py
```python
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
class EmbeddingNet(nn.Module):
def __init__(self):
super(EmbeddingNet, self).__init__()
self.fc = nn.Sequential(
OrderedDict([
('bn0', nn.BatchNorm1d(128)),
('relu0', nn.ReLU(inplace=True)),
('fc0', nn.Linear(128, 128)),
('bn1', nn.BatchNorm1d(128)),
('relu1', nn.ReLU(inplace=True)),
('fc1', nn.Linear(128, 64)),
('bn2', nn.BatchNorm1d(64)),
('relu2', nn.ReLU(inplace=True)),
('fc2', nn.Linear(64, 64)),
('bn3', nn.BatchNorm1d(64)),
('relu3', nn.ReLU(inplace=True)),
('fc3', nn.Linear(64, 64)),
('bn4', nn.BatchNorm1d(64)),
('relu4', nn.ReLU(inplace=True)),
('fc4', nn.Linear(64, 32)),
('bn5', nn.BatchNorm1d(32)),
('relu5', nn.ReLU(inplace=True)),
('fc5', nn.Linear(32, 32)),
('bn6', nn.BatchNorm1d(32)),
('relu6', nn.ReLU(inplace=True)),
('fc6', nn.Linear(32, 16)),
('bn7', nn.BatchNorm1d(16)),
('relu7', nn.ReLU(inplace=True)),
('fc7', nn.Linear(16, 16)),
('bn8', nn.BatchNorm1d(16)),
])
)
def forward(self, x):
output = self.fc(x)
return output
def get_embedding(self, x):
return self.forward(x)
class EmbeddingNetL2(EmbeddingNet):
def __init__(self):
super(EmbeddingNetL2, self).__init__()
def forward(self, x):
output = super(EmbeddingNetL2, self).forward(x)
        # avoid the in-place division, which breaks autograd for the preceding pow/sqrt ops
        output = output / output.pow(2).sum(1, keepdim=True).sqrt()
return output
def get_embedding(self, x):
return self.forward(x)
class SiameseNet(nn.Module):
def __init__(self, embedding_net):
super(SiameseNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
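# Illustrative usage (a sketch; eval() keeps the BatchNorm layers on their initial running stats):
def _example_siamese_forward():
    import torch
    model = SiameseNet(EmbeddingNet())
    model.eval()
    out1, out2 = model(torch.randn(4, 128), torch.randn(4, 128))
    print(out1.shape, out2.shape)  # expected: torch.Size([4, 16]) twice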
```
#### File: models/tSNE/tSNE.py
```python
from sklearn.manifold import TSNE
from argparse import ArgumentParser
from pathlib import Path
import joblib
import torch
from src.data.utils import import_raw_data
from src.models.DenseNetwork.loss import nearest_neighbors, input_inverse_similarity, kl_div_add_mse_loss
from src.models.DenseNetwork.models import Solver
import os
def main():
    parser = ArgumentParser(description='Arguments for the t-SNE baseline')
parser.add_argument('--input_path', type=Path, required=True, default=None,
help='the input path to the input data')
parser.add_argument('--output_dir', type=Path, required=True, default=None,
help='the output directory to save sampled data')
    parser.add_argument('--n_iter', type=int, required=True, default=2000,
                        help='the number of t-SNE optimization iterations')
parser.add_argument('--dim_out', type=int, default=20,
help='n components to remain')
parser.add_argument('--perplexity', type=int, default=40,
help='perplexity')
parser.add_argument('--seed', type=int, default=42,
help='the random seed of the whole process')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError(f"Output directory ({args.output_dir}) already exists "
"and is not empty")
input_embeddings = import_raw_data(args.input_path)
    model = TSNE(n_components=args.dim_out, n_iter=args.n_iter, method='exact', perplexity=args.perplexity, verbose=2, random_state=args.seed)
output_embeddings = model.fit_transform(input_embeddings)
output_embeddings = torch.from_numpy(output_embeddings)
# evaluation
input_embeddings = torch.from_numpy(input_embeddings.to_numpy())
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
ground_min_dist_square, anchor_idx, topk_dists = nearest_neighbors(x=input_embeddings, top_k=20, device=device)
q = input_inverse_similarity(input_embeddings, anchor_idx, ground_min_dist_square)
scores, p = Solver.static_get_scores(q, output_embeddings, anchor_idx,
device, kl_div_add_mse_loss, 20, ground_min_dist_square)
for k, v in scores.items():
print(f'{k} = {v}')
args.output_dir.mkdir(exist_ok=True, parents=True)
torch.save({'output_embeddings': output_embeddings,
'input_embeddings': input_embeddings,
'p': p, 'q': q, 'scores': scores,
'tsne-args': args,
'ground_min_dist_square': ground_min_dist_square,
'anchor_idx': anchor_idx,
'topk_dists': topk_dists
}, args.output_dir / 'embeddings.pth.tar')
joblib.dump(model, args.output_dir / 'tsne-model.pth.tar')
if __name__ == '__main__':
main()
```
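For reference, an assumed command-line invocation of the script above; the exact entry path depends on the repository layout, and all paths and values below are placeholders rather than anything taken from the original repo:
```python
# Assumed invocation; the flags mirror the argparse options defined above:
#   python tSNE.py --input_path data/embeddings.csv --output_dir runs/tsne \
#       --n_iter 2000 --dim_out 20 --perplexity 40 --seed 42
# On success the script writes runs/tsne/embeddings.pth.tar and runs/tsne/tsne-model.pth.tar
```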
#### File: models/utils/distance.py
```python
import torch
from tqdm.auto import tqdm
from torch.nn import functional as F
def nearest_neighbors(x, top_k, device):
"""
calculate the nearest neighbors of x, return the
:param x: for matrix to calculate nearest neighbor
:param top_k: number of the nearest neighbor to be returned
:param device: device used during computation
:return:
ground_min_dist_square: torch.tensor (n, ) distance to the nearest neighbor
topk_neighbors: torch.tensor (n, top_k) the index of the top-k nearest neighbors;
"""
batch_size = 2000
x_to_device = x.to(device)
if x_to_device.shape[0] * x_to_device.shape[1] < batch_size * 200: # directly compute the whole distance matrix
dist = torch.cdist(x1=x_to_device, x2=x_to_device, p=2) # (n, n)
sorted_dist, indices = torch.sort(dist, dim=1, descending=False)
ground_min_dist_square = sorted_dist[:, 1] # the 0-th column is the distance to oneself
topk_neighbors = indices[:, 1:1 + top_k]
topk_dists = sorted_dist[:, 1:1 + top_k]
else: # calculate the nearest neighbors in batches
num_iter = x_to_device.shape[0] // batch_size + 1
topk_neighbors_list = list()
ground_min_dist_square_list = list()
sorted_dist_list = list()
for i in tqdm(torch.arange(num_iter), desc='computing nearest neighbors'):
batch_x = x_to_device[i * batch_size: (i + 1) * batch_size, :]
dist = torch.cdist(x1=batch_x, x2=x_to_device, p=2) # (n, n)
sorted_dist, indices = torch.sort(dist, dim=1, descending=False)
batch_ground_min_dist_square = sorted_dist[:, 1] # the 0-th column is the distance to oneself
batch_topk_neighbors = indices[:, 1:1 + top_k]
topk_neighbors_list.append(batch_topk_neighbors.cpu())
ground_min_dist_square_list.append(batch_ground_min_dist_square.cpu())
sorted_dist_list.append(sorted_dist[:, 1:1 + top_k].cpu())
ground_min_dist_square = torch.cat(ground_min_dist_square_list, dim=0)
topk_neighbors = torch.cat(topk_neighbors_list, dim=0)
topk_dists = torch.cat(sorted_dist_list, dim=0)
return ground_min_dist_square.cpu(), topk_neighbors.cpu(), topk_dists.cpu()
def euclidean_softmax_similarity(vec_i, vec_j, ground_min_dist_square_i=None, two_eps_square=1):
"""
calculate inverse similarity for inputs:
1 / ((d_{in}(x_i, x_j))^2 / d_i^2 + eps)
:param vec_i: torch.tensor of the shape (n, m)
xi
:param vec_j: torch.tensor of the shape (n, m)
xj
:param two_eps_square: 2 * (epsilon)^2
:param ground_min_dist_square_i:
:return: q: qij in formula (P20-3) torch.tensor of the shape (n, m)
"""
din = (vec_i.unsqueeze(dim=1) - vec_j).square().sum(dim=2) # (n, m)
sim_j_given_i = F.softmin(din / two_eps_square, dim=1) # (n, m)
return sim_j_given_i
def kl_div_loss(input_similarity, output_similarity):
"""
calculate the KL divergence between the input similarity and the output similarity
:param input_similarity: input similarity
:param output_similarity: output similarity
:return: torch.tensor of the shape (,)
"""
return torch.sum(input_similarity * torch.log(input_similarity / output_similarity))
def precomputing(x, top_k, device):
"""
compute ground true nearest neighbors
:param x:
:param top_k: top-k neighbors that are considered
:param device: device used during computation
:return: anchor_idx: each point has m points as anchors (in the case, we pick m near neighbors of x as anchors)
input_similarity: input_similarity
"""
ground_min_dist_square, anchor_idx, topk_dists = nearest_neighbors(x, top_k, device)
xi = x
xj = x[anchor_idx, :]
input_similarity = euclidean_softmax_similarity(xi, xj, ground_min_dist_square)
return anchor_idx, input_similarity, ground_min_dist_square, topk_dists
```
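A minimal usage sketch of the helpers above (not part of the original file); the import path is assumed from the file location shown in the header and may differ in the actual package:
```python
import torch
from models.utils.distance import precomputing  # assumed import path

x = torch.randn(500, 64)  # 500 points in a 64-dimensional input space
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
anchor_idx, input_similarity, ground_min_dist_square, topk_dists = precomputing(x, top_k=20, device=device)
print(anchor_idx.shape)        # torch.Size([500, 20]): indices of the 20 nearest neighbors
print(input_similarity.shape)  # torch.Size([500, 20]): softmin similarities to those anchors
```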
#### File: models/vanilla/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.manifold import TSNE
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from models.vanilla.settings import Global
from models.vanilla.similarity import compute_similarity, compute_distance
from models.vanilla.loss import InvLoss
from scipy.spatial import distance_matrix
from models.vanilla.utils import timer
## CHECK FOR GPU'S ##
CUDA = torch.cuda.is_available()
if CUDA:
device = torch.device("cuda:0")
print("Running on the GPU")
else:
device = torch.device("cpu")
print("Running on the CPU")
class SortDB:
MAX_LEN = 20
def __init__(self):
self.array = []
self.n = len(self.array)
def add(self, elem): # elem = (dist, index)
if self.n == 0 or self.n < SortDB.MAX_LEN or elem[0] < self.array[-1][0]:
arr = self.array + [elem]
self.array = sorted(arr, key=lambda x: x[0])
if len(self.array) > SortDB.MAX_LEN:
self.array = self.array[:-1]
self.n = len(self.array)
class Module(nn.Module, Global):
def __init__(self):
super().__init__()
Global.__init__(self)
class DeepNet(Module):
def __init__(self, input_size, output_size, hidden_sizes):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_layers = []
size_1 = input_size
for hidden_size in hidden_sizes:
size_2 = hidden_size
self.hidden_layers.append(nn.Linear(size_1, size_2).to(device))
size_1 = hidden_size
self.output = nn.Linear(size_1, output_size).to(device)
def forward(self, out):
for layer in self.hidden_layers:
out = F.relu(layer(out))
out = self.output(out)
return out
class Reducer(Global):
def __init__(self, name):
super().__init__()
self.name = name
self.correct = {1:[],5:[],10:[],20:[]}
self.cross_correct = {1:[],5:[],10:[],20:[]}
def fit(self, data_df, args):
self.out('Needs to be overriden')
def transform(self, data_df):
self.out('Needs to be overriden')
def create_epoch_plot(self, losses, filename=None):
epochs = np.array(list(range(1,len(losses)+1)))
self.increase_plots()
plt.figure(self.num_of_plots)
self.save_plot_name(self.num_of_plots, filename) #######
plt.plot(epochs, losses)
plt.grid(True)
plt.title('Train Loss Per Epoch')
def create_plot(self, data_df, name=None, filename=None):
self.increase_plots()
plt.figure(self.num_of_plots)
self.save_plot_name(self.num_of_plots, filename) #######
projected_data = self.transform(data_df)
plt.scatter(projected_data[:, 0], projected_data[:, 1])
plt.grid(True)
if name is None:
plt.title(self.name)
else:
plt.title(name)
def create_r_plot(self, R, name='Distribution of R values', filename=None):
self.increase_plots()
plt.figure(self.num_of_plots)
self.save_plot_name(self.num_of_plots, filename) #######
plt.hist(R,bins=80)
plt.grid(True)
if name is None:
plt.title(self.name)
else:
plt.title(name)
@timer
def find_nearest_neighbors(self, data_df):
self.out('\nComputing neighbors.')
data_np = data_df.to_numpy()
print('data_np shape', data_np.shape)
dist = distance_matrix(data_np, data_np)
nearest_neighbor_matrix = np.argpartition(dist, 21, axis=1)[:,:21] # d(x,x)=0, so this needs to be omitted
nearest_neighbors = {i:SortDB() for i in range(data_df.shape[0])}
for i in range(data_df.shape[0]):
for j in range(20):
neighbor = nearest_neighbor_matrix[i,j]
if not neighbor == i:
nearest_neighbors[i].add((dist[i,neighbor]**2, neighbor))
return nearest_neighbors
@timer
def find_nearest_cross_neighbors(self, train_data_df, test_data_df):
self.out('\nComputing cross neighbors.')
dist = distance_matrix(test_data_df.to_numpy(), train_data_df.to_numpy())
nearest_neighbor_matrix = np.argpartition(dist, 20, axis=1)[:,:20]
nearest_neighbors = {i:SortDB() for i in range(test_data_df.shape[0])}
for i in range(test_data_df.shape[0]):
for j in range(20):
neighbor = nearest_neighbor_matrix[i,j]
nearest_neighbors[i].add((dist[i,neighbor]**2, neighbor))
return nearest_neighbors
def count_neighbors(self, data_df, test=False):
if test:
nearest_neighbors = self.actual_test_nearest_neighbors
else:
nearest_neighbors = self.actual_train_nearest_neighbors
N = data_df.shape[0]
# project data #
projected_data_df = pd.DataFrame(self.transform(data_df))
# find nearest neighbors of projected data #
projected_nearest_neighbors = self.find_nearest_neighbors(projected_data_df)
# count corrects #
correct = [i for i in range(N) if nearest_neighbors[i].array[0][1] == projected_nearest_neighbors[i].array[0][1]]
correct_5 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array[:5]]]
correct_10 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array[:10]]]
correct_20 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array]]
if not test:
msg = 'Train'
else:
msg = 'Test'
msg1 = '{} correct neighbors: {} / {} ({:.2f}%)'.format(msg, len(correct), N, 100 * len(correct) / N)
msg2 = '{} correct neighbors in top 5: {} / {} ({:.2f}%)'.format(msg, len(correct_5), N, 100 * len(correct_5) / N)
msg3 = '{} correct neighbors in top 10: {} / {} ({:.2f}%)'.format(msg, len(correct_10), N, 100 * len(correct_10) / N)
msg4 = '{} correct neighbors in top 20: {} / {} ({:.2f}%)'.format(msg, len(correct_20), N, 100 * len(correct_20) / N)
for msg in [msg1, msg2, msg3, msg4]:
self.out(msg)
if test:
R = [compute_distance(data_df.iloc[i], data_df.iloc[projected_nearest_neighbors[i].array[0][1]]) / compute_distance(data_df.iloc[i], data_df.iloc[nearest_neighbors[i].array[0][1]]) - 1 for i in range(N)]
self.R_data = R
self.correct[1].append((msg, len(correct), N, 100 * len(correct) / N))
self.correct[5].append((msg, len(correct_5), N, 100 * len(correct_5) / N))
self.correct[10].append((msg, len(correct_10), N, 100 * len(correct_10) / N))
self.correct[20].append((msg, len(correct_20), N, 100 * len(correct_20) / N))
return '\n'.join([msg1, msg2, msg3, msg4])
def count_cross_neighbors(self, train_data_df, test_data_df, test=False):
if test:
nearest_neighbors = self.actual_test_cross_nearest_neighbors
else:
nearest_neighbors = self.actual_train_cross_nearest_neighbors
N = test_data_df.shape[0]
# project data #
projected_train_data_df = pd.DataFrame(self.transform(train_data_df))
projected_test_data_df = pd.DataFrame(self.transform(test_data_df))
# find nearest neighbors of projected data #
projected_nearest_neighbors = self.find_nearest_cross_neighbors(projected_train_data_df, projected_test_data_df)
# count corrects #
correct = [i for i in range(N) if nearest_neighbors[i].array[0][1] == projected_nearest_neighbors[i].array[0][1]]
correct_5 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array[:5]]]
correct_10 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array[:10]]]
correct_20 = [i for i in range(N) if nearest_neighbors[i].array[0][1] in [x[1] for x in projected_nearest_neighbors[i].array]]
if not test:
msg = 'Train'
else:
msg = 'Test'
R = [compute_distance(test_data_df.iloc[i], train_data_df.iloc[projected_nearest_neighbors[i].array[0][1]]) / compute_distance(test_data_df.iloc[i], train_data_df.iloc[nearest_neighbors[i].array[0][1]]) - 1 for i in range(N)]
self.cross_R_data = R
class Net(Module, Reducer):
def __init__(self, hidden_model, input_size, output_size, hidden_sizes):
super().__init__()
Reducer.__init__(self, 'NET')
self.input_size = input_size
self.output_size = output_size
self.model = hidden_model(input_size, output_size, hidden_sizes).to(device)
def forward(self, x):
x = x.to(device)
out = self.model(x)
return out
def transform(self, data_df):
X = torch.tensor(np.array(data_df)).float()
output = self.forward(X)
return output.cpu().data.numpy()
def fit(self, data_df, args):
if Global.IN_SAMPLE_TESTING:
self.actual_train_nearest_neighbors = self.find_nearest_neighbors(data_df)
# Prepare inputs to fit and params #
X = torch.tensor(np.array(data_df)).float()
N = X.shape[0]
self.train()
epochs = args['epochs']
criterion = InvLoss(args['lambda'])
optimizer = optim.Adam(self.parameters(), lr=args['learning_rate'])
# store the minimum square distances #
min_dist_dict = {i:None for i in range(N)}
self.out('\nFitting the model...')
losses = []
for epoch in range(epochs):
running_loss = 0
self.out('EPOCH: {}'.format(epoch+1))
for i in self.tqdm(np.random.permutation(N)):
input1 = X[i]
# get random elem, diff from i #
j = i
while j == i:
j = random.randint(0,N-1)
input2 = X[j]
# get minimum distance squares so far #
if min_dist_dict[i] is None:
min_dist_square_i = None
else:
min_dist_square_i = min_dist_dict[i][0]
if min_dist_dict[j] is None:
min_dist_square_j = None
else:
min_dist_square_j = min_dist_dict[j][0]
# compute similarities #
sim_i, dist_square = compute_similarity(data_df.iloc[i], data_df.iloc[j], min_dist_square_i)
sim_j, _ = compute_similarity(data_df.iloc[j], data_df.iloc[i], min_dist_square_j)
sim = (sim_i + sim_j) / 2
sim = sim.reshape((1))
# pass inputs from model #
output1 = self.forward(input1)
output2 = self.forward(input2)
# update storage #
if min_dist_dict[i] is None or dist_square < min_dist_dict[i][0]:
min_dist_dict[i] = (dist_square, j)
if min_dist_dict[j] is None or dist_square < min_dist_dict[j][0]:
min_dist_dict[j] = (dist_square, i)
# compute loss and backpropagate #
loss = criterion(output1, output2, sim)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
self.out('Train loss: {:.2f}'.format(running_loss))
losses.append(running_loss)
# test after every epoch #
if Global.IN_SAMPLE_TESTING:
self.count_neighbors(data_df, test=False)
# plot loss per epoch #
if args['to_plot']:
losses = np.array(losses)
self.create_epoch_plot(losses, filename='loss')
return self
def test(self, train_data_df, test_data_df):
self.actual_test_nearest_neighbors = self.find_nearest_neighbors(test_data_df)
self.actual_test_cross_nearest_neighbors = self.find_nearest_cross_neighbors(train_data_df, test_data_df)
self.count_neighbors(test_data_df, test=True)
self.count_cross_neighbors(train_data_df, test_data_df, test=True)
self.create_r_plot(self.R_data, filename='r_hist')
self.create_r_plot(self.cross_R_data, filename='r_cross_hist')
return self.R_data, self.cross_R_data, self.correct
```
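A rough training sketch for the Net reducer above; the hyper-parameter keys are taken from fit(), the data frame is random, and the surrounding models.vanilla.* modules (settings, similarity, loss, utils) are assumed to be importable as in the file header:
```python
import numpy as np
import pandas as pd

data_df = pd.DataFrame(np.random.rand(200, 50))  # 200 samples, 50 features
net = Net(DeepNet, input_size=50, output_size=2, hidden_sizes=[64, 32])
args = {'epochs': 5, 'lambda': 1.0, 'learning_rate': 1e-3, 'to_plot': False}
net.fit(data_df, args)               # pairwise similarity-preserving training loop
projected = net.transform(data_df)   # numpy array of shape (200, 2)
```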
#### File: src/toolkit/network.py
```python
from collections import OrderedDict
import torch.nn as nn
import torch
import torch.nn.functional as F
class EmbeddingNet(nn.Module):
def __init__(self):
super(EmbeddingNet, self).__init__()
self.fc = nn.Sequential(
OrderedDict([
('bn0', nn.BatchNorm1d(128)),
('relu0', nn.ReLU(inplace=True)),
('fc0', nn.Linear(128, 128)),
('bn1', nn.BatchNorm1d(128)),
('relu1', nn.ReLU(inplace=True)),
('fc1', nn.Linear(128, 64)),
('bn2', nn.BatchNorm1d(64)),
('relu2', nn.ReLU(inplace=True)),
('fc2', nn.Linear(64, 64)),
('bn3', nn.BatchNorm1d(64)),
('relu3', nn.ReLU(inplace=True)),
('fc3', nn.Linear(64, 64)),
('bn4', nn.BatchNorm1d(64)),
('relu4', nn.ReLU(inplace=True)),
('fc4', nn.Linear(64, 32)),
('bn5', nn.BatchNorm1d(32)),
('relu5', nn.ReLU(inplace=True)),
('fc5', nn.Linear(32, 32)),
('bn6', nn.BatchNorm1d(32)),
('relu6', nn.ReLU(inplace=True)),
('fc6', nn.Linear(32, 16)),
('bn7', nn.BatchNorm1d(16)),
('relu7', nn.ReLU(inplace=True)),
('fc7', nn.Linear(16, 16)),
('bn8', nn.BatchNorm1d(16)),
])
)
def forward(self, x):
output = self.fc(x)
return output
def get_embedding(self, x):
return self.forward(x)
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.norm_layer = nn.BatchNorm1d(128)
self.encoder = nn.Sequential(
OrderedDict([
('fc0', nn.Linear(128, 128)),
('drop0', nn.Dropout(0.5, True)),
('relu0', nn.ReLU(inplace=True)),
('bn0', nn.BatchNorm1d(128)),
('fc1', nn.Linear(128, 128)),
('drop1', nn.Dropout(0.5, True)),
('relu1', nn.ReLU(inplace=True)),
('bn1', nn.BatchNorm1d(128)),
('fc2', nn.Linear(128, 64)),
('drop2', nn.Dropout(0.5, True)),
('relu2', nn.ReLU(inplace=True)),
('bn2', nn.BatchNorm1d(64)),
('fc3', nn.Linear(64, 32)),
('drop3', nn.Dropout(0.5, True)),
('relu3', nn.ReLU(inplace=True)),
('bn3', nn.BatchNorm1d(32)),
('fc4', nn.Linear(32, 32)),
('drop4', nn.Dropout(0.5, True)),
('relu4', nn.ReLU(inplace=True)),
('bn4', nn.BatchNorm1d(32)),
])
)
self.decoder = nn.Sequential(
OrderedDict([
('fc5', nn.Linear(32, 64)),
('drop5', nn.Dropout(0.5, True)),
('relu5', nn.ReLU(inplace=True)),
('bn5', nn.BatchNorm1d(64)),
('fc6', nn.Linear(64, 128)),
('drop6', nn.Dropout(0.5, True)),
('relu6', nn.ReLU(inplace=True)),
('bn6', nn.BatchNorm1d(128)),
])
)
def forward(self, x):
normed_x = self.norm_layer(x)
low_embed = self.encoder(normed_x)
reconstructed_embed = self.decoder(low_embed)
reconstructed_loss = ((normed_x - reconstructed_embed) ** 2).sum(dim=1)
return low_embed, reconstructed_loss
def get_embedding(self, x):
out = self.norm_layer(x)
return self.encoder(out)
class SiameseNet(nn.Module):
def __init__(self, embedding_net):
super(SiameseNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
class ReconstructSiameseNet(nn.Module):
def __init__(self, embedding_net):
super(ReconstructSiameseNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2):
embedded_x1, reconstruct_loss1 = self.embedding_net(x1)
embedded_x2, reconstruct_loss2 = self.embedding_net(x2)
assert len(x1.shape) == 2 and len(embedded_x1.shape) == 2
dist1 = torch.sum(((x1 - x2) / x1.shape[1]) ** 2, dim=1)
dist2 = torch.sum(((embedded_x1 - embedded_x2) / embedded_x1.shape[1]) ** 2, dim=1) # scaled squared distance in the embedding space
return (reconstruct_loss1 + reconstruct_loss2 + (dist1 - dist2) ** 2).mean()
def get_embedding(self, x):
embedded_x, _ = self.embedding_net(x)
return embedded_x
``` |
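A minimal composition sketch (not from the repo) for the embedding modules above, which map 128-dimensional inputs to 16-dimensional embeddings:
```python
import torch

net = SiameseNet(EmbeddingNet())
x1, x2 = torch.randn(8, 128), torch.randn(8, 128)
emb1, emb2 = net(x1, x2)                     # each of shape (8, 16)
pair_dist = (emb1 - emb2).pow(2).sum(dim=1)  # squared L2 distance per pair
```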
{
"source": "JiajunBernoulli/douban-short-commentary",
"score": 3
} |
#### File: douban-short-commentary/utils/cut_sentences.py
```python
import re
import jieba as jieba
from config import IGNORE
"""
Created by Jiajun·Bernoulli on 2019/1/18
"""
######################## Takes the list of sentences to segment and returns an unordered words dict (word -> count) ###################
def get_words(sentences):
words = {}
if IGNORE is not None:
ignore = IGNORE
else:
ignore = []
for sentence in sentences:
list = jieba.cut(sentence, cut_all=False)
for word in list:
if word in words:
words[str(word)] = int(words.get(str(word)))+1
else:
if is_chinese_word(word) and word not in ignore:
words[str(word)] = 1
# print(words)
return words
######################## Takes the list of sentences to segment and returns the first `count` entries of the words list sorted from high to low (tuples of word and count) ###################
def get_ord_words(sentences, count):
words = get_words(sentences)
list = words.items()
# list.sort(key=cmp_to_key(lambda x, y: cmp(x[1], y[1])))
ord_list = sorted(list, key=lambda tuple: tuple[1], reverse=True)
names = []
values = []
for i in range(0, len(ord_list)):
names.append(ord_list[i][0])
values.append(ord_list[i][1])
if count is None:
return ord_list, names, values
return ord_list[0:count], names[0:count], values[0:count]
#-*- coding:utf-8 -*-
def is_chinese_word(word):
if len(word) == 1:
return False
for char in word:
if char < u"\u4e00" or char > u"\u9fa6":
return False
return True
```
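An assumed usage of the segmentation helpers above; the sentences are illustrative only, and jieba plus the project config module must be available:
```python
sentences = ['这部电影非常好看', '剧情很感人', '看完非常感动']
ord_list, names, values = get_ord_words(sentences, count=10)
print(ord_list)  # e.g. [('电影', 1), ('剧情', 1), ...], sorted by count from high to low
```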
#### File: douban-short-commentary/utils/draw_pic.py
```python
import imageio
from wordcloud import WordCloud
import matplotlib
import matplotlib.pyplot as plt
from config import TOP_NUM, FONT_PATH, PIC_PATH, FILE_PATH
"""
Created by Jiajun·Bernoulli on 2019/1/18
"""
########################### Draw a bar chart #####################
def draw_bar(labels, quants):
# -*- coding: utf-8 -*-
print(labels)
# specify the default font
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family'] = 'sans-serif'
# fix the minus sign '-' being rendered as a square
matplotlib.rcParams['axes.unicode_minus'] = False
plt.bar(range(len(quants)), quants, color='rgb', tick_label=labels)
plt.show()
########################## Draw a word cloud ########################
def draw_wordCloud(data):
my_wordcloud = WordCloud(
background_color='white', # set the background color
max_words=TOP_NUM, # set the maximum number of words to render
font_path=FONT_PATH, # set the font; Chinese characters will not display without it
mask=imageio.imread(PIC_PATH), # set the mask image that shapes the cloud
width=800,
height=800,
).generate_from_frequencies(data)
plt.figure()
plt.imshow(my_wordcloud)
plt.axis('off')
plt.show() # display the word cloud
my_wordcloud.to_file(FILE_PATH)
``` |
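An assumed call of the plotting helpers above; the frequency dict is illustrative, and the config constants (TOP_NUM, FONT_PATH, PIC_PATH, FILE_PATH) must point to a real font, mask image, and output file:
```python
word_freqs = {'电影': 120, '剧情': 85, '感动': 60}
draw_bar(list(word_freqs.keys()), list(word_freqs.values()))
draw_wordCloud(word_freqs)  # renders the cloud and writes it to FILE_PATH
```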
{
"source": "jiajunfanthu/jiajunfanthu2.github.io",
"score": 3
} |
#### File: markdown_generator/test/pub.py
```python
import pandas as pd
articles = pd.read_csv("articles.tsv", sep="\t", header=0)
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c, c) for c in text)
import os
for row, item in articles.iterrows():
md_filename = str(item.pub_date) + "-" + item.slug + ".md"
html_filename = str(item.pub_date) + "-" + item.slug
year = item.pub_date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publication\npermalink: /publication/""" + html_filename
md += "\nexcerpt: '<i>Published in " + item.venue + ", " + str(year) + "</i><br/>" + html_escape(item.summary) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
md += "\npaperurl: '" + item.url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
md += "\n\n<a href='" + item.url + "'>Download PDF here</a>\n"
md += "\nAbstract: " + html_escape(item.description) + "\n"
md += "\n Recommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open(md_filename, 'w') as f:
f.write(md)
``` |
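For context, the columns that the script above expects in articles.tsv, reconstructed from the attribute accesses in the loop rather than taken from the repo:
```python
# Expected tab-separated columns in articles.tsv (inferred from the item.<attr> accesses above):
#   pub_date, title, venue, summary, url, citation, description, slug
# Each row produces a file named "<pub_date>-<slug>.md" in the current working directory.
```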
{
"source": "jiajunhua/asyml-texar",
"score": 2
} |
#### File: utils/raml_samples_generation/process_samples.py
```python
from __future__ import print_function
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
import sys
import re
import argparse
import torch
from util import read_corpus
import numpy as np
from scipy.misc import comb
from vocab import Vocab, VocabEntry
import math
from rouge import Rouge
def is_valid_sample(sent):
tokens = sent.split(' ')
return len(tokens) >= 1 and len(tokens) < 50
def sample_from_model(args):
para_data = args.parallel_data
sample_file = args.sample_file
output = args.output
tgt_sent_pattern = re.compile(r"^\[(\d+)\] (.*?)$")
para_data = [l.strip().split(' ||| ') for l in open(para_data)]
f_out = open(output, 'w')
f = open(sample_file)
f.readline()
for src_sent, tgt_sent in para_data:
line = f.readline().strip()
assert line.startswith('****')
line = f.readline().strip()
print(line)
assert line.startswith('target:')
tgt_sent2 = line[len('target:'):]
assert tgt_sent == tgt_sent2
line = f.readline().strip() # samples
tgt_sent = ' '.join(tgt_sent.split(' ')[1:-1])
tgt_samples = set()
for i in range(1, 101):
line = f.readline().rstrip('\n')
m = tgt_sent_pattern.match(line)
assert m, line
assert int(m.group(1)) == i
sampled_tgt_sent = m.group(2).strip()
if is_valid_sample(sampled_tgt_sent):
tgt_samples.add(sampled_tgt_sent)
line = f.readline().strip()
assert line.startswith('****')
tgt_samples.add(tgt_sent)
tgt_samples = list(tgt_samples)
assert len(tgt_samples) > 0
tgt_ref_tokens = tgt_sent.split(' ')
bleu_scores = []
for tgt_sample in tgt_samples:
bleu_score = sentence_bleu([tgt_ref_tokens], tgt_sample.split(' '))
bleu_scores.append(bleu_score)
tgt_ranks = sorted(range(len(tgt_samples)), key=lambda i: bleu_scores[i], reverse=True)
print('%d samples' % len(tgt_samples))
print('*' * 50, file=f_out)
print('source: ' + src_sent, file=f_out)
print('%d samples' % len(tgt_samples), file=f_out)
for i in tgt_ranks:
print('%s ||| %f' % (tgt_samples[i], bleu_scores[i]), file=f_out)
print('*' * 50, file=f_out)
f_out.close()
def get_new_ngram(ngram, n, vocab):
"""
replace ngram `ngram` with a newly sampled ngram of the same length
"""
new_ngram_wids = [np.random.randint(3, len(vocab)) for i in range(n)]
new_ngram = [vocab.id2word[wid] for wid in new_ngram_wids]
return new_ngram
def sample_ngram(args):
src_sents = read_corpus(args.src, 'src')
tgt_sents = read_corpus(args.tgt, 'src') # do not read in <s> and </s>
f_out = open(args.output, 'w')
vocab = torch.load(args.vocab)
tgt_vocab = vocab.tgt
smooth_bleu = args.smooth_bleu
sm_func = None
if smooth_bleu:
sm_func = SmoothingFunction().method3
for src_sent, tgt_sent in zip(src_sents, tgt_sents):
src_sent = ' '.join(src_sent)
tgt_len = len(tgt_sent)
tgt_samples = []
tgt_samples_distort_rates = [] # how many unigrams are replaced
# generate 100 samples
# append itself
tgt_samples.append(tgt_sent)
tgt_samples_distort_rates.append(0)
for sid in range(args.sample_size - 1):
n = np.random.randint(1, min(tgt_len, args.max_ngram_size + 1)) # we do not replace the last token: it must be a period!
idx = np.random.randint(tgt_len - n)
ngram = tgt_sent[idx: idx + n]
new_ngram = get_new_ngram(ngram, n, tgt_vocab)
sampled_tgt_sent = list(tgt_sent)
sampled_tgt_sent[idx: idx + n] = new_ngram
# compute the probability of this sample
# prob = 1. / args.max_ngram_size * 1. / (tgt_len - 1 + n) * 1 / (len(tgt_vocab) ** n)
tgt_samples.append(sampled_tgt_sent)
tgt_samples_distort_rates.append(n)
# compute bleu scores or edit distances and rank the samples by bleu scores
rewards = []
for tgt_sample, tgt_sample_distort_rate in zip(tgt_samples, tgt_samples_distort_rates):
if args.reward == 'bleu':
reward = sentence_bleu([tgt_sent], tgt_sample, smoothing_function=sm_func)
elif args.reward == 'rouge':
rouge = Rouge()
scores = rouge.get_scores(hyps=[' '.join(tgt_sample).decode('utf-8')], refs=[' '.join(tgt_sent).decode('utf-8')], avg=True)
reward = sum([value['f'] for key, value in scores.items()])
else:
reward = -tgt_sample_distort_rate
rewards.append(reward)
tgt_ranks = sorted(range(len(tgt_samples)), key=lambda i: rewards[i], reverse=True)
# convert list of tokens into a string
tgt_samples = [' '.join(tgt_sample) for tgt_sample in tgt_samples]
print('*' * 50, file=f_out)
print('source: ' + src_sent, file=f_out)
print('%d samples' % len(tgt_samples), file=f_out)
for i in tgt_ranks:
print('%s ||| %f' % (tgt_samples[i], rewards[i]), file=f_out)
print('*' * 50, file=f_out)
f_out.close()
def sample_ngram_adapt(args):
src_sents = read_corpus(args.src, 'src')
tgt_sents = read_corpus(args.tgt, 'src') # do not read in <s> and </s>
f_out = open(args.output, 'w')
vocab = torch.load(args.vocab)
tgt_vocab = vocab.tgt
max_len = max([len(tgt_sent) for tgt_sent in tgt_sents]) + 1
for src_sent, tgt_sent in zip(src_sents, tgt_sents):
src_sent = ' '.join(src_sent)
tgt_len = len(tgt_sent)
tgt_samples = []
# generate 100 samples
# append itself
tgt_samples.append(tgt_sent)
for sid in range(args.sample_size - 1):
max_n = min(tgt_len - 1, 4)
bias_n = int(max_n * tgt_len / max_len) + 1
assert 1 <= bias_n <= 4, 'bias_n={}, not in [1,4], max_n={}, tgt_len={}, max_len={}'.format(bias_n, max_n, tgt_len, max_len)
p = [1.0 / (max_n + 5)] * max_n
p[bias_n - 1] = 1 - p[0] * (max_n - 1)
assert abs(sum(p) - 1) < 1e-10, 'sum(p) != 1'
n = np.random.choice(np.arange(1, int(max_n + 1)), p=p) # we do not replace the last token: it must be a period!
assert n < tgt_len, 'n={}, tgt_len={}'.format(n, tgt_len)
idx = np.random.randint(tgt_len - n)
ngram = tgt_sent[idx: idx + n]
new_ngram = get_new_ngram(ngram, n, tgt_vocab)
sampled_tgt_sent = list(tgt_sent)
sampled_tgt_sent[idx: idx + n] = new_ngram
tgt_samples.append(sampled_tgt_sent)
# compute bleu scores and rank the samples by bleu scores
bleu_scores = []
for tgt_sample in tgt_samples:
bleu_score = sentence_bleu([tgt_sent], tgt_sample)
bleu_scores.append(bleu_score)
tgt_ranks = sorted(range(len(tgt_samples)), key=lambda i: bleu_scores[i], reverse=True)
# convert list of tokens into a string
tgt_samples = [' '.join(tgt_sample) for tgt_sample in tgt_samples]
print('*' * 50, file=f_out)
print('source: ' + src_sent, file=f_out)
print('%d samples' % len(tgt_samples), file=f_out)
for i in tgt_ranks:
print('%s ||| %f' % (tgt_samples[i], bleu_scores[i]), file=f_out)
print('*' * 50, file=f_out)
f_out.close()
def sample_from_hamming_distance_payoff_distribution(args):
src_sents = read_corpus(args.src, 'src')
tgt_sents = read_corpus(args.tgt, 'src') # do not read in <s> and </s>
f_out = open(args.output, 'w')
vocab = torch.load(args.vocab)
tgt_vocab = vocab.tgt
payoff_prob, Z_qs = generate_hamming_distance_payoff_distribution(max(len(sent) for sent in tgt_sents),
vocab_size=len(vocab.tgt),
tau=args.temp)
for src_sent, tgt_sent in zip(src_sents, tgt_sents):
tgt_samples = [] # make sure the ground truth y* is in the samples
tgt_sent_len = len(tgt_sent) - 3 # remove <s> and </s> and ending period .
tgt_ref_tokens = tgt_sent[1:-1]
bleu_scores = []
# sample an edit distances
e_samples = np.random.choice(range(tgt_sent_len + 1), p=payoff_prob[tgt_sent_len], size=args.sample_size,
replace=True)
for i, e in enumerate(e_samples):
if e > 0:
# sample a new tgt_sent $y$
old_word_pos = np.random.choice(range(1, tgt_sent_len + 1), size=e, replace=False)
new_words = [vocab.tgt.id2word[wid] for wid in np.random.randint(3, len(vocab.tgt), size=e)]
new_tgt_sent = list(tgt_sent)
for pos, word in zip(old_word_pos, new_words):
new_tgt_sent[pos] = word
bleu_score = sentence_bleu([tgt_ref_tokens], new_tgt_sent[1:-1])
bleu_scores.append(bleu_score)
else:
new_tgt_sent = list(tgt_sent)
bleu_scores.append(1.)
# print('y: %s' % ' '.join(new_tgt_sent))
tgt_samples.append(new_tgt_sent)
def generate_hamming_distance_payoff_distribution(max_sent_len, vocab_size, tau=1.):
"""compute the q distribution for Hamming Distance (substitution only) as in the RAML paper"""
probs = dict()
Z_qs = dict()
for sent_len in range(1, max_sent_len + 1):
counts = [1.] # e = 0, count = 1
for e in range(1, sent_len + 1):
# apply the rescaling trick as in https://gist.github.com/norouzi/8c4d244922fa052fa8ec18d8af52d366
count = comb(sent_len, e) * math.exp(-e / tau) * ((vocab_size - 1) ** (e - e / tau))
counts.append(count)
Z_qs[sent_len] = Z_q = sum(counts)
prob = [count / Z_q for count in counts]
probs[sent_len] = prob
# print('sent_len=%d, %s' % (sent_len, prob))
return probs, Z_qs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['sample_from_model', 'sample_ngram_adapt', 'sample_ngram'], required=True)
parser.add_argument('--vocab', type=str)
parser.add_argument('--src', type=str)
parser.add_argument('--tgt', type=str)
parser.add_argument('--parallel_data', type=str)
parser.add_argument('--sample_file', type=str)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--sample_size', type=int, default=100)
parser.add_argument('--reward', choices=['bleu', 'edit_dist', 'rouge'], default='bleu')
parser.add_argument('--max_ngram_size', type=int, default=4)
parser.add_argument('--temp', type=float, default=0.5)
parser.add_argument('--smooth_bleu', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'sample_ngram':
sample_ngram(args)
elif args.mode == 'sample_from_model':
sample_from_model(args)
elif args.mode == 'sample_ngram_adapt':
sample_ngram_adapt(args)
```
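A small numeric check (not in the original script) of the Hamming-distance payoff distribution defined above; it only calls the function as written, so it still relies on the scipy.misc.comb import at the top of the file:
```python
probs, Z_qs = generate_hamming_distance_payoff_distribution(max_sent_len=5, vocab_size=10000, tau=0.5)
print(probs[5])       # q(e) for edit distances e = 0..5 of a length-5 target sentence
print(sum(probs[5]))  # ~1.0, since the counts are normalized by Z_q
```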
#### File: examples/sequence_tagging/conll_reader.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from collections import defaultdict
import numpy as np
import tensorflow as tf
# pylint: disable=invalid-name, too-many-locals
MAX_CHAR_LENGTH = 45
NUM_CHAR_PAD = 2
UNK_WORD, UNK_CHAR, UNK_NER = 0, 0, 0
PAD_WORD, PAD_CHAR, PAD_NER = 1, 1, 1
# Regular expressions used to normalize digits.
DIGIT_RE = re.compile(r"\d")
def create_vocabs(train_path, dev_path, test_path, normalize_digits=True, min_occur=1, glove_dict=None):
word_vocab = defaultdict(lambda: len(word_vocab))
word_count = defaultdict(lambda: 0)
char_vocab = defaultdict(lambda: len(char_vocab))
ner_vocab = defaultdict(lambda: len(ner_vocab))
UNK_WORD = word_vocab["<unk>"]
PAD_WORD = word_vocab["<pad>"]
UNK_CHAR = char_vocab["<unk>"]
PAD_CHAR = char_vocab["<pad>"]
UNK_NER = ner_vocab["<unk>"]
PAD_NER = ner_vocab["<pad>"]
print("Creating Vocabularies:")
for file_path in [train_path, dev_path, test_path]:
with open(file_path, 'r') as file:
for line in file:
line = line.strip()
if len(line) == 0:
continue
tokens = line.split(' ')
for char in tokens[1]:
cid = char_vocab[char]
word = DIGIT_RE.sub("0", tokens[1]) if normalize_digits else tokens[1]
ner = tokens[4]
if glove_dict is not None and (word in glove_dict or word.lower() in glove_dict):
word_count[word] += min_occur + 1
elif file_path == train_path:
word_count[word] += 1
nid = ner_vocab[ner]
print("Total Vocabulary Size: %d" % len(word_count))
for word in word_count:
if word_count[word] > min_occur:
wid = word_vocab[word]
print("Word Vocabulary Size: %d" % len(word_vocab))
print("Character Alphabet Size: %d" % len(char_vocab))
print("NER Alphabet Size: %d" % len(ner_vocab))
word_vocab = defaultdict(lambda: UNK_WORD, word_vocab)
char_vocab = defaultdict(lambda: UNK_CHAR, char_vocab)
ner_vocab = defaultdict(lambda: UNK_NER, ner_vocab)
i2w = {v: k for k, v in word_vocab.items()}
i2n = {v: k for k, v in ner_vocab.items()}
return (word_vocab, char_vocab, ner_vocab), (i2w, i2n)
def read_data(source_path, word_vocab, char_vocab, ner_vocab, normalize_digits=True):
data = []
print('Reading data from %s' % source_path)
counter = 0
reader = CoNLLReader(source_path, word_vocab, char_vocab, ner_vocab)
inst = reader.getNext(normalize_digits)
while inst is not None:
counter += 1
sent = inst.sentence
data.append([sent.word_ids, sent.char_id_seqs, inst.ner_ids])
inst = reader.getNext(normalize_digits)
reader.close()
print("Total number of data: %d" % counter)
return data
def iterate_batch(data, batch_size, shuffle=False):
if shuffle:
np.random.shuffle(data)
for start_idx in range(0, len(data), batch_size):
excerpt = slice(start_idx, start_idx + batch_size)
batch = data[excerpt]
batch_length = max([len(batch[i][0]) for i in range(len(batch))])
wid_inputs = np.empty([len(batch), batch_length], dtype=np.int64)
cid_inputs = np.empty([len(batch), batch_length, MAX_CHAR_LENGTH], dtype=np.int64)
nid_inputs = np.empty([len(batch), batch_length], dtype=np.int64)
masks = np.zeros([len(batch), batch_length], dtype=np.float32)
lengths = np.empty(len(batch), dtype=np.int64)
for i, inst in enumerate(batch):
wids, cid_seqs, nids = inst
inst_size = len(wids)
lengths[i] = inst_size
# word ids
wid_inputs[i, :inst_size] = wids
wid_inputs[i, inst_size:] = PAD_WORD
for c, cids in enumerate(cid_seqs):
cid_inputs[i, c, :len(cids)] = cids
cid_inputs[i, c, len(cids):] = PAD_CHAR
cid_inputs[i, inst_size:, :] = PAD_CHAR
nid_inputs[i, :inst_size] = nids
nid_inputs[i, inst_size:] = PAD_NER
masks[i, :inst_size] = 1.0
yield wid_inputs, cid_inputs, nid_inputs, masks, lengths
def load_glove(filename, emb_dim, normalize_digits=True):
"""Loads embeddings in the glove text format in which each line is
'<word-string> <embedding-vector>'. Dimensions of the embedding vector
are separated with whitespace characters.
Args:
filename (str): Path to the embedding file.
vocab (dict): A dictionary that maps token strings to integer index.
Tokens not in :attr:`vocab` are not read.
word_vecs: A 2D numpy array of shape `[vocab_size, embed_dim]`
which is updated as reading from the file.
Returns:
The updated :attr:`word_vecs`.
"""
glove_dict = dict()
with tf.gfile.Open(filename) as fin:
for line in fin:
vec = line.strip().split()
if len(vec) == 0:
continue
word, vec = vec[0], vec[1:]
word = tf.compat.as_text(word)
word = DIGIT_RE.sub("0", word) if normalize_digits else word
glove_dict[word] = np.array([float(v) for v in vec])
if len(vec) != emb_dim:
raise ValueError("Inconsistent word vector sizes: %d vs %d" %
(len(vec), emb_dim))
return glove_dict
def construct_init_word_vecs(vocab, word_vecs, glove_dict):
for word, index in vocab.items():
if word in glove_dict:
embedding = glove_dict[word]
elif word.lower() in glove_dict:
embedding = glove_dict[word.lower()]
else:
embedding = None
if embedding is not None:
word_vecs[index] = embedding
return word_vecs
class CoNLLReader(object):
def __init__(self, file_path, word_vocab, char_vocab, ner_vocab):
self.__source_file = open(file_path, 'r', encoding='utf-8')
self.__word_vocab = word_vocab
self.__char_vocab = char_vocab
self.__ner_vocab = ner_vocab
def close(self):
self.__source_file.close()
def getNext(self, normalize_digits=True):
line = self.__source_file.readline()
# skip multiple blank lines.
while len(line) > 0 and len(line.strip()) == 0:
line = self.__source_file.readline()
if len(line) == 0:
return None
lines = []
while len(line.strip()) > 0:
line = line.strip()
lines.append(line.split(' '))
line = self.__source_file.readline()
length = len(lines)
if length == 0:
return None
words = []
word_ids = []
char_seqs = []
char_id_seqs = []
ner_tags = []
ner_ids = []
for tokens in lines:
chars = []
char_ids = []
for char in tokens[1]:
chars.append(char)
char_ids.append(self.__char_vocab[char])
if len(chars) > MAX_CHAR_LENGTH:
chars = chars[:MAX_CHAR_LENGTH]
char_ids = char_ids[:MAX_CHAR_LENGTH]
char_seqs.append(chars)
char_id_seqs.append(char_ids)
word = DIGIT_RE.sub("0", tokens[1]) if normalize_digits else tokens[1]
ner = tokens[4]
words.append(word)
word_ids.append(self.__word_vocab[word])
ner_tags.append(ner)
ner_ids.append(self.__ner_vocab[ner])
return NERInstance(Sentence(words, word_ids, char_seqs, char_id_seqs), ner_tags, ner_ids)
class NERInstance(object):
def __init__(self, sentence, ner_tags, ner_ids):
self.sentence = sentence
self.ner_tags = ner_tags
self.ner_ids = ner_ids
def length(self):
return self.sentence.length()
class Sentence(object):
def __init__(self, words, word_ids, char_seqs, char_id_seqs):
self.words = words
self.word_ids = word_ids
self.char_seqs = char_seqs
self.char_id_seqs = char_id_seqs
def length(self):
return len(self.words)
```
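An assumed end-to-end use of the reader utilities above; every file path is a placeholder:
```python
glove_dict = load_glove('glove.6B.100d.txt', emb_dim=100)
vocabs, (i2w, i2n) = create_vocabs('train.conll', 'dev.conll', 'test.conll', glove_dict=glove_dict)
word_vocab, char_vocab, ner_vocab = vocabs
train_data = read_data('train.conll', word_vocab, char_vocab, ner_vocab)
wids, cids, nids, masks, lengths = next(iterate_batch(train_data, batch_size=16, shuffle=True))
print(wids.shape)  # (16, longest_sentence_in_batch)
```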
#### File: data/data/data_iterators_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# pylint: disable=no-member, invalid-name
import tempfile
import numpy as np
import tensorflow as tf
import texar.tf as tx
class DataIteratorTest(tf.test.TestCase):
"""Tests data iterators.
"""
def setUp(self):
tf.test.TestCase.setUp(self)
# Create data
train_text = list(np.linspace(1, 1000, num=1000, dtype=np.int64))
train_text = [str(x) for x in train_text]
train_text_file = tempfile.NamedTemporaryFile()
train_text_file.write('\n'.join(train_text).encode("utf-8"))
train_text_file.flush()
self._train_text_file = train_text_file
test_text = list(np.linspace(1001, 2000, num=1000, dtype=np.int64))
test_text = [str(x) for x in test_text]
test_text_file = tempfile.NamedTemporaryFile()
test_text_file.write('\n'.join(test_text).encode("utf-8"))
test_text_file.flush()
self._test_text_file = test_text_file
vocab_list = train_text + test_text
vocab_file = tempfile.NamedTemporaryFile()
vocab_file.write('\n'.join(vocab_list).encode("utf-8"))
vocab_file.flush()
self._vocab_file = vocab_file
self._vocab_size = len(vocab_list)
self._train_hparams = {
"num_epochs": 2,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._train_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "train"
}
self._test_hparams = {
"num_epochs": 1,
"batch_size": 1,
"shuffle": False,
"dataset": {
"files": self._test_text_file.name,
"vocab_file": self._vocab_file.name,
"bos_token": '',
"eos_token": ''
},
"name": "test"
}
def test_iterator_single_dataset(self):
"""Tests iterating over a single dataset.
"""
data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator(data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_dataset(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Done -- epoch limit reached')
self.assertEqual(i, 2001)
break
def test_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.DataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
# Iterates over train data
iterator.switch_to_dataset(sess, train_data.name)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.switch_to_dataset(sess, test_data.name)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.switch_to_train_data(sess)
i = 0
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.switch_to_test_data(sess)
i = 1001
while True:
try:
data_batch_ = sess.run(data_batch)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_feedable_iterator_multi_datasets(self):
"""Tests iterating over multiple datasets with the
:class:`FeedableDataIterator`.
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.FeedableDataIterator([train_data, test_data])
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
iterator.initialize_dataset(sess)
for _ in range(2):
# Iterates over train data
iterator.restart_dataset(sess, train_data.name)
data_handle = iterator.get_handle(sess, train_data.name)
i = 0
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
# Iterates over test data
iterator.restart_dataset(sess, test_data.name)
data_handle = iterator.get_handle(sess, test_data.name)
i = 1001
while True:
try:
feed_dict = {iterator.handle: data_handle}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
def test_train_test_feedable_data_iterator(self):
"""Tests :class:`texar.tf.data.TrainTestFeedableDataIterator`
"""
train_data = tx.data.MonoTextData(self._train_hparams)
test_data = tx.data.MonoTextData(self._test_hparams)
iterator = tx.data.TrainTestFeedableDataIterator(train=train_data,
test=test_data)
data_batch = iterator.get_next()
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
for _ in range(2):
iterator.restart_train_dataset(sess)
i = 0
while True:
try:
feed_dict = {
iterator.handle: iterator.get_train_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i + 1))
i = (i + 1) % 1000
except tf.errors.OutOfRangeError:
print('Train data limit reached')
self.assertEqual(i, 0)
break
iterator.restart_test_dataset(sess)
i = 1001
while True:
try:
feed_dict = {
iterator.handle: iterator.get_test_handle(sess)
}
data_batch_ = sess.run(data_batch, feed_dict=feed_dict)
self.assertEqual(
tf.compat.as_text(data_batch_['text'][0][0]),
str(i))
i += 1
except tf.errors.OutOfRangeError:
print('Test data limit reached')
self.assertEqual(i, 2001)
break
if __name__ == "__main__":
tf.test.main()
```
#### File: modules/encoders/conv_encoders_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import texar.tf as tx
from texar.tf.modules.encoders.conv_encoders import Conv1DEncoder
class Conv1DEncoderTest(tf.test.TestCase):
"""Tests :class:`~texar.tf.modules.Conv1DEncoder` class.
"""
def test_encode(self):
"""Tests encode.
"""
encoder_1 = Conv1DEncoder()
self.assertEqual(len(encoder_1.layers), 4)
self.assertTrue(isinstance(encoder_1.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in encoder_1.layers[0].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_1 = tf.ones([64, 16, 300], tf.float32)
outputs_1 = encoder_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
"other_conv_kwargs": {"padding": "same"},
# Pooling layers
"pooling": "AveragePooling",
"pool_size": 2,
"pool_strides": 1,
# Dense layers
"num_dense_layers": 3,
"dense_size": [128, 128, 10],
"dense_activation": "relu",
"other_dense_kwargs": {"use_bias": False},
# Dropout
"dropout_conv": [0, 1, 2],
"dropout_dense": 2
}
encoder_2 = Conv1DEncoder(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(encoder_2.layers), 1 + 1 + 1 + 3 + 4 + 1)
self.assertTrue(isinstance(encoder_2.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in encoder_2.layers[1].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_2 = tf.ones([64, 16, 300], tf.float32)
outputs_2 = encoder_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
def test_unknown_seq_length(self):
"""Tests use of pooling layer when the seq_length dimension of inputs
is `None`.
"""
encoder_1 = Conv1DEncoder()
inputs_1 = tf.placeholder(tf.float32, [64, None, 300])
outputs_1 = encoder_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
# Pooling layers
"pooling": "AveragePooling",
"pool_size": [2, None],
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
encoder = Conv1DEncoder(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(encoder.layers), 1 + 1 + 1 + 1 + 1 + 1)
self.assertTrue(isinstance(encoder.layer_by_name('pool_2'),
tx.core.AverageReducePooling1D))
inputs = tf.placeholder(tf.float32, [64, None, 300])
outputs = encoder(inputs)
self.assertEqual(outputs.shape, [64, 10])
hparams_2 = {
# Conv layers
"num_conv_layers": 1,
"filters": 128,
"kernel_size": 4,
"other_conv_kwargs": {'data_format': 'channels_first'},
# Pooling layers
"pooling": "MaxPooling",
"other_pool_kwargs": {'data_format': 'channels_first'},
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
encoder_2 = Conv1DEncoder(hparams_2)
inputs_2 = tf.placeholder(tf.float32, [64, 300, None])
outputs_2 = encoder_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
if __name__ == "__main__":
tf.test.main()
``` |
{
"source": "jiajunhua/josedolz-HyperDenseNet",
"score": 2
} |
#### File: josedolz-HyperDenseNet/src/generateROI.py
```python
import sys
import pdb
from os.path import isfile, join
import os
import numpy as np
import nibabel as nib
import scipy.io as sio
from LiviaNet.Modules.IO.loadData import load_nii
from LiviaNet.Modules.IO.loadData import load_matlab
from LiviaNet.Modules.IO.saveData import saveImageAsNifti
from LiviaNet.Modules.IO.saveData import saveImageAsMatlab
# NOTE: Only has been tried on nifti images. However, it should not give any error for Matlab images.
""" To print function usage """
def printUsage(error_type):
if error_type == 1:
print(" ** ERROR!!: Few parameters used.")
else:
print(" ** ERROR!!: ...") # TODO
print(" ******** USAGE ******** ")
print(" --- argv 1: Folder containing mr images")
print(" --- argv 2: Folder to save corrected label images")
print(" --- argv 3: Image type")
print(" ------------- 0: nifti format")
print(" ------------- 1: matlab format")
def getImageImageList(imagesFolder):
if os.path.exists(imagesFolder):
imageNames = [f for f in os.listdir(imagesFolder) if isfile(join(imagesFolder, f))]
imageNames.sort()
return imageNames
def checkAnotatedLabels(argv):
# Number of input arguments
# 1: Folder containing label images
# 2: Folder to save corrected label images
# 3: Image type
# 0: nifti format
# 1: matlab format
# Do some sanity checks
if len(argv) < 3:
printUsage(1)
sys.exit()
imagesFolder = argv[0]
imagesFolderdst = argv[1]
imageType = int(argv[2])
imageNames = getImageImageList(imagesFolder)
printFileNames = False
for i_d in xrange(len(imageNames)) :
if imageType == 0:
imageFileName = imagesFolder + '/' + imageNames[i_d]
[imageData,img_proxy] = load_nii(imageFileName, printFileNames)
else:
imageFileName = imagesFolder + '/' + imageNames[i_d]
imageData = load_matlab(imageFileName, printFileNames)
# Find voxels different to 0
# NOTE: I assume voxels equal to 0 are outside my ROI (like in the skull stripped datasets)
idx = np.where(imageData > 0 )
# Create ROI and assign those indexes to 1
roiImage = np.zeros(imageData.shape,dtype=np.int8)
roiImage[idx] = 1
print(" ... Saving roi...")
nameToSave = imagesFolderdst + '/ROI_' + imageNames[i_d]
if imageType == 0: # nifti
imageTypeToSave = np.dtype(np.int8)
saveImageAsNifti(roiImage,
nameToSave,
imageFileName,
imageTypeToSave)
else: # Matlab
# Cast to int8 for saving purposes
saveImageAsMatlab(roiImage.astype('int8'),nameToSave)
print " ****************************************** PROCESSING LABELS DONE ******************************************"
if __name__ == '__main__':
checkAnotatedLabels(sys.argv[1:])
```
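An assumed invocation of the ROI script above, following its own usage message; the folder names are placeholders:
```python
# Assumed command line (argv: images folder, output folder, image type 0 = nifti, 1 = matlab):
#   python generateROI.py ./data/T1_images ./data/ROIs 0
```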
#### File: Modules/IO/loadData.py
```python
import numpy as np
import pdb
# If you are not using nifti files you can comment this line
import nibabel as nib
import scipy.io as sio
from ImgOperations.imgOp import applyPadding
# ----- Loader for nifti files ------ #
def load_nii (imageFileName, printFileNames) :
if printFileNames == True:
print (" ... Loading file: {}".format(imageFileName))
img_proxy = nib.load(imageFileName)
imageData = img_proxy.get_data()
return (imageData,img_proxy)
def release_nii_proxy(img_proxy) :
img_proxy.uncache()
# ----- Loader for matlab format ------- #
# Very important: All the volumes should have been saved as 'vol'.
# Otherwise, change its name here
def load_matlab (imageFileName, printFileNames) :
if printFileNames == True:
print (" ... Loading file: {}".format(imageFileName))
mat_contents = sio.loadmat(imageFileName)
imageData = mat_contents['vol']
return (imageData)
""" It loads the images (CT/MRI + Ground Truth + ROI) for the patient image Idx"""
def load_imagesSinglePatient(imageIdx,
imageNames,
imageNames_Bottom,
groundTruthNames,
roiNames,
applyPaddingBool,
receptiveField,
sampleSizes,
imageType
):
if imageIdx >= len(imageNames) :
print (" ERROR!!!!! : The image index specified is greater than images array size....)")
exit(1)
# --- Load image data (CT/MRI/...) ---
printFileNames = False # Get this from config.ini
imageFileName = imageNames[imageIdx]
if imageType == 0:
[imageData,img_proxy] = load_nii(imageFileName, printFileNames)
else:
imageData = load_matlab(imageFileName, printFileNames)
if applyPaddingBool == True :
[imageData, paddingValues] = applyPadding(imageData, sampleSizes, receptiveField)
else:
paddingValues = ((0,0),(0,0),(0,0))
if len(imageData.shape) > 3 :
imageData = imageData[:,:,:,0]
if imageType == 0:
release_nii_proxy(img_proxy)
# --- Load image data for bottom path (CT/MRI/...) ---
printFileNames = False # Get this from config.ini
imageFileName = imageNames_Bottom[imageIdx]
if imageType == 0:
[imageData_Bottom,img_proxy] = load_nii(imageFileName, printFileNames)
else:
imageData_Bottom = load_matlab(imageFileName, printFileNames)
if applyPaddingBool == True :
[imageData_Bottom, paddingValues] = applyPadding(imageData_Bottom, sampleSizes, receptiveField)
else:
paddingValues = ((0,0),(0,0),(0,0))
if len(imageData_Bottom.shape) > 3 :
imageData_Bottom = imageData_Bottom[:,:,:,0]
if imageType == 0:
release_nii_proxy(img_proxy)
# --- Load ground truth (i.e. labels) ---
if len(groundTruthNames) > 0 :
GTFileName = groundTruthNames[imageIdx]
if imageType == 0:
[gtLabelsData, gt_proxy] = load_nii (GTFileName, printFileNames)
else:
gtLabelsData = load_matlab(GTFileName, printFileNames)
# Convert ground truth to int type
if np.issubdtype( gtLabelsData.dtype, np.int ) :
gtLabelsData = gtLabelsData
else:
gtLabelsData = np.rint(gtLabelsData).astype("int32")
imageGtLabels = gtLabelsData
if imageType == 0:
# Release data
release_nii_proxy(gt_proxy)
if applyPaddingBool == True :
[imageGtLabels, paddingValues] = applyPadding(imageGtLabels, sampleSizes, receptiveField)
else :
imageGtLabels = np.empty(0)
# --- Load roi ---
if len(roiNames)> 0 :
roiFileName = roiNames[imageIdx]
if imageType == 0:
[roiMaskData, roi_proxy] = load_nii (roiFileName, printFileNames)
else:
roiMaskData = load_matlab(roiFileName, printFileNames)
roiMask = roiMaskData
if imageType == 0:
# Release data
release_nii_proxy(roi_proxy)
if applyPaddingBool == True :
[roiMask, paddingValues] = applyPadding(roiMask, sampleSizes, receptiveField)
else :
roiMask = np.ones(imageGtLabels.shape)
return [imageData, imageData_Bottom, imageGtLabels, roiMask, paddingValues]
# -------------------------------------------------------- #
def getRandIndexes(total, maxNumberIdx) :
# Generate a shuffle array of a vector containing "total" elements
idxs = range(total)
np.random.shuffle(idxs)
rand_idxs = idxs[0:maxNumberIdx]
return rand_idxs
```
#### File: Modules/NeuralNetwork/ActivationFunctions.py
```python
import pdb
import os
import numpy as np
import theano
import theano.tensor as T
import sys
# https://github.com/Theano/Theano/issues/689
sys.setrecursionlimit(50000)
#####################################################
## Various activation functions for the CNN layers ##
#####################################################
# Sigmoid activations
def applyActivationFunction_Sigmoid(inputData):
""" inputData is a tensor5D with shape:
(batchSize,
Number of feature Maps,
convolvedImageShape[0],
convolvedImageShape[1],
convolvedImageShape[2]) """
outputData = T.nnet.sigmoid(inputData)
return ( outputData )
# Tanh activations
def applyActivationFunction_Tanh(inputData):
"""inputData is a tensor5D with shape:
# (batchSize,
# Number of feature Maps,
# convolvedImageShape[0],
# convolvedImageShape[1],
# convolvedImageShape[2])"""
outputData= T.tanh(inputData)
return ( outputData )
# *** There actually exist several ways to implement ReLU activations ***
# --- Version 1 ---
def applyActivationFunction_ReLU_v1(inputData):
""" inputData is a tensor5D with shape:
# (batchSize,
# Number of feature Maps,
# convolvedImageShape[0],
# convolvedImageShape[1],
# convolvedImageShape[2]) """
return T.maximum(inputData,0)
# --- Version 2 ---
def applyActivationFunction_ReLU_v2(inputData):
return T.switch(inputData < 0., 0., inputData)
# --- Version 3 ---
def applyActivationFunction_ReLU_v3(inputData):
return ((inputData + abs(inputData))/2.0)
# --- Version 4 ---
def applyActivationFunction_ReLU_v4(inputData):
return (T.sgn(inputData) + 1) * inputData * 0.5
# *** LeakyReLU ***
def applyActivationFunction_LeakyReLU( inputData, leakiness ) :
"""leakiness : float
Slope for negative input, usually between 0 and 1.
A leakiness of 0 will lead to the standard rectifier,
a leakiness of 1 will lead to a linear activation function,
and any value in between will give a leaky rectifier.
[1] Maas et al. (2013):
Rectifier Nonlinearities Improve Neural Network Acoustic Models,
http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
- The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim) """
pos = 0.5 * (1 + leakiness)
neg = 0.5 * (1 - leakiness)
output = pos * inputData + neg * abs(inputData)
return (output)
# *** There actually exist several ways to implement PReLU activations ***
# PReLU activations (from Kamnitsas)
def applyActivationFunction_PReLU( inputData, PreluActivations ) :
"""Parametric Rectified Linear Unit.
It follows:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`,
where `alpha` is a learned array with the same shape as x.
- The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim) """
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = T.maximum(0, inputData)
neg = preluActivationsAsRow * (inputData - abs(inputData)) * 0.5
output = pos + neg
return (output)
# --- version 2 ---
def applyActivationFunction_PReLU_v2(inputData,PreluActivations) :
""" inputData is a tensor5D with shape:
(batchSize,
Number of feature Maps,
convolvedImageShape[0],
convolvedImageShape[1],
convolvedImageShape[2]) """
# The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim)
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = ((inputData + abs(inputData)) / 2.0 )
neg = preluActivationsAsRow * ((inputData - abs(inputData)) / 2.0 )
output = pos + neg
return ( output)
# --- version 3 ---
def applyActivationFunction_PReLU_v3(inputData,PreluActivations) :
""" inputData is a tensor5D with shape:
(batchSize,
Number of feature Maps,
convolvedImageShape[0],
convolvedImageShape[1],
convolvedImageShape[2]) """
# The input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim)
preluActivationsAsRow = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
pos = 0.5 * (1 + preluActivationsAsRow )
neg = 0.5 * (1 - preluActivationsAsRow )
output = pos * inputData + neg * abs(inputData)
return ( output)
# Benchmark on ReLU/PReLU activations:
# http://gforge.se/2015/06/benchmarking-relu-and-prelu/
# TODO. Implement some other activation functions:
# Ex: Randomized ReLU
# S-shape Relu
# ThresholdedReLU
``` |
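A minimal sketch (not part of the original file) comparing two of the activations above on a toy 5D tensor; it assumes Theano is installed and that these functions are importable from this module:
```python
import numpy as np
import theano
import theano.tensor as T

# symbolic 5D input: (batchSize, feature maps, x, y, z)
x = T.TensorType(theano.config.floatX, (False,) * 5)('x')
relu = theano.function([x], applyActivationFunction_ReLU_v1(x))
leaky = theano.function([x], applyActivationFunction_LeakyReLU(x, 0.2))

toy = np.random.randn(2, 3, 4, 4, 4).astype(theano.config.floatX)
print(relu(toy).min() >= 0)  # True: negative inputs are clipped to zero
print(leaky(toy).shape)      # (2, 3, 4, 4, 4)
```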
{
"source": "jiajunhua/PyMVPA474833",
"score": 2
} |
#### File: mvpa2/mappers/wavelet.py
```python
from mvpa2.base import externals
if externals.exists('pywt', raise_=True):
# import conditional to be able to import the whole module while building
# the docs even if pywt is not installed
import pywt
import numpy as np
from mvpa2.base import warning
from mvpa2.mappers.base import Mapper
if __debug__:
from mvpa2.base import debug
# WaveletPacket and WaveletTransformation mappers share lots of common
# functionality at the moment
class _WaveletMapper(Mapper):
"""Generic class for Wavelet mappers (decomposition and packet)
"""
def __init__(self, dim=1, wavelet='sym4', mode='per', maxlevel=None):
"""Initialize _WaveletMapper mapper
Parameters
----------
dim : int or tuple of int
dimensions to work across (for now just scalar value, ie 1D
transformation) is supported
wavelet : str
          one of the wavelet families available within the pywt package
mode : str
periodization mode
maxlevel : int or None
number of levels to use. If None - automatically selected by pywt
"""
Mapper.__init__(self)
self._dim = dim
"""Dimension to work along"""
self._maxlevel = maxlevel
"""Maximal level of decomposition. None for automatic"""
if not wavelet in pywt.wavelist():
raise ValueError, \
"Unknown family of wavelets '%s'. Please use one " \
"available from the list %s" % (wavelet, pywt.wavelist())
self._wavelet = wavelet
"""Wavelet family to use"""
if not mode in pywt.MODES.modes:
raise ValueError, \
"Unknown periodization mode '%s'. Please use one " \
"available from the list %s" % (mode, pywt.MODES.modes)
self._mode = mode
"""Periodization mode"""
def _forward_data(self, data):
data = np.asanyarray(data)
self._inshape = data.shape
self._intimepoints = data.shape[self._dim]
res = self._wm_forward(data)
self._outshape = res.shape
return res
def _reverse_data(self, data):
data = np.asanyarray(data)
return self._wm_reverse(data)
def _wm_forward(self, *args):
raise NotImplementedError
def _wm_reverse(self, *args):
raise NotImplementedError
##REF: Name was automagically refactored
def _get_indexes(shape, dim):
"""Generator for coordinate tuples providing slice for all in `dim`
XXX Somewhat sloppy implementation... but works...
"""
if len(shape) < dim:
raise ValueError, "Dimension %d is incorrect for a shape %s" % \
(dim, shape)
n = len(shape)
curindexes = [0] * n
curindexes[dim] = Ellipsis#slice(None) # all elements for dimension dim
while True:
yield tuple(curindexes)
for i in xrange(n):
if i == dim and dim == n-1:
return # we reached it -- thus time to go
if curindexes[i] == shape[i] - 1:
if i == n-1:
return
curindexes[i] = 0
else:
if i != dim:
curindexes[i] += 1
break
class WaveletPacketMapper(_WaveletMapper):
"""Convert signal into an overcomplete representaion using Wavelet packet
"""
def __init__(self, level=None, **kwargs):
"""Initialize WaveletPacketMapper mapper
Parameters
----------
level : int or None
What level to decompose at. If 'None' data for all levels
is provided, but due to different sizes, they are placed
in 1D row.
"""
_WaveletMapper.__init__(self,**kwargs)
self.__level = level
# XXX too much of duplications between such methods -- it begs
# refactoring
##REF: Name was automagically refactored
def __forward_single_level(self, data):
if __debug__:
debug('MAP', "Converting signal using DWP (single level)")
wp = None
level = self.__level
wavelet = self._wavelet
mode = self._mode
dim = self._dim
level_paths = None
for indexes in _get_indexes(data.shape, self._dim):
if __debug__:
debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
WP = pywt.WaveletPacket(
data[indexes], wavelet=wavelet,
mode=mode, maxlevel=level)
level_nodes = WP.get_level(level)
if level_paths is None:
# Needed for reconstruction
self.__level_paths = np.array([node.path for node in level_nodes])
level_datas = np.array([node.data for node in level_nodes])
if wp is None:
newdim = data.shape
newdim = newdim[:dim] + level_datas.shape + newdim[dim+1:]
if __debug__:
debug('MAP_', "Initializing storage of size %s for single "
"level (%d) mapping of data of size %s" % (newdim, level, data.shape))
wp = np.empty( tuple(newdim) )
wp[indexes] = level_datas
return wp
##REF: Name was automagically refactored
def __forward_multiple_levels(self, data):
wp = None
levels_length = None # total length at each level
levels_lengths = None # list of lengths per each level
for indexes in _get_indexes(data.shape, self._dim):
if __debug__:
debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
WP = pywt.WaveletPacket(
data[indexes],
wavelet=self._wavelet,
mode=self._mode, maxlevel=self._maxlevel)
if levels_length is None:
levels_length = [None] * WP.maxlevel
levels_lengths = [None] * WP.maxlevel
levels_datas = []
for level in xrange(WP.maxlevel):
level_nodes = WP.get_level(level+1)
level_datas = [node.data for node in level_nodes]
level_lengths = [len(x) for x in level_datas]
level_length = np.sum(level_lengths)
if levels_lengths[level] is None:
levels_lengths[level] = level_lengths
elif levels_lengths[level] != level_lengths:
raise RuntimeError, \
"ADs of same level of different samples should have same number of elements." \
" Got %s, was %s" % (level_lengths, levels_lengths[level])
if levels_length[level] is None:
levels_length[level] = level_length
elif levels_length[level] != level_length:
raise RuntimeError, \
"Levels of different samples should have same number of elements." \
" Got %d, was %d" % (level_length, levels_length[level])
level_data = np.hstack(level_datas)
levels_datas.append(level_data)
# assert(len(data) == levels_length)
# assert(len(data) >= Ntimepoints)
if wp is None:
newdim = list(data.shape)
newdim[self._dim] = np.sum(levels_length)
wp = np.empty( tuple(newdim) )
wp[indexes] = np.hstack(levels_datas)
self.levels_lengths, self.levels_length = levels_lengths, levels_length
if __debug__:
debug('MAP_', "")
debug('MAP', "Done convertion into wp. Total size %s" % str(wp.shape))
return wp
def _wm_forward(self, data):
if __debug__:
debug('MAP', "Converting signal using DWP")
if self.__level is None:
return self.__forward_multiple_levels(data)
else:
return self.__forward_single_level(data)
#
# Reverse mapping
#
##REF: Name was automagically refactored
def __reverse_single_level(self, wp):
# local bindings
level_paths = self.__level_paths
# define wavelet packet to use
WP = pywt.WaveletPacket(
data=None, wavelet=self._wavelet,
mode=self._mode, maxlevel=self.__level)
# prepare storage
signal_shape = wp.shape[:1] + self._inshape[1:]
signal = np.zeros(signal_shape)
Ntime_points = self._intimepoints
for indexes in _get_indexes(signal_shape,
self._dim):
if __debug__:
debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
for path, level_data in zip(level_paths, wp[indexes]):
WP[path] = level_data
signal[indexes] = WP.reconstruct(True)[:Ntime_points]
return signal
def _wm_reverse(self, data):
if __debug__:
debug('MAP', "Converting signal back using DWP")
if self.__level is None:
raise NotImplementedError
else:
if not externals.exists('pywt wp reconstruct'):
raise NotImplementedError, \
"Reconstruction for a single level for versions of " \
"pywt < 0.1.7 (revision 103) is not supported"
if not externals.exists('pywt wp reconstruct fixed'):
warning("%s: Reverse mapping with this version of 'pywt' might "
"result in incorrect data in the tails of the signal. "
"Please check for an update of 'pywt', or be careful "
"when interpreting the edges of the reverse mapped "
"data." % self.__class__.__name__)
return self.__reverse_single_level(data)
class WaveletTransformationMapper(_WaveletMapper):
"""Convert signal into wavelet representaion
"""
def _wm_forward(self, data):
"""Decompose signal into wavelets's coefficients via dwt
"""
if __debug__:
debug('MAP', "Converting signal using DWT")
wd = None
coeff_lengths = None
for indexes in _get_indexes(data.shape, self._dim):
if __debug__:
debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
coeffs = pywt.wavedec(
data[indexes],
wavelet=self._wavelet,
mode=self._mode,
level=self._maxlevel)
            # <NAME> embeds extraction of statistics right in place
#stats = []
#for coeff in coeffs:
# stats_ = [np.std(coeff),
# np.sqrt(np.dot(coeff, coeff)),
# ]# + list(np.histogram(coeff, normed=True)[0]))
# stats__ = list(coeff) + stats_[:]
# stats__ += list(np.log(stats_))
# stats__ += list(np.sqrt(stats_))
# stats__ += list(np.array(stats_)**2)
# stats__ += [ np.median(coeff), np.mean(coeff), scipy.stats.kurtosis(coeff) ]
# stats.append(stats__)
#coeffs = stats
coeff_lengths_ = np.array([len(x) for x in coeffs])
if coeff_lengths is None:
coeff_lengths = coeff_lengths_
assert((coeff_lengths == coeff_lengths_).all())
if wd is None:
newdim = list(data.shape)
newdim[self._dim] = np.sum(coeff_lengths)
wd = np.empty( tuple(newdim) )
coeff = np.hstack(coeffs)
wd[indexes] = coeff
if __debug__:
debug('MAP_', "")
debug('MAP', "Done DWT. Total size %s" % str(wd.shape))
self.lengths = coeff_lengths
return wd
def _wm_reverse(self, wd):
if __debug__:
debug('MAP', "Performing iDWT")
signal = None
wd_offsets = [0] + list(np.cumsum(self.lengths))
nlevels = len(self.lengths)
Ntime_points = self._intimepoints #len(time_points)
# unfortunately sometimes due to padding iDWT would return longer
# sequences, thus we just limit to the right ones
for indexes in _get_indexes(wd.shape, self._dim):
if __debug__:
debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
wd_sample = wd[indexes]
wd_coeffs = [wd_sample[wd_offsets[i]:wd_offsets[i+1]] for i in xrange(nlevels)]
# need to compose original list
time_points = pywt.waverec(
wd_coeffs, wavelet=self._wavelet, mode=self._mode)
if signal is None:
newdim = list(wd.shape)
newdim[self._dim] = Ntime_points
signal = np.empty(newdim)
signal[indexes] = time_points[:Ntime_points]
if __debug__:
debug('MAP_', "")
debug('MAP', "Done iDWT. Total size %s" % (signal.shape, ))
return signal
```
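A minimal sketch (not part of the original file) of the DWT mapper above; it assumes PyMVPA and pywt are installed, and calls the internal `_forward_data`/`_reverse_data` methods directly for illustration:
```python
import numpy as np

data = np.random.randn(10, 128)  # 10 samples, 128 time points, transform along axis 1
mapper = WaveletTransformationMapper(dim=1, wavelet='sym4')
coeffs = mapper._forward_data(data)   # concatenated DWT coefficients per sample
recon = mapper._reverse_data(coeffs)  # iDWT, trimmed back to 128 time points
print(coeffs.shape, recon.shape)      # coefficient length depends on wavelet and level
```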
#### File: support/nibabel/surf_gifti.py
```python
from mvpa2.base import externals
if externals.exists("nibabel", raise_=True):
from nibabel.gifti import gifti, giftiio
import numpy as np, os, re
from mvpa2.support.nibabel import surf
import io
def _get_single_array(g, intent):
ar = g.getArraysFromIntent(intent)
n = len(ar)
if n != 1:
len_str = 'no' if n == 0 else '%d' % n
raise ValueError('Found %s arrays matching %s, expected 1' %
(len_str, intent))
return ar[0]
def read(fn):
'''Reads a GIFTI surface file
Parameters
----------
fn: str
Filename
Returns
-------
surf_: surf.Surface
Surface
Notes
-----
Any meta-information stored in the GIFTI file is not present in surf_.
'''
g = giftiio.read(fn)
vertices = _get_single_array(g, 'NIFTI_INTENT_POINTSET').data
faces = _get_single_array(g, 'NIFTI_INTENT_TRIANGLE').data
return surf.Surface(vertices, faces)
def filename2vertices_faces_metadata(fn):
'''Attempts to get meta data based on the filename
Parameters
----------
fn: str
Filename
Returns
-------
meta: tuple
Tuple with two gifti.GiftiMetaData objects for vertices
and faces. If the filename contains exactly one of 'lh', 'rh', or
'mh' then it is assumed to be of left, right or merged hemispheres.
        If the filename contains exactly one of 'pial', 'smoothwm',
        'intermediate', 'inflated', 'sphere', 'flat', then the geometric
        type is set
'''
_, fn = os.path.split(fn)
vertex_map = dict(AnatomicalStructurePrimary=dict(
lh='CortexLeft',
rh='CortexRight',
mh='CortexRightLeft'),
AnatomicalStructureSecondary=dict(
pial='Pial',
smoothwm='GrayWhite',
intermediate='MidThickness'),
GeometricType=dict(
pial='Anatomical',
smoothwm='Anatomical',
intermediate='Anatomical',
inflated='Inflated',
sphere='Spherical',
flat='Flat'))
def just_one(dict_, fn=fn):
vs = [v for k, v in dict_.iteritems() if k in fn]
return vs[0] if len(vs) == 1 else None
v_meta = [gifti.GiftiNVPairs('Name', fn)]
for key, dict_ in vertex_map.iteritems():
v = just_one(dict_)
if v is not None:
v_meta.append(gifti.GiftiNVPairs(key, v))
f_meta = [gifti.GiftiNVPairs('Name', fn)]
# XXX maybe also closed or open topology? that's a bit tricky though
v = gifti.GiftiMetaData()
v.data.extend(v_meta)
f = gifti.GiftiMetaData()
f.data.extend(f_meta)
return v, f
def to_gifti_image(s, add_indices=False, swap_LPI_RAI=False):
'''
Converts a surface to nibabel's gifti format.
Parameters
----------
s: surf
Input surface
add_indices: True or False (default: False)
if True then indices of the nodes are added.
Note: caret may not be able to read these
swap_LPI_RAI: True or False (default: False)
If True then the diagonal elements of the xform matrix
are set to [-1,-1,1,1], otherwise to [1,1,1,1].
Returns
-------
img: gifti.GiftiImage
        Surface represented as a GiftiImage
'''
vertices = gifti.GiftiDataArray(np.asarray(s.vertices, np.float32))
vertices.intent = gifti.intent_codes.field1['pointset']
vertices.datatype = 16 # this is what gifti likes
if add_indices:
nvertices = s.nvertices
indices = gifti.GiftiDataArray(np.asarray(np.arange(nvertices), np.int32))
indices.datatype = 8 # this is what gifti likes
indices.coordsys = None # otherwise SUMA might complain
indices.intent = gifti.intent_codes.field1['node index']
faces = gifti.GiftiDataArray(np.asarray(s.faces, np.int32))
faces.intent = gifti.intent_codes.field1['triangle']
faces.datatype = 8 # this is what gifti likes
faces.coordsys = None # otherwise SUMA might complain
# set some fields common to faces and vertices
for arr in (vertices, faces) + ((indices,) if add_indices else ()):
arr.ind_ord = 1
arr.encoding = 3
arr.endian = 'LittleEndian' # XXX this does not work (see below)
arr.dims = list(arr.data.shape)
if externals.versions['nibabel'] < '2.1':
# in later versions it is a computed property
arr.num_dim = len(arr.dims)
# make the image
meta = gifti.GiftiMetaData()
labeltable = gifti.GiftiLabelTable()
img = gifti.GiftiImage(meta=meta, labeltable=labeltable)
if swap_LPI_RAI:
xform = np.asarray(vertices.coordsys.xform)
xform[0, 0] = -1
xform[1, 1] = -1
vertices.coordsys.xform = xform
if add_indices:
img.add_gifti_data_array(indices)
img.add_gifti_data_array(vertices)
img.add_gifti_data_array(faces)
return img
def to_xml(img, meta_fn_hint=None):
'''Converts to XML
Parameters
----------
img: gifti.GiftiImage or surf
Input surface
meta_fn_hint: str or None
        If not None, it should be a string (possibly a filename) that
        describes what kind of surface this is.
See filename2vertices_faces_metadata.
Returns
-------
xml: bytearray
Representation of input surface in XML format
'''
if isinstance(img, surf.Surface):
img = to_gifti_image(img)
if meta_fn_hint is not None:
vertices = _get_single_array(img, 'pointset')
faces = _get_single_array(img, 'triangle')
vertices.meta, faces.meta = \
filename2vertices_faces_metadata(meta_fn_hint)
# XXX FIXME from here on it's a bit of a hack
# The to_xml() method adds newlines in <DATA>...</DATA> segments
# and also writes GIFTI_ENDIAN_LITTLE instead of LittleEndian.
# For now we just replace these offending parts
# TODO: report issue to nibabel developers
xml = img.to_xml().encode('utf-8')
    # split by data segments. Odd elements are data, even are surrounding
sps = re.split(b'([<]Data[>][^<]*?[<][/]Data[>])', xml, re.DOTALL)
# fix the LittleEndian issue for even segments and newline for odd ones
fix_odd_even = lambda x, i: x.replace(b'\n', b'') \
if i % 2 == 1 \
else x.replace(b'Endian="GIFTI_ENDIAN_LITTLE"',
b'Endian="LittleEndian"')
xml_fixed = b''.join(fix_odd_even(sp, i) for i, sp in enumerate(sps))
return xml_fixed
def write(fn, s, overwrite=True):
'''Writes a GIFTI surface file
Parameters
----------
fn: str
Filename
s: surf.Surface
Surface
    overwrite: bool (default: True)
        If set to False an error is raised if the file exists
'''
if not overwrite and os.path.exists(fn):
raise ValueError("Already exists: %s" % fn)
EXT = '.surf.gii'
if not fn.endswith(EXT):
raise ValueError("Filename %s does not end with required"
" extension %s" % (fn, EXT))
xml = to_xml(s, fn)
with io.FileIO(fn, 'wb') as f:
n = f.write(xml)
if n != len(xml):
raise ValueError("Not all bytes written to %s" % fn)
``` |
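A minimal usage sketch (not part of the original file); the file names are placeholders, and the module is assumed importable as `mvpa2.support.nibabel.surf_gifti`:
```python
from mvpa2.support.nibabel import surf_gifti

s = surf_gifti.read('lh.intermediate.surf.gii')  # hypothetical input surface
print(s.nvertices)
# the output name must end in '.surf.gii'; hemisphere/geometry metadata is guessed from it
surf_gifti.write('lh.inflated.surf.gii', s, overwrite=True)
```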
{
"source": "jiajunhua/qjadud1994-Text_Detector",
"score": 2
} |
#### File: qjadud1994-Text_Detector/Pytorch/encoder.py
```python
import math
import torch
import numpy as np
from utils import meshgrid, box_iou, change_box_order, softmax
from nms_poly import non_max_suppression_poly
class DataEncoder:
def __init__(self, cls_thresh=0.3, nms_thresh=0.1):
self.anchor_areas = [16*16., 32*32., 64*64., 128*128., 256*256, 512*512.] # v3
self.aspect_ratios = [1., 2., 3., 5., 1./2., 1./3., 1./5.] # v3
#self.anchor_areas = [30*30., 70*70., 120*120., 250*250., 320*320. ,450*450.] #v5
#self.aspect_ratios = [1.0, 1.5, 2.0, 3.0, 5.0, 0.5, 0.2] #v5
self.anchor_wh = self._get_anchor_wh()
self.cls_thresh = cls_thresh
self.nms_thresh = nms_thresh
def _get_anchor_wh(self):
'''Compute anchor width and height for each feature map.
Returns:
anchor_wh: (tensor) anchor wh, sized [#fm, #anchors_per_cell, 2].
'''
anchor_wh = []
for s in self.anchor_areas:
for ar in self.aspect_ratios: # w/h = ar
anchor_h = math.sqrt(s/ar)
anchor_w = ar * anchor_h
anchor_wh.append([anchor_w, anchor_h])
num_fms = len(self.anchor_areas)
return torch.FloatTensor(anchor_wh).view(num_fms, -1, 2)
def _get_anchor_boxes(self, input_size):
'''Compute anchor boxes for each feature map.
Args:
input_size: (tensor) model input size of (w,h).
Returns:
boxes: (list) anchor boxes for each feature map. Each of size [#anchors,4],
where #anchors = fmw * fmh * #anchors_per_cell
'''
num_fms = len(self.anchor_areas)
fm_sizes = [(input_size/pow(2.,i+2)).ceil() for i in range(num_fms)] # p2 -> p7 feature map sizes
boxes = []
for i in range(num_fms):
fm_size = fm_sizes[i]
grid_size = input_size / fm_size
fm_w, fm_h = int(fm_size[0]), int(fm_size[1])
fm_w *= 2 # add vertical offset
xy = meshgrid(fm_w,fm_h) + 0.5
xy = (xy*grid_size).view(fm_w,fm_h,1,2).expand(fm_w,fm_h,len(self.aspect_ratios),2)
wh = self.anchor_wh[i].view(1,1,len(self.aspect_ratios),2).expand(fm_w,fm_h,len(self.aspect_ratios),2)
box = torch.cat([xy,wh], 3) # [x,y,w,h]
boxes.append(box.view(-1,4))
return torch.cat(boxes, 0)
def encode(self, gt_quad_boxes, labels, input_size):
'''Encode target bounding boxes and class labels.
TextBoxes++ quad_box encoder:
tx_n = (x_n - anchor_x) / anchor_w
ty_n = (y_n - anchor_y) / anchor_h
Args:
gt_quad_boxes: (tensor) bounding boxes of (xyxyxyxy), sized [#obj, 8].
labels: (tensor) object class labels, sized [#obj, ].
input_size: (int/tuple) model input size of (w,h).
Returns:
loc_targets: (tensor) encoded bounding boxes, sized [#anchors,8].
cls_targets: (tensor) encoded class labels, sized [#anchors,].
'''
input_size = torch.Tensor([input_size,input_size]) if isinstance(input_size, int) \
else torch.Tensor(input_size)
anchor_rect_boxes = self._get_anchor_boxes(input_size) #(num_anchor, 8)
anchor_quad_boxes = change_box_order(anchor_rect_boxes, "xywh2quad") #(num_anchor, 4)
gt_rect_boxes = change_box_order(gt_quad_boxes, "quad2xyxy")
ious = box_iou(anchor_rect_boxes, gt_rect_boxes)
max_ious, max_ids = ious.max(1)
#Each anchor box matches the largest iou with the gt box
gt_quad_boxes = gt_quad_boxes[max_ids] #(num_gt_boxes, 8)
gt_rect_boxes = gt_rect_boxes[max_ids] #(num_gt_boxes, 4)
# for Rectangle boxes -> using in TextBoxes
#gt_rect_boxes = change_box_order(gt_rect_boxes, "xyxy2xywh")
#loc_rect_yx = (gt_rect_boxes[:, :2] - anchor_rect_boxes[:, :2]) / anchor_rect_boxes[:, 2:]
#loc_rect_hw = torch.log(gt_rect_boxes[:, 2:] / anchor_rect_boxes[:, 2:])
# for Quad boxes -> using in TextBoxes++
anchor_boxes_hw = anchor_rect_boxes[:, 2:4].repeat(1, 4)
loc_quad_yx = (gt_quad_boxes - anchor_quad_boxes) / anchor_boxes_hw
#loc_targets = torch.cat([loc_rect_yx, loc_rect_hw, loc_quad_yx], dim=1) # (num_anchor, 12)
loc_targets = loc_quad_yx
cls_targets = labels[max_ids]
cls_targets[max_ious<0.5] = -1 # ignore (0.4~0.5) : -1
cls_targets[max_ious<0.4] = 0 # background (0.0~0.4): 0
# positive (0.5~1.0) : 1
return loc_targets, cls_targets
def decode(self, loc_preds, cls_preds, input_size):
        '''Decode outputs back to bounding box locations and class labels.
Args:
loc_preds: (tensor) predicted locations, sized [#anchors, 8].
cls_preds: (tensor) predicted class labels, sized [#anchors, ].
input_size: (int/tuple) model input size of (w,h).
Returns:
boxes: (tensor) decode box locations, sized [#obj,8].
labels: (tensor) class labels for each box, sized [#obj,].
'''
input_size = torch.Tensor([input_size,input_size]) if isinstance(input_size, int) \
else torch.Tensor(input_size)
anchor_rect_boxes = self._get_anchor_boxes(input_size).cuda()
anchor_quad_boxes = change_box_order(anchor_rect_boxes, "xywh2quad")
quad_boxes = anchor_quad_boxes + anchor_rect_boxes[:, 2:4].repeat(1, 4) * loc_preds # [#anchor, 8]
quad_boxes = torch.clamp(quad_boxes, 0, input_size[0])
score, labels = cls_preds.sigmoid().max(1) # focal loss
#score, labels = softmax(cls_preds).max(1) # OHEM+softmax
# Classification score Threshold
ids = score > self.cls_thresh
ids = ids.nonzero().squeeze() # [#obj,]
score = score[ids]
labels = labels[ids]
quad_boxes = quad_boxes[ids].view(-1, 4, 2)
quad_boxes = quad_boxes.cpu().data.numpy()
score = score.cpu().data.numpy()
        if len(score.shape) == 0:
return quad_boxes, labels, score
else:
keep = non_max_suppression_poly(quad_boxes, score, self.nms_thresh)
return quad_boxes[keep], labels[keep], score[keep]
def debug():
encoder = DataEncoder()
anchor_wh = encoder._get_anchor_wh()
input_size = 32
input_size = torch.Tensor([input_size,input_size])
anchor = encoder._get_anchor_boxes(input_size)
print("anchor.size() : ", anchor.size())
for i in anchor:
print(i)
exit()
test = torch.randn((3, 8))
#test2 = torch.reshape(test, (-1, 4, 2))
test2 = test.view((-1, 4, 2))
print("test : ", test, test.size())
print("test2 : ", test2, test2.size())
gt_quad_boxes = torch.randn((1400, 8))
labels = torch.randn((1400, 1))
result_encode = encoder.encode(gt_quad_boxes, labels, input_size)
print(result_encode[0].size())
print(result_encode[1].size())
#debug()
```
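A small sketch (not part of the original file) of the IoU-based label assignment used in `encode()`: anchors with a maximum IoU below 0.4 become background, those between 0.4 and 0.5 are ignored, and the rest keep their matched class:
```python
import torch

max_ious = torch.tensor([0.55, 0.45, 0.30, 0.80])  # toy max IoU per anchor
cls_targets = torch.ones(4).long()                 # matched class ids (all class 1 here)
cls_targets[max_ious < 0.5] = -1                   # 0.4-0.5: ignored by the loss
cls_targets[max_ious < 0.4] = 0                    # below 0.4: background
print(cls_targets)                                 # tensor([ 1, -1,  0,  1])
```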
#### File: Tensorflow/Detector/input_producer.py
```python
import os
import tensorflow as tf
slim = tf.contrib.slim
class InputProducer(object):
def __init__(self, preprocess_image_fn=None, vertical_image=False):
self.vertical_image = vertical_image
self._preprocess_image = preprocess_image_fn if preprocess_image_fn is not None \
else self._default_preprocess_image_fn
self.ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'shape': 'Shape of the image',
'object/bbox': 'A list of bounding boxes, one per each object.',
'object/label': 'A list of labels, one per each object.',
}
self.SPLITS_TO_SIZES = {
'train_IC13': 229,
'val_IC13': 233,
'train_2': 850000,
'val_2': 8750,
'train_quad': 850000,
'val_quad': 8750,
'train_IC15': 1000,
'val_IC15': 500,
'train_IC15_mask': 1000,
'val_IC15_mask': 500
}
self.FILE_PATTERN = '%s.record'
def num_classes(self):
return 20
def get_split(self, split_name, dataset_dir, is_rect=True):
"""Gets a dataset tuple with instructions for reading Pascal VOC dataset.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in self.SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
file_pattern = os.path.join(dataset_dir, self.FILE_PATTERN % split_name)
reader = tf.TFRecordReader
if is_rect: # Rect annotations
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
else: #Quad annotations
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),
'image/object/bbox/y0': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/x0': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/y1': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/x1': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/y2': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/x2': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/y3': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/x3': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'object/quad1': slim.tfexample_decoder.BoundingBox(
['y0', 'x0', 'y1', 'x1'], 'image/object/bbox/'),
'object/quad2': slim.tfexample_decoder.BoundingBox(
['y2', 'x2', 'y3', 'x3'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
#if has_labels(dataset_dir):
# labels_to_names = read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=self.SPLITS_TO_SIZES[split_name],
items_to_descriptions=self.ITEMS_TO_DESCRIPTIONS,
num_classes=self.num_classes(),
labels_to_names=labels_to_names)
def _default_preprocess_image_fn(self, image, is_train=True):
return image
```
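A hedged sketch (not part of the original file) of how the returned slim `Dataset` is typically consumed; `/path/to/tfrecords` is a placeholder directory that must contain `train_IC13.record`:
```python
import tensorflow as tf
slim = tf.contrib.slim

producer = InputProducer()
dataset = producer.get_split('train_IC13', '/path/to/tfrecords', is_rect=True)
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=True)
image, bboxes, labels = provider.get(['image', 'object/bbox', 'object/label'])
```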
#### File: Tensorflow/utils/bbox.py
```python
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
def get_mean_and_std(dataset, max_load=10000):
    """Compute the mean and std value of a dataset."""
    # NOTE: this helper still relies on PyTorch tensors, so torch must be available.
    import torch
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
    mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
N = min(max_load, len(dataset))
for i in range(N):
print(i)
im,_,_ = dataset.load(1)
for j in range(3):
mean[j] += im[:,j,:,:].mean()
std[j] += im[:,j,:,:].std()
mean.div_(N)
std.div_(N)
return mean, std
def change_box_order(boxes, order):
'''Change box order between (xmin,ymin,xmax,ymax) and (xcenter,ycenter,width,height).
Args:
boxes: (tensor) bounding boxes, sized [num anchors, 4].
Returns:
(tensor) converted bounding boxes, sized [num anchor, 4].
'''
    if order == 'yxyx2yxhw':
y_min, x_min, y_max, x_max = tf.split(value=boxes, num_or_size_splits=4, axis=1)
x = (x_min + x_max) / 2
y = (y_min + y_max) / 2
w = x_max - x_min
h = y_max - y_min
new_boxes = tf.concat([y,x,h,w], axis=1)
    elif order == 'yxhw2yxyx':
y, x, h, w = tf.split(value=boxes, num_or_size_splits=4, axis=1)
x_min = x - w/2
x_max = x + w/2
y_min = y - h/2
y_max = y + h/2
new_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=1)
    elif order == 'xyxy2yxyx':
x_min, y_min, x_max, y_max = tf.split(value=boxes, num_or_size_splits=4, axis=1)
new_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=1)
    elif order == 'yxyx2xyxy':
y_min, x_min, y_max, x_max = tf.split(value=boxes, num_or_size_splits=4, axis=1)
new_boxes = tf.concat([x_min, y_min, x_max, y_max], axis=1)
elif order is "yxhw2quad":
"""rect : [num_boxes, 4] #yxhw, / quad : [num_boxes, 8]"""
y0, x0, h0, w0 = tf.split(value=boxes, num_or_size_splits=4, axis=1)
new_boxes = tf.concat([y0-h0/2, x0-w0/2,
y0-h0/2, x0+w0/2,
y0+h0/2, x0+w0/2,
y0+h0/2, x0-w0/2], axis=1)
elif order is "quad2yxyx":
"""quad : [num_boxes, 8] / rect : [num_boxes, 4] #yxyx"""
boxes = tf.reshape(boxes, (-1, 4, 2))
new_boxes = tf.concat([tf.reduce_min(boxes[:, :, 0:1], axis=1),
tf.reduce_min(boxes[:, :, 1:2], axis=1),
tf.reduce_max(boxes[:, :, 0:1], axis=1),
tf.reduce_max(boxes[:, :, 1:2], axis=1)], axis=1)
return new_boxes
def box_iou(box1, box2, order='xyxy'):
'''Compute the intersection over union of two set of boxes.
The default box order is (xmin, ymin, xmax, ymax).
Args:
box1: (tensor) bounding boxes, sized [N,4].
box2: (tensor) bounding boxes, sized [M,4].
order: (str) box order, either 'xyxy' or 'xywh'.
Return:
(tensor) iou, sized [N,M].
Reference:
https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py
'''
box1 = change_box_order(box1, "xywh2xyxy")
lt = tf.reduce_max([box1[:, :2], box2[:, :2]]) # [N,M,2]
rb = tf.reduce_max([box1[:, 2:], box2[:, 2:]]) # [N,M,2]
print(lt, rb)
wh = tf.clip_by_value(rb-lt+1, 0, float('nan'))
print(wh)
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
area1 = (box1[:, 2]-box1[:, 0]+1) * (box1[:, 3]-box1[:, 1]+1) # [N,]
area2 = (box2[:, 2]-box2[:, 0]+1) * (box2[:, 3]-box2[:, 1]+1) # [M,]
iou = inter / (area1[:, None] + area2 - inter)
return iou
def draw_bboxes(image, boxes, labels):
boxes = np.array(boxes, dtype=np.int32)
for box, label in zip(boxes, labels):
ymin, xmin, ymax, xmax = box
image = cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0,255,0), 3)
#image = cv2.putText(image, str(label), (box[0]+15, box[1]), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
return image
def draw_boxes(img, bboxes, classes, scores):
if len(bboxes) == 0:
return img
#height, width, _ = img.shape
width, height = img.size
#image = Image.fromarray(img)
image = img
font = ImageFont.truetype(
font='/root/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.4).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
draw = ImageDraw.Draw(image)
for box, category, score in zip(bboxes, classes, scores):
y1, x1, y2, x2 = [int(i) for i in box]
p1 = (x1, y1)
p2 = (x2, y2)
label = '{} {:.1f}% '.format(category, score * 100)
label_size = draw.textsize(label)
text_origin = np.array([p1[0], p1[1] - label_size[1]])
color = np.array((0,255,0))
for i in range(thickness):
draw.rectangle(
[p1[0] + i, p1[1] + i, p2[0] - i, p2[1] - i],
outline=tuple(color))
draw.rectangle(
[tuple(text_origin),
tuple(text_origin + label_size)],
fill=tuple(color))
draw.text(
tuple(text_origin),
label, fill=(0, 0, 0),
font=font)
del draw
return np.array(image)
def draw_text_boxes(img, bboxes, color=(0,255,0)):
if len(bboxes) == 0:
return img
width, height = img.size
image = img
draw = ImageDraw.Draw(image)
thickness = (image.size[0] + image.size[1]) // 400
for box in bboxes:
y1, x1, y2, x2 = [int(i) for i in box]
p1 = (x1, y1)
p2 = (x2, y2)
color = np.array(color)
for i in range(thickness):
draw.rectangle(
[p1[0] + i, p1[1] + i, p2[0] - i, p2[1] - i],
outline=tuple(color))
del draw
return image
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes following order [ymin, xmin, ymax, xmax]
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist, num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1, num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2, num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
boxlist1 = change_box_order(boxlist1, "yxhw2yxyx")
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
def bboxes_jaccard(bbox_ref, bboxes, name=None):
"""Compute jaccard score between a reference box and a collection
of bounding boxes.
Args:
bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
bboxes: (N, 4) Tensor, collection of bounding boxes.
Return:
(N,) Tensor with Jaccard scores.
"""
with tf.name_scope(name, 'bboxes_jaccard'):
# Should be more efficient to first transpose.
bboxes = tf.transpose(bboxes)
bbox_ref = tf.transpose(bbox_ref)
# Intersection bbox and volume.
int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
h = tf.maximum(int_ymax - int_ymin, 0.)
w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
inter_vol = h * w
bboxes_vol = (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1])
#jaccard = tfe_math.safe_divide(inter_vol, union_vol, 'jaccard')
#return jaccard
return tf.where(
tf.greater(bboxes_vol, 0),
tf.divide(inter_vol, bboxes_vol),
tf.zeros_like(inter_vol),
name='jaccard')
'''
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 86.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
'''
``` |
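A small sketch (not part of the original file) of the TF box utilities above; it assumes TensorFlow 1.x and that `iou()` is importable from this module:
```python
import tensorflow as tf

boxes_yxhw = tf.constant([[50., 50., 20., 40.]])  # (y_center, x_center, h, w)
boxes_yxyx = tf.constant([[40., 30., 60., 70.]])  # (y_min, x_min, y_max, x_max)
overlap = iou(boxes_yxhw, boxes_yxyx)             # iou() converts its first argument internally
with tf.Session() as sess:
    print(sess.run(overlap))                      # [[1.]] -- the two boxes are identical
```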
{
"source": "jiajunhua/StevenLei2017-AI_projects",
"score": 2
} |
#### File: 002_目标检测实践_keras版Mask-RCNN训练自己的数据/code/_06_test_one_image.py
```python
import os
import sys
import cv2
import time
import numpy as np
import json
# Root directory of the project
ROOT_DIR = os.path.abspath("../resources/")
# Import the Mask RCNN library
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
import mrcnn.model as modellib
from mrcnn import visualize
# Get the file paths inside a directory
def get_filePathList(dirPath, partOfFileName=''):
allFileName_list = next(os.walk(dirPath))[2]
fileName_list = [k for k in allFileName_list if partOfFileName in k]
filePath_list = [os.path.join(dirPath, k) for k in fileName_list]
return filePath_list
# Parse a dict from the config json file path
def get_jsonDict(config_jsonFilePath):
with open(config_jsonFilePath, 'r', encoding='utf8') as file:
fileContent = file.read()
json_dict = json.loads(fileContent)
className_list = json_dict['className_list']
className_list = [k.strip() for k in className_list]
className_list = sorted(className_list, reverse=False)
json_dict['className_list'] = className_list
return json_dict
# Model inference configuration class
class InferenceConfig(Config):
def __init__(self, config_dict):
super(InferenceConfig, self).__init__()
self.NAME = config_dict['source']
self.BACKBONE = config_dict['backbone']
self.GPU_COUNT = 1
self.IMAGES_PER_GPU = 1
        self.BATCH_SIZE = 1
self.NUM_CLASSES = 1 + len(config_dict['className_list'])
self.IMAGE_MIN_DIM = min(config_dict['image_width'], config_dict['image_height'])
self.IMAGE_MAX_DIM = max(config_dict['image_width'], config_dict['image_height'])
self.IMAGE_SHAPE = np.array([config_dict['image_height'], config_dict['image_width'], 3])
self.IMAGE_META_SIZE = 15
self.RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
self.TRAIN_ROIS_PER_IMAGE = 32
self.STEPS_PER_EPOCH = config_dict['steps_per_epoch']
self.VALIDATION_STEPS = 20
self.LEARNING_RATE = 0.001
# Get the model object
def get_model(model_dirPath, config_dict):
model = modellib.MaskRCNN(mode="inference",
config=InferenceConfig(config_dict),
model_dir=model_dirPath)
weights_path = model.find_last()
print('模型加载权重文件,权重文件的路径:%s' %weights_path)
model.load_weights(weights_path, by_name=True)
return model
# Define the detection function and draw the detection results with matplotlib
def detect_image(model, imageFilePath, config_dict):
image_ndarray = cv2.imread(imageFilePath)[:, :, ::-1]
results = model.detect([image_ndarray], verbose=0)
r = results[0]
className_list = ['BG'] + config_dict['className_list']
visualize.display_instances(image_ndarray, r['rois'], r['masks'], r['class_ids'],
className_list, r['scores'], figsize=(8, 8))
# Parse the arguments passed when invoking this script
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_dirPath',
type=str,
help='模型权重文件夹路径',
default='../download_resources/logs')
parser.add_argument('-i', '--image_dirPath',
type=str,
help='图片文件夹路径',
default='../download_resources/n01440764')
parser.add_argument('--image_suffix',
type=str,
help='图片文件的后缀',
default='.jpg')
parser.add_argument('-c', '--config',
type=str,
help='模型配置json文件路径',
default='../resources/model_config.json')
argument_namespace = parser.parse_args()
return argument_namespace
if __name__ == '__main__':
    # Parse the passed-in arguments
argument_namespace = parse_args()
model_dirPath = argument_namespace.model_dirPath.strip()
image_dirPath = argument_namespace.image_dirPath.strip()
image_suffix = argument_namespace.image_suffix.strip()
config_jsonFilePath = argument_namespace.config.strip()
    # Get the model config dict and instantiate the model object
config_dict = get_jsonDict(config_jsonFilePath)
model = get_model(model_dirPath, config_dict)
    # Get the image file paths
imageFilePath_list = get_filePathList(image_dirPath, image_suffix)
assert len(imageFilePath_list), 'no image in image directory path, please check your input parameters: image_dirPath , image_suffix'
imageFilePath = np.random.choice(imageFilePath_list, 1)[0]
print('对此文件路径的图片做检测:%s'%imageFilePath)
    # Run detection on a single image
detect_image(model, imageFilePath, config_dict)
```
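A minimal programmatic sketch (not part of the original file); the directories are the script's own defaults, the image name is a placeholder, and a trained checkpoint is assumed to exist under the logs directory:
```python
config_dict = get_jsonDict('../resources/model_config.json')
model = get_model('../download_resources/logs', config_dict)
detect_image(model, '../download_resources/n01440764/example.jpg', config_dict)
```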
#### File: 005_模型部署_在线预测/code/_01_image_server.py
```python
# -*- coding: utf-8 -*-
# Import common libraries
import time
import os
import cv2
import numpy as np
# Import the Flask class and the request object from flask
from flask import request, Flask
app = Flask(__name__)
# Import pickle and load the pixel mean (pixel_mean) subtracted during image preprocessing
import pickle
with open('../resources/pixel_mean.pickle', 'rb') as file:
pixel_mean = pickle.load(file)
# Define the dict id2name_dict that maps class indices to class names
className_list = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
id2name_dict = {a:b for a, b in enumerate(className_list)}
# Load the ResNet56 image classification model
from keras.models import load_model
from keras.optimizers import Adam
model_filePath = '../resources/cifar10_ResNet56v2_model.162.h5'
model = load_model(model_filePath)
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001),
metrics=['accuracy'])
# Get the image data matrix from an image file path
def get_imageNdarray(imageFilePath):
image_ndarray = cv2.imread(imageFilePath)
resized_image_ndarray = cv2.resize(image_ndarray,
(32, 32),
interpolation=cv2.INTER_AREA)
return resized_image_ndarray
# Image processing required before model prediction
def process_imageNdarray(image_ndarray, pixel_mean):
rgb_image_ndarray = image_ndarray[:, :, ::-1]
image_ndarray_1 = rgb_image_ndarray / 255
image_ndarray_2 = image_ndarray_1 - pixel_mean
return image_ndarray_2
# Use the model to classify the image at the given file path; returns the predicted class name
def predict_image(model, imageFilePath, id2name_dict):
image_ndarray = get_imageNdarray(imageFilePath)
processed_image_ndarray = process_imageNdarray(image_ndarray, pixel_mean)
inputs = processed_image_ndarray[np.newaxis, ...]
predict_Y = model.predict(inputs)[0]
predict_y = np.argmax(predict_Y)
predict_className = id2name_dict[predict_y]
print('对此图片路径 %s 的预测结果为 %s' %(imageFilePath, predict_className))
return predict_className
# Define the callback that receives POST requests to / and returns the prediction result
@app.route("/", methods=['POST'])
def anyname_you_like():
startTime = time.time()
received_file = request.files['file']
imageFileName = received_file.filename
if received_file:
received_dirPath = '../resources/received_images'
if not os.path.isdir(received_dirPath):
os.makedirs(received_dirPath)
imageFilePath = os.path.join(received_dirPath, imageFileName)
received_file.save(imageFilePath)
print('图片文件保存到此路径:%s' % imageFilePath)
usedTime = time.time() - startTime
print('接收图片并保存,总共耗时%.2f秒' % usedTime)
startTime = time.time()
predict_className = predict_image(model, imageFilePath, id2name_dict)
usedTime = time.time() - startTime
print('完成对接收图片的分类预测,总共耗时%.2f秒' % usedTime)
return predict_className
else:
return 'failed'
# Main entry point
if __name__ == "__main__":
print('在开启服务前,先测试predict_image函数')
imageFilePath = '../resources/images/001.jpg'
predict_className = predict_image(model, imageFilePath, id2name_dict)
app.run("127.0.0.1", port=5000)
```
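A hedged client-side sketch (not part of the original file) for exercising this service once it is running:
```python
import requests

with open('../resources/images/001.jpg', 'rb') as f:
    response = requests.post('http://127.0.0.1:5000/', files={'file': f})
print(response.text)  # predicted class name, e.g. 'cat'
```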
#### File: 005_模型部署_在线预测/code/_11_yolov3_server_3.py
```python
from _06_yolov3 import Detector
# Import common libraries
import numpy as np
import time
import os
from PIL import Image
# Import the flask library
from flask import Flask, render_template, request, jsonify
# Import base64 for converting image files to strings
import base64
from yolo3.utils import letterbox_image
# Import the drawing helper from the code file _12_yolov3_client_3.py
from _12_yolov3_client_3 import get_drawedImage
# Instantiate the Flask service object and assign it to the variable server
server = Flask(__name__)
# Make changes to html files take effect immediately while the web service is running
server.jinja_env.auto_reload = True
server.config['TEMPLATES_AUTO_RELOAD'] = True
# Instantiate the detector object
detector = Detector(
weights_h5FilePath='../resources/yolov3/yolov3_weights.h5',
anchor_txtFilePath='../resources/yolov3/yolov3_anchors.txt',
category_txtFilePath='../resources/yolov3/coco.names'
)
# Get the fractional part of the current timestamp as a string, accurate to 0.1 ms
def get_secondFloat(timestamp):
secondFloat = ('%.4f' %(timestamp%1))[1:]
return secondFloat
# Get a string representation of the current time, accurate to 0.1 ms
def get_timeString():
now_timestamp = time.time()
now_structTime = time.localtime(now_timestamp)
timeString_pattern = '%Y%m%d_%H%M%S'
now_timeString_1 = time.strftime(timeString_pattern, now_structTime)
now_timeString_2 = get_secondFloat(now_timestamp)
now_timeString = now_timeString_1 + now_timeString_2
return now_timeString
# Get object detection results using the YOLOv3 algorithm
def get_detectResult(image):
startTime = time.time()
boxed_image = letterbox_image(image, (416, 416))
image_data = np.array(boxed_image).astype('float') / 255
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
    # Run the model's network graph
box_ndarray, classId_ndarray, score_ndarray = detector.session.run(
[detector.boxes, detector.classes, detector.scores],
feed_dict={
detector.yolo_model.input: image_data,
detector.input_image_size: [image.size[1], image.size[0]],
}
)
box_ndarray = box_ndarray[:, [1,0,3,2]]
return box_ndarray, classId_ndarray, score_ndarray
# Get the parameter dict from the request body
from urllib.parse import unquote
def get_dataDict(data):
data_dict = {}
for text in data.split('&'):
key, value = text.split('=')
value_1 = unquote(value)
data_dict[key] = value_1
return data_dict
# Callback for requests to '/'
@server.route('/')
def index():
htmlFileName = '_14_yolov3_3.html'
htmlFileContent = render_template(htmlFileName)
return htmlFileContent
# Callback for requests to '/get_detectionResult'
@server.route('/get_detectionResult', methods=['POST'])
def anyname_you_like():
startTime = time.time()
data_bytes = request.get_data()
data = data_bytes.decode('utf-8')
data_dict = get_dataDict(data)
if 'image_base64_string' in data_dict:
        # Save the received image to the designated directory
received_dirPath = '../resources/received_images'
if not os.path.isdir(received_dirPath):
os.makedirs(received_dirPath)
timeString = get_timeString()
imageFileName = timeString + '.jpg'
imageFilePath = os.path.join(received_dirPath, imageFileName)
try:
image_base64_string = data_dict['image_base64_string']
image_base64_bytes = image_base64_string.encode('utf-8')
image_bytes = base64.b64decode(image_base64_bytes)
with open(imageFilePath, 'wb') as file:
file.write(image_bytes)
print('接收图片文件保存到此路径:%s' %imageFilePath)
usedTime = time.time() - startTime
print('接收图片并保存,总共耗时%.2f秒' %usedTime)
            # Read the image from its path and run object detection on it
startTime = time.time()
image = Image.open(imageFilePath)
box_ndarray, classId_ndarray, score_ndarray = get_detectResult(image)
usedTime = time.time() - startTime
print('打开接收的图片文件并做目标检测,总共耗时%.2f秒\n' %usedTime)
            # Save the drawn detection result image to a path on the server
drawed_image = get_drawedImage(image, box_ndarray, classId_ndarray, score_ndarray)
drawed_imageFileName = 'drawed_' + imageFileName
drawed_imageFilePath = os.path.join(received_dirPath, drawed_imageFileName)
drawed_image.save(drawed_imageFilePath)
            # Convert the detection results into a json-format string
json_dict = {
'box_list' : box_ndarray.astype('int').tolist(),
'classId_list' : classId_ndarray.tolist(),
'score_list' : score_ndarray.tolist()
}
return jsonify(**json_dict)
except Exception as e:
print(e)
if __name__ == '__main__':
server.run('127.0.0.1', port=5000)
```
#### File: 005_模型部署_在线预测/code/_12_yolov3_client_3.py
```python
import os
import numpy as np
import cv2
from PIL import Image
# Import requests for issuing network requests
import requests
# Import base64 for encoding image files as base64 data
import base64
# Get the base64-encoded content of an image given its file path
def get_imageBase64String(imageFilePath):
    assert os.path.exists(imageFilePath), "This image path does not exist: %s" % imageFilePath
with open(imageFilePath, 'rb') as file:
image_bytes = file.read()
image_base64_bytes = base64.b64encode(image_bytes)
image_base64_string = image_base64_bytes.decode('utf-8')
return image_base64_string
# Display an image with the cv2 library
def cv2_display(image_ndarray):
windowName = "object_detection_result"
    # Let the cv2 window be resized
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.imshow(windowName, image_ndarray)
while True:
pressKey = cv2.waitKey()
        # Press Esc or q to close the display window
if 27 == pressKey or ord('q') == pressKey:
cv2.destroyAllWindows()
break
# Read the image from its file path, shrink it, save it to a new image file and return the new file path
import math
def resize_image(imageFilePath, max_height=416, max_width=416):
image_ndarray = cv2.imread(imageFilePath)
old_height, old_width, _ = image_ndarray.shape
if old_width > max_width or old_height > max_height:
if old_width / old_height >= max_width / max_height:
new_width = max_width
resized_multiple = new_width / old_width
new_height = math.ceil(old_height * resized_multiple)
else:
new_height = max_height
resized_multiple = new_height / old_height
new_width = math.ceil(old_width * resized_multiple)
else:
resized_multiple = 1
new_width = old_width
new_height = old_height
resized_image_ndarray = cv2.resize(
image_ndarray,
(new_width, new_height),
)
image_dirPath, imageFileName = os.path.split(imageFilePath)
resized_imageFileName = 'resized_' + imageFileName
resized_imageFilePath = os.path.join(image_dirPath, resized_imageFileName)
cv2.imwrite(resized_imageFilePath, resized_image_ndarray)
return resized_imageFilePath, resized_multiple
# One color per class; each color is an rgb tuple of 3 values
import colorsys
def get_colorList(category_quantity):
hsv_list = []
for i in range(category_quantity):
hue = i / category_quantity
saturation = 1
value = 1
hsv = (hue, saturation, value)
hsv_list.append(hsv)
colorFloat_list = [colorsys.hsv_to_rgb(*k) for k in hsv_list]
color_list = [tuple([int(x * 255) for x in k]) for k in colorFloat_list]
return color_list
# Parse the object class list category_list from a text file, one class per line
def get_categoryList(category_txtFilePath):
with open(category_txtFilePath, 'r', encoding='utf8') as file:
fileContent = file.read()
line_list = [k.strip() for k in fileContent.split('\n') if k.strip()!='']
category_list = line_list
return category_list
# Get the image with the detection results drawn on it
from PIL import Image, ImageDraw, ImageFont
def get_drawedImage(image, box_list,
classId_list, score_list, category_list=None,
show_bbox=True, show_class=True,
show_score=True, show_instanceQuantity=True):
if category_list == None:
category_txtFilePath = '../resources/yolov3/coco.names'
category_list = get_categoryList(category_txtFilePath)
    # Copy the original image into drawed_image so the original image data is not modified
drawed_image = image.copy()
    # Get the image width and height
image_width, image_height = image.size
    # Get the number of instances
box_ndarray = np.array(box_list).astype('int')
instance_quantity = box_ndarray.shape[0]
    # Generate a color for each class
category_quantity = len(category_list)
color_list = get_colorList(category_quantity)
    # Loop over every instance
for index in range(len(box_list)):
classId = classId_list[index]
className = category_list[classId]
color = color_list[classId]
x1, y1, x2, y2 = box_ndarray[index]
        # Make the drawing more robust by clipping coordinates to the image
x1 = max(0, x1)
y1 = max(0, y1)
x2 = min(image_width, x2)
y2 = min(image_height, y2)
        # Top-left and bottom-right corners of the box
box_leftTop = x1, y1
box_rightBottom = x2, y2
        # Draw the rectangle, i.e. the detected bounding box
if show_bbox:
drawed_image_ndarray = np.array(drawed_image)
thickness = max(1, (image_width + image_height) // 300)
cv2.rectangle(drawed_image_ndarray, box_leftTop, box_rightBottom, color, thickness)
drawed_image = Image.fromarray(drawed_image_ndarray)
        # Draw the label text (class name and, optionally, the score)
if show_class:
            # Instantiate the image draw object and the image font object
imageDraw = ImageDraw.Draw(drawed_image)
fontSize = max(1, int(0.02 * image_height + 0.5))
imageFont = ImageFont.truetype(
font='../resources/yolov3/FiraMono-Medium.otf',
size= fontSize
)
            # Define the content shown in the text region
if show_score:
score = score_list[index]
text = '%s %.2f' %(className, score)
else:
text = "%s" %className
            # Place the text region dynamically according to the font and the text size
textRegion_size = imageDraw.textsize(text, imageFont)
if y1 < 10:
textRegion_leftTop = (x1, y1)
textRegion_rightBottom = (x1 + textRegion_size[0], y1 + textRegion_size[1])
else:
textRegion_leftTop = (x1, y1 - textRegion_size[1])
textRegion_rightBottom = (x1 + textRegion_size[0], y1)
            # Draw a text background with the same color as the bounding box
imageDraw.rectangle(
[textRegion_leftTop, textRegion_rightBottom],
fill=color
)
            # Draw the text showing the class name and confidence score
imageDraw.text(textRegion_leftTop, text, fill=(0, 0, 0), font=imageFont)
del imageDraw
    # Draw text showing how many object instances were detected in the image
if show_instanceQuantity:
imageDraw = ImageDraw.Draw(drawed_image)
text = '此图片中总共检测出的物体数量:%02d' %instance_quantity
fontSize = max(1, int(0.05 * image_height + 0.5))
font = ImageFont.truetype('C:/Windows/Font/STLITI.TTF', fontSize, encoding='utf-8')
textRegion_leftTop = (3, 3)
textColor = (34, 139, 34)
imageDraw.text(textRegion_leftTop, text, textColor, font=font)
return drawed_image
# Main entry point
if __name__ == '__main__':
url = "http://127.0.0.1:5000/get_detectionResult"
while True:
input_content = input('输入图片路径,输入-1退出,默认值(../resources/images/person.jpg): ')
if input_content.strip() == "":
input_content = '../resources/images/person.jpg'
if input_content.strip() == "-1":
break
elif not os.path.exists(input_content.strip()):
print('输入图片路径不正确,请重新输入')
else:
imageFilePath = input_content.strip()
resized_imageFilePath, resized_multiple = resize_image(imageFilePath)
image_base64_string = get_imageBase64String(resized_imageFilePath)
data_dict = {'image_base64_string' : image_base64_string}
            # Call requests.post to issue the POST request and receive the response
response = requests.post(url, data=data_dict)
            # Process the returned json data and prepare the arguments for get_drawedImage
responseJson_dict = response.json()
image = Image.open(imageFilePath)
box_ndarray = np.array(responseJson_dict['box_list']) / resized_multiple
box_list = box_ndarray.astype('int').tolist()
classId_list = responseJson_dict['classId_list']
score_list = responseJson_dict['score_list']
            # Get the image with boxes drawn, based on the detection results
drawed_image = get_drawedImage(
image,
box_list,
classId_list,
score_list
)
rgb_image_ndarray = np.array(drawed_image)
bgr_image_ndarray = rgb_image_ndarray[..., ::-1]
cv2_display(bgr_image_ndarray)
``` |
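A small offline sketch (not part of the original file) showing that `get_drawedImage` can be used without the server; the image path and the detection values are made up:
```python
from PIL import Image

image = Image.open('../resources/images/person.jpg')
box_list = [[50, 60, 200, 300]]  # one box as (x1, y1, x2, y2)
drawed = get_drawedImage(image, box_list,
                         classId_list=[0], score_list=[0.9],
                         category_list=['person'],
                         show_class=False, show_instanceQuantity=False)
drawed.save('person_drawed.jpg')
```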
{
"source": "jiajunhua/tolstikhin-adagan",
"score": 2
} |
#### File: jiajunhua/tolstikhin-adagan/pot.py
```python
import collections
import logging
import os
import time
import tensorflow as tf
import utils
from utils import ProgressBar
from utils import TQDM
import numpy as np
import ops
from metrics import Metrics
slim = tf.contrib.slim
def vgg_16(inputs,
is_training=False,
dropout_keep_prob=0.5,
scope='vgg_16',
fc_conv_padding='VALID', reuse=None):
inputs = inputs * 255.0
inputs -= tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
with tf.variable_scope(scope, 'vgg_16', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
end_points = {}
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
end_points['pool0'] = inputs
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
end_points['pool1'] = net
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
end_points['pool2'] = net
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
end_points['pool3'] = net
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
end_points['pool4'] = net
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
end_points['pool5'] = net
# # Use conv2d instead of fully_connected layers.
# net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout6')
# net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
# net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
# scope='dropout7')
# net = slim.conv2d(net, num_classes, [1, 1],
# activation_fn=None,
# normalizer_fn=None,
# scope='fc8')
# Convert end_points_collection into a end_point dict.
# end_points = slim.utils.convert_collection_to_dict(end_points_collection)
return net, end_points
def compute_moments(_inputs, moments=[2, 3]):
"""From an image input, compute moments"""
_inputs_sq = tf.square(_inputs)
_inputs_cube = tf.pow(_inputs, 3)
height = int(_inputs.get_shape()[1])
width = int(_inputs.get_shape()[2])
channels = int(_inputs.get_shape()[3])
def ConvFlatten(x, kernel_size):
# w_sum = tf.ones([kernel_size, kernel_size, channels, 1]) / (kernel_size * kernel_size * channels)
w_sum = tf.eye(num_rows=channels, num_columns=channels, batch_shape=[kernel_size * kernel_size])
w_sum = tf.reshape(w_sum, [kernel_size, kernel_size, channels, channels])
w_sum = w_sum / (kernel_size * kernel_size)
sum_ = tf.nn.conv2d(x, w_sum, strides=[1, 1, 1, 1], padding='VALID')
size = prod_dim(sum_)
assert size == (height - kernel_size + 1) * (width - kernel_size + 1) * channels, size
return tf.reshape(sum_, [-1, size])
outputs = []
for size in [3, 4, 5]:
mean = ConvFlatten(_inputs, size)
square = ConvFlatten(_inputs_sq, size)
var = square - tf.square(mean)
if 2 in moments:
outputs.append(var)
if 3 in moments:
cube = ConvFlatten(_inputs_cube, size)
skewness = cube - 3.0 * mean * var - tf.pow(mean, 3) # Unnormalized
outputs.append(skewness)
return tf.concat(outputs, 1)
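# For reference, compute_moments estimates local (patch-wise) central moments with averaging
# convolutions: for each window size k in {3, 4, 5} it forms the patch mean m = avg(x), the
# variance avg(x^2) - m^2 and, if requested, the unnormalized skewness
# avg(x^3) - 3*m*var - m^3, then flattens and concatenates everything. The identities used
# are E[(X - m)^2] = E[X^2] - m^2 and E[(X - m)^3] = E[X^3] - 3*m*Var(X) - m^3.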
def prod_dim(tensor):
return np.prod([int(d) for d in tensor.get_shape()[1:]])
def flatten(tensor):
return tf.reshape(tensor, [-1, prod_dim(tensor)])
class Pot(object):
"""A base class for running individual POTs.
"""
def __init__(self, opts, data, weights):
# Create a new session with session.graph = default graph
self._session = tf.Session()
self._trained = False
self._data = data
self._data_weights = np.copy(weights)
# Latent noise sampled ones to apply decoder while training
self._noise_for_plots = opts['pot_pz_std'] * utils.generate_noise(opts, 1000)
# Placeholders
self._real_points_ph = None
self._noise_ph = None
# Init ops
self._additional_init_ops = []
self._init_feed_dict = {}
# Main operations
# Optimizers
with self._session.as_default(), self._session.graph.as_default():
logging.error('Building the graph...')
self._build_model_internal(opts)
# Make sure AdamOptimizer, if used in the Graph, is defined before
# calling global_variables_initializer().
init = tf.global_variables_initializer()
self._session.run(init)
self._session.run(self._additional_init_ops, self._init_feed_dict)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# Cleaning the whole default Graph
logging.error('Cleaning the graph...')
tf.reset_default_graph()
logging.error('Closing the session...')
# Finishing the session
self._session.close()
def train(self, opts):
"""Train a POT model.
"""
with self._session.as_default(), self._session.graph.as_default():
self._train_internal(opts)
self._trained = True
def sample(self, opts, num=100):
"""Sample points from the trained POT model.
"""
assert self._trained, 'Can not sample from the un-trained POT'
with self._session.as_default(), self._session.graph.as_default():
return self._sample_internal(opts, num)
def train_mixture_discriminator(self, opts, fake_images):
"""Train classifier separating true data from points in fake_images.
Return:
prob_real: probabilities of the points from training data being the
real points according to the trained mixture classifier.
Numpy vector of shape (self._data.num_points,)
prob_fake: probabilities of the points from fake_images being the
real points according to the trained mixture classifier.
Numpy vector of shape (len(fake_images),)
"""
with self._session.as_default(), self._session.graph.as_default():
return self._train_mixture_discriminator_internal(opts, fake_images)
def _run_batch(self, opts, operation, placeholder, feed,
placeholder2=None, feed2=None):
"""Wrapper around session.run to process huge data.
It is assumed that (a) the first dimension of placeholder enumerates
separate points, and (b) the operation is applied independently
to every point, i.e. we can split the input point-wise and then merge
the results. The second placeholder is meant either for the is_train
flag of batch norm or for the dropout keep probability.
TODO: write util function which will be called both from this method
and MNIST classification evaluation as well.
"""
assert len(feed.shape) > 0, 'Empty feed.'
num_points = feed.shape[0]
batch_size = opts['tf_run_batch_size']
batches_num = int(np.ceil((num_points + 0.) / batch_size))
result = []
for idx in xrange(batches_num):
if idx == batches_num - 1:
if feed2 is None:
res = self._session.run(
operation,
feed_dict={placeholder: feed[idx * batch_size:]})
else:
res = self._session.run(
operation,
feed_dict={placeholder: feed[idx * batch_size:],
placeholder2: feed2})
else:
if feed2 is None:
res = self._session.run(
operation,
feed_dict={placeholder: feed[idx * batch_size:
(idx + 1) * batch_size]})
else:
res = self._session.run(
operation,
feed_dict={placeholder: feed[idx * batch_size:
(idx + 1) * batch_size],
placeholder2: feed2})
if len(res.shape) == 1:
# convert (n,) vector to (n,1) array
res = np.reshape(res, [-1, 1])
result.append(res)
result = np.vstack(result)
assert len(result) == num_points
return result
def _build_model_internal(self, opts):
"""Build a TensorFlow graph with all the necessary ops.
"""
assert False, 'POT base class has no build_model method defined.'
def _train_internal(self, opts):
assert False, 'POT base class has no train method defined.'
def _sample_internal(self, opts, num):
assert False, 'POT base class has no sample method defined.'
def _train_mixture_discriminator_internal(self, opts, fake_images):
assert False, 'POT base class has no mixture discriminator method defined.'
class ImagePot(Pot):
"""A simple POT implementation, suitable for pictures.
"""
def __init__(self, opts, data, weights):
# One more placeholder for batch norm
self._is_training_ph = None
Pot.__init__(self, opts, data, weights)
def dcgan_like_arch(self, opts, noise, is_training, reuse, keep_prob):
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
batch_size = tf.shape(noise)[0]
num_layers = opts['g_num_layers']
if opts['g_arch'] == 'dcgan':
height = output_shape[0] / 2**num_layers
width = output_shape[1] / 2**num_layers
elif opts['g_arch'] == 'dcgan_mod':
height = output_shape[0] / 2**(num_layers-1)
width = output_shape[1] / 2**(num_layers-1)
else:
assert False
h0 = ops.linear(
opts, noise, num_units * height * width, scope='h0_lin')
h0 = tf.reshape(h0, [-1, height, width, num_units])
h0 = tf.nn.relu(h0)
layer_x = h0
for i in xrange(num_layers-1):
scale = 2**(i+1)
if opts['g_stride1_deconv']:
# Sylvain, I'm worried about this part!
_out_shape = [batch_size, height * scale / 2,
width * scale / 2, num_units / scale * 2]
layer_x = ops.deconv2d(
opts, layer_x, _out_shape, d_h=1, d_w=1,
scope='h%d_deconv_1x1' % i)
layer_x = tf.nn.relu(layer_x)
_out_shape = [batch_size, height * scale, width * scale, num_units / scale]
layer_x = ops.deconv2d(opts, layer_x, _out_shape, scope='h%d_deconv' % i)
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = tf.nn.relu(layer_x)
if opts['dropout']:
_keep_prob = tf.minimum(
1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
layer_x = tf.nn.dropout(layer_x, _keep_prob)
_out_shape = [batch_size] + list(output_shape)
if opts['g_arch'] == 'dcgan':
last_h = ops.deconv2d(
opts, layer_x, _out_shape, scope='hlast_deconv')
elif opts['g_arch'] == 'dcgan_mod':
last_h = ops.deconv2d(
opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hlast_deconv')
else:
assert False
if opts['input_normalize_sym']:
return tf.nn.tanh(last_h)
else:
return tf.nn.sigmoid(last_h)
def began_dec(self, opts, noise, is_training, reuse, keep_prob):
""" Architecture reported here: https://arxiv.org/pdf/1703.10717.pdf
"""
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
num_layers = opts['g_num_layers']
batch_size = tf.shape(noise)[0]
h0 = ops.linear(
opts, noise, num_units * 8 * 8, scope='h0_lin')
h0 = tf.reshape(h0, [-1, 8, 8, num_units])
layer_x = h0
for i in xrange(num_layers):
if i % 3 < 2:
# Don't change resolution
layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i)
layer_x = tf.nn.elu(layer_x)
else:
if i != num_layers - 1:
# Upsampling by factor of 2 with NN
scale = 2 ** (i / 3 + 1)
layer_x = ops.upsample_nn(layer_x, [scale * 8, scale * 8],
scope='h%d_upsample' % i, reuse=reuse)
# Skip connection
append = ops.upsample_nn(h0, [scale * 8, scale * 8],
scope='h%d_skipup' % i, reuse=reuse)
layer_x = tf.concat([layer_x, append], axis=3)
last_h = ops.conv2d(opts, layer_x, output_shape[-1], d_h=1, d_w=1, scope='hlast_conv')
if opts['input_normalize_sym']:
return tf.nn.tanh(last_h)
else:
return tf.nn.sigmoid(last_h)
def conv_up_res(self, opts, noise, is_training, reuse, keep_prob):
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
batch_size = tf.shape(noise)[0]
num_layers = opts['g_num_layers']
data_height = output_shape[0]
data_width = output_shape[1]
data_channels = output_shape[2]
height = data_height / 2**num_layers
width = data_width / 2**num_layers
h0 = ops.linear(
opts, noise, num_units * height * width, scope='h0_lin')
h0 = tf.reshape(h0, [-1, height, width, num_units])
h0 = tf.nn.relu(h0)
layer_x = h0
for i in xrange(num_layers-1):
layer_x = tf.image.resize_nearest_neighbor(layer_x, (2 * height, 2 * width))
layer_x = ops.conv2d(opts, layer_x, num_units / 2, d_h=1, d_w=1, scope='conv2d_%d' % i)
height *= 2
width *= 2
num_units /= 2
if opts['g_3x3_conv'] > 0:
before = layer_x
for j in range(opts['g_3x3_conv']):
layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1,
scope='conv2d_3x3_%d_%d' % (i, j),
conv_filters_dim=3)
layer_x = tf.nn.relu(layer_x)
layer_x += before # Residual connection.
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = tf.nn.relu(layer_x)
if opts['dropout']:
_keep_prob = tf.minimum(
1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
layer_x = tf.nn.dropout(layer_x, _keep_prob)
layer_x = tf.image.resize_nearest_neighbor(layer_x, (2 * height, 2 * width))
layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1, scope='last_conv2d_%d' % i)
if opts['input_normalize_sym']:
return tf.nn.tanh(layer_x)
else:
return tf.nn.sigmoid(layer_x)
def ali_deconv(self, opts, noise, is_training, reuse, keep_prob):
output_shape = self._data.data_shape
batch_size = tf.shape(noise)[0]
noise_size = int(noise.get_shape()[1])
data_height = output_shape[0]
data_width = output_shape[1]
data_channels = output_shape[2]
noise = tf.reshape(noise, [-1, 1, 1, noise_size])
num_units = opts['g_num_filters']
layer_params = []
layer_params.append([4, 1, num_units])
layer_params.append([4, 2, num_units / 2])
layer_params.append([4, 1, num_units / 4])
layer_params.append([4, 2, num_units / 8])
layer_params.append([5, 1, num_units / 8])
# For convolution: (n - k) / stride + 1 = s
# For transposed: (s - 1) * stride + k = n
layer_x = noise
height = 1
width = 1
for i, (kernel, stride, channels) in enumerate(layer_params):
height = (height - 1) * stride + kernel
width = height
layer_x = ops.deconv2d(
opts, layer_x, [batch_size, height, width, channels], d_h=stride, d_w=stride,
scope='h%d_deconv' % i, conv_filters_dim=kernel, padding='VALID')
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = ops.lrelu(layer_x, 0.1)
assert height == data_height
assert width == data_width
# Then two 1x1 convolutions.
layer_x = ops.conv2d(opts, layer_x, num_units / 8, d_h=1, d_w=1, scope='conv2d_1x1', conv_filters_dim=1)
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
layer_x = ops.lrelu(layer_x, 0.1)
layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1, scope='conv2d_1x1_2', conv_filters_dim=1)
if opts['input_normalize_sym']:
return tf.nn.tanh(layer_x)
else:
return tf.nn.sigmoid(layer_x)
def generator(self, opts, noise, is_training=False, reuse=False, keep_prob=1.):
""" Decoder actually.
"""
output_shape = self._data.data_shape
num_units = opts['g_num_filters']
with tf.variable_scope("GENERATOR", reuse=reuse):
# if not opts['convolutions']:
if opts['g_arch'] == 'mlp':
layer_x = noise
for i in range(opts['g_num_layers']):
layer_x = ops.linear(opts, layer_x, num_units, 'h%d_lin' % i)
layer_x = tf.nn.relu(layer_x)
if opts['batch_norm']:
layer_x = ops.batch_norm(
opts, layer_x, is_training, reuse, scope='bn%d' % i)
out = ops.linear(opts, layer_x, np.prod(output_shape), 'h%d_lin' % (i + 1))
out = tf.reshape(out, [-1] + list(output_shape))
if opts['input_normalize_sym']:
return tf.nn.tanh(out)
else:
return tf.nn.sigmoid(out)
elif opts['g_arch'] in ['dcgan', 'dcgan_mod']:
return self.dcgan_like_arch(opts, noise, is_training, reuse, keep_prob)
elif opts['g_arch'] == 'conv_up_res':
return self.conv_up_res(opts, noise, is_training, reuse, keep_prob)
elif opts['g_arch'] == 'ali':
return self.ali_deconv(opts, noise, is_training, reuse, keep_prob)
elif opts['g_arch'] == 'began':
return self.began_dec(opts, noise, is_training, reuse, keep_prob)
else:
raise ValueError('%s unknown' % opts['g_arch'])
def discriminator(self, opts, input_, prefix='DISCRIMINATOR', reuse=False):
"""Discriminator for the GAN objective
"""
num_units = opts['d_num_filters']
num_layers = opts['d_num_layers']
nowozin_trick = opts['gan_p_trick']
# No convolutions as GAN happens in the latent space
with tf.variable_scope(prefix, reuse=reuse):
hi = input_
for i in range(num_layers):
hi = ops.linear(opts, hi, num_units, scope='h%d_lin' % (i+1))
hi = tf.nn.relu(hi)
hi = ops.linear(opts, hi, 1, scope='final_lin')
if nowozin_trick:
# We are doing GAN between our model Qz and the true Pz.
# We know analytical form of the true Pz.
# The optimal discriminator for D_JS(Pz, Qz) is given by:
# Dopt(x) = log dPz(x) - log dQz(x)
# And we know exactly dPz(x). So add log dPz(x) explicitly
# to the discriminator and let it learn only the remaining
# dQz(x) term. This appeared in the AVB paper.
assert opts['latent_space_distr'] == 'normal'
sigma2_p = float(opts['pot_pz_std']) ** 2
normsq = tf.reduce_sum(tf.square(input_), 1)
hi = hi - normsq / 2. / sigma2_p \
- 0.5 * tf.log(2. * np.pi) \
- 0.5 * opts['latent_space_dim'] * np.log(sigma2_p)
return hi
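# With the trick above the network only has to model -log dQz(z); the analytic part of the
# optimal discriminator is added by hand. For Pz = N(0, sigma_p^2 I) in d = latent_space_dim
# dimensions the log density is
#   log dPz(z) = -||z||^2 / (2 sigma_p^2) - (d / 2) * log(2 pi) - (d / 2) * log(sigma_p^2),
# which is what the three subtracted terms implement, except that the 0.5 * log(2 * pi)
# term is not multiplied by d, a constant offset of the logits.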
def pz_sampler(self, opts, input_, prefix='PZ_SAMPLER', reuse=False):
"""Transformation to be applied to the sample from Pz
We are trying to match Qz to phi(Pz), where phi is defined by
this function
"""
dim = opts['latent_space_dim']
with tf.variable_scope(prefix, reuse=reuse):
matrix = tf.get_variable(
"W", [dim, dim], tf.float32,
tf.constant_initializer(np.identity(dim)))
bias = tf.get_variable(
"b", [dim],
initializer=tf.constant_initializer(0.))
return tf.matmul(input_, matrix) + bias
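# The sampler above is a learned affine map phi(z) = z W + b with W initialized to the
# identity and b to zero, so training starts from phi(Pz) = Pz and can then reshape the
# prior when opts['pz_transform'] is enabled.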
def get_batch_size(self, opts, input_):
return tf.cast(tf.shape(input_)[0], tf.float32)# opts['batch_size']
def moments_stats(self, opts, input_):
"""
Compute estimates of the first 4 moments of the coordinates
based on the sample in input_. Compare them to the desired
population values and return a corresponding loss.
"""
input_ = input_ / float(opts['pot_pz_std'])
# If Pz = Qz then input_ should now come from
# a product of pz_dim Gaussians N(0, 1)
# Thus first moments should be 0
p1 = tf.reduce_mean(input_, 0)
center_inp = input_ - p1 # Broadcasting
# Second centered and normalized moments should be 1
p2 = tf.sqrt(1e-5 + tf.reduce_mean(tf.square(center_inp), 0))
normed_inp = center_inp / p2
# Third central moment should be 0
# p3 = tf.pow(1e-5 + tf.abs(tf.reduce_mean(tf.pow(center_inp, 3), 0)), 1.0 / 3.0)
p3 = tf.abs(tf.reduce_mean(tf.pow(center_inp, 3), 0))
# 4th central moment of any uni-variate Gaussian = 3 * sigma^4
# p4 = tf.pow(1e-5 + tf.reduce_mean(tf.pow(center_inp, 4), 0) / 3.0, 1.0 / 4.0)
p4 = tf.reduce_mean(tf.pow(center_inp, 4), 0) / 3.
def zero_t(v):
return tf.sqrt(1e-5 + tf.reduce_mean(tf.square(v)))
def one_t(v):
# The function below takes its minimum value 1. at v = 1.
return tf.sqrt(1e-5 + tf.reduce_mean(tf.maximum(tf.square(v), 1.0 / (1e-5 + tf.square(v)))))
return tf.stack([zero_t(p1), one_t(p2), zero_t(p3), one_t(p4)])
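# The four statistics above are compared against the moments of N(0, 1) coordinates (after
# dividing by pot_pz_std): mean 0, centered second moment 1, third central moment 0 and
# fourth central moment 3 (hence p4 is divided by 3). zero_t penalizes deviation from 0 in
# root-mean-square terms, while one_t attains its minimum value 1 exactly when its argument
# equals 1, penalizing both over- and under-dispersion.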
def discriminator_test(self, opts, input_):
"""Deterministic discriminator using simple tests."""
if opts['z_test'] == 'cramer':
test_v = self.discriminator_cramer_test(opts, input_)
elif opts['z_test'] == 'anderson':
test_v = self.discriminator_anderson_test(opts, input_)
elif opts['z_test'] == 'moments':
test_v = tf.reduce_mean(self.moments_stats(opts, input_)) / 10.0
elif opts['z_test'] == 'lks':
test_v = self.discriminator_lks_test(opts, input_)
else:
raise ValueError('%s Unknown' % opts['z_test'])
return test_v
def discriminator_cramer_test(self, opts, input_):
"""Deterministic discriminator using Cramer von Mises Test.
"""
add_dim = opts['z_test_proj_dim']
if add_dim > 0:
dim = int(input_.get_shape()[1])
proj = np.random.rand(dim, add_dim)
proj = proj - np.mean(proj, 0)
norms = np.sqrt(np.sum(np.square(proj), 0) + 1e-5)
proj = tf.constant(proj / norms, dtype=tf.float32)
projected_x = tf.matmul(input_, proj) # Shape [batch_size, add_dim].
# Shape [batch_size, z_dim+add_dim]
all_dims_x = tf.concat([input_, projected_x], 1)
else:
all_dims_x = input_
# top_k can only sort on the last dimension and we want to sort the
# first one (batch_size).
batch_size = self.get_batch_size(opts, all_dims_x)
transposed = tf.transpose(all_dims_x, perm=[1, 0])
values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
values = tf.reverse(values, [1])
#values = tf.Print(values, [values], "sorted values")
normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
#
normal_cdf = normal_dist.cdf(values)
#normal_cdf = tf.Print(normal_cdf, [normal_cdf], "normal_cdf")
expected = (2 * tf.range(1, batch_size+1, 1, dtype="float") - 1) / (2.0 * batch_size)
#expected = tf.Print(expected, [expected], "expected")
# We don't use the constant.
# constant = 1.0 / (12.0 * batch_size * batch_size)
# stat = constant + tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
stat = tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
stat = tf.reduce_mean(stat)
#stat = tf.Print(stat, [stat], "stat")
return stat
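# For reference, the per-coordinate quantity above approximates the Cramer-von Mises
# statistic against F, the CDF of N(0, pot_pz_std^2): with sorted values z_(1) <= ... <= z_(n),
#   W^2 = 1 / (12 n) + sum_i ( (2 i - 1) / (2 n) - F(z_(i)) )^2.
# The code drops the 1/(12 n) constant and divides the sum by n before averaging over the
# z coordinates. A NumPy cross-check could look like (scipy assumed available, not used here):
#   z = np.sort(z_column); n = len(z)
#   w2 = 1. / (12 * n) + np.sum(
#       ((2 * np.arange(1, n + 1) - 1) / (2. * n) - scipy.stats.norm.cdf(z, scale=std)) ** 2)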
def discriminator_anderson_test(self, opts, input_):
"""Deterministic discriminator using the <NAME> test.
"""
# A-D test says to normalize data before computing the statistic
# Because true mean and variance are known, we are supposed to use
# the population parameters for that, but wiki says it's better to
# still use the sample estimates while normalizing
means = tf.reduce_mean(input_, 0)
input_ = input_ - means # Broadcasting
stds = tf.sqrt(1e-5 + tf.reduce_mean(tf.square(input_), 0))
input_= input_ / stds
# top_k can only sort on the last dimension and we want to sort the
# first one (batch_size).
batch_size = self.get_batch_size(opts, input_)
transposed = tf.transpose(input_, perm=[1, 0])
values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
values = tf.reverse(values, [1])
normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
normal_cdf = normal_dist.cdf(values)
# ln_normal_cdf is of shape (z_dim, batch_size)
ln_normal_cdf = tf.log(normal_cdf)
ln_one_normal_cdf = tf.log(1.0 - normal_cdf)
w1 = 2 * tf.range(1, batch_size + 1, 1, dtype="float") - 1
w2 = 2 * tf.range(batch_size - 1, -1, -1, dtype="float") + 1
stat = -batch_size - tf.reduce_sum(w1 * ln_normal_cdf + \
w2 * ln_one_normal_cdf, 1) / batch_size
# stat is of shape (z_dim)
stat = tf.reduce_mean(tf.square(stat))
return stat
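# For reference, the per-coordinate quantity above is the Anderson-Darling statistic for a
# sorted sample z_(1) <= ... <= z_(n) against F, the CDF of N(0, pot_pz_std^2):
#   A^2 = -n - (1/n) * sum_i [ (2 i - 1) * ln F(z_(i)) + (2 (n - i) + 1) * ln(1 - F(z_(i))) ],
# which is what the w1 / w2 weights implement (values are sorted ascending via top_k plus
# reverse). The returned loss is the mean of the squared A^2 values over the z coordinates.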
def discriminator_lks_lin_test(self, opts, input_):
"""Deterministic discriminator using Kernel Stein Discrepancy test
refer to LKS test on page 3 of https://arxiv.org/pdf/1705.07673.pdf
The statistic basically reads:
\[
\frac{2}{n}\sum_{i=1}^{n/2} \left(
\frac{\langle x_{2i}, x_{2i - 1} \rangle}{\sigma_p^4}
+ d/\sigma_k^2
- \|x_{2i} - x_{2i - 1}\|^2\left(\frac{1}{\sigma_p^2\sigma_k^2} + \frac{1}{\sigma_k^4}\right)
\right)
\exp( - \|x_{2i} - x_{2i - 1}\|^2/2/\sigma_k^2)
\]
"""
# To check the typical sizes of the test for Pz = Qz, uncomment
# input_ = opts['pot_pz_std'] * utils.generate_noise(opts, 100000)
batch_size = self.get_batch_size(opts, input_)
batch_size = tf.cast(batch_size, tf.int32)
half_size = batch_size / 2
# s1 = tf.slice(input_, [0, 0], [half_size, -1])
# s2 = tf.slice(input_, [half_size, 0], [half_size, -1])
s1 = input_[:half_size, :]
s2 = input_[half_size:, :]
dotprods = tf.reduce_sum(tf.multiply(s1, s2), axis=1)
distances = tf.reduce_sum(tf.square(s1 - s2), axis=1)
sigma2_p = opts['pot_pz_std'] ** 2 # var = std ** 2
# Median heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(distances, half_size).values[half_size - 1]
# Maximum heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(distances, 1).values[0]
sigma2_k = opts['latent_space_dim'] * sigma2_p
if opts['verbose'] == 2:
sigma2_k = tf.Print(sigma2_k, [tf.nn.top_k(distances, 1).values[0]],
'Maximal squared pairwise distance:')
sigma2_k = tf.Print(sigma2_k, [tf.reduce_mean(distances)],
'Average squared pairwise distance:')
sigma2_k = tf.Print(sigma2_k, [sigma2_k], 'Kernel width:')
# sigma2_k = tf.Print(sigma2_k, [sigma2_k], 'Kernel width:')
res = dotprods / sigma2_p ** 2 \
- distances * (1. / sigma2_p / sigma2_k + 1. / sigma2_k ** 2) \
+ opts['latent_space_dim'] / sigma2_k
res = tf.multiply(res, tf.exp(- distances / 2./ sigma2_k))
stat = tf.reduce_mean(res)
return stat
def discriminator_lks_test(self, opts, input_):
"""Deterministic discriminator using Kernel Stein Discrepancy test
refer to the quadratic test of https://arxiv.org/pdf/1705.07673.pdf
The statistic basically reads:
\[
\frac{1}{n^2 - n}\sum_{i \neq j} \left(
\frac{\langle x_i, x_j \rangle}{\sigma_p^4}
+ d/\sigma_k^2
- \|x_i - x_j\|^2\left(\frac{1}{\sigma_p^2\sigma_k^2} + \frac{1}{\sigma_k^4}\right)
\right)
\exp( - \|x_i - x_j\|^2/2/\sigma_k^2)
\]
"""
n = self.get_batch_size(opts, input_)
n = tf.cast(n, tf.int32)
half_size = (n * n - n) / 2
nf = tf.cast(n, tf.float32)
norms = tf.reduce_sum(tf.square(input_), axis=1, keep_dims=True)
dotprods = tf.matmul(input_, input_, transpose_b=True)
distances = norms + tf.transpose(norms) - 2. * dotprods
sigma2_p = opts['pot_pz_std'] ** 2 # var = std ** 2
# Median heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(tf.reshape(distances, [-1]), half_size).values[half_size - 1]
# Maximal heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]
sigma2_k = opts['latent_space_dim'] * sigma2_p
if opts['verbose'] == 2:
sigma2_k = tf.Print(sigma2_k, [tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]],
'Maximal squared pairwise distance:')
sigma2_k = tf.Print(sigma2_k, [tf.reduce_mean(distances)],
'Average squared pairwise distance:')
sigma2_k = tf.Print(sigma2_k, [sigma2_k], 'Kernel width:')
res = dotprods / sigma2_p ** 2 \
- distances * (1. / sigma2_p / sigma2_k + 1. / sigma2_k ** 2) \
+ opts['latent_space_dim'] / sigma2_k
res = tf.multiply(res, tf.exp(- distances / 2./ sigma2_k))
res = tf.multiply(res, 1. - tf.eye(n))
stat = tf.reduce_sum(res) / (nf * nf - nf)
# stat = tf.reduce_sum(res) / (nf * nf)
return stat
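# In the statistic above, the bracketed expression is the Stein kernel for the Gaussian
# target p = N(0, sigma_p^2 I) with an RBF kernel of bandwidth sigma_k^2:
#   u_p(x, y) = ( <x, y> / sigma_p^4 + d / sigma_k^2
#                 - ||x - y||^2 * (1 / (sigma_p^2 sigma_k^2) + 1 / sigma_k^4) )
#               * exp(-||x - y||^2 / (2 sigma_k^2)),
# averaged over all i != j pairs (a U-statistic, hence the eye mask and the n^2 - n
# normalization). Here sigma_k^2 is fixed to d * sigma_p^2; the commented lines show the
# median and maximum heuristics as alternatives.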
def discriminator_mmd_test(self, opts, sample_qz, sample_pz):
"""U statistic for MMD(Qz, Pz) with the RBF kernel
"""
sigma2_p = opts['pot_pz_std'] ** 2 # var = std ** 2
kernel = 'IM'
n = self.get_batch_size(opts, sample_qz)
n = tf.cast(n, tf.int32)
nf = tf.cast(n, tf.float32)
half_size = (n * n - n) / 2
# Pz
norms_pz = tf.reduce_sum(tf.square(sample_pz), axis=1, keep_dims=True)
dotprods_pz = tf.matmul(sample_pz, sample_pz, transpose_b=True)
distances_pz = norms_pz + tf.transpose(norms_pz) - 2. * dotprods_pz
# Qz
norms_qz = tf.reduce_sum(tf.square(sample_qz), axis=1, keep_dims=True)
dotprods_qz = tf.matmul(sample_qz, sample_qz, transpose_b=True)
distances_qz = norms_qz + tf.transpose(norms_qz) - 2. * dotprods_qz
# Pz vs Qz
dotprods = tf.matmul(sample_qz, sample_pz, transpose_b=True)
distances = norms_qz + tf.transpose(norms_pz) - 2. * dotprods
if opts['verbose'] == 2:
distances = tf.Print(distances, [tf.nn.top_k(tf.reshape(distances_qz, [-1]), 1).values[0]],
'Maximal Qz squared pairwise distance:')
distances = tf.Print(distances, [tf.reduce_mean(distances_qz)],
'Average Qz squared pairwise distance:')
distances = tf.Print(distances, [tf.nn.top_k(tf.reshape(distances_pz, [-1]), 1).values[0]],
'Maximal Pz squared pairwise distance:')
distances = tf.Print(distances, [tf.reduce_mean(distances_pz)],
'Average Pz squared pairwise distance:')
distances = tf.Print(distances, [tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]],
'Maximal squared pairwise distance:')
distances = tf.Print(distances, [tf.nn.top_k(tf.reshape(distances, [-1]), n * n).values[n * n - 1]],
'Minimal squared pairwise distance:')
distances = tf.Print(distances, [tf.reduce_mean(distances)],
'Average squared pairwise distance:')
if kernel == 'RBF':
# RBF kernel
# Median heuristic for the sigma^2 of Gaussian kernel
sigma2_k = tf.nn.top_k(tf.reshape(distances, [-1]), half_size).values[half_size - 1]
sigma2_k += tf.nn.top_k(tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
# Maximal heuristic for the sigma^2 of Gaussian kernel
# sigma2_k = tf.nn.top_k(tf.reshape(distances_qz, [-1]), 1).values[0]
# sigma2_k += tf.nn.top_k(tf.reshape(distances, [-1]), 1).values[0]
# sigma2_k = opts['latent_space_dim'] * sigma2_p
sigma2_k = tf.Print(sigma2_k, [sigma2_k], 'Kernel width:')
res1 = tf.exp( - distances_qz / 2. / sigma2_k)
res1 += tf.exp( - distances_pz / 2. / sigma2_k)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res2 = tf.exp( - distances / 2. / sigma2_k)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
stat = res1 - res2
# stat = tf.reduce_sum(res) / (nf * nf)
elif kernel == 'IM':
# C = tf.nn.top_k(tf.reshape(distances, [-1]), half_size).values[half_size - 1]
# C += tf.nn.top_k(tf.reshape(distances_qz, [-1]), half_size).values[half_size - 1]
C = 2 * opts['latent_space_dim'] * sigma2_p
res1 = C / (C + distances_qz)
res1 += C / (C + distances_pz)
res1 = tf.multiply(res1, 1. - tf.eye(n))
res1 = tf.reduce_sum(res1) / (nf * nf - nf)
res1 = tf.Print(res1, [res1], 'First two terms:')
res2 = C / (C + distances)
res2 = tf.reduce_sum(res2) * 2. / (nf * nf)
res2 = tf.Print(res2, [res2], 'Negative term:')
stat = res1 - res2
# stat = tf.reduce_sum(res) / (nf * nf)
return stat
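# The value returned above estimates
#   MMD^2(Qz, Pz) = E[k(z, z')] + E[k(w, w')] - 2 E[k(z, w)],  z, z' ~ Qz,  w, w' ~ Pz,
# with diagonal terms removed from the within-sample averages (the 1 - eye(n) mask and the
# 1 / (n^2 - n) normalization) and 2 / n^2 used for the cross term. The default kernel here
# is the inverse multiquadratics kernel k(a, b) = C / (C + ||a - b||^2) with
# C = 2 * latent_space_dim * sigma_p^2; the 'RBF' branch instead uses a Gaussian kernel with
# a median-heuristic bandwidth.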
def correlation_loss(self, opts, input_):
"""
Independence test based on Pearson's correlation.
Keep in mind that this captures only linear dependencies.
However, for a multivariate Gaussian, independence is equivalent
to zero correlation.
"""
batch_size = self.get_batch_size(opts, input_)
dim = int(input_.get_shape()[1])
transposed = tf.transpose(input_, perm=[1, 0])
mean = tf.reshape(tf.reduce_mean(transposed, axis=1), [-1, 1])
centered_transposed = transposed - mean # Broadcasting mean
cov = tf.matmul(centered_transposed, centered_transposed, transpose_b=True)
cov = cov / (batch_size - 1)
#cov = tf.Print(cov, [cov], "cov")
sigmas = tf.sqrt(tf.diag_part(cov) + 1e-5)
#sigmas = tf.Print(sigmas, [sigmas], "sigmas")
sigmas = tf.reshape(sigmas, [1, -1])
sigmas = tf.matmul(sigmas, sigmas, transpose_a=True)
#sigmas = tf.Print(sigmas, [sigmas], "sigmas")
# Pearson's correlation
corr = cov / sigmas
triangle = tf.matrix_set_diag(tf.matrix_band_part(corr, 0, -1), tf.zeros(dim))
#triangle = tf.Print(triangle, [triangle], "triangle")
loss = tf.reduce_sum(tf.square(triangle)) / ((dim * dim - dim) / 2.0)
#loss = tf.Print(loss, [loss], "Correlation loss")
return loss
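# The loss above is the mean squared off-diagonal Pearson correlation: with sample
# covariance C and per-coordinate standard deviations s_i, corr_ij = C_ij / (s_i * s_j),
# and the loss equals sum_{i < j} corr_ij^2 / (d * (d - 1) / 2). It is zero exactly when all
# latent coordinates are (linearly) uncorrelated within the batch.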
def encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
if opts['e_add_noise']:
def add_noise(x):
shape = tf.shape(x)
return x + tf.truncated_normal(shape, 0.0, 0.01)
def do_nothing(x):
return x
input_ = tf.cond(is_training, lambda: add_noise(input_), lambda: do_nothing(input_))
num_units = opts['e_num_filters']
num_layers = opts['e_num_layers']
with tf.variable_scope("ENCODER", reuse=reuse):
if not opts['convolutions']:
hi = input_
for i in range(num_layers):
hi = ops.linear(opts, hi, num_units, scope='h%d_lin' % i)
if opts['batch_norm']:
hi = ops.batch_norm(opts, hi, is_training, reuse, scope='bn%d' % i)
hi = tf.nn.relu(hi)
if opts['e_is_random']:
latent_mean = ops.linear(
opts, hi, opts['latent_space_dim'], 'h%d_lin' % (i + 1))
log_latent_sigmas = ops.linear(
opts, hi, opts['latent_space_dim'], 'h%d_lin_sigma' % (i + 1))
return latent_mean, log_latent_sigmas
else:
return ops.linear(opts, hi, opts['latent_space_dim'], 'h%d_lin' % (i + 1))
elif opts['e_arch'] == 'dcgan':
return self.dcgan_encoder(opts, input_, is_training, reuse, keep_prob)
elif opts['e_arch'] == 'ali':
return self.ali_encoder(opts, input_, is_training, reuse, keep_prob)
elif opts['e_arch'] == 'began':
return self.began_encoder(opts, input_, is_training, reuse, keep_prob)
else:
raise ValueError('%s Unknown' % opts['e_arch'])
def dcgan_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
num_units = opts['e_num_filters']
num_layers = opts['e_num_layers']
layer_x = input_
for i in xrange(num_layers):
scale = 2**(num_layers-i-1)
layer_x = ops.conv2d(opts, layer_x, num_units / scale, scope='h%d_conv' % i)
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = tf.nn.relu(layer_x)
if opts['dropout']:
_keep_prob = tf.minimum(
1., 0.9 - (0.9 - keep_prob) * float(i + 1) / num_layers)
layer_x = tf.nn.dropout(layer_x, _keep_prob)
if opts['e_3x3_conv'] > 0:
before = layer_x
for j in range(opts['e_3x3_conv']):
layer_x = ops.conv2d(opts, layer_x, num_units / scale, d_h=1, d_w=1,
scope='conv2d_3x3_%d_%d' % (i, j),
conv_filters_dim=3)
layer_x = tf.nn.relu(layer_x)
layer_x += before # Residual connection.
if opts['e_is_random']:
latent_mean = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
log_latent_sigmas = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
return latent_mean, log_latent_sigmas
else:
return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
def ali_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
num_units = opts['e_num_filters']
layer_params = []
layer_params.append([5, 1, num_units / 8])
layer_params.append([4, 2, num_units / 4])
layer_params.append([4, 1, num_units / 2])
layer_params.append([4, 2, num_units])
layer_params.append([4, 1, num_units * 2])
# For convolution: (n - k) / stride + 1 = s
# For transposed: (s - 1) * stride + k = n
layer_x = input_
height = int(layer_x.get_shape()[1])
width = int(layer_x.get_shape()[2])
assert height == width
for i, (kernel, stride, channels) in enumerate(layer_params):
height = (height - kernel) / stride + 1
width = height
# print((height, width))
layer_x = ops.conv2d(
opts, layer_x, channels, d_h=stride, d_w=stride,
scope='h%d_conv' % i, conv_filters_dim=kernel, padding='VALID')
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = ops.lrelu(layer_x, 0.1)
assert height == 1
assert width == 1
# Then two 1x1 convolutions.
layer_x = ops.conv2d(opts, layer_x, num_units * 2, d_h=1, d_w=1, scope='conv2d_1x1', conv_filters_dim=1)
if opts['batch_norm']:
layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
layer_x = ops.lrelu(layer_x, 0.1)
layer_x = ops.conv2d(opts, layer_x, num_units / 2, d_h=1, d_w=1, scope='conv2d_1x1_2', conv_filters_dim=1)
if opts['e_is_random']:
latent_mean = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
log_latent_sigmas = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
return latent_mean, log_latent_sigmas
else:
return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
def began_encoder(self, opts, input_, is_training=False, reuse=False, keep_prob=1.):
num_units = opts['e_num_filters']
assert num_units == opts['g_num_filters'], 'BEGAN requires same number of filters in encoder and decoder'
num_layers = opts['e_num_layers']
layer_x = ops.conv2d(opts, input_, num_units, scope='h_first_conv')
for i in xrange(num_layers):
if i % 3 < 2:
if i != num_layers - 2:
ii = i - (i / 3)
scale = (ii + 1 - ii / 2)
else:
ii = i - (i / 3)
scale = (ii - (ii - 1) / 2)
layer_x = ops.conv2d(opts, layer_x, num_units * scale, d_h=1, d_w=1, scope='h%d_conv' % i)
layer_x = tf.nn.elu(layer_x)
else:
if i != num_layers - 1:
layer_x = ops.downsample(layer_x, scope='h%d_maxpool' % i, reuse=reuse)
# Tensor should be [N, 8, 8, filters] right now
if opts['e_is_random']:
latent_mean = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
log_latent_sigmas = ops.linear(
opts, layer_x, opts['latent_space_dim'], scope='hlast_lin_sigma')
return latent_mean, log_latent_sigmas
else:
return ops.linear(opts, layer_x, opts['latent_space_dim'], scope='hlast_lin')
def _data_augmentation(self, opts, real_points, is_training):
if not opts['data_augm']:
return real_points
height = int(real_points.get_shape()[1])
width = int(real_points.get_shape()[2])
depth = int(real_points.get_shape()[3])
# logging.error("real_points shape", real_points.get_shape())
def _distort_func(image):
# tf.image.per_image_standardization(image), should we?
# Pad with zeros.
image = tf.image.resize_image_with_crop_or_pad(
image, height+4, width+4)
image = tf.random_crop(image, [height, width, depth])
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.minimum(tf.maximum(image, 0.0), 1.0)
image = tf.image.random_contrast(image, lower=0.8, upper=1.3)
image = tf.minimum(tf.maximum(image, 0.0), 1.0)
image = tf.image.random_hue(image, 0.08)
image = tf.minimum(tf.maximum(image, 0.0), 1.0)
image = tf.image.random_saturation(image, lower=0.8, upper=1.3)
image = tf.minimum(tf.maximum(image, 0.0), 1.0)
return image
def _regular_func(image):
# tf.image.per_image_standardization(image)?
return image
distorted_images = tf.cond(
is_training,
lambda: tf.map_fn(_distort_func, real_points,
parallel_iterations=100),
lambda: tf.map_fn(_regular_func, real_points,
parallel_iterations=100))
return distorted_images
def _recon_loss_using_disc_encoder(
self, opts, reconstructed_training, encoded_training,
real_points, is_training_ph, keep_prob_ph):
"""Build an additional loss using the encoder as discriminator."""
reconstructed_reencoded_sg = self.encoder(
opts, tf.stop_gradient(reconstructed_training),
is_training=is_training_ph, keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
reconstructed_reencoded_sg = reconstructed_reencoded_sg[0]
reconstructed_reencoded = self.encoder(
opts, reconstructed_training, is_training=is_training_ph,
keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
reconstructed_reencoded = reconstructed_reencoded[0]
# The line below makes the forward pass use reconstructed_reencoded while the backward pass leaves the encoder unchanged.
crazy_hack = reconstructed_reencoded - reconstructed_reencoded_sg +\
tf.stop_gradient(reconstructed_reencoded_sg)
encoded_training_sg = self.encoder(
opts, tf.stop_gradient(real_points),
is_training=is_training_ph, keep_prob=keep_prob_ph, reuse=True)
if opts['e_is_random']:
encoded_training_sg = encoded_training_sg[0]
adv_fake_layer = ops.linear(opts, reconstructed_reencoded_sg, 1, scope='adv_layer')
adv_true_layer = ops.linear(opts, encoded_training_sg, 1, scope='adv_layer', reuse=True)
adv_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_fake_layer, labels=tf.zeros_like(adv_fake_layer))
adv_true = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_true_layer, labels=tf.ones_like(adv_true_layer))
adv_fake = tf.reduce_mean(adv_fake)
adv_true = tf.reduce_mean(adv_true)
adv_c_loss = adv_fake + adv_true
emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(encoded_training)), 1)
emb_c_loss = tf.reduce_mean(tf.sqrt(emb_c + 1e-5))
# Normalize the loss, so that it does not depend on how good the
# discriminator is.
emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
return adv_c_loss, emb_c_loss
def _recon_loss_using_disc_conv(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using a discriminator in X space."""
def _conv_flatten(x, kernel_size):
height = int(x.get_shape()[1])
width = int(x.get_shape()[2])
channels = int(x.get_shape()[3])
w_sum = tf.eye(num_rows=channels, num_columns=channels, batch_shape=[kernel_size * kernel_size])
w_sum = tf.reshape(w_sum, [kernel_size, kernel_size, channels, channels])
w_sum = w_sum / (kernel_size * kernel_size)
sum_ = tf.nn.conv2d(x, w_sum, strides=[1, 1, 1, 1], padding='SAME')
size = prod_dim(sum_)
assert size == height * width * channels, size
return tf.reshape(sum_, [-1, size])
def _gram_scores(tensor, kernel_size):
assert len(tensor.get_shape()) == 4, tensor
ttensor = tf.transpose(tensor, [3, 1, 2, 0])
rand_indices = tf.random_shuffle(tf.range(ttensor.get_shape()[0]))
shuffled = tf.gather(ttensor, rand_indices)
shuffled = tf.transpose(shuffled, [3, 1, 2, 0])
cross_p = _conv_flatten(tensor * shuffled, kernel_size) # shape [batch_size, height * width * channels]
diag_p = _conv_flatten(tf.square(tensor), kernel_size) # shape [batch_size, height * width * channels]
return cross_p, diag_p
def _architecture(inputs, reuse=None):
with tf.variable_scope('DISC_X_LOSS', reuse=reuse):
num_units = opts['adv_c_num_units']
num_layers = 1
filter_sizes = opts['adv_c_patches_size']
if isinstance(filter_sizes, int):
filter_sizes = [filter_sizes]
else:
filter_sizes = [int(n) for n in filter_sizes.split(',')]
embedded_outputs = []
linear_outputs = []
for filter_size in filter_sizes:
layer_x = inputs
for i in xrange(num_layers):
# scale = 2**(num_layers-i-1)
layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv%d' % (i, filter_size),
conv_filters_dim=filter_size, padding='SAME')
# if opts['batch_norm']:
# layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d_%d' % (i, filter_size))
layer_x = ops.lrelu(layer_x, 0.1)
last = ops.conv2d(
opts, layer_x, 1, d_h=1, d_w=1, scope="last_lin%d" % filter_size, conv_filters_dim=1, l2_norm=True)
if opts['cross_p_w'] > 0.0 or opts['diag_p_w'] > 0.0:
cross_p, diag_p = _gram_scores(layer_x, filter_size)
embedded_outputs.append(cross_p * opts['cross_p_w'])
embedded_outputs.append(diag_p * opts['diag_p_w'])
fl = flatten(layer_x)
# fl = tf.Print(fl, [fl], "fl")
embedded_outputs.append(fl)
size = int(last.get_shape()[1])
linear_outputs.append(tf.reshape(last, [-1, size * size]))
if len(embedded_outputs) > 1:
embedded_outputs = tf.concat(embedded_outputs, 1)
else:
embedded_outputs = embedded_outputs[0]
if len(linear_outputs) > 1:
linear_outputs = tf.concat(linear_outputs, 1)
else:
linear_outputs = linear_outputs[0]
return embedded_outputs, linear_outputs
reconstructed_embed_sg, adv_fake_layer = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
reconstructed_embed, _ = _architecture(reconstructed_training, reuse=True)
# The line below makes the forward pass use reconstructed_embed while the backward pass leaves the discriminator unchanged.
crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
real_p_embed_sg, adv_true_layer = _architecture(tf.stop_gradient(real_points), reuse=True)
real_p_embed, _ = _architecture(real_points, reuse=True)
adv_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_fake_layer, labels=tf.zeros_like(adv_fake_layer))
adv_true = tf.nn.sigmoid_cross_entropy_with_logits(
logits=adv_true_layer, labels=tf.ones_like(adv_true_layer))
adv_fake = tf.reduce_mean(adv_fake)
adv_true = tf.reduce_mean(adv_true)
adv_c_loss = adv_fake + adv_true
emb_c = tf.reduce_mean(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
real_points_shuffle = tf.stop_gradient(tf.random_shuffle(real_p_embed))
emb_c_shuffle = tf.reduce_mean(tf.square(real_points_shuffle - tf.stop_gradient(reconstructed_embed)), 1)
raw_emb_c_loss = tf.reduce_mean(emb_c)
shuffled_emb_c_loss = tf.reduce_mean(emb_c_shuffle)
emb_c_loss = raw_emb_c_loss / shuffled_emb_c_loss
emb_c_loss = emb_c_loss * 40
return adv_c_loss, emb_c_loss
def _recon_loss_using_disc_conv_eb(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using a discriminator in X space, using Energy Based approach."""
def copy3D(height, width, channels):
m = np.zeros([height, width, channels, height, width, channels])
for i in xrange(height):
for j in xrange(width):
for c in xrange(channels):
m[i, j, c, i, j, c] = 1.0
return tf.constant(np.reshape(m, [height, width, channels, -1]), dtype=tf.float32)
def _architecture(inputs, reuse=None):
dim = opts['adv_c_patches_size']
height = int(inputs.get_shape()[1])
width = int(inputs.get_shape()[2])
channels = int(inputs.get_shape()[3])
with tf.variable_scope('DISC_X_LOSS', reuse=reuse):
num_units = opts['adv_c_num_units']
num_layers = 1
layer_x = inputs
for i in xrange(num_layers):
# scale = 2**(num_layers-i-1)
layer_x = ops.conv2d(opts, layer_x, num_units, d_h=1, d_w=1, scope='h%d_conv' % i,
conv_filters_dim=dim, padding='SAME')
# if opts['batch_norm']:
# layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
layer_x = ops.lrelu(layer_x, 0.1) #tf.nn.relu(layer_x)
copy_w = copy3D(dim, dim, channels)
duplicated = tf.nn.conv2d(inputs, copy_w, strides=[1, 1, 1, 1], padding='SAME')
decoded = ops.conv2d(
opts, layer_x, channels * dim * dim, d_h=1, d_w=1, scope="decoder",
conv_filters_dim=1, padding='SAME')
reconstruction = tf.reduce_mean(tf.square(tf.stop_gradient(duplicated) - decoded), [1, 2, 3])
assert len(reconstruction.get_shape()) == 1
return flatten(layer_x), reconstruction
reconstructed_embed_sg, adv_fake_layer = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
reconstructed_embed, _ = _architecture(reconstructed_training, reuse=True)
# The line below makes the forward pass use reconstructed_embed while the backward pass leaves the discriminator unchanged.
crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
real_p_embed_sg, adv_true_layer = _architecture(tf.stop_gradient(real_points), reuse=True)
real_p_embed, _ = _architecture(real_points, reuse=True)
adv_fake = tf.reduce_mean(adv_fake_layer)
adv_true = tf.reduce_mean(adv_true_layer)
adv_c_loss = tf.log(adv_true) - tf.log(adv_fake)
emb_c = tf.reduce_sum(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
emb_c_loss = tf.reduce_mean(emb_c)
return adv_c_loss, emb_c_loss
def _recon_loss_using_vgg(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using a pretrained VGG in X space."""
def _architecture(_inputs, reuse=None):
_, end_points = vgg_16(_inputs, is_training=is_training, dropout_keep_prob=keep_prob, reuse=reuse)
layer_name = opts['vgg_layer']
if layer_name == 'concat':
outputs = []
for ln in ['pool1', 'pool2', 'pool3']:
output = end_points[ln]
output = flatten(output)
outputs.append(output)
output = tf.concat(outputs, 1)
elif layer_name.startswith('concat_w'):
weights = layer_name.split(',')[1:]
assert len(weights) == 5
outputs = []
for lnum in range(5):
num = lnum + 1
ln = 'pool%d' % num
output = end_points[ln]
output = flatten(output)
# We sqrt the weight here because we use L2 after.
outputs.append(np.sqrt(float(weights[lnum])) * output)
output = tf.concat(outputs, 1)
else:
output = end_points[layer_name]
output = flatten(output)
if reuse is None:
variables_to_restore = slim.get_variables_to_restore(include=['vgg_16'])
path = os.path.join(opts['data_dir'], 'vgg_16.ckpt')
# '/tmpp/models/vgg_16.ckpt'
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(path, variables_to_restore)
self._additional_init_ops += [init_assign_op]
self._init_feed_dict.update(init_feed_dict)
return output
reconstructed_embed_sg = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
reconstructed_embed = _architecture(reconstructed_training, reuse=True)
# The line below makes the forward pass use reconstructed_embed while the backward pass leaves the discriminator unchanged.
crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
real_p_embed = _architecture(real_points, reuse=True)
emb_c = tf.reduce_mean(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
emb_c_loss = tf.reduce_mean(tf.sqrt(emb_c + 1e-5))
# emb_c_loss = tf.Print(emb_c_loss, [emb_c_loss], "emb_c_loss")
# # Normalize the loss, so that it does not depend on how good the
# # discriminator is.
# emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
return emb_c_loss
def _recon_loss_using_moments(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using moments."""
def _architecture(_inputs):
return compute_moments(_inputs, moments=[2]) # TODO
reconstructed_embed = _architecture(reconstructed_training)
real_p_embed = _architecture(real_points)
emb_c = tf.reduce_mean(tf.square(reconstructed_embed - tf.stop_gradient(real_p_embed)), 1)
# emb_c = tf.Print(emb_c, [emb_c], "emb_c")
emb_c_loss = tf.reduce_mean(emb_c)
return emb_c_loss * 100.0 * 100.0 # TODO: constant.
def _recon_loss_using_vgg_moments(self, opts, reconstructed_training, real_points, is_training, keep_prob):
"""Build an additional loss using a pretrained VGG in X space."""
def _architecture(_inputs, reuse=None):
_, end_points = vgg_16(_inputs, is_training=is_training, dropout_keep_prob=keep_prob, reuse=reuse)
layer_name = opts['vgg_layer']
output = end_points[layer_name]
# output = flatten(output)
output /= 255.0 # the vgg_16 method scales everything by 255.0, so we divide back here.
variances = compute_moments(output, moments=[2])
if reuse is None:
variables_to_restore = slim.get_variables_to_restore(include=['vgg_16'])
path = os.path.join(opts['data_dir'], 'vgg_16.ckpt')
# '/tmpp/models/vgg_16.ckpt'
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(path, variables_to_restore)
self._additional_init_ops += [init_assign_op]
self._init_feed_dict.update(init_feed_dict)
return variances
reconstructed_embed_sg = _architecture(tf.stop_gradient(reconstructed_training), reuse=None)
reconstructed_embed = _architecture(reconstructed_training, reuse=True)
# The line below makes the forward pass use reconstructed_embed while the backward pass leaves the discriminator unchanged.
crazy_hack = reconstructed_embed-reconstructed_embed_sg+tf.stop_gradient(reconstructed_embed_sg)
real_p_embed = _architecture(real_points, reuse=True)
emb_c = tf.reduce_mean(tf.square(crazy_hack - tf.stop_gradient(real_p_embed)), 1)
emb_c_loss = tf.reduce_mean(emb_c)
# emb_c_loss = tf.Print(emb_c_loss, [emb_c_loss], "emb_c_loss")
# # Normalize the loss, so that it does not depend on how good the
# # discriminator is.
# emb_c_loss = emb_c_loss / tf.stop_gradient(emb_c_loss)
return emb_c_loss # TODO: constant.
def add_least_gaussian2d_ops(self, opts):
""" Add ops searching for the 2d plane in z_dim hidden space
corresponding to the 'least Gaussian' look of the sample
"""
with tf.variable_scope('leastGaussian2d'):
# Projection matrix which we are going to tune
sample_ph = tf.placeholder(
tf.float32, [None, opts['latent_space_dim']],
name='sample_ph')
v = tf.get_variable(
"proj_v", [opts['latent_space_dim'], 1],
tf.float32, tf.random_normal_initializer(stddev=1.))
u = tf.get_variable(
"proj_u", [opts['latent_space_dim'], 1],
tf.float32, tf.random_normal_initializer(stddev=1.))
npoints = tf.cast(tf.shape(sample_ph)[0], tf.int32)
# First we need to make sure projection matrix is orthogonal
v_norm = tf.nn.l2_normalize(v, 0)
dotprod = tf.reduce_sum(tf.multiply(u, v_norm))
u_ort = u - dotprod * v_norm
u_norm = tf.nn.l2_normalize(u_ort, 0)
Mproj = tf.concat([v_norm, u_norm], 1)
sample_proj = tf.matmul(sample_ph, Mproj)
a = tf.eye(npoints) - tf.ones([npoints, npoints]) / tf.cast(npoints, tf.float32)
b = tf.matmul(sample_proj, tf.matmul(a, a), transpose_a=True)
b = tf.matmul(b, sample_proj)
# Sample covariance matrix
covhat = b / (tf.cast(npoints, tf.float32) - 1)
# covhat = tf.Print(covhat, [covhat], 'Cov:')
with tf.variable_scope('leastGaussian2d'):
gcov = opts['pot_pz_std'] * opts['pot_pz_std'] * tf.eye(2)
# l2 distance between sample cov and the Gaussian cov
projloss = tf.reduce_sum(tf.square(covhat - gcov))
# Also account for the first moment, i.e. expected value
projloss += tf.reduce_sum(tf.square(tf.reduce_mean(sample_proj, 0)))
# We are maximizing
projloss = -projloss
optim = tf.train.AdamOptimizer(0.001, 0.9)
optim = optim.minimize(projloss, var_list=[v, u])
self._proj_u = u_norm
self._proj_v = v_norm
self._proj_sample_ph = sample_ph
self._proj_covhat = covhat
self._proj_loss = projloss
self._proj_optim = optim
def least_gaussian_2d(self, opts, X):
"""
Given a sample X of shape (n_points, n_z), find the 2d plane
such that the projection onto it looks least Gaussian.
"""
with self._session.as_default(), self._session.graph.as_default():
sample_ph = self._proj_sample_ph
optim = self._proj_optim
loss = self._proj_loss
u = self._proj_u
v = self._proj_v
covhat = self._proj_covhat
proj_mat = tf.concat([v, u], 1).eval()
dot_prod = -1
best_of_runs = 10e5 # Any positive value would do
updated = False
for _start in xrange(3):
# We will run 3 times from random inits
loss_prev = 10e5 # Any positive value would do
proj_vars = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="leastGaussian2d")
self._session.run(tf.variables_initializer(proj_vars))
step = 0
for _ in xrange(5000):
self._session.run(optim, feed_dict={sample_ph:X})
step += 1
if step % 10 == 0:
loss_cur = loss.eval(feed_dict={sample_ph: X})
rel_imp = abs(loss_cur - loss_prev) / abs(loss_prev)
if rel_imp < 1e-2:
break
loss_prev = loss_cur
loss_final = loss.eval(feed_dict={sample_ph: X})
if loss_final < best_of_runs:
updated = True
best_of_runs = loss_final
proj_mat = tf.concat([v, u], 1).eval()
dot_prod = tf.reduce_sum(tf.multiply(u, v)).eval()
if not updated:
logging.error('WARNING: possible bug in the worst 2d projection')
return proj_mat, dot_prod
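# least_gaussian_2d runs the small Adam problem set up in add_least_gaussian2d_ops three
# times from random initializations and returns the zdim x 2 orthonormal projection whose
# projected sample mean and covariance deviate most from those of N(0, pot_pz_std^2 I)
# (the loss is negated, so minimizing it maximizes the deviation). It appears to be used
# only for visually inspecting how close Qz is to Pz.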
def _build_model_internal(self, opts):
"""Build the Graph corresponding to POT implementation.
"""
data_shape = self._data.data_shape
additional_losses = collections.OrderedDict()
# Placeholders
real_points_ph = tf.placeholder(
tf.float32, [None] + list(data_shape), name='real_points_ph')
noise_ph = tf.placeholder(
tf.float32, [None] + [opts['latent_space_dim']], name='noise_ph')
enc_noise_ph = tf.placeholder(
tf.float32, [None] + [opts['latent_space_dim']], name='enc_noise_ph')
lr_decay_ph = tf.placeholder(tf.float32)
is_training_ph = tf.placeholder(tf.bool, name='is_training_ph')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob_ph')
# Operations
if opts['pz_transform']:
assert opts['z_test'] == 'gan', 'Pz transforms are currently allowed only for POT+GAN'
noise = self.pz_sampler(opts, noise_ph)
else:
noise = noise_ph
real_points = self._data_augmentation(
opts, real_points_ph, is_training_ph)
if opts['e_is_random']:
# If encoder is random we map the training points
# to the expectation of Q(Z|X) and then add the scaled
# Gaussian noise corresponding to the learned sigmas
enc_train_mean, enc_log_sigmas = self.encoder(
opts, real_points,
is_training=is_training_ph, keep_prob=keep_prob_ph)
# enc_log_sigmas = tf.Print(enc_log_sigmas, [tf.reduce_max(enc_log_sigmas),
# tf.reduce_min(enc_log_sigmas),
# tf.reduce_mean(enc_log_sigmas)], 'Log sigmas:')
# enc_log_sigmas = tf.Print(enc_log_sigmas, [tf.slice(enc_log_sigmas, [0,0], [1,-1])], 'Log sigmas:')
# stds = tf.sqrt(tf.exp(enc_log_sigmas) + 1e-05)
stds = tf.sqrt(tf.nn.relu(enc_log_sigmas) + 1e-05)
# stds = tf.Print(stds, [stds[0], stds[1], stds[2], stds[3]], 'Stds: ')
# stds = tf.Print(stds, [enc_train_mean[0], enc_train_mean[1], enc_train_mean[2]], 'Means: ')
scaled_noise = tf.multiply(stds, enc_noise_ph)
encoded_training = enc_train_mean + scaled_noise
else:
encoded_training = self.encoder(
opts, real_points,
is_training=is_training_ph, keep_prob=keep_prob_ph)
reconstructed_training = self.generator(
opts, encoded_training,
is_training=is_training_ph, keep_prob=keep_prob_ph)
reconstructed_training.set_shape(real_points.get_shape())
if opts['recon_loss'] == 'l2':
# c(x,y) = ||x - y||_2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=1)
# sqrt(x + delta) keeps the derivative 1 / (2 * sqrt(x + delta)) finite at x = 0
loss_reconstr = tf.reduce_mean(tf.sqrt(loss_reconstr + 1e-08))
elif opts['recon_loss'] == 'l2f':
# c(x,y) = ||x - y||_2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=[1, 2, 3])
loss_reconstr = tf.reduce_mean(tf.sqrt(1e-08 + loss_reconstr)) * 0.2
elif opts['recon_loss'] == 'l2sq':
# c(x,y) = ||x - y||_2^2
loss_reconstr = tf.reduce_sum(
tf.square(real_points - reconstructed_training), axis=[1, 2, 3])
loss_reconstr = tf.reduce_mean(loss_reconstr) * 0.05
elif opts['recon_loss'] == 'l1':
# c(x,y) = ||x - y||_1
loss_reconstr = tf.reduce_mean(tf.reduce_sum(
tf.abs(real_points - reconstructed_training), axis=[1, 2, 3])) * 0.02
else:
assert False
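# Summary of the reconstruction costs above (each followed by a batch mean and a hand-tuned
# scale): 'l2' applies square -> reduce_sum over axis 1 -> sqrt -> mean; 'l2f' is the
# per-image Euclidean norm over axes [1, 2, 3] scaled by 0.2; 'l2sq' is the squared norm
# scaled by 0.05; 'l1' is the per-image L1 norm scaled by 0.02.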
# Pearson independence test of coordinates in Z space
loss_z_corr = self.correlation_loss(opts, encoded_training)
# Perform a Qz = Pz goodness-of-fit test (adversarial, MMD, kernel Stein, or moment/projection based)
if opts['z_test'] == 'gan':
# Pz = Qz test based on GAN in the Z space
d_logits_Pz = self.discriminator(opts, noise)
d_logits_Qz = self.discriminator(opts, encoded_training, reuse=True)
d_loss_Pz = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Pz, labels=tf.ones_like(d_logits_Pz)))
d_loss_Qz = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Qz, labels=tf.zeros_like(d_logits_Qz)))
d_loss_Qz_trick = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_Qz, labels=tf.ones_like(d_logits_Qz)))
d_loss = opts['pot_lambda'] * (d_loss_Pz + d_loss_Qz)
if opts['pz_transform']:
loss_match = d_loss_Qz_trick - d_loss_Pz
else:
loss_match = d_loss_Qz_trick
elif opts['z_test'] == 'mmd':
# Pz = Qz test based on MMD(Pz, Qz)
loss_match = self.discriminator_mmd_test(opts, encoded_training, noise)
d_loss = None
d_logits_Pz = None
d_logits_Qz = None
elif opts['z_test'] == 'lks':
# Pz = Qz test without adversarial training
# based on Kernel Stein Discrepancy
# Uncomment next line to check for the real Pz
# loss_match = self.discriminator_test(opts, noise_ph)
loss_match = self.discriminator_test(opts, encoded_training)
d_loss = None
d_logits_Pz = None
d_logits_Qz = None
else:
# Pz = Qz test without adversarial training
# (a) Check for multivariate Gaussianity
# by checking Gaussianity of all the 1d projections
# (b) Run Pearson's test of coordinate independence
loss_match = self.discriminator_test(opts, encoded_training)
loss_match = loss_match + opts['z_test_corr_w'] * loss_z_corr
d_loss = None
d_logits_Pz = None
d_logits_Qz = None
g_mom_stats = self.moments_stats(opts, encoded_training)
loss = opts['reconstr_w'] * loss_reconstr + opts['pot_lambda'] * loss_match
# Optionally, add one more cost function based on the embeddings
# add a discriminator in the X space, reusing the encoder or a new model.
if opts['adv_c_loss'] == 'encoder':
adv_c_loss, emb_c_loss = self._recon_loss_using_disc_encoder(
opts, reconstructed_training, encoded_training, real_points, is_training_ph, keep_prob_ph)
loss += opts['adv_c_loss_w'] * adv_c_loss + opts['emb_c_loss_w'] * emb_c_loss
additional_losses['adv_c'], additional_losses['emb_c'] = adv_c_loss, emb_c_loss
elif opts['adv_c_loss'] == 'conv':
adv_c_loss, emb_c_loss = self._recon_loss_using_disc_conv(
opts, reconstructed_training, real_points, is_training_ph, keep_prob_ph)
additional_losses['adv_c'], additional_losses['emb_c'] = adv_c_loss, emb_c_loss
loss += opts['adv_c_loss_w'] * adv_c_loss + opts['emb_c_loss_w'] * emb_c_loss
elif opts['adv_c_loss'] == 'conv_eb':
adv_c_loss, emb_c_loss = self._recon_loss_using_disc_conv_eb(
opts, reconstructed_training, real_points, is_training_ph, keep_prob_ph)
additional_losses['adv_c'], additional_losses['emb_c'] = adv_c_loss, emb_c_loss
loss += opts['adv_c_loss_w'] * adv_c_loss + opts['emb_c_loss_w'] * emb_c_loss
elif opts['adv_c_loss'] == 'vgg':
emb_c_loss = self._recon_loss_using_vgg(
opts, reconstructed_training, real_points, is_training_ph, keep_prob_ph)
loss += opts['emb_c_loss_w'] * emb_c_loss
additional_losses['emb_c'] = emb_c_loss
elif opts['adv_c_loss'] == 'moments':
emb_c_loss = self._recon_loss_using_moments(
opts, reconstructed_training, real_points, is_training_ph, keep_prob_ph)
loss += opts['emb_c_loss_w'] * emb_c_loss
additional_losses['emb_c'] = emb_c_loss
elif opts['adv_c_loss'] == 'vgg_moments':
emb_c_loss = self._recon_loss_using_vgg_moments(
opts, reconstructed_training, real_points, is_training_ph, keep_prob_ph)
loss += opts['emb_c_loss_w'] * emb_c_loss
additional_losses['emb_c'] = emb_c_loss
else:
assert opts['adv_c_loss'] == 'none'
# Add ops to pretrain the Qz match mean and covariance of Pz
loss_pretrain = None
if opts['e_pretrain']:
# Next two vectors are zdim-dimensional
mean_pz = tf.reduce_mean(noise, axis=0, keep_dims=True)
mean_qz = tf.reduce_mean(encoded_training, axis=0, keep_dims=True)
mean_loss = tf.reduce_mean(tf.square(mean_pz - mean_qz))
cov_pz = tf.matmul(noise - mean_pz,
noise - mean_pz, transpose_a=True)
cov_pz /= opts['e_pretrain_bsize'] - 1.
cov_qz = tf.matmul(encoded_training - mean_qz,
encoded_training - mean_qz, transpose_a=True)
cov_qz /= opts['e_pretrain_bsize'] - 1.
cov_loss = tf.reduce_mean(tf.square(cov_pz - cov_qz))
loss_pretrain = mean_loss + cov_loss
# Also add ops to find the least Gaussian 2d projection
        # this is handy when visually inspecting whether Qz = Pz
self.add_least_gaussian2d_ops(opts)
# Optimizer ops
t_vars = tf.trainable_variables()
# Updates for discriminator
d_vars = [var for var in t_vars if 'DISCRIMINATOR/' in var.name]
# Updates for everything but adversary (encoder, decoder and possibly pz-transform)
all_vars = [var for var in t_vars if 'DISCRIMINATOR/' not in var.name]
        # Encoder and generator (decoder) variables taken together
eg_vars = [var for var in t_vars if 'GENERATOR/' in var.name or 'ENCODER/' in var.name]
# Encoder variables separately if we want to pretrain
e_vars = [var for var in t_vars if 'ENCODER/' in var.name]
logging.error('Param num in G and E: %d' % \
np.sum([np.prod([int(d) for d in v.get_shape()]) for v in eg_vars]))
for v in eg_vars:
print v.name, [int(d) for d in v.get_shape()]
if len(d_vars) > 0:
d_optim = ops.optimizer(opts, net='d', decay=lr_decay_ph).minimize(loss=d_loss, var_list=d_vars)
else:
d_optim = None
optim = ops.optimizer(opts, net='g', decay=lr_decay_ph).minimize(loss=loss, var_list=all_vars)
pretrain_optim = None
if opts['e_pretrain']:
pretrain_optim = ops.optimizer(opts, net='g').minimize(loss=loss_pretrain, var_list=e_vars)
generated_images = self.generator(
opts, noise, is_training=is_training_ph,
reuse=True, keep_prob=keep_prob_ph)
self._real_points_ph = real_points_ph
self._real_points = real_points
self._noise_ph = noise_ph
self._noise = noise
self._enc_noise_ph = enc_noise_ph
self._lr_decay_ph = lr_decay_ph
self._is_training_ph = is_training_ph
self._keep_prob_ph = keep_prob_ph
self._optim = optim
self._d_optim = d_optim
self._pretrain_optim = pretrain_optim
self._loss = loss
self._loss_reconstruct = loss_reconstr
self._loss_match = loss_match
self._loss_z_corr = loss_z_corr
self._loss_pretrain = loss_pretrain
self._additional_losses = additional_losses
self._g_mom_stats = g_mom_stats
self._d_loss = d_loss
self._generated = generated_images
self._Qz = encoded_training
self._reconstruct_x = reconstructed_training
saver = tf.train.Saver(max_to_keep=10)
tf.add_to_collection('real_points_ph', self._real_points_ph)
tf.add_to_collection('noise_ph', self._noise_ph)
tf.add_to_collection('enc_noise_ph', self._enc_noise_ph)
if opts['pz_transform']:
tf.add_to_collection('noise', self._noise)
tf.add_to_collection('is_training_ph', self._is_training_ph)
tf.add_to_collection('keep_prob_ph', self._keep_prob_ph)
tf.add_to_collection('encoder', self._Qz)
tf.add_to_collection('decoder', self._generated)
if d_logits_Pz is not None:
tf.add_to_collection('disc_logits_Pz', d_logits_Pz)
if d_logits_Qz is not None:
tf.add_to_collection('disc_logits_Qz', d_logits_Qz)
self._saver = saver
logging.error("Building Graph Done.")
def pretrain(self, opts):
steps_max = 200
batch_size = opts['e_pretrain_bsize']
for step in xrange(steps_max):
train_size = self._data.num_points
data_ids = np.random.choice(train_size, min(train_size, batch_size),
replace=False)
batch_images = self._data.data[data_ids].astype(np.float)
batch_noise = opts['pot_pz_std'] *\
utils.generate_noise(opts, batch_size)
# Noise for the random encoder (if present)
batch_enc_noise = utils.generate_noise(opts, batch_size)
# Update encoder
[_, loss_pretrain] = self._session.run(
[self._pretrain_optim,
self._loss_pretrain],
feed_dict={self._real_points_ph: batch_images,
self._noise_ph: batch_noise,
self._enc_noise_ph: batch_enc_noise,
self._is_training_ph: True,
self._keep_prob_ph: opts['dropout_keep_prob']})
if opts['verbose'] == 2:
logging.error('Step %d/%d, loss=%f' % (step, steps_max, loss_pretrain))
if loss_pretrain < 0.1:
break
def _train_internal(self, opts):
"""Train a POT model.
"""
logging.error(opts)
batches_num = self._data.num_points / opts['batch_size']
train_size = self._data.num_points
num_plot = 320
sample_prev = np.zeros([num_plot] + list(self._data.data_shape))
l2s = []
losses = []
losses_rec = []
losses_match = []
wait = 0
start_time = time.time()
counter = 0
decay = 1.
logging.error('Training POT')
# Optionally we first pretrain the Qz to match mean and
# covariance of Pz
if opts['e_pretrain']:
logging.error('Pretraining the encoder')
self.pretrain(opts)
logging.error('Pretraining the encoder done')
for _epoch in xrange(opts["gan_epoch_num"]):
if opts['decay_schedule'] == "manual":
if _epoch == 30:
decay = decay / 2.
if _epoch == 50:
decay = decay / 5.
if _epoch == 100:
decay = decay / 10.
elif opts['decay_schedule'] != "plateau":
assert type(1.0 * opts['decay_schedule']) == float
decay = 1.0 * 10**(-_epoch / float(opts['decay_schedule']))
if _epoch > 0 and _epoch % opts['save_every_epoch'] == 0:
os.path.join(opts['work_dir'], opts['ckpt_dir'])
self._saver.save(self._session,
os.path.join(opts['work_dir'],
opts['ckpt_dir'],
'trained-pot'),
global_step=counter)
for _idx in xrange(batches_num):
data_ids = np.random.choice(train_size, opts['batch_size'],
replace=False, p=self._data_weights)
batch_images = self._data.data[data_ids].astype(np.float)
# Noise for the Pz=Qz GAN
batch_noise = opts['pot_pz_std'] *\
utils.generate_noise(opts, opts['batch_size'])
# Noise for the random encoder (if present)
batch_enc_noise = utils.generate_noise(opts, opts['batch_size'])
# Update generator (decoder) and encoder
[_, loss, loss_rec, loss_match] = self._session.run(
[self._optim,
self._loss,
self._loss_reconstruct,
self._loss_match],
feed_dict={self._real_points_ph: batch_images,
self._noise_ph: batch_noise,
self._enc_noise_ph: batch_enc_noise,
self._lr_decay_ph: decay,
self._is_training_ph: True,
self._keep_prob_ph: opts['dropout_keep_prob']})
if opts['decay_schedule'] == "plateau":
# First 30 epochs do nothing
if _epoch >= 30:
                        # If the loss has not beaten its minimum over the last
                        # 20 epochs for roughly 10 epochs' worth of batches,
                        # then decrease the learning rate.
if loss < min(losses[-20 * batches_num:]):
wait = 0
else:
wait += 1
if wait > 10 * batches_num:
decay = max(decay / 1.4, 1e-6)
logging.error('Reduction in learning rate: %f' % decay)
wait = 0
losses.append(loss)
losses_rec.append(loss_rec)
losses_match.append(loss_match)
if opts['verbose'] >= 2:
# logging.error('loss after %d steps : %f' % (counter, losses[-1]))
logging.error('loss match after %d steps : %f' % (counter, losses_match[-1]))
# Update discriminator in Z space (if any).
if self._d_optim is not None:
for _st in range(opts['d_steps']):
if opts['d_new_minibatch']:
d_data_ids = np.random.choice(
train_size, opts['batch_size'],
replace=False, p=self._data_weights)
                            d_batch_images = self._data.data[d_data_ids].astype(np.float)
d_batch_enc_noise = utils.generate_noise(opts, opts['batch_size'])
else:
d_batch_images = batch_images
d_batch_enc_noise = batch_enc_noise
_ = self._session.run(
[self._d_optim, self._d_loss],
feed_dict={self._real_points_ph: d_batch_images,
self._noise_ph: batch_noise,
self._enc_noise_ph: d_batch_enc_noise,
self._lr_decay_ph: decay,
self._is_training_ph: True,
self._keep_prob_ph: opts['dropout_keep_prob']})
counter += 1
now = time.time()
rec_test = None
if opts['verbose'] and counter % 500 == 0:
# Printing (training and test) loss values
test = self._data.test_data[:200]
[loss_rec_test, rec_test, g_mom_stats, loss_z_corr, additional_losses] = self._session.run(
[self._loss_reconstruct, self._reconstruct_x, self._g_mom_stats, self._loss_z_corr,
self._additional_losses],
feed_dict={self._real_points_ph: test,
self._enc_noise_ph: utils.generate_noise(opts, len(test)),
self._is_training_ph: False,
self._noise_ph: batch_noise,
self._keep_prob_ph: 1e5})
debug_str = 'Epoch: %d/%d, batch:%d/%d, batch/sec:%.2f' % (
_epoch+1, opts['gan_epoch_num'], _idx+1,
batches_num, float(counter) / (now - start_time))
debug_str += ' [L=%.5f, Recon=%.5f, GanL=%.5f, Recon_test=%.5f' % (
loss, loss_rec, loss_match, loss_rec_test)
debug_str += ',' + ', '.join(
['%s=%.2g' % (k, v) for (k, v) in additional_losses.items()])
logging.error(debug_str)
if opts['verbose'] >= 2:
logging.error(g_mom_stats)
logging.error(loss_z_corr)
if counter % opts['plot_every'] == 0:
# plotting the test images.
metrics = Metrics()
merged = np.vstack([rec_test[:8 * 10], test[:8 * 10]])
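                        # Interleave originals and reconstructions: even rows
                        # hold test images, odd rows their reconstructions.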
r_ptr = 0
w_ptr = 0
for _ in range(8 * 10):
merged[w_ptr] = test[r_ptr]
merged[w_ptr + 1] = rec_test[r_ptr]
r_ptr += 1
w_ptr += 2
metrics.make_plots(
opts,
counter,
None,
merged,
prefix='test_reconstr_e%04d_mb%05d_' % (_epoch, _idx))
if opts['verbose'] and counter % opts['plot_every'] == 0:
# Plotting intermediate results
metrics = Metrics()
# --Random samples from the model
points_to_plot, sample_pz = self._session.run(
[self._generated, self._noise],
feed_dict={
self._noise_ph: self._noise_for_plots[0:num_plot],
self._is_training_ph: False,
self._keep_prob_ph: 1e5})
Qz_num = 320
sample_Qz = self._session.run(
self._Qz,
feed_dict={
self._real_points_ph: self._data.data[:Qz_num],
self._enc_noise_ph: utils.generate_noise(opts, Qz_num),
self._is_training_ph: False,
self._keep_prob_ph: 1e5})
# Searching least Gaussian 2d projection
proj_mat, check = self.least_gaussian_2d(opts, sample_Qz)
                    # Projecting samples from Qz and Pz on this 2d plane
metrics.Qz = np.dot(sample_Qz, proj_mat)
# metrics.Pz = np.dot(self._noise_for_plots, proj_mat)
metrics.Pz = np.dot(sample_pz, proj_mat)
                    if self._data.labels is not None:
metrics.Qz_labels = self._data.labels[:Qz_num]
else:
metrics.Qz_labels = None
metrics.l2s = losses[:]
metrics.losses_match = [opts['pot_lambda'] * el for el in losses_match]
metrics.losses_rec = [opts['reconstr_w'] * el for el in losses_rec]
to_plot = [points_to_plot, 0 * batch_images[:16], batch_images]
if rec_test is not None:
to_plot += [0 * batch_images[:16], rec_test[:64]]
metrics.make_plots(
opts,
counter,
None,
np.vstack(to_plot),
prefix='sample_e%04d_mb%05d_' % (_epoch, _idx) if rec_test is None \
else 'sample_with_test_e%04d_mb%05d_' % (_epoch, _idx))
# --Reconstructions for the train and test points
num_real_p = 8 * 10
reconstructed, real_p = self._session.run(
[self._reconstruct_x, self._real_points],
feed_dict={
self._real_points_ph: self._data.data[:num_real_p],
self._enc_noise_ph: utils.generate_noise(opts, num_real_p),
self._is_training_ph: True,
self._keep_prob_ph: 1e5})
points = real_p
merged = np.vstack([reconstructed, points])
r_ptr = 0
w_ptr = 0
for _ in range(8 * 10):
merged[w_ptr] = points[r_ptr]
merged[w_ptr + 1] = reconstructed[r_ptr]
r_ptr += 1
w_ptr += 2
metrics.make_plots(
opts,
counter,
None,
merged,
prefix='reconstr_e%04d_mb%05d_' % (_epoch, _idx))
sample_prev = points_to_plot[:]
if _epoch > 0:
os.path.join(opts['work_dir'], opts['ckpt_dir'])
self._saver.save(self._session,
os.path.join(opts['work_dir'],
opts['ckpt_dir'],
'trained-pot-final'),
global_step=counter)
def _sample_internal(self, opts, num):
"""Sample from the trained GAN model.
"""
# noise = opts['pot_pz_std'] * utils.generate_noise(opts, num)
# sample = self._run_batch(
# opts, self._generated, self._noise_ph, noise, self._is_training_ph, False)
sample = None
return sample
```
#### File: jiajunhua/tolstikhin-adagan/utils.py
```python
import tensorflow as tf
import os
import sys
import copy
import numpy as np
import logging
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import metrics as metrics_lib
# from metrics import Metrics
from tqdm import tqdm
def generate_noise(opts, num=100):
"""Generate latent noise.
"""
noise = None
if opts['latent_space_distr'] == 'uniform':
noise = np.random.uniform(
-1, 1, [num, opts["latent_space_dim"]]).astype(np.float32)
elif opts['latent_space_distr'] == 'normal':
mean = np.zeros(opts["latent_space_dim"])
cov = np.identity(opts["latent_space_dim"])
noise = np.random.multivariate_normal(
mean, cov, num).astype(np.float32)
elif opts['latent_space_distr'] == 'mnist':
noise = np.random.rand(1, opts['latent_space_dim'])
return noise
class ArraySaver(object):
"""A simple class helping with saving/loading numpy arrays from files.
This class allows to save / load numpy arrays, while storing them either
on disk or in memory.
"""
def __init__(self, mode='ram', workdir=None):
self._mode = mode
self._workdir = workdir
self._global_arrays = {}
def save(self, name, array):
if self._mode == 'ram':
self._global_arrays[name] = copy.deepcopy(array)
elif self._mode == 'disk':
create_dir(self._workdir)
np.save(o_gfile((self._workdir, name), 'wb'), array)
else:
assert False, 'Unknown save / load mode'
def load(self, name):
if self._mode == 'ram':
return self._global_arrays[name]
elif self._mode == 'disk':
return np.load(o_gfile((self._workdir, name), 'rb'))
else:
assert False, 'Unknown save / load mode'
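# Illustrative usage sketch (not part of the original module; the array name
# below is hypothetical). In 'ram' mode arrays are deep-copied into a dict;
# with mode='disk' the same calls go through np.save / np.load under workdir.
def _example_array_saver_roundtrip():
    saver = ArraySaver(mode='ram')
    saver.save('noise', np.zeros((3, 2)))
    return saver.load('noise')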
class ProgressBar(object):
"""Super-simple progress bar.
Thanks to http://stackoverflow.com/questions/3160699/python-progress-bar
"""
def __init__(self, verbose, iter_num):
self._width = iter_num
self.verbose = verbose
if self.verbose:
sys.stdout.write("[%s]" % (" " * self._width))
sys.stdout.flush()
sys.stdout.write("\b" * (self._width + 1))
def bam(self):
if self.verbose:
sys.stdout.write("*")
sys.stdout.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.verbose:
sys.stdout.write("\n")
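# Illustrative usage sketch (not part of the original module): ProgressBar is
# meant to be used as a context manager, calling bam() once per finished
# iteration to print one '*' into the pre-drawn bar.
def _example_progress_bar(iter_num=10):
    with ProgressBar(True, iter_num) as pbar:
        for _ in range(iter_num):
            pbar.bam()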
def TQDM(opts, myRange, *args, **kwargs):
if opts['verbose'] and opts.get('use_tqdm', True):
return tqdm(myRange, *args, ncols=80, smoothing=0., **kwargs)
else:
return myRange
def create_dir(d):
if not tf.gfile.IsDirectory(d):
tf.gfile.MakeDirs(d)
class File(tf.gfile.GFile):
"""Wrapper on GFile extending seek, to support what python file supports."""
def __init__(self, *args):
super(File, self).__init__(*args)
def seek(self, position, whence=0):
if whence == 1:
position += self.tell()
elif whence == 2:
position += self.size()
else:
assert whence == 0
super(File, self).seek(position)
def o_gfile(filename, mode):
"""Wrapper around file open, using gfile underneath.
filename can be a string or a tuple/list, in which case the components are
joined to form a full path.
"""
if isinstance(filename, tuple) or isinstance(filename, list):
filename = os.path.join(*filename)
return File(filename, mode)
def listdir(dirname):
return tf.gfile.ListDirectory(dirname)
def js_div_uniform(p, num_cat=1000):
""" Computes the JS-divergence between p and the uniform distribution.
"""
phat = np.bincount(p, minlength=num_cat)
phat = (phat + 0.0) / np.sum(phat)
pu = (phat * .0 + 1.) / num_cat
pref = (phat + pu) / 2.
JS = np.sum(np.log(pu / pref) * pu)
JS += np.sum(np.log(pref / pu) * pref)
JS = JS / 2.
return JS
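# Illustrative check (not part of the original module): labels spread evenly
# over num_cat categories give a divergence near 0, while a degenerate
# assignment that always picks category 0 gives a noticeably larger value.
def _example_js_div_uniform(num_cat=10, n=100000):
    uniform_labels = np.random.randint(0, num_cat, size=n)
    degenerate_labels = np.zeros(n, dtype=int)
    return (js_div_uniform(uniform_labels, num_cat),
            js_div_uniform(degenerate_labels, num_cat))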
def debug_mixture_classifier(opts, step, probs, points, num_plot=320, real=True):
"""Small debugger for the mixture classifier's output.
"""
num = len(points)
if len(probs) != num:
return
if num < 2 * num_plot:
return
sorted_vals_and_ids = sorted(zip(probs, range(num)))
if real:
correct = sorted_vals_and_ids[-num_plot:]
wrong = sorted_vals_and_ids[:num_plot]
else:
correct = sorted_vals_and_ids[:num_plot]
wrong = sorted_vals_and_ids[-num_plot:]
correct_ids = [_id for val, _id in correct]
wrong_ids = [_id for val, _id in wrong]
idstring = 'real' if real else 'fake'
logging.debug('Correctly classified %s points probs:' %\
idstring)
logging.debug([val[0] for val, _id in correct])
logging.debug('Incorrectly classified %s points probs:' %\
idstring)
logging.debug([val[0] for val, _id in wrong])
metrics = metrics_lib.Metrics()
metrics.make_plots(opts, step,
None, points[correct_ids],
prefix='c_%s_correct_' % idstring)
metrics.make_plots(opts, step,
None, points[wrong_ids],
prefix='c_%s_wrong_' % idstring)
def debug_updated_weights(opts, steps, weights, data):
""" Various debug plots for updated weights of training points.
"""
assert data.num_points == len(weights), 'Length mismatch'
ws_and_ids = sorted(zip(weights,
range(len(weights))))
num_plot = 20 * 16
if num_plot > len(weights):
return
ids = [_id for w, _id in ws_and_ids[:num_plot]]
plot_points = data.data[ids]
metrics = metrics_lib.Metrics()
metrics.make_plots(opts, steps,
None, plot_points,
prefix='d_least_')
ids = [_id for w, _id in ws_and_ids[-num_plot:]]
plot_points = data.data[ids]
metrics = metrics_lib.Metrics()
metrics.make_plots(opts, steps,
None, plot_points,
prefix='d_most_')
plt.clf()
ax1 = plt.subplot(211)
ax1.set_title('Weights over data points')
plt.plot(range(len(weights)), sorted(weights))
plt.axis([0, len(weights), 0., 2. * np.max(weights)])
if data.labels is not None:
all_labels = np.unique(data.labels)
w_per_label = -1. * np.ones(len(all_labels))
for _id, y in enumerate(all_labels):
w_per_label[_id] = np.sum(
weights[np.where(data.labels == y)[0]])
ax2 = plt.subplot(212)
ax2.set_title('Weights over labels')
plt.scatter(range(len(all_labels)), w_per_label, s=30)
filename = 'data_w{:02d}.png'.format(steps)
create_dir(opts['work_dir'])
plt.savefig(o_gfile((opts["work_dir"], filename), 'wb'))
def one_hot(labels, num_class=10):
res = np.zeros((len(labels), num_class))
for idx in xrange(len(labels)):
res[idx][labels[idx]] = 1.
return res
``` |
{
"source": "jiajunmao/igvc-software",
"score": 2
} |
#### File: src/train_eval/train.py
```python
import argparse
import cv2
import numpy as np
import os
import pdb
import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import transforms
from IGVCDataset import IGVCDataset
import models.model
import utils
np.set_printoptions(threshold=np.nan)
torch.set_printoptions(precision=10)
# Training settings.
parser = argparse.ArgumentParser(description='IGVC segmentation of lines.')
# Hyperparameters.
parser.add_argument('--batch_size', type=int, default=1,
help='input batch size for training.')
parser.add_argument('--epochs', type=int, default=5,
help='number of epochs to train')
parser.add_argument('--im_size', type=int, nargs=3, default=[3,400,400],
help='image dimensions for training.')
parser.add_argument('--kernel_size', type=int, default=3,
help='size of convolution kernels/filters.')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate.')
parser.add_argument('--lr_decay', type=float, default=1.0,
help='Learning rate decay multiplier.')
parser.add_argument('--step_interval', type=int, default=100,
help='Update learning rate every <step_interval> epochs.')
parser.add_argument('--weight_decay', type=float, default=0.0,
help='Weight decay hyperparameter.')
# Other configuration.
parser.add_argument('--save_model', action='store_true', default=False,
help='Save pytorch model.')
parser.add_argument('--save_interval', type=int, default=1,
help='Save pytorch model after <save_interval> epochs.')
parser.add_argument('--load_model', type=str, default=None,
help='Load model from .pt file, either for initialization or evaluation.')
parser.add_argument('--log_interval', type=int, default=10,
help='number of batches between logging train status.')
parser.add_argument('--vis', action='store_true', default=False,
help='Visualize model output every log interval.')
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--cfgfile', type=str, default='cfg/igvc.cfg',
help='Directory containing cfg for train and evaluation.')
parser.add_argument('--test', action='store_true', default=False,
help='Skip training, and evaluate a loaded model on test data.')
parser.add_argument('--val_samples', type=int, default=10,
help='Number of validation samples to use from train data.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
print('Using cuda.')
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
transform = transforms.Compose([
transforms.ToTensor(),
])
cfg_params = utils.read_data_cfg(args.cfgfile)
train_txt = cfg_params['train']
test_txt = cfg_params['test']
backup_dir = cfg_params['backup']
if args.load_model is not None:
print('Loading model from %s.' % args.load_model)
model = models.model.UNet(args.im_size, args.kernel_size)
model.load_state_dict(torch.load(args.load_model))
elif args.test:
print('Missing model file for evaluating test set.')
exit()
else:
model = models.model.UNet(args.im_size, args.kernel_size)
# Datasets and dataloaders.
if not args.test:
train_dataset = IGVCDataset(train_txt, im_size=args.im_size, split='train', transform=transform, val_samples=args.val_samples)
val_dataset = IGVCDataset(train_txt, im_size=args.im_size, split='val', transform=transform, val_samples=args.val_samples)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
# Optmizer
lr = args.lr
print('Initial lr: %f.' % lr)
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=args.weight_decay)
else:
test_dataset = IGVCDataset(test_txt, im_size=args.im_size, split='test', transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True, **kwargs)
criterion = F.binary_cross_entropy
if args.cuda:
model.cuda()
def train(epoch):
iters = []
lrs = []
train_losses = []
val_losses = []
val_accuracies = []
model.train()
# train loop
for batch_idx, batch in enumerate(train_loader):
# prepare data
images = Variable(batch[0])
targets = Variable(batch[1])
if args.cuda:
images, targets = images.cuda(), targets.cuda()
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
if args.vis and batch_idx % args.log_interval == 0 and images.shape[0] == 1:
cv2.imshow('output: ', outputs.cpu().data.numpy()[0][0])
cv2.imshow('target: ', targets.cpu().data.numpy()[0][0])
cv2.waitKey(10)
# Learning rate decay.
if epoch % args.step_interval == 0 and epoch != 1 and batch_idx == 0:
if args.lr_decay != 1:
global lr, optimizer
lr *= args.lr_decay
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('Learning rate decayed to %f.' % lr)
if batch_idx % args.log_interval == 0:
val_loss, val_acc = evaluate('val', n_batches=80)
train_loss = loss.item()
iters.append(len(train_loader.dataset)*(epoch-1)+batch_idx)
lrs.append(lr)
train_losses.append(train_loss)
val_losses.append(val_loss)
val_accuracies.append(val_acc)
examples_this_epoch = batch_idx * len(images)
epoch_progress = 100. * batch_idx / len(train_loader)
print('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
'Train Loss: {:.6f}\tVal Loss: {:.6f}\tVal Acc: {}'.format(
epoch, examples_this_epoch, len(train_loader.dataset),
epoch_progress, train_loss, val_loss, val_acc))
return iters, train_losses, val_losses, val_accuracies
def evaluate(split, verbose=False, n_batches=None):
'''
Compute loss on val or test data.
'''
model.eval()
loss = 0
acc = 0
correct = 0
n_examples = 0
if split == 'val':
loader = val_loader
elif split == 'test':
loader = test_loader
for batch_i, batch in enumerate(loader):
data, target = batch
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
loss += criterion(output, target).item()
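        # Pixel-wise accuracy: fraction of pixels whose predicted probability
        # falls on the correct side of 0.5 (above on line pixels, below on
        # background), accumulated per image and averaged over n_examples.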
acc += (np.sum(output.cpu().data.numpy()[target.cpu().data.numpy()!=0] > 0.5) \
+ np.sum(output.cpu().data.numpy()[target.cpu().data.numpy()==0] < 0.5)) / float(args.im_size[1]*args.im_size[2])
n_examples += output.size(0)
if n_batches and (batch_i == n_batches-1):
break
loss /= n_examples
acc /= n_examples
return loss, acc
if args.test:
print("Running evaluation on test set.")
test_loss, test_acc = evaluate('test')
print('Test loss: %f Test accuracy: %f' % (test_loss, test_acc))
else:
# train the model one epoch at a time
metrics = {'iters':[], 'train_loss':[], 'val_loss':[], 'val_acc':[]}
for epoch in range(1, args.epochs + 1):
iters, train_losses, val_losses, val_accuracies = train(epoch)
metrics['iters'] += iters
metrics['train_loss'] += train_losses
metrics['val_loss'] += val_losses
metrics['val_acc'] += val_accuracies
if (epoch % args.save_interval == 0 and args.save_model):
save_path = os.path.join(backup_dir, 'IGVCModel' + '_' + str(epoch) + '.pt')
print('Saving model: %s' % save_path)
torch.save(model.state_dict(), save_path)
metrics_path = os.path.join(backup_dir, 'metrics.npy')
np.save(metrics_path, metrics)
```
#### File: igvc_utils/scripts/merge_camera_dirs.py
```python
import argparse
import os
import sys
import shutil
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
parser = argparse.ArgumentParser(description='Merge the contents of multiple directories in sequence in order to prevent file naming conflicts.')
parser.add_argument('-d','--directories', nargs='+', help='Absolute paths of directories to merge in sequence', required=True)
parser.add_argument('-o','--out_dir_path', nargs='?', type=str, help='Absolute path to output dir name. i.e. /PATH/TO/OUTPUT/DIR/DIR_NAME', required=False, default='out')
args = parser.parse_args()
def copy_rename(old_file_name, new_file_name):
src_dir= os.curdir
dst_dir= os.path.join(os.curdir , "subfolder")
src_file = os.path.join(src_dir, old_file_name)
shutil.copy(src_file,dst_dir)
dst_file = os.path.join(dst_dir, old_file_name)
new_dst_file_name = os.path.join(dst_dir, new_file_name)
os.rename(dst_file, new_dst_file_name)
if __name__ == "__main__":
out_dir_path = args.out_dir_path
for dir in args.directories:
if not os.path.exists(dir):
print(bcolors.FAIL + "Directory [" + dir + "] does not exist. Please check supplied path." + bcolors.ENDC)
sys.exit()
if not os.path.exists(out_dir_path):
os.mkdir(out_dir_path)
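    # Zero-pad the sequential output names with enough digits to cover the
    # total number of files across all input directories, so the merged files
    # sort in copy order.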
padding = len(str(sum([len(os.listdir(dir)) for dir in args.directories])))
# cycle through directories
i = 0
for dir in args.directories:
files = os.listdir(dir)
        print(bcolors.OKBLUE + "Merging " + str(len(files)) + " files from " + dir + bcolors.ENDC)
# cycle through files within directory
for file in files:
src_file = os.path.join(dir, file)
shutil.copy(src_file, out_dir_path)
dst_file = os.path.join(out_dir_path, file)
extension = os.path.splitext(file)[1]
new_dst_file_name = os.path.join(out_dir_path, format(i, '0' + str(padding)) + extension)
os.rename(dst_file, new_dst_file_name)
i+=1
``` |
{
"source": "JiajunQiu/ProNAhot",
"score": 3
} |
#### File: ProNAhot/pronaHotMod/lib_parser.py
```python
import math
import sys
import re
class ParseError(Exception): pass #To indicate a parsing error
class EmptyError(Exception): pass #To indicate empty data passed to the parser
class NoResultError(Exception): pass #To indicate that a method did not feel like producing a result, used in parse_psic()
def parse_sequence(d_in, d_fasta):
"""
pp returns two sequence files: query.in and query.fasta. No idea why.
Here we check that both are the same and return the sequence.
"""
seq_in = ''
seq_fasta = ''
for line in d_in.split('\n')[1:]:
if not line: continue
seq_in += line
for line in d_fasta.split('\n')[1:]:
if not line: continue
seq_fasta += ''.join(line.split()) #Get rid of those strange whitespaces within the sequence!
if seq_in != seq_fasta:
        sys.exit("Error: ProNAhot cannot be run for protein %s.\nThe sequences in *.in and *.fasta are not identical;\npp seems to have worked with different sequences.\n" % d_fasta.split('\n')[0][1:])
return {'seq':seq_in},d_fasta.split('\n')[0][1:]
def parse_blast_reorder(d_blast):
ori_order='ARNDCQEGHILKMFPSTWYV'
#ress
new_order='RKDEQNHSTYCWAILMFVPG'
#res
# new_order='AVLIPFWMGSTCYNQDEKRH'
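    # Like parse_blast() below, but the 20 amino-acid columns of the PSSM and
    # percentage matrices are remapped from BLAST's native column order
    # (ori_order) to a different residue order (new_order) used by the caller.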
if d_blast == '':
raise EmptyError('Empty pssm file!')
pssm_mat = []
perc_mat = []
inf_per_pos = []
rel_weight = []
pssm_seq = ''
#First turn pssm into a matrix we can handle
for line in d_blast.split('\n'):
tokens = line.split()
if len(tokens) == 40 and line.strip() != 'A R N D C Q E G H I L K M F P S T W Y V A R N D C Q E G H I L K M F P S T W Y V':
raise ParseError("It seems that we have an issue now. Blast produces columns with altering meanings!")
if len(tokens) != 44: continue
pssm_seq += tokens[1]
inf_per_pos.append( float(tokens[42]) ) #The second last column in the blast output
rel_weight.append( float(tokens[43]) ) #The very last column
#The first matrix i.e. pssm
pssm_mat_row = []
tmp={}
for ind in range(len(ori_order)):
tmp[ori_order[ind]]=tokens[2:22][ind]
for r in new_order:
pssm_mat_row.append(int(tmp[r]))
#The second one, i.e. the percentages
perc_mat_row = []
tmp={}
for ind in range(len(ori_order)):
tmp[ori_order[ind]]=tokens[22:42][ind]
for r in new_order:
perc_mat_row.append(int(tmp[r]))
#Check if we are really dealing with 20 values here!
if len(pssm_mat_row) != 20 or len(perc_mat_row) != 20:
raise ParseError("It seems that we have a situation now. The expected amount of columns is 20, found: %s!" % len(pssm_mat_row))
pssm_mat.append(pssm_mat_row)
perc_mat.append(perc_mat_row)
#Further consistency check...
if len(pssm_mat) != len(pssm_seq) != len(perc_mat) != len(inf_per_pos) != len(rel_weight):
raise ParseError("It seems that we have an issue now. Something went wrong during parsing the pssm matrix!")
return {'seq':pssm_seq, 'pssm':pssm_mat, 'perc':perc_mat, 'inf_per_pos':inf_per_pos, 'rel_weight':rel_weight}
def parse_consurf(consurf):
if consurf == '':
raise EmptyError('Empty consurf file!')
out1 = []
out2 = []
for line in consurf.split('\n'):
tokens = line.split('\t')
if len(tokens) < 6 or 'COLOR' in line: continue
out1.append( float(tokens[2]) )
out2.append( float(tokens[4].lstrip()[0]) )
#Consistency check
if len(out1) != len(out2):
raise ParseError("Something happened! consurf returns different column lengths!")
return {'consurf_score':out1, 'consurf_color':out2}
def parse_blast(d_blast):
"""
Note that we do not parse out the weighted observed percentages part.
Meaning of pssm columns:
A R N D C Q E G H I L K M F P S T W Y V
Returns a dictionary with keys as follows:
'seq': The sequence as blast sees it
'pssm': pssm matrix as a list of lists. Each sublist represents a row in the PSSM matrix.
'perc': perc matrix
'inf_per_pos': The second last column in the blast output
'rel_weight': The last column
"""
if d_blast == '':
raise EmptyError('Empty pssm file!')
pssm_mat = []
perc_mat = []
inf_per_pos = []
rel_weight = []
pssm_seq = ''
#First turn pssm into a matrix we can handle
for line in d_blast.split('\n'):
tokens = line.split()
if len(tokens) == 40 and line.strip() != 'A R N D C Q E G H I L K M F P S T W Y V A R N D C Q E G H I L K M F P S T W Y V':
raise ParseError("It seems that we have an issue now. Blast produces columns with altering meanings!")
if len(tokens) != 44: continue
pssm_seq += tokens[1]
inf_per_pos.append( float(tokens[42]) ) #The second last column in the blast output
rel_weight.append( float(tokens[43]) ) #The very last column
#The first matrix i.e. pssm
pssm_mat_row = []
for t in tokens[2:22]:
pssm_mat_row.append(int(t))
#The second one, i.e. the percentages
perc_mat_row = []
for t in tokens[22:42]:
perc_mat_row.append(int(t))
#Check if we are really dealing with 20 values here!
if len(pssm_mat_row) != 20 or len(perc_mat_row) != 20:
raise ParseError("It seems that we have a situation now. The expected amount of columns is 20, found: %s!" % len(pssm_mat_row))
pssm_mat.append(pssm_mat_row)
perc_mat.append(perc_mat_row)
#Further consistency check...
if len(pssm_mat) != len(pssm_seq) != len(perc_mat) != len(inf_per_pos) != len(rel_weight):
raise ParseError("It seems that we have an issue now. Something went wrong during parsing the pssm matrix!")
return {'seq':pssm_seq, 'pssm':pssm_mat, 'perc':perc_mat, 'inf_per_pos':inf_per_pos, 'rel_weight':rel_weight}
def parse_psic(d_psic):
"""
Unfortunately, psic returns no sequence.
Meaning of psic's columns:
A R N D C Q E G H I L K M F P S T W Y V NumSeq
This is exactly what could be found in the sublist of each residue.
Returns a dictionary with keys as follows:
'psic': psic matrix as a list of lists. Each sublist represents a row in the psic matrix.
'NumSeq': the very last column, denoting NumSeq i.e. number of aligned sequences at that pos
"""
if d_psic == '':
raise EmptyError('Empty psic file!')
elif d_psic.startswith('sequence too short'):
raise NoResultError('Sequence seems to be too short for psic. No psic output found.')
psic_mat = []
numseq = []
for line in d_psic.split('\n'):
if line.startswith('Pos') or line == '': continue
tokens = line.split()
if len(tokens) != 22:
            raise ParseError("It seems that we have a situation now. The expected amount of columns is 22, found: %s!" % len(tokens))
psic_mat_row = [ float(t) for t in tokens[1:21] ]
numseq.append( int(tokens[21]) ) #The last column is an integer denoting the amount of aligned seqs at that pos.
psic_mat.append(psic_mat_row) #Now glue the current column to the matrix
#Check!
if len(psic_mat) != len(numseq):
raise ParseError("It seems that we have an issue now. Something went wrong during parsing the psic matrix!")
return {'psic':psic_mat, 'NumSeq':numseq}
def parse_disis(d_disis):
"""
Returns a dictionary with keys as follows:
'seq': The sequence as disis sees it
'prd_bin': binary prdct
'prd_raw': raw prdct
"""
if d_disis == '':
raise EmptyError('Empty disis file!')
disis_seq_binprd = [] #Sequence parsed out of the binary prediction part
disis_seq_rawprd = [] #...parsed out of the raw (numeric) predictions
disis_prd_bin = [] #Binary predictions
disis_prd_raw = [] #Raw numeric predictions
cnt = 0
for line in d_disis.split('\n'):
if line == '': continue
tokens = line.split()
if len(tokens) == 1: #We are in the upper part of disis' output, i.e. the binary predictions
if cnt % 2 == 0:
disis_seq_binprd.extend( list(line) )
else:
disis_prd_bin.extend( list(line.replace('P','+')) )
elif len(tokens) == 2: #Now we are in the lower part, i.e. the numeric outputs of disis
disis_seq_rawprd.append( tokens[0] )
disis_prd_raw.append( int(tokens[1]) )
cnt += 1
#Now do some consistency checks
if disis_seq_binprd != disis_seq_rawprd:
raise ParseError("It seems that we have an issue now. Disis returns different sequences in the upper and lower part!")
if len(disis_seq_binprd) != len(disis_prd_bin) != len(disis_prd_raw):
raise ParseError("It seems that we have an issue now. Parsed datastructures have different lengths!")
return {'seq':''.join(disis_seq_binprd), 'prd_bin':disis_prd_bin, 'prd_raw':disis_prd_raw}
def parse_isis(d_isis):
"""
Returns a dictionary with keys as follows:
'seq': The sequence as isis sees it
'prd_bin': binary prdct
'prd_raw': raw prdct
"""
if d_isis == '':
raise EmptyError('Empty isis file!')
isis_seq_binprd = [] #Sequence parsed out of the binary prediction part
isis_seq_rawprd = [] #...parsed out of the raw (numeric) predictions
isis_prd_bin = [] #Binary predictions
isis_prd_raw = [] #Raw numeric predictions
cnt = 0
for line in d_isis.split('\n'):
if line == '' or line.startswith('>'): continue
tokens = line.split()
if len(tokens) == 1: #We are in the upper part of disis' output, i.e. the binary predictions
if cnt % 2 == 0:
isis_seq_binprd.extend( list(line) )
else:
isis_prd_bin.extend( list(line.replace('P','+')) )
elif len(tokens) == 3: #Now we are in the lower part, i.e. the numeric outputs of disis
isis_seq_rawprd.append( tokens[1] )
isis_prd_raw.append( int(tokens[2]) )
cnt += 1
#Now do some consistency checks
if isis_seq_binprd != isis_seq_rawprd:
raise ParseError("It seems that we have an issue now. Isis returns different sequences in the upper and lower part!")
if len(isis_seq_binprd) != len(isis_prd_bin) != len(isis_prd_raw):
raise ParseError("It seems that we have an issue now. Parsed datastructures have different lengths!")
return {'seq':''.join(isis_seq_binprd), 'prd_bin':isis_prd_bin, 'prd_raw':isis_prd_raw}
def parse_md(d_md):
"""
Returns a dictionary with keys as follows:
'seq': sequence as MD sees it
'norsnet_raw': raw norsnet prdct
'norsnet_bin': binary norsnet prdct
'bval_raw': raw bval prdct
'bval_bin': binary bval prdct
'ucon_raw': raw ucon prdct
'ucon_bin': binary ucon prdct
'prd_raw': MD's raw prdct
'prd_ri': MD's reliability index
'prd_bin': MD's binary prdct
"""
if d_md == '':
raise EmptyError('Empty md file!')
md_seq = []
md_norsnet_raw = []
md_norsnet_bin = []
md_bval_raw = []
md_bval_bin = []
md_ucon_raw = []
md_ucon_bin = []
md_raw = []
md_ri = []
md_bin = []
for line in d_md.split('\n'):
if line.startswith('Number'): continue #The header
if line == '': break #We reached the end of the output block
tokens = line.split()
if len(tokens) != 11:
raise ParseError("It seems that we have an issue now. MD returned an unexpected number of columns!")
md_seq.append( tokens[1] )
md_norsnet_raw.append( float(tokens[2]) )
md_norsnet_bin.append( tokens[3].replace('D','+') )
md_bval_raw.append( float(tokens[4]) )
md_bval_bin.append( tokens[5].replace('D','+') )
md_ucon_raw.append( float(tokens[6]) )
md_ucon_bin.append( tokens[7].replace('D','+') )
md_raw.append( float(tokens[8]) )
md_ri.append( int(tokens[9]) )
md_bin.append( tokens[10].replace('D','+') )
#Check it!
if len(md_seq) != len(md_norsnet_raw) != len(md_norsnet_bin) != len(md_bval_raw) != len(md_bval_bin) != len(md_ucon_raw) != len(md_ucon_bin) != len(md_raw) != len(md_ri) != len(md_bin):
raise ParseError("It seems that we have an issue now. MD returned unequal column lengths!")
return {'seq':''.join(md_seq), 'norsnet_raw':md_norsnet_raw, 'norsnet_bin':md_norsnet_bin, 'bval_raw':md_bval_raw, 'bval_bin':md_bval_bin, 'ucon_raw':md_ucon_raw, 'ucon_bin':md_ucon_bin, 'prd_raw':md_raw, 'prd_ri':md_ri, 'prd_bin':md_bin}
def parse_profsecacc(d_prof):
"""
Returns a dictionary where keys have the same designation as the column names in prof's tabular output.
Values hold lists of per-residue predictions.
AA
OHEL
PHEL
RI_S
OACC
PACC
OREL
PREL
RI_A
pH
pE
pL
Obe
Pbe
Obie
Pbie
OtH
OtE
OtL
Ot0
Ot1
Ot2
Ot3
Ot4
Ot5
Ot6
Ot7
Ot8
Ot9
Their meaning (taken from prof's output):
# NOTATION BODY : PROFsec
# NOTATION OHEL : observed secondary structure: H=helix, E=extended (sheet), blank=other (loop)
# NOTATION PHEL : PROF predicted secondary structure: H=helix, E=extended (sheet), blank=other (loop) PROF = PROF: Profile network prediction HeiDelberg
# NOTATION RI_S : reliability index for PROFsec prediction (0=lo 9=high) Note: for the brief presentation strong predictions marked by '*'
# NOTATION pH : 'probability' for assigning helix (1=high, 0=low)
# NOTATION pE : 'probability' for assigning strand (1=high, 0=low)
# NOTATION pL : 'probability' for assigning neither helix, nor strand (1=high, 0=low)
# NOTATION OtH : actual neural network output from PROFsec for helix unit
# NOTATION OtE : actual neural network output from PROFsec for strand unit
# NOTATION OtL : actual neural network output from PROFsec for 'no-regular' unit
#
# ------------------------------------------------------------------------
# NOTATION BODY : PROFacc
# NOTATION OACC : observed solvent accessibility (acc) in square Angstroem (taken from DSSP: <NAME> and <NAME>, Biopolymers, 22, 2577-2637, 1983)
# NOTATION PACC : PROF predicted solvent accessibility (acc) in square Angstroem
# NOTATION OREL : observed relative solvent accessibility (acc) in 10 states: a value of n (=0-9) corresponds to a relative acc. of between n*n % and (n+1)*(n+1) % (e.g. for n=5: 16-25%).
# NOTATION PREL : PROF predicted relative solvent accessibility (acc) in 10 states: a value of n (=0-9) corresponds to a relative acc. of between n*n % and (n+1)*(n+1) % (e.g. for n=5: 16-25%).
# NOTATION RI_A : reliability index for PROFacc prediction (0=low to 9=high) Note: for the brief presentation strong predictions marked by '*'
# NOTATION Obe : observerd relative solvent accessibility (acc) in 2 states: b = 0-16%, e = 16-100%.
# NOTATION Pbe : PROF predicted relative solvent accessibility (acc) in 2 states: b = 0-16%, e = 16-100%.
# NOTATION Obie : observerd relative solvent accessibility (acc) in 3 states: b = 0-9%, i = 9-36%, e = 36-100%.
# NOTATION Pbie : PROF predicted relative solvent accessibility (acc) in 3 states: b = 0-9%, i = 9-36%, e = 36-100%.
# NOTATION Ot4 : actual neural network output from PROFsec for unit 0 coding for a relative solvent accessibility of 4*4 - 5*5 percent (16-25%). Note: OtN, with N=0-9 give the same information for the other output units!
#
"""
if d_prof == '':
raise EmptyError('Empty prof file!')
ret = {}
for line in d_prof.split('\n'):
if not line.startswith('#') and line != '':
#First parse the column header
if line.startswith('No'):
column_names = re.split('\s+', line)
#Now the predicted values per line
else:
value_tokens = re.split('\s+', line)
for i in range(len(value_tokens)):
#Get its specific column name
col = column_names[i]
#Try to convert the current value into an integer.
#If that fails we are dealing with a string
try:
val = int(value_tokens[i])
except ValueError:
val = value_tokens[i]
#Now append it
try:
ret[col].append(val)
except KeyError:
ret[col] = [val]
#Do some final consistency checks: Has everything the same length?
l = len(list(ret.values())[0])
for listt in ret.values():
if len(listt) != l:
raise ParseError("Something happened! profsecacc returns different column lengths!")
#Add an additional entry containing the concatenated aa sequence
seq = ''.join(ret['AA'])
ret['seq'] = seq
return ret
def parse_profbval(d_bval):
"""
Returns a dictionary with keys and values as follows:
'prd_raw1': list of integers corresponding to first output node
'prd_raw2': list of integers corresponding to second output node
Unfortunately, there is neither sequence information nor a binary prediction to be found in the output.
"""
if d_bval == '':
raise EmptyError('Empty bval file!')
out1 = []
out2 = []
region_of_interest = False
for line in d_bval.split('\n'):
if region_of_interest:
tokens = line.split()
if len(tokens) == 0: continue
out1.append( int(tokens[1]) )
out2.append( int(tokens[2]) )
if line.startswith('* out vec:'):
region_of_interest = True
#Consistency check
if len(out1) != len(out2):
raise ParseError("Something happened! profbval returns different column lengths!")
return {'prd_raw1':out1, 'prd_raw2':out2}
def parse_pfam_annotations(d_hmmer):
"""
This method performs residue-wise domain annotations according to aligned pfam domains. Search against the PfamA database should be performed
by means of the new hmmer3 suite, so using the old hmmer2 is strongly discouraged, due to its different output style!
The parsing depends on hmmer-3.0rc1 output (as of February 2010), so check that before running a newer hmmer3!!!
Each sequence position of the query seq is annotated in a dictionary in the following way:
{ ...
i: {mdl#:[(query_i,domain_i-eval,consensus_i,match_i,pp_i),(...)], mdl#:[(...),(...),...] } ,
i+j: {mdl#:[(...),(...),...], ...},
...
},
where
i: the i-th position in the query seq (starting at 0!!),
mdl#: the number of the model
query_i: the residue of the query sequence, intended for checking purposes for the function caller
domain_i-eval: the domain's i-evalue
consensus_i: the aligned residue in the consensus pfam domain
match_i: the information of the conservation grade ,
pp_i: the posterior probability of that specific aligned residue (new to hmmer3)
Note, the hierarchy of hmmer output:
A query sequence could match to different Pfam models, each consisting of several domains. Furthermore, a residue could be aligned to
more than one domain _within_ a model, hence the assigned list to each model number in the nested dictionary:
Each entry essentially refers to one domain where that specific residue i is aligned to.
A sample hmmer output against PfamA could look like this:
-------------------------------------------------------------------------------------------------------------------------------------
Query: query [L=386]
Scores for complete sequence (score includes all domains):
--- full sequence --- --- best 1 domain --- -#dom-
E-value score bias E-value score bias exp N Model Description
------- ------ ----- ------- ------ ----- ---- -- -------- -----------
3.1e-94 315.1 5.1 1e-78 264.1 0.4 2.7 2 PF00224.14 Pyruvate kinase, barrel domain
5.2e-26 90.1 6.7 6e-26 89.9 3.7 1.8 1 PF02887.9 Pyruvate kinase, alpha/beta domain
Domain annotation for each model (and alignments):
>> PF00224.14 Pyruvate kinase, barrel domain
# score bias c-Evalue i-Evalue hmmfrom hmm to alifrom ali to envfrom env to acc
--- ------ ----- --------- --------- ------- ------- ------- ------- ------- ------- ----
1 ! 53.2 0.0 2.2e-18 1.3e-14 2 72 .. 24 89 .. 23 90 .. 0.93
2 ! 264.1 0.4 1.7e-82 1e-78 173 344 .. 89 259 .. 88 263 .. 0.96
Alignments for each domain:
== domain 1 score: 53.2 bits; conditional E-value: 2.2e-18
--SEEEEEE--TTTSHHHHHHHHH----EEEEETT---HHHHHHHHHHHHHHHHCTTTS-EEEEE------ CS
PF00224.14 2 rrtkivctlGPasesvekleklieaGlnvvRlnfshGsheehkeridnvreaeeklgkkvaillDtkGpei 72
++t+ivctlGPa +sve+l kli+aG+++ R+n she+hke +nv +a+ +l +++llDtkGp i
query 24 KKTHIVCTLGPACKSVETLVKLIDAGMDICRFN----SHEDHKEMFNNVLKAQ-ELRCLLGMLLDTKGPPI 89
89******************************9....789*********9986.56788**********76 PP
== domain 2 score: 264.1 bits; conditional E-value: 1.7e-82
SS-HHHHHHHH---TT.-SEEEETTE-SHHHHHHHHHHHHHTTTTSEEEEEE-S----TTHHHHHHH----EEE-------S-GGGHHHHHHHHHHHCCC-----EEESSTTGGGGTSSS--HHHHHHHHHHHH----EEEE---------HHHHHHHHHHHHHHHHCTS-H CS
PF00224.14 173 alsekDkadlkfgvkqgvdliaasfvRkaedvkevRevleekgkeikiiakienqegvenldeileasdgimvaRGDlGieipaekvvlaqkllikkcnlagkpvitatqmlesmiknPrptRaevsDvanavldGaDavmLsgetakGkyPveavkamaevaleaekalke 344
+sekDk+d+ + ++iaasf+ +a+dv+ +R++l+++g++ikii kien eg+ ++d+il +sdgim+aRGDlG+ei ekv+laqkl+i+kcnl gkp+itatqmlesm+knPrptRaev+DvanavldG+D+vmLsgeta Gk+Pveav++m++++leae+ +++
query 89 IISEKDKNDILNFAIPMCNFIAASFIQSADDVRLIRNLLGPRGRHIKIIPKIENIEGIIHFDKILAESDGIMIARGDLGMEISPEKVFLAQKLMISKCNLQGKPIITATQMLESMTKNPRPTRAEVTDVANAVLDGTDCVMLSGETA-GKFPVEAVTIMSKICLEAEACIDY 259
69******9765555579********************************************************************************************************************************8.*******************99986 PP
>> PF02887.9 Pyruvate kinase, alpha/beta domain
# score bias c-Evalue i-Evalue hmmfrom hmm to alifrom ali to envfrom env to acc
--- ------ ----- --------- --------- ------- ------- ------- ------- ------- ------- ----
1 ! 89.9 3.7 1e-29 6e-26 2 116 .. 278 383 .. 277 384 .. 0.94
Alignments for each domain:
== domain 1 score: 89.9 bits; conditional E-value: 1e-29
HHHHHHHHHHHHH----EEEEE-----HHHHHHCC---..EEEEE----HHH---EEE---TT---HHHHCHHHHHHHHHCCHHH-----SSS-EEEE--....-------EEEE CS
PF02887.9 2 eaiaeaaveaAkelgakaIvvltesGstarlvskyrpgvpIlavtpseetarqlalvwGvhplvgkeraistdeviaealraalkkglikkgdevvvtaglpfgtaggtntikvv 116
ea+a++ave+A++++a+ I++lte+G+tarl++ky+p++ Ila++ s++t + l++++Gv+++ + + td vi++a+++a++++++k gd v++++g +tn++kvv
query 278 EAVARSAVETAESIQASLIIALTETGYTARLIAKYKPSCTILALSASDSTVKCLNVHRGVTCIKVGSF---TDIVIRNAIEIAKQRNMAKVGDSVIAIHG------IKTNLMKVV 383
99************************************************************544444...59***************************......589999998 PP
-------------------------------------------------------------------------------------------------------------------------------------
Each model is introduced by an '>>', each model could have several domains, introduced by an '=='.
Mind e.g. query residue i=88 in the first model (89 in the output above): It is annotated in both domains. Hence its annotation in the return dictionary would
look like:
88:{0:[('I','1.3e-14', 'i', 'i', '6'), ('I','1e-78', 'a', ' ', '6')]}
    If it also aligned to a domain of the second model, that annotation would occur as another entry in the sub-dictionary, keyed by 1.
Here, you can also see what is actually used as annotation: first the i-evalue of the domain (1.3e-14 or 1e-78) followed by the subject (consensus) residue, the
conservation letter (line between query and subject) and the posterior probability (beneath the query line).
There could be other information to be extracted (like bit score, start stop positions...). Perhaps in the future.
"""
if 'No hits detected that satisfy reporting thresholds' in d_hmmer:
#raise NoResultError('No significant hit found')
raise NoResultError('hmmer3 did not detect any hits.')
#First we split up into models
#Look for the '>>' at the beginning of the line.
rgx = re.compile('^>>', re.M)
models_tmp = rgx.split(d_hmmer)
models = []
for model in models_tmp[1:-1]: #The first one is the hmmscan header and general information, we aren't interested in that one; the last one
models.append(model) #needs to be purged off the footer
#Get rid of the last model's footer and append it to models
rgx = re.compile('^Internal pipeline statistics summary', re.M)
models.append(rgx.split(models_tmp[-1])[0])
#Now handle each single domain within the models and save models and their domains into model_list
rgx = re.compile('^ ==', re.M) #Each domain starts with this string
mdl_cnt = 0 #How many models are we dealing with? Remember, each model is made up of at least one domain
#model_list = {} #Here we store each model along with their domains
residues = {}
for model in models:
if 'No individual domains that satisfy reporting thresholds' in model:
continue
domains = rgx.split(model)
domains_header = domains[0]
domains_aligns = domains[1:]
#Parse the header first for domain-specific information
# # score bias c-Evalue i-Evalue hmmfrom hmm to alifrom ali to envfrom env to acc
dom = []
for line in domains_header.split('\n'):
if re.match('\s+\d+\s+', line):
tokens = line.strip().split()
assert tokens[1] == '!' #If this fails, we didn't fully understand what's going on during parsing!
score = tokens[2] #See the docu about the significance of both evalues. i-eval (independent) is the one
c_eval = tokens[4] #we're probably interested in later, as well as the bitscore.
i_eval = tokens[5] #Yes we are, since pfam website only reports i_eval as _the_ evalue
                hmm_endpoints = tokens[8] #Is either '..', '.]', '[.' or '[]', indicating whether the alignment ends inside the domain ('.') or exactly at a domain boundary ('[' or ']')
start_model = tokens[6]
end_model = tokens[7]
start_query = tokens[9]
end_query = tokens[10]
#Not all of this is currently needed.
info = {'score':score, 'i_eval':i_eval, 'start_model':start_model, 'end_model':end_model, 'start_query':start_query, 'end_query':end_query, 'hmm_endpoints':hmm_endpoints}
dom.append(info)
#Now handle the alignments in each domain
i = 0
feature_string = ''
for algn in domains_aligns:
lines = algn.strip().split('\n')
#There could be up to two additional annotation lines present above the actual alignment (s. hmmer docu p.18). Get rid of those!
if len(lines) == 5:
lines = lines[1:]
elif len(lines) == 6:
lines = lines[2:]
elif len(lines) == 7:
lines = lines[3:]
else:
raise ParseError('Well, that is indeed interesting. Something went terribly wrong during assuming the amount of possible lines per alignment! I think I will be dying now!')
line_model = lines[0] #The line containing the consensus of the domain sequence
line_match = lines[1]
line_target = lines[2] #Our target sequence for which we just found a homologous domain
line_pp = lines[3]
name_model = line_model.split()[0]
start_query = int(dom[i]['start_query'])
end_query = int(dom[i]['end_query'])
seq_model = line_model.split()[2] #The domain consensus sequence
seq_query = line_target.split()[2] #The query sequence
#We need the start index of the match sequence which is the same as from all the others, e.g. seq_model
m_start = line_model.index(seq_model)
seq_match = line_match[m_start:] #The match sequence between both
seq_pp = line_pp.lstrip().split()[0] #The posterior probability sequence
#Some semantics checks: each string length has to be the same. Otherwise, something went wrong during parsing!
assert len(seq_model) == len(seq_match) == len(seq_query) == len(seq_pp)
#Now do the mapping
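            #Gap characters ('-') in the aligned query consume an alignment
            #column but no query residue, so only non-gap positions advance
            #actual_pos; query coordinates are stored 0-based via
            #actual_pos + start_query - 1.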
actual_pos = 0
for pos in range(len(seq_query)):
if seq_query[pos] == '-':
continue
try:
residues[actual_pos+start_query-1][mdl_cnt].append( (seq_query[pos],dom[i]['i_eval'],seq_model[pos], seq_match[pos], seq_pp[pos]) )
except KeyError:
try:
residues[actual_pos+start_query-1][mdl_cnt] = [ (seq_query[pos],dom[i]['i_eval'],seq_model[pos], seq_match[pos], seq_pp[pos]) ]
except KeyError:
residues[actual_pos+start_query-1] = {mdl_cnt: [ (seq_query[pos],dom[i]['i_eval'],seq_model[pos], seq_match[pos], seq_pp[pos]) ] }
actual_pos += 1
#A further consistency check!
assert end_query == actual_pos+start_query-1
#Proceed with the next residue within the alignment
i += 1
#Proceed with the next model
mdl_cnt += 1
return residues
def parse_prosite(d_prosite):
"""
"""
if d_prosite == '':
raise EmptyError('Empty prosite file!')
stretches = [] #Here we store the prosite matches: A list of 3-lists, i.e. one per prosite match: start,stop,stretch
within = False
for line in d_prosite.split('\n'):
if line.startswith('Pattern:'):
within = True
continue
if line.startswith('Pattern-ID:') or line.strip() == '':
within = False
continue
if within:
tokens = line.strip().split()
start = int(tokens[0]) #Prosite starts counting at 1!
stretch = tokens[1]
stop = start + len(stretch) - 1
#We return sequence positions 0-based!
stretches.append( [start-1,stop-1,stretch] )
return stretches
if __name__ == '__main__':
import os
from lib_parser import *
import sys
pp_path = '/mnt/home/schaefer/SNAPv2/pp/'
chains = os.listdir(pp_path)
i = 0
N = len(chains)
mn = 0
mx = 0
for chain in chains:
#print chain
i += 1
#print chain, i, N
try:
#d_blast = open(pp_path+chain+"/query.blastPsiMat").read()
#d_disis = open(pp_path+chain+"/query.disis").read()
#d_isis = open(pp_path+chain+"/query.isis").read()
#d_md = open(pp_path+chain+"/query.mdisorder").read()
#d_prof = open(pp_path+chain+"/query.profRdb").read()
#d_bval = open(pp_path+chain+"/query.profbval").read()
#d_psic = open(pp_path+chain+"/query.psic").read()
d_in = open(pp_path+chain+"/query.in").read()
d_fasta = open(pp_path+chain+"/query.fasta").read()
#d_hmmer = open(pp_path+chain+"/query.hmm3pfam").read()
d_prosite = open(pp_path+chain+"/query.prosite").read()
except NoResultError:
#print 'too short for psic'
continue
except IOError:
print('file not found')
continue
#print d_hmmer
seq = parse_sequence(d_in, d_fasta)['seq']
for stretch in parse_prosite(d_prosite):
print (stretch[2])
print (seq[stretch[0]:stretch[1]+1])
assert stretch[2] == seq[stretch[0]:stretch[1]+1]
```
#### File: ProNAhot/pronaHotMod/protvec.py
```python
import prona2019Mod.utils as utils
import itertools as it
from six import iteritems, string_types, PY2, next
import numpy as np
import sys
def _is_single(obj):
"""
Check whether `obj` is a single document or an entire corpus.
Returns (is_single, new) 2-tuple, where `new` yields the same
sequence as `obj`.
`obj` is a single document if it is an iterable of strings. It
is a corpus if it is an iterable of documents.
"""
obj_iter = iter(obj)
temp_iter = obj_iter
try:
peek = next(obj_iter)
obj_iter = it.chain([peek], obj_iter)
except StopIteration:
# An empty object is a single document
return True, obj
if isinstance(peek, string_types):
# It's a document, return the iterator
return True, obj_iter
if temp_iter == obj:
        # obj is itself an iterator, i.e. a corpus; return the rebuilt iterator
return False, obj_iter
else:
# If the first item isn't a string, assume obj is a corpus
return False, obj
'''
def _apply(corpus, chunksize=None, **kwargs):
"""Apply the transformation to a whole corpus and get the result as another corpus.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
chunksize : int, optional
If provided - more effective processing (by group of documents) will performed.
kwargs
Arbitrary keyword arguments.
Returns
-------
:class:`~gensim.interfaces.TransformedCorpus`
Transformed corpus.
"""
return TransformedCorpus(self, corpus, chunksize, **kwargs)
'''
def score_item(worda, wordb, components, scorer, phrasegrams):
"""score is retained from original dataset
"""
try:
return phrasegrams[tuple(components)][1]
except KeyError:
return -1
def analyze_sentence(sentence, threshold, common_terms, scorer,phrasegrams):
"""Analyze a sentence
`sentence` a token list representing the sentence to be analyzed.
`threshold` the minimum score for a bigram to be taken into account
`common_terms` the list of common terms, they have a special treatment
`scorer` the scorer function, as given to Phrases
"""
s = [utils.any2utf8(w) for w in sentence]
last_uncommon = None
in_between = []
    # appending None is a trick that gives an automatic happy ending,
    # as it won't be a common word, nor have a score
for word in s + [None]:
is_common = word in common_terms
if not is_common and last_uncommon:
chain = [last_uncommon] + in_between + [word]
# test between last_uncommon
score = score_item(
worda=last_uncommon,
wordb=word,
components=chain,
scorer=scorer,
phrasegrams=phrasegrams
)
if score > threshold:
yield (chain, score)
last_uncommon = None
in_between = []
else:
# release words individually
for w in it.chain([last_uncommon], in_between):
yield (w, None)
in_between = []
last_uncommon = word
elif not is_common:
last_uncommon = word
else: # common term
if last_uncommon:
# wait for uncommon resolution
in_between.append(word)
else:
yield (word, None)
def get_phrase(sentence,phrase_model):
is_single, sentence = _is_single(sentence)
if not is_single:
# if the input is an entire corpus (rather than a single sentence),
# return an iterable stream.
sys.exit("It is not a protein sequence")
delimiter = phrase_model['delimiter']
bigrams = analyze_sentence(
sentence,
threshold=phrase_model['threshold'],
common_terms=phrase_model['common_terms'],
scorer=None,
phrasegrams=phrase_model['phrasegrams']) # we will use our score_item function redefinition
new_s = []
for words, score in bigrams:
if score is not None:
words = delimiter.join(words)
new_s.append(words)
return [utils.to_unicode(w) for w in new_s]
def split_ngrams(seq, n):
"""
'AGAMQSASM' => [['AGA', 'MQS', 'ASM'], ['GAM','QSA'], ['AMQ', 'SAS']]
"""
all_ngrams=[]
for x in range(n):
all_ngrams.append(zip(*[iter(seq[x:])]*n))
str_ngrams = []
for ngrams in all_ngrams:
x = []
for ngram in ngrams:
x.append("".join(ngram))
str_ngrams.append(x)
return str_ngrams
def to_vecs(seq,phrase_model,kmer,word2vec_index):
"""
    convert a sequence into `kmer` summed embedding vectors, one per reading frame,
    e.g. 'AGAMQSASM' => [array([...]), array([...]), array([...])] with 100-dimensional arrays
"""
ngram_patterns = split_ngrams(seq, kmer)
protvecs = []
for ngrams in ngram_patterns:
ngram_vecs = []
if phrase_model=='none':
ngramss = ngrams
else:
ngramss=get_phrase(get_phrase(ngrams,phrase_model),phrase_model)
for ngram in ngramss:
try:
ngram_vecs.append(np.array(word2vec_index[ngram]))
except KeyError:
continue
protvecs.append(sum(ngram_vecs))
return protvecs
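# Illustrative usage (hypothetical toy index, not part of the original module):
#   to_vecs('AGAMQSASM', 'none', 3, {'AGA': [1, 0], 'MQS': [0, 1], 'ASM': [1, 1]})
# returns one summed vector per reading frame; k-mers missing from the index are skipped,
# and a frame with no known k-mers contributes sum([]) == 0.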
``` |
{
"source": "jiajuns/CarND-Capstone",
"score": 3
} |
#### File: src/waypoint_updater/waypoint_updater.py
```python
import rospy
import numpy as np
import math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 #200 # Number of waypoints we will publish. You can change this number
NORMAL_DECEL = 4 # m/s^2
MAX_DECEL = 9.5 # m/s^2
NORMAL_ACCEL = 6 # m/s^2
VELOCITY_30MPH = 2.77 # m/s (note: an actual 30 mph is ~13.4 m/s; this constant targets a much lower speed)
REFRESH_RATE = 10 #50 # Hz
STOP_OFFSET = 8
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
self.current_pose = None
self.base_waypoints = None
self.stop_waypoint_idx = 752 #750 #286
#self.stopped_time = 0.0
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
# rospy.Subscriber('/obstacle_waypoint', Waypoint, self.obstacle_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
#rospy.spin()
        self.rate = rospy.Rate(REFRESH_RATE) # REFRESH_RATE Hz sampling rate
while not rospy.is_shutdown():
# rospy.loginfo("WaypointUpdater goes to loop")
self.loop()
# rospy.loginfo("Vehicle stopped time: %d", self.stopped_time)
# if self.stopped_time >= 10: # vehicle has stopped for over 10 seconds
# self.stop_waypoint_idx += 400
# self.stopped_time = 0.0
def loop(self):
if (self.current_pose is None) or (self.base_waypoints is None):
return
# step 1. find out the nearest waypoint to the current position
        # current x, y & z coordinates of the vehicle
current_pose_x = self.current_pose.pose.position.x
current_pose_y = self.current_pose.pose.position.y
current_pose_z = self.current_pose.pose.position.z
shortest_distance = +np.inf
nearest_waypoint_idx = 0
roll, pitch, yaw = quaternion_to_euler_angle(self.current_pose.pose.orientation.w,
self.current_pose.pose.orientation.x,
self.current_pose.pose.orientation.y,
self.current_pose.pose.orientation.z)
# for each waypoint of the base_waypoints, calculate the distance from the current position, find out the nearest waypoint index
for i in range(len(self.base_waypoints)):
# base waypoint x & y coordinates.
base_waypoint_x = self.base_waypoints[i].pose.pose.position.x
base_waypoint_y = self.base_waypoints[i].pose.pose.position.y
base_waypoint_z = self.base_waypoints[i].pose.pose.position.z
distance = np.sqrt((current_pose_x - base_waypoint_x)**2 + (current_pose_y - base_waypoint_y)**2 + (current_pose_z - base_waypoint_z)**2)
if distance < shortest_distance:
shortest_distance = distance
nearest_waypoint_idx = i
# rospy.loginfo("nearest waypoint index is %d", nearest_waypoint_idx)
# step 2. the nearest waypoint might be behind the car, we need to check if the nearest waypoint is at the current heading direction. We need to utilize the orientation info from the PoseStampd message
nearest_waypoint_x = self.base_waypoints[nearest_waypoint_idx].pose.pose.position.x
nearest_waypoint_y = self.base_waypoints[nearest_waypoint_idx].pose.pose.position.y
        wp_yaw = np.arctan2((nearest_waypoint_y - current_pose_y), (nearest_waypoint_x - current_pose_x)) # I'm not too sure about this part
        # calculate the angle between the car's yaw and wp_yaw; only accept the waypoint if the angle is less than 90 degrees, otherwise use the next waypoint as the first lookahead waypoint. Then append the next LOOKAHEAD_WPS base waypoints as the lookahead waypoints, rolling over to the first base waypoint when the loop reaches the end of the base waypoint list.
theta = yaw - wp_yaw
lookahead_waypoints = []
if abs(theta) < np.pi/2:
for i in range(LOOKAHEAD_WPS):
waypoint_idx = (nearest_waypoint_idx + i) % len(self.base_waypoints)
lookahead_waypoints.append(self.base_waypoints[waypoint_idx])
else:
for i in range(LOOKAHEAD_WPS):
waypoint_idx = (nearest_waypoint_idx + 1 + i) % len(self.base_waypoints)
lookahead_waypoints.append(self.base_waypoints[waypoint_idx])
# step 3.
if self.stop_waypoint_idx is not None:
if self.stop_waypoint_idx == -1 - STOP_OFFSET:
# no red light detected, adjust current velocity to 30MPH
# calculate the distance the vehicle needs to travel from current velocity to 30mph
# d=(vc^2-vo^2)/2a
dist_to_30mph = (VELOCITY_30MPH**2 - self.current_velocity**2) / (2*NORMAL_ACCEL)
accel_per_dist = (VELOCITY_30MPH - self.current_velocity) / (dist_to_30mph + 1e-12)
# update the velocity of the lookahead_waypoints
for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
increased_v = dist_curr_to_i * accel_per_dist
velocity_i = self.current_velocity + increased_v
velocity_i = velocity_i if velocity_i < VELOCITY_30MPH else VELOCITY_30MPH
self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
else:
rospy.loginfo("stop_waypoint_idx is %d", self.stop_waypoint_idx)
# red light detected
# calculate the normal braking distance from the current_velocity
# a=(vc-v0)/t, d=((vc+v0)/2)*t, v0=0 --> d=vc^2/(2*a)
normal_brake_dist = (self.current_velocity**2)/(2*NORMAL_DECEL)
# calculate the distance between the current position and the red light stop position. use the nearest waypoint as the current position
dist_to_stop = self.distance(self.base_waypoints, nearest_waypoint_idx, self.stop_waypoint_idx)
# if the car is getting close to the red light, start braking, otherwise, keep constant speed
if dist_to_stop <= normal_brake_dist and dist_to_stop > 3:
#rospy.loginfo("if cond1: brake, current_velocity is %f", self.current_velocity)
decel_per_dist = self.current_velocity / (dist_to_stop + 1e-12) #* 2 # provide a factor of 1.5 to be safe
for i in range(nearest_waypoint_idx, self.stop_waypoint_idx):
dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
reduced_v = dist_curr_to_i * decel_per_dist
velocity_i = self.current_velocity - reduced_v
velocity_i = velocity_i if velocity_i > 0 else 0.0
if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
elif dist_to_stop <= 3:
#rospy.loginfo("if cond2: stop, current_velocity is %f", self.current_velocity)
for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, 0.0)
# adjust velocity up to 30mph if current velocity is slow and vehicle is still away from red light
elif dist_to_stop > 3 and dist_to_stop > 2*normal_brake_dist and self.current_velocity < VELOCITY_30MPH:
#rospy.loginfo("if cond2: stop, current_velocity is %f", self.current_velocity)
# calculate the distance the vehicle needs to travel from current velocity to 30mph
# d=(vc^2-vo^2)/2a
dist_to_30mph = (VELOCITY_30MPH**2 - self.current_velocity**2) / (2*NORMAL_ACCEL)
accel_per_dist = (VELOCITY_30MPH - self.current_velocity) / (dist_to_30mph + 1e-12)
# update the velocity of the lookahead_waypoints
for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
increased_v = dist_curr_to_i * accel_per_dist
velocity_i = self.current_velocity + increased_v
velocity_i = velocity_i if velocity_i < VELOCITY_30MPH else VELOCITY_30MPH
self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
# rospy.loginfo("current_velocity: %f", self.current_velocity)
# if self.current_velocity <= 1.0:
# self.stopped_time = self.stopped_time + 0.02 #1/REFRESH_RATE
# if dist_to_stop <= normal_brake_dist:
# decel = (self.current_velocity**2)/(2*dist_to_stop + 1e-12)
# if decel > MAX_DECEL:
# decel = MAX_DECEL
# # calculate the velocity for each waypoint between the current position and red light stop line
# for i in range(nearest_waypoint_idx, self.stop_waypoint_idx+1):
# dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i)
# # vi = sqrt(vc^2-2*a*d)
# velocity_i = np.sqrt(self.current_velocity**2 - 2*decel*dist_curr_to_i)
# # set velocity for each waypoint in the lookahead_waypoints
# if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
# self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
# if i == 0:
# rospy.loginfo(velocity_i)
# rospy.loginfo(nearest_waypoint_idx)
# create an empty Lane message to hold the lookahead_waypoints
lane = Lane()
lane.waypoints = lookahead_waypoints
# rospy.loginfo("waypoint 0 velocity %f", lane.waypoints[0].twist.twist.linear.x)
# rospy.loginfo("waypoint 1 velocity %f", lane.waypoints[1].twist.twist.linear.x)
# rospy.loginfo("waypoint 2 velocity %f", lane.waypoints[2].twist.twist.linear.x)
self.final_waypoints_pub.publish(lane)
self.rate.sleep()
def pose_cb(self, msg):
# TODO: Implement
'''msg type geometry_msgs/PoseStamped
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
'''
self.current_pose = msg
#pass
def waypoints_cb(self, waypoints):
# TODO: Implement
'''waypoints message type styx_msgs/Lane
styx_msgs/Waypoint[] waypoints
geometry_msgs/PoseStamped pose
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
geometry_msgs/TwistStamped twist
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Twist twist
geometry_msgs/Vector3 linear
float64 x
float64 y
float64 z
geometry_msgs/Vector3 angular
float64 x
float64 y
float64 z
'''
# get the waypoint list from the Lane message
self.base_waypoints = waypoints.waypoints
#pass
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stop_waypoint_idx = msg.data - STOP_OFFSET
#pass
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def velocity_cb(self, msg):
'''msg type geometry_msgs/TwistStamped
geometry_msgs/Twist twist
geometry_msgs/Vector3 linear
float64 x
float64 y
float64 z
geometry_msgs/Vector3 angular
float64 x
float64 y
float64 z
'''
# get the vehicle's current velocity from the simulator
self.current_velocity = msg.twist.linear.x
#pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
def quaternion_to_euler_angle(w, x, y, z):
"""
helper function to convert quaternion to euler angle
"""
ysqr = y * y
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + ysqr)
X = math.degrees(math.atan2(t0, t1)) #roll
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
Y = math.degrees(math.asin(t2)) #pitch
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (ysqr + z * z)
Z = math.degrees(math.atan2(t3, t4)) #yaw
return X, Y, Z
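# Worked example: the identity quaternion (w=1, x=y=z=0) gives (roll, pitch, yaw) = (0, 0, 0),
# while a pure 90-degree yaw rotation (w=0.7071, x=0, y=0, z=0.7071) gives t3 = 1.0 and
# t4 = 0.0, hence yaw = degrees(atan2(1, 0)) = 90.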
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
``` |
{
"source": "jiajuns/CarND-Semantic-Segmentation",
"score": 2
} |
#### File: jiajuns/CarND-Semantic-Segmentation/main.py
```python
import os.path
import re
import numpy as np
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
import scipy.misc
from glob import glob
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
input_tensor = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_tensor, keep_prob, layer3_out, layer4_out, layer7_out
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
initializer = tf.contrib.layers.xavier_initializer_conv2d()
conv_1x1 = tf.layers.conv2d(inputs=vgg_layer7_out,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3),  # scale of 1e-3 assumed; the original passed the factory uncalled
kernel_size=1,
strides=(1, 1),
padding='same')
de_conv1 = tf.layers.conv2d_transpose(inputs=conv_1x1,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
kernel_size=4,
strides=(2, 2),
padding='same')
vgg_layer4_out = tf.layers.conv2d(inputs=vgg_layer4_out,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
kernel_size=1,
strides=(1, 1),
padding='same')
de_conv1_added = tf.add(de_conv1, vgg_layer4_out)
vgg_layer3_out = tf.layers.conv2d(inputs=vgg_layer3_out,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
kernel_size=1,
strides=(1, 1),
padding='same')
de_conv2 = tf.layers.conv2d_transpose(inputs=de_conv1_added,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
kernel_size=4,
strides=(2, 2),
padding='same')
de_conv2_added = tf.add(de_conv2, vgg_layer3_out)
output = tf.layers.conv2d_transpose(inputs=de_conv2_added,
filters=num_classes,
kernel_initializer=initializer,
activation=tf.nn.leaky_relu,
kernel_size=16,
strides=(8, 8),
padding='same')
return output
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(correct_label, (-1, num_classes))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
decay=0.9,
momentum=0.0,
epsilon=1e-10)
train_step = optimizer.minimize(loss)
return logits, train_step, loss
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, iou, iou_op,
input_placeholder,label_placeholder, keep_prob, keep_prob_value, image_shape):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
print_every = 5
iter_cnt = 1
if (iou is not None) and (iou_op is not None):
best_accuracy = 0
saver = tf.train.Saver()
for e in range(epochs):
losses = []
for train_data, train_label in get_batches_fn(batch_size):
feed_dict = {
input_placeholder: train_data,
label_placeholder: train_label,
keep_prob: keep_prob_value,
}
loss, _ = sess.run([cross_entropy_loss, train_op], feed_dict)
losses.append(loss)
if (iter_cnt % print_every) == 0:
print("Iteration {0}: with minibatch training loss = {1:.3g}".format(iter_cnt, loss))
iter_cnt += 1
if (iou is not None) and (iou_op is not None):
## validation
accuracy = compute_accuracy(sess, input_placeholder, label_placeholder, iou, iou_op, keep_prob, image_shape)
print('epoch {0} average accuracy {1}'.format(e, accuracy))
if accuracy > best_accuracy:
best_accuracy = accuracy
save_path = saver.save(sess, '/tmp/model.ckpt')
print('Model saved in path {}'.format(save_path))
def compute_accuracy(sess, input_placeholder, label_placeholder, iou, iou_op, keep_prob, image_shape, data_folder='./data/data_road/valid'):
list_accuracy = []
background_color = np.array([255, 0, 0])
image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
label_paths = {
re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
for image_file in image_paths:
gt_image_file = label_paths[os.path.basename(image_file)]
image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)
gt_bg = np.all(gt_image == background_color, axis=2)
gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)
accuracy, _ = sess.run([iou, iou_op], {keep_prob:1.0, input_placeholder:np.array([image]), label_placeholder:np.array([gt_image])})
list_accuracy.append(accuracy)
return np.mean(list_accuracy)
def train(learning_rate, epochs, batch_size, keep_prob_value, debug=False):
num_classes = 2
image_shape = (160, 576)
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
if debug==True:
tests.test_load_vgg(load_vgg, tf)
tests.test_layers(layers)
tests.test_optimize(optimize)
tests.test_train_nn(train_nn)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# TODO: Build NN using load_vgg, layers, and optimize function
label_placeholder = tf.placeholder(tf.float32, [None, None, None, num_classes])
input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
output = layers(layer3_out, layer4_out, layer7_out, num_classes)
logits, optimizer, loss = optimize(output, label_placeholder, learning_rate, num_classes)
prediction = tf.argmax(output, axis=3)
ground_truth = tf.argmax(label_placeholder, axis=3)
iou, iou_op = tf.metrics.mean_iou(ground_truth, prediction, num_classes)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, optimizer, loss, iou, iou_op, input_image,
label_placeholder, keep_prob, keep_prob_value, image_shape)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
def inference():
# OPTIONAL: Apply the trained model to a video
pass
if __name__ == '__main__':
train(learning_rate=1e-4, epochs=30, batch_size=4, keep_prob_value=0.6, debug=True)
``` |
{
"source": "JiajunWithGreatDream/fritz-models",
"score": 3
} |
#### File: image_segmentation/image_segmentation/train.py
```python
import argparse
import keras
import logging
import time
import sys
import struct
import os
from tensorflow.python.lib.io import file_io
import tensorflow as tf
from image_segmentation.icnet import ICNetModelFactory
from image_segmentation.data_generator import ADE20KDatasetBuilder
from image_segmentation import dali_config
from google.cloud import storage
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('train')
def _summarize_arguments(args):
"""Summarize input arguments to ICNet model training.
Args:
args:
"""
logger.info('ICNet Model training Parameters')
logger.info('-------------------------------')
for key, value in vars(args).items():
logger.info(' {key}={value}'.format(key=key, value=value))
def _build_parser(argv):
parser = argparse.ArgumentParser(
description='Train an ICNet model.'
)
# Data options
parser.add_argument(
'-d', '--data', nargs='+', required=True,
help='A TFRecord file containing images and segmentation masks.'
)
parser.add_argument(
'--tfindex-files', nargs='+',
help='TFIndex file for dali pipeline. If not included, will be built'
)
parser.add_argument(
'-l', '--label-filename', type=str, required=True,
help='A file containing a single label per line.'
)
parser.add_argument(
'-s', '--image-size', type=int, default=768,
help=('The pixel dimension of model input and output. Images '
'will be square.')
)
parser.add_argument(
'-a', '--alpha', type=float, default=1.0,
help='The width multiplier for the network'
)
parser.add_argument(
'--augment-images', type=bool, default=True,
help='turn on image augmentation.'
)
parser.add_argument(
'--add-noise', action='store_true',
help='Add gaussian noise to training.'
)
parser.add_argument(
'--use-dali', action='store_true',
        help='Use the NVIDIA DALI pipeline for data loading and augmentation.'
)
parser.add_argument(
'--list-labels', action='store_true',
help='If true, print a full list of object labels.'
)
# Training options
parser.add_argument(
'-b', '--batch-size', type=int, default=8,
help='The training batch_size.'
)
parser.add_argument(
'--lr', type=float, default=0.001, help='The learning rate.'
)
parser.add_argument(
'-n', '--num-steps', type=int, default=1000,
help='Number of training steps to perform'
)
parser.add_argument(
'--steps-per-epoch', type=int, default=100,
help='Number of training steps to perform between model checkpoints'
)
parser.add_argument(
'-o', '--output',
help='An output file to save the trained model.')
parser.add_argument(
'--gpu-cores', type=int, default=1,
help='Number of GPU cores to run on.')
parser.add_argument(
'--fine-tune-checkpoint', type=str,
help='A Keras model checkpoint to load and continue training.'
)
parser.add_argument(
'--gcs-bucket', type=str,
        help='A GCS bucket to save models to.'
)
parser.add_argument(
'--parallel-calls', type=int, default=1,
        help='Number of parallel calls to use for preprocessing.'
)
parser.add_argument(
'--model-name', type=str, required=True,
help='Short name separated by underscores'
)
return parser.parse_known_args()
def _prepare_dataset(args, n_classes):
dataset = ADE20KDatasetBuilder.build(
args.data,
n_classes=n_classes,
batch_size=args.batch_size,
image_size=(args.image_size, args.image_size),
augment_images=False,
parallel_calls=args.parallel_calls,
prefetch=True,
)
iterator = dataset.make_one_shot_iterator()
example = iterator.get_next()
return {
'input': example['image'],
'mask_4': example['mask_4'],
'mask_8': example['mask_8'],
'mask_16': example['mask_16'],
}
def build_tfindex_file(tfrecord_file, tfindex_file):
"""Builds a tfindex file used by DALI from a tfrecord file.
Args:
tfrecord_file: Path to TFRecord file.
tfindex_file: output file to write to.
"""
tfrecord_fp = open(tfrecord_file, 'rb')
idx_fp = open(tfindex_file, 'w')
while True:
current = tfrecord_fp.tell()
try:
# length
byte_len = tfrecord_fp.read(8)
            if not byte_len:
break
# crc
tfrecord_fp.read(4)
proto_len = struct.unpack('q', byte_len)[0]
# proto
tfrecord_fp.read(proto_len)
# crc
tfrecord_fp.read(4)
idx_fp.write(str(current) + ' ' +
str(tfrecord_fp.tell() - current) + '\n')
except Exception:
print("Not a valid TFRecord file")
break
tfrecord_fp.close()
idx_fp.close()
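# Each TFRecord is stored as an 8-byte record length, a 4-byte CRC of that length, the
# serialized example bytes, and a 4-byte CRC of the data; the loop above writes the byte
# offset and total size of every record so the DALI reader can seek directly into the file.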
def _prepare_dali(args, n_classes):
if args.gpu_cores > 1:
logger.error(
'Have not built in support for more than one GPU at the moment.'
)
sys.exit(1)
# non NVIDIA cloud environments will not have dali, so we
# have to do the import here.
from image_segmentation.dali_pipeline import CommonPipeline
import nvidia.dali.plugin.tf as dali_tf
batch_size = args.batch_size
image_size = args.image_size
device_id = 0
storage_client = storage.Client()
filenames = []
for filename in args.data:
if filename.startswith('gs://'):
parts = filename[5:].split('/')
bucket_name, blob_name = parts[0], '/'.join(parts[1:])
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(blob_name)
download_filename = os.path.basename(blob_name)
blob.download_to_filename(download_filename)
filenames.append(download_filename)
else:
filenames.append(filename)
tfindex_files = args.tfindex_files or []
if not tfindex_files:
for path in filenames:
tfindex_file = path.split('.')[0] + '.tfindex'
build_tfindex_file(path, tfindex_file)
logger.info('Created tfindex file: {input} -> {output}'.format(
input=path,
output=tfindex_file
))
tfindex_files.append(tfindex_file)
config = dali_config.DaliConfig()
config.summarize()
pipe = CommonPipeline(
args.batch_size,
args.parallel_calls,
device_id,
args.image_size,
filenames,
tfindex_files,
config
)
pipe.build()
daliop = dali_tf.DALIIterator()
with tf.device('/gpu:0'):
results = daliop(
serialized_pipeline=pipe.serialize(),
shape=[args.batch_size, args.image_size, args.image_size, 3],
label_type=tf.int64,
)
input_tensor = results.batch
results.label.set_shape([batch_size, image_size, image_size, 3])
mask = results.label
    new_shape = [image_size // 4, image_size // 4]
    mask_4 = ADE20KDatasetBuilder.scale_mask(mask, 4, new_shape, n_classes)
    new_shape = [image_size // 8, image_size // 8]
    mask_8 = ADE20KDatasetBuilder.scale_mask(mask, 8, new_shape, n_classes)
    new_shape = [image_size // 16, image_size // 16]
    mask_16 = ADE20KDatasetBuilder.scale_mask(mask, 16, new_shape, n_classes)
return {
'input': input_tensor,
'mask_4': mask_4,
'mask_8': mask_8,
'mask_16': mask_16,
}
def train(argv):
"""Train an ICNet model."""
args, unknown = _build_parser(argv)
_summarize_arguments(args)
class_labels = ADE20KDatasetBuilder.load_class_labels(
args.label_filename)
if args.list_labels:
logger.info('Labels:')
labels = ''
for label in class_labels:
labels += '%s\n' % label
logger.info(labels)
sys.exit()
n_classes = len(class_labels)
if args.use_dali:
data = _prepare_dali(args, n_classes)
else:
data = _prepare_dataset(args, n_classes)
if args.add_noise:
logger.info('Adding gaussian noise to input tensor.')
noise = tf.random_normal(shape=tf.shape(data['input']),
mean=0.0,
stddev=0.07,
dtype=tf.float32)
data['input'] = data['input'] + noise
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(config=config)
keras.backend.set_session(sess)
if args.gpu_cores > 1:
with tf.device('/CPU:0'):
icnet = ICNetModelFactory.build(
args.image_size,
n_classes,
weights_path=args.fine_tune_checkpoint,
train=True,
input_tensor=data['input'],
alpha=args.alpha,
)
            gpu_icnet = keras.utils.multi_gpu_model(icnet, gpus=args.gpu_cores)
gpu_icnet.__setattr__('callback_model', icnet)
model = gpu_icnet
else:
with tf.device('/GPU:0'):
model = ICNetModelFactory.build(
args.image_size,
n_classes,
weights_path=args.fine_tune_checkpoint,
train=True,
input_tensor=data['input'],
alpha=args.alpha,
)
optimizer = keras.optimizers.Adam(lr=args.lr)
model.compile(
optimizer,
loss=keras.losses.categorical_crossentropy,
loss_weights=[1.0, 0.4, 0.16],
metrics=['categorical_accuracy'],
target_tensors=[
data['mask_4'], data['mask_8'], data['mask_16']
]
)
if not args.output:
output_filename_fmt = '{model_name}_{size}x{size}_{alpha}_{time}.h5'
filename = output_filename_fmt.format(
model_name=args.model_name,
size=args.image_size,
alpha=str(args.alpha).replace('0', '').replace('.', ''),
time=int(time.time())
)
else:
filename = args.output
print("=======================")
print("Output file name: {name}".format(name=filename))
print("=======================")
callbacks = [
keras.callbacks.ModelCheckpoint(
filename,
verbose=0,
mode='auto',
period=1
),
]
if args.gcs_bucket:
callbacks.append(SaveCheckpointToGCS(filename, args.gcs_bucket))
model.fit(
steps_per_epoch=args.steps_per_epoch,
epochs=int(args.num_steps / args.steps_per_epoch) + 1,
callbacks=callbacks,
)
class SaveCheckpointToGCS(keras.callbacks.Callback):
"""A callback to save local model checkpoints to GCS."""
def __init__(self, local_filename, gcs_filename):
"""Save a checkpoint to GCS.
Args:
local_filename (str): the path of the local checkpoint
gcs_filename (str): the GCS bucket to save the model to
"""
self.gcs_filename = gcs_filename
self.local_filename = local_filename
@staticmethod
def _copy_file_to_gcs(job_dir, file_path):
gcs_url = os.path.join(job_dir, file_path)
logger.info('Saving models to GCS: %s' % gcs_url)
with file_io.FileIO(file_path, mode='rb') as input_f:
with file_io.FileIO(gcs_url, mode='w+') as output_f:
output_f.write(input_f.read())
def on_epoch_end(self, epoch, logs={}):
"""Save model to GCS on epoch end.
Args:
epoch (int): the epoch number
logs (dict, optional): logs dict
"""
basename = os.path.basename(self.local_filename)
self._copy_file_to_gcs(self.gcs_filename, basename)
if __name__ == '__main__':
train(sys.argv[1:])
``` |
{
"source": "JiajunX31/fib_py",
"score": 3
} |
#### File: JiajunX31/fib_py/get_latest_version.py
```python
import os
import pathlib
from typing import Tuple, List, Union
import requests
def get_latest_version_number() -> str:
req = requests.get("https://pypi.org/pypi/fib-py/json")
return req.json()["info"]["version"]
def unpack_version_number(version_string: str) -> Tuple[int, int, int]:
version_buffer: List[str] = version_string.split(".")
return int(version_buffer[0]), int(version_buffer[1]), int(version_buffer[2])
def increase_version_number(
version_buffer: Union[Tuple[int, int, int], List[int]]
) -> List[int]:
first: int = version_buffer[0]
second: int = version_buffer[1]
third: int = version_buffer[2]
third += 1
if third >= 10:
third = 0
second += 1
if second >= 10:
second = 0
first += 1
return [first, second, third]
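# Worked examples of the carry logic: (1, 2, 3) -> [1, 2, 4], (0, 1, 9) -> [0, 2, 0],
# and (0, 9, 9) -> [1, 0, 0].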
def pack_version_number(version_buffer: Union[Tuple[int, int, int], List[int]]) -> str:
return f"{version_buffer[0]}.{version_buffer[1]}.{version_buffer[2]}"
def write_version_to_file(version_number: str) -> None:
version_file_path = (
str(pathlib.Path(__file__).parent.absolute()) + "/fib_py/version.py"
)
if os.path.exists(version_file_path):
os.remove(version_file_path)
with open(version_file_path, "w") as f:
f.write(f"VERSION='{version_number}'")
if __name__ == "__main__":
write_version_to_file(
pack_version_number(
increase_version_number(unpack_version_number(get_latest_version_number()))
)
)
``` |
{
"source": "jiaju-yang/leetcode",
"score": 4
} |
#### File: leetcode/src/graph_tools.py
```python
class Node:
def __init__(self, val=0, neighbors=None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
def __eq__(self, other):
if isinstance(other, Node):
return self.val == other.val
return False
def __str__(self) -> str:
return f'Node({self.val})'
def __repr__(self) -> str:
return str(self)
def construct_graph_from_adj_list(adj_list):
if not adj_list:
return None
nodes = {}
for value, adj in enumerate(adj_list, 1):
node = nodes[value] if value in nodes else Node(value)
nodes[value] = node
for adj_node_value in adj:
if adj_node_value not in nodes:
nodes[adj_node_value] = Node(adj_node_value)
node.neighbors.append(nodes[adj_node_value])
return nodes[1]
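# Example: construct_graph_from_adj_list([[2, 4], [1, 3], [2, 4], [1, 3]]) builds the
# four-node cycle 1-2-3-4 and returns the node whose value is 1.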
def is_graph_equal(node_a: Node, node_b: Node):
if not node_a and not node_b:
return True
visited = set()
def is_equal(node_a, node_b):
if node_a.val in visited:
return True
if node_a != node_b or len(node_a.neighbors) != len(node_b.neighbors):
return False
visited.add(node_a.val)
for adj_of_a, adj_of_b in zip(node_a.neighbors, node_b.neighbors):
if not is_equal(adj_of_a, adj_of_b):
return False
return True
return is_equal(node_a, node_b)
```
#### File: leetcode/src/linkedlist_tools.py
```python
from typing import List
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def __eq__(self, other):
if not isinstance(other, ListNode):
return False
return self.val == other.val and self.next == other.next
def __repr__(self):
return f'Node({self.val})'
def construct_linkedlist(values: List, pos=None):
"""
Construct a linked list from list
:param pos: if there is a cycle in the linked list, pass the index of the node that the last node should point to.
"""
previous = head = pos_node = None
for i in range(len(values)):
current = ListNode(values[i])
if not head:
head = current
if previous:
previous.next = current
previous = current
if i == pos:
pos_node = current
if i == len(values) - 1:
current.next = pos_node
return head
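# Example: construct_linkedlist([1, 2, 3]) builds 1 -> 2 -> 3 -> None, while
# construct_linkedlist([1, 2, 3], pos=0) makes the tail node 3 point back to node 1,
# creating a cycle.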
def construct_multi_list(values: List[List]) -> List[ListNode]:
result = []
for value in values:
result.append(construct_linkedlist(value))
return result
```
#### File: leetcode/src/q105-construct-binary-tree-from-preorder-and-inorder-traversal.py
```python
from typing import List, Optional
from .tree_tools import *
# @lc code=start
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:
if not preorder:
return None
node = TreeNode(preorder[0])
length = inorder.index(preorder[0])
node.left = self.buildTree(preorder[1:1+length], inorder[:length])
node.right = self.buildTree(preorder[1+length:], inorder[length+1:])
return node
# @lc code=end
solve = Solution().buildTree
def test_default():
    assert solve([3, 9, 20, 15, 7], [9, 3, 15, 20, 7]) == construct_tree(
        [3, 9, 20, None, None, 15, 7])
def test_corner_cases():
    assert solve([-1], [-1]) == TreeNode(-1)
    assert solve([], []) is None
    assert solve([1, 2], [2, 1]) == construct_tree([1, 2])
```
#### File: leetcode/src/q1143-longest-common-subsequence.py
```python
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
dp = [[0 for _ in range(len(text2) + 1)]
for _ in range(len(text1) + 1)]
for i in range(1, len(text1) + 1):
for j in range(1, len(text2) + 1):
if text1[i-1] == text2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[len(text1)][len(text2)]
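# Worked example: for text1='ace', text2='abcde' the last DP row fills to
# [0, 1, 1, 2, 2, 3], so dp[3][5] == 3, matching the common subsequence 'ace'.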
# @lc code=end
solve = Solution().longestCommonSubsequence
def test_default():
assert solve('ace', 'abcde') == 3
assert solve('abcde', 'ace') == 3
assert solve('abcde', 'aae') == 2
assert solve('hofubmnylkra', 'pqhgxgdofcvmr') == 5
assert solve('ezupkr', 'ubmrapg') == 2
assert solve('oxcpqrsvwf', 'shmtulqrypy') == 2
def test_corner_cases():
assert solve('', 'a') == 0
assert solve('a', '') == 0
assert solve('a', 'a') == 1
assert solve('a', 'b') == 0
assert solve('aa', 'b') == 0
assert solve('aa', 'a') == 1
assert solve('aa', 'aa') == 2
```
#### File: leetcode/src/q139-word-break.py
```python
from typing import List
# @lc code=start
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
dp = [False] * (len(s) + 1)
dp[0] = True
max_word_len = len(max(wordDict, key=len))
last_true = 0
for i in range(1, len(s) + 1):
if i > last_true + max_word_len:
break
for word in wordDict:
if i - len(word) >= 0 and dp[i-len(word)]:
sub = s[i-len(word):i]
if sub == word:
dp[i] = True
last_true = i
break
return dp[-1]
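# Worked example: for s='leetcode', wordDict=['leet', 'code'], dp[0]=True, dp[4]=True
# because s[0:4] == 'leet', and dp[8]=True because dp[4] is True and s[4:8] == 'code',
# so the function returns True.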
# @lc code=end
solve = Solution().wordBreak
def test_default():
assert solve('leetcode', ['leet', 'code'])
assert solve('applepenapple', ['apple', 'pen'])
assert not solve('catsandog', ['cats', 'dog', 'sand', 'and', 'cat'])
def test_corner_cases():
assert solve('a', ['a'])
assert solve('aa', ['a'])
assert not solve('a', ['b'])
assert solve('a', ['a', 'b'])
```
#### File: leetcode/src/q19-remove-nth-node-from-end-of-list.py
```python
from typing import Optional
from .linkedlist_tools import *
# @lc code=start
class Solution:
def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
dummy = ListNode(0, head)
previous, end = dummy, head
for _ in range(n):
end = end.next
while end:
previous = previous.next
end = end.next
previous.next = previous.next.next
return dummy.next
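# The two pointers keep a fixed gap: after advancing `end` n nodes ahead, walking both
# until `end` falls off the list leaves `previous` just before the n-th node from the end.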
# @lc code=end
solve = Solution().removeNthFromEnd
def test_default():
assert solve(construct_linkedlist(
[1, 2, 3, 4, 5]), 2) == construct_linkedlist([1, 2, 3, 5])
def test_corner_cases():
assert solve(construct_linkedlist([1]), 1) == construct_linkedlist([])
assert solve(construct_linkedlist([1, 2]), 1) == construct_linkedlist([1])
```
#### File: leetcode/src/q242-valid-anagram.py
```python
from typing import Counter
# @lc code=start
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
return Counter(s) == Counter(t)
# @lc code=end
solve = Solution().isAnagram
def test_default():
assert solve('anagram', 'nagaram')
assert not solve('rat', 'car')
def test_corner_cases():
assert solve('a', 'a')
assert not solve('a', 'b')
assert solve('ab', 'ba')
assert not solve('a', 'ba')
```
#### File: leetcode/src/q297-serialize-and-deserialize-binary-tree.py
```python
from .tree_tools import *
from collections import deque
# @lc code=start
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
return f'{root.val},{self.serialize(root.left)},{self.serialize(root.right)}' if root else ''
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.preorder(deque(data.split(',')))
def preorder(self, q):
val = q.popleft()
if val == '':
return None
node = TreeNode(int(val))
node.left = self.preorder(q)
node.right = self.preorder(q)
return node
# Your Codec object will be instantiated and called as such:
# ser = Codec()
# deser = Codec()
# ans = deser.deserialize(ser.serialize(root))
# @lc code=end
solution = Codec()
def test_default():
assert solution.deserialize(solution.serialize(construct_tree(
[1, 2, 3, None, None, 4, 5]))) == construct_tree([1, 2, 3, None, None, 4, 5])
def test_corner_cases():
assert solution.deserialize(solution.serialize(None)) == None
assert solution.deserialize(solution.serialize(construct_tree(
[1]))) == construct_tree([1])
assert solution.deserialize(solution.serialize(construct_tree(
[1, 2]))) == construct_tree([1, 2])
```
#### File: leetcode/src/q371-sum-of-two-integers.py
```python
from itertools import zip_longest
class Solution:
def getSum(self, a: int, b: int) -> int:
mask = 0xfff
max = 2047
while b:
a, b = (a ^ b) & mask, ((a & b) << 1) & mask
return a if a <= max else ~(a ^ mask)
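# The 12-bit mask keeps intermediate sums in [0, 4095]; anything above max=2047 is a
# negative value in disguise and is recovered via ~(a ^ mask). Worked example:
# getSum(-1, -2) converges to a=4093, and ~(4093 ^ 0xfff) == ~2 == -3.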
# @lc code=end
solve = Solution().getSum
def test_default():
assert solve(5, 2) == 7
assert solve(0, 1000) == 1000
def test_overflow_cases():
assert solve(1000, 1000) == 2000
def test_negative_cases():
assert solve(-1, -2) == -3
assert solve(-1, 2) == 1
assert solve(-1, 1) == 0
assert solve(-1, 0) == -1
```
#### File: leetcode/src/q417-pacific-atlantic-water-flow.py
```python
from typing import List
# @lc code=start
class Solution:
directions = [[0, 1], [1, 0], [0, -1], [-1, 0]]
def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:
m = len(heights)
n = len(heights[0])
pacific_ocean_access = [[False] * n for _ in range(m)]
for i in range(n):
self.dfs((0, i), pacific_ocean_access, heights, m, n)
for i in range(m):
self.dfs((i, 0), pacific_ocean_access, heights, m, n)
atlantic_ocean_access = [[False] * n for _ in range(m)]
for i in range(n):
self.dfs((m-1, i), atlantic_ocean_access, heights, m, n)
for i in range(m):
self.dfs((i, n-1), atlantic_ocean_access, heights, m, n)
result = []
for i in range(m):
for j in range(n):
if pacific_ocean_access[i][j] and atlantic_ocean_access[i][j]:
result.append([i, j])
return result
def dfs(self, node, states, heights, m, n):
if states[node[0]][node[1]]:
return
states[node[0]][node[1]] = True
for i, j in self.neighbors(node[0], node[1], heights, m, n):
self.dfs((i, j), states, heights, m, n)
def neighbors(self, i, j, heights, m, n):
for neighbor_i_off, neighbor_j_off in self.directions:
neighbor_i, neighbor_j = i + neighbor_i_off, j + neighbor_j_off
if 0 <= neighbor_i < m and 0 <= neighbor_j < n and heights[i][j] <= heights[neighbor_i][neighbor_j]:
yield (neighbor_i, neighbor_j)
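# The DFS runs "uphill" from the ocean border cells: a neighbor is explored only when its
# height is >= the current cell's, so every cell it marks can drain back down to that ocean.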
# @lc code=end
solve = Solution().pacificAtlantic
def test_default():
assert solve([[1, 2, 2, 3, 5], [3, 2, 3, 4, 4], [2, 4, 5, 3, 1], [6, 7, 1, 4, 5], [
5, 1, 1, 2, 4]]) == [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
def test_corner_cases():
assert solve([[1]]) == [[0, 0]]
assert solve([[2, 1], [1, 2]]) == [[0, 0], [0, 1], [1, 0], [1, 1]]
```
#### File: leetcode/src/q435-non-overlapping-intervals.py
```python
from typing import List
from operator import itemgetter
# @lc code=start
class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
max_count, end_at = 0, float('-inf')
for interval in sorted(intervals, key=itemgetter(1)):
if end_at <= interval[0]:
max_count += 1
end_at = interval[1]
return len(intervals) - max_count
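# Greedy trace for [[1, 2], [2, 3], [3, 4], [1, 3]] sorted by end time: keep [1, 2],
# keep [2, 3], skip [1, 3] (it starts before 3), keep [3, 4]; 3 of 4 intervals are kept,
# so exactly 1 removal is needed.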
# @lc code=end
solve = Solution().eraseOverlapIntervals
def test_default():
assert solve([[1, 2], [2, 3], [3, 4], [1, 3]]) == 1
def test_corner_cases():
assert solve([[1, 2], [1, 2], [1, 2]]) == 2
assert solve([[1, 2], [2, 3]]) == 0
assert solve([]) == 0
assert solve([[1, 2]]) == 0
assert solve([[1, 5], [2, 3]]) == 1
```
#### File: leetcode/src/q5-longest-palindromic-substring.py
```python
class DPSolution:
def longestPalindrome(self, s: str) -> str:
dp = [[False] * len(s) for _ in range(len(s))]
start, end = 0, 0
for i in range(len(s)):
dp[i][i] = True
for i in range(len(s)-2, -1, -1):
for j in range(i+1, len(s)):
if (j == i + 1 or dp[i+1][j-1]) and s[i] == s[j]:
dp[i][j] = True
if j - i > end - start:
start, end = i, j
return s[start:end+1]
class TwoPointersSolution:
def longestPalindrome(self, s: str) -> str:
start, end = 0, 0
for i in range(len(s)):
start, end = max((start, end),
self.extend(i, i, s),
self.extend(i, i+1, s),
key=lambda pair: pair[1] - pair[0])
return s[start: end + 1]
def extend(self, left, right, s):
while left >= 0 and right < len(s) and s[left] == s[right]:
left -= 1
right += 1
return left+1, right-1
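# Worked example: for 'babad', expanding around index 1 returns (0, 2), i.e. 'bab',
# while expanding around index 2 returns (1, 3), i.e. 'aba'; ties keep the first result.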
Solution = TwoPointersSolution
# @lc code=end
solve = Solution().longestPalindrome
def test_default():
    assert solve('babad') in ('bab', 'aba')
assert solve('cbbd') == 'bb'
def test_corner_cases():
assert solve('a') == 'a'
assert solve('ab') == 'a'
assert solve('aba') == 'aba'
```
#### File: leetcode/src/q91-decode-ways.py
```python
class Solution:
def numDecodings(self, s: str) -> int:
it = iter(s)
p_char = next(it)
previous, current = 1, (1 if self.is_valid(p_char) else 0)
for c in it:
previous, current, p_char = current, (
current if self.is_valid(c) else 0) + (previous if self.is_valid(p_char + c) else 0), c
return current
def is_valid(self, t):
if t.startswith('0'):
return False
return 1 <= int(t) <= 26
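# Worked example: for s='226' the rolling pair (previous, current) evolves
# (1, 1) -> (1, 2) -> (2, 3), matching the decodings "2 2 6", "22 6" and "2 26".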
# @lc code=end
solve = Solution().numDecodings
def test_default():
assert solve('11106') == 2
assert solve('12') == 2
assert solve('226') == 3
def test_corners():
assert solve('0') == 0
assert solve('06') == 0
assert solve('6') == 1
```
#### File: leetcode/src/q98-validate-binary-search-tree.py
```python
from .tree_tools import *
from typing import Optional
# @lc code=start
class Solution:
def isValidBST(self, root: Optional[TreeNode]) -> bool:
return self.dfs(root, float('-inf'), float('inf'))
def dfs(self, node, minimum, maximum):
if not node:
return True
if node.val >= maximum or node.val <= minimum:
return False
return self.dfs(node.left, minimum, node.val) and self.dfs(node.right, node.val, maximum)
# @lc code=end
solve = Solution().isValidBST
def test_default():
assert solve(construct_tree([2, 1, 3]))
assert not solve(construct_tree([5, 1, 4, None, None, 3, 6]))
def test_corner_cases():
assert solve(construct_tree([1]))
assert not solve(construct_tree([1, 2]))
assert solve(construct_tree([1, None, 2]))
assert not solve(construct_tree([2, 2, 2]))
``` |
{
"source": "jiakai0419/Curvature-Learning-Framework",
"score": 2
} |
#### File: curvlearn/optimizers/radagrad.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, state_ops, control_flow_ops
from curvlearn.manifolds import euclidean
class RAdagrad(tf.train.AdagradOptimizer):
"""Riemannian Adagrad optimizer
"""
def __init__(self,
manifold,
c,
learning_rate=1e-3,
initial_accumulator_value=0.1,
use_locking=False,
name="RAdagrad"):
"""Riemannian Adam optimizer initialization.
Args:
manifold (class): The manifold.
c (float): The manifold curvature.
learning_rate (float, optional): The learning rate to use.
initial_accumulator_value (float, optional): Starting value for the accumulators, must be positive.
use_locking (bool, optional): If True use locks for update operations.
name (str, optional): The optimizer name.
"""
super(RAdagrad, self).__init__(learning_rate=learning_rate, initial_accumulator_value=initial_accumulator_value,
use_locking=use_locking, name=name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
self.default_manifold = euclidean.Euclidean()
self.manifold = manifold
self.default_c = c
def _create_slots(self, var_list):
"""Creates manifold slot for var_list.
Args:
var_list (list): A list of variables.
"""
for v in var_list:
dtype = v.dtype.base_dtype
if v.get_shape().is_fully_defined():
init = tf.constant_initializer(
self._initial_accumulator_value, dtype=dtype)
else:
init = self._init_constant_op(v, dtype)
self._get_or_make_slot_with_initializer(
v, init, v.get_shape(), dtype, "accumulator", self._name)
# TODO: pass manifold attr into trainable_variable list
# v.manifold = v.manifold if hasattr(v, "manifold") else self.default_manifold
if "RiemannianParameter" in v.name:
v.manifold = self.manifold
else:
v.manifold = self.default_manifold
def _init_constant_op(self, v, dtype):
def init():
"""Use a Tensor instead of initializer if variable does not have static shape.
"""
init_constant = tf.fill(
tf.shape(v), self._initial_accumulator_value)
return tf.cast(init_constant, dtype)
return init
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
def _apply_dense(self, grad, var):
"""Apply gradients to variables.
Args:
grad (tensor): The gradient.
var (tensor): The variable.
Returns:
operation: An Operation that applies the specified gradients.
"""
rgrad = var.manifold.egrad2rgrad(grad, var, c=self.default_c)
rgrad_sq = var.manifold.inner(
rgrad, rgrad, var, c=self.default_c, keep_shape=True)
lr = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
acc = self.get_slot(var, "accumulator")
acc_update = state_ops.assign_add(acc, rgrad_sq)
with tf.control_dependencies([acc_update]):
new_value = var.manifold.retraction(
-lr * rgrad / math_ops.sqrt(acc_update), var, c=self.default_c)
var_update = state_ops.assign(
ref=var, value=new_value, use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, acc_update])
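    # The dense update above follows Riemannian Adagrad: the Euclidean gradient is mapped
    # to a Riemannian gradient (egrad2rgrad), its manifold inner product accumulates in the
    # Adagrad slot, and the scaled step -lr * rgrad / sqrt(acc) is applied through the
    # manifold retraction instead of a plain Euclidean subtraction.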
def _apply_sparse(self, grad, var):
if var.manifold.name == "Euclidean":
return super(RAdagrad, self)._apply_sparse(grad, var)
raise NotImplementedError
def _resource_apply_dense(self, grad, var):
if var.manifold.name == "Euclidean":
return super(RAdagrad, self)._resource_apply_dense(grad, var)
raise NotImplementedError
def _resource_apply_sparse(self, grad, var, indices):
if var.manifold.name == "Euclidean":
return super(RAdagrad, self)._resource_apply_sparse(grad, var, indices)
raise NotImplementedError
```
#### File: examples/tree_pretrain/CateTreeModel.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('.')
import tensorflow as tf
import tf_euler
from .utils.loss import Loss
from .utils.summary import summary_tensor
from .utils.clip_gradient import clip_gradient
class HyperCateTreeModel(object):
def __init__(self, src_node, pos_node, global_step,
neg_sample_num, nb_sample_num, embedding_dimension, embedding_space, mid_dim, out_dim,
embedding_init_type, embedding_stddev, dense_init_type, dense_stddev, bias_init_val,
manifold, decode, l2_decay,
l2_enable, soft_c_enable, clip_gradient_enable,
c, loss_type, learning_rate, **kwargs):
self.src_node = src_node
self.pos_node = pos_node
self.global_step = global_step
self.neg_sample_num = neg_sample_num
self.nb_sample_num = nb_sample_num
self.embedding_dimension = embedding_dimension
self.embedding_space = embedding_space
self.mid_dim = mid_dim
self.out_dim = out_dim
self.embedding_init_type = embedding_init_type
self.embedding_stddev = embedding_stddev
self.dense_init_type = dense_init_type
self.dense_stddev = dense_stddev
self.bias_init_val = bias_init_val
self.manifold = manifold
self.decode = decode
self.l2_decay = l2_decay
self.l2_enable = l2_enable
self.soft_c_enable = soft_c_enable
self.clip_gradient_enable = clip_gradient_enable
self.init_c = tf.constant(c, dtype=self.manifold.dtype)
self.loss_func = Loss(loss_type)
self.opt = tf.train.AdagradOptimizer(learning_rate=learning_rate)
src_feature, pos_feature, neg_feature = self.cate_tree_feature_without_neighbor(self.src_node, self.pos_node)
with tf.variable_scope('embedding_table', reuse=tf.AUTO_REUSE) as scope:
cate_feature_names = ['node_id', 'level']
embedding_matrix_map = {}
for name in cate_feature_names:
embedding_matrix_map[name] = tf.get_variable(name + '_embedding_table',
shape=(self.embedding_space, self.embedding_dimension),
dtype=self.manifold.dtype,
initializer=self.get_initializer(
init_val=self.embedding_init_type,
stddev=self.embedding_stddev)
)
embedding_matrix_map[name] = self.manifold.variable(embedding_matrix_map[name], c=self.init_c)
with tf.variable_scope('feature_embedding_layer', reuse=tf.AUTO_REUSE) as scope:
src_embedding = self.sparse_feature_embedding(embedding_matrix_map, src_feature, cate_feature_names)
src_embedding = self.manifold.concat(src_embedding, axis=1, c=self.init_c)
pos_embedding = self.sparse_feature_embedding(embedding_matrix_map, pos_feature, cate_feature_names)
pos_embedding = self.manifold.concat(pos_embedding, axis=1, c=self.init_c)
neg_embedding_all = self.sparse_feature_embedding(embedding_matrix_map, neg_feature, cate_feature_names)
neg_embedding_all = self.manifold.concat(neg_embedding_all, axis=1, c=self.init_c)
if self.soft_c_enable is True:
with tf.variable_scope('manifold_c', reuse=tf.AUTO_REUSE) as scope:
                # NOTE: distinct variable names assumed here; the original reused 'c_dnn'
                # for both curvatures, which makes c_mid and c_out share one variable.
                c_mid = tf.get_variable('c_mid', dtype=self.manifold.dtype,
                                        initializer=tf.constant(-1.0, dtype=self.manifold.dtype))
                c_out = tf.get_variable('c_out', dtype=self.manifold.dtype,
                                        initializer=tf.constant(-1.0, dtype=self.manifold.dtype))
else:
c_mid = tf.constant(-1.0, dtype=self.manifold.dtype)
c_out = tf.constant(-1.0, dtype=self.manifold.dtype)
clip = lambda x: tf.clip_by_value(x, clip_value_min=-1e5, clip_value_max=-1e-5)
c_mid, c_out = clip(c_mid), clip(c_out)
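        # Clipping keeps the learned curvatures in [-1e5, -1e-5], i.e. strictly negative
        # (hyperbolic) and away from the Euclidean limit c = 0.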
with tf.variable_scope('output_layer', reuse=tf.AUTO_REUSE) as scope:
src_output = self.hyper_output_layer(self.init_c, c_mid, c_out, src_embedding, self.embedding_dimension * 2,
self.mid_dim, self.out_dim, scope_name='src_output')
pos_output = self.hyper_output_layer(self.init_c, c_mid, c_out, pos_embedding, self.embedding_dimension * 2,
self.mid_dim, self.out_dim, scope_name='dst_output')
neg_output_all = self.hyper_output_layer(self.init_c, c_mid, c_out, neg_embedding_all,
self.embedding_dimension * 2, self.mid_dim, self.out_dim,
scope_name='dst_output')
origin = self.manifold.proj(tf.zeros([self.out_dim], dtype=self.manifold.dtype), c=c_out)
l2_penalty = lambda x: self.manifold.distance(x, origin, c=c_out)
penalty = []
distance = []
with tf.variable_scope('loss_metric_layer') as scope:
if self.decode == 'distance':
decode_func = lambda x, y: tf.sigmoid(5.0 - 5.0 * self.manifold.distance(x, y, c=c_out))
else:
decode_func = self.cosine_fun
pos_sim = decode_func(src_output, pos_output)
penalty.append(l2_penalty(src_output))
penalty.append(l2_penalty(pos_output))
distance.append(self.manifold.distance(src_output, pos_output, c=c_out))
att_sim = [pos_sim]
node_neg_id_ays_re = tf.reshape(neg_output_all, [-1, self.neg_sample_num * self.out_dim])
node_neg_id_ays_list = tf.split(node_neg_id_ays_re, num_or_size_splits=self.neg_sample_num, axis=1)
for neg in node_neg_id_ays_list:
neg_sim = decode_func(src_output, neg)
att_sim.append(neg_sim)
penalty.append(l2_penalty(neg))
distance.append(self.manifold.distance(src_output, neg, c=c_out))
sim = tf.concat(att_sim, 1)
tf.summary.scalar('c_final', c_out)
l2_penalty = tf.concat(penalty, 1)
l2_loss = tf.reduce_mean(tf.reduce_sum(l2_penalty, axis=-1))
distance = tf.concat(distance, 1)
pos_distance = tf.slice(distance, [0, 0], [-1, 1])
neg_distance = tf.slice(distance, [0, 1], [-1, -1])
summary_tensor('positive_distance', pos_distance)
summary_tensor('negative_distance', neg_distance)
summary_tensor('all_distance', distance)
tf.summary.scalar('l2_penalty', l2_loss)
self.loss = self.loss_func(sim)
if self.l2_enable:
self.loss += l2_decay * l2_loss
gradients, variables = zip(*self.opt.compute_gradients(self.loss))
if self.clip_gradient_enable:
gradients = clip_gradient(gradients)
self.train_op = self.opt.apply_gradients(zip(gradients, variables), global_step=self.global_step)
def get_model_result(self):
return self.train_op, self.loss
def cosine_fun(self, ays_src, ays_dst):
src_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_src), 1, True))
dst_norm = tf.sqrt(tf.reduce_sum(tf.square(ays_dst), 1, True))
prod = tf.reduce_sum(tf.multiply(ays_src, ays_dst), 1, True)
norm_prod = tf.multiply(src_norm, dst_norm)
cosine = tf.truediv(prod, norm_prod)
return cosine
def get_initializer(self, init_val=1, stddev=0.1):
dtype = self.manifold.dtype
if init_val == 1:
return tf.truncated_normal_initializer(dtype=dtype, stddev=stddev)
elif init_val == 2:
return tf.uniform_unit_scaling_initializer(factor=stddev, seed=10, dtype=dtype)
elif init_val == 3:
return tf.glorot_normal_initializer(dtype=dtype)
else:
return None
def global_sample_cate_tree(self, src, pos):
batch_size = tf.shape(src)[0]
negs = tf_euler.sample_node(batch_size * self.neg_sample_num, node_type='-1')
src_nodes = tf.reshape(src, [-1])
pos_nodes = tf.reshape(pos, [-1])
neg_nodes = tf.reshape(negs, [-1])
return src_nodes, pos_nodes, neg_nodes
def local_sample_cate_tree(self, src, pos):
negs = tf_euler.sample_node_with_src(pos, self.neg_sample_num)
src_nodes = tf.reshape(src, [-1])
pos_nodes = tf.reshape(pos, [-1])
neg_nodes = tf.reshape(negs, [-1])
return src_nodes, pos_nodes, neg_nodes
def node_feature_with_neighbor(self, node, etypes, nb_cnt, n_feature_names, cn_feature_names):
node_c = tf.reshape(node, [-1])
node_filled = tf_euler.get_sparse_feature(node_c, n_feature_names)
n_nodes, _, _ = tf_euler.sample_neighbor(node, edge_types=etypes, count=nb_cnt)
n_nodes = tf.reshape(n_nodes, [-1])
n_nodes_filled = tf_euler.get_sparse_feature(n_nodes, cn_feature_names)
return node_filled, n_nodes_filled
def hyper_convolution_with_neighbor(self, node, c_node_nei, num=5, dim=8):
c_node_nei_s = tf.reshape(c_node_nei, [-1, num, dim])
c_nei = self.manifold.mean(c_node_nei_s, axis=1, base=None, c=self.init_c)
features = self.manifold.concat([node, c_nei], axis=1, c=self.init_c)
return features
def cate_tree_feature_with_neighbor(self, source, pos_node):
src, pos, neg = self.global_sample_cate_tree(source, pos_node)
full_features = ['node_id', 'level']
c_part_features = ['node_id']
src_f, src_c_nb_f = self.node_feature_with_neighbor(node=src,
etypes=['1'],
nb_cnt=self.nb_sample_num,
n_feature_names=full_features,
cn_feature_names=c_part_features
)
pos_f, pos_c_nb_f = self.node_feature_with_neighbor(node=pos,
etypes=['1'],
nb_cnt=self.nb_sample_num,
n_feature_names=full_features,
cn_feature_names=c_part_features
)
neg_f, neg_c_nb_f = self.node_feature_with_neighbor(node=neg,
etypes=['1'],
nb_cnt=self.nb_sample_num,
n_feature_names=full_features,
cn_feature_names=c_part_features
)
return src_f, src_c_nb_f, pos_f, pos_c_nb_f, neg_f, neg_c_nb_f
def node_feature_without_neighbor(self, node, n_feature_names):
node_c = tf.reshape(node, [-1])
node_filled = tf_euler.get_sparse_feature(node_c, n_feature_names)
return node_filled
def cate_tree_feature_without_neighbor(self, source, pos_node):
src, pos, neg = self.global_sample_cate_tree(source, pos_node)
full_features = ['node_id', 'level']
src_f = self.node_feature_without_neighbor(node=src, n_feature_names=full_features)
pos_f = self.node_feature_without_neighbor(node=pos, n_feature_names=full_features)
neg_f = self.node_feature_without_neighbor(node=neg, n_feature_names=full_features)
return src_f, pos_f, neg_f
def sparse_feature_embedding(self, embedding_matrix_map, sparse_inputs, names, no_biases=True):
l = []
for i in range(len(sparse_inputs)):
with tf.variable_scope('sparse_feature_embedding_' + names[i]):
emb = tf.nn.embedding_lookup_sparse(embedding_matrix_map[names[i]], sparse_inputs[i], None,
combiner='sum')
emb_l2 = tf.nn.l2_loss(emb)
tf.losses.add_loss(emb_l2, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)
if not no_biases:
biases = tf.get_variable('biases',
initializer=tf.constant(self.bias_init_val, dtype=self.manifold.dtype,
shape=[self.embedding_dimension])
)
emb = self.manifold.add_bias(emb, biases, c=self.init_c)
l.append(emb)
return l
def hyper_linear_layer(self, train_inputs, in_dim, out_dim, c_in, c_out, activation, scope_name, no_biases=False):
with tf.variable_scope(scope_name):
weights = tf.get_variable('weights',
[in_dim, out_dim],
dtype=self.manifold.dtype,
initializer=self.get_initializer(init_val=self.dense_init_type,
stddev=self.dense_stddev),
regularizer=tf.nn.l2_loss
)
train = self.manifold.matmul(train_inputs, weights, c=c_in)
if not no_biases:
biases = tf.get_variable('biases',
initializer=tf.constant(self.bias_init_val, dtype=self.manifold.dtype,
shape=[out_dim])
)
train = self.manifold.add_bias(train, biases, c=c_in)
if activation is not None:
train = self.manifold.activation(train, act=activation, c_in=c_in, c_out=c_out)
return train
def hyper_output_layer(self, c_in, c_mid, c_out, id_embedding, input_dim, mid_dim, out_dim, scope_name):
with tf.variable_scope(scope_name):
out1 = self.hyper_linear_layer(train_inputs=id_embedding,
in_dim=input_dim,
out_dim=mid_dim,
c_in=c_in,
c_out=c_mid,
activation=tf.nn.elu,
scope_name='output_layer1',
no_biases=False
)
out2 = self.hyper_linear_layer(train_inputs=out1,
in_dim=mid_dim,
out_dim=out_dim,
c_in=c_mid,
c_out=c_out,
activation=tf.nn.elu,
scope_name='output_layer2',
no_biases=False
)
return out2
```
#### File: tree_pretrain/utils/learning_rate.py
```python
from tensorflow.python.framework import ops
import tensorflow as tf
class LearningRate(object):
"""Gradually warm-up(increasing and decreasing) learning rate in optimizer.
Includes three stages: warm up stage, increasing stage and decay stage.
"""
def __init__(self,
lr=1e-2,
lr_warm=1e-3,
lr_end=1e-4,
warm_step=1e5,
increase_step=1e6,
decay_step=1e8):
"""Initialize
Args:
lr_warm (float): The learning rate changes from 0 to lr_warm in the warm up stage.
lr (float): The learning rate changes from lr_warm to lr in the increasing stage.
lr_end (float): The learning rate changes from lr to lr_end in the decay stage.
warm_step (int): The step between 0 and warm_step is in the warm up stage.
increase_step (int): The step between warm_step and increase_step is in the increasing stage.
            decay_step (int): The step between increase_step and decay_step is in the decay stage.
"""
super(LearningRate, self).__init__()
self.lr = float(max(lr, 0.0))
self.lr_warm = float(max(lr_warm, 0.0))
self.lr_end = float(max(lr_end, 0.0))
self.warm_step = float(max(warm_step, 0))
self.increase_step = float(max(increase_step, 0))
self.decay_step = float(max(decay_step, 0))
self.step = 0
def get_step(self):
"""Gets current training step.
Returns:
            float: The current training step as a float tensor.
"""
return tf.to_float(tf.train.get_or_create_global_step())
def _warm_up_lr(self, step):
"""Computes learning rate in the warm up stage.
Args:
step (int): current step.
Returns:
float: The updated learning rate.
"""
return self.lr_warm * step / self.warm_step
def _increase_lr(self, step):
"""Computes learning rate in the increasing stage.
Args:
step (int): current step.
Returns:
float: The updated learning rate.
"""
ratio = (step - self.warm_step) / (self.increase_step - self.warm_step)
return self.lr_warm + ratio * (self.lr - self.lr_warm)
def _decay_lr(self, step):
"""Computes learning rate in the decay stage.
Args:
step (int): current step.
Returns:
float: The updated learning rate.
"""
ratio = (step - self.increase_step) / \
(self.decay_step - self.increase_step)
return self.lr_end + (1.0 - ratio) * (self.lr - self.lr_end)
def _end_lr(self, step):
"""Computes learning rate after the decay stage.
Args:
step (int): current step.
Returns:
float: The updated learning rate.
"""
return self.lr_end
def _less_than(self, a, b):
"""Returns the truth value of (a < b) element-wise.
a is a Tensor, b is a float/int.
Args:
a (tensor): A tensor.
            b (float/int): A float or int value.
Returns:
tensor: A tensor of type bool.
"""
b = ops.convert_to_tensor(b, dtype=a.dtype.base_dtype)
return tf.math.less(a, b)
def get_lr(self):
"""Computes the learning rate according to the training step.
Returns:
float: The updated learning rate.
"""
current_step = self.get_step()
lr = tf.cond(
self._less_than(current_step, self.warm_step),
lambda: self._warm_up_lr(current_step),
lambda: tf.cond(
self._less_than(current_step, self.increase_step),
lambda: self._increase_lr(current_step),
lambda: tf.cond(
self._less_than(current_step, self.decay_step),
lambda: self._decay_lr(current_step),
lambda: self._end_lr(current_step)
)
)
)
return lr
def __call__(self):
return ops.convert_to_tensor(self.get_lr(), dtype=tf.float32)
```
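The schedule above is piecewise-linear arithmetic on the global step, so it can be eyeballed without building a TensorFlow graph. Below is a minimal, framework-free sketch (not part of the repository) that mirrors the three stages with the same constructor defaults; `piecewise_lr` is a hypothetical helper name.
```python
# Mirrors LearningRate's three stages; constants are the constructor defaults above.
def piecewise_lr(step, lr=1e-2, lr_warm=1e-3, lr_end=1e-4,
                 warm_step=1e5, increase_step=1e6, decay_step=1e8):
    if step < warm_step:          # warm up: 0 -> lr_warm
        return lr_warm * step / warm_step
    if step < increase_step:      # increase: lr_warm -> lr
        ratio = (step - warm_step) / (increase_step - warm_step)
        return lr_warm + ratio * (lr - lr_warm)
    if step < decay_step:         # decay: lr -> lr_end
        ratio = (step - increase_step) / (decay_step - increase_step)
        return lr_end + (1.0 - ratio) * (lr - lr_end)
    return lr_end                 # after decay_step: stay at lr_end
for s in (0, 5e4, 1e5, 5e5, 1e6, 5e7, 1e8):
    print(int(s), piecewise_lr(s))
```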
#### File: tree_pretrain/utils/loss.py
```python
import tensorflow as tf
class Loss(object):
"""A set of loss functions
"""
def __init__(self, name, weight_decay=0.0):
"""Initialization.
Args:
name (str): The name of loss function.
weight_decay (float, optional): The factor for regularization term.
Raises:
NotImplementedError: Loss function not implemented.
"""
super(Loss, self).__init__()
self.weight_decay = weight_decay
if hasattr(self, name):
self.loss = getattr(self, name)
else:
raise NotImplementedError
def softmax_loss(self, sim, ratio=1.0):
"""Computes the softmax loss.
Args:
sim (tensor): The sim value for one positive sample and several negative samples.
ratio (float, optional): The scale factor.
Returns:
tensor: The softmax loss
"""
prob = tf.nn.softmax(ratio * sim)
hit_prob = tf.slice(prob, [0, 0], [-1, 1])
loss = -tf.log(hit_prob)
return tf.reduce_mean(loss, name='softmax_loss')
def bce_loss(self, sim):
"""Computes the bce (binary cross entropy) loss.
Args:
sim (tensor): The sim value for one positive sample and several negative samples.
Returns:
tensor: The bce loss.
"""
        # bce_loss = -log(sigmoid(sim^+)) - log(1 - sigmoid(sim^-))
hit = tf.slice(sim, [0, 0], [-1, 1])
miss = tf.slice(sim, [0, 1], [-1, -1])
labels = tf.concat([tf.ones_like(hit), tf.zeros_like(miss)], axis=-1)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels,
logits=sim
)
return tf.reduce_mean(loss, name='bce_loss')
def triplet_loss(self, sim, margin=0.5):
"""Computes the triplet loss.
Args:
sim (tensor): The sim value for one positive sample and several negative samples.
margin (float, optional): The margin value.
Returns:
tensor: The triplet loss.
"""
# sim = sigmoid(5.0 - 5.0*distance)
pos_sim, neg_sim = tf.slice(sim, [0, 0], [-1, 1]), tf.slice(sim, [0, 1], [-1, -1])
# pos_sim has larger similarity than neg_sim
triplet_loss = margin + neg_sim - pos_sim
triplet_loss = tf.nn.relu(triplet_loss)
triplet_loss = tf.reduce_sum(triplet_loss, axis=-1)
triplet_loss = tf.reduce_mean(triplet_loss)
# wmrb_loss = tf.log(1.0 + margin_loss)
return triplet_loss
def ranking_loss(self, sim, margin=1.0):
"""Computes the ranking loss.
Args:
sim (tensor): The sim value for one positive sample and several negative samples.
margin (float, optional): The margin value.
Returns:
tensor: The ranking loss.
"""
# sim = 1.0 - distance
pos_sim, neg_sim = tf.slice(sim, [0, 0], [-1, 1]), tf.slice(sim, [0, 1], [-1, -1])
pos_dis, neg_dis = 1.0 - pos_sim, 1.0 - neg_sim
hinge_loss = tf.nn.relu(margin - neg_dis)
ranking_loss = tf.reduce_mean(
pos_dis) + tf.reduce_mean(tf.reduce_sum(hinge_loss, axis=-1))
return ranking_loss
def bpr_loss(self, sim):
"""Computes the bpr loss.
Args:
sim (tensor): The sim value for one positive sample and several negative samples.
Returns:
tensor: The bpr loss.
"""
# sim = 1.0 - distance
pos_sim, neg_sim = tf.slice(sim, [0, 0], [-1, 1]), tf.slice(sim, [0, 1], [-1, -1])
margin = pos_sim - neg_sim
# bpr loss = -log(sigmoid(x))
labels = tf.ones_like(margin)
bpr_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels,
logits=margin
)
return tf.reduce_mean(bpr_loss, name='bpr_loss')
def __call__(self, *args, **kwargs):
return self.loss(*args, **kwargs)
# reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# return self.loss(*args, **kwargs) + self.weight_decay * tf.add_n(reg_losses)
``` |
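All of these losses assume the same layout for `sim`: column 0 is the score of the positive pair and the remaining columns are the sampled negatives. The NumPy snippet below is an illustration only, with made-up numbers, showing the triplet and softmax variants on that layout.
```python
import numpy as np
# Column 0 is the positive score, the rest are negatives; values are made up.
sim = np.array([[0.9, 0.2, 0.4],
                [0.6, 0.7, 0.1]])
pos, neg = sim[:, :1], sim[:, 1:]
margin = 0.5
triplet = np.maximum(margin + neg - pos, 0.0).sum(axis=-1).mean()
prob = np.exp(sim) / np.exp(sim).sum(axis=1, keepdims=True)
softmax = (-np.log(prob[:, 0])).mean()
print(triplet, softmax)
```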
{
"source": "jia-kai/minisatcs",
"score": 3
} |
#### File: SAT/ineq/gen_rand.py
```python
import numpy as np
import argparse
import operator
def main():
parser = argparse.ArgumentParser(
description='generate random input with inequalities',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('nvar', type=int)
parser.add_argument('output', help='output file name')
parser.add_argument('--min-incl', type=int, default=5,
help='minimal number of clauses containing a given var')
parser.add_argument('--cl-size-min', type=int, default=2,
help='minimal number of literals in a clause')
parser.add_argument('--cl-bias-max', type=int, default=10,
help='max clause bias value')
parser.add_argument('--cl-bias-offset', type=int, default=3,
help='max random offset of bias value')
parser.add_argument('--seed', type=int, default=np.random.randint(2**32),
help='rng seed')
args = parser.parse_args()
    rng = np.random.RandomState(args.seed)
    assignment = rng.randint(2, size=args.nvar)
    incl_cnt = np.zeros(args.nvar, dtype=np.uint32)
    clauses = []
    bias_th0 = args.cl_bias_max
    # keep adding clauses until every variable appears in at least min_incl of them
    while incl_cnt.min() < args.min_incl:
cur_vars = rng.choice(
args.nvar, size=rng.randint(args.cl_size_min, args.nvar + 1),
replace=False)
incl_cnt[cur_vars] += 1
mask = assignment[cur_vars]
cur_val = mask.sum()
bias_th1 = cur_vars.size - bias_th0
cur_vars += 1
# fix cur_val range by negating some vars
if bias_th0 < cur_val < bias_th1:
offset = rng.randint(-args.cl_bias_offset,
args.cl_bias_offset + 1)
if cur_val < (bias_th0 + bias_th1) / 2:
# change some true lits to false
nr = max(cur_val - bias_th0 + offset, 0)
sel, = np.where(mask == 1)
cur_val -= nr
else:
                # change some false lits to true
nr = max(bias_th1 - cur_val + offset, 0)
sel, = np.where(mask == 0)
cur_val += nr
sel = rng.choice(sel, size=nr, replace=False)
cur_vars[sel] *= -1
cur_clause = list(cur_vars)
if rng.randint(2):
cur_clause.append('<=')
cmpr = operator.le
else:
cur_clause.append('>=')
cmpr = operator.ge
offset = rng.randint(-args.cl_bias_offset,
args.cl_bias_offset + 1)
cur_clause.append(cur_val + offset)
cur_clause.append('#')
dst = rng.randint(1, args.nvar + 1)
if int(cmpr(0, offset)) != assignment[dst - 1]:
dst = -dst
cur_clause.append(dst)
clauses.append(' '.join(map(str, cur_clause)))
with open(args.output, 'w') as fout:
fout.write(f'c args: {args}\n')
fout.write('c assignment: ')
fout.write(' '.join(str(i + 1) if j else str(-(i + 1))
for i, j in enumerate(assignment)))
fout.write('\n')
fout.write(f'p cnf {args.nvar} {len(clauses)}\n')
for i in clauses:
fout.write(i)
fout.write('\n')
if __name__ == '__main__':
main()
``` |
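Each generated clause line has the form `lit ... lit <=|>= bound # dst`, where `dst` is a literal that is true under the planted assignment exactly when the inequality holds. The checker below is a hypothetical helper (not part of the repository) that re-evaluates one such clause line against a 0/1 assignment array like the one the script records in its `c assignment:` comment.
```python
import operator
def check_clause(line, assignment):
    """Hypothetical helper: True if a generated clause line agrees with the 0/1 assignment."""
    toks = line.split()
    sep = toks.index('<=') if '<=' in toks else toks.index('>=')
    cmpr = operator.le if toks[sep] == '<=' else operator.ge
    lits = [int(t) for t in toks[:sep]]
    bound = int(toks[sep + 1])
    dst = int(toks[-1])  # the reification literal after the '#' separator
    lit_val = lambda l: assignment[abs(l) - 1] if l > 0 else 1 - assignment[abs(l) - 1]
    holds = cmpr(sum(lit_val(l) for l in lits), bound)
    return int(holds) == lit_val(dst)
```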
{
"source": "jia-kai/SANM",
"score": 2
} |
#### File: SANM/utils/test_svdw_grad.py
```python
import numpy as np
import itertools
rng = np.random.RandomState(42)
def svdw(m):
n = m.shape[0]
assert m.shape == (n, n)
u, s, vt = np.linalg.svd(m)
w = u @ vt
assert np.allclose(u.T @ u, np.eye(n))
assert np.allclose(w.T @ w, np.eye(n))
assert np.allclose(u @ np.diag(s) @ u.T @ w, m)
return u, s, w
def check_eq(msg, a, b):
diff = np.abs(a - b).max()
assert diff < 1e-5, (msg, diff)
def svdw_jacobian(M, u, s, w):
n = M.shape[0]
assert M.shape == u.shape == w.shape == (n, n)
v = w.T @ u
dsdm = np.empty((n, n*n), dtype=M.dtype)
for i in range(n):
for j in range(n):
for k in range(n):
dsdm[i, j*n+k] = u.T[i, j] * v[k, i]
dwdy = np.empty((n*n, n*n), dtype=M.dtype)
dydm = np.empty_like(dwdy)
dudx = np.empty_like(dwdy)
dxdm = np.empty_like(dwdy)
for i, j, k, l in itertools.product(range(n), range(n), range(n), range(n)):
cij = u.T[i, k] * v[l, j]
cji = u.T[j, k] * v[l, i]
dydm[i*n+j, k*n+l] = 0 if i == j else (cij - cji) / (s[i] + s[j])
dwdy[i*n+j, k*n+l] = u[i, k] * v.T[l, j]
dudx[i*n+j, k*n+l] = 0 if l != j else u[i, k]
dxdm[i*n+j, k*n+l] = 0 if i == j else (
cij * s[j] + cji * s[i]) / (s[j]**2 - s[i]**2)
return dudx @ dxdm, dsdm, dwdy @ dydm
def svdw_jacobian_num(M, u, s, w, eps=1e-4):
n = M.shape[0]
assert M.shape == (n, n)
dudm = np.zeros((n*n, n*n), dtype=M.dtype)
dsdm = np.zeros((n, n*n), dtype=M.dtype)
dwdm = np.zeros((n*n, n*n), dtype=M.dtype)
grad = lambda x, y: (y - x).flatten() / (eps * 2)
for i in range(n):
for j in range(n):
x0 = M[i, j]
M[i, j] = x0 - eps
u1, s1, w1 = svdw(M)
M[i, j] = x0 + eps
u2, s2, w2 = svdw(M)
M[i, j] = x0
p = i*n+j
dudm[:, p] = grad(u1, u2)
dsdm[:, p] = grad(s1, s2)
dwdm[:, p] = grad(w1, w2)
return dudm, dsdm, dwdm
def main():
np.set_printoptions(4, suppress=True)
n = 8
m = rng.normal(size=(n, n))
u, s, w = svdw(m)
print('det(m):', np.linalg.det(m))
print('s:', s)
for gsym, gnum, name in zip(svdw_jacobian(m, u, s, w),
svdw_jacobian_num(m, u, s, w),
['dU/dM', 'dS/dM', 'dW/dM']):
print(f'====== {name}')
if gsym.shape[0] == gsym.shape[1]:
print('grad det:', np.linalg.det(gsym))
print('grad rank:', np.linalg.matrix_rank(gsym))
diff = np.abs(gsym - gnum).mean()
print(diff, diff / np.abs(gnum).mean())
if __name__ == '__main__':
main()
``` |
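`svdw_jacobian_num` uses a central difference, `(f(x + eps) - f(x - eps)) / (2 * eps)`. The same recipe on a scalar function, shown below purely as an illustration, makes the expected accuracy of that convention easy to see.
```python
import numpy as np
# Central difference, as in svdw_jacobian_num: (f(x + eps) - f(x - eps)) / (2 * eps).
f, x, eps = np.sin, 0.7, 1e-4
numeric = (f(x + eps) - f(x - eps)) / (2 * eps)
print(numeric, np.cos(x))  # should agree to roughly eps**2
```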
{
"source": "jiakechong1991/Surprise",
"score": 3
} |
#### File: Surprise/tests/test_train2fit.py
```python
import os
import pytest
from surprise import Dataset
from surprise import Reader
from surprise import AlgoBase
from surprise.model_selection import KFold
data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
data = Dataset.load_from_file(data_file, Reader('ml-100k'))
kf = KFold(n_splits=2)
def test_new_style_algo():
    '''Test that new algorithms (i.e. algorithms that only define fit()) can
support both calls to fit() and to train()
- algo.fit() is the new way of doing things
- supporting algo.train() is needed for the (unlikely?) case where a user
has defined custom tools that use algo.train().
'''
class CustomAlgoFit(AlgoBase):
def __init__(self):
AlgoBase.__init__(self)
self.cnt = -1
def fit(self, trainset):
AlgoBase.fit(self, trainset)
self.est = 3
self.bu, self.bi = 1, 1
self.cnt += 1
def estimate(self, u, i):
return self.est
algo = CustomAlgoFit()
for i, (trainset, testset) in enumerate(kf.split(data)):
algo.fit(trainset)
predictions = algo.test(testset)
# Make sure AlgoBase.fit has been called
assert hasattr(algo, 'trainset')
# Make sure CustomAlgoFit.fit has been called
assert all(est == 3 for (_, _, _, est, _) in predictions)
# Make sure AlgoBase.fit is finished before CustomAlgoFit.fit
assert (algo.bu, algo.bi) == (1, 1)
# Make sure the rest of fit() is only called once
assert algo.cnt == i
algo = CustomAlgoFit()
for i, (trainset, testset) in enumerate(kf.split(data)):
with pytest.warns(UserWarning):
algo.train(trainset)
predictions = algo.test(testset)
# Make sure AlgoBase.fit has been called
assert hasattr(algo, 'trainset')
# Make sure CustomAlgoFit.fit has been called
assert all(est == 3 for (_, _, _, est, _) in predictions)
# Make sure AlgoBase.fit is finished before CustomAlgoFit.fit
assert (algo.bu, algo.bi) == (1, 1)
# Make sure the rest of fit() is only called once
assert algo.cnt == i
def test_old_style_algo():
    '''Test that old algorithms (i.e. algorithms that only define train()) can
support both calls to fit() and to train()
- supporting algo.fit() is needed so that custom algorithms that only
    define train() can still use up-to-date tools (such as evaluate, which has
been updated to use fit()).
- algo.train() is the old way, and must still be supported for custom
algorithms and tools.
'''
class CustomAlgoTrain(AlgoBase):
def __init__(self):
AlgoBase.__init__(self)
self.cnt = -1
def train(self, trainset):
AlgoBase.train(self, trainset)
self.est = 3
self.bu, self.bi = 1, 1
self.cnt += 1
def estimate(self, u, i):
return self.est
with pytest.warns(UserWarning):
algo = CustomAlgoTrain()
for i, (trainset, testset) in enumerate(kf.split(data)):
with pytest.warns(UserWarning):
algo.fit(trainset)
predictions = algo.test(testset)
# Make sure AlgoBase.fit has been called
assert hasattr(algo, 'trainset')
        # Make sure CustomAlgoTrain.train has been called
assert all(est == 3 for (_, _, _, est, _) in predictions)
# Make sure AlgoBase.fit is finished before CustomAlgoTrain.train
assert (algo.bu, algo.bi) == (1, 1)
# Make sure the rest of train() is only called once
assert algo.cnt == i
with pytest.warns(UserWarning):
algo = CustomAlgoTrain()
for i, (trainset, testset) in enumerate(kf.split(data)):
with pytest.warns(UserWarning):
algo.train(trainset)
predictions = algo.test(testset)
# Make sure AlgoBase.fit has been called
assert hasattr(algo, 'trainset')
        # Make sure CustomAlgoTrain.train has been called
assert all(est == 3 for (_, _, _, est, _) in predictions)
# Make sure AlgoBase.fit is finished before CustomAlgoTrain.train
assert (algo.bu, algo.bi) == (1, 1)
# Make sure the rest of train() is only called once
assert algo.cnt == i
``` |
{
"source": "jiakeke/django-ckeditor",
"score": 2
} |
#### File: ckeditor/image/pillow_backend.py
```python
from io import BytesIO
import os.path
try:
from PIL import Image, ImageOps
except ImportError:
import Image
import ImageOps
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import InMemoryUploadedFile
from ckeditor import utils
THUMBNAIL_SIZE = (75, 75)
def image_verify(f):
try:
Image.open(f).verify()
except IOError:
raise utils.NotAnImageException
def resize(file_path, max_width):
thumbnail_format = utils.get_image_format(os.path.splitext(file_path)[1])
file_format = thumbnail_format.split('/')[1]
image = default_storage.open(file_path)
image = Image.open(image)
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
size_orig = image.size
width_orig = size_orig[0]
    if width_orig <= max_width:
        # image is already narrow enough; leave the stored file untouched
        return
    height = int(1.0 * max_width / width_orig * size_orig[1])
    size = (max_width, height)
    default_storage.delete(file_path)
    # scale and crop to the computed size, then re-save through the storage backend
    imagefit = ImageOps.fit(image, size, Image.ANTIALIAS)
thumbnail_io = BytesIO()
imagefit.save(thumbnail_io, format=file_format)
thumbnail = InMemoryUploadedFile(
thumbnail_io,
None,
file_path,
thumbnail_format,
len(thumbnail_io.getvalue()),
None)
thumbnail.seek(0)
return default_storage.save(file_path, thumbnail)
def create_thumbnail(file_path):
thumbnail_filename = utils.get_thumb_filename(file_path)
thumbnail_format = utils.get_image_format(os.path.splitext(file_path)[1])
file_format = thumbnail_format.split('/')[1]
image = default_storage.open(file_path)
image = Image.open(image)
# Convert to RGB if necessary
# Thanks to Limodou on DjangoSnippets.org
# http://www.djangosnippets.org/snippets/20/
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
# scale and crop to thumbnail
imagefit = ImageOps.fit(image, THUMBNAIL_SIZE, Image.ANTIALIAS)
thumbnail_io = BytesIO()
imagefit.save(thumbnail_io, format=file_format)
thumbnail = InMemoryUploadedFile(
thumbnail_io,
None,
thumbnail_filename,
thumbnail_format,
len(thumbnail_io.getvalue()),
None)
thumbnail.seek(0)
return default_storage.save(thumbnail_filename, thumbnail)
def should_create_thumbnail(file_path):
image = default_storage.open(file_path)
try:
Image.open(image)
except IOError:
return False
else:
return True
``` |
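Both helpers boil down to `ImageOps.fit` plus re-saving through the Django storage backend. The standalone Pillow sketch below shows the same scale-and-crop step without any storage layer; the file names are placeholders.
```python
from PIL import Image, ImageOps
# 'input.jpg' and 'thumb.jpg' are placeholder paths used only for illustration.
img = Image.open('input.jpg')
if img.mode not in ('L', 'RGB'):
    img = img.convert('RGB')
thumb = ImageOps.fit(img, (75, 75), Image.ANTIALIAS)  # scale, then center-crop to 75x75
thumb.save('thumb.jpg', format='JPEG')
```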
{
"source": "jial13/Navigation",
"score": 3
} |
#### File: jial13/Navigation/dqn_agent.py
```python
import random
from collections import namedtuple, deque
import numpy as np
import torch
import torch.nn
import torch.optim as optim
import torch.nn.functional as F
from Model import QNetwork
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
def __init__(self, bufferSize, batchSize, gamma, tau, lr, updateEvery, state_size, action_size, seed):
print(device)
# hyper parameters
self.BufferSize = bufferSize
self.BatchSize = batchSize
self.Gamma = gamma
self.Tau = tau
self.LR = lr
self.UpdateEvery = updateEvery
self.State_size = state_size
self.Action_size = action_size
self.Seed = random.seed(seed)
# Q network
self.QNetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.QNetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.Optimizer = optim.Adam(self.QNetwork_local.parameters(), lr=self.LR)
# Replay memory
self.memory = ReplayBuffer(action_size, self.BufferSize, self.BatchSize, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def act(self, state, eps=0.):
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.QNetwork_local.eval()
with torch.no_grad():
action_values = self.QNetwork_local(state)
self.QNetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.Action_size))
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % self.UpdateEvery
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > self.BatchSize:
experiences = self.memory.sample()
self.learn(experiences, self.Gamma)
def learn(self, experiences, gamma):
states, actions, rewards, next_states, dones = experiences
q_targets_next = self.QNetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
# Compute Q targets for current states
q_targets = rewards + (gamma * q_targets_next * (1 - dones))
# Get expected Q values from local model
q_expected = self.QNetwork_local(states).gather(1, actions)
# Compute loss
loss = F.mse_loss(q_expected, q_targets)
# Minimize the loss
self.Optimizer.zero_grad()
loss.backward()
self.Optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.QNetwork_local, self.QNetwork_target, self.Tau)
def soft_update(self, local_model, target_model, tau):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
def __init__(self, action_size, buffer_size, batch_size, seed):
self.Action_size = action_size
self.Memory = deque(maxlen=buffer_size)
self.Batch_size = batch_size
self.Experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.Seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
e = self.Experience(state, action, reward, next_state, done)
self.Memory.append(e)
def sample(self):
experiences = random.sample(self.Memory, k=self.Batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
return states, actions, rewards, next_states, dones
def __len__(self):
return len(self.Memory)
``` |
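`Agent.soft_update` is a Polyak update: after each learning step the target network's parameters are moved a fraction `tau` toward the local network's parameters. A self-contained toy version of that single step (illustration only, not tied to the `QNetwork` model above):
```python
import torch
local = torch.nn.Linear(4, 2)   # stand-ins for QNetwork_local / QNetwork_target
target = torch.nn.Linear(4, 2)
tau = 1e-3
for t_p, l_p in zip(target.parameters(), local.parameters()):
    t_p.data.copy_(tau * l_p.data + (1.0 - tau) * t_p.data)
```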
{
"source": "jialehuo/ceilometer-ecs",
"score": 2
} |
#### File: ceilometer-ecs/ceilometer_ecs/discovery.py
```python
import dateutil.parser
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
import ecs_resource
from pollsters import ecs_billing_dao
LOG = log.getLogger(__name__)
cfg.CONF.register_group(cfg.OptGroup(
name='ecs', title="Configuration for ECS Meters"
))
OPTS = [
cfg.StrOpt('ecs_endpoint'),
cfg.StrOpt('ecs_username'),
cfg.StrOpt('ecs_password'),
cfg.StrOpt('ecs_cert_path'),
cfg.StrOpt('sample_start_time'),
cfg.StrOpt('sample_interval'),
cfg.StrOpt('sample_delay'),
cfg.StrOpt('ceilometer_endpoint'),
cfg.StrOpt('os_project_name'),
cfg.StrOpt('os_project_domain_name'),
cfg.StrOpt('os_username'),
cfg.StrOpt('os_password'),
cfg.StrOpt('os_user_domain_name'),
cfg.StrOpt('os_auth_url')
]
cfg.CONF.register_opts(OPTS, group='ecs')
class ECSDiscovery(plugin_base.DiscoveryBase):
def __init__(self):
super(ECSDiscovery, self).__init__()
def discover(self, manager, param=None):
resources = []
resource = ecs_resource.ECSResource(
ecs_endpoint=cfg.CONF['ecs'].ecs_endpoint,
ecs_username=cfg.CONF['ecs'].ecs_username,
ecs_password=cfg.CONF['ecs'].ecs_password,
ecs_cert_path=cfg.CONF['ecs'].ecs_cert_path,
sample_start_time=dateutil.parser.parse(cfg.CONF['ecs'].sample_start_time),
sample_interval=int(cfg.CONF['ecs'].sample_interval),
sample_delay=int(cfg.CONF['ecs'].sample_delay),
ceilometer_endpoint=cfg.CONF['ecs'].ceilometer_endpoint,
os_project_name=cfg.CONF['ecs'].os_project_name,
os_project_domain_name=cfg.CONF['ecs'].os_project_domain_name,
os_username=cfg.CONF['ecs'].os_username,
os_password=cfg.CONF['ecs'].os_password,
os_user_domain_name=cfg.CONF['ecs'].os_user_domain_name,
os_auth_url=cfg.CONF['ecs'].os_auth_url
)
dao = ecs_billing_dao.ECSBillingDAO(resource)
resource.ecs_vdc_id = dao.getVDCLocalID()
resources.append(resource)
return resources
```
#### File: ceilometer_ecs/pollsters/ecs_mgmt_client.py
```python
import re
from datetime import datetime, timedelta
import pytz
import requests
import xml.etree.ElementTree as ET
import dateutil.parser
from ceilometerclient.v2 import client as ceiloclient
class ECSManagementClient:
AUTH_TOKEN = '<PASSWORD>'
BUCKET_CREATED_PATTERN = re.compile('Bucket \\S+ has been created')
BUCKET_DELETED_PATTERN = re.compile('Bucket \\S+ has been deleted')
START_TIME_CACHE = 'ecs_start_time'
END_TIME_CACHE = 'ecs_end_time'
TIMESTAMP_CACHE = 'ecs_timestamp'
def __init__(self, resource):
self.resource = resource
def isValidElem(self, elem):
return (elem is not None) and (elem.text is not None) and bool(elem.text.strip())
def login(self):
r = requests.get(self.resource.ecs_endpoint + '/login', auth=(self.resource.ecs_username, self.resource.ecs_password), verify=self.resource.ecs_cert_path)
self.headers = {self.AUTH_TOKEN: r.headers[self.AUTH_TOKEN]}
def logout(self):
r = requests.get(self.resource.ecs_endpoint + '/logout', headers=self.headers, verify=self.resource.ecs_cert_path)
def getVDCLocalID(self):
r = requests.get(self.resource.ecs_endpoint + '/object/vdcs/vdc/local', headers=self.headers, verify=self.resource.ecs_cert_path)
root = ET.fromstring(r.text)
if self.isValidElem(root.find('id')):
return root.find('id').text.strip()
else:
return None
def getStartTime(self):
kwargs = {
'project_name': self.resource.os_project_name,
'project_domain_name': self.resource.os_project_domain_name,
'username': self.resource.os_username,
'password': self.resource.os_password,
'user_domain_name': self.resource.os_user_domain_name,
'auth_url': self.resource.os_auth_url
}
client = ceiloclient.Client(self.resource.ceilometer_endpoint, **kwargs)
filter = "{\"=\": {\"meter\": \"ecs.objects\"}}"
orderby = "[{\"timestamp\": \"DESC\"}]"
limit = 1
result = client.query_samples.query(filter=filter, orderby=orderby, limit=limit)
if (result is not None) and (len(result) > 0) and (result[0].to_dict() is not None) and (result[0].to_dict().get("metadata") is not None) and (result[0].to_dict().get("metadata").get("end_time") is not None):
return dateutil.parser.parse(result[0].to_dict().get("metadata").get("end_time"))
else:
return self.resource.sample_start_time
def isSampleTime(self):
self.start_time = self.getStartTime()
self.end_time = self.start_time + timedelta(minutes=self.resource.sample_interval)
sample_time = self.end_time + timedelta(minutes=self.resource.sample_delay)
self.timestamp = datetime.now(pytz.utc)
if (sample_time > self.timestamp):
return False
else:
return True
def getNamespaceSamples(self, manager, cache):
namespaces = []
if not self.isSampleTime():
return namespaces
cache[self.START_TIME_CACHE] = self.start_time.astimezone(pytz.utc).isoformat()
cache[self.END_TIME_CACHE] = self.end_time.astimezone(pytz.utc).isoformat()
cache[self.TIMESTAMP_CACHE] = self.timestamp.astimezone(pytz.utc).isoformat()
if manager.keystone.version == 'v3':
projects = manager.keystone.projects.list()
else:
projects = manager.keystone.tenants.list()
for project in projects:
namespace = self.getNamespaceSample(project.id, cache[self.START_TIME_CACHE], cache[self.END_TIME_CACHE])
if namespace is not None:
namespaces.append(namespace)
return namespaces
def getNamespaceSample(self, id, start_time, end_time):
namespace = {'id': id, 'total_buckets': 0}
next_marker = ''
while True:
r = requests.get(self.resource.ecs_endpoint + '/object/billing/namespace/' + id + '/sample?sizeunit=KB&include_bucket_detail=true&start_time=' + start_time + '&end_time=' + end_time + next_marker, headers=self.headers, verify=self.resource.ecs_cert_path)
root = ET.fromstring(r.text)
if root.tag == 'error':
return None
if self.isValidElem(root.find('total_objects')):
namespace['total_objects'] = long(root.find('total_objects').text.strip())
if self.isValidElem(root.find('total_size')):
namespace['total_size'] = long(root.find('total_size').text.strip())
if self.isValidElem(root.find('total_size_unit')):
namespace['total_size_unit'] = root.find('total_size_unit').text.strip()
if self.isValidElem(root.find('objects_created')):
namespace['objects_created'] = long(root.find('objects_created').text.strip())
if self.isValidElem(root.find('objects_deleted')):
namespace['objects_deleted'] = long(root.find('objects_deleted').text.strip())
if self.isValidElem(root.find('bytes_delta')):
namespace['bytes_delta'] = long(root.find('bytes_delta').text.strip())
if self.isValidElem(root.find('ingress')):
namespace['ingress'] = long(root.find('ingress').text.strip())
if self.isValidElem(root.find('egress')):
namespace['egress'] = long(root.find('egress').text.strip())
if (root.findall('./bucket_billing_sample/name') is not None):
namespace['total_buckets'] += len(root.findall('./bucket_billing_sample/name'))
if self.isValidElem(root.find('next_marker')):
next_marker = '&marker=' + root.find('next_marker').text.strip()
else:
break
# get bucket delta samples through audit events
namespace['buckets_created'] = 0
namespace['buckets_deleted'] = 0
next_marker = ''
while True:
r = requests.get(self.resource.ecs_endpoint + '/vdc/events?namespace=' + id + '&start_time=' + start_time + '&end_time=' + end_time + next_marker, headers=self.headers, verify=self.resource.ecs_cert_path)
root = ET.fromstring(r.text)
for auditevent in root.findall('./auditevent/description'):
if self.BUCKET_CREATED_PATTERN.match(auditevent.text):
namespace['buckets_created'] += 1
elif self.BUCKET_DELETED_PATTERN.match(auditevent.text):
namespace['buckets_deleted'] += 1
if self.isValidElem(root.find('NextMarker')):
next_marker = '&marker=' + root.find('NextMarker').text.strip()
else:
break
return namespace
``` |
{
"source": "JiaLei123/ML_camp",
"score": 3
} |
#### File: ML_camp/daguan/rnn_gluon_daguan_data_iterator.py
```python
import math
import os
import time
import numpy as np
import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn, data as gdata
import zipfile
from MXnet import utils
from gensim.models import word2vec
import pandas as pd
import numpy as np
class Dictionary(object):
def __init__(self):
self.word_to_idx = {}
self.idx_to_word = []
def add_word(self, word):
if word not in self.word_to_idx:
self.idx_to_word.append(word)
            self.word_to_idx[word] = len(self.idx_to_word) - 1  # i.e. the index of word in idx_to_word
return self.word_to_idx[word]
def __len__(self):
return len(self.idx_to_word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train, _train = self.tokenize(path + 'train.txt')
self.valid, _val = self.tokenize(path + 'valid.txt')
self.test, _test = self.tokenize(path + 'test.txt')
all_sentences = list()
all_sentences.extend(_train)
all_sentences.extend(_val)
all_sentences.extend(_test)
self.w2v = word2vec.Word2Vec(all_sentences)
def tokenize(self, path):
assert os.path.exists(path)
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(path, 'r') as f:
indices = np.zeros((tokens,), dtype="int32")
idx = 0
all_sentences = list()
for line in f:
words = line.split() + ['<eos>']
for word in words:
indices[idx] = self.dictionary.word_to_idx[word]
idx += 1
all_sentences.append(words)
return mx.nd.array(indices, dtype='int32'), all_sentences
class RNNModel(gluon.Block):
def __init__(self, mode, embed_dim, hidden_dim, num_layers, w2v_vec, drop_out=0.5, **kwargs):
super(RNNModel, self).__init__(**kwargs)
with self.name_scope():
self.drop = nn.Dropout(drop_out)
# self.encoder = nn.Embedding(grad_red='null')
# self.encoder.weight.set_data(w2v_vec)
if mode == "rnn_relu":
self.rnn = rnn.RNN(hidden_dim, num_layers, activation='relu', dropout=drop_out, input_size=embed_dim)
elif mode == "rnn_tanh":
self.rnn = rnn.RNN(hidden_dim, num_layers, activation='tanh', dropout=drop_out, input_size=embed_dim)
elif mode == "lstm":
self.rnn = rnn.LSTM(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)
elif mode == "gru":
self.rnn = rnn.GRU(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)
else:
raise ValueError("Invalid Mode")
self.decoder = nn.Dense(19, in_units=hidden_dim)
self.hidden_dim = hidden_dim
self.w2v_vec = w2v_vec
# def get_vec(self,inputs):
# input_vec = []
# for word in enumerate(inputs):
# try:
# input_vec.append(self.w2v_vec.wv[word])
# except:
# input_vec.append(np.random.uniform(-0.25, 0.25, self.w2v_vec.vector_size))
# return mx.nd.array(input_vec).reshape((len(inputs), 1, -1))
def forward(self, inputs, state):
# input_node = self.get_vec(inputs)
# emb = self.drop(inputs)
outputs = []
for input in inputs:
step, vec_size = input.shape
input = input.reshape(step, 1, -1)
output, state = self.rnn(input, state)
output = self.drop(output)
output = output[-1].reshape((batch_size, -1))
outputs.append(output)
outputs = mx.nd.concat(*outputs, dim=0)
decoded = self.decoder(outputs.reshape((-1, self.hidden_dim)))
return decoded, state
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
def get_batch(source, label, i):
data = source[i]
target = label[i]
return data, target
def get_data_iter(path, batch_size, w2v_vec):
total_data = pd.read_csv(path)
data = total_data["article"][0:100]
f = lambda x: [w2v_vec.wv.get_vector(xi) for xi in x.split(" ")]
#
data = data.apply(f)
# data = pd.read_pickle("E:\\ML_learning\\Daguan\\data\\train_data_vec.plk", "gzip")
label = total_data["class"][0:100]
# dataset = gdata.ArrayDataset(data, label)
# data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
return data, label
def detach(state):
if isinstance(state, (tuple, list)):
state = [i.detach() for i in state]
else:
state = state.detach()
return state
def model_eval(data_source):
total_L = 0.0
ntotal = 0
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size_clas, ctx=context)
for i in range(0, data_source.shape[0] - 1, num_steps):
data, target = get_batch(data_source, i)
output, hidden = model(data, hidden)
L = loss(output, target)
total_L += mx.nd.sum(L).asscalar()
ntotal += L.size
return total_L / ntotal
def train():
for epoch in range(epochs):
total_L = 0
start_time = time.time()
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size_clas, ctx=context)
batch_num = 0
for X, y in train_data_iter:
batch_num += 1
hidden = detach(hidden)
with autograd.record():
output, hidden = model(X, hidden)
# output = output[-1].reshape((1, -1))
L = loss(output, y)
L.backward()
grads = [i.grad(context) for i in model.collect_params().values()]
gluon.utils.clip_global_norm(grads, clipping_norm * num_steps * batch_size)
trainer.step(batch_size)
total_L += mx.nd.sum(L).asscalar()
if batch_num % eval_period == 0 and batch_num > 0:
cur_L = total_L / batch_num
train_acc = evaluate_accuracy(train_data_iter, model)
print('[Epoch %d Batch %d] loss %.2f, Train acc %f' % (epoch + 1, batch_num, cur_L, train_acc))
cur_L = total_L / len(train_data_iter)
train_acc = evaluate_accuracy(train_data_iter, model)
        print('[Epoch %d] loss %.2f, Train acc %f' % (epoch + 1, cur_L, train_acc))
def _get_batch(batch, ctx):
"""return data and label on ctx"""
if isinstance(batch, mx.io.DataBatch):
data = batch.data[0]
label = batch.label[0]
else:
data, label = batch
return (gluon.utils.split_and_load(data, ctx),
gluon.utils.split_and_load(label, ctx),
data.shape[0])
def evaluate_accuracy(data_iterator, net, ctx=[mx.cpu()]):
if isinstance(ctx, mx.Context):
ctx = [ctx]
acc = mx.nd.array([0])
n = 0.
if isinstance(data_iterator, mx.io.MXDataIter):
data_iterator.reset()
for batch in data_iterator:
data, label, batch_size = _get_batch(batch, ctx)
for X, y in zip(data, label):
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size_clas, ctx=context)
y = y.astype('float32')
pred, _ = net(X, hidden)
acc += mx.nd.sum(pred.argmax(axis=1) == y).copyto(mx.cpu())
n += y.size
acc.wait_to_read() # don't push too many operators into backend
return acc.asscalar() / n
if __name__=="__main__":
model_name = 'rnn_relu'
embed_dim = 100
hidden_dim = 100
num_layers = 2
lr = 1
clipping_norm = 0.2
epochs = 10
batch_size = 5
batch_size_clas = 1
num_steps = 1
dropout_rate = 0.2
eval_period = 20
context = utils.try_gpu()
train_data_path = "E:\\ML_learning\\Daguan\\data\\train_data.csv"
w2v = word2vec.Word2Vec.load("E:\\ML_learning\\Daguan\\data\\mymodel")
# test_data_path = ""
# train_data, label = get_data_iter(train_data_path, batch_size, w2v)
train_data_iter = get_data_iter(train_data_path, batch_size, w2v)
model = RNNModel(model_name, embed_dim, hidden_dim, num_layers, w2v, dropout_rate)
model.collect_params().initialize(mx.init.Xavier(), ctx=context)
trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0, 'wd': 0})
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# model_eval(val_data)
train()
# test_L = model_eval(test_data_iter)
# print('Test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))
```
#### File: ML_camp/daguan/rnn_gluon_daguan_w2v_lstm.py
```python
import math
import os
import random
import time
import numpy as np
import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn, data as gdata
import zipfile
from MXnet import utils
from gensim.models import word2vec
import pandas as pd
import numpy as np
high_frequency_word_list = ['1044285', '7368', '856005', '72195', '195449', '359838', '239755', '427848', '316564']
class RNNModel(gluon.Block):
def __init__(self, mode, embed_dim, hidden_dim, num_layers, w2v_vec, drop_out=0.5, **kwargs):
super(RNNModel, self).__init__(**kwargs)
with self.name_scope():
self.drop = nn.Dropout(drop_out)
# self.encoder = nn.Embedding(grad_red='null')
# self.encoder.weight.set_data(w2v_vec)
if mode == "rnn_relu":
self.rnn = rnn.RNN(hidden_dim, num_layers, activation='relu', dropout=drop_out, input_size=embed_dim)
elif mode == "rnn_tanh":
self.rnn = rnn.RNN(hidden_dim, num_layers, activation='tanh', dropout=drop_out, input_size=embed_dim)
elif mode == "lstm":
self.rnn = rnn.LSTM(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)
elif mode == "gru":
self.rnn = rnn.GRU(hidden_dim, num_layers, dropout=drop_out, input_size=embed_dim)
else:
raise ValueError("Invalid Mode")
self.decoder = nn.Dense(19, in_units=hidden_dim)
self.hidden_dim = hidden_dim
self.w2v_vec = w2v_vec
# def get_vec(self,inputs):
# input_vec = []
# for word in enumerate(inputs):
# try:
# input_vec.append(self.w2v_vec.wv[word])
# except:
# input_vec.append(np.random.uniform(-0.25, 0.25, self.w2v_vec.vector_size))
# return mx.nd.array(input_vec).reshape((len(inputs), 1, -1))
def forward(self, inputs, state):
outputs = []
for input in inputs:
input_node = mx.nd.array(input)
step = input_node.shape[0]
input_node = input_node.reshape(step, 1, -1)
output, out_state = self.rnn(input_node, state)
output = self.drop(output)
output = output[-1]
outputs.append(output)
outputs = mx.nd.concat(*outputs, dim=0)
decoded = self.decoder(outputs)
return decoded, out_state
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
def get_batch(source, label, i):
data = source[i]
target = label[i]
return data, target
def data_iter(source, target, batch_size):
"""
get number_example/batch_size data list and each list has batch_size data
:return:
"""
number_example = len(target)
idx = list(range(number_example))
random.shuffle(idx)
def _data(pos):
return source[pos]
def _lable(pos):
return target[pos]
for i in range(0, number_example, batch_size):
batch_indices = idx[i: min(i + batch_size, number_example)]
data = [_data(j) for j in batch_indices]
label = [_lable(j) for j in batch_indices]
yield data, label
def get_data_iter(path, batch_size, w2v_vec):
total_data = pd.read_csv(path)
data = total_data["article"][0:60000]
f = lambda x: [w2v_vec.wv.get_vector(xi) for xi in
[si for si in x.split(" ") if si not in high_frequency_word_list][0:500]]
# f = lambda x: [xi for xi in x.split(" ")[0:800] ]
#
data = data.apply(f)
label = total_data["class"][0:60000]
# dataset = gdata.ArrayDataset(data, label)
# data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
return np.array(data), np.array(label)
def detach(state):
if isinstance(state, (tuple, list)):
state = [i.detach() for i in state]
else:
state = state.detach()
return state
def train():
for epoch in range(epochs):
total_L = 0
start_time = time.time()
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size_clas, ctx=context)
batch_num = 0
for X, y in data_iter(train_data, label, batch_size):
# X, y = get_batch(train_data, label, i)
hidden = detach(hidden)
with autograd.record():
output, hidden = model(X, hidden)
L = loss(output, mx.nd.array(y))
L.backward()
grads = [i.grad(context) for i in model.collect_params().values()]
gluon.utils.clip_global_norm(grads, clipping_norm * num_steps * batch_size)
trainer.step(batch_size)
total_L += mx.nd.sum(L).asscalar()
batch_num += 1
if batch_num % eval_period == 0 and batch_num > 0:
cur_L = total_L / batch_num / batch_size
# train_acc = evaluate_accuracy(train_data, label, model)
print('[Epoch %d Batch %d] loss %.2f' % (epoch + 1, batch_num, cur_L))
cur_L = total_L / len(label)
train_acc = evaluate_accuracy(train_data, label, model)
        print('[Epoch %d] loss %.2f, Train acc %f' % (epoch + 1, cur_L, train_acc))
        model.save_parameters('lstm_epoch_%d.params' % (epoch + 1))  # checkpoint file name is a placeholder
def evaluate_accuracy(train_data, label, net, ctx=[mx.cpu()]):
if isinstance(ctx, mx.Context):
ctx = [ctx]
acc = mx.nd.array([0])
n = 0.
for X, y in data_iter(train_data, label, batch_size):
# X, y = get_batch(train_data, label, i)
hidden = model.begin_state(func=mx.nd.zeros, batch_size=batch_size_clas, ctx=context)
y = mx.nd.array(y)
y = y.astype('float32')
pred, _ = net(X, hidden)
acc += mx.nd.sum(pred.argmax(axis=1) == y)
n += y.size
acc.wait_to_read() # don't push too many operators into backend
return acc.asscalar() / len(label)
if __name__ == "__main__":
model_name = 'lstm'
embed_dim = 100
hidden_dim = 100
num_layers = 2
lr = 0.2
clipping_norm = 0.2
epochs = 10
batch_size = 100
batch_size_clas = 1
num_steps = 1
dropout_rate = 0.2
eval_period = 50
context = utils.try_gpu()
train_data_path = "E:\\ML_learning\\Daguan\\data\\train_data.csv"
w2v = word2vec.Word2Vec.load("E:\\ML_learning\\Daguan\\data\\mymodel")
# test_data_path = ""
train_data, label = get_data_iter(train_data_path, batch_size, w2v)
# train_data_iter = get_data_iter(train_data_path, batch_size, w2v)
model = RNNModel(model_name, embed_dim, hidden_dim, num_layers, w2v, dropout_rate)
model.collect_params().initialize(mx.init.Xavier(), ctx=context)
trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr})
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# model_eval(val_data)
train()
# test_L = model_eval(test_data_iter)
# print('Test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))
```
#### File: ML_camp/data_mining_annotation/use_word_vec.py
```python
from gensim.models import word2vec
from sklearn.decomposition import PCA
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
mopdelfilePath = 'D:\\ML_learning\\annotation\\model.bin'
model = word2vec.Word2Vec.load(mopdelfilePath)
raw_word_vec = model.wv.vectors
cent_word1 = "[body]"
cent_word2 = "[city]"
cent_word3 = "[search_phrase]"
cent_word4 = "[named_business]"
cent_word5 = "[search_phrase]"
wordList1 = model.wv.most_similar_cosmul(cent_word1, topn=20)
wordList2 = model.wv.most_similar_cosmul(cent_word2, topn=20)
wordList3 = model.wv.most_similar_cosmul(cent_word3, topn=20)
wordList4 = model.wv.most_similar_cosmul(cent_word4, topn=20)
wordList5 = model.wv.most_similar_cosmul(cent_word5, topn=20)
wordList1 = np.append([item[0] for item in wordList1], cent_word1)
wordList2 = np.append([item[0] for item in wordList2], cent_word2)
wordList3 = np.append([item[0] for item in wordList3], cent_word3)
wordList4 = np.append([item[0] for item in wordList4], cent_word4)
wordList5 = np.append([item[0] for item in wordList5], cent_word5)
def get_word_index(word):
index = model.wv.vocab[word].index
return index
index_list1 = map(get_word_index, wordList1)
index_list2 = map(get_word_index, wordList2)
index_list3 = map(get_word_index, wordList3)
index_list4 = map(get_word_index, wordList4)
index_list5 = map(get_word_index, wordList5)
vec_reduced = PCA(n_components=2).fit_transform(raw_word_vec)
# fig = plt.figure()
# ax = fig.add_subplot(111)
zhfont = matplotlib.font_manager.FontProperties(fname=r'C:\Nuance\python_env\basic_dev\Lib\site-packages\matplotlib\mpl-data\fonts\ttf\msyh.ttf')
x = np.arange(-10, 10, 0.1)
y = x
plt.plot(x, y)
for i in index_list1:
plt.text(vec_reduced[i][0], vec_reduced[i][1], model.wv.index2word[i], color='r', fontproperties=zhfont)
# for i in index_list2:
# plt.text(vec_reduced[i][0], vec_reduced[i][1], model.wv.index2word[i], color='b', fontproperties=zhfont)
#
# for i in index_list3:
# plt.text(vec_reduced[i][0], vec_reduced[i][1], model.wv.index2word[i], color='g', fontproperties=zhfont)
#
# for i in index_list4:
# plt.text(vec_reduced[i][0], vec_reduced[i][1], model.wv.index2word[i], color='k', fontproperties=zhfont)
#
# for i in index_list5:
# plt.text(vec_reduced[i][0], vec_reduced[i][1], model.wv.index2word[i], color='c', fontproperties=zhfont)
# plt.axis([40, 160, 0, 0.03])
plt.show()
# indexes = model.wv.most_similar_cosmul('中国')
# for index in indexes:
# print(index)
```
#### File: MXnet/convolutional-neural-networks/cnn-mpl-compare.py
```python
from mxnet.gluon import nn
from mxnet import gluon
import matplotlib.pyplot as plt
from MXnet import utils
from MXnet.display_utils import show_loss_acc_picture, show_loss_acc_for_two_model
def cnn(num_epochs, unit_count):
net = nn.Sequential()
with net.name_scope():
net.add(
nn.Conv2D(channels=20, kernel_size=5, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=50, kernel_size=3, activation="relu"),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Flatten(),
nn.Dense(unit_count, activation="relu"),
nn.Dense(10)
)
net.initialize()
ctx = utils.try_gpu()
batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
return utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=num_epochs)
def mlp(num_epochs, unit_count):
net = nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(unit_count, activation="relu"))
# net.add(gluon.nn.Dense(28 * 28, activation="relu"))
# net.add(gluon.nn.Dense(28 * 28, activation="relu"))
# net.add(gluon.nn.Dense(28 * 28, activation="relu"))
net.add(gluon.nn.Dense(10))
net.initialize()
ctx = utils.try_gpu()
batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.5})
return utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=num_epochs)
num_epochs = 2
unit_count = 56 * 56
train_loss_list, test_loss_list, train_acc_list, test_acc_list = cnn(num_epochs, 256)
train_loss_2_list, test_loss_2_list, train_acc_2_list, test_acc_2_list = mlp(num_epochs, unit_count)
show_loss_acc_for_two_model(unit_count, num_epochs,
train_loss_list, train_loss_2_list,
test_loss_list, test_loss_2_list,
train_acc_list, train_acc_2_list,
test_acc_list, test_acc_2_list,
"cnn", "mpl")
```
#### File: MXnet/supervised-learning/linear-regression-scratch.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import autograd, nd
import numpy as np
import random
import sys
# import utils
number_inputs = 2
number_example = 100000
true_w = nd.array([2, -3.4]).reshape((2, 1))
true_b = 4.2
X = nd.random.normal(scale=1, shape=(number_example, number_inputs))
y = nd.dot(X, true_w) + true_b
y += 0.01 * nd.random.normal(scale=1, shape=y.shape)
# plot
# plt.rcParams['figure.figsize'] = (3.5, 2.5)
# plt.scatter(X[:, 1].asnumpy(), y.asnumpy(), c="r", marker="v")
# plt.show()
batch_size = 10
def data_iter():
"""
get number_example/batch_size data list and each list has batch_size data
:return:
"""
idx = list(range(number_example))
random.shuffle(idx)
for i in range(0, number_example, batch_size):
j = nd.array(idx[i: min(i + batch_size, number_example)])
yield X.take(j), y.take(j)
for data, label in data_iter():
print(data, label)
break
w = nd.random.normal(scale=1, shape=(number_inputs, 1))
b = nd.zeros(1)
params = [w, b]
for param in params:
param.attach_grad()
def hypothesis_function(X, w, b):
"""
hypothesis function
:param X:
:param w:
:param b:
:return:
"""
return nd.dot(X, w) + b
def squared_loss(yhat, y):
"""
loss function
:param yhat:
:param y:
:return:
"""
return (yhat - y.reshape(yhat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
for param in params:
param[:] = param - lr * param.grad / batch_size
def trainer():
lr = 0.05
num_round = 10
for epoch in range(1, num_round + 1):
for X, y in data_iter():
with autograd.record():
output = hypothesis_function(X, w, b)
loss = squared_loss(output, y)
loss.backward()
sgd([w, b], lr, batch_size)
print("epoch %d, loss: %f" % (epoch, squared_loss(hypothesis_function(X, w, b), y).mean().asnumpy()))
trainer()
print(true_w, w)
print(true_b, b)
```
#### File: MXnet/supervised-learning/mlp.py
```python
from mxnet.gluon import nn
from mxnet import gluon
import matplotlib.pyplot as plt
from MXnet import utils
from MXnet.display_utils import show_loss_acc_picture, show_loss_acc_for_two_model
def mlp(num_epochs, unit_count, hidden_layer_num=1):
net = nn.Sequential()
with net.name_scope():
for _ in range(hidden_layer_num):
net.add(gluon.nn.Dense(unit_count, activation="relu"))
net.add(gluon.nn.Dense(10))
net.initialize()
ctx = utils.try_gpu()
batch_size = 256
train_data, test_data = utils.load_data_fashion_mnist(batch_size)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.5})
return utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=num_epochs)
```
#### File: MXnet/supervised-learning/over_fit_under_fit.py
```python
from mxnet import ndarray as nd
from mxnet import autograd
from mxnet import gluon
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 120
import matplotlib.pyplot as plt
num_train = 1000
num_test = 100
true_w = [1.2, -3.4, 5.6]
true_b = 5.0
x = nd.random.normal(shape=(num_train + num_test, 1))
X = nd.concat(x, nd.power(x, 2), nd.power(x, 3))
y = true_w[0] * X[:, 0] + true_w[1] * X[:, 1] + true_w[2] * X[:, 2] + true_b
y += 0.1 * nd.random.normal(shape=y.shape)
def train(X_train, X_test, y_train, y_test):
net = gluon.nn.Sequential()
with net.name_scope():
        net.add(gluon.nn.Dense(1))  # a single output neuron
net.initialize()
learning_rate = 0.01
epochs = 100
batch_size = min(10, y_train.shape[0])
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size, shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {"learning_rate": learning_rate})
square_loss = gluon.loss.L2Loss()
train_loss = []
test_loss = []
for e in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
train_loss.append(square_loss(net(X_train), y_train).mean().asscalar())
test_loss.append(square_loss(net(X_test), y_test).mean().asscalar())
plt.plot(train_loss)
plt.plot(test_loss)
plt.legend(['train', 'test'])
title = 'learned weight' + str(net[0].weight.data()) + 'learned bias' + str(net[0].bias.data())
title = title.replace('\n', '')
plt.title(title, color='blue', wrap=True)
plt.show()
return ('learned weight', net[0].weight.data(), 'learned bias', net[0].bias.data())
train(X[:num_train, :], X[num_train:, :], y[:num_train], y[num_train:])
# train(x[:num_train, :], x[num_train:, :], y[:num_train], y[num_train:])
# train(X[0:2, :], X[num_train:, :], y[0:2], y[num_train:])
```
#### File: mytensorflow/cp05/adv_mnist.py
```python
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500
LAYER2_NODE = 100
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEP = 30000
MOVING_AVERAGE_DECAY = 0.99
def inference(input_tensor, avg_class, w1, b1, w2, b2):
'''
    Compute the forward-pass result of the neural network.
:param input_tensor:
:param avg_class:
:param w1:
:param b1:
:param w2:
:param b2:
:return:
'''
if avg_class == None:
layer1 = tf.nn.relu(tf.matmul(input_tensor, w1) + b1)
return tf.matmul(layer1, w2) + b2
else:
        # use avg_class.average() to get the moving-average (shadow) value of each variable
layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(w1)) + avg_class.average(b1))
return tf.matmul(layer1, avg_class.average(w2)) + avg_class.average(b2)
def train(mnist):
x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
w1 = tf.get_variable('w1', initializer=tf.truncated_normal_initializer(stddev=0.1), shape=[INPUT_NODE, LAYER1_NODE])
b1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
w2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1), name='w2')
b2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
y = inference(x, None, w1, b1, w2, b2)
global_step = tf.Variable(0, trainable=False)
    # initialize the moving average
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    # apply the moving average to all trainable network parameters;
    # tf.trainable_variables() returns all trainable variables
    variable_averages_op = variable_average.apply(tf.trainable_variables())
    # compute y with the moving-averaged weights
    average_y = inference(x, variable_average, w1, b1, w2, b2)
    # use y when computing the loss; use the moving-averaged average_y for evaluation
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
regularization = regularizer(w1) + regularizer(w2)
loss = cross_entropy_mean + regularization
    # exponentially decayed learning rate
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE,
LEARNING_RATE_DECAY)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # combine the backprop parameter update and the moving-average update into a single op;
    # this could also be done with tf.group()
# train_op = tf.group(train_step, variable_averages_op)
with tf.control_dependencies([train_step, variable_averages_op]):
train_op = tf.no_op(name='train')
    # tf.argmax(average_y, 1) picks, for each example, the index of the largest logit in that row
correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
test_feed = {x: mnist.test.images, y_: mnist.test.labels}
for i in range(TRAINING_STEP):
if i % 1000 == 0:
validate_acc = sess.run(accuracy, feed_dict=validate_feed)
print("After %d training step(s), validation accuracy using average mode is %g" % (i, validate_acc))
xs, ys = mnist.train.next_batch(BATCH_SIZE)
sess.run(train_op, feed_dict={x: xs, y_: ys})
test_acc = sess.run(accuracy, feed_dict=test_feed)
print("After %d training step(s), test accuracy using average mode is %g" % (TRAINING_STEP, test_acc))
def main(argv=None):
mnist = input_data.read_data_sets("../data", one_hot=True)
train(mnist)
if __name__ == "__main__":
main()
# tf.app.run()
```
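Since the moving-average bookkeeping above is the easiest part to get wrong, here is a minimal, self-contained TF 1.x sketch (the variable and its values are made up) of how `tf.train.ExponentialMovingAverage` maintains a shadow copy of a variable:
```python
import tensorflow as tf

v = tf.Variable(0, dtype=tf.float32)
num_updates = tf.Variable(0, trainable=False)
ema = tf.train.ExponentialMovingAverage(0.99, num_updates)
maintain_average_op = ema.apply([v])  # creates the shadow variable for v

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.assign(v, 5))
    sess.run(maintain_average_op)
    # the shadow value lags behind v: the effective decay is min(0.99, (1 + 0) / (10 + 0)) = 0.1 here
    print(sess.run([v, ema.average(v)]))
```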
#### File: cp06/mnist_program_conv/mnist_train.py
```python
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import mnist_inference
BATCH_SIZE = 1000
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 6000
MOVING_AVERAGE_DECAY = 0.99
MODEL_NAME = "model.ckpt"
def train(mnist):
x = tf.placeholder(tf.float32, [
BATCH_SIZE,
mnist_inference.IMAGE_SIZE,
mnist_inference.IMAGE_SIZE,
mnist_inference.NUM_CHANNELS], name="x-input")
y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
y = mnist_inference.inference(x, True, regularizer)
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE,
LEARNING_RATE_DECAY)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variables_averages_op]):
train_op = tf.no_op(name="train")
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(TRAINING_STEPS):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
reshape_xs = np.reshape(xs, (
BATCH_SIZE,
mnist_inference.IMAGE_SIZE,
mnist_inference.IMAGE_SIZE,
mnist_inference.NUM_CHANNELS
))
_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshape_xs, y_: ys})
if i % 1000 == 0:
print("After %d training step(s), loss on train batch is %g" % (i, loss_value))
saver.save(sess, os.path.join("model", MODEL_NAME), global_step=global_step)
def main():
mnist = input_data.read_data_sets("../../data", one_hot=True)
train(mnist)
if __name__ == "__main__":
main()
```
#### File: mytensorflow/cp06/view_lnception_v3.py
```python
import tensorflow as tf
import os
MODEL_PATH = r"E:\ML_learning\tensorFlow\inception_dec_2015"
MODEL_FILE = "tensorflow_inception_graph.pb"
with tf.Session() as sess:
with tf.gfile.FastGFile(os.path.join(MODEL_PATH, MODEL_FILE), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
writer = tf.summary.FileWriter("output1", sess.graph)
writer.close()
```
#### File: NG/LinearRessgion_Reg/polyFeatures.py
```python
import numpy as np
def poly_features(X, p):
    X_poly = X[:]  # the first column is the original input feature
    # columns 2 through p hold the original feature raised to the powers 2 through p
    for i in range(2, p + 1):
        X_poly = np.c_[X_poly, X ** i]
return X_poly
```
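A quick sanity check of `poly_features` on a tiny input (a sketch; the import assumes the file above is importable as `polyFeatures`):
```python
import numpy as np
from polyFeatures import poly_features  # assumes the file above is on the path

X = np.array([[1.0], [2.0], [3.0]])  # a single original feature per row
print(poly_features(X, 3))
# [[ 1.  1.  1.]
#  [ 2.  4.  8.]
#  [ 3.  9. 27.]]
```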
#### File: NG/LinearRessgion_Reg/validationCurve.py
```python
import numpy as np
import trainLinearReg as tlr
import linearRegCostFunction as lrcf
def validation_curve(X, y, Xval, yval):
    # candidate lambda values to try
    lambda_vec = np.array([0., 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])
    # for each lambda, train the model and record the training and validation errors
    error_train = np.zeros(lambda_vec.size)
    error_val = np.zeros(lambda_vec.size)
    i = 0
    for lmd in lambda_vec:
        theta = tlr.train_linear_reg(X, y, lmd)
        error_train[i] = lrcf.linear_reg_cost_function(theta, X, y, 0)[0]  # note: errors are computed with lambda = 0
        error_val[i] = lrcf.linear_reg_cost_function(theta, Xval, yval, 0)[0]
        i += 1
return lambda_vec, error_train, error_val
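# Plotting sketch (not part of the original file): visualize the three arrays
# returned by validation_curve() once X, y, Xval, yval have been prepared elsewhere.
def plot_validation_curve(lambda_vec, error_train, error_val):
    import matplotlib.pyplot as plt
    plt.plot(lambda_vec, error_train, label='Train')
    plt.plot(lambda_vec, error_val, label='Cross Validation')
    plt.xlabel('lambda')
    plt.ylabel('Error')
    plt.legend()
    plt.show()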
``` |
{
"source": "JiaLei123/PythonCamp",
"score": 2
} |
#### File: scrum/board/serializers.py
```python
from rest_framework import serializers
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse
from .models import Sprint, Task
User = get_user_model()
class SprintSerializer(serializers.ModelSerializer):
links = serializers.SerializerMethodField()
class Meta:
model = Sprint
fields = ('id', 'name', 'description', 'end', 'links',)
def get_links(self, obj):
request = self.context['request']
return {
'self': reverse('sprint-detail', kwargs={'pk': obj.pk}, request=request)
}
class TaskSerializer(serializers.ModelSerializer):
status_display = serializers.SerializerMethodField()
assigned = serializers.SlugRelatedField(
slug_field=User.USERNAME_FIELD, required=False, allow_null=True, queryset=User.objects.all()
)
links = serializers.SerializerMethodField()
class Meta:
model = Task
fields = (
'id', 'name', 'description', 'sprint', 'status', 'status_display', 'order', 'assigned', 'started', 'due',
'completed', 'links',)
def get_status_display(self, obj):
return obj.get_status_display()
def get_links(self, obj):
request = self.context['request']
links = {
'self': reverse('task-detail', kwargs={'pk': obj.pk}, request=request),
'sprint': None,
'assigned': None
}
if obj.sprint_id:
links['sprint'] = reverse('sprint-detail', kwargs={'pk': obj.sprint_id}, request=request)
if obj.assigned:
links['assigned'] = reverse('user-detail', kwargs={User.USERNAME_FIELD: obj.assigned}, request=request)
return links
class UserSerializer(serializers.ModelSerializer):
full_name = serializers.CharField(source='get_full_name', read_only=True)
links = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('id', User.USERNAME_FIELD, 'full_name', 'is_active', 'links',)
def get_links(self, obj):
request = self.context['request']
username = obj.get_username()
return {
'self': reverse('user-detail', kwargs={User.USERNAME_FIELD: username}, request=request)
}
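# --- Illustrative sketch (not part of this file) ------------------------------
# These serializers are normally exposed in views.py / urls.py through DRF
# viewsets registered on a router; the hyperlink names used above
# ('sprint-detail', 'task-detail', 'user-detail') come from that registration.
#
# from rest_framework import viewsets, routers
#
# class SprintViewSet(viewsets.ModelViewSet):
#     queryset = Sprint.objects.order_by('end')
#     serializer_class = SprintSerializer
#
# class TaskViewSet(viewsets.ModelViewSet):
#     queryset = Task.objects.all()
#     serializer_class = TaskSerializer
#
# router = routers.DefaultRouter()
# router.register('sprints', SprintViewSet)
# router.register('tasks', TaskViewSet)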
``` |
{
"source": "JiaLei123/PythonCamp_PY3",
"score": 3
} |
#### File: PythonCamp_PY3/basic_program/basic_generater.py
```python
class BasicGenerater(object):
def __init__(self):
self.first_name = 'Jack'
self.last_name = 'Freeman'
def my_name(self):
print('.'.join((self.first_name, self.last_name)))
def __del__(self):
print("call del")
class AdvaceGenerator(BasicGenerater):
def __init__(self):
BasicGenerater.__init__(self)
self.first_name = 'Bob'
class AdvaceGenerator2(BasicGenerater):
def __init__(self):
super(AdvaceGenerator2, self).__init__()
self.first_name = 'Alon'
if __name__ == "__main__":
basic = AdvaceGenerator2()
basic.my_name()
print("end")
```
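The two subclasses above contrast the explicit `BasicGenerater.__init__(self)` call with the `super(AdvaceGenerator2, self).__init__()` form; on Python 3 the zero-argument `super()` is the idiomatic shorthand, as in this small sketch (the class below is illustrative and builds on `BasicGenerater` from the listing):
```python
class AdvanceGenerator3(BasicGenerater):
    def __init__(self):
        super().__init__()           # Python 3 zero-argument super()
        self.first_name = 'Carol'    # override the attribute set by the parent
```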
#### File: PythonCamp_PY3/python_profile/benchmark.py
```python
import time
def primise(n):
if n == 2:
return [2]
elif n < 2:
return []
s = []
for i in range(3, n + 1):
if i % 2 != 0:
s.append(i)
mroot = n ** 0.5
half = (n + 1) / 2 - 1
i = 0
m = 3
while m <= mroot:
if s[i]:
j = int((m * m - 3) / 2)
s[j] = 0
while j < half:
                s[j] = 0  # zero out position j itself (s[j - 1] would knock out the wrong entries)
j += m
i = i + 1
m = 2 * i + 3
l = [2]
for x in s:
if x:
l.append(x)
return l
def primise2(n):
if n == 2:
return [2]
elif n < 2:
return []
s = list(range(3, n + 1, 2))
mroot = n ** 0.5
half = (n + 1) / 2 - 1
i = 0
m = 3
while m <= mroot:
if s[i]:
j = int((m * m - 3) / 2)
s[j] = 0
while j < half:
                s[j] = 0  # as above, mark the multiple itself
j += m
i = i + 1
m = 2 * i + 3
l = [2] + [x for x in s if x]
return l
def benchmark():
start = time.time()
for _ in range(40):
count = len(primise(10000))
end = time.time()
print("benchmark duration: %r seconds" % (end - start))
print(count)
if __name__=="__main__":
benchmark()
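    # Optional profiling sketch (not in the original file): since this module lives
    # in the python_profile folder, cProfile can compare the two sieve versions.
    import cProfile
    import pstats
    cProfile.run('len(primise(10000))', 'primise.stats')
    cProfile.run('len(primise2(10000))', 'primise2.stats')
    for stats_file in ('primise.stats', 'primise2.stats'):
        pstats.Stats(stats_file).sort_stats('cumulative').print_stats(5)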
``` |
{
"source": "JiaLei123/PythonCamp",
"score": 3
} |
#### File: PythonCamp/PythonAdvance/encode_passwd.py
```python
import sys
import uuid
from optparse import OptionParser
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
class prpcrypt():
def __init__(self):
self.key = prpcrypt.get_mac_address()
self.mode = AES.MODE_CBC
    # Encryption: if the length of text is not a multiple of 16 (the AES block size), pad it up to a multiple of 16
@staticmethod
def get_mac_address():
return uuid.UUID(int=uuid.getnode()).hex[-16:].upper()
    def encrypt(self, text):
        # the key length must be 16 (AES-128), 24 (AES-192) or 32 (AES-256) bytes; AES-128 is enough here.
        # On Python 3 the key/IV must be bytes, so the hex key string is encoded first.
        cryptor = AES.new(self.key.encode('utf-8'), self.mode, self.key.encode('utf-8'))
        length = 16
        count = len(text)
        if count % length != 0:
            add = length - (count % length)
        else:
            add = 0
        text = text + ('\0' * add)  # pad with NUL characters (assumes single-byte characters)
        self.ciphertext = cryptor.encrypt(text.encode('utf-8'))
        # the raw AES ciphertext is not necessarily ASCII, which can cause problems when printing or saving,
        # so the encrypted bytes are converted to a hex string here
        return b2a_hex(self.ciphertext)
    # after decryption, strip the NUL padding with rstrip('\0')
    def decrypt(self, text):
        cryptor = AES.new(self.key.encode('utf-8'), self.mode, self.key.encode('utf-8'))
        plain_text = cryptor.decrypt(a2b_hex(text))
        return str(plain_text, encoding="utf-8").rstrip('\0')
usage = """
python %prog [--data] [--file] [--code]"""
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--data', '-d', dest='data', default='0123456789')
parser.add_option('--file', '-f', dest='file', default='pw')
options, args = parser.parse_args()
pc = prpcrypt()
e = pc.encrypt(options.data)
with open(options.file, 'w') as pw_file:
line = str(e, encoding="utf-8")
pw_file.write(line)
with open(options.file) as pw_file:
e = pw_file.read()
d = pc.decrypt(e)
print(e, d)
```
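For reference, a minimal round-trip sketch of the `prpcrypt` class above (requires the `pycryptodome` package; the secret string is just an example). Because the key is derived from the machine's MAC address, a ciphertext written to the `pw` file can only be decrypted on the machine that produced it:
```python
pc = prpcrypt()
token = pc.encrypt('my database password')   # hex-encoded ciphertext (bytes)
print(token)
print(pc.decrypt(token))                     # prints 'my database password'
```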
#### File: PythonCamp/spiders/china_regional_high_way.py
```python
from MyScraperUtil.MyScraperUtil import *
high_way_list = []
def preprocessHTML(inputHTML):
try:
inputHTML = inputHTML.decode('gb2312','ignore')
except UnicodeDecodeError:
raise BadHTMLError
else:
return inputHTML.encode('utf8')
def processPage(soup, url=None, urlPayload=None, addUrl=None, addListOfUrls=None):
if soup:
if urlPayload == 'topUrl':
province_high_way = soup.findAll("div", {"class":"roadTxt"})
if province_high_way:
high_way_links = province_high_way[1].findAll('a')
if high_way_links:
for high_way_link in high_way_links:
if high_way_link.has_key('href') and high_way_link['href'] != "#":
addUrl(url + high_way_link['href'], 'highWay')
print url + high_way_link['href']
print "parse url: " + url + " payLoad: " + urlPayload
if urlPayload == 'highWay':
high_way_div_list = soup.findAll("div", {"class": "box bd"})
if high_way_div_list:
for high_way_div in high_way_div_list:
high_way_name_header = high_way_div.findAll('h2')
if high_way_name_header:
high_way_name = high_way_name_header[0].text[3:]
if not high_way_name.endswith(u"高速"):
high_way_name = high_way_name + u"高速"
if high_way_name not in high_way_list:
high_way_list.append(high_way_name)
print high_way_name.encode('utf8', 'ignore')
if __name__ == '__main__':
url = 'http://gs.cngaosu.com/'
urlPayload = "topUrl"
#debug
url = 'http://gs.cngaosu.com/gaosuluduan/difang/gansu/'
urlPayload = "highWay"
addUrl(url, urlPayload)
run_scraper(processPage, preprocessHTML, websiteEncoding="GB2312")
```
#### File: PythonCamp/spiders/iqiyi_tv.py
```python
try:
import lmtoolspath
except ImportError:
pass
import os
import sys
from optparse import OptionParser
from lmscraperkit_v02 import *
import traceback
from lmtoolkit import Logger
import re
import requests
#########################################################################
total_list = list()
run_small = False
def processPage(soup, url, urlPayload, addUrl, addListOfUrls, printToFile):
"""
Grab the text from the page as well as links to
subsequent pages.
Keyword arguments:
soup -- BeautifulSoup parsing of webpage
url -- URL of the webpage
urlPayload -- payload to carry information across webpage scrapes
addUrl -- function that adds to the list of URLs to scrape
printToFile -- function that prints text to a file
stock
"""
try:
if urlPayload[0] == "topUrl":
total_page = 30
for p in range(1, total_page + 1):
tv_page_url = u"http://list.iqiyi.com/www/2/-------------11-" + str(p) + u"-1-iqiyi--.html"
addUrl(tv_page_url, payload=["tv_page"])
                # for a small run, only the first page of data is needed
if run_small:
break
if urlPayload[0] == "tv_page":
p_page_tv_ul = soup.findAll('ul', attrs={'class': 'site-piclist site-piclist-180236 site-piclist-auto'})
p_page_tv_list = p_page_tv_ul[0].findAll('li')
for i in range(len(p_page_tv_list)):
tv_title_info = p_page_tv_list[i].find('p', attrs={'class': 'site-piclist_info_title '})
tv_url = tv_title_info.find('a')['href']
addUrl(tv_url, payload=["tv_url_page", tv_title_info.find('a').string.strip()])
if urlPayload[0] == "tv_url_page":
parse_tv_url_page(soup, urlPayload[1])
except Exception as e:
print "Error Happened: ", e
def parse_tv_url_page(soup, tv_title):
# print soup
scr = soup.findAll('script')
# print len(scr)
ids = re.findall(r"albumId:(.+?),", str(scr[4]))
id = ''
if len(ids)!=0:
id = ids[0]
url = 'http://up-video.iqiyi.com/ugc-updown/quud.do?dataid='+ id +\
'&type=1&userid='
response = requests.get(url)
scoreinfo = response.text
scores = re.findall(r"score\":(.+?),", str(scoreinfo))
score = '0'
if len(scores)!=0:
score = scores[0]
area = ''
vv = '0'
director = ''
language = ''
year = ''
artist = ''
type = ''
tv_info = soup.findAll('div', attrs={'class': 'info-intro'})
# style 1
if len(tv_info)!=0:
# tv_titles = tv_info[0].find("h1").contents[1].string.strip()
# if u'立即播放' in tv_titles:
# tv_title=tv_titles[:-4]
# else:
# tv_title = tv_titles
pvnum = soup.findAll('span', attrs={'class': 'effrct-PVNum'})
if len(pvnum) != 0:
vv = pvnum[0].string.strip()[:-2]
area_info = soup.findAll('p', attrs={'class': 'episodeIntro-area'})
if len(area_info)!=0:
area = area_info[0].find('a').string.strip()
dir_info = soup.findAll('p', attrs={'class': 'episodeIntro-director'})
if len(dir_info)!=0:
director = dir_info[0].find('a').string.strip()
type_infos = soup.findAll('p', attrs={'class': 'episodeIntro-type'})
if len(type_infos)!=0:
type_info = type_infos[0].findAll('a')
if len(type_info)!= 0:
type = ','.join([type_name.string.strip() for type_name in type_info])
lang_infos = soup.findAll('p', attrs={'class': 'episodeIntro-lang'})
if len(lang_infos) != 0:
lang_info = lang_infos[0].findAll('a')
if len(lang_info) != 0:
if len(lang_info)<2:
year = lang_info[0].string.strip()
else:
language = lang_info[0].string.strip()
year = lang_info[1].string.strip()
artist_infos = soup.findAll('ul', attrs={'class': 'headImg-7575 clearfix'})
if len(artist_infos) != 0:
artist_info = artist_infos[0].findAll('li')
if len(artist_info) != 0:
for i in range(len(artist_info)):
art = artist_info[i].findAll('p', attrs={'class': 'headImg-bottom-title'})
artist = artist + art[0].find('a').string.strip() + ','
# style 2
else:
msg_div = soup.find("div", {"class": "album-msg"})
pvnum = msg_div.find('i', attrs={'id': 'widget-playcount'})
vv = pvnum.string.strip()
mini_info = msg_div.findAll('p', attrs={'class': 'li-mini'})
if len(mini_info) != 0:
area = mini_info[0].find('a').string.strip()
language = mini_info[1].find('a').string.strip()
director = mini_info[2].find('a').string.strip()
mini_large_info = msg_div.findAll('p', attrs={'class': 'li-large'})
if len(mini_large_info) != 0:
type_infos = mini_large_info[0]
type_info = type_infos.findAll('a')
if len(type_info) != 0:
type = ','.join([type_name.string.strip() for type_name in type_info])
artist_infos = mini_large_info[1]
artist_info = artist_infos.findAll('a')
if len(artist_info) != 0:
artist = ','.join([artist_name.string.strip() for artist_name in artist_info])
each_list = []
each_list.append(tv_title)
each_list.append(artist[:-1])
if u'万' in vv:
vv = vv[:-1]
vv = vv.split(".")
if len(vv) > 1:
play_count_int = int(vv[0]) * 10000 + int(vv[1]) * 1000
else:
play_count_int = int(vv[0]) * 10000
elif u'亿' in vv:
vv = vv[:-1]
vv = vv.split(".")
if len(vv) > 1:
play_count_int = int(vv[0]) * 100000000 + int(vv[1]) * 10000000
else:
play_count_int = int(vv[0]) * 100000000
else:
play_count_int = int(vv)
each_list.append(play_count_int)
each_list.append(score)
each_list.append(area)
each_list.append(director)
each_list.append(type)
each_list.append(language)
each_list.append(year)
total_list.append(each_list)
################################################################################
usage = """
python2.6 %prog [--debug] [--dateTag] [--restart]
[--robots] [--basepath]
<<NOTE>> basepath and robots should be set for other than /lm/data2/
"""
################################################################################
if __name__ == '__main__':
parser = OptionParser()
parser.add_option(
'--basepath',
'-b',
dest='basepath',
default='/lm/data2/')
parser.add_option(
'--restart',
'-r',
default=False,
action='store_true',
help='Restart the scraper from a previous incomplete run.'
)
parser.add_option(
'--html',
default=None,
help='HTML databall that will be used as input'
)
parser.add_option(
'--robots',
default='/lm/data2/scrapers/zho-CHN/epg/iqiyi.com.dianshiju/log.inc/'
'robots.txt.zip',
help='robots.zip file'
)
parser.add_option(
'--delay',
type='int',
dest='delay',
default=2,
help='specify delay in seconds between acessing web pages'
)
parser.add_option(
'--debug',
action='store_true',
dest='debug',
default=False,
help='print status messages to stdout'
)
parser.add_option(
'--dateTag',
'-d',
dest='dateTag',
default=None,
help='Date used for path creation; defaults to current date'
)
parser.add_option(
'--badUrlsFile',
dest='badUrlsFile',
default='/lm/data2/scrapers/zho-CHN/epg/iqiyi.com.dianshiju'
'/log.inc/iqiyi.com.dianshiju.badUrls.lst',
help='Prints unusable URLs to external file instead of halting the scraper.'
)
parser.add_option(
'--small',
action='store_true',
dest='run_small',
default=False,
help='if run spider by small data set, this is for debug.'
)
options, args = parser.parse_args()
log = Logger(options.debug)
if options.run_small:
run_small = options.run_small
if options.html:
myScraper = HTMLScraper(
scraperType=u'scrapers',
topic=u'epg',
lang=u'zho-CHN',
name=u'iqiyi.com.dianshiju',
frequency=u'versions'
)
myScraper.inputDataBall(options.html)
else:
myScraper = WebScraper(
scraperType=u'scrapers',
topic=u'epg',
lang=u'zho-CHN',
name=u'iqiyi.com.dianshiju',
frequency=u'versions'
)
if options.robots:
# set the robots.txt for the scraper
myScraper.setRobotsTxt(url='http://www.iqiyi.com/',
zip=options.robots)
# Set the base path ...
# over ride the default of /lm/data2 with the --basepath option
myScraper.setBasePath(options.basepath)
# Use the date specified at the command line if provided
if options.dateTag:
y, m, d = options.dateTag.split(u'_')
else:
# otherwise default to current date
y, m, d = yearMonthDay()
# if restarting scraper, set the rawDirectory
if options.restart:
myScraper.setRawDirectory(
myScraper.generatePath(year=y, month=m, day=d, cleanState='raw')
)
outputPath = myScraper.generatePath(year=y, month=m, day=d,
cleanState=u'records')
output_file = os.path.join(outputPath, myScraper.generateFileName(fileType='tsv'))
myScraper.addOutputFile('', output_file, noTemp=False)
# add the seed URL to the scraper
myScraper.addUrl(
u'http://list.iqiyi.com/www/2/-------------11-0-1-iqiyi--.html',
payload=['topUrl']
)
# start the scraping job
try:
log.info('Starting the scrape \n')
myScraper.run(
processPage,
restart=options.restart,
badUrlsFile=options.badUrlsFile
)
sorted_list = sorted(total_list, key=lambda x: x[2], reverse=True)
if len(sorted_list) > 1:
op = OpenFile(output_file, 'a', encoding='utf-8')
if not options.restart:
header = [u'epg_title',
u'name_actor_csv',
u'number_downloads',
u'number_rating',
u'address',
u'name_director',
u'category',
u'language',
u'date_year']
op.write(u'#scraper01 ' + u'\t'.join(header))
op.write('\n')
for i in sorted_list:
record = '\t'.join([i[0], i[1], str(i[2]), i[3], i[4], i[5], i[6], i[7], i[8]])
op.write(record)
op.write('\n')
op.close()
else:
log.warning('Less output')
log.info('Finished the scrape \n')
except StandardError, error:
traceback.print_exc()
log.error(error)
if options.debug:
raise
sys.exit(2)
``` |
{
"source": "jialeicui/Raspberry_Pi_Web_Player",
"score": 3
} |
#### File: jialeicui/Raspberry_Pi_Web_Player/song_scan.py
```python
import os
def get(base_dir, opt_dir = ''):
folders = []
songs = []
final_dir = os.path.join(base_dir, opt_dir)
for dirs in os.listdir(final_dir):
if not dirs.startswith('.'):
if os.path.isdir(os.path.join(final_dir, dirs)):
folders.append({'name':dirs, 'href':'/ls/' + os.path.join(opt_dir, dirs),'playable':False})
elif _is_audio_file(dirs):
songs.append({'name':dirs, 'file_path':os.path.join(opt_dir, dirs), 'href':'/player/play', 'addhref':'/player/add','playable':True})
folders.sort()
songs.sort()
return folders + songs
pass
def _is_audio_file(name):
valid_ext = ['mp3', 'm4a', 'ape', 'wma', 'flac']
n, ext = os.path.splitext(name)
if valid_ext.count(ext[1:].lower()) != 0:
return True
return False
pass
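# Usage sketch (not part of the original file); the music directory is a made-up path.
if __name__ == '__main__':
    for entry in get('/home/pi/Music'):
        print(entry['name'], entry['href'], entry['playable'])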
``` |
{
"source": "jialeli1/From-Voxel-to-Point",
"score": 2
} |
#### File: models/dense_heads/center_af_head_single.py
```python
import torch
import torch.nn as nn
from .center_af_head_template import CenterAFHeadTemplate
from .feature_adaptor.deformable_convs import FeatureAdaption as FeatureAdaptionV1
from .feature_adaptor.mdeformable_convs import FeatureAdaption as FeatureAdaptionV2
class CenterAFHeadSingle(CenterAFHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, voxel_size, grid_size, point_cloud_range,
predict_boxes_when_training=True):
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, voxel_size=voxel_size, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training
)
self.predict_boxes_when_training = True
self.iouscore_training_samples = self.model_cfg.NUM_IOUSCORE_TRAINING_SAMPLES
self.num_infernce_samples = self.model_cfg.NUM_INFERENCE_SAMPLES
# shared conv
pre_channel = input_channels
shared_conv_list = []
for k in range(0, self.model_cfg.SHARED_FC.__len__()):
# hiddens
shared_conv_list.extend([
nn.Conv2d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(self.model_cfg.SHARED_FC[k]),
nn.ReLU()
])
pre_channel = self.model_cfg.SHARED_FC[k]
# dropout
if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
shared_conv_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
self.shared_conv_layer = nn.Sequential(*shared_conv_list)
input_channels = pre_channel
# adaptation with deformable convs
if self.model_cfg.USE_DCN == 'DCN':
self.feature_adapt = FeatureAdaptionV1(
in_channels=input_channels,
out_channels=input_channels,
kernel_size=3,
deformable_groups=4)
elif self.model_cfg.USE_DCN == 'MDCN':
self.feature_adapt = FeatureAdaptionV2(
in_channels=input_channels,
out_channels=input_channels,
kernel_size=3,
deformable_groups=4)
self.num_spatial_features_before_head = input_channels
# heads
self.head_names = [ cfg['name'] for cfg in self.model_cfg.HEADS_CONFIG ]
for head_cfg in self.model_cfg.HEADS_CONFIG:
if head_cfg['name'] == 'hm':
head_cfg['out_channel'] = self.num_class
cur_head = self.make_fc_head(input_channels=input_channels,
head_cfg=head_cfg)
self.__setattr__(head_cfg['name'], cur_head)
def forward(self, data_dict):
"""
'points', 'frame_id', 'calib', 'gt_boxes', 'road_plane', 'use_lead_xyz',
'voxels', 'voxel_coords', 'voxel_num_points',
'image_shape', 'batch_size',
'voxel_features', 'encoded_spconv_tensor', 'encoded_spconv_tensor_stride',
'multi_scale_3d_features', 'spatial_features', 'spatial_features_stride',
'spatial_features_2d'
"""
spatial_features_2d = data_dict['spatial_features_2d']
spatial_features_2d = self.shared_conv_layer(spatial_features_2d)
# -------------Second Half of the ADFA Module--------------------
# The second half of the ADFA module follows DCNBEVBackbone, including the final deformable conv layer and mask-guided attention enhancement.
# For convenience, the segmentation sub-network in Figure 4 is implemented as a head parallel to the detection head, with the same operation according to the paper.
if self.model_cfg.USE_DCN in ['DCN', 'MDCN']:
spatial_features_2d = self.feature_adapt(spatial_features_2d)
if 'segm' in self.head_names:
# segm_pred
segm_pred = self.__getattr__('segm')(spatial_features_2d) # (B, C, sizey, sizex)
segm_pred_norm = torch.sigmoid(segm_pred.detach()) # (B, 1, sizey, sizex)
# res from adding
spatial_weight = segm_pred_norm.expand_as(spatial_features_2d) # (B, C, sizey, sizex)
spatial_features_2d_res = spatial_weight*spatial_features_2d
spatial_features_2d_att = spatial_features_2d + spatial_features_2d_res
segm_preds_name = 'segm' + '_pred'
self.forward_ret_dict.update({segm_preds_name: segm_pred})
data_dict.update({'spatial_features_before_head': spatial_features_2d_att})
# -------------Anchor-free Detection Head--------------------
for head_name in self.head_names:
if head_name != 'segm':
cur_preds_name = head_name + '_pred'
cur_preds = self.__getattr__(head_name)(spatial_features_2d_att) # (B, C, sizey, sizex)
self.forward_ret_dict.update({cur_preds_name: cur_preds})
# -------------Target Assignment--------------------
if self.training:
'''
            target_dict includes:
heatmaps
heightmaps
'''
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes']
)
self.forward_ret_dict.update(targets_dict)
# -------------Decode predicted boxes for loss computation--------------------
if self.training and self.predict_boxes_when_training:
# for iouscore loss computation
# decode predicted boxes in an inference manner
self.forward_ret_dict.update( self.predhm_based_predicted_boxes_generation_ssd(K=self.iouscore_training_samples) )
# for corner loss computation
# decode predicted boxes in a training manner
self.forward_ret_dict.update( self.gthm_based_predicted_boxes_generation() )
# -------------Decode detections for inference--------------------
if not self.training:
# You can compare a variety of NMS by setting different flags, which have slight differences.
# NMS-1: max-pooling based nms (default)
normal_infer_flag = True
if normal_infer_flag:
center3d_pred_dict = self.predhm_based_predicted_boxes_generation_ssd(K=self.num_infernce_samples)
data_dict.update(center3d_pred_dict)
data_dict['cls_preds_normalized'] = False
# NMS-2: normal NMS.
normal_nms_flag = False
# normal_nms_flag = True
if normal_nms_flag:
center3d_pred_dict = self.predhm_based_predicted_boxes_generation_nomaxpooling(K=100)
data_dict.update(center3d_pred_dict)
data_dict['cls_preds_normalized'] = False
return data_dict
```
#### File: dense_heads/feature_adaptor/deformable_convs.py
```python
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from pcdet.ops.DeformableConvolutionV2PyTorch.modules.deform_conv import DeformConv
# from pcdet.ops.DeformableConvolutionV2PyTorch.modules.modulated_deform_conv import ModulatedDeformConv
except:
print("Deformable Convolution not built!")
class FeatureAdaption(nn.Module):
"""Feature Adaption Module.
Feature Adaption Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAdaption, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(
in_channels, deformable_groups * offset_channels, 1, bias=True)
self.conv_adaption = DeformConv(
in_channels,
out_channels,
stride=1,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups,
bias=False)
self.relu = nn.ReLU(inplace=True)
self.init_offset()
def init_offset(self):
self.conv_offset.weight.data.zero_()
self.conv_offset.bias.data.zero_()
def init_weights(self):
pass
"""normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
"""
def forward(self, x,):
offset = self.conv_offset(x)
x = self.relu(self.conv_adaption(x, offset))
return x
```
#### File: models/roi_heads/pointrcnniou_head.py
```python
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.roipoint_pool3d import roipoint_pool3d_utils
from ...utils import common_utils
from ...ops.iou3d_nms import iou3d_nms_utils
from .roi_head_template import RoIHeadTemplate
class PointRCNNIoUHead(RoIHeadTemplate):
def __init__(self, input_channels, model_cfg, num_class=1):
super().__init__(num_class=num_class, model_cfg=model_cfg)
self.model_cfg = model_cfg
use_bn = self.model_cfg.USE_BN
self.SA_modules = nn.ModuleList()
channel_in = input_channels
self.num_prefix_channels = 3 + 2 # xyz + point_scores + point_depth
xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER
shared_mlps = []
for k in range(len(xyz_mlps) - 1):
shared_mlps.append(nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn))
if use_bn:
shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1]))
shared_mlps.append(nn.ReLU())
self.xyz_up_layer = nn.Sequential(*shared_mlps)
c_out = self.model_cfg.XYZ_UP_LAYER[-1]
self.merge_down_layer = nn.Sequential(
nn.Conv2d(c_out * 2, c_out, kernel_size=1, bias=not use_bn),
*[nn.BatchNorm2d(c_out), nn.ReLU()] if use_bn else [nn.ReLU()]
)
for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k]
npoint = self.model_cfg.SA_CONFIG.NPOINTS[k] if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1 else None
self.SA_modules.append(
pointnet2_modules.PointnetSAModule(
npoint=npoint,
radius=self.model_cfg.SA_CONFIG.RADIUS[k],
nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k],
mlp=mlps,
use_xyz=True,
bn=use_bn
)
)
channel_in = mlps[-1]
        # Does this cls_layer still need to be per-class?
        # CLASS_AGNOSTIC = True, so self.num_class should always be 1 here.
self.cls_layers = self.make_fc_layers(
input_channels=channel_in, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
)
        # Is this reg_layer applied per class?
        # CLASS_AGNOSTIC = True, so it is class-agnostic as well.
self.reg_layers = self.make_fc_layers(
input_channels=channel_in,
output_channels=self.box_coder.code_size * self.num_class,
fc_list=self.model_cfg.REG_FC
)
self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d(
num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS,
pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH
)
self.init_weights(weight_init='xavier')
self.predict_boxes_when_training = self.model_cfg.TARGET_CONFIG.CLS_SCORE_TYPE == 'rcnn_iou'
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)
def roipool3d_gpu(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
rois: (B, num_rois, 7 + C)
point_coords: (num_points, 4) [bs_idx, x, y, z]
point_features: (num_points, C)
point_cls_scores: (N1 + N2 + N3 + ..., 1)
point_part_offset: (N1 + N2 + N3 + ..., 3)
Returns:
"""
batch_size = batch_dict['batch_size']
batch_idx = batch_dict['point_coords'][:, 0]
point_coords = batch_dict['point_coords'][:, 1:4]
point_features = batch_dict['point_features']
# print('==> 1. point_features.shape: ', point_features.shape) # (32768, 128), bs=2
rois = batch_dict['rois'] # (B, num_rois, 7 + C)
batch_cnt = point_coords.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
assert batch_cnt.min() == batch_cnt.max()
point_scores = batch_dict['point_cls_scores'].detach()
point_depths = point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER - 0.5
point_features_list = [point_scores[:, None], point_depths[:, None], point_features]
point_features_all = torch.cat(point_features_list, dim=1)
batch_points = point_coords.view(batch_size, -1, 3)
batch_point_features = point_features_all.view(batch_size, -1, point_features_all.shape[-1])
        # print('==> batch_point_features.shape: ', batch_point_features.shape)  # (2, 16384, 130), since scores and depth were appended
with torch.no_grad():
pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer(
batch_points, batch_point_features, rois
) # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois)
# canonical transformation
roi_center = rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
pooled_features = pooled_features.view(-1, pooled_features.shape[-2], pooled_features.shape[-1])
pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z(
pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6]
)
pooled_features[pooled_empty_flag.view(-1) > 0] = 0
        # print('==> pooled_features.shape: ', pooled_features.shape)  # (256, 512, 133), since xyz is prepended to the features
return pooled_features
@staticmethod
def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
"""
Args:
rois: (N, 7)
roi_labels: (N)
gt_boxes: (N, )
gt_labels:
Returns:
"""
"""
:param rois: (N, 7)
:param roi_labels: (N)
:param gt_boxes: (N, 8)
:return:
"""
max_overlaps = rois.new_zeros(rois.shape[0])
gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
roi_mask = (roi_labels == k)
gt_mask = (gt_labels == k)
if roi_mask.sum() > 0 and gt_mask.sum() > 0:
cur_roi = rois[roi_mask]
cur_gt = gt_boxes[gt_mask]
original_gt_assignment = gt_mask.nonzero().view(-1)
iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt) # (M, N)
cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
max_overlaps[roi_mask] = cur_max_overlaps
gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
return max_overlaps, gt_assignment
@torch.no_grad()
def generate_rcnn_iouscore_label(self, rcnn_cls, rcnn_reg, batch_dict):
"""
Args:
rcnn_cls: (BN, num_class)
rcnn_reg: (BN, code_size)
batch_dict:
                roi_labels: (B, N), must be the updated (resampled) labels
return:
rcnn_cls_labels: (B, N)
"""
batch_size = batch_dict['batch_size']
        # 1. decode the predicted rcnn boxes; be sure to clone().detach() first
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_size, rois=batch_dict['rois'],
cls_preds=rcnn_cls.clone().detach(), box_preds=rcnn_reg.clone().detach()
)
# print('==> 0.batch_box_preds.shape: ', batch_box_preds.shape) #(B, N, 7)
        # 3. compute the 3D IoU per batch and per class
        batch_gt_boxes = batch_dict['gt_boxes']  # (B, N, c)
        batch_roi_labels = batch_dict['roi_labels']  # (B, N), must be the updated labels
# print('==> 1.batch_gt_boxes.shape: ', batch_gt_boxes.shape)
# print('==> 2.batch_roi_labels.shape: ', batch_roi_labels.shape)
rcnn_iou3d_list = []
for bs_idx in range(batch_size):
cur_box_preds = batch_box_preds[bs_idx]
cur_gt_boxes = batch_gt_boxes[bs_idx]
cur_roi_labels = batch_roi_labels[bs_idx]
max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
rois=cur_box_preds, roi_labels=cur_roi_labels,
gt_boxes=cur_gt_boxes[:, 0:7], gt_labels=cur_gt_boxes[:, -1].long()
)
rcnn_iou3d_list.append(max_overlaps)
# print('==> max_overlaps.shape: ', max_overlaps.shape) #(N, )
batch_rcnn_ious = torch.stack(rcnn_iou3d_list, dim=0) #(B, N)
        # 4. threshold the IoU right here to build the (soft) cls_label
iou_bg_thresh = self.model_cfg.TARGET_CONFIG.CLS_BG_THRESH
iou_fg_thresh = self.model_cfg.TARGET_CONFIG.CLS_FG_THRESH
fg_mask = batch_rcnn_ious > iou_fg_thresh
bg_mask = batch_rcnn_ious < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
batch_cls_labels = (fg_mask > 0).float()
batch_cls_labels[interval_mask] = \
(batch_rcnn_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
        # 5. record the sample distribution to monitor training
distribution_dict = {}
num_sample_fg = fg_mask.float().sum()
num_sample_bg = bg_mask.float().sum()
num_sample_inter = interval_mask.float().sum()
distribution_dict['num_sample_fg'] = num_sample_fg / batch_size
distribution_dict['num_sample_bg'] = num_sample_bg / batch_size
distribution_dict['num_sample_inter'] = num_sample_inter / batch_size
        # outputs
rcnn_cls_labels = batch_cls_labels
return rcnn_cls_labels, distribution_dict
def forward(self, batch_dict):
"""
Args:
batch_dict:
Returns:
"""
targets_dict = self.proposal_layer(
batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
)
if self.training:
targets_dict = self.assign_targets(batch_dict)
            # re-assign here because assign_targets resamples the rois; they have changed, so update them
batch_dict['rois'] = targets_dict['rois']
batch_dict['roi_labels'] = targets_dict['roi_labels']
pooled_features = self.roipool3d_gpu(batch_dict) # (total_rois, num_sampled_points, 3 + C)
        # does point_features here already include xyz?
xyz_input = pooled_features[..., 0:self.num_prefix_channels].transpose(1, 2).unsqueeze(dim=3).contiguous()
xyz_features = self.xyz_up_layer(xyz_input)
point_features = pooled_features[..., self.num_prefix_channels:].transpose(1, 2).unsqueeze(dim=3).contiguous()
merged_features = torch.cat((xyz_features, point_features), dim=1)
merged_features = self.merge_down_layer(merged_features)
l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [merged_features.squeeze(dim=3).contiguous()]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
shared_features = l_features[-1] # (total_rois, num_features, 1)
rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
if self.training and self.predict_boxes_when_training:
new_rcnn_cls_labels, new_distribution_dict = self.generate_rcnn_iouscore_label(
rcnn_cls=rcnn_cls, rcnn_reg=rcnn_reg, batch_dict=batch_dict
)
            # check the previous target assignment first, then replace it with the rcnn iou score
            assert targets_dict['rcnn_cls_labels'] is None
            targets_dict['rcnn_cls_labels'] = new_rcnn_cls_labels
            targets_dict['distribution_dict'].update(new_distribution_dict)
        # always use roi_labels as the final class labels;
        # rcnn_cls_labels serves as the detection confidence and for filtering negative results.
        # Force this flag to True.
batch_dict['has_class_labels'] = True
if not self.training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
)
batch_dict['batch_cls_preds'] = batch_cls_preds
batch_dict['batch_box_preds'] = batch_box_preds
batch_dict['cls_preds_normalized'] = False
else:
targets_dict['rcnn_cls'] = rcnn_cls
targets_dict['rcnn_reg'] = rcnn_reg
self.forward_ret_dict = targets_dict
return batch_dict
```
#### File: DeformableConvolutionV2PyTorch/modules/mdeformable_conv_block.py
```python
import numpy as np
import torch
# from det3d.core import box_torch_ops
# from det3d.models.builder import build_loss
# from det3d.models.losses import metrics
# from det3d.torchie.cnn import constant_init, kaiming_init
# from det3d.torchie.trainer import load_checkpoint
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.batchnorm import _BatchNorm
# from det3d.models.losses.centernet_loss import FocalLoss, SmoothRegLoss, RegLoss, RegClsLoss, FastFocalLoss
# from det3d.core.utils.center_utils import ddd_decode
# from det3d.models.utils import Sequential
# from .. import builder
# from ..losses import accuracy
# from ..registry import HEADS
# import copy
try:
# from pcdet.ops.dcn.deform_conv import DeformConv, ModulatedDeformConvPack
# from pcdet.ops.DeformableConvolutionV2PyTorch.modules.deform_conv import DeformConv
from pcdet.ops.DeformableConvolutionV2PyTorch.modules.modulated_deform_conv import ModulatedDeformConv
except:
print("Deformable Convolution not built!")
class MdeformConvBlock(nn.Module):
"""Feature Adaption Module.
Feature Adaption Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4,
):
super(MdeformConvBlock, self).__init__()
offset_mask_channels = kernel_size * kernel_size * (2+1)
self.conv_offset_mask = nn.Conv2d(
in_channels,
deformable_groups * offset_mask_channels,
kernel_size=kernel_size,
stride=1,
padding=(kernel_size-1) // 2,
bias=True)
self.conv_adaption = ModulatedDeformConv(
in_channels,
out_channels,
stride=1,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups,
bias=False)
# self.relu = nn.ReLU(inplace=True)
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def init_weights(self):
pass
"""normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
"""
def forward(self, x):
offset_mask = self.conv_offset_mask(x)
o1, o2, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
# offset = torch.cat((o1, o2), dim=1).detach()
mask = torch.sigmoid(mask)
# print('==> offset.size(): ', offset.size()) #torch.Size([1, 72, 200, 176])
# print(offset[0, 0:18, :, :])
# just dcn without actfunc
x = self.conv_adaption(x, offset, mask)
return x
```
#### File: pointnet2/pointnet2_stack/voxel_pool_trans_modules.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import voxel_query_utils
from typing import List
class NeighborVoxelSAModuleMSG(nn.Module):
def __init__(self, *, query_ranges: List[List[int]], radii: List[float],
nsamples: List[int], mlps: List[List[int]], use_xyz: bool = True, pool_method='max_pool'):
"""
Args:
query_ranges: list of int, list of neighbor ranges to group with
nsamples: list of int, number of samples in each ball query
mlps: list of list of int, spec of the pointnet before the global pooling for each scale
use_xyz:
pool_method: max_pool / avg_pool
"""
super().__init__()
assert len(query_ranges) == len(nsamples) == len(mlps)
self.groupers = nn.ModuleList()
self.mlps_in = nn.ModuleList()
self.mlps_pos = nn.ModuleList()
self.mlps_out = nn.ModuleList()
for i in range(len(query_ranges)):
max_range = query_ranges[i]
nsample = nsamples[i]
radius = radii[i]
self.groupers.append(voxel_query_utils.VoxelQueryAndGrouping(max_range, radius, nsample))
mlp_spec = mlps[i]
cur_mlp_in = nn.Sequential(
nn.Conv1d(mlp_spec[0], mlp_spec[1], kernel_size=1, bias=False),
nn.BatchNorm1d(mlp_spec[1])
)
cur_mlp_pos = nn.Sequential(
nn.Conv2d(3, mlp_spec[1], kernel_size=1, bias=False),
nn.BatchNorm2d(mlp_spec[1])
)
cur_mlp_out = nn.Sequential(
nn.Conv1d(mlp_spec[1], mlp_spec[2], kernel_size=1, bias=False),
nn.BatchNorm1d(mlp_spec[2]),
nn.ReLU()
)
self.mlps_in.append(cur_mlp_in)
self.mlps_pos.append(cur_mlp_pos)
self.mlps_out.append(cur_mlp_out)
self.relu = nn.ReLU()
self.pool_method = pool_method
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, \
new_coords, features, voxel2point_indices):
"""
:param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
:param xyz_batch_cnt: (batch_size), [N1, N2, ...]
:param new_xyz: (M1 + M2 ..., 3)
:param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
:param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
:param point_indices: (B, Z, Y, X) tensor of point indices
:return:
new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
# change the order to [batch_idx, z, y, x]
new_coords = new_coords[:, [0, 3, 2, 1]].contiguous()
new_features_list = []
neighbor_feature_list = [] # for msg, but maybe only ssg used.
neighbor_xyz_list = [] # for msg, but maybe only ssg used.
for k in range(len(self.groupers)):
# features_in: (1, C, M1+M2)
features_in = features.permute(1, 0).unsqueeze(0)
features_in = self.mlps_in[k](features_in)
# features_in: (1, M1+M2, C)
features_in = features_in.permute(0, 2, 1).contiguous()
# features_in: (M1+M2, C)
features_in = features_in.view(-1, features_in.shape[-1])
# grouped_features: (M1+M2, C, nsample)
# grouped_xyz: (M1+M2, 3, nsample)
grouped_features, grouped_xyz, empty_ball_mask = self.groupers[k](
new_coords, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features_in, voxel2point_indices
)
grouped_features[empty_ball_mask] = 0
# grouped_features: (1, C, M1+M2, nsample)
grouped_features = grouped_features.permute(1, 0, 2).unsqueeze(dim=0)
# print('==> grouped_features.shape', grouped_features.shape)
# print('==> grouped_features[0, :5, 100, :]', grouped_features[0, :5, 100, :])
# print('==> grouped_features[0, :5, 400, :]', grouped_features[0, :5, 400, :])
# print('==> grouped_features[0, :5, 800, :]', grouped_features[0, :5, 800, :])
# torch.cuda.synchronize()
# print('==> grouped_xyz.shape', grouped_xyz.shape)
# print('==> new_xyz[100, ...]: ', new_xyz[100, ...])
# print('==> grouped_xyz[100, ...]', grouped_xyz[100, ...])
# print('==> new_xyz[10400, ...]: ', new_xyz[10400, ...])
# print('==> grouped_xyz[10400, ...]', grouped_xyz[10400, ...])
# print('==> new_xyz[10800, ...]: ', new_xyz[10800, ...])
# print('==> grouped_xyz[10800, ...]', grouped_xyz[10800, ...])
# grouped_xyz: (M1+M2, 3, nsample)
grouped_xyz = grouped_xyz - new_xyz.unsqueeze(-1)
grouped_xyz[empty_ball_mask] = 0
# grouped_xyz: (1, 3, M1+M2, nsample)
grouped_xyz = grouped_xyz.permute(1, 0, 2).unsqueeze(0)
# grouped_xyz: (1, C, M1+M2, nsample)
position_features = self.mlps_pos[k](grouped_xyz)
grouped_new_features = grouped_features + position_features
grouped_new_features = self.relu(grouped_new_features)
            # save this position-enhanced feature as the neighbor feature; if channel alignment
            # is needed it can be done outside, or it is already aligned here via a suitable mlp config
            # (1, C, M1+M2, nsample)
neighbor_feature_list.append(grouped_new_features)
# (1, 3, M1+M2, nsample)
neighbor_xyz_list.append(grouped_xyz)
            # grouped_new_features -> new_features:
            # pool first and only then transform the channel dimension, which saves a little memory
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    grouped_new_features, kernel_size=[1, grouped_new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    grouped_new_features, kernel_size=[1, grouped_new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
else:
raise NotImplementedError
new_features = self.mlps_out[k](new_features)
new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C)
new_features_list.append(new_features)
# (M1 + M2 ..., C)
new_features = torch.cat(new_features_list, dim=1)
# (1, C, M1+M2, nsample_s1/nsample_s2...) -> (1, C, M1+M2, nsample_s1 + nsample_s2)
neighbor_features = torch.cat(neighbor_feature_list, dim=-1)
# (1, C, M1+M2, nsample_s1 + nsample_s2) -> (M1+M2, nsample_s1 + nsample_s2, C)
neighbor_features = neighbor_features.squeeze(dim=0).permute(1,2,0).contiguous()
neighbor_xyz = torch.cat(neighbor_xyz_list, dim=-1)
neighbor_xyz = neighbor_xyz.squeeze(dim=0).permute(1,2,0).contiguous()
return new_features, neighbor_features, neighbor_xyz
class TransformerDecoderLayerPreNorm(nn.Module):
def __init__(self, d_model, nc_mem, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super().__init__()
        # don't do self-attention on tgt here
# self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, kdim=nc_mem, vdim=nc_mem)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout, inplace=True)
self.linear2 = nn.Linear(dim_feedforward, d_model)
# self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.norm_mem = nn.LayerNorm(nc_mem)
self.dropout1 = nn.Dropout(dropout, inplace=True)
self.dropout2 = nn.Dropout(dropout, inplace=True)
self.dropout3 = nn.Dropout(dropout, inplace=True)
self.activation = nn.ReLU(inplace=True)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super().__setstate__(state)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
"""
tgt: (B, L1, E1)
mem: (B, L2, E2)
"""
        # memory could also go through a self-attention step first,
        # but it has too few channels, so skip that for now
"""
memory = self.norm1(memory)
memory2 = self.self_attn(memory, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
memory = memory + self.dropout1(memory2)
"""
# tgt attend to mem.
tgt = self.norm2(tgt)
memory = self.norm_mem(memory)
        # multihead_attn performs the q/k/v projections internally, mapping all of them to d_model
tgt2, mask = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
return tgt
class PointNeighborTransformer(nn.Module):
def __init__(self, dim_in, dim_out, nhead=4, num_layers=1, drop=0.0, dim_feature=32, prenorm=True):
super().__init__()
self.nc_in = dim_in
self.nc_out = dim_out
self.nhead = nhead
self.pe = nn.Sequential(
# conv-bn-relu-conv, no/bias when without/with norm
nn.Conv2d(3, self.nc_in // 2, 1, bias=False),
nn.BatchNorm2d(self.nc_in // 2),
nn.ReLU(),
nn.Conv2d(self.nc_in // 2, self.nc_in, 1, bias=True)
)
self.chunk = nn.TransformerDecoder(
TransformerDecoderLayerPreNorm(d_model=self.nc_in, dim_feedforward=2 * self.nc_in, dropout=drop, nhead=nhead, nc_mem=dim_feature),
num_layers=num_layers,
)
self.fc = nn.Linear(self.nc_in, self.nc_out, bias=True)
def forward(self, xyz_tgt, xyz_mem, features_tgt, features_mem):
"""
xyz_tgt: (M1+M2+..., 3)
xyz_mem: (M1+M2+..., N_mem, 3)
        features_tgt: (M1+M2+..., C_tgt(=d_model)), features that should already be aligned to d_model.
features_mem: (M1+M2+..., N_mem, C_mem)
"""
# (M1+M2+..., 3) -> (M1+M2+..., N_tgt=1, 3) -> (M1+M2+..., 3, N_tgt, 1)
xyz_tgt_flipped = xyz_tgt.unsqueeze(1).transpose(1,2).unsqueeze(-1)
xyz_mem_flipped = xyz_mem.transpose(1,2).unsqueeze(-1)
# (M1+M2+..., C_tgt, N_tgt, 1)
tgt = features_tgt.unsqueeze(1).transpose(1,2).unsqueeze(-1) + self.pe(xyz_tgt_flipped)
mem = features_mem.transpose(1,2).unsqueeze(-1) + self.pe(xyz_mem_flipped)
mem_mask = None
# torch.nn.MultiheadAttention requires the (L, B, E) shape.
# (N_tgt, M1+M2+..., C_tgt)
tgt = tgt.squeeze(-1).permute(2, 0, 1)
mem = mem.squeeze(-1).permute(2, 0, 1)
# back to (M1+M2+..., N_tgt, C_tgt)
transformed_feats = self.chunk(tgt, mem, memory_mask=mem_mask).permute(1,0,2)
# one more fc or not?
# (M1+M2+..., N_tgt, cout)
output = self.fc(transformed_feats)
return output
``` |
{
"source": "jialeyu/somethingw",
"score": 3
} |
#### File: jialeyu/somethingw/hi.py
```python
import time
itemf=True
items=[]
inventory=[]
users=[]
badges=0
money=500
mode = "unknown"
import random
print ("Something World v.0.21 by <NAME>")
print ("If you wanna see the C++ version, which is a lot faster, you can contact Gabriel, or if you want the Javascript version, contact <NAME>.")
print ("")
print ("_\~ () |\/| [- ~|~ |-| | |\| (_, \/\/ () /? |_ |) ")
print ("")
print ("")
print ("Welcome! What's your name?")
name = input(">")
#not used yet
if name == "guest session":
mode = "guest"
elif name not in users:
users.append(name)
mode = "newuser"
else:
mode = "ruser"
begin = "no"
noc=0
commanding = False
while begin == "no":
print ("Hi " + name + "! Are you ready to begin? Type yes or no, or type credits to type the credits.")
begin = input(">")
if begin == "command":
        commanding = True  # note: assignment, not comparison
break
if begin == "exit":
print ("Game aborted. You may now close the window. To play again, please refresh before rerunning.")
quit()
if begin == "yes":
break
if noc >= 10:
print ("STOP PRESSING THE BUTTON OK?!?!?!?!?")
badges=badges+1
if begin == "no":
print ("OK, I'll wait!")
noc=noc+1
elif begin == "cheat":
print ("Congratulations, " + name + "! You have completed BDN World 2D! You have earned the DUMB CHEATER badge! (WINNING THE ACTUAL GAME DOES SOMETHING ELSE)")
badges=badges+1
mode = "cheater"
print ("Ka-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
quit()
if begin == "credits":
print ("""
Something World, a Jiale.co side project. Work began Feb something 2016. Work is not copyrighted other than the parts from other things which originally were copyrighted.
IDE(s) used, in order of most used: Repl.It, Pythonanywhere, Ideone.
I owe a great thanks to Simon, who gave me the original idea and a bunch of ascii art. Also thanks for the error message idea!
Copyright for Simon contributed areas:
All credits for BDN ascii art goes to me!
""")
if begin != "yes" and begin != "no" and begin != "cheat" and begin != "command" and begin != "credits":
print ("""
File "<JCODE>", input, in entering
SelectionError: Invalid Command""")
print ("")
begin = "no"
badges=badges+1
continue
if commanding == True:
while True:
command = input(">")
if command == "/level":
ncommand = input("/level>")
print ("Sorry, /level based ncommand is not yet avalible. To test the bug-laden beta, please email me at <EMAIL>")
elif command == "/edit":
ncommand = input("/edit")
if ncommand == "/intf":
necommand = input("/edit>/intf>")
if necommand == "/badge":
print ("Editing badge count. Using this feature may cause the game to break, so use with caution.")
yesr=input("Type yes to confirm you want to edit badge count:")
if yesr == "yes":
                    badges = int(input("New badge count:"))  # update the badge counter itself
else:
break
elif ncommand == "/mode":
                necommand = input("/edit>/mode>")  # assignment, not comparison
mode = input("Type new mode:")
print ("Are you using c9 or another ide that doesn't support end="" sleeping?")
while True:
ide=input("[y/n]:")
if ide == 'y':
break
else:
A = """Welcome to Something World.
In battle, you can use the commands 'attack', 'defend', 'run', 'skip', and 'special'. Here's an example:
An a appeared:
a
attack=0
hp=5
defence=0
special=None
Your stats:
attack=5
defence=5
hp=15
special=None
What do you do?
>attack
You attacked. a has 0 hp.
You defeated a! You earned $0."""
        for x in A:
            print(x, end='', flush=True)
            time.sleep(0.1)
        break
attack=5
origattack=5
defence=5
origdefence=5
hp=15
orighp=15
special="None yet."
level=1
def match(money,gainmoney,attackern,ehp,eattack,edefence,especial,hp,attack,defence,special):
eallow = True
time.sleep(0.5)
print ("Stats:")
time.sleep(0.5)
print ("HP: " + str(ehp))
time.sleep(0.5)
print ("Attack: " + str(eattack))
time.sleep(0.5)
print ("Defence: " + str(edefence))
time.sleep(0.5)
print ("Special: " + especial)
time.sleep(0.5)
print ("")
time.sleep(0.5)
print ("Your stats:")
time.sleep(0.5)
print ("HP: " + str(hp))
time.sleep(0.5)
print ("Attack: " + str(attack))
time.sleep(0.5)
print ("Defence: " + str(defence))
time.sleep(0.5)
print ("Special: " + str(special))
time.sleep(0.5)
print ("")
print ("")
while hp > 0 or ehp > 0:
Turn = True
edefend = False
if Turn == True:
if hp <= 0:
time.sleep(1)
print ("Oh nose! You died. Care to try again?")
print ("""
__ __ _ _ _
\ \ / / | | (_) | |
\ \_/ / ___ _ _ __| | _ ___ __| |
\ / / _ \ | | | | / _` | | | / _ \ / _` |
| | | (_) | | |_| | | (_| | | | | __/ | (_| | _
|_| \___/ \__,_| \__,_| |_| \___| \__,_| (_)
""")
quit()
else:
print ("What do you do?")
while True:
action = input(">")
if action == "attack":
if edefend == False:
ehp = ehp-attack
time.sleep(0.5)
print ("You attacked. " + attackern + " now has " + str(ehp) + "hp.")
turn = False
break
else:
print ("Hey! You can't do that. The enemy is defended!")
continue
elif action == "defence" or action == "defend" or action == "defense":
if eattack > defence:
time.sleep(0.5)
print ("Defence failed.")
turn = False
break
else:
time.sleep(0.5)
print ("Defended!")
eallow = False
turn = False
break
elif action == "special":
print ("Under Construction! Please choose something else for now.")
continue
elif action == "run":
print ("Under Construction! I'm so sorry for this not being usable. But try your best and you will (hopefully) win!")
turn = False
break
elif action == "skip":
time.sleep(0.25)
print ("You skipped your turn.")
turn = False
break
elif action == "":
print ("""
File "<JCODE>", input, in attacking
HEY!Error: THATZ A BLANK LINE! THOZEZ R USELESSZ!""")
else:
print ("""
File "<JCODE>", input, in attacking
SelectionError: Invalid Command""")
#Opponent's Turn
number=random.randint(1,2)
if ehp <= 0:
money = money + gainmoney
print (attackern + " was defeated! You won and gained $" + str(gainmoney) + "!")
print ("You now have $" + str(money))
break
if number == 1:
hp = hp - eattack
print ("Enemy is thinking...")
time.sleep(2)
print ("Enemy attacked! Attack is " + str(eattack) +". You have " + str(hp) + " left!")
elif number == 2:
if edefence >= attack:
edefend = True
print ("Enemy is thinking...")
time.sleep(2)
print ("Enemy defended themselves! You cannot attack in your next turn!")
elif edefence < attack:
print ("Enemy is thinking...")
time.sleep(2)
print ("Enemy attempted defending and failed! Enemy did nothing.")
Turn = True
time.sleep(2.5)
print ("Game started.")
time.sleep(1)
print ("Player is " + name)
print ("if you would like a custom nickname, type y. If not, type n.")
loop = True
while loop == True:
yesorno=input(">")
if yesorno == "y":
print ("OK, what would you like me to call you?")
name = input(">")
print ("OK then! " + name + " it is!")
time.sleep(1)
loop = False
elif yesorno == "n":
print ("Sure! Moving on...")
loop = False
else:
print ("")
time.sleep(1.5)
print ("Battle scene controls: attack, defence, special, run, skip")
time.sleep(1)
time.sleep(1)
print ("")
print ("")
print ("")
time.sleep(2)
print ("----------------------------------------------------------")
time.sleep(1)
print ("You are a kid named " + name + ". One day you decided to walk around in the city.")
time.sleep(1)
print ("CITY: LEVEL 1, EASY")
time.sleep(1)
print ("You meet Mario!")
print ("""
______██████████████ "MY GAME IS TOO BORING! TIME TO RUIN SOMEONE ELSE'S GAME!"
-____██▓▓▓▓▓▓▓▓▓ M ▓████ "Actually, you are making the game more INTRESTING."
-__██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓██ "OH. WATEVA"
-__██████░░░░██░░██████
██░░░░████░░██░░░░░░░░██
██░░░░████░░░░██░░░░░░██
-__████░░░░░░██████████
-__██░░░░░░░░░░░░░██
_____██░░░░░░░░░██
-______██░░░░░░██
-____██▓▓████▓▓▓█
-_██▓▓▓▓▓▓████▓▓█
██▓▓▓▓▓▓███░░███░
-__██░░░░░░███████
-____██░░░░███████
-______██████████
-_____██▓▓▓▓▓▓▓▓▓██
-_____█████████████""")
attackern="Mario"
eattack=5
edefence=0
ehp=10
especial="None yet."
gainmoney=100
match(money,gainmoney,attackern,ehp,eattack,edefence,especial,hp,attack,defence,special)
print ("")
print ("")
print ("")
print ("")
print ("")
time.sleep(1.5)
print ("Hi! My name is GBot! You may know me from Simon's drawings. If you know me and my slightly unhelpful personality, don't worry, because my creator gave me some software improvements, and now I am as helpful as a thing programmed by an 11-year-old can be! *I wonder if that sounds helpful...")
time.sleep(1)
print ("Anyway, I will tell you about stuff when I feel you need help. Or, you can talk to me by typing GBot in the NexT MenU.")
print ("For GBot to work, it must be installed on your system. Would you like to install now?")
while True:
ha = input("</G>")
if ha == "yep" or ha == "yes":
print ("Nice! Wait a few seconds...")
time.sleep(5)
print ("GBot has been installed!")
break
else:
print ("""
File "<JCODE>", input, in attacking
GBotIsAnnoyingError: Sorry, I don't seem to understand. Please type either yes to install or yep to install.""")
print ("")
time.sleep(0.5)
print ("Thanks for installing me! I hope we get along well. You can request a feature or a bugfix at my creator's email: <EMAIL>. Enough, lets move on.")
time.sleep(2)
def nextmenu(items,itemf):
while True:
print ("""
""")
print ("NexT MenU: Please choose a choice:")
time.sleep(0.5)
print ("*continue*")
print ("*Battleground* Coming Soon")
print ("*Go Home* Unlocks at Level 5")
print ("*Items*")
print ("*Collection*")
print ("*Stats*")
print ("*GBot*")
option = input(">>")
if option == "Continue Trip" or option == "continue" or option == "continue trip":
print ("""
""")
break
elif option == "items" or option == "Items":
print ("""
""")
print ("ITEMS:")
print ("""
""")
RefillHealth="HealthMax: refills your health to max"
AttackDouble="SupaAttack: doubles your attack"
DefenceDouble="SupaDefender: doubles your defence"
if itemf == True:
print ("GBot: Welcome to the items menu! Here you can see your items and use them!")
print ("GBot: I've added some starter items for you.")
print ("""
""")
items.append(RefillHealth)
items.append(AttackDouble)
items.append(DefenceDouble)
lengthi=len(items)
lindex=0
for i in range (lengthi):
time.sleep(0.5)
print (items[lindex])
lindex=lindex+1
itemf=False
choicei=input("Items>")
if choicei == "HealthMax":
hp = orighp
print ("Your health is now up to max:" + str(hp))
time.sleep(1)
elif option == "stats" or option == "Stats":
print ("""
""")
else:
print ("""
File "<JCODE>", input, in attacking
MenuError: Menu could not read your command.""")
nextmenu(items,itemf)
print ("You come to a fork in the road.")
time.sleep(0.5)
time.sleep(0.5)
print ("What do you do?")
wd=input(">")
if wd == "l" or wd == "r" or wd == "left" or wd == "right":
print ("Oops there is only one path")
elif wd == "forward" or wd == "f":
print ("There's a fork in the road! Learn to read!")
elif wd == "get fork":
print ("You removed the fork and put it into your inventory.")
time.sleep(1)
inventory.append("Fork: seems to do nothing. Maybe good for stabbing people? NoT that I say that's a good idea...")
print ("Having taken care of the fork in the road, you decide to be bored. You soon realize being bored is very boring. Oh and also, you realise that it is getting dark, and that it might be a good idea to find a place to sleep.")
time.sleep(1)
print ("Whenever you are told that it might be a good idea to sleep, you can type sleep now, and your person will find a place to sleep. Unfortunately your person has bad judgement, so if a bed of nails is nearer than a soft, nice bed, he/she will choose the bed of nails.")
time.sleep(2.5)
print ("You are at a fork in the road again. This time though, it's the kind of fork that looks like this:")
time.sleep(1)
print ("""
\ \ | | / /
\ \| |/ /
\ /
| | |
|_|_| """)
print ("Type anything to continue")
read=input(">")
time.sleep(1)
print ("There are three paths: ONE------------------------------------------------------------")
time.sleep(1)
print ("You hear in the distance: THAT's topytighted you ididots you freeziong idiozt garbageonthestreet!d;l")
time.sleep(1.5)
print ("Anytay,doingpoiseaoshso9s8nfjdopooooooooooooooooooingspriopojgjdsgBDNBDNBDNBDNBDNDBNletzfightdabdn ow that hurt NOTGHT")
print ("Type anything to continue")
stuf=input(">")
time.sleep(1.5)
print ("")
print ("Err...")
time.sleep(1)
print ("Well...")
time.sleep(1)
print ("Um...")
time.sleep(1)
print ("")
time.sleep(1)
print ("""
File "<Sanity>", storyboard, in fork2, ?
RealisticError: Something happened, and we couldn't make sure the game is working properly. You can type exit to continue and disable the workingCheCK.""")
print ("01000001 01101110 00100000 01110101 01101110 01101011 01101110 01101111 01110111 01101110 00100000 01100101 01110010 01110010 01101111 01110010 00100000 01101111 01100011 01100011 01110101 01110010 01110010 01100101 01100100 00101110")
print ("Type anything if you're now bored.")
s=input(">")
time.sleep(2)
print ("I'm now bored.")
time.sleep(1)
print ("""
""")
print ("There are three paths: 1 leads to a house, 2 leads to a bed of nails with a health drink nearby, 3 leads to---What? Nothing?!?!?!")
print ("Type anything! JUST DO IT")
stuff=input(">")
print ("""
""")
print ("YoU FeEl YoUrEsElF gEtTiNg PuShEd InTo PaTh ThReE!?!?!?!")
print ("AhHhHhHhHhHhHhHhHhHhHHHHhhhhh h hh hhh h h???")
print ("Type anything if you're ok with that.")
st=input(">")
print ("""
""")
time.sleep(2)
''' Again, same here :( '''
print ("")
print ("'Is that a BDN?' you find yourself saying.")
time.sleep(1)
print ("'I'm sooooo a BDN!!!' replies the um... well... kinda-BDN")
time.sleep(1)
print ("You feel that you suddenly know that BDNs can't talk. Well at least not in english. Not that you didn't know that already...")
time.sleep(1)
print ("You find yourself yelling 'Hey! BDNs can't talk! WTF!!!'")
time.sleep(1)
print ("The kinda-BDN replies 'Well, I am a BDN, Jiale.co allowed me to test their BDN speaker. It enables you to ----- KA------------------------------------------------------------------------------------------------------------------")
time.sleep(1)
print ("...")
time.sleep(1)
print ("You hear 'Ka------------------------------------------------------------------------------------------'")
time.sleep(1)
print ("""
""")
time.sleep(1)
print ("GHhHhhHhHHHHAahahHhhHHHhAHHAHSAHDHAHSHhAHHASIHhoLH is a sound that fills the air.")
print ("")
time.sleep(1)
print ("The BDN attacks you!")
time.sleep(1)
attackern="BDN"
eattack=10
edefence=-1
ehp=35
especial="KA----------------------------"
gainmoney=100
print ("You feel a magical power indulge you.")
time.sleep(1)
print ("You momentarily vanish from the dimension.")
print ("""
+--------+ +---------------------------------------------------+
| +------+ | |
| +------+ | |
| | | |
+----------------------------------------+ |
| +--------+ | | |
| | | |
| | +---------------+ |
| | +-----|-----------|---|----------------------|----+
| +---------|------|-----|-----------|--+| | |
| +---------+------------|-----------|---|----------------------+ |
+----------------------------+| | | | |
| | +-------------||-----|---+ | | |
| | | || | | | | |
| | +--------+ || | | | | |
| +-----|--------|------|------|-----------+ | |
+------------|--------|------+|-+ | | | |
| | | | +--|---------------|------------------------+ |
| +------|-------|--|--|---+ | | |
| | | | | | | |
| | +--+--|---------------|------------------------+--+
+--------+ +---------------+----------------+
Your defence is now 1000
Your attack is now 10
Your mom is now dead
""")
defence=1000
attack=10
mom='dead'
print ("""
""")
gainmoney=500
match(money,gainmoney,attackern,ehp,eattack,edefence,especial,hp,attack,defence,special)
print ("""
""")
print ("Having defeated the BDN, you are satisfied.")
time.sleep(1)
print ("ACK! You feel the extra power you gained is starting to stick!")
time.sleep(1)
print ("")
print ("Your attack is now 10!!!")
origattack=10
attack=10
time.sleep(0.5)
print ("")
print ("You move forward. A cool breeze starts to blow.")
time.sleep(1)
``` |
{
"source": "JialianLee/open_spiel",
"score": 2
} |
#### File: JialianLee/open_spiel/noxfile.py
```python
import os
import sys
import sysconfig
import nox
def get_distutils_tempdir():
return (
f"temp.{sysconfig.get_platform()}-{sys.version_info[0]}.{sys.version_info[1]}"
)
@nox.session
def tests(session):
session.install("-r", "requirements.txt")
session.run("python", "setup.py", "build")
session.run("python", "setup.py", "install")
session.cd(os.path.join("build", get_distutils_tempdir()))
session.run(
"ctest", f"-j{4*os.cpu_count()}", "--output-on-failure", external=True)
```
#### File: algorithms/alpha_zero/alpha_zero.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
class AlphaZero(object):
"""AlphaZero implementation.
Follows the pseudocode AlphaZero implementation given in the paper
DOI:10.1126/science.aar6404.
"""
def __init__(self,
game,
bot,
model,
replay_buffer_capacity=int(1e6),
action_selection_transition=30):
"""AlphaZero constructor.
Args:
game: a pyspiel.Game object
bot: an MCTSBot object.
model: A Model.
replay_buffer_capacity: the size of the replay buffer in which the results
of self-play games are stored.
action_selection_transition: an integer representing the move number in a
game of self-play when greedy action selection is used. Before this,
actions are sampled from the MCTS policy.
Raises:
ValueError: if incorrect inputs are supplied.
"""
game_info = game.get_type()
if game.num_players() != 2:
raise ValueError("Game must be a 2-player game")
if game_info.chance_mode != pyspiel.GameType.ChanceMode.DETERMINISTIC:
raise ValueError("The game must be a Deterministic one, not {}".format(
          game_info.chance_mode))
if (game_info.information !=
pyspiel.GameType.Information.PERFECT_INFORMATION):
raise ValueError(
"The game must be a perfect information one, not {}".format(
              game_info.information))
if game_info.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("The game must be turn-based, not {}".format(
          game_info.dynamics))
if game_info.utility != pyspiel.GameType.Utility.ZERO_SUM:
      raise ValueError("The game must be 0-sum, not {}".format(game_info.utility))
if game.num_players() != 2:
raise ValueError("Game must have exactly 2 players.")
self.game = game
self.bot = bot
self.model = model
self.replay_buffer = dqn.ReplayBuffer(replay_buffer_capacity)
self.action_selection_transition = action_selection_transition
def update(self, num_training_epochs=10, batch_size=128, verbose=False):
"""Trains the neural net.
    Randomly samples data from the replay buffer. An update resets the optimizer
state.
Args:
num_training_epochs: An epoch represents one pass over the training data.
        The total number of training iterations this corresponds to is
num_training_epochs * len(replay_buffer)/batch_size.
batch_size: the number of examples sampled from the replay buffer and
used for each net training iteration.
verbose: whether to print training metrics during training.
Returns:
A list of length num_training_epochs. Each element of this list is
a Losses tuples, averaged per epoch.
"""
num_epoch_iters = math.ceil(len(self.replay_buffer) / float(batch_size))
losses = []
for epoch in range(num_training_epochs):
epoch_losses = []
for _ in range(num_epoch_iters):
train_data = self.replay_buffer.sample(batch_size)
epoch_losses.append(self.model.update(train_data))
epoch_losses = (sum(epoch_losses, model_lib.Losses(0, 0, 0)) /
len(epoch_losses))
losses.append(epoch_losses)
if verbose:
print("Epoch {}: {}".format(epoch, epoch_losses))
return losses
  def self_play(self, num_self_play_games=5000):
    """Uses the current net with MCTS to play full games against itself.
Args:
num_self_play_games: the number of self-play games to play using the
current net and MCTS.
"""
for _ in range(num_self_play_games):
self._self_play_single()
def _self_play_single(self):
"""Play a single game and add it to the replay buffer."""
state = self.game.new_initial_state()
trajectory = []
while not state.is_terminal():
root = self.bot.mcts_search(state)
target_policy = np.zeros(self.game.num_distinct_actions(),
dtype=np.float32)
for child in root.children:
target_policy[child.action] = child.explore_count
target_policy /= sum(target_policy)
trajectory.append(model_lib.TrainInput(
state.observation_tensor(), state.legal_actions_mask(),
target_policy, root.total_reward / root.explore_count))
action = self._select_action(root.children, len(trajectory))
state.apply_action(action)
terminal_rewards = state.rewards()
for state in trajectory:
self.replay_buffer.add(
model_lib.TrainInput(state.observation, state.legals_mask,
state.policy, terminal_rewards[0]))
def _select_action(self, children, game_history_len):
explore_counts = [(child.explore_count, child.action) for child in children]
if game_history_len < self.action_selection_transition:
probs = np_softmax(np.array([i[0] for i in explore_counts]))
action_index = np.random.choice(range(len(probs)), p=probs)
action = explore_counts[action_index][1]
else:
_, action = max(explore_counts)
return action
def np_softmax(logits):
max_logit = np.amax(logits, axis=-1, keepdims=True)
exp_logit = np.exp(logits - max_logit)
return exp_logit / np.sum(exp_logit, axis=-1, keepdims=True)
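# A small, hypothetical sanity check (added here, not part of the original file):
# the softmax of MCTS visit counts should form a valid probability distribution.
if __name__ == "__main__":
  _probs = np_softmax(np.array([10.0, 5.0, 1.0]))
  assert abs(_probs.sum() - 1.0) < 1e-6
  print(_probs)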
``` |
{
"source": "jialiasus2/AI-Studio-Contest-Quantum202103",
"score": 3
} |
#### File: AI-Studio-Contest-Quantum202103/work/ArchitectureSearch.py
```python
import time
import numpy as np
from tqdm import tqdm
from utils import RandomCNOT, RandomCNOTs
def SimulatedAnnealing(quantum_count, layer_count, solver, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score or np.random.randint(epochs)>epoch:
cnot_seed = cnot_layers
if sc>best_score:
best_score = sc
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('epoch %d, iter %d, Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(epoch, i, sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
def SequenceJitter(quantum_count, layer_count, solver, init_epochs=10, epochs=100, save_path=None, global_best_score=0):
#TODO:
best_score = 0
print('Init cnot seed.')
for _ in tqdm(range(init_epochs)):
cnot = RandomCNOTs(quantum_count, layer_count)
sc, model = solver(cnot)
if sc>best_score:
best_score = sc
cnot_seed = cnot
best_model = model
best_cnot = cnot
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
start_time = time.time()
for epoch in range(epochs):
for i in range(layer_count):
cnot_layers = cnot_seed.copy()
cnot_layers[i] = RandomCNOT(quantum_count)
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
cnot_seed = cnot_layers
best_model = model
best_cnot = cnot_layers
if save_path is not None and best_score>global_best_score:
with open(save_path, 'w') as f:
f.write(best_model)
print('Score = %g, best_score = %g, global_best_score = %g, time = %gs'%(sc, best_score, global_best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model, best_cnot
def RandomSearch(cnot_creater, solver, epochs=100, save_path=None):
    '''
    Random search.
    Parameters:
        cnot_creater: a callable that generates the CNOT layers
        solver: a callable that, given a network architecture, solves for the network parameters
        epochs: number of random-search rounds
        save_path: path to save the best result
    '''
best_score = 0
start_time = time.time()
for epoch in range(epochs):
cnot_layers = cnot_creater()
sc, model = solver(cnot_layers)
if sc>best_score:
best_score = sc
best_model = model
if save_path is not None:
with open(save_path, 'w') as f:
f.write(best_model)
print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time))
# print(best_model)
return best_score, best_model
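# Hypothetical usage sketch (not from the original repo): RandomSearch only needs a
# zero-argument cnot_creater and a solver returning a (score, model_text) pair.
# The qubit/layer counts and the dummy solver below are made-up placeholders.
if __name__ == '__main__':
    import random
    dummy_creater = lambda: RandomCNOTs(6, 4)                 # 6 qubits, 4 CNOT layers (assumed sizes)
    dummy_solver = lambda cnot: (random.random(), str(cnot))  # stand-in for the real training solver
    best_score, best_model = RandomSearch(dummy_creater, dummy_solver, epochs=5)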
``` |
{
"source": "jialiasus2/AI-Studio-Contest-Remote202110",
"score": 2
} |
#### File: AI-Studio-Contest-Remote202110/work/predict.py
```python
import os
import cv2
from tqdm import tqdm
import numpy as np
import paddle
from my_dataset import MyDataset, save_img
from my_model import make_model
from configs import MODEL_PATH, TEST_DIR, SAVE_DIR, BATCH_SIZE
def post_process(result):
'''
result: 2*H*W
'''
if result.shape[0]<=3:
res = cv2.GaussianBlur(result.transpose([1,2,0]),(5,5),1).transpose([2, 0, 1])
else:
res = cv2.GaussianBlur(result,(5,5),1)
return res
if __name__=='__main__':
test_ids = [f[:-4] for f in os.listdir(TEST_DIR) if f.endswith('.jpg')]
test_dataset = MyDataset(test_ids, TEST_DIR, None, argument=False)
test_loader = paddle.io.DataLoader(test_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=False)
print(len(test_dataset))
model = make_model()
params = paddle.load(os.path.join(MODEL_PATH, 'model.pdparams'))
print('Load model')
model.set_state_dict(params)
with paddle.no_grad():
model.eval()
fi = 0
for X in tqdm(test_loader):
Y = model(X)[0]
Y = paddle.argmax(Y, axis=1).numpy().astype(np.uint8)
for y in Y:
save_path = os.path.join(SAVE_DIR, test_ids[fi]+'.png')
save_img(save_path, y)
fi += 1
# os.system('zip -qr result.zip result/')
pass
``` |
{
"source": "jiali-ms/JLM",
"score": 3
} |
#### File: JLM/decoder/model_ngram.py
```python
import pickle
import numpy as np
import os
import json
from random import shuffle
import math
import sys
sys.path.append('..')
from config import data_path, experiment_path
class NGramModel():
    """N-gram model that takes an ARPA file as input and provides probabilities given the previous words.
    The ARPA parsing code is mainly from
https://raw.githubusercontent.com/yohokuno/neural_ime/master/decode_ngram.py
"""
def __init__(self, ngram_file='lm3', ngram_order=3):
self.ngram_order = ngram_order
self.model = self.parse_srilm(os.path.join(data_path, ngram_file))
def parse_ngram(self, ngram):
for word in ngram.split(' '):
if word == '<s>':
yield '<eos>'
elif word == '</s>':
yield '<eos>'
else:
yield word
def parse_srilm(self, file):
print('{} loaded'.format(file))
ngrams = {}
with open(file, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
line = line.rstrip('\n')
fields = line.split('\t', 2)
if len(fields) < 2:
continue
if len(fields) == 2:
logprob, ngram = fields
backoff = None
elif len(fields) > 2:
logprob, ngram, backoff = fields
backoff = -math.log(10 ** float(backoff))
cost = -math.log(10 ** float(logprob))
ngram = tuple(self.parse_ngram(ngram))
ngrams[ngram] = (cost, backoff)
print('{} ngrams loaded'.format(len(ngrams)))
return ngrams
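    # Example of the tab-separated ARPA-style line this parser expects (illustrative only):
    #   -1.234<TAB>word1 word2<TAB>-0.567
    # i.e. a base-10 log probability, the n-gram itself, and an optional backoff weight.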
def predict(self, words, debug=False):
if type(words) is list:
words = tuple(words[-self.ngram_order:])
if words in self.model:
cost, _ = self.model[words]
if debug:
print(words)
return cost
if len(words) == 1:
return 100.0
return self.predict(words[1:], debug)
def evaluate(self, words, debug=False):
prob = []
words = ['<eos>'] + words
for i in range(2, len(words)+1):
prob.append(self.predict(words[:i], debug))
if debug:
print(prob[-1])
return sum(prob)
if __name__ == "__main__":
# test the model
model = NGramModel(ngram_file='lm3', ngram_order=2)
print(model.evaluate(['今日/キョー/名詞-普通名詞-副詞可能', 'は/ワ/助詞-係助詞', 'いい/イー/形容詞-非自立可能', '天気/テンキ/名詞-普通名詞-一般', 'です/デス/助動詞'], debug=True))
``` |
{
"source": "jiali-ms/seg",
"score": 3
} |
#### File: jiali-ms/seg/lexicon_dp.py
```python
from collections import defaultdict, deque
import math
import time
import re
re_dict = re.compile('^(.+?)( [0-9]+)?( [a-z]+)?$', re.U)
re_han = re.compile("([\u4E00-\u9FD5]+)", re.U)
re_skip = re.compile("[^a-zA-Z0-9+#\n]", re.U)
class Lexicon():
def __init__(self, dict_path):
"""
Init lexicon with dict path.
Format is 'word freq pos' with space separated. Note that we don't handle pos so far.
:param dict_path:
"""
self.total = 0
self.dict = {}
with open(dict_path, 'r', encoding='utf-8')as f:
for line in f:
word, freq, tag = re_dict.match(line).groups()
if freq is not None:
freq = freq.strip()
# give a minimal 1 count for rare words without freq as smoothing
freq = max(int(freq), 1)
self.dict[word] = freq
self.total += freq
# prefix but not yet a word will be 0
# mimic of prefix check of trie for acceleration
for i in range(len(word)):
sub_word = word[:i + 1]
if sub_word not in self.dict:
self.dict[sub_word] = 0
def check_prob(self, word):
"""
Return prob in neg log format.
:param word:
:return: 0 for prefix, neg log for word. Otherwise None
"""
if word in self.dict:
freq = self.dict[word]
            if freq != 0:
return -math.log(freq/self.total)
else:
return 0
else:
return None
def has_prefix(self, word):
return word in self.dict
def is_word(self, word):
return word in self.dict and self.dict[word] != 0
class Decoder():
def __init__(self):
# model will provide probability
self.lexicon = Lexicon('user_dict.txt')
def decode(self, input):
"""
decode the input sentence.
This method cut input sentence into blocks first with non-chinese symbols as natural boundary.
It is vital for speed up. In local experiment, 50x faster.
:param input:
:return:
"""
blocks = re_han.split(input)
for block in blocks:
if not block:
continue
if re_han.match(block):
for word in self.decode_(block):
yield word
else:
if block == '':
continue
else:
matched = False
tmp = re_skip.split(block)
for x in tmp:
if re_skip.match(x):
matched = True
yield x
if not matched:
yield block
def decode_(self, input):
"""
use dp to find best path.
This method decode with backward lookup. Notice that forward lookup is also a choice.
:param input: The raw input sequence
:return: Best path as list of words
"""
# build frames
# frame is backward lookup with start_idx to key as valid word
frames = defaultdict(list)
input_size = len(input)
for s in range(input_size):
e = s + 1
while self.lexicon.has_prefix(input[s:e]) and e <= input_size:
if self.lexicon.is_word(input[s:e]):
frames[e].append(s)
e += 1
# in case of oov symbols, segment to char
            if (s + 1) not in frames:
                frames[s + 1] = [s]
# decode best path with simple dp from start
best_path = {}
best_path[0] = (0, 0)
for i in range(1, input_size + 1):
for s in frames[i]:
word = input[s:i]
prob = self.lexicon.check_prob(word)
neg_log = prob + best_path[s][1]
if i not in best_path or neg_log < best_path[i][1]:
best_path[i] = (s, neg_log)
# parse results
result = deque()
idx = input_size
while idx > 0:
s = best_path[idx][0]
result.appendleft(input[s:idx])
idx = s
for x in result:
yield x
if __name__ == "__main__":
decoder = Decoder()
start_time = time.time()
result = decoder.decode('结婚的和尚未结婚的,都是很nice cool的“靠谱人士”')
end_time = time.time()
print(' '.join(result))
print('{} s'.format(end_time - start_time))
``` |
{
"source": "jialing3/corner_cases",
"score": 4
} |
#### File: corner_cases/algo_fun/expression.py
```python
class Expression:
def __init__(self, str1, str2, str3):
self.strs = [str1, str2, str3]
def getChars(self):
return set(self.strs[0]) | set(self.strs[1]) | set(self.strs[2])
def char_to_num(self, m, str):
new_str = []
        for c in str:  # str is a word such as 'ONE'
            new_str.append(str(m.get(c)))
        return int(''.join(new_str))
def evaluate(self, m): # m = {'a': 1, 'b': 2}
return self.char_to_num(m, self.strs[0]) + self.char_to_num(m, self.strs[1]) == self.char_to_num(m, self.strs[2])
# Map<Char, Int> solve(Expression e)
e = Expression('ONE', 'ONE', 'TWO')
def solve(e):
chars = e.getChars()
n = len(chars)
return walk([str(x) for x in range(10)], [], n, e, chars)
def walk(remaining, taken, n, e, chars): # n is the total len(getChars)
if len(taken) == n:
        m = dict([(c, num) for c, num in zip(chars, taken)])
if e.evaluate(m):
return m
else:
for ind in range(len(remaining)):
tmp = walk(remaining[:ind] + remaining[ind+1:], taken + [remaining[ind]], n, e, chars)
if tmp is not None:
return tmp
return None
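# Example (added note): for ONE + ONE = TWO one valid assignment is
# O=2, N=3, E=1, T=4, W=6, since 231 + 231 = 462; solve(e) searches for such a mapping.
# print(solve(e))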
```
#### File: corner_cases/Relue/Eu55.py
```python
def isPanlidrome(num):
num = str(num)
return num[::-1] == num
def addReverse(num):
rev = int(str(num)[::-1])
return num + rev
def isLychrel(num):
counter = 50
while counter > 0:
num = addReverse(num)
if isPanlidrome(num): return False
counter -= 1
return True
lst = [x for x in range(1, 10000) if isLychrel(x)]
print len(lst)
print 349, 349 in lst, '\n', 196, 196 in lst, '\n', 4994, 4994 in lst
```
#### File: corner_cases/Relue/Eu60.py
```python
from collections import defaultdict
def is_prime(n):
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n ** .5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True
concat = lambda x, y: int(''.join(str(x) + str(y)))
def concat_check(x, y):
return all([is_prime(concat(x, y)), is_prime(concat(y, x))])
nrange = 1e2
nrange_new=1e4 # guaranteed as 1e4*5 > 26033
lst = range(2, int(nrange))
prime_lst = list(set(lst).difference([x * y for x in lst for y in lst if x * y <= max(lst)]))
prime_lst += [x for x in range(int(nrange), int(nrange_new) + 1) if is_prime(x)]
prime_pair = defaultdict(list)
for x in prime_lst:
for y in prime_lst:
if y > x and concat_check(x, y):
prime_pair[x].append(y)
lst_of_five = []
for key1 in sorted(prime_pair):
new_lst1 = prime_pair[key1]
if len(new_lst1) >= 4:
for key2 in new_lst1:
new_lst2 = set(new_lst1) & set(prime_pair[key2])
if len(new_lst2) >= 3:
for key3 in sorted(new_lst2):
new_lst3 = new_lst2 & set(prime_pair[key3])
if len(new_lst3) >= 2:
for key4 in sorted(new_lst3):
new_lst4 = new_lst3 & set(prime_pair[key4])
if len(new_lst4) >= 1:
lst_of_five.append( [key1, key2, key3, key4, min(new_lst4)] )
```
#### File: corner_cases/Relue/Eu61.py
```python
triangle = lambda n: n*(n+1)/2
square = lambda n: n**2
pentagonal = lambda n: n*(3*n-1)/2
hexagonal = lambda n: n*(2*n-1)
heptagonal = lambda n: n*(5*n-3)/2
octagonal = lambda n: n*(3*n-2)
digit_split = lambda num: (num / 100, num % 100)
list3 = [digit_split(triangle(x)) for x in range(45, 141)]
list4 = [digit_split(square(x)) for x in range(32, 100)]
list5 = [digit_split(pentagonal(x)) for x in range(26, 82)]
list6 = [digit_split(hexagonal(x)) for x in range(23, 71)]
list7 = [digit_split(heptagonal(x)) for x in range(21, 64)]
list8 = [digit_split(octagonal(x)) for x in range(19, 59)]
AB_3, CD_3 = set([x[0] for x in list3]), set([x[1] for x in list3])
AB_4, CD_4 = set([x[0] for x in list4]), set([x[1] for x in list4])
AB_5, CD_5 = set([x[0] for x in list5]), set([x[1] for x in list5])
AB_6, CD_6 = set([x[0] for x in list6]), set([x[1] for x in list6])
AB_7, CD_7 = set([x[0] for x in list7]), set([x[1] for x in list7])
AB_8, CD_8 = set([x[0] for x in list8]), set([x[1] for x in list8])
def intersect_join(s1, s2, s3, s4, s5, s6):
return s1 & (s2 | s3 | s4 | s5 | s6)
AB_3 = intersect_join(AB_3, CD_4, CD_5, CD_6, CD_7, CD_8)
CD_3 = intersect_join(CD_3, AB_4, AB_5, AB_6, AB_7, AB_8)
AB_4 = intersect_join(AB_4, CD_3, CD_5, CD_6, CD_7, CD_8)
CD_4 = intersect_join(CD_4, AB_3, AB_5, AB_6, AB_7, AB_8)
AB_5 = intersect_join(AB_5, CD_4, CD_3, CD_6, CD_7, CD_8)
CD_5 = intersect_join(CD_5, AB_4, AB_3, AB_6, AB_7, AB_8)
AB_6 = intersect_join(AB_6, CD_4, CD_5, CD_3, CD_7, CD_8)
CD_6 = intersect_join(CD_6, AB_4, AB_5, AB_3, AB_7, AB_8)
AB_7 = intersect_join(AB_7, CD_4, CD_5, CD_6, CD_3, CD_8)
CD_7 = intersect_join(CD_7, AB_4, AB_5, AB_6, AB_3, AB_8)
AB_8 = intersect_join(AB_8, CD_4, CD_5, CD_6, CD_7, CD_3)
CD_8 = intersect_join(CD_8, AB_4, AB_5, AB_6, AB_7, AB_3)
list3 = [x for x in list3 if x[0] in AB_3 and x[1] in CD_3]
list4 = [x for x in list4 if x[0] in AB_4 and x[1] in CD_4]
list5 = [x for x in list5 if x[0] in AB_5 and x[1] in CD_5]
list6 = [x for x in list6 if x[0] in AB_6 and x[1] in CD_6]
list7 = [x for x in list7 if x[0] in AB_7 and x[1] in CD_7]
list8 = [x for x in list8 if x[0] in AB_8 and x[1] in CD_8]
ans = []
for ind1, lst1 in enumerate([list3, list4, list5, list6, list7, list8]):
ind_list = [3, 4, 5, 6, 7, 8]
list_used = set([3, 4, 5, 6, 7, 8])
for AB1, CD1 in lst1:
list_used = list_used ^ set([8])
for ind2, lst2 in enumerate([list3, list4, list5, list6, list7, list8]):
if ind1 != ind2:
for AB2, CD2 in lst2:
if CD1 == AB2:
list_used = list_used ^ set([ind_list[ind2]])
for ind3, lst3 in enumerate([list3, list4, list5, list6, list7, list8]):
if ind1 != ind3 and ind2 != ind3:
for AB3, CD3 in lst3:
if CD2 == AB3:
list_used = list_used ^ set([ind_list[ind3]])
for ind4, lst4 in enumerate([list3, list4, list5, list6, list7, list8]):
if ind1 != ind4 and ind2 != ind4 and ind3 != ind4:
for AB4, CD4 in lst4:
if CD3 == AB4:
list_used = list_used ^ set([ind_list[ind4]])
for ind5, lst5 in enumerate([list3, list4, list5, list6, list7, list8]):
if ind1 != ind5 and ind2 != ind5 and ind3 != ind5 and ind4 != ind5:
for AB5, CD5 in lst5:
if CD4 == AB5:
list_used = list_used ^ set([ind_list[ind5]])
for ind6, lst6 in enumerate([list3, list4, list5, list6, list7, list8]):
if ind1 != ind6 and ind2 != ind6 and ind3 != ind6 and ind4 != ind6 and ind5 != ind6:
for AB6, CD6 in lst6:
if CD5 == AB6 and CD6 == AB1:
list_used = list_used ^ set([ind_list[ind6]])
ans.append( [AB1*100+CD1, ind_list[ind1], AB2*100+CD2, ind_list[ind2], AB3*100+CD3, ind_list[ind3], AB4*100+CD4, ind_list[ind4], AB5*100+CD5, ind_list[ind5], AB6*100+CD6, ind_list[ind6]])
from random import randint
def recurse(whole_lst=[list3, list4, list5, list6, list7, list8], inds=[3, 4, 5, 6, 7, 8], CD_old=None, AB_new=None):
if len(inds) > 1 and CD_old is None:
for tmp_ind in range(len(inds)):
inds2 = inds[:]
whole_lst2 = whole_lst[:]
ind = inds2.pop(tmp_ind)
lst = whole_lst2.pop(tmp_ind)
for AB, CD in lst:
AB_new = AB
tmp = recurse(whole_lst2, inds2, CD, AB_new)
if tmp:
return tmp + [AB*100+CD, ind]
elif len(inds) > 1 and CD_old is not None:
for tmp_ind in range(len(inds)):
inds2 = inds[:]
whole_lst2 = whole_lst[:]
ind = inds2.pop(tmp_ind)
lst = whole_lst2.pop(tmp_ind)
for AB, CD in lst:
if CD_old == AB:
tmp = recurse(whole_lst2, inds2, CD, AB_new)
if tmp:
return tmp + [AB*100+CD, ind]
elif len(inds) == 1:
ind = inds[0]
lst = whole_lst[0]
for AB, CD in lst:
if CD_old == AB and CD == AB_new:
return [AB*100+CD, ind]
print recurse()
```
#### File: corner_cases/Relue/Eu64.py
```python
from math import sqrt
def cycle(n, print_flg=0):
count = 0
a, b, c = int(sqrt(n)), 1, int(sqrt(n))
d = []
rep = set()
while True:
if print_flg:
print a, ',', b,'/ ( sqrt(', n, ') -', c, ')'
d.append((a,b,c))
expression = b / (sqrt(n) - c)
a = int(expression)
b = (n - c ** 2) / b
c = b * a - c
if (a,b,c) in d[:-1]:
initial_ind = d.index((a,b,c))
if (a,b,c) in d[initial_ind+1:]:
cycle_length = d.index((a,b,c), initial_ind + 1) - initial_ind
if d[initial_ind:initial_ind+cycle_length+1] == d[initial_ind+cycle_length:initial_ind+2*cycle_length+1]:
return cycle_length
count += 1
print n, d
def euler64():
counter = 0
for i in range(2, 10001):
if not sqrt(i).is_integer():
counter += (cycle(i) % 2)
#print i
print counter
def play():
list1 = []
for i in range(2, 300):
if not sqrt(i).is_integer():
if cycle(i) % 2:
if not sqrt(i-1).is_integer():
list1.append(i)
print list1
def test1():
set1 = set()
list1 = [i**2+1 for i in range(1,1000)]
for i in range(len(list1)-1):
for j in range(i):
if not list1[i] % list1[j] and not sqrt(list1[i] / list1[j]).is_integer():
set1.add(list1[i] / list1[j])
#set1.add((list1[i] / list1[j], list1[i], list1[j]))
set1 = set1 - set(list1)# - set(test2())
print sorted(set1)[:30]
def test2():
set1 = set()
list1 = [i**2+1 for i in range(1,100)]
for i in range(len(list1)-1):
for j in range(i):
set1.add(list1[i] * list1[j])
#set1.add((list1[i] * list1[j], list1[i], list1[j]))
set1 = set1 - set(list1)
return sorted(set1)[:30]
#1 2 1 2 4 2 1 2 2 5, 10000
#b_tmp = 23 - c ** 2
#b = b_tmp / b if b_tmp % b == 0 else b_tmp
#4 , 1 / (sqrt(23) - 4)
# = (sqrt(23) + 4) / 7
# = 1 + (sqrt(23) - 3) / 7
#1, 7 / (sqrt(23) - 3)
# = 7 * (sqrt(23) + 3) / 14
# = (sqrt(23) + 3) / 2
# = 3 + (sqrt(23) - 3) / 2
#3, 2 / (sqrt(23) - 3)
# = 2 * (sqrt(23) + 3) / 14
# = (sqrt(23) + 3) / 7
# = 1 + (sqrt(23) - 4) / 7
#1, 7 / (sqrt(23) - 4)
# = 7 * (sqrt(23) + 4) / 7
# = (sqrt(23) + 4)
# = 8 + (sqrt(23) - 8)
#8, 1 / (sqrt(23) - 8)
'''
sqrt(2) = 1 + sqrt(2) - 1
1, 1 / (sqrt(2) - 1)
= (sqrt(2) + 1) / 1
= 2 + (sqrt(2) - 1) / 1
2, 1 / (sqrt(2) - 1)
= (sqrt(2) + 1) / 1
= 2 + (sqrt(2) - 1) / 1
sqrt(3) = 1 + sqrt(3) - 1
1, 1 / (sqrt(3) - 1)
= (sqrt(3) + 1) / 2
= 1 + (sqrt(3) - 1) / 2
1, 2 / (sqrt(3) - 1)
= 2 * (sqrt(3) + 1) / 2
= sqrt(3) + 1
= 2 + (sqrt(3) - 1)
2, 1 / (sqrt(3) - 1)
= (sqrt(3) + 1) / 2
sqrt(7) = 2 + sqrt(7) - 2
2, 1 / (sqrt(7) - 2)
= (sqrt(7) + 2) / 3
= 1 + (sqrt(7) - 1) / 3
1, 3 / (sqrt(7) - 1)
= 3 * (sqrt(7) + 1) / 6
= (sqrt(7) + 1) / 2
= 1 + (sqrt(7) - 1) / 2
1, 2 / (sqrt(7) - 1)
= 2 * (sqrt(7) + 1) / 6
= (sqrt(7) + 1) / 3
= 1 + (sqrt(7) - 2) / 3
1, 3 / (sqrt(7) - 2)
= 3 * (sqrt(7) + 2) / 3
= sqrt(7) + 2
= 4 + (sqrt(7) - 2)
4, 1 / (sqrt(7) - 2)
1
1
1
4
'''
```
#### File: corner_cases/Relue/Eu77.py
```python
class Solution:
def __init__(self):
self.memo = {}
def break_down(self, n, to_use):
if type(n) != int or type(to_use) != list:
return 0
else:
if len(to_use) == 0:
return 1 if n == 0 else 0
elif len(to_use) == 1 and to_use[0] == n:
return 1
elif (n, tuple(to_use)) in self.memo:
return self.memo[n, tuple(to_use)]
else:
not_used = self.break_down(n, to_use[:-1])
used = self.break_down(n - to_use[-1], list(filter(lambda x: x <= n - to_use[-1], to_use)))
self.memo[n, tuple(to_use)] = not_used + used
#print(n, to_use[-1], not_used, used)
return not_used + used
def sieve_prime(self, n):
non_prime = set()
for i in range(2, n):
for j in range(2, n):
non_prime.add(i * j)
return [x for x in range(2, n + 1) if x not in non_prime]
def break_down_wrapper(self, n):
return self.break_down(n, self.sieve_prime(n))
sol = Solution()
assert sol.break_down_wrapper(10) == 5
assert sol.sieve_prime(100) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
number_of_ways = []
for n in range(10, 100):
number_of_ways.append(sol.break_down_wrapper(n))
if number_of_ways[-1] > 5000:
print(n, number_of_ways)
break
```
#### File: corner_cases/Relue/Eu79.py
```python
class Solution:
def __init__(self):
self.keylog = self.get_keylog()
self.first, self.last, self.all_nums, self.count_of_n1_before_n2 = self.get_first_last_and_counts()
self.guess = [self.first, self.last]
def check(self):
for k in self.keylog:
inds = []
for num in k:
if num in self.guess:
inds.append(self.guess.index(num))
else:
print(k)
break
if sorted(inds) != inds:
print(k)
else:
print('check passed')
def make_a_guess(self):
last = self.guess.pop()
candidates = self.all_nums
for i in range(6):
candidates.remove(self.guess[i])
flags = {} # flags[n1][n2] indicates whether n1 can go before n2
for n1 in candidates:
if n1 != last:
flags[n1] = {}
for n2 in candidates:
if n1 != n2 and last not in (n1, n2):
n1_before_n2 = self.count_of_n1_before_n2[n1].get(n2, 0)
n2_before_n1 = self.count_of_n1_before_n2[n2].get(n1, 0)
flags[n1][n2] = True if n1_before_n2 >= n2_before_n1 else False
selected = list(filter(lambda x: all(x[1].values()), flags.items()))[0][0]
self.guess.append(selected)
self.guess.append(last)
print(''.join(self.guess))
return
def get_keylog(self):
keylog = set()
with open('p079_keylog.txt') as f:
for row in f.readlines():
row = tuple(list(row.strip()))
keylog.add(row)
return sorted(keylog)
def get_first_last_and_counts(self):
count_of_n1_before_n2 = {}
non_first = set()
non_last = set()
all_nums = set()
for k in self.keylog:
for n1, n2 in zip(k[:-1], k[1:]):
if n1 not in count_of_n1_before_n2:
count_of_n1_before_n2[n1] = {}
if n2 not in count_of_n1_before_n2[n1]:
count_of_n1_before_n2[n1][n2] = 0
count_of_n1_before_n2[n1][n2] += 1
for n in k[1:]:
non_first.add(n)
for n in k[:-1]:
non_last.add(n)
for n in k:
all_nums.add(n)
first = (all_nums - non_first).pop()
last = (all_nums - non_last).pop()
return first, last, all_nums, count_of_n1_before_n2
if __name__ == '__main__':
sol = Solution()
sol.make_a_guess()
sol.check()
```
#### File: corner_cases/Relue/Eu86.py
```python
from math import sqrt
'''
x = 6
y = 5
z = 3
# 0 <= a <= x
dist1 = sqrt((x - a) ** 2 + y ** 2) + sqrt(a ** 2 + z ** 2)
# d/da dist1 == 0
a = z * x / (z + y)
x - a = x * y / (z + y)
sqrt((x - a) ** 2 + y ** 2) = y / (z + y) * sqrt((z + y) ** 2 + x ** 2)
sqrt(a ** 2 + z ** 2) = z / (z + y) * sqrt( x ** 2 + (z + y) ** 2)
dist1 = sqrt((z + y) ** 2 + x ** 2)
# 0 <= a <= y
dist2 = sqrt((y - a) ** 2 + x ** 2) + sqrt(a ** 2 + z ** 2)
# d/da dist2 == 0
a = z * y / (z + x)
dist2 = sqrt((z + x) ** 2 + y ** 2)
# dist2 ** 2 - dist1 ** 2 = 2 * z * (x - y) > 0
# 0 <= a <= z
dist3 = sqrt((z - a) ** 2 + y ** 2) + sqrt(a ** 2 + x ** 2)
# d/da dist3 == 0
a = x * z / (x + y)
dist3 = sqrt((x + y) ** 2 + z ** 2)
# dist3 ** 2 - dist2 ** 2 = 2 * x * (y - z) > 0
start with z <= y <= x
first, build a list of perfect square's
a ** 2 + b ** 2 = c ** 2 with a <= b < c
then either 1) z + y = a, x = b
or 2) z + y = b, x = a
'''
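# Quick numeric check of the derivation above with x=6, y=5, z=3 (the example it starts from):
# dist1 = sqrt((3+5)**2 + 6**2) = 10.0, dist2 = sqrt((3+6)**2 + 5**2) ~ 10.30,
# dist3 = sqrt((6+5)**2 + 3**2) ~ 11.40, so dist1 (unfolding across the z+y face) is the shortest.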
def perfect_squares_sieve2(limit):
root2 = 2 ** 0.5
a = set()
for i in range(3, limit + 1):
for j in range(int(i / root2) + 1, i):
k = int((i ** 2 - j ** 2) ** 0.5)
if (k, j) not in a:
if k ** 2 + j ** 2 == i ** 2:
a.add((k, j))
for l in range(2, limit // i + 1):
a.add((k * l, j * l))
return a
perfect_squares = perfect_squares_sieve2(5000)
'''
perfect_squares = {(3, 4): 5}
for c in range(1, 2000):
for b in range(1, c):
for a in range(1, b + 1):
if a ** 2 + b ** 2 == c ** 2:
perfect_squares[(a, b)] = c
'''
def count_of_cuboids_given_M(M, c=set()):
for x in range(1, M + 1):
for y in range(1, x + 1):
for z in range(1, y + 1):
if (x, y, z) not in c:
a = z + y
b = x
if (a, b) in perfect_squares:
for d in range(1, M // x + 1):
c.add((x * d, y * d, z * d))
a = x
b = z + y
if (a, b) in perfect_squares:
for d in range(1, M // x + 1):
c.add((x * d, y * d, z * d))
l = len(list(filter(lambda k: k[0] <= x, c)))
if l > 1000000:
print(x, l)
break
if x % 100 == 0:
print(x, l)
return c
c = count_of_cuboids_given_M(99, c=set())
assert(len(c) == 1975)
c = count_of_cuboids_given_M(100, c=c)
assert(len(c) == 2060)
'''
# determine the upper-bound of the perfect_squares to calculate
c = set()
i = 1
i2o = {}
while i < 101:
c = count_of_cuboids_given_M(i, c)
i2o[i] = len(c)
i += 1
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log2(x[2:]), np.log2(y[2:]))
print(slope, intercept, r_value**2, p_value, std_err)
# np.log2(y) = intercept + slope * np.log2(x)
# np.log2(1M) = 20
# x = 2 ** 11 = 2K
plt.plot(np.log2(x), np.log2(y), '.', np.log2(x), intercept + slope * np.log2(x), '.'); plt.show()
'''
i = 5000
c = count_of_cuboids_given_M(i, set())
# needs further optimization https://en.wikipedia.org/wiki/Pythagorean_triple
```
#### File: corner_cases/stats_fun/running_total_die_toss.py
```python
from pprint import pprint
import matplotlib.pyplot as plt
def memoize(f):
cache = {}
def g(x):
if x not in cache:
cache[x] = f(x)
return cache[x]
return g
def prob_total_equal_n(n):
if n < 0:
return 0
elif n == 0:
return 1
else:
return 1. / 6 * sum(prob_total_equal_n(n - i) for i in range(1, 7))
prob_total_equal_n = memoize(prob_total_equal_n)
if __name__ == '__main__':
P_n = [prob_total_equal_n(j) for j in range(100)]
pprint(P_n)
plt.plot(range(1, 100), P_n[1:], '.:', alpha=.5) # plateaus at around n = 20
plt.show()
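    # Added note: by renewal theory the plateau value is 1 / E[single roll] = 1 / 3.5 = 2/7 ~ 0.2857,
    # which is what the curve settles to for large n.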
``` |
{
"source": "jialingt/dowel",
"score": 3
} |
#### File: src/dowel/simple_outputs.py
```python
import abc
import datetime
import os
import sys
import dateutil.tz
from dowel import LogOutput
from dowel.tabular_input import TabularInput
from dowel.utils import mkdir_p
class StdOutput(LogOutput):
"""Standard console output for the logger.
:param with_timestamp: Whether to log a timestamp before non-tabular data.
"""
def __init__(self, with_timestamp=True):
self._with_timestamp = with_timestamp
@property
def types_accepted(self):
"""Accept str and TabularInput objects."""
return (str, TabularInput)
def record(self, data, prefix=''):
"""Log data to console."""
if isinstance(data, str):
out = prefix + data
if self._with_timestamp:
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
out = '%s | %s' % (timestamp, out)
elif isinstance(data, TabularInput):
out = str(data)
data.mark_str()
else:
raise ValueError('Unacceptable type')
print(out)
def dump(self, step=None):
"""Flush data to standard output stream."""
sys.stdout.flush()
class FileOutput(LogOutput, metaclass=abc.ABCMeta):
"""File output abstract class for logger.
:param file_name: The file this output should log to.
:param mode: File open mode ('a', 'w', etc).
"""
def __init__(self, file_name, mode='w'):
mkdir_p(os.path.dirname(file_name))
# Open the log file in child class
self._log_file = open(file_name, mode)
def close(self):
"""Close any files used by the output."""
if self._log_file and not self._log_file.closed:
self._log_file.close()
def dump(self, step=None):
"""Flush data to log file."""
self._log_file.flush()
class TextOutput(FileOutput):
"""Text file output for logger.
:param file_name: The file this output should log to.
:param with_timestamp: Whether to log a timestamp before the data.
"""
def __init__(self, file_name, with_timestamp=True):
super().__init__(file_name, 'a')
self._with_timestamp = with_timestamp
self._delimiter = ' | '
@property
    def types_accepted(self):
        """Accept str and TabularInput objects."""
return (str, TabularInput)
def record(self, data, prefix=''):
"""Log data to text file."""
if isinstance(data, str):
out = prefix + data
if self._with_timestamp:
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
out = '%s | %s' % (timestamp, out)
elif isinstance(data, TabularInput):
out = str(data)
data.mark_str()
else:
raise ValueError('Unacceptable type.')
self._log_file.write(out + '\n')
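# Minimal usage sketch (not part of dowel itself; the file path below is made up):
#   output = TextOutput('/tmp/progress.txt')
#   output.record('starting run')
#   output.dump()
#   output.close()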
``` |
{
"source": "jialinjiao/udacity_CarND",
"score": 3
} |
#### File: udacity_CarND/CarND-Behavioral-Cloning-P3/model1.py
```python
import numpy as np
import csv
import cv2
import sklearn
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, MaxPooling2D, Dropout
from keras.layers.convolutional import Convolution2D
# Constants
data_path = "data/"
image_path = data_path + "IMG/"
left_image_angle_correction = 0.20
right_image_angle_correction = -0.20
csv_data = []
processed_csv_data = []
# Reading the content of csv file
with open(data_path + 'driving_log.csv') as csv_file:
csv_reader = csv.reader(csv_file)
# Skipping the headers
next(csv_reader, None)
for each_line in csv_reader:
csv_data.append(each_line)
# Method to pre-process the input image
def pre_process_image(image):
# Since cv2 reads the image in BGR format and the simulator will send the image in RGB format
# Hence changing the image color space from BGR to RGB
colored_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Cropping the image
cropped_image = colored_image[60:140, :]
# Downscaling the cropped image
resized_image = cv2.resize(cropped_image, None, fx=0.25, fy=0.4, interpolation=cv2.INTER_CUBIC)
return resized_image
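# Note (assuming the standard 160x320x3 simulator frames): cropping rows 60:140 keeps an
# 80x320 region, and resizing with fx=0.25, fy=0.4 yields a 32x80x3 network input.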
def generator(input_data, batch_size=64):
# Since we are augmenting 3 more images for a given input image, so dividing the batch size by 4
processing_batch_size = int(batch_size / 4)
number_of_entries = len(input_data)
# Shuffling the csv entries
input_data = sklearn.utils.shuffle(input_data)
while True:
for offset in range(0, number_of_entries, processing_batch_size):
# Splitting the data set into required batch size
batch_data = input_data[offset:offset + processing_batch_size]
image_data = []
steering_angle = []
# Iterating over each image in batch_data
for each_entry in batch_data:
center_image_path = image_path + each_entry[0].split('/')[-1]
center_image = cv2.imread(center_image_path)
steering_angle_for_centre_image = float(each_entry[3])
if center_image is not None:
# Pre-processing the image
processed_center_image = pre_process_image(center_image)
image_data.append(processed_center_image)
steering_angle.append(steering_angle_for_centre_image)
# Flipping the image
image_data.append(cv2.flip(processed_center_image, 1))
steering_angle.append(- steering_angle_for_centre_image)
# Processing the left image
left_image_path = image_path + each_entry[1].split('/')[-1]
left_image = cv2.imread(left_image_path)
if left_image is not None:
image_data.append(pre_process_image(left_image))
steering_angle.append(steering_angle_for_centre_image + left_image_angle_correction)
# Processing the right image
right_image_path = image_path + each_entry[2].split('/')[-1]
right_image = cv2.imread(right_image_path)
if right_image is not None:
image_data.append(pre_process_image(right_image))
steering_angle.append(steering_angle_for_centre_image + right_image_angle_correction)
# Shuffling and returning the image data back to the calling function
yield sklearn.utils.shuffle(np.array(image_data), np.array(steering_angle))
# Splitting the csv data set into train and validation data
train_data, validation_data = train_test_split(csv_data, test_size=0.2)
# Creating generator instances for train and validation data set
train_generator_instance = generator(train_data)
validation_generator_instance = generator(validation_data)
# Getting shape of processed image
first_img_path = image_path + csv_data[0][0].split('/')[-1]
first_image = cv2.imread(first_img_path)
processed_image_shape = pre_process_image(first_image).shape
# My final model architecture
model = Sequential()
# Normalizing the input image data
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=processed_image_shape))
# First Convolution2D layer
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation="relu"))
model.add(MaxPooling2D())
model.add(Dropout(0.25))
# Second Convolution2D layer
model.add(Convolution2D(32, 5, 5, subsample=(2, 2), activation="relu"))
model.add(MaxPooling2D())
# Flattening the output of 2nd Convolution2D layer
model.add(Flatten())
# First Dense layer
model.add(Dense(32))
model.add(Dropout(0.20))
# Second Dense Layer
model.add(Dense(16))
# Third and Final Dense Layer
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator_instance, samples_per_epoch=len(train_data) * 4, verbose=1, validation_data=validation_generator_instance, nb_val_samples=len(validation_data)*4, nb_epoch=3)
model.save('model.h5')
```
#### File: udacity_CarND/CarND-Behavioral-Cloning-P3/model_not_working.py
```python
import csv
import cv2
import utils
import argparse
import numpy as np
from nn import model
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
class Pipeline:
def __init__(self, model=None, base_path='', epochs=2):
self.data = []
self.model = model
self.epochs = epochs
self.training_samples = []
self.validation_samples = []
self.correction_factor = 0.2
self.base_path = base_path
self.image_path = self.base_path + '/IMG/'
self.driving_log_path = self.base_path + '/driving_log.csv'
def import_data(self):
with open(self.driving_log_path) as csvfile:
reader = csv.reader(csvfile)
# Skip the column names row
next(reader)
for line in reader:
self.data.append(line)
return None
def process_batch(self, batch_sample):
steering_angle = np.float32(batch_sample[3])
images, steering_angles = [], []
for image_path_index in range(3):
image_name = batch_sample[image_path_index].split('/')[-1]
image = cv2.imread(self.image_path + image_name)
rgb_image = utils.bgr2rgb(image)
resized = utils.crop_and_resize(rgb_image)
images.append(resized)
if image_path_index == 1:
steering_angles.append(steering_angle + self.correction_factor)
elif image_path_index == 2:
steering_angles.append(steering_angle - self.correction_factor)
else:
steering_angles.append(steering_angle)
if image_path_index == 0:
flipped_center_image = utils.flipimg(resized)
images.append(flipped_center_image)
steering_angles.append(-steering_angle)
return images, steering_angles
def data_generator(self, samples, batch_size=128):
num_samples = len(samples)
while True:
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
images, steering_angles = [], []
for batch_sample in batch_samples:
augmented_images, augmented_angles = self.process_batch(batch_sample)
images.extend(augmented_images)
steering_angles.extend(augmented_angles)
X_train, y_train = np.array(images), np.array(steering_angles)
yield shuffle(X_train, y_train)
def split_data(self):
train, validation = train_test_split(self.data, test_size=0.2)
self.training_samples, self.validation_samples = train, validation
return None
def train_generator(self, batch_size=128):
return self.data_generator(samples=self.training_samples, batch_size=batch_size)
def validation_generator(self, batch_size=128):
return self.data_generator(samples=self.validation_samples, batch_size=batch_size)
def run(self):
self.split_data()
self.model.fit_generator(generator=self.train_generator(),
validation_data=self.validation_generator(),
epochs=self.epochs,
steps_per_epoch=len(self.training_samples) * 2,
validation_steps=len(self.validation_samples))
self.model.save('model.h5')
def main():
parser = argparse.ArgumentParser(description='Train a car to drive itself')
parser.add_argument(
'--data-base-path',
type=str,
default='./data',
help='Path to image directory and driving log'
)
args = parser.parse_args()
# Instantiate the pipeline
pipeline = Pipeline(model=model(), base_path=args.data_base_path, epochs=2)
# Feed driving log data into the pipeline
pipeline.import_data()
# Start training
pipeline.run()
if __name__ == '__main__':
main()
``` |
{
"source": "JialinLiOSU/bikeability_ConvNet",
"score": 4
} |
#### File: GIS_algorithm/geom/line_seg_intersection.py
```python
__author__ = "<NAME> <<EMAIL>>"
import sys
sys.path.append("..")
from contrib.bintrees import AVLTree
from .point import *
from .intersection import *
from .line_seg_eventqueue import *
def get_edges(t, p):
"""
Gets the edges (segments) that contain point p as their right
endpoint or in the interior
"""
lr = []
lc = []
for s in AVLTree(t):
if s.rp == p:
lr.append(s)
elif s.lp == p and s.status == INTERIOR:
lc.append(s)
elif sideplr(p, s.lp, s.rp) == 0:
lc.append(s)
return lr, lc
def get_lr(T, s):
"""
Returns the left and right neighbors (branches) of s in T.
"""
try:
sl = T.floor_key(s)
except KeyError:
sl = None
try:
sr = T.ceiling_key(s)
except KeyError:
sr = None
return sl, sr
def get_lrmost(T, segs):
"""
Finds the leftmost and rightmost segments of segs in T
"""
l = []
for s in list(T):
if s in segs:
l.append(s)
if len(l) < 1:
return None, None
return l[0], l[-1]
def find_new_event(s1, s2, p, q):
"""
Tests if s1 intersects s2 at a point that is not in the event queue.
When a new intersection point is found, a new event will be created
and added to the event queue.
Input:
s1: line segment
s2: line segment
p: the point of the current event
q: event queue
Output:
True if a new point is found, False otherwise
Change: the content in the queue (q) may change.
"""
ip = intersectx(s1, s2)
if ip is None:
return False
    if q.find(ip) != -1:
return False
if ip.x>p.x or (ip.x==p.x and ip.y >= p.y):
e0 = Event()
e0.p = ip
e0.edges = [s1, s2]
q.add(e0)
return True
def intersectx(s1, s2):
"""
Tests intersection of 2 input segments. If intersection is possible,
the actual intersection point will be calculated and returned.
"""
if not test_intersect(s1, s2):
return None
p = getIntersectionPoint(s1, s2) # an intersection
return p
def intersections(psegs):
"""
Implementation of the Bentley-Ottmann algorithm.
Input
psegs: a list of segments
Output
intpoints: a list of intersection points
"""
eq = EventQueue(psegs)
intpoints = []
T = AVLTree()
L=[]
while not eq.is_empty(): # for all events
e = eq.events.pop(0) # remove the event
p = e.p # get event point
L = e.edges # segments with p as left end
R,C = get_edges(T, p) # p: right (R) and interior (C)
if len(L+R+C) > 1: # Intersection at p among L+R+C
for s in L+R+C:
if not s.contains(p): # if p is interior
s.lp = p # change lp and
s.status = INTERIOR # status
intpoints.append(p)
R,C = get_edges(T, p)
for s in R+C:
T.discard(s)
for s in L+C:
T.insert(s, str(s))
if len(L+C) == 0:
s = R[0]
if s is not None:
sl, sr = get_lr(T, s)
find_new_event(sl, sr, p, eq)
else:
sp, spp = get_lrmost(T, L+C)
try:
sl = T.prev_key(sp)
except KeyError: # only on last key
sl = None
try:
sr = T.succ_key(spp)
except KeyError: # only on last key
sr = None
find_new_event(sl, sp, p, eq)
find_new_event(sr, spp, p, eq)
return intpoints
```
#### File: GIS_algorithm/geom/polygon_error.py
```python
class PolygonError(Exception):
    """Basic error for point-in-polygon algorithms"""
    def __init__(self, msg):
        super().__init__(msg)
        self.message = msg
```
#### File: GIS_algorithm/geom/shapex.py
```python
from struct import unpack, calcsize
from os.path import isfile
from datetime import date
shapefile_types = {
0: 'Null Shape',
1: 'Point',
3: 'PolyLine',
5: 'Polygon',
8: 'MultiPoint',
11: 'PointZ',
13: 'PolyLineZ',
15: 'PolygonZ',
18: 'MultiPointZ',
21: 'PointM',
23: 'PolyLineM',
25: 'PolygonM',
28: 'MultiPointM',
31: 'MultiPatch'
}
supported_types = [ 'Point', 'MultiPoint', 'PolyLine', 'Polygon' ]
def clockwise(polygon):
'''calculate 2*A
polygon: [ [x, y], [x, y], ... ]
polygon = [ [1, 0], [2,0], [2,2], [1,2], [1, 0] ]
clockwise(polygon) # False
polygon.reverse()
clockwise(polygon) # True
'''
if polygon[0] != polygon[-1]:
return
num_point = len(polygon)
A = 0
for i in range(num_point-1):
p1 = polygon[i]
p2 = polygon[i+1]
ai = p1[0] * p2[1] - p2[0] * p1[1]
A += ai
return A<0
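# Worked example of the shoelace terms x_i*y_{i+1} - x_{i+1}*y_i for the ring in
# the docstring, polygon = [[1, 0], [2, 0], [2, 2], [1, 2], [1, 0]]:
#   [1,0]->[2,0]: 1*0 - 2*0 =  0
#   [2,0]->[2,2]: 2*2 - 2*0 =  4
#   [2,2]->[1,2]: 2*2 - 1*2 =  2
#   [1,2]->[1,0]: 1*0 - 1*2 = -2
# 2*A = 4 > 0, so the ring is counter-clockwise and clockwise() returns False.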
class shapex:
'''
    A class for reading ESRI shapefiles (.shp, .shx, .dbf) as GeoJSON-like features.
Examples
>>> fname = '/Users/xiao/lib/gisalgs/data/uscnty48area.shp'
>>> shp = shapex(fname)
>>> print(shp[60])
'''
def __init__(self, fname):
if not fname.endswith('.shp'):
raise Exception('Need a .shp file.')
self.fname_shp = fname
self.fname_shx = fname[:-3]+'shx'
self.fname_dbf = fname[:-3]+'dbf'
if not isfile(self.fname_shp) or not isfile(self.fname_shx) or not isfile(self.fname_dbf):
raise Exception('Need at least three files: .shp, .shx, .dbf')
self.open_shapefile()
def open_shapefile(self):
self.f_shx = open(self.fname_shx, 'rb')
h1 = unpack('>7i', self.f_shx.read(28))
h2 = unpack('<2i 8d', self.f_shx.read(72))
file_length = h1[-1]
self.num_rec = (file_length-50)//4
self.f_shp = open(self.fname_shp, 'rb')
h1 = unpack('>7i', self.f_shp.read(28)) # BIG
h2 = unpack('<2i 8d', self.f_shp.read(72)) # LITTLE
self.file_length = h1[-1]
self.version = h2[0]
self.shape_type = shapefile_types[h2[1]]
# self.xmin, self.ymin, self.xmax, self.ymax, self.zmin, self.zmax, self.mmin, self.mmax = h2[2:10]
self.xmin = h2[2]
self.ymin = h2[3]
self.xmax = h2[4]
self.ymax = h2[5]
self.zmin = h2[6]
self.zmax = h2[7]
self.mmin = h2[8]
self.mmax = h2[9]
self.this_feature_num = 0
# get (offset, content length) pairs from shx
# remember each record has a header of 8 bytes
index = unpack('>'+'i'*self.num_rec*2, self.f_shx.read(self.num_rec*4*4))
self.index = [(index[i]*2, index[i+1]*2) for i in range(0, len(index), 2)]
# get schema, etc.
self.f_dbf = open(self.fname_dbf, 'rb')
dbf_numrec, lenheader = unpack('<xxxxLH22x', self.f_dbf.read(32))
self.numfields = (lenheader - 33) // 32
if dbf_numrec != self.num_rec:
raise Exception('SHP and DBF have different numbers of records')
self.fields = []
for fieldno in range(self.numfields):
name, dtype, size, deci = unpack('<11sc4xBB14x', self.f_dbf.read(32))
name = name.replace(b'\0', b'') # take out \x00
self.fields.append((name.decode('ascii'), dtype.decode('ascii'), size, deci))
self.f_dbf.read(1) # skip the terminator
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
self.formatstr = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
self.formatsize = calcsize(self.formatstr)
self.dbf_header_length = 32 + 32*self.numfields + 1
def __getitem__(self, i):
if not self.shape_type in supported_types:
raise Exception(self.shape_type + ' shape type not supported')
if isinstance(i, slice):
return [self[j] for j in range(*i.indices(len(self)))]
elif isinstance(i, int):
if i<0:
i = self.num_rec + i
if i<0 or i+1>self.num_rec:
raise Exception('Feature index out of range (' + str(i) + ')')
pos = self.index[i]
            self.f_shp.seek(pos[0] + 8)  # skip the record header, which is not needed here
if self.shape_type == 'Polygon':
feature = self.readpolygon()
if self.shape_type == 'PolyLine':
feature = self.readpolygon()
if feature['geometry']['type'] == 'MultiPolygon':
feature['geometry']['type'] = 'MultiLineString'
else:
feature['geometry']['type'] = 'LineString'
if self.shape_type == 'Point':
feature = self.readpoint()
if self.shape_type == 'MultiPoint':
feature = self.readmultipoint()
# get properties here.
properties = self.read_dbf(i)
feature['properties'] = properties
feature['id'] = i
return feature
else:
raise TypeError('Invalid index')
def robust_decode(self, bs):
'''
https://stackoverflow.com/questions/24475393/unicodedecodeerror-ascii-codec-cant-decode-byte-0xc3-in-position-23-ordinal
Convert a byte string to unicode. Try UTF8 first, if not working then latin1.
'''
cr = None
try:
cr = bs.decode('utf8')
except UnicodeDecodeError:
cr = bs.decode('latin1')
return cr
def read_dbf(self, i):
        # Note: dtypes of D, L are not tested
self.f_dbf.seek(self.dbf_header_length + i * self.formatsize)
record = unpack(self.formatstr, self.f_dbf.read(self.formatsize))
if record[0] == ' ':
return ' ' * self.formatsize
result = []
for (name, dtype, size, deci), value in zip(self.fields, record):
value = value.decode('latin1')
# value = value.decode('ascii') // works for Python 2
# value = self.robust_decode(value)
if name == 'DeletionFlag':
continue
if dtype == 'N':
value = value.replace('\0', '').lstrip()
if value == '':
value = 0
elif deci:
value = float(value)
else:
value = int(value)
elif dtype == 'C':
value = value.rstrip()
elif dtype == 'D':
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = date(y, m, d)
elif dtype == 'L':
value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
elif dtype == 'F':
value = float(value)
result.append(value)
properties = {}
for fi in range(1, len(self.fields)):
properties[self.fields[fi][0]] = result[fi-1]
return properties
def readpoint(self):
point = unpack('<idd', self.f_shp.read(4+8+8))
feature = {
"type": "Feature",
"geometry": {
"type": 'Point',
"coordinates": (point[1], point[2])
}
}
return feature
def readmultipoint(self):
# This function is not tested
content_head = unpack('<i 4d i', self.f_shp.read(40))
shape_type = content_head[0]
num_points = content_head[5]
points = unpack('<'+'d'*num_points*2, self.f_shp.read(8*2*num_points))
multipoints = [(points[i], points[i+1]) for i in range(0, len(points), 2)]
feature = {
"type": "Feature",
"geometry": {
"type": 'MultiPoint',
"coordinates": multipoints
}
}
return feature
def readpolygon(self):
content_head = unpack('<i 4d 2i', self.f_shp.read(44))
shape_type = content_head[0]
num_parts = content_head[5]
num_points = content_head[6]
parts = unpack('<'+'i'*num_parts, self.f_shp.read(4*num_parts))
points = unpack('<'+'d'*num_points*2, self.f_shp.read(8*2*num_points))
feature = {
"type": "Feature",
"geometry": {
"type": 'Polygon'
}
}
if num_parts == 1:
polygon = [[(points[i], points[i+1]) for i in range(0, len(points), 2)]]
feature['geometry']['coordinates'] = polygon
else:
directions = []
polygons = []
for j in range(num_parts):
start = parts[j]*2
if j != num_parts-1:
end = parts[j+1]*2
else:
end = len(points)
polygon = [(points[i], points[i+1]) for i in range(start, end, 2)]
polygons.append(polygon)
directions.append(clockwise(polygon))
if False in directions:
feature['geometry']['type'] = 'Polygon'
feature['geometry']['coordinates'] = polygons
else:
feature['geometry']['type'] = 'MultiPolygon'
multipolygon = []
for poly in polygons:
multipolygon.append([poly])
feature['geometry']['coordinates'] = multipolygon
return(feature)
def __len__(self):
return self.num_rec
def __iter__(self):
return self
def __next__(self):
if self.this_feature_num >= self.num_rec:
self.this_feature_num = 0
raise StopIteration
feature = self.__getitem__(self.this_feature_num)
self.this_feature_num += 1
return feature
def close(self):
self.f_shp.close()
self.f_shx.close()
self.f_dbf.close()
@property
def bounds(self):
return(self.xmin, self.ymin, self.xmax, self.ymax)
@property
def schema(self):
myschema = {}
myschema['geometry'] = self.shape_type
properties = []
for fi in range(1, len(self.fields)):
name = self.fields[fi][0]
f1 = self.fields[fi][1]
f2 = self.fields[fi][2]
dci = self.fields[fi][3]
if f1 == 'C':
fmt = 'str:' + str(f2)
elif f1 == 'F':
fmt = 'float:' + str(f2) + '.' + str(dci)
elif f1 == 'N':
if dci == 0:
fmt = 'int:' + str(f2)
else:
fmt = 'float:' + str(f2) + '.' + str(dci)
elif f1 == 'D':
fmt = 'datetime'
else:
fmt = 'other'
properties.append((name, fmt))
myschema['properties'] = properties
return myschema
if __name__ == '__main__':
fname = '/Users/xiao/lib/gisalgs/data/uscnty48area.shp'
fname = '/Users/xiao/lib/gisalgs/data/ne_110m_coastline.shp'
# fname = '/Users/xiao/lib/gisalgs/data/ne_110m_populated_places.shp'
# fname = '/Users/xiao/lib/gisalgs/data/ne_110m_admin_0_countries.shp'
shp = shapex(fname)
    print('Number of features:', len(shp))
# this tests all the geometry types
types = {}
for f in shp:
t = f['geometry']['type']
if t[:5] != 'Multi':
if len(f['geometry']['coordinates']) > 1:
t = t + '_Parts'
if t not in types:
types[t] = 1
else:
types[t] += 1
print(types)
print('Shape type:', shp.shape_type)
print('Schema:\n', shp.schema)
    print('Bounds:\n', shp.bounds)
shp.close()
```
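A quick standalone sanity check of the two header reads in `open_shapefile` above: the main `.shp`/`.shx` header is 100 bytes, consumed as a 28-byte big-endian chunk followed by a 72-byte little-endian chunk (the sizes below are properties of the `struct` format strings used in the code):
```python
from struct import calcsize

# 7 big-endian ints: file code, five unused ints, file length (in 16-bit words)
assert calcsize('>7i') == 28
# 2 little-endian ints (version, shape type) + 8 doubles (xmin, ymin, xmax, ymax,
# zmin, zmax, mmin, mmax)
assert calcsize('<2i 8d') == 72
# together they cover the fixed 100-byte shapefile header
assert 28 + 72 == 100
```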
#### File: bikeability_ConvNet/GIS_algorithm/lane_width.py
```python
def extract_first_three_lanes(polys):
"""
Extract the first three lanes
Input
polys: all lanes
    Output
the first three lanes
"""
return polys[:3]
def calculate_max_width(poly):
"""
Calculate the maximum width of a polygon and
    the corresponding y coordinate of the vertex used to calculate the maximum width
Input
poly: a set of vertices of a polygon
    Output
width_y: the y coordinate of the vertex used to calculate the maximum width
"""
width = 0
width_y = 0
for p0 in poly:
x0, y0 = p0[0], p0[1]
for i in range(len(poly)):
x1, y1 = poly[i-1][0], poly[i-1][1]
x2, y2 = poly[i][0], poly[i][1]
if y0 == y1 == y2:
if abs(x1 - x2) > width:
width = abs(x1 - x2)
width_y = y0
elif y0 != y1 != y2:
x = (y0 - y2)/(y1 - y2) * (x1 - x2) + x2
if x > x0 and x - x0 > width:
width = x - x0
width_y = y0
return width_y
def calculate_max_y(width_ys):
"""
Calculate the y coordinate of the baseline used for width comparisons
Input
width_ys: a collection of y coordinates used to calculate the maximum widths
    Output
the y coordinate of the baseline
"""
return max(width_ys)
def calculate_compared_width(y_base, poly):
"""
Calculate the width of each polygon according to the baseline
Input
        y_base: y coordinate of the baseline
        poly: a set of vertices of a polygon
    Output
width: the width of a polygon
"""
width = 0
width_xs = []
for i in range(len(poly)):
x1, y1 = poly[i - 1][0], poly[i - 1][1]
x2, y2 = poly[i][0], poly[i][1]
if y_base == y1 == y2:
if abs(x1 - x2) > width:
width = abs(x1 - x2)
elif y_base != y1 != y2:
x = (y_base - y2) / (y1 - y2) * (x1 - x2) + x2
width_xs.append(x)
    if width_xs and max(width_xs) - min(width_xs) > width:
        width = max(width_xs) - min(width_xs)
return width
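# Worked example of the interpolation x = (y_base - y2)/(y1 - y2)*(x1 - x2) + x2:
# for the edge from (x2, y2) = (4, 0) to (x1, y1) = (8, 8) and y_base = 4,
# x = (4 - 0)/(8 - 0) * (8 - 4) + 4 = 0.5 * 4 + 4 = 6, i.e. the point (6, 4)
# halfway up that edge, which is the crossing the width comparison uses.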
def compare_widths(polys):
"""
    Find the widest of the first three lanes and return the remaining ones
    Input
        polys: a set of polygons
    Output
        indexes, polygons and widths of the lanes other than the widest one
"""
# 1. Extract the first three lanes
polys = extract_first_three_lanes(polys)
# 2. Calculate the y coordinate of the compared baseline
width_ys = []
for poly in polys:
width_y = calculate_max_width(poly)
width_ys.append(width_y)
y_base = calculate_max_y(width_ys)
# 3. Compare widths
width = 0
i = 0
widthList = []
for poly in polys:
w = calculate_compared_width(y_base, poly)
widthList.append(w)
if w > width:
width = w
i = polys.index(poly)
widths = [w for w in widthList if widthList.index(w) != i]
indexes = [idx for idx in range(3) if idx != i]
polys_results = [poly for poly in polys if polys.index(poly) != i]
return indexes, polys_results, widths
polygon1 = [[0,0], [4,0], [3,9], [1.5,9], [0,0]]
polygon2 = [[1.5,0], [6,0], [8,8], [4,8], [1.5,0]]
polygon3 = [[4,0], [8,0], [5,7], [4,7], [4,0]]
polygon4 = [[1.5,0], [6,0], [8,8], [4,8], [1.5,0]]
polygons = [polygon1, polygon2, polygon3, polygon4]
print(polygons)
print(compare_widths(polygons))
``` |
{
"source": "jialinliu233/WordleMaster",
"score": 4
} |
#### File: jialinliu233/WordleMaster/compare_word.py
```python
def compare_word(truth, guess):
results = [0 for i in range(len(truth))]
for i, w in enumerate(truth):
if w == guess[i]:
results[i] = 2
else:
if w in guess:
results[guess.index(w)] = 1
return results
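# Usage sketch (not part of the original module): positions in the result refer to
# positions in `guess`; 2 = right letter, right spot, 1 = right letter, wrong spot,
# 0 = absent. Repeated letters are handled only approximately.
#   compare_word("crane", "lance")  # -> [0, 1, 1, 1, 2]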
``` |
{
"source": "JialinMao/gym-ww",
"score": 3
} |
#### File: envs/algorithmic/reverse.py
```python
import numpy as np
from gym.envs.algorithmic import algorithmic_env
from gym.envs.algorithmic.algorithmic_env import ha
class ReverseEnv(algorithmic_env.AlgorithmicEnv):
def __init__(self, base=2):
algorithmic_env.AlgorithmicEnv.__init__(self,
inp_dim=1,
base=base,
chars=True)
algorithmic_env.AlgorithmicEnv.current_length = 1
self.last = 50
def set_data(self):
self.content = {}
self.target = {}
for i in range(self.total_len):
val = self.np_random.randint(self.base)
self.content[ha(np.array([i]))] = val
self.target[self.total_len - i - 1] = val
self.total_reward = self.total_len + 0.9
```
#### File: envs/doom/doom_corridor.py
```python
import logging
from gym.envs.doom import doom_env
logger = logging.getLogger(__name__)
class DoomCorridorEnv(doom_env.DoomEnv):
"""
------------ Training Mission 2 - Corridor ------------
This map is designed to improve your navigation. There is a vest
at the end of the corridor, with 6 enemies (3 groups of 2). Your goal
is to get to the vest as soon as possible, without being killed.
Allowed actions:
[0] - ATTACK - Shoot weapon - Values 0 or 1
[10] - MOVE_RIGHT - Move to the right - Values 0 or 1
[11] - MOVE_LEFT - Move to the left - Values 0 or 1
[13] - MOVE_FORWARD - Move forward - Values 0 or 1
[14] - TURN_RIGHT - Turn right - Values 0 or 1
[15] - TURN_LEFT - Turn left - Values 0 or 1
Note: see controls.md for details
Rewards:
+ dX - For getting closer to the vest
- dX - For getting further from the vest
-100 - Penalty for being killed
Goal: 1,000 points
Reach the vest (or at least get past the guards in the 3rd group)
Mode:
- env.mode can be 'fast', 'normal' or 'human' (e.g. env.mode = 'fast')
- 'fast' (default) will run as fast as possible (~75 fps) (best for simulation)
- 'normal' will run at roughly 35 fps (easier for human to watch)
- 'human' will let you play the game (keyboard only: Arrow Keys, '<', '>' and Ctrl)
Ends when:
- Player touches vest
- Player is dead
        - Timeout (1 minute - 2,100 frames)
Actions:
actions = [0] * 43
actions[0] = 0 # ATTACK
actions[10] = 1 # MOVE_RIGHT
actions[11] = 0 # MOVE_LEFT
actions[13] = 0 # MOVE_FORWARD
actions[14] = 0 # TURN_RIGHT
actions[15] = 0 # TURN_LEFT
-----------------------------------------------------
"""
def __init__(self):
super(DoomCorridorEnv, self).__init__(1)
```
#### File: envs/doom/doom_my_way_home.py
```python
import logging
from gym.envs.doom import doom_env
logger = logging.getLogger(__name__)
class DoomMyWayHomeEnv(doom_env.DoomEnv):
"""
------------ Training Mission 6 - My Way Home ------------
This map is designed to improve navigational skills. It is a series of
interconnected rooms and 1 corridor with a dead end. Each room
has a separate color. There is a green vest in one of the room.
The vest is always in the same room. Player must find the vest.
Allowed actions:
[13] - MOVE_FORWARD - Move forward - Values 0 or 1
[14] - TURN_RIGHT - Turn right - Values 0 or 1
[15] - TURN_LEFT - Turn left - Values 0 or 1
Note: see controls.md for details
Rewards:
+ 1 - Finding the vest
-0.0001 - 35 times per second - Find the vest quick!
Goal: 0.50 point
Find the vest
Mode:
- env.mode can be 'fast', 'normal' or 'human' (e.g. env.mode = 'fast')
- 'fast' (default) will run as fast as possible (~75 fps) (best for simulation)
- 'normal' will run at roughly 35 fps (easier for human to watch)
- 'human' will let you play the game (keyboard only: Arrow Keys, '<', '>' and Ctrl)
Ends when:
- Vest is found
        - Timeout (1 minute - 2,100 frames)
Actions:
actions = [0] * 43
actions[13] = 0 # MOVE_FORWARD
actions[14] = 1 # TURN_RIGHT
actions[15] = 0 # TURN_LEFT
-----------------------------------------------------
"""
def __init__(self):
super(DoomMyWayHomeEnv, self).__init__(5)
``` |
{
"source": "jialin-wu-02/aimde",
"score": 2
} |
#### File: app/commits/utils.py
```python
import os
from aimrecords import Storage
from app.projects.utils import get_project_branches, get_branch_commits
from app.db import db
from app.commits.models import Commit, Tag
from artifacts.artifact import Metric
def get_commits(metric, tag=None, experiments=None):
project_path = '/store'
project_branches = get_project_branches(project_path)
commit_storage_path = lambda b, c: os.path.join(b, c, 'objects')
# Filter by experiments
if experiments and isinstance(experiments, str):
experiments = filter(lambda e: e,
map(lambda e: e.strip(), experiments.split(',')))
project_branches = [e for e in experiments if e in project_branches]
# Get all commit objects
commit_objects = {}
for branch in project_branches:
branch_path = os.path.join(project_path, branch)
branch_commits = get_branch_commits(branch_path)
for c in branch_commits.values():
commit_objects[c['hash']] = {
'branch': branch,
'hash': c['hash'],
'date': c['date'],
'msg': c['message'],
}
# Filter by tag
commit_hashes_by_tag = set()
if tag is not None:
tags = Tag.query.filter(Tag.name.like('{}%'.format(tag))).all()
for t in tags:
for tag_commit in t.commits:
commit_hashes_by_tag.add(tag_commit.hash)
filtered_commits = {c_hash: commit_objects[c_hash]
for c_hash in commit_hashes_by_tag}
else:
filtered_commits = commit_objects
# Get commits data length
max_commit_len = 0
for commit_hash, commit in filtered_commits.items():
branch_path = os.path.join(project_path, commit['branch'])
storage_path = commit_storage_path(branch_path, commit['hash'])
records_storage = Storage(storage_path, 'r')
try:
records_storage.open(metric,
uncommitted_bucket_visible=True)
commit['num_steps'] = records_storage.get_records_num(metric)
records_storage.close()
        except Exception:
commit['num_steps'] = 0
if commit['num_steps'] > max_commit_len:
max_commit_len = commit['num_steps']
# Get commits data
scaled_steps_len = 50
if scaled_steps_len > max_commit_len:
scaled_steps_len = max_commit_len
if scaled_steps_len:
scaled_steps = slice(0, max_commit_len,
max_commit_len // scaled_steps_len)
else:
scaled_steps = slice(0, 0)
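    # e.g. with max_commit_len = 1000 this gives slice(0, 1000, 20), which keeps
    # every 20th record (indices 0, 20, ..., 980), i.e. roughly 50 points per commit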
# Retrieve actual values from commits
for commit_hash, commit in filtered_commits.items():
branch_path = os.path.join(project_path, commit['branch'])
storage_path = commit_storage_path(branch_path, commit['hash'])
commit['data'] = []
records_storage = Storage(storage_path, 'r')
try:
records_storage.open(metric,
uncommitted_bucket_visible=True)
for r in records_storage.read_records(metric,
scaled_steps):
base, metric_record = Metric.deserialize(r)
commit['data'].append({
'value': metric_record.value,
'epoch': base.epoch,
'step': base.step,
})
records_storage.close()
        except Exception:
pass
# Remove empty commits
filtered_commits = {c_hash: filtered_commits[c_hash]
for c_hash in filtered_commits.keys()
if len(filtered_commits[c_hash]['data']) > 0}
# Get tags and colors
commit_models = db.session.query(Commit, Tag) \
.join(Tag, Commit.tags) \
.filter(Commit.hash.in_(filtered_commits.keys())).all()
for i in commit_models:
if len(i) <= 1 or not i[1].color:
continue
commit_model = i[0]
commit_tag = i[1]
for commit_hash, commit in filtered_commits.items():
if commit_hash == commit_model.hash:
commit['color'] = commit_tag.color
commit['tag'] = commit_tag.name
return filtered_commits
``` |
{
"source": "jialin-wu-02/aimrecords",
"score": 2
} |
#### File: aimrecords/artifact_storage/storage.py
```python
import os
from typing import Union, Tuple
from collections.abc import Iterator
from aimrecords.record_storage.writer import Writer
from aimrecords.record_storage.reader import ReaderIterator
from aimrecords.artifact_storage.consts import (
STORAGE_DIR_NAME,
)
class Storage:
WRITING_MODE = 'w'
READING_MODE = 'r'
MODES = (
WRITING_MODE,
READING_MODE,
)
def __init__(self, root_path: str, mode: str):
self.root_path = root_path
self.storage_path = self._get_storage_path()
self._artifacts = {}
assert mode in self.MODES
self._mode = mode
if not os.path.isdir(self.storage_path):
os.makedirs(self.storage_path)
def __iter__(self):
return iter(self._artifacts)
def __del__(self):
self.close()
def open(self, artifact_name: str, *args, **kwargs):
assert artifact_name not in self._artifacts
artifact_path = self._get_artifact_path(artifact_name)
if self._mode == self.WRITING_MODE:
artifact_instance = Writer(artifact_path, *args, **kwargs)
elif self._mode == self.READING_MODE:
artifact_instance = ReaderIterator(artifact_path, *args, **kwargs)
self._artifacts.update({
artifact_name: artifact_instance,
})
def append_record(self, artifact_name: str, data: bytes) -> int:
assert self._mode == self.WRITING_MODE
artifact = self._get_artifact(artifact_name)
artifact.append_record(data)
return artifact.records_num
def flush(self, artifact_name: str = None):
assert self._mode == self.WRITING_MODE
if artifact_name:
artifact = self._get_artifact(artifact_name)
artifact.flush()
else:
for a in self._artifacts.values():
a.flush()
def read_records(self, artifact_name: str,
indices: Union[None, int, Tuple[int, ...], slice] = None
) -> Iterator:
assert self._mode == self.READING_MODE
artifact = self._get_artifact(artifact_name)
return artifact[indices]
def get_records_num(self, artifact_name: str) -> int:
artifact = self._get_artifact(artifact_name)
return artifact.get_records_num()
def get_modification_time(self, artifact_name: str) -> float:
assert self._mode == self.READING_MODE
artifact = self._get_artifact(artifact_name)
return artifact.get_modification_time()
def close(self, artifact_name: str = None):
if artifact_name:
artifact = self._get_artifact(artifact_name)
artifact.close()
del self._artifacts[artifact_name]
else:
while len(self._artifacts):
artifact_name = list(self._artifacts.keys())[0]
artifact_inst = self._artifacts[artifact_name]
artifact_inst.close()
del self._artifacts[artifact_name]
def _get_artifact(self, artifact_name: str
) -> Union[Writer, ReaderIterator]:
artifact = self._artifacts.get(artifact_name)
if artifact is None:
            raise ValueError(
                'artifact {} is not in storage'.format(artifact_name))
return artifact
def _get_storage_path(self) -> str:
return os.path.join(self.root_path, STORAGE_DIR_NAME)
def _get_artifact_path(self, name: str) -> str:
return os.path.join(self.storage_path, name)
```
#### File: aimrecords/record_storage/utils.py
```python
import os
import json
def get_data_fname(path: str) -> str:
return os.path.join(path, 'data_chunk_00000.bin')
def get_record_offsets_fname(path: str) -> str:
return os.path.join(path, 'record_offsets.bin')
def get_bucket_offsets_fname(path: str) -> str:
return os.path.join(path, 'bucket_offsets.bin')
def get_metadata_fname(path: str) -> str:
return os.path.join(path, 'metadata.json')
def current_bucket_fname(path: str) -> str:
return os.path.join(path, 'current_bucket.bin')
def metadata_exists(path: str) -> bool:
return os.path.isfile(get_metadata_fname(path))
def write_metadata(path: str, metadata: dict):
with open(get_metadata_fname(path), 'w') as f_out:
json.dump(metadata, f_out)
def read_metadata(path: str) -> dict:
if metadata_exists(path):
with open(get_metadata_fname(path), 'r') as f_in:
return json.load(f_in)
return {}
def data_version_compatibility(prev_version: str, version: str):
assert 0 <= int(version) - int(prev_version) <= 1
```
#### File: tests/artifact_storage/basic_test.py
```python
import tempfile
from aimrecords import Storage
class TestMulitpleArtifacts(object):
def test_simple_int(self):
        length = 1000
        with tempfile.TemporaryDirectory() as temp_dir:
            storage_writer = Storage(temp_dir, 'w')
            storage_writer.open('loss')
            for i in range(length):
                storage_writer.append_record('loss', str(i).encode())
            storage_writer.close('loss')
            storage_writer.open('accuracy')
            for i in range(length, 2 * length):
                storage_writer.append_record('accuracy', str(i).encode())
            storage_writer.close('accuracy')
            del storage_writer
            storage_reader = Storage(temp_dir, 'r')
            storage_reader.open('loss')
            assert storage_reader.get_records_num('loss') == length
            for i, record in enumerate(storage_reader.read_records('loss')):
                assert i == int(record.decode())
            storage_reader.close('loss')
            storage_reader.open('accuracy')
            assert storage_reader.get_records_num('accuracy') == length
            for i, record in enumerate(storage_reader.read_records('accuracy')):
                assert i + length == int(record.decode())
            storage_reader.close('accuracy')
del storage_reader
```
#### File: tests/record_storage/basic_test.py
```python
import os
import tempfile
from aimrecords.record_storage.reader import Reader
from aimrecords.record_storage.writer import Writer
class TestBasicStuff(object):
def test_simple_int(self):
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'loss')
writer = Writer(path, compression=None)
length = 1000
for index in range(length):
writer.append_record(str(index).encode())
writer.close()
reader = Reader(path)
assert reader.get_records_num() == length
for index in range(length):
assert index == int(reader.get(index).decode())
def test_simple_binary(self):
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'loss')
writer = Writer(path, compression=None)
length = 5000
for index in range(length):
entry = (str(index) * index).encode()
writer.append_record(entry)
writer.close()
reader = Reader(path)
assert reader.get_records_num() == length
for index in range(length):
entry = (str(index) * index).encode()
assert entry == reader.get(index)
```
#### File: tests/record_storage/compression_test.py
```python
import os
import tempfile
from aimrecords.record_storage.reader import Reader
from aimrecords.record_storage.writer import Writer
class TestBucketCompression(object):
def test_gzip_compression(self):
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, 'loss')
writer = Writer(path, compression='gzip')
length = 1000
for i in range(length):
writer.append_record(str(i).encode())
writer.close()
reader = Reader(path)
assert reader.get_records_num() == length
for index in range(length):
assert index == int(reader.get(index).decode())
``` |
{
"source": "jialin-wu-02/skyportal",
"score": 2
} |
#### File: handlers/api/filter.py
```python
from marshmallow.exceptions import ValidationError
from baselayer.app.access import auth_or_token, permissions
from ..base import BaseHandler
from ...models import (
DBSession,
Filter,
)
class FilterHandler(BaseHandler):
@auth_or_token
def get(self, filter_id=None):
"""
---
single:
description: Retrieve a filter
parameters:
- in: path
name: filter_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: SingleFilter
400:
content:
application/json:
schema: Error
multiple:
description: Retrieve all filters
responses:
200:
content:
application/json:
schema: ArrayOfFilters
400:
content:
application/json:
schema: Error
"""
if filter_id is not None:
f = Filter.get_if_owned_by(filter_id, self.current_user)
if f is None:
return self.error("Invalid filter ID.")
return self.success(data=f)
filters = (
DBSession.query(Filter)
.filter(Filter.group_id.in_([g.id for g in self.current_user.groups]))
.all()
)
return self.success(data=filters)
@permissions(["Manage groups"])
def post(self):
"""
---
description: POST a new filter.
requestBody:
content:
application/json:
schema: FilterNoID
responses:
200:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Success'
- type: object
properties:
data:
type: object
properties:
id:
type: integer
description: New filter ID
"""
data = self.get_json()
schema = Filter.__schema__()
try:
fil = schema.load(data)
except ValidationError as e:
return self.error(
"Invalid/missing parameters: " f"{e.normalized_messages()}"
)
DBSession.add(fil)
DBSession().commit()
return self.success(data={"id": fil.id})
@permissions(["Manage groups"])
def patch(self, filter_id):
"""
---
description: Update a filter
parameters:
- in: path
name: filter_id
required: True
schema:
type: integer
requestBody:
content:
application/json:
schema: FilterNoID
responses:
200:
content:
application/json:
schema: Success
400:
content:
application/json:
schema: Error
"""
data = self.get_json()
data["id"] = filter_id
schema = Filter.__schema__()
try:
schema.load(data)
except ValidationError as e:
return self.error('Invalid/missing parameters: '
f'{e.normalized_messages()}')
DBSession().commit()
return self.success()
@permissions(["Manage groups"])
def delete(self, filter_id):
"""
---
description: Delete a filter
parameters:
- in: path
name: filter_id
required: true
schema:
type: integer
responses:
200:
content:
application/json:
schema: Success
"""
DBSession.delete(Filter.query.get(filter_id))
DBSession().commit()
return self.success()
```
#### File: tests/api/test_groups.py
```python
import uuid
from skyportal.tests import api
from skyportal.model_util import create_token
def test_token_user_create_new_group(manage_groups_token, super_admin_user):
group_name = str(uuid.uuid4())
status, data = api(
'POST',
'groups',
data={'name': group_name,
'group_admins': [super_admin_user.username]},
token=manage_groups_token)
assert status == 200
assert data['status'] == 'success'
new_group_id = data['data']['id']
status, data = api('GET', f'groups/{new_group_id}',
token=manage_groups_token)
assert data['status'] == 'success'
assert data['data']['name'] == group_name
def test_token_user_request_all_groups(manage_groups_token, super_admin_user):
group_name = str(uuid.uuid4())
status, data = api(
'POST',
'groups',
data={'name': group_name,
'group_admins': [super_admin_user.username]},
token=manage_groups_token)
assert status == 200
assert data['status'] == 'success'
new_group_id = data['data']['id']
status, data = api('GET', 'groups',
token=manage_groups_token)
assert data['status'] == 'success'
assert data['data']['user_groups'][-1]['name'] == group_name
assert data['data']['all_groups'] is None
def test_token_user_update_group(manage_groups_token, public_group):
new_name = str(uuid.uuid4())
status, data = api(
'PUT',
f'groups/{public_group.id}',
data={'name': new_name},
token=manage_groups_token)
assert status == 200
assert data['status'] == 'success'
status, data = api('GET', f'groups/{public_group.id}',
token=manage_groups_token)
assert data['status'] == 'success'
assert data['data']['name'] == new_name
def test_token_user_delete_group(manage_groups_token, public_group):
status, data = api(
'DELETE',
f'groups/{public_group.id}',
token=manage_groups_token)
assert status == 200
assert data['status'] == 'success'
status, data = api('GET', f'groups/{public_group.id}',
token=manage_groups_token)
assert status == 400
def test_manage_groups_token_get_unowned_group(manage_groups_token, user,
super_admin_user):
group_name = str(uuid.uuid4())
status, data = api(
'POST',
'groups',
data={'name': group_name,
'group_admins': [user.username]},
token=manage_groups_token)
assert status == 200
assert data['status'] == 'success'
new_group_id = data['data']['id']
token_name = str(uuid.uuid4())
token_id = create_token(permissions=['Manage groups'],
created_by_id=super_admin_user.id,
name=token_name)
status, data = api('GET', f'groups/{new_group_id}',
token=token_id)
assert data['status'] == 'success'
assert data['data']['name'] == group_name
```
#### File: tests/api/test_photometry.py
```python
import os
import datetime
import base64
from skyportal.tests import api
from skyportal.models import Thumbnail, DBSession, Photometry
import numpy as np
import sncosmo
def test_token_user_post_get_photometry_data(upload_data_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
assert data['data']['ra'] is None
assert data['data']['dec'] is None
assert data['data']['ra_unc'] is None
assert data['data']['dec_unc'] is None
np.testing.assert_allclose(data['data']['flux'],
12.24 * 10**(-0.4 * (25. - 23.9)))
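# The expected value above follows the flux convention used throughout these tests:
# a flux posted with zeropoint zp_in is rescaled to the reference zeropoint 23.9 as
# flux * 10**(-0.4 * (zp_in - 23.9)), so 12.24 posted at zp=25 comes back as about
# 12.24 * 10**(-0.44) ~= 4.44.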
def test_token_user_post_mag_photometry_data_and_convert(upload_data_token,
public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': 21.,
'magerr': 0.2,
'limiting_mag': 22.3,
'magsys': 'vega',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
ab = sncosmo.get_magsystem('ab')
vega = sncosmo.get_magsystem('vega')
correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))
np.testing.assert_allclose(data['data']['flux'],
10**(-0.4 * (21. - correction - 23.9 )))
np.testing.assert_allclose(data['data']['fluxerr'],
0.2 / (2.5 / np.log(10)) * data['data']['flux'])
status, data = api(
'GET',
f'photometry/{photometry_id}',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['mag'],
21. - correction)
np.testing.assert_allclose(data['data']['magerr'],
0.2)
def test_token_user_post_and_get_different_systems_mag(upload_data_token,
public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': 21.,
'magerr': 0.2,
'limiting_mag': 22.3,
'magsys': 'vega',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=mag&magsys=vega',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
ab = sncosmo.get_magsystem('ab')
vega = sncosmo.get_magsystem('vega')
correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))
np.testing.assert_allclose(data['data']['mag'], 21.)
np.testing.assert_allclose(data['data']['magerr'], 0.2)
np.testing.assert_allclose(data['data']['limiting_mag'], 22.3)
status, data = api(
'GET',
f'photometry/{photometry_id}?format=mag&magsys=ab',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['mag'], 21. - correction)
np.testing.assert_allclose(data['data']['magerr'], 0.2)
np.testing.assert_allclose(data['data']['limiting_mag'], 22.3 - correction)
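# The relation checked above follows from m = -2.5*log10(F / F_zp) evaluated in both
# systems for the same physical flux F:
#   m_AB - m_Vega = 2.5*log10(F_zp_AB / F_zp_Vega) = -correction,
# with correction = 2.5*log10(vega.zpbandflux / ab.zpbandflux) as computed in the test,
# hence the expected AB magnitude of 21. - correction.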
def test_token_user_post_and_get_different_systems_flux(upload_data_token,
public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': 21.,
'magerr': 0.2,
'limiting_mag': 22.3,
'magsys': 'vega',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux&magsys=vega',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
ab = sncosmo.get_magsystem('ab')
vega = sncosmo.get_magsystem('vega')
correction = 2.5 * np.log10(vega.zpbandflux('ztfg') / ab.zpbandflux('ztfg'))
np.testing.assert_allclose(data['data']['flux'], 10**(-0.4 * (21 - correction - 23.9)))
np.testing.assert_allclose(data['data']['fluxerr'], 0.2 / (2.5 / np.log(10)) * data['data']['flux'])
np.testing.assert_allclose(data['data']['zp'], 23.9 + correction)
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux&magsys=ab',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['flux'], 10**(-0.4 * (21 - correction - 23.9)))
np.testing.assert_allclose(data['data']['fluxerr'], 0.2 / (2.5 / np.log(10)) * data['data']['flux'])
np.testing.assert_allclose(data['data']['zp'], 23.9)
def test_token_user_mixed_photometry_post(upload_data_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': 21.,
'magerr': [0.2, 0.1],
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][1]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['flux'],
10**(-0.4 * (21. - 23.9 )))
np.testing.assert_allclose(data['data']['fluxerr'],
0.1 / (2.5 / np.log(10)) * data['data']['flux'])
# should fail as len(mag) != len(magerr)
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': [21.],
'magerr': [0.2, 0.1],
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
def test_token_user_mixed_mag_none_photometry_post(upload_data_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': None,
'magerr': [0.2, 0.1],
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': [21.3, None],
'magerr': [0.2, 0.1],
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': [21.3, None],
'magerr': [None, 0.1],
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
def test_token_user_post_photometry_limits(upload_data_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': None,
'magerr': None,
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
    assert data['data']['flux'] is None
np.testing.assert_allclose(data['data']['fluxerr'],
10**(-0.4 * (22.3 - 23.9)) / 5)
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'flux': None,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfg'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
    assert data['data']['flux'] is None
np.testing.assert_allclose(data['data']['fluxerr'],
0.031 * 10**(-0.4 * (25. - 23.9)))
def test_token_user_post_invalid_filter(upload_data_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'mag': None,
'magerr': None,
'limiting_mag': 22.3,
'magsys': 'ab',
'filter': 'bessellv'
},
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
def test_token_user_post_photometry_data_series(upload_data_token, public_source,
ztf_camera):
# valid request
status, data = api(
'POST',
'photometry',
data={'obj_id': str(public_source.id),
'mjd': [58000., 58001., 58002.],
'instrument_id': ztf_camera.id,
'flux': [12.24, 15.24, 12.24],
'fluxerr': [0.031, 0.029, 0.030],
'filter': ['ztfg', 'ztfg', 'ztfg'],
'zp': [25., 30., 21.2],
'magsys': ['ab', 'ab', 'ab'],
'ra': 264.1947917,
'dec': [50.5478333, 50.5478333 + 0.00001, 50.5478333],
'dec_unc': 0.2},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
assert len(data['data']['ids']) == 3
photometry_id = data['data']['ids'][1]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
assert np.allclose(data['data']['flux'],
15.24 * 10**(-0.4 * (30 - 23.9)))
assert np.allclose(data['data']['dec'],
50.5478333 + 0.00001)
assert np.allclose(data['data']['dec_unc'], 0.2)
assert data['data']['ra_unc'] is None
# invalid request
status, data = api(
'POST',
'photometry',
data=[{'obj_id': str(public_source.id),
'mjd': 58000,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'filter': 'ztfg',
'zp': 25.,
'magsys': 'ab'},
{'obj_id': str(public_source.id),
'mjd': 58001,
'instrument_id': ztf_camera.id,
'flux': 15.24,
'fluxerr': 0.031,
'filter': 'ztfg',
'zp': 30.,
'magsys': 'ab'},
{'obj_id': str(public_source.id),
'mjd': 58002,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'filter': 'ztfg',
'zp': 21.2,
'magsys': 'vega'}],
token=upload_data_token)
assert status == 400
assert data['status'] == 'error'
def test_post_photometry_no_access_token(view_only_token, public_source,
ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfg'
},
token=view_only_token)
assert status == 400
assert data['status'] == 'error'
def test_token_user_update_photometry(upload_data_token,
manage_sources_token,
public_source, ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfi'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['flux'],
12.24 * 10**(-0.4 * (25 - 23.9)))
status, data = api(
'PUT',
f'photometry/{photometry_id}',
data={'obj_id': str(public_source.id),
'flux': 11.0,
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfi'},
token=manage_sources_token)
assert status == 200
assert data['status'] == 'success'
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
np.testing.assert_allclose(data['data']['flux'],
11.0 * 10**(-0.4 * (25 - 23.9)))
def test_delete_photometry_data(upload_data_token, manage_sources_token,
public_source, ztf_camera):
status, data = api('POST', 'photometry',
data={'obj_id': str(public_source.id),
'mjd': 58000.,
'instrument_id': ztf_camera.id,
'flux': 12.24,
'fluxerr': 0.031,
'zp': 25.,
'magsys': 'ab',
'filter': 'ztfi'
},
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
photometry_id = data['data']['ids'][0]
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 200
assert data['status'] == 'success'
np.testing.assert_allclose(data['data']['flux'],
12.24 * 10 ** (-0.4 * (25 - 23.9)))
status, data = api(
'DELETE',
f'photometry/{photometry_id}',
token=manage_sources_token)
assert status == 200
status, data = api(
'GET',
f'photometry/{photometry_id}?format=flux',
token=upload_data_token)
assert status == 400
def test_token_user_retrieving_source_photometry_and_convert(view_only_token, public_source):
status, data = api('GET', f'sources/{public_source.id}/photometry?format=flux&magsys=ab',
token=view_only_token)
assert status == 200
assert data['status'] == 'success'
assert isinstance(data['data'], list)
assert 'mjd' in data['data'][0]
assert 'ra_unc' in data['data'][0]
mag1_ab = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']
magerr1_ab = 2.5 / np.log(10) * data['data'][0]['fluxerr']/ data['data'][0]['flux']
maglast_ab = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']
magerrlast_ab = 2.5 / np.log(10) * data['data'][-1]['fluxerr']/ data['data'][-1]['flux']
status, data = api('GET', f'sources/{public_source.id}/photometry?format=mag&magsys=ab',
token=view_only_token)
assert status == 200
assert data['status'] == 'success'
assert np.allclose(mag1_ab, data['data'][0]['mag'])
assert np.allclose(magerr1_ab, data['data'][0]['magerr'])
assert np.allclose(maglast_ab, data['data'][-1]['mag'])
assert np.allclose(magerrlast_ab, data['data'][-1]['magerr'])
status, data = api('GET', f'sources/{public_source.id}/photometry?format=flux&magsys=vega',
token=view_only_token)
mag1_vega = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']
magerr1_vega = 2.5 / np.log(10) * data['data'][0]['fluxerr']/ data['data'][0]['flux']
maglast_vega = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']
magerrlast_vega = 2.5 / np.log(10) * data['data'][-1]['fluxerr']/ data['data'][-1]['flux']
assert status == 200
assert data['status'] == 'success'
ab = sncosmo.get_magsystem('ab')
vega = sncosmo.get_magsystem('vega')
vega_to_ab = {
filter: 2.5 * np.log10(ab.zpbandflux(filter) / vega.zpbandflux(filter))
for filter in ['ztfg', 'ztfr', 'ztfi']
}
assert np.allclose(mag1_ab, mag1_vega + vega_to_ab[data['data'][0]['filter']])
assert np.allclose(magerr1_ab, magerr1_vega)
assert np.allclose(maglast_ab, maglast_vega + vega_to_ab[data['data'][-1]['filter']])
assert np.allclose(magerrlast_ab, magerrlast_vega)
```
#### File: tests/frontend/test_scanning_page.py
```python
import uuid
import time
from skyportal.tests import api
def test_candidates_page_render(driver, user, public_candidate):
driver.get(f"/become_user/{user.id}")
driver.get("/candidates")
driver.wait_for_xpath(f'//a[text()="{public_candidate.id}"]')
def test_candidate_group_filtering(
driver,
user,
public_candidate,
public_filter,
public_group,
upload_data_token,
manage_groups_token,
):
candidate_id = str(uuid.uuid4())
for i in range(5):
status, data = api(
"POST",
"candidates",
data={
"id": f"{candidate_id}_{i}",
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"altdata": {"simbad": {"class": "RRLyr"}},
"transient": False,
"ra_dis": 2.3,
"filter_ids": [public_filter.id],
},
token=upload_data_token,
)
assert status == 200
assert data["data"]["id"] == f"{candidate_id}_{i}"
status, data = api(
"POST",
"groups",
data={"name": str(uuid.uuid4()), "group_admins": [user.username]},
token=manage_groups_token,
)
assert status == 200
driver.get(f"/become_user/{user.id}")
driver.get("/candidates")
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]')
group_checkbox = driver.wait_for_xpath(
f'//input[starts-with(@name,"groupIDs[0]")]'
)
driver.scroll_to_element_and_click(group_checkbox)
submit_button = driver.wait_for_xpath('//span[text()="Submit"]')
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath_to_disappear(f'//a[text()="{candidate_id}_{i}"]')
driver.scroll_to_element_and_click(group_checkbox)
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]')
def test_candidate_unsaved_only_filtering(
driver,
user,
public_candidate,
public_filter,
public_group,
upload_data_token,
manage_groups_token,
):
candidate_id = str(uuid.uuid4())
for i in range(5):
status, data = api(
"POST",
"sources",
data={
"id": f"{candidate_id}_{i}",
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"altdata": {"simbad": {"class": "RRLyr"}},
"transient": False,
"ra_dis": 2.3,
"group_ids": [public_group.id],
},
token=upload_data_token,
)
assert status == 200
status, data = api(
"POST",
"candidates",
data={
"id": f"{candidate_id}_{i}",
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"altdata": {"simbad": {"class": "RRLyr"}},
"transient": False,
"ra_dis": 2.3,
"filter_ids": [public_filter.id],
},
token=upload_data_token,
)
assert status == 200
assert data["data"]["id"] == f"{candidate_id}_{i}"
status, data = api(
"POST",
"groups",
data={"name": str(uuid.uuid4()), "group_admins": [user.username]},
token=manage_groups_token,
)
assert status == 200
driver.get(f"/become_user/{user.id}")
driver.get("/candidates")
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]')
unsaved_only_checkbox = driver.wait_for_xpath('//input[@name="unsavedOnly"]')
driver.scroll_to_element_and_click(unsaved_only_checkbox)
submit_button = driver.wait_for_xpath('//span[text()="Submit"]')
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath_to_disappear(f'//a[text()="{candidate_id}_{i}"]')
driver.scroll_to_element_and_click(unsaved_only_checkbox)
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]')
def test_candidate_date_filtering(
driver, user, public_candidate, public_filter, public_group, upload_data_token,
ztf_camera
):
candidate_id = str(uuid.uuid4())
for i in range(5):
status, data = api(
"POST",
"candidates",
data={
"id": f"{candidate_id}_{i}",
"ra": 234.22,
"dec": -22.33,
"redshift": 3,
"altdata": {"simbad": {"class": "RRLyr"}},
"transient": False,
"ra_dis": 2.3,
"filter_ids": [public_filter.id],
},
token=upload_data_token,
)
assert status == 200
assert data["data"]["id"] == f"{candidate_id}_{i}"
status, data = api(
"POST",
"photometry",
data={
"obj_id": f"{candidate_id}_{i}",
"mjd": 58000.,
"instrument_id": ztf_camera.id,
"flux": 12.24,
"fluxerr": 0.031,
"zp": 25.,
"magsys": "ab",
"filter": "ztfr",
},
token=upload_data_token,
)
assert status == 200
driver.get(f"/become_user/{user.id}")
driver.get("/candidates")
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]')
start_date_input = driver.wait_for_xpath("//input[@name='startDate']")
start_date_input.clear()
start_date_input.send_keys("20001212")
end_date_input = driver.wait_for_xpath("//input[@name='endDate']")
end_date_input.clear()
end_date_input.send_keys("20011212")
time.sleep(0.1)
submit_button = driver.wait_for_xpath('//span[text()="Submit"]')
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath_to_disappear(f'//a[text()="{candidate_id}_{i}"]', 10)
end_date_input.clear()
end_date_input.send_keys("20901212")
time.sleep(0.1)
driver.scroll_to_element_and_click(submit_button)
for i in range(5):
driver.wait_for_xpath(f'//a[text()="{candidate_id}_{i}"]', 10)
def test_save_candidate(driver, group_admin_user, public_group, public_candidate):
driver.get(f"/become_user/{group_admin_user.id}")
driver.get("/candidates")
driver.wait_for_xpath(f'//a[text()="{public_candidate.id}"]')
first_save_button = driver.wait_for_xpath(
f'//button[@name="initialSaveCandidateButton{public_candidate.id}"]')
driver.scroll_to_element_and_click(first_save_button)
driver.wait_for_xpath("//input[@name='group_ids[0]']").click()
second_save_button = driver.wait_for_xpath(
f'//button[@name="finalSaveCandidateButton{public_candidate.id}"]')
second_save_button.click()
driver.wait_for_xpath_to_disappear('//span[text()="Save as source"]')
driver.wait_for_xpath('//a[text()="Previously Saved"]')
def test_save_candidate_no_groups_error_message(
driver, group_admin_user, public_group, public_candidate
):
driver.get(f"/become_user/{group_admin_user.id}")
driver.get("/candidates")
driver.wait_for_xpath(f'//a[text()="{public_candidate.id}"]')
first_save_button = driver.wait_for_xpath(
f'//button[@name="initialSaveCandidateButton{public_candidate.id}"]')
driver.scroll_to_element_and_click(first_save_button)
second_save_button = driver.wait_for_xpath(
f'//button[@name="finalSaveCandidateButton{public_candidate.id}"]')
second_save_button.click()
driver.wait_for_xpath('//div[contains(.,"Select at least one group")]')
``` |
{
"source": "jialinwu17/box_cls_reg",
"score": 3
} |
#### File: lib/roi_data_layer/psroi_layer.py
```python
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
import scipy.io as sio
class RoIDataLayer(caffe.Layer):
"""Fast R-CNN data layer used for training."""
def setup(self, bottom, top):
"""Setup the RoIDataLayer."""
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str)
shape = bottom[0].data.shape #1, C, H, W
self._tiles = layer_params['tiles']
self._axis = layer_params['axis']
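        # The permutation is read from an (assumed) optional 'order' entry in the
        # layer params; forward() applies it and backward() applies its inverse.
        # Illustrative (hypothetical) prototxt value:
        #   param_str: "{'tiles': 2, 'axis': 1, 'order': [0, 2, 3, 1]}"
        self._order = tuple(layer_params.get('order', (0, 1, 2, 3)))
        self._order_back = tuple(np.argsort(self._order))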
    def forward(self, bottom, top):
        """Copy the axis-permuted bottom blob into the top blob."""
        data = bottom[0].data
        data = np.transpose(data, self._order)
        top[0].reshape(*data.shape)
        top[0].data[...] = data
    def backward(self, top, propagate_down, bottom):
        """Propagate gradients back by applying the inverse permutation."""
        diff = top[0].diff
        diff = np.transpose(diff, self._order_back)
        bottom[0].diff[...] = diff
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
``` |
{
"source": "jialinwu17/caption_vqa",
"score": 2
} |
#### File: jialinwu17/caption_vqa/models.py
```python
import torch
import torch.nn as nn
from attention import Att_0, Att_1, Att_2, Att_3, Att_P, Att_PD, Att_3S
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier, PaperClassifier
from fc import FCNet, GTH
from caption_model import CaptionRNN
import torch.nn.functional as F
import utils
from torch.autograd import Variable
import shutil
import numpy as np
import matplotlib.pyplot as plt
# Dropout p: probability of an element to be zeroed. Default: 0.5
"""
Name: Model
Pre written
"""
class Model(nn.Module):
def __init__(self, w_emb, q_emb, v_att, q_net, v_net, classifier):
super(Model, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
def forward(self, v, b, q, labels):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q) # get word embeddings
q_emb = self.q_emb(w_emb) # run GRU on word embeddings [batch, q_dim]
att = self.v_att(v, q_emb) # [batch, 1, v_dim]
v_emb = (att * v).sum(1) # [batch, v_dim]
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classifier(joint_repr)
return logits
class Model_2(nn.Module):
def __init__(self, w_emb, q_emb, v_att_1, v_att_2, q_net, v_net, classifier,caption_w_emb, reference_caption_decoder, question_caption_decoder,caption_decoder,v2rc_net,v2qc_net):
super(Model_2, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att_1 = v_att_1
self.v_att_2 = v_att_2
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
self.reference_caption_decoder = reference_caption_decoder
self.question_caption_decoder = question_caption_decoder
self.caption_w_emb = caption_w_emb
self.caption_decoder = caption_decoder
self.v2rc_net = v2rc_net
self.v2qc_net = v2qc_net
def forward(self, v, b, q, labels, c):
"""Forward
v: [batch,5, num_objs, obj_dim]
b: [batch, 5,num_objs, b_dim]
q: [batch, 5, seq_length]
c: [batch, 5, 20 ]
return: logits, not probs
"""
batch = c.size(0)
q = q.view(batch * 5, -1)
c = c.view(batch * 5, -1)
v = v.view(batch * 5, 36, -1)
batch = c.size(0)
'''
v: [batch* 5, num_objs, obj_dim]
q: [batch* 5, seq_length]
c: [batch* 5, 20 ]
'''
#print c.shape, type(c)
w_emb = self.w_emb(q) # get word embeddings
q_emb = self.q_emb(w_emb) # run GRU on word embeddings [batch, q_dim]
# [batch* 5, num_hid]
#print c.shape, type(c)
att_1 = self.v_att_1(v, q_emb) # [batch* 5, 1, v_dim]
#print c.shape, type(c)
att_2 = self.v_att_2(v, q_emb) # [batch* 5, 1, v_dim]
att = att_1 + att_2
#print c.shape, type(c)
v_emb = (att * v).sum(1) # [batch, v_dim]
#print c.shape, type(c)
q_repr = self.q_net(q_emb) #[batch * 5 ,hid_dim]
v_repr = self.v_net(v_emb)#[batch *5, hid_dim]
#print c.shape, type(c)
# v_repr = v_repr.unsqueeze(1).repeat(1,5,1).view(batch*5,-1)
joint_repr = q_repr * v_repr #[batch *5,hid_dim ]
logits = self.classifier(joint_repr)
#print c.shape, type(c)
rc_w_emb = self.caption_w_emb(c)
qc_w_emb = self.caption_w_emb(c) # [batch * 5, 20 , hid_dim]
#print c.shape, type(c)
v_rc = self.v2rc_net(v)
v_qc = self.v2qc_net(joint_repr)
rc_emb = self.reference_caption_decoder(rc_w_emb, v_rc)
#[b,5,21,hid_dim]
qc_emb = self.question_caption_decoder(v_qc ,qc_w_emb)
#[b,5,21,hid_dim]
rc_repr = self.caption_decoder(rc_emb)
qc_repr = self.caption_decoder(qc_emb)
return logits, rc_repr, qc_repr
# all three returns are logits
class Model_4(nn.Module):
def __init__(self, w_emb, q_emb, v_att_1, v_att_2, q_net, v_net, classifier,caption_w_emb, reference_caption_decoder, question_caption_decoder,caption_decoder,v2rc_net, v2qc_net):
super(Model_4, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att_1 = v_att_1
self.v_att_2 = v_att_2
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
self.reference_caption_rnn = reference_caption_decoder
self.question_caption_rnn = question_caption_decoder
self.caption_w_emb = caption_w_emb
self.caption_decoder = caption_decoder
self.v2rc_net = v2rc_net
self.v2qc_net = v2qc_net
def forward(self, v, b, q, labels, c):
"""Forward
v: [batch,5, num_objs, obj_dim]
b: [batch, 5,num_objs, b_dim]
q: [batch, 5, seq_length]
c: [batch, 5, 20 ]
return: logits, not probs
"""
#print 'haha1'
batch = c.size(0)
q = q.view(batch * 5, -1)
c = c.view(batch * 5, -1)
v = v.view(batch * 5, 36, -1)
batch = c.size(0)
'''
v: [batch* 5, num_objs, obj_dim]
q: [batch* 5, seq_length]
c: [batch* 5, 20 ]
'''
#print c.shape, type(c)
w_emb = self.w_emb(q) # get word embeddings
q_emb = self.q_emb(w_emb) # run GRU on word embeddings [batch, q_dim]
# [batch* 5, num_hid]
#print c.shape, type(c)
att_1 = self.v_att_1(v, q_emb) # [batch* 5, 1, v_dim]
#print c.shape, type(c)
att_2 = self.v_att_2(v, q_emb) # [batch* 5, 1, v_dim]
att = att_1 + att_2
#print c.shape, type(c)
v_emb = (att * v).sum(1) # [batch, v_dim]
#print c.shape, type(c)
#print 'haha1'
q_repr = self.q_net(q_emb) #[batch * 5 ,hid_dim]
v_repr = self.v_net(v_emb)#[batch *5, hid_dim]
#print c.shape, type(c)
# v_repr = v_repr.unsqueeze(1).repeat(1,5,1).view(batch*5,-1)
joint_repr = q_repr * v_repr #[batch *5,hid_dim ]
logits = self.classifier(joint_repr)
#print c.shape, type(c)
rc_w_emb = self.caption_w_emb(c)
#print c.shape, type(c)
v_rc = self.v2rc_net(v.mean(1))
v_qc = self.v2qc_net(joint_repr)
rc_emb = self.reference_caption_rnn( v_rc,rc_w_emb)
#[b,5,21,hid_dim]
rc_repr = self.caption_decoder(rc_emb)
#qc_repr = self.caption_decoder(qc_emb)
pred_ans = F.sigmoid(logits).contiguous()
pred_rc = F.sigmoid(rc_repr).contiguous()
#print 'haha2'
batch = batch / 5
caption_from_ans = pred_rc[:, : , : 3129 ]
# [b*5, 20, 3129]
caption_from_ans = caption_from_ans.contiguous().view(batch, 1 ,5, 20, -1).repeat(1,5,1,1,1)
# [batch ,5, 5, 20, caption set ]
#print 'haha3'
similarities_ = (caption_from_ans * (pred_ans.view(batch, 5,1,1,-1).repeat(1, 1, 5, 20, 1))).sum(4)
# [batch, 5, 5, 20] [i,j] i th answer with j th caption
similarities, _ = similarities_.max(3)
# [batch ,5, 5]
_, indices = similarities.max(2)
# [batch, 5 ]
indices = indices.view(-1,1 )
#[batch, 5]
#print 'haha3.5'
target_qc_mask = torch.zeros(batch*5, 5)
#print target_qc_mask.shape, indices.data.shape
target_qc_mask.scatter_(1, indices.data.type(torch.LongTensor), 1)
#print 'haha5'
#target_qc_mask = Variable(target_qc_mask.view(batch, 5, 5, 1).repeat(1,1,1,20), volatile=True).cuda()
target_qc_mask = Variable(target_qc_mask.view(batch, 5, 5, 1).repeat(1,1,1,20).type(torch.LongTensor)).cuda()
# [b, 5, 5, 20]
#print 'haha6'
target_qc = c.view(batch,1,5,20).repeat(1,5,1,1)
# [b,5,5, 20]
#print 'haha7'
target_qc = target_qc * target_qc_mask
#print 'haha8'
target_qc = target_qc.sum(2).view(-1, 20)
# []
#print 'haha9'
qc_w_emb = self.caption_w_emb(target_qc) # [batch * 5, 20 , hid_dim]
#print 'haha10'
qc_emb = self.question_caption_rnn(v_qc ,qc_w_emb)
#print 'haha11'
qc_repr = self.caption_decoder(qc_emb)
#print 'haha12'
pred_qc = F.sigmoid(qc_repr).contiguous()
return logits, pred_rc, pred_qc, target_qc
# logits are raw answer scores; pred_rc and pred_qc are sigmoid outputs and target_qc holds the selected caption tokens
class Model_3(nn.Module):
def __init__(self, w_emb, q_emb, v_att_1, v_att_2, v_att_3, q_net, v_net, classifier):
super(Model_3, self).__init__()
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att_1 = v_att_1
self.v_att_2 = v_att_2
self.v_att_3 = v_att_3
self.q_net = q_net
self.v_net = v_net
self.classifier = classifier
def forward(self, v, b, q, labels):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q) # get word embeddings
q_emb = self.q_emb(w_emb) # run GRU on word embeddings [batch, q_dim]
att_1 = self.v_att_1(v, q_emb) # [batch, 1, v_dim]
att_2 = self.v_att_2(v, q_emb) # [batch, 1, v_dim]
att_3 = self.v_att_3(v, q_emb) # [batch, 1, v_dim]
att = att_1 + att_2 + att_3
v_emb = (att * v).sum(1) # [batch, v_dim]
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classifier(joint_repr)
return logits
# Attn: 1 layer attention, output layer, softmax
def build_baseline(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_0(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# Attn: 2 layer attention, output layer, softmax
def build_model_A1(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_1(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# Attn: 1 layer separate, element-wise *, output layer, softmax
def build_model_A2(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_2(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# Attn: 1 layer separate, element-wise *, 1 layer, output layer, softmax
def build_model_A3(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_3(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# Attn: 1 layer separate, element-wise *, 1 layer, output layer, sigmoid
def build_model_A3S(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_3S(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# 2*Attn: 1 layer separate, element-wise *, 1 layer, output layer, softmax
# our adopted model
# (self, in_dim, num_hid, v_dim, nlayers, bidirect, dropout, rnn_type='LSTM'):
# (self, embed_size, hidden_size, vocab_size, num_layers):
def build_model_A3x2(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_2 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
# num_hid = 1280 , dataset.v_dim = 2048
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
v2rc_net = FCNet([dataset.v_dim, 300 ], dropout= dropL, norm= norm, act= activation)
v2qc_net = FCNet([num_hid, 300], dropout= dropL, norm= norm, act= activation)
caption_w_emb = WordEmbedding(dataset.caption_dictionary.ntoken, emb_dim=300, dropout=dropW)
reference_caption_decoder = CaptionRNN(300, 512, num_layers = 1 )
question_caption_decoder = CaptionRNN(300, 512, num_layers = 1 )
caption_decoder = SimpleClassifier( in_dim=512, hid_dim=2 * num_hid, out_dim= dataset.caption_dictionary.ntoken, dropout=dropC, norm= norm, act= activation)
return Model_4(w_emb, q_emb, v_att_1, v_att_2, q_net, v_net, classifier,caption_w_emb, reference_caption_decoder, question_caption_decoder, caption_decoder,v2rc_net, v2qc_net)
# 2*Attn: 1 layer separate, element-wise *, output layer, softmax
def build_model_A2x2(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_2 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model_2(w_emb, q_emb, v_att_1, v_att_2, q_net, v_net, classifier)
def build_model_A23P(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_2 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_3 = Att_P(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model_3(w_emb, q_emb, v_att_1, v_att_2, v_att_3, q_net, v_net, classifier)
# 3*Attn: 1 layer separate, element-wise *, 1 layer, output layer, softmax
def build_model_A3x3(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_2 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_3 = Att_3(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model_3(w_emb, q_emb, v_att_1, v_att_2, v_att_3, q_net, v_net, classifier)
# 3*Attn: 1 layer separate, element-wise *, output layer, softmax
def build_model_A2x3(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_2 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
v_att_3 = Att_2(v_dim=dataset.v_dim, q_dim=q_emb.num_hid, num_hid=num_hid, dropout=dropout, norm=norm,
act=activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model_3(w_emb, q_emb, v_att_1, v_att_2, v_att_3, q_net, v_net, classifier)
# Attn: 1 layer separate, element-wise *, output layer
def build_model_AP(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
# 2*Attn: 1 layer separate, element-wise *, output layer
def build_model_APx2(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att_1 = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
v_att_2 = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model_2(w_emb, q_emb, v_att_1, v_att_2, q_net, v_net, classifier)
# Attn: 2 layer separate, element-wise *, output layer
def build_model_APD(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_PD(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = SimpleClassifier(
in_dim=num_hid, hid_dim=2 * num_hid, out_dim=dataset.num_ans_candidates, dropout=dropC, norm= norm, act= activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
def build_model_AP_PC(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = FCNet([q_emb.num_hid, num_hid], dropout= dropL, norm= norm, act= activation)
v_net = FCNet([dataset.v_dim, num_hid], dropout= dropL, norm= norm, act= activation)
classifier = PaperClassifier(
in_dim=num_hid, hid_dim_1=300, hid_dim_2=2048, out_dim=dataset.num_ans_candidates, dropout=dropC, norm=norm,
act=activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
def build_model_P_exact(dataset, num_hid, dropout, norm, activation):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=0.0)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=0, rnn_type='GRU')
v_att = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = GTH(q_emb.num_hid, num_hid, dropout=0, norm=norm, act=activation)
v_net = GTH(dataset.v_dim, num_hid, dropout=0, norm=norm, act=activation)
classifier = PaperClassifier(
in_dim=num_hid, hid_dim_1= 300, hid_dim_2= 2048, out_dim=dataset.num_ans_candidates, dropout=0, norm=norm, act=activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
def build_model_P_mod(dataset, num_hid, dropout, norm, activation, dropL , dropG, dropW, dropC):
w_emb = WordEmbedding(dataset.dictionary.ntoken, emb_dim=300, dropout=dropW)
q_emb = QuestionEmbedding(in_dim=300, num_hid=num_hid, nlayers=1, bidirect=False, dropout=dropG, rnn_type='GRU')
v_att = Att_P(v_dim= dataset.v_dim, q_dim= q_emb.num_hid, num_hid= num_hid, dropout= dropout, norm= norm, act= activation)
q_net = GTH(q_emb.num_hid, num_hid, dropout=dropL, norm=norm, act=activation)
v_net = GTH(dataset.v_dim, num_hid, dropout=dropL, norm=norm, act=activation)
classifier = PaperClassifier(
in_dim=num_hid, hid_dim_1= 300, hid_dim_2= 2048, out_dim=dataset.num_ans_candidates, dropout=dropC, norm=norm, act=activation)
return Model(w_emb, q_emb, v_att, q_net, v_net, classifier)
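# Usage sketch (illustrative only; the hyper-parameter values and the norm/activation
# strings below are assumptions -- valid choices depend on fc.FCNet and attention.py):
# model = build_baseline(dataset, num_hid=1280, dropout=0.2, norm='weight',
#                        activation='ReLU', dropL=0.1, dropG=0.2, dropW=0.4, dropC=0.5)
# logits = model(v, b, q, labels)  # v: [batch, num_objs, v_dim], q: [batch, seq_length]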
```
#### File: jialinwu17/caption_vqa/test.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import pdb
import time
import json
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from base_model import Model
from loader import Data_loader
def test(args):
# Some preparation
torch.manual_seed(1000)
if torch.cuda.is_available():
torch.cuda.manual_seed(1000)
else:
raise SystemExit('No CUDA available, don\'t do this.')
print ('Loading data')
loader = Data_loader(args.bsize, args.emb, args.multilabel, train=False)
print ('Parameters:\n\tvocab size: %d\n\tembedding dim: %d\n\tK: %d\n\tfeature dim: %d\
\n\thidden dim: %d\n\toutput dim: %d' % (loader.q_words, args.emb, loader.K, loader.feat_dim,
args.hid, loader.n_answers))
model = Model(vocab_size=loader.q_words,
emb_dim=args.emb,
K=loader.K,
feat_dim=loader.feat_dim,
hid_dim=args.hid,
out_dim=loader.n_answers,
pretrained_wemb=loader.pretrained_wemb)
model = model.cuda()
if args.modelpath and os.path.isfile(args.modelpath):
print ('Resuming from checkpoint %s' % (args.modelpath))
ckpt = torch.load(args.modelpath)
model.load_state_dict(ckpt['state_dict'])
else:
raise SystemExit('Need to provide model path.')
result = []
for step in xrange(loader.n_batches):
# Batch preparation
q_batch, a_batch, i_batch = loader.next_batch()
q_batch = Variable(torch.from_numpy(q_batch))
i_batch = Variable(torch.from_numpy(i_batch))
q_batch, i_batch = q_batch.cuda(), i_batch.cuda()
# Do one model forward and optimize
output = model(q_batch, i_batch)
_, ix = output.data.max(1)
for i, qid in enumerate(a_batch):
result.append({
'question_id': qid,
'answer': loader.a_itow[ix[i]]
})
json.dump(result, open('result.json', 'w'))
print ('Validation done')
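# Example invocation (sketch; the flag names are assumptions inferred from the args
# attributes used above -- the real argument parser may differ):
# python test.py --bsize 512 --emb 300 --hid 1024 --multilabel 0 --modelpath save/model.pth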
``` |
{
"source": "JialinX/404Lab2",
"score": 3
} |
#### File: JialinX/404Lab2/client.py
```python
import socket, sys
def main():
#initialize the socket handle so the finally block is safe even if creation fails
s = None
try:
#define address info, payload, and buffer size
host = 'www.google.com'
port = 80
payload = f'GET / HTTP/1.0\r\nHost: {host}\r\n\r\n'
buffer_size = 4096
#make the socket, get the ip, and connect
#create a tcp socket
print('Creating socket')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created successfully')
print(f'Getting IP for {host}')
remote_ip = socket.gethostbyname(host)
print (f'Ip address of {host} is {remote_ip}')
s.connect((remote_ip , port))
print (f'Socket Connected to {host} on ip {remote_ip}')
#send the data and shutdown
print("Sending payload")
s.sendall(payload.encode())
print("Payload sent successfully")
s.shutdown(socket.SHUT_WR)
#continue accepting data until no more left
full_data = b""
while True:
data = s.recv(buffer_size)
if not data:
break
full_data += data
print(full_data)
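# Optional follow-up (sketch): split the raw HTTP response into headers and body,
# assuming a well-formed response whose headers end with CRLF CRLF:
# headers, _, body = full_data.partition(b'\r\n\r\n')
# print(headers.decode('iso-8859-1'))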
except Exception as e:
print(e)
finally:
    if s is not None:
        s.close()
if __name__ == "__main__":
main()
``` |
{
"source": "JialinX/cgi-lab",
"score": 2
} |
#### File: JialinX/cgi-lab/secret.py
```python
import cgi
import cgitb
cgitb.enable()
class FollowingTheTAsInstructionsError(Exception):
def __init__(self):
Exception.__init__(self, (
"You must edit secret.py to change the username, password, "
"and to delete this error!"
))
# Delete this line:
# raise FollowingTheTAsInstructionsError
# Edit the following two lines:
username = "jialin"
password = "<PASSWORD>"
``` |
{
"source": "jialinyi94/matching-bandit",
"score": 3
} |
#### File: matching_bandit/agents/simple_adaptive_matching.py
```python
import numpy as np
import gym
from gym import logger
import matching_bandit
import argparse
class MDC(object):
def __init__(self, n_pairs=3):
self.name = 'MDC'
self.n_pairs = n_pairs
self.n_items = n_pairs*2
self.X = np.zeros(shape=(self.n_items, self.n_items))
self.C = np.zeros(shape=(self.n_items, self.n_items))
self.X_tilda = np.zeros(shape=(self.n_items, self.n_items))
self.C_tilda = np.zeros(shape=(self.n_items, self.n_items))
self.tournament = Tournament()
self.tournament.head = Node(list(range(self.n_items)))
def reset(self):
n_pairs = self.n_pairs
self.__init__(n_pairs=n_pairs)
def run(self, env, horizon=10000, animated=True):
logger.info('Algorithm: '+self.name)
for t in range(horizon):
action = self.tournament.sample_matching()
# observe rewards
obs, _, _, _ = env.step(action)
feedback = obs['feedback']
for k in range(self.n_pairs):
i = action[2*k]
j = action[2*k+1]
self.X_tilda[i,j] += feedback[k]
self.X_tilda[j,i] += feedback[k]
self.C_tilda[i,j] += 1
self.C_tilda[j,i] += 1
tmp = self.tournament.head
while tmp is not None:
s = tmp.cluster
for i in s:
s_ = s.copy()
s_.remove(i)
counter = sum(self.C_tilda[i, s_])
if counter == len(s) - 1:
self.X[s,:] += self.X_tilda[s,:]
self.C[s,:] += self.C_tilda[s,:]
self.X_tilda[s,:] = 0
self.C_tilda[s,:] = 0
break
tmp = tmp.next
# make sure every item has been paired with every other item at least once.
if t >= self.n_items - 1:
UCB, LCB = self.tournament.confidence_intervals(self.X, self.C, horizon)
self.tournament.split(UCB, LCB)
# log regret
if (t+1) % env.time_series_frequency == 0:
env.log_regret()
# rendering
if animated: env.render()
class Node:
def __init__(self, cluster):
self.cluster = cluster
n_items = len(cluster)
n_pairs = n_items // 2
self.group = np.reshape(cluster, (2, n_pairs)).tolist()
self.next = None
class LinkedList:
def __init__(self):
self.head = None
class Tournament(LinkedList):
def sample_matching(self):
matching = []
tmp = self.head
while tmp is not None:
group = tmp.group
# extract the matching
matching += group2matching(group)
# update to the next schedule
tmp.group = round_robin_next(group)
tmp = tmp.next
return matching
def confidence_intervals(self, X, C, T):
n_items = X.shape[0]
UCB = np.zeros(shape=(n_items,))
LCB = np.zeros(shape=(n_items,))
for i in range(n_items):
lower_items = self.get_lower_items(i)
k_l = sum(C[i,lower_items])
d = sum(X[i,lower_items])
LCB[i] = d/k_l - np.sqrt(np.log(T)/k_l)
UCB[i] = d/k_l + np.sqrt(np.log(T)/k_l)
return UCB, LCB
def get_lower_items(self, idx):
lower_items = []
tmp = self.head
while tmp is not None:
cluster = tmp.cluster
lower_items += cluster
if idx in cluster:
return lower_items
tmp = tmp.next
def split(self, UCB, LCB):
tmp = self.head
if tmp is not None:
# if the headnode needs to be split
TorF, high, low = is_split(tmp.cluster, UCB, LCB)
if TorF == True:
high_node = Node(high)
low_node = Node(low)
high_node.next = tmp.next
low_node.next = high_node
self.head = low_node
removed = tmp
tmp = tmp.next
removed.next = None
prev = high_node
else:
prev = tmp
tmp = tmp.next
# traverse the nodes left
while tmp is not None:
TorF, high, low = is_split(tmp.cluster, UCB, LCB)
if TorF == True:
high_node = Node(high)
low_node = Node(low)
high_node.next = tmp.next
low_node.next = high_node
prev.next = low_node
prev = high_node
tmp = tmp.next
else:
prev = tmp
tmp = tmp.next
def __str__(self):
info = ''
tmp = self.head
while tmp is not None:
info += tmp.cluster.__str__()
tmp = tmp.next
return info
def round_robin_next(group):
'''
Implementation of the round-robin tournament transition:
Circle scheduling
'''
# rotate entries in group clockwise
# with the group[0,0] fixed
group[1] = np.flip(group[1])
group_flatten = list(np.concatenate(group).flat)
n_items = len(group_flatten)
if n_items > 2:
new_group_flatten = [0]*n_items
new_group_flatten[1:] = group_flatten[:-1]
new_group_flatten[0] = group_flatten[-1]
n_pairs = n_items // 2
new_group = np.reshape(new_group_flatten, (2,n_pairs))
new_group[1] = np.flip(new_group[1])
tmp = new_group[0,0]
new_group[0,0] = new_group[0,1]
new_group[0,1] = tmp
new_group = new_group.tolist()
else:
new_group = group
return new_group
def is_split(cluster, UCB, LCB):
l = len(cluster)
if l > 2:
# sort items in cluster by UCB
cluster = sorted(cluster, key=lambda i: UCB[i], reverse=True)
for i in range(2, l, 2):
item_i = cluster[i]
item_im1 = cluster[i-1]
if UCB[item_i] < LCB[item_im1]:
# larger items put lower in the chain
lower = cluster[:i]
higher = cluster[i:]
return True, higher, lower
return False, None, None
def matching2group(matching):
n_items = len(matching)
n_pairs = n_items // 2
group = np.reshape([0]*n_items, (2,n_pairs))
for k in range(n_pairs):
group[0,k] = matching[2*k]
group[1,k] = matching[2*k+1]
return group.tolist()
def group2matching(group):
n_pairs = len(group[0])
n_items = n_pairs * 2
matching = [0]*n_items
for k in range(n_pairs):
matching[2*k] = group[0][k]
matching[2*k+1] = group[1][k]
return matching
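# Hand-traced example with 2 pairs (4 items):
# matching2group([0, 1, 2, 3]) -> [[0, 2], [1, 3]] (pairs (0,1) and (2,3))
# group2matching([[0, 2], [1, 3]]) -> [0, 1, 2, 3]
# round_robin_next([[0, 2], [1, 3]]) -> [[0, 1], [3, 2]], i.e. the next round
# pairs items (0,3) and (1,2), keeping item 0 fixed as in circle scheduling.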
if __name__ == '__main__':
'''
The following code is to replicate the origin_equal_distance problem instance
'''
from matching_bandit.utils.p_dist import origin_equal_distance
parser = argparse.ArgumentParser()
parser.add_argument('--n_pairs', type=int, default=11)
parser.add_argument('--Delta', type=float, default=0.1)
parser.add_argument('--horizon', type=int, default=200000)
args = parser.parse_args()
logger.set_level(logger.INFO)
# Set up the agent
agent = MDC(n_pairs=args.n_pairs)
# Set up the environment
env = gym.make(
'MatchingSelectionBandit-v0',
n_pairs = args.n_pairs,
time_series_frequency = args.horizon // 10
)
p = origin_equal_distance(args.n_pairs, args.Delta)
env.reset(agent.name, item_dist=p)
# Simulation loop
agent.run(
env=env,
horizon=args.horizon,
animated=True
)
# Render the final state and keep the plot window open
env.render(freeze=True)
env.close()
``` |
{
"source": "jialiuGit/Bayes_GGP",
"score": 3
} |
#### File: jialiuGit/Bayes_GGP/main.py
```python
import numpy as np
from PN_Bayes import *
import pickle
import time
import argparse
from scipy.stats import norm as normal
import pdb
#run python
#for |\bm xi| = 5
#python3 main.py large 8 20000 5000 --Initials_xi 30 3 0.5 6 15 1.0 --Initials_g -18 0 0 --directory results --rstname tk03_test_large
#or |\bm xi| = 6
#python3 main.py large 8 20000 5000 --Initials_xi 30 3 0.5 6 15 1.0 3.0 --Initials_g -18 0 0 --beta_model rv --directory results --rstname tk03_test_large_betarv
#---data_name-----name of the user's data; the code needs 'data' (unit directional data in Cartesian coordinates) and 'lat' (latitudes) to run
#---degree--------degree l in the spherical harmonics expansion; the default value is 8
#---nMCMC---------number of MCMC iterations
#---burnin--------number of burn-in iterations in MCMC
#---Initials_xi---initial values of sig10_2, sig11_2, sig20_2, sig21_2, alpha, beta
#---Initials_g----initial values of the means of g_1^0, g_2^0, g_3^0, ..., g_l^0
#---mu_ast--------parameter mu star in the mirror random walk proposal
#---beta_model----if beta is treated as a fixed value then |xi| = 5, otherwise |xi| = 6
#---fileformat----file format for saving the results, default is '.pkl'
#---GGPmodel------the existing GGP model, default is tk03
#---directory-----directory where the results are saved
#---rstname-------name of the result file
#---method--------three alternative ways to update the field variation: 'ab_joint' updates xi with alpha and beta together;
#'ab_ind' updates xi with alpha and beta independently; 'xi2' updates xi^2
parser = argparse.ArgumentParser(description='main script to run on server')
parser.add_argument('data_name', metavar='data_name', nargs='+',help='name of data') #
parser.add_argument('degree', metavar='degree', type=int, nargs='+',help='degree in SH')
parser.add_argument('nMCMC', metavar='nMCMC', type=int, nargs='+',help='number of MCMC iterations')
parser.add_argument('burnin', metavar='burnin', type=int, nargs='+',help='number of burnin in MCMC')
parser.add_argument('--Initials_xi', metavar='Initials_xi', type=float, nargs='+',help='initial values of variation parameter')
parser.add_argument('--Initials_g', metavar='Initials_g', type=float, nargs=3, help='initial values of field parameter')
parser.add_argument('--mu_ast', metavar='mu_ast', type=float, nargs='+',help='mu of RW mirror proposal')
parser.add_argument('--beta_model', metavar='beta_model', nargs='+',help='model of beta (fixed/r.v.)')
parser.add_argument('--fileformat', metavar='fileformat', nargs='+',help='figure fileformat (pkl)')
parser.add_argument('--GGPmodel', metavar='GGPmodel', nargs='+',help='name of GGP model')
parser.add_argument('--directory', metavar='directory', nargs='+',help='Subdirectory for output-files')
parser.add_argument('--rstname', metavar='rstname', nargs='+',help='result name in pkl-file')
parser.add_argument('--method', metavar='method', nargs='+',help='simulation method') #
args = parser.parse_args()
data_name =str(args.data_name[0])
degree = np.int(args.degree[0])
nMCMC = np.int(args.nMCMC[0])
burnin = np.int(args.burnin[0])
Initials_xi =np.float32(args.Initials_xi)
sig10_2, sig11_2, sig20_2, sig21_2, alpha, beta = Initials_xi
sig22_2 = sig20_2
Initials_g =np.float32(args.Initials_g)
G = Initials_g
if args.data_name is None:
data_name = 'large'
else:
data_name=str(args.data_name[0])
if args.beta_model is None:
beta_model="fixed"
else:
beta_model=str(args.beta_model[0])
if args.mu_ast is None: # if not given, take mu_ast from a burn-in run or a good default such as tk03
if beta_model == 'fixed':
mu_ast = 2*np.array([6.4, 1.7, 0.6, 2.2, 7.5])
else:
mu_ast = 2*np.array([6.4, 1.7, 0.6, 2.2, 7.5, 7.5*3.8])
else:
mu_ast=2*np.float32(args.mu_ast)
#pdb.set_trace()
if args.fileformat is None:
fileformat="pkl"
else:
fileformat=str(args.fileformat[0])
if args.GGPmodel is None:
GGPmodel="tk03"
else:
GGPmodel=str(args.GGPmodel[0])
if args.method is None:
method="ab_joint"
else:
method=str(args.method[0])
#load your data: we need 'data = directional data' and 'lat = latitude' to run the code!
if data_name == 'large':
with open('simu_large_data.pkl', 'rb') as f:
tk03_data, tk03_real_inten, tk03_lat, CJ98_data, CJ98_real_inten, CJ98_lat = pickle.load(f)
if GGPmodel == 'tk03':
data = tk03_data
elif GGPmodel == 'CJ98':
data = CJ98_data
lat = tk03_lat
#pdb.set_trace()
if beta_model == 'fixed':
cof_K = cof_K1(degree, lat, beta)
else:
cof_K = cof_K2(degree, lat)
directory = str(args.directory[0])
rstname = str(args.rstname[0])
def update_Sigma_AM_onestp(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma0, sd, mu_ast):
#update alpha and beta together, alpha * beta is random variables #default method
uvec = Y - mu
cov_len =cof_K.shape[0]
if cov_len ==5:
x1 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]) , dtype=float) ,alpha))
x0 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha))
else:
x1 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha, alpha*beta))
x0 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha, alpha*beta))
for j in range(cov_len):
sampXI = list(x0) #will not change x0
go_on = 1
while go_on:
xi_j = np.random.normal(mu_ast[j] - x1[j], sd[j])
if xi_j > 0.0: #positive constraints
go_on = 0
sampXI[j] = xi_j
#print(xi_j,j)
#pdb.set_trace()
logMHrat = log_lik_diff(uvec, cof_K, np.array(sampXI)**2, x0**2 )
MHrat = np.exp(logMHrat)
vrat = np.random.uniform(0,1)
if (MHrat > vrat):
x0[j] = xi_j
if cov_len == 5:
dist = np.abs(np.sum(x0 - x1))
else:
#pdb.set_trace()
dist = np.abs(np.sum(x0[0:-1] - x1[0:-1]))
if (dist) > 1e-5:
if cov_len ==6:
sig10_2, sig11_2, sig20_2, sig21_2 =x0[0:4]**2
beta = x0[-1]/x0[-2]
else:
sig10_2, sig11_2, sig20_2, sig21_2 = x0[0:4]**2
alpha = x0[4]
sig22_2 = sig20_2
loglik = log_post_fun(uvec, cof_K, x0**2)
lat0 = 0
Sigma = np.zeros((n,3,3))
for i in range(len(Y)):
if lat[i] != lat0:
Sigma_i = psv.Cov(alpha,beta,lat[i], degree,sig10_2,sig11_2,sig20_2,sig21_2,sig22_2)
Sigma[i] = Sigma_i
else:
Sigma[i] = Sigma[i-1]
lat0 = lat[i]
else:
sig22_2 = sig20_2
Sigma = Sigma0
loglik = log_post_fun(uvec, cof_K, x1**2)
return sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, loglik
def update_Sigma_AM_onestp_beta(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma0, sd, mu_ast):
# update alpha and beta separately; beta is treated as a random variable
sig2 = 100 #---prior
uvec = Y - mu
cov_len =cof_K.shape[0]
if cov_len ==5:
x1 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]) , dtype=float) ,alpha))
x0 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha))
else:
x1 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha, beta))
x0 = np.hstack((np.array(np.sqrt([sig10_2, sig11_2,sig20_2,sig21_2]), dtype=float) ,alpha, beta))
for j in range(cov_len):
sampXI = list(x0) #will not change x0
go_on = 1
while go_on:
xi_j = np.random.normal(mu_ast[j] - x1[j], sd[j])
if xi_j > 0.0:
go_on = 0
sampXI[j] = xi_j
if j == 5:
xi_new = np.hstack((x0[0:5]**2, (x0[4]*xi_j)**2 ))
xi_old = np.hstack((x0[0:5]**2, (x0[4]*x0[j])**2 ))
# pdb.set_trace()
else:
xi_new = np.array(sampXI)**2
xi_old = x0**2
logMHrat = log_lik_diff(uvec, cof_K, xi_new , xi_old) + x0[j]**2/sig2 - xi_j**2/sig2
#pdb.set_trace()
MHrat = np.exp(logMHrat)
vrat = np.random.uniform(0,1)
if (MHrat >= vrat):
x0[j] = xi_j
dist = np.abs(np.sum(x0 - x1))
if (dist) > 1e-6:
alpha = x0[4]
if cov_len ==6:
sig10_2, sig11_2, sig20_2, sig21_2 =x0[0:4]**2
#alpha = x0[4]
beta = x0[-1]
else:
sig10_2, sig11_2, sig20_2, sig21_2 =x0[0:4]**2
sig22_2 = sig20_2
#alpha = np.sqrt(alpha2)
if cov_len == 5:
loglik = log_post_fun(uvec, cof_K, x0**2 )
else:
loglik = log_post_fun(uvec, cof_K, np.hstack((x0[0:5]**2, (x0[4]*x0[-1])**2 )))
lat0 = 0
Sigma = np.zeros((n,3,3))
for i in range(len(Y)):
if lat[i] != lat0:
Sigma_i = psv.Cov(alpha,beta,lat[i], degree,sig10_2,sig11_2,sig20_2,sig21_2,sig22_2)
Sigma[i] = Sigma_i
else:
Sigma[i] = Sigma[i-1]
lat0 = lat[i]
else:
sig22_2 = sig20_2
Sigma = Sigma0
if cov_len == 5:
loglik = log_post_fun(uvec, cof_K, x1**2)
else:
loglik = log_post_fun(uvec, cof_K, np.hstack((x1[0:5]**2, (x1[4]*x1[-1])**2 )))
return sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, loglik
def update_Sigma_AM_onestp2(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma0, sd, mus_ast):
#update xi2---- sig10_2, sig11_2,sig20_2,sig21_2, alpha**2, (alpha*beta)**2
def rep_array(XI,x_j,j):
XI[j] = x_j
return XI
def strwhat(x0,sig):
tmp = np.random.uniform(0,1,3)
u1, u2, u3 = tmp
a = 1
b = 1.35
term1 = a/(3*b - 2*a)
if u1 < term1:
y = a*np.power(u2,1/3)
else:
y = np.random.uniform(a,b,1)[0]
if u3 < 0.5:
y = - y
xprime = x0 + sig*y
return xprime
uvec = Y - mu
cov_len =cof_K.shape[0]
if cov_len ==5:
x0 = np.array([sig10_2, sig11_2, sig20_2, sig21_2,alpha**2], dtype=float)
x1 = np.array([sig10_2, sig11_2, sig20_2, sig21_2,alpha**2], dtype=float)
mu_ast = 2*np.array([40.96, 2.89, 0.36, 4.84, 56.25]) #xi2 or other choices from burnin
else:
x0 = np.array([sig10_2, sig11_2, sig20_2, sig21_2,alpha**2, (alpha*beta)**2], dtype=float)
x1 = np.array([sig10_2, sig11_2, sig20_2, sig21_2,alpha**2, (alpha*beta)**2], dtype=float)
mu_ast = 2*np.array([40.96, 2.89, 0.36, 4.84, 56.25,56.25*14.4])
for j in range(cov_len):
sampXI = list(x0) #will not change x0
go_on = 1
while go_on:
xi_j = np.random.normal(mu_ast[j] - x1[j], sd[j])
if xi_j > 0.0:
go_on = 0
sampXI[j] = xi_j
#print(xi_j,j)
#print(xi_j)
logMHrat = log_lik_diff(uvec, cof_K, sampXI, x0 )
MHrat = np.exp(logMHrat)
vrat = np.random.uniform(0,1)
if (MHrat > vrat):
x0[j] = xi_j
if (np.abs(np.sum(x0[0:cov_len-1] - x1[0:cov_len-1])) ) > 1e-5:
if cov_len ==6:
sig10_2, sig11_2, sig20_2, sig21_2,alpha2, tau2 =x0
beta = np.sqrt(tau2/alpha2)
else:
sig10_2, sig11_2, sig20_2, sig21_2, alpha2 =x0
sig22_2 = sig20_2
alpha = np.sqrt(alpha2)
loglik = log_post_fun(uvec, cof_K, x0)
lat0 = 0
Sigma = np.zeros((n,3,3))
for i in range(len(Y)):
if lat[i] != lat0:
Sigma_i = psv.Cov(alpha,beta,lat[i], degree,sig10_2,sig11_2,sig20_2,sig21_2,sig22_2)
Sigma[i] = Sigma_i
else:
Sigma[i] = Sigma[i-1]
lat0 = lat[i]
else:
sig22_2 = sig20_2
Sigma = Sigma0
loglik = log_post_fun(uvec, cof_K, x1)
return sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, loglik
def log_post_diff(u, cof_K, sig_paras, x0, a, b):
n = len(u)
cov_len = cof_K.shape[0]
diffloglik = 0
#
def log_inverse_gamma(x, a,b):
log_ig = 0
for j in range(len(x)):
log_ig = log_ig - ((a[j] + 1)*np.log(x[j]) + b[j]/x[j])
return log_ig
#
def log_normal(x, mu,sig2):
log_norm = 0
for j in range(len(x)):
log_norm = log_norm - (x[j]-mu[j])**2/sig2
return log_norm
for i in range(n): #This step can be done in parallel!
K = 0
K1 = 0
for j in range(cov_len): #covariates j
K = K + sig_paras[j]* cof_K[j][i]
K1 = K1 + x0[j]* cof_K[j][i]
Kpre = Omega_mat(K)
Kpre1 = Omega_mat(K1)
A = u[i].reshape(3,1)*u[i].reshape(1,3)
diffloglik = diffloglik + np.matrix.trace( np.dot(A, Kpre1) ) - np.matrix.trace( np.dot(A, Kpre) ) + np.log(np.linalg.det(K1) ) - np.log(np.linalg.det(K) )
prior = log_normal(np.sqrt(sig_paras), a,b) - log_normal(np.sqrt(x0), a,b)
post = 0.5*diffloglik + prior
return post
def same_sign(x,y):
#pdb.set_trace()
if abs(x)<= 1e-3 or abs(y) <= 1e-3:
sign = 1
elif abs(x) + abs(y) == abs(x + y):
sign = 1
else:
sign = 0
return sign
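# Hand-checked examples of same_sign (values with |x| <= 1e-3 are treated as sign-neutral):
# same_sign(2.0, 3.5) -> 1 (same sign)
# same_sign(-2.0, 3.5) -> 0 (opposite signs)
# same_sign(1e-4, -5.0) -> 1 (first argument is within the 1e-3 tolerance)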
#----------------------main body------------------------
n = len(data)
cov_len = cof_K.shape[0]
R = np.ones(n)
G4 =0.0
start = time.time()
log_fullpost = np.zeros((nMCMC+1,1))
logpost_g = np.zeros((nMCMC+1,1))
Sig_y = np.zeros((nMCMC+1,1))
C0 = np.diag(np.ones(cov_len))
J = len(G)
sum_G = np.zeros((1,J))
sum_sig_l_02 = np.zeros((1,7))
t = 0
mu = np.zeros((n,3))
sig_l_02_samp = np.zeros((nMCMC+1,7))
G_samp = np.zeros((nMCMC+1,len(G)))
seqJ = np.array(range(J))
#design matrix from data---only compute once
X = np.zeros((n,3,len(G)))
lat0 =0
Sigma = np.zeros((n,3,3))
#X1 is a design matrix
for i in range(n):
if lat[i] != lat0:
X1 = deg_mat_zonal(lat[i],G4,a_r=1.0) #ith design matrix
Xtmp= X1
Sigma_i = psv.Cov(alpha,beta,lat[i], degree,sig10_2,sig11_2,sig20_2,sig21_2,sig22_2)
Sigma[i] = Sigma_i
else:
X1 = Xtmp
Sigma[i] = Sigma[i-1]
#pdb.set_trace()
#print(Sigma[i] , Sigma[i-1] )
X1 = np.vstack((X1[0],[0,0,0],X1[1]))
X[i] = X1
lat0 = lat[i]
while t <= nMCMC:
#update length paras R----given G compute mu
Y = np.zeros((n,3))
lat0 = 0
for i in range(n):
lat_i = lat[i]
#print(lat_i, lat0)
if lat_i != lat0:
mu[i] = np.array([np.sum(G*X[i,0]),0.0, np.sum(G*X[i,2])])
else:
mu[i] = mu[i-1]
lat0 = lat_i
R[i] = slice_samp_r_weight(R[i], data[i],mu[i],Sigma[i],3)
Y[i] = R[i]*data[i]
#-------------------------------------------------------------------------------------
if cov_len == 5:
sig_paras = np.array([sig10_2, sig11_2, sig20_2, sig21_2, alpha])
else:
sig_paras = np.array([sig10_2, sig11_2, sig20_2, sig21_2, alpha, beta])
print(sig_paras)
sig_l_02_samp[t] = np.array([sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta])
sd = 0.35*np.ones(cov_len)
if t ==0:
if method =='ab_joint':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma, sd, mu_ast)
elif method == 'ab_ind':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp_beta(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma, sd, mu_ast)
elif method == 'xi2':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp2(Y, mu, lat, degree, cof_K, sig10_2, sig11_2,sig20_2,sig21_2, alpha,beta, Sigma, sd, mu_ast)
else:
if method =='ab_joint':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp(Y, mu, lat, degree, cof_K, sig_l_02_samp[t,0],sig_l_02_samp[t,1],sig_l_02_samp[t,2],sig_l_02_samp[t,3],sig_l_02_samp[t,5], sig_l_02_samp[t,6], Sigma, sd, mu_ast)
elif method == 'ab_ind':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp_beta(Y, mu, lat, degree, cof_K, sig_l_02_samp[t,0],sig_l_02_samp[t,1],sig_l_02_samp[t,2],sig_l_02_samp[t,3],sig_l_02_samp[t,5], sig_l_02_samp[t,6], Sigma, sd, mu_ast)
elif method == 'xi2':
sig10_2, sig11_2, sig20_2, sig21_2,sig22_2, alpha,beta, Sigma, log_post_Sig = update_Sigma_AM_onestp2(Y, mu, lat, degree, cof_K, sig_l_02_samp[t,0],sig_l_02_samp[t,1],sig_l_02_samp[t,2],sig_l_02_samp[t,3],sig_l_02_samp[t,5], sig_l_02_samp[t,6], Sigma, sd, mu_ast)
Sig_y[t] = log_post_Sig
#--------------------------------------------------------------------------------------------
sig30_2 = psv.s_lm2(3,0,alpha,beta,sig10_2, sig11_2, sig20_2, sig21_2,sig22_2)
if G4 == 0.0:
sig_l_02 = np.array([sig10_2, sig20_2, sig30_2])
else:
sig40_2 = psv.s_lm2(4,0,alpha,beta,sig10_2, sig11_2, sig20_2, sig21_2,sig22_2)
sig_l_02 = np.array([sig10_2, sig20_2, sig30_2, sig40_2])
#print(Y[0])
#update zonal field coefficients G=[g_1^0, g_2^0, g_3^0] given Sigma and R
sig_l_2_tild = []
m_tild = []
log_post_G = 0
for j in range(len(G)):
noJind = np.where(seqJ != j)[0]
mu_tild_cof = 0
invsig_tild = 0
for i in range(n):
GnoJ = G[noJind]
sumXGnotj =np.sum( X[i][:, noJind]*GnoJ.T, axis=1) #dim 2x1 #
XSig = np.linalg.solve(Sigma[i],X[i][:,j]) #Sigma^-1 X^T dim = 2x1
XsigX = np.matmul(XSig.T, X[i][:,j]) #scalar
XsigXGnoj = np.matmul(XSig.T, sumXGnotj) #scalar
XsigY = Y[i].dot(XSig)
mu_tild_cof = mu_tild_cof - XsigXGnoj + XsigY
invsig_tild = invsig_tild + XsigX
sig2 = 1/(invsig_tild + 1/100) # Gaussian prior with variance 100 on each g_j^0
#pdb.set_trace()
if sig2 < 0:
sig2 = 0.0
sig_l_2_tild.append(sig2)
mu_j = sig2* mu_tild_cof
m_tild.append(mu_j)
sig = np.sqrt(sig2)
#
tmp_g = np.random.normal(mu_j, sig)
if np.isnan(tmp_g):
tmp_g = 0.0
G[j] = tmp_g
log_g = np.log(normal.pdf(tmp_g, loc=mu_j, scale=sig) )
log_post_G = log_post_G + log_g
G_samp[t] = G
print(G)
logpost_g[t] = log_post_G
new_post = log_post_Sig - log_post_G
log_fullpost[t] = new_post
if (t > nMCMC):
break
t += 1
print(t)
MCpost_G = np.mean(G_samp[burnin:-1],axis=0)
MCpost_sig_l_02 = np.mean(sig_l_02_samp[burnin:-1], axis=0)
end = time.time()
runtime = end - start
print(runtime)
Filename=directory + "/" + rstname +"."+fileformat
with open(Filename, 'wb') as f:
pickle.dump([Y, X, R, cof_K, MCpost_G,MCpost_sig_l_02, G_samp,log_fullpost, sig_l_02_samp, Sig_y, logpost_g, runtime], f)
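# To reload the saved results later (sketch; mirrors the pickle.dump call above):
# with open(Filename, 'rb') as f:
#     (Y, X, R, cof_K, MCpost_G, MCpost_sig_l_02, G_samp, log_fullpost,
#      sig_l_02_samp, Sig_y, logpost_g, runtime) = pickle.load(f)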
``` |
{
"source": "Jiali-Xing/Talaria",
"score": 3
} |
#### File: models/pbft/message.py
```python
from blocksim.utils import kB_to_MB
from blocksim.models.pbft_network import MaliciousModel
class Message:
# Jiali: Copied from Ethereum
# Defines a model for the network messages of the PBFT blockchain.
def __init__(self, origin_node):
self.origin_node = origin_node
_env = origin_node.env
self._message_size = _env.config['pbft']['message_size_kB']
# Digest is 1 for a correct/valid message, 0 for a maliciously modified/invalid one.
self.digest = 0 if self.origin_node.is_malicious == MaliciousModel.ACTIVE else 1
def status(self):
""" Inform a peer of its current PoA state.
This message should be sent `after` the initial handshake and `prior` to any PoA related messages.
"""
return {
'id': 'status',
'protocol_version': 'ONE',
'network': self.origin_node.network.name,
'td': self.origin_node.chain.head.header.difficulty,
'best_hash': self.origin_node.chain.head.header.hash,
'genesis_hash': self.origin_node.chain.genesis.header.hash,
'size': kB_to_MB(self._message_size['status'])
}
def transactions(self, transactions: list):
""" Specify (a) transaction(s) that the peer should make sure is included on its
transaction queue. Nodes must not resend the same transaction to a peer in the same session.
This packet must contain at least one (new) transaction.
"""
num_txs = len(transactions)
transactions_size = num_txs * self._message_size['tx']
return {
'id': 'transactions',
'transactions': transactions,
'size': kB_to_MB(transactions_size)
}
# Ryan: Reformat messages to hold info
def pre_prepare(self, seqno, new_blocks: dict, block_bodies: dict, new_view):
# Jiali: pre-prepare should be similar to newblock, so I migrate newblock to here.
"""Advertises one or more new blocks which have appeared on the network"""
# Jiali: we can use the number of last block in one message (assume multiple blocks in one pre-prepare is
# possible) as seqno!
if new_view:
return {
'id': 'pre-prepare',
'view': self.origin_node.network.view + 1,
'seqno': seqno,
'digest': self.digest,
'new_blocks': new_blocks,
'block_bodies': block_bodies,
'size': kB_to_MB(self._message_size['tx'])
}
else:
num_new_block_hashes = len(new_blocks)
new_blocks_size = num_new_block_hashes * \
self._message_size['hash_size']
txsCount = 0
for block_hash, block_txs in block_bodies.items():
txsCount += len(block_txs)
message_size = (txsCount * self._message_size['tx']) + self._message_size['block_bodies']
return {
'id': 'pre-prepare',
'view': self.origin_node.network.view,
'seqno': seqno,
'digest': self.digest,
'new_blocks': new_blocks,
'block_bodies': block_bodies,
'size': kB_to_MB(message_size+new_blocks_size)
}
def prepare(self, seqno):
return {
'id': 'prepare',
'view': self.origin_node.network.view,
'seqno': seqno,
'digest': self.digest,
'replica_id': self.origin_node.replica_id,
'size': kB_to_MB(self._message_size['prepare'])
}
# Originally copied from "block_bodies()" in the ETH version of message.py
def commit(self, seqno):
return {
'id': 'commit',
'view': self.origin_node.network.view,
'seqno': seqno,
'digest': self.digest,
'replica_id': self.origin_node.replica_id,
'size': kB_to_MB(self._message_size['commit'])
}
def client_reply(self, new_block):
return {
'id': 'reply',
'view': self.origin_node.network.view,
'timestamp': new_block.header.timestamp,
'client': self.origin_node.network.view % len(self.origin_node.network._list_authority_nodes),
'replica_id': self.origin_node.replica_id,
'result': new_block,
'size': kB_to_MB(self._message_size['reply']) #TODO: Will need to add block size
}
def checkpoint(self, seqno, replica_id):
return {
'id': 'checkpoint',
'seqno': seqno,
'digest': self.digest,
'replica_id': replica_id,
'size': kB_to_MB(self._message_size['checkpoint'])
}
def view_change(self, ckpt_seqno, checkpoint_msg, prepare_msg):
return {
'id': "viewchange",
'nextview': self.origin_node.network.view + 1,
'checkpoint_seqno': ckpt_seqno,
'checkpoint_messages': checkpoint_msg,
'prepare_messages': prepare_msg,
'replica_id': self.origin_node.replica_id,
'size': kB_to_MB(self._message_size['viewchange_base']) + (len(checkpoint_msg) * kB_to_MB(self._message_size['checkpoint'])) + \
(len(prepare_msg) * kB_to_MB(self._message_size['prepare']))
}
def new_view(self, viewchange_msg, preprepare_msg):
# Jiali: the following line is redundant as per line 425 in node.py: self.network.view += 1
# self.origin_node.network.view = self.origin_node.network.view + 1
return {
'id': "newview",
'newview': self.origin_node.network.view + 1,
'viewchange_messages': viewchange_msg,
'preprepare_messages': preprepare_msg,
'size': kB_to_MB(self._message_size['newview_base']) + (len(viewchange_msg) * kB_to_MB(self._message_size['viewchange_base'])) + \
(len(preprepare_msg) * kB_to_MB(self._message_size['tx']))
}
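# Normal-case message flow produced by this class (sketch; see node.py for how
# the payloads are consumed):
#   msg = Message(primary_node)
#   msg.pre_prepare(seqno, new_blocks, block_bodies, new_view=False)  # primary -> replicas
#   msg.prepare(seqno)                                                # replicas -> all
#   msg.commit(seqno)                                                 # replicas -> all
#   msg.client_reply(new_block)                                       # replica -> client
# checkpoint(), view_change() and new_view() cover log truncation and primary failure.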
```
#### File: blocksim/models/permissoned_transaction_queue.py
```python
from collections import OrderedDict
# from collections import deque
# from blocksim.utils import time
class TransactionQueue():
def __init__(self, env, node, consensus):
self._env = env
self._node = node
self._consensus = consensus
self._transaction_queue = OrderedDict()
key = f'{node.address}_number_of_transactions_queue'
self._env.data[key] = 0
def put(self, tx):
key = f'{self._node.address}_number_of_transactions_queue'
self._env.data[key] += 1
self._transaction_queue[tx.signature] = tx
def get(self):
# TODO: A delay to retrieve a transaction from the Queue
# Jiali: FIFO popitem is achieved with the OrderedDict() here
return self._transaction_queue.popitem(last=False)[1]
def remove(self, tx):
# None is the default value for pop, so no exception is raised if given key doesn't exist
return self._transaction_queue.pop(tx.signature, None)
def remove_txs(self, txs):
[self._transaction_queue.pop(tx.signature) for tx in txs]
def add_txs(self, txs):
for tx in txs:
self.put(tx)
def is_empty(self):
return len(self._transaction_queue) == 0
def size(self):
return len(self._transaction_queue)
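# Usage sketch (illustrative; assumes transaction objects expose a unique .signature):
# queue = TransactionQueue(env, node, consensus)
# queue.put(tx_a)
# queue.put(tx_b)
# first = queue.get()  # returns tx_a: popitem(last=False) pops in FIFO order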
```
#### File: blocksim/models/simpy_learning.py
```python
import simpy
def car(env):
flag = True
while True:
print('Start parking at %d' % env.now)
parking_duration = 5
yield env.timeout(parking_duration)
print('Start driving at %d' % env.now)
trip_duration = 2
yield env.timeout(trip_duration)
print('now=%d, car need to wait for some time, value=%d' % (env.now, 3))
event = simpy.events.Timeout(env, delay=3, value=3)
value = yield event
print('now=%d, finish waiting, this happens in main time line' % env.now)
if flag:
env.process(traffic(env))
# flag = False
def traffic(env):
print('now=%d, this happens in parallel universe' % env.now)
event = simpy.events.Timeout(env, delay=1, value=1)
value = yield event
print('now=%d, and does not interfere with main time line\'s car, value=%d' % (env.now, value))
if __name__ == '__main__':
env = simpy.Environment()
env.process(car(env))
env.run(until=50)
```
#### File: Talaria/blocksim/pbft_transaction_factory.py
```python
import json
import string
from pathlib import Path
from random import choices, randint, random
import simpy
import numpy as np
from blocksim.utils import time
from blocksim.permissioned_transaction_factory import PermTransactionFactory
from blocksim.models.ethereum.transaction import Transaction as ETHTransaction
from blocksim.models.transaction import Transaction
class PBFTTransactionFactory(PermTransactionFactory):
""" Responsible to create batches of random transactions. Depending on the blockchain
being simulated, transaction factory will create transactions according to the
transaction model. Moreover, the created transactions will be broadcasted when simulation
is running by a random node on a list. Additionally, the user needs to specify the
number of batches, number of transactions per batch and the interval in seconds between each batch.
"""
def __init__(self, world):
super().__init__(world)
def broadcast(self, json_file_name, interval, nodes_list):
self.verbose = self._world.env.config["verbose"]
path = Path.cwd() / 'supply-chain-input-data' / json_file_name
if not path.exists():
raise Exception('Wrong working dir. Should be blocksim-dlasc')
all_days = True
with path.open() as f:
all_days_tx = json.load(f)
if all_days:
today = 'All Days'
# only one day's tx is too little...
# Thus I decide to use all tx from 180 days,
# but that turns out to be too much, so I add only every ten days
# '''
node_tx = []
for key, value in all_days_tx.items():
# This part sums tx every ten days, e.g., 10, 20, 30 etc., to make tx larger, but not too large
node_tx.append(all_days_tx[key][1:])
# if int(key[-2:-1]) < 9:
# pass
node_tx_array = np.array(node_tx)
# '''
sum_tx = np.sum(node_tx_array, axis=0)
else:
today = self._world.env.data['day']
sum_tx = all_days_tx[today][1:]
# Jiali: Here we implement the paired transaction dictionary to count international tx.
paired = False
dict_path = Path.cwd() / 'supply-chain-input-data' / 'tx_dict.json'
with dict_path.open() as df:
paired_tx = json.load(df)
blockchain_switcher = {
'poa': self._generate_poa_tx,
'pbft': self._generate_pbft_tx,
'bitcoin': self._generate_bitcoin_tx,
'ethereum': self._generate_ethereum_tx
}
if paired:
international_tx = 0
for sender in paired_tx.keys():
transactions = []
for j in paired_tx[sender].keys():
n_tx = paired_tx[sender][j]
i = int(sender)
j = int(j)
j = min(j, len(nodes_list) - 1)
for _i in range(n_tx):
sign = '-'.join([nodes_list[i].address, nodes_list[j].address, str(_i)])
tx = blockchain_switcher.get(self._world.blockchain, lambda: "Invalid blockchain")(sign, i)
transactions.append(tx)
if nodes_list[i].address[7] != nodes_list[j].address[7]:
self._world.env.data['international_transactions'] += n_tx
self._world.env.process(self._set_interval(nodes_list[i], transactions, interval * i))
else:
for i in range(min(len(nodes_list), len(sum_tx))):
transactions = []
for _i in range(sum_tx[i]):
# Generate a random string to a transaction be distinct from others
# rand_sign = ''.join(
# choices(string.ascii_letters + string.digits, k=20))
sign = '- '.join(
[today, nodes_list[i].address, str(_i), str(self._world.env.data['created_transactions'])])
tx = blockchain_switcher.get(self._world.blockchain, lambda: "Invalid blockchain")(sign, i)
transactions.append(tx)
self._world.env.process(self._set_interval(nodes_list[i], transactions, interval * i))
def _set_interval(self, node, tx, interval):
event = simpy.events.Timeout(self._world.env, delay=interval, value=interval)
value = yield event
self._world.env.process(
node.broadcast_transactions(tx))
if self.verbose:
print(f'{time(self._world.env)}, now {value} seconds have passed')
self._world.env.data['created_transactions'] += len(tx)
# yield self._world.env.timeout(interval)
def _generate_pbft_tx(self, rand_sign, i):
tx = Transaction('address', 'address', 140, rand_sign, 50)
return tx
```
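The `broadcast` method above reads two JSON inputs from `supply-chain-input-data`: a per-day file whose values are lists in which entry 0 is skipped and entries 1..N are per-node transaction counts, and `tx_dict.json` mapping sender index to receiver index to a transaction count. A minimal sketch of compatible files follows; the keys and counts are made-up placeholders inferred from the indexing in `broadcast()`, not the project's real data.
```python
import json
from pathlib import Path

Path('supply-chain-input-data').mkdir(exist_ok=True)

# Per-day counts: element 0 is skipped by broadcast(); elements 1..N map to nodes.
all_days_tx = {
    "Day 1": ["label", 3, 5, 2],
    "Day 2": ["label", 4, 1, 6],
}
# sender index -> receiver index -> number of transactions (used when paired=True)
tx_dict = {"0": {"1": 2, "2": 1}, "1": {"0": 3}}

with open('supply-chain-input-data/example_tx.json', 'w') as f:
    json.dump(all_days_tx, f)
with open('supply-chain-input-data/tx_dict.json', 'w') as f:
    json.dump(tx_dict, f)
```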
#### File: Talaria/blocksim/permissioned_node_factory.py
```python
import csv
from pathlib import Path
from ast import literal_eval as make_tuple
from random import randint
from blocksim.models.bitcoin.node import BTCNode
from blocksim.models.ethereum.dlasc_node import ETHNode
from blocksim.models.poa.node import POANode
from blocksim.models.pbft.node import PBFTNode
from blocksim.models.pbft_network import MaliciousModel
from blocksim.node_factory import NodeFactory
class PermNodeFactory(NodeFactory):
""" Responsible to create the nodes used during the simulation.
Depending on the blockchain being simulated, node factory will create nodes according
to the node model. The user can specify the location, number of miners and non-miners,
and the range of hash rate for the miner nodes. When nodes are created, is chosen a
random hash rate from the range inputed. The location of each node needs to be recognised
by the simulator, meaning that it needs to exist input parameters about latency and throughput.
"""
def __init__(self, world, network):
super().__init__(world, network)
def create_nodes(self, miners, non_miners):
self._check_location(miners, non_miners)
# If a new blockchain is modeled it needs to be inserted here
blockchain_switcher = {
'bitcoin': self.create_bitcoin_nodes,
'ethereum': self.create_ethereum_nodes,
'poa': self.create_poa_nodes,
'pbft': self.create_pbft_nodes
}
return blockchain_switcher.get(
self._world.blockchain, lambda: "Invalid blockchain")(miners, non_miners)
def create_poa_nodes(self, miners, non_miners):
# Jiali: miners/non_miners are set by csv instead, so no need to provide above!
path = Path.cwd() / 'blocksim' / 'Test_DLA1_Input.csv'
if not path.exists():
raise Exception('Wrong working dir. Should be blocksim-dlasc')
with path.open('r') as infile:
reader = csv.reader(infile)
node_region = {rows[0]: rows[3] for rows in reader}
print(node_region)
# node_id = 0 # Unique ID for each node
nodes_list = []
for node_id, region_id in node_region.items():
node_address = f'region_{region_id}-no_{node_id}'
if int(region_id) <= 3:
# Create the authority nodes if node is in US
mega_hashrate_range = make_tuple('(20, 40)')
# Jiali: hashrate is no longer needed, but let's keep it in case.
# hashrate = randint(
# mega_hashrate_range[0], mega_hashrate_range[1]) * 10 ** 6
new = POANode(self._world.env,
self._network,
region_id,
node_address,
# hashrate,
True)
nodes_list.append(new)
else:
# Create the non-authority nodes if the node is overseas
new = POANode(self._world.env,
self._network,
region_id,
node_address,
False)
nodes_list.append(new)
print(f'NodeFactory: Created {len(nodes_list)} PoA nodes')
return nodes_list
def create_pbft_nodes(self, miners, non_miners):
# Jiali: miners/non_miners are set by csv instead, so no need to provide above!
path = Path.cwd() / 'blocksim' / 'Test_DLA2_Input.csv'
if not path.exists():
raise Exception('Wrong working dir. Should be blocksim-dlasc')
with path.open('r') as infile:
reader = csv.reader(infile)
node_region = {rows[0]: rows[3] for rows in reader}
with path.open('r') as infile:
# Jiali: Assume the last column in csv represents malicious type.
reader = csv.reader(infile)
nodes_malicious = {rows[0]: rows[7] for rows in reader}
print(node_region)
# node_id = 0 # Unique ID for each node
nodes_list = []
replica_id = 0
for node_id, region_id in node_region.items():
node_address = f'region_{region_id}-no_{node_id}'
is_malicious = int(nodes_malicious[node_id])
if int(region_id) <= 3:
# Create the authority nodes if node is in US
mega_hashrate_range = make_tuple('(20, 40)')
# Jiali: hashrate is no longer needed, but let's keep it in case.
# hashrate = randint(
# mega_hashrate_range[0], mega_hashrate_range[1]) * 10 ** 6
new = PBFTNode(self._world.env,
self._network,
region_id,
node_address,
replica_id,
True,
MaliciousModel(is_malicious))
nodes_list.append(new)
else:
# Create the non-authority nodes if the node is overseas
new = PBFTNode(self._world.env,
self._network,
region_id,
node_address,
replica_id,
False)
nodes_list.append(new)
replica_id = replica_id + 1
print(f'NodeFactory: Created {len(nodes_list)} pBFT nodes')
return nodes_list
def create_ethereum_nodes(self, miners, non_miners):
with open('Test_DLA1_Input.csv', mode='r') as infile:
reader = csv.reader(infile)
node_region = {rows[0]: rows[3] for rows in reader}
print(node_region)
# node_id = 0 # Unique ID for each node
nodes_list = []
for node_id, region_id in node_region.items():
node_address = f'{region_id}-{node_id}'
if int(region_id) <= 3:
# Create the miners nodes if node is in US
mega_hashrate_range = make_tuple('(20, 40)')
# Choose a random value on MH/s range and convert to H/s
hashrate = randint(
mega_hashrate_range[0], mega_hashrate_range[1]) * 10 ** 6
new = ETHNode(self._world.env,
self._network,
region_id,
node_address,
hashrate,
True)
nodes_list.append(new)
else:
# Create the non-miner nodes if the node is overseas
new = ETHNode(self._world.env,
self._network,
region_id,
node_address,
False)
nodes_list.append(new)
print(f'NodeFactory: Created {len(nodes_list)} ethereum nodes')
return nodes_list
``` |
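Both factories above only use column 0 (node ID) and column 3 (region ID) of the input CSV, plus column 7 (malicious flag) for pBFT; regions 1–3 become authority/miner nodes and higher regions become non-authority nodes. A minimal sketch of a compatible file, with the unused columns filled by placeholders (the real project schema may carry other data in those columns):
```python
import csv

# [node_id, ?, ?, region_id, ?, ?, ?, malicious_flag] - only columns 0, 3 and 7 are read
rows = [
    ["1", "-", "-", "1", "-", "-", "-", "0"],  # region <= 3 -> authority node
    ["2", "-", "-", "5", "-", "-", "-", "1"],  # region > 3  -> non-authority node
]
with open("Test_DLA2_Input.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)
```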
{
"source": "Jialn/SocialRobot",
"score": 2
} |
#### File: python/social_bot/keybo_control.py
```python
import sys, tty, termios
import select
import numpy as np
from absl import logging
from pymouse import PyMouse
from pykeyboard import PyKeyboardEvent
class KeyboardControl(PyKeyboardEvent):
"""
This class is used to generate demonstrations from a human operator through the keyboard.
Some tricks are used to make keyboard control more user friendly.
Move the agent around with the keys "W, A, S, D", open or close the gripper with
the key "E", and move the robot arm joints (if there are any) with the mouse and the keys "R, F".
"""
def __init__(self):
super().__init__(capture=False)
self._mouse = PyMouse()
x, y = self._mouse.screen_size()
self._x_center, self._y_center = x / 2.0, y / 2.0
self._speed = 0
self._turning = 0
self._arm_joints = [0, 0, 0]
self._gripper_open = True
self._wheel_step = 0.5
self._speed_decay = 0.9
self._turning_decay = 0.6
self._done = False
logging.info("Control:" + """
W : increase forward speed
S : increase backward speed
A : turn left
D : turn right
E : open/close gripper (if there is one)
R/F : move robot arm joint (if there is one)
+ : increase the step size for W/S/A/D
- : decrease the step size for W/S/A/D
Q : quit
mouse : gripper position (if there is one)
""")
self.start()
def reset(self):
self._arm_joints = [0, 0, 0]
self._gripper_open = True
self._speed = 0
self._turning = 0
def get_agent_actions(self, agent_type):
"""
Args:
agent_type(string): the agent type
Returns:
actions generated by the keyboard according to the agent type
"""
# decay the speed
self._speed *= self._speed_decay
self._turning *= self._turning_decay
# get gripper pos
mouse_x, mouse_y = self._get_mouse_pos()
self._arm_joints[0] = mouse_x
self._arm_joints[1] = mouse_y
return self._convert_to_agent_action(agent_type)
def tap(self, keycode, character, press):
""" Keyboard event handler.
Args:
keycode(int): the key code
character(string): the key name
press(bool): True if the key was pressed and False if the key was released.
"""
if not press:
return
if character == "w":
self._speed = 0 if self._speed < -0.01 else self._speed + self._wheel_step
elif character == "s":
self._speed = 0 if self._speed > 0.01 else self._speed - self._wheel_step
elif character == "a":
self._turning = 0 if self._turning > 0.01 else self._turning - self._wheel_step
elif character == "d":
self._turning = 0 if self._turning < -0.01 else self._turning + self._wheel_step
# arm_joint[2]
elif character == "r":
self._arm_joints[2] -= 0.1
elif character == "f":
self._arm_joints[2] += 0.1
# gripper finger
elif character == "e":
self._gripper_open = not self._gripper_open
# set step size
elif character == "+":
self._wheel_step *= 1.5
elif character == "-":
self._wheel_step *= 0.7
elif character == "q":
self._done = True
def is_done(self):
return self._done
def _get_mouse_pos(self):
""" Get the mouse position and normalize to (-1, 1).
"""
x, y = self._mouse.position()
x, y = x / self._x_center - 1.0, y / self._y_center - 1.0
return x, y
def _convert_to_agent_action(self, agent_type):
if agent_type == 'pioneer2dx_noplugin' or agent_type == 'turtlebot':
actions = self._to_diff_drive_action()
elif agent_type == 'youbot_noplugin':
actions = self._to_youbot_action()
elif agent_type == 'pr2_noplugin':
actions = self._to_pr2_action()
elif agent_type == 'kuka_lwr_4plus':
actions = self._to_lwr4_action()
else:
actions = []
logging.info("agent type not supported yet: " + agent_type)
return actions
def _to_diff_drive_action(self):
left_wheel_joint = self._speed + self._turning
right_wheel_joint = self._speed - self._turning
actions = [left_wheel_joint, right_wheel_joint]
return actions
def _to_youbot_action(self):
""" Convert to the wrapped youbot actions
"""
if self._gripper_open:
finger_joint = 0.5
else:
finger_joint = -0.5
actions = [
self._arm_joints[0], self._arm_joints[1], self._arm_joints[2], 0,
finger_joint, self._speed, self._turning
]
return actions
def _to_lwr4_action(self):
actions = [
self._speed, self._turning, self._arm_joints[0],
self._arm_joints[1], self._arm_joints[2]
]
return actions
def _to_pr2_action(self):
wheel_joint_bl = self._speed + self._turning
wheel_joint_br = self._speed - self._turning
wheel_joint_fl = self._speed + self._turning
wheel_joint_fr = self._speed - self._turning
actions = [
wheel_joint_fl, wheel_joint_fl, wheel_joint_fr, wheel_joint_fr,
wheel_joint_bl, wheel_joint_bl, wheel_joint_br, wheel_joint_br
]
return actions
def main():
"""
Simple testing of KeyboardControl class.
"""
import matplotlib.pyplot as plt
from social_bot.envs.play_ground import PlayGround
from social_bot.tasks import GoalTask, KickingBallTask, ICubAuxiliaryTask, Reaching3D, PickAndPlace, Stack
from social_bot.gazebo_agent import YoubotActionWrapper
# Avoid the conflict between the keyboard control of the robot and the shortcut
# key of pyplot
plt.rcParams['keymap.save'].remove('s')
plt.rcParams['keymap.fullscreen'].remove('f')
plt.rcParams['keymap.quit'].remove('q')
use_image_obs = True
fig = None
agent_type = 'youbot_noplugin'
env = PlayGround(
with_language=False,
use_image_observation=use_image_obs,
image_with_internal_states=False,
agent_type=agent_type,
max_steps=100000,
step_time=0.05,
real_time_update_rate=500,
resized_image_size=(128, 128),
tasks=[Stack],
action_wrapper=YoubotActionWrapper)
env.render()
keybo = KeyboardControl()
while True:
actions = np.array(keybo.get_agent_actions(agent_type))
if keybo.is_done():
break
obs, _, done, _ = env.step(actions)
if use_image_obs:
if fig is None:
fig = plt.imshow(obs)
else:
fig.set_data(obs)
plt.pause(0.00001)
if done:
env.reset()
keybo.reset()
env.close()
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
main()
```
#### File: python/social_bot/parser_test.py
```python
import unittest
from social_bot.parser import ReParser
class ParserTest(unittest.TestCase):
def test_reparser(self):
parser = ReParser()
parser.add_word_class("OBJ", ["apple", "orange", "peach"])
parser.add_word_class("COLOR", ["red", "orange", "yellow"])
parser.add_rule("((it is )?an? )?<OBJ>", "OBJ")
parser.add_rule("(it is )?<COLOR>", "COLOR")
parser.add_rule("<OBJ> is edible", "EDIBLE")
parser.add_rule("<OBJ> is <COLOR>", "OBJ_COLOR")
parser.add_rule(
"an? <OBJ:OBJ_RIGHT> is on the right of an? <OBJ:OBJ_LEFT>",
"OBJ_POS")
parser.add_rule(
"an? <OBJ:OBJ_LEFT> is on the left of an? <OBJ:OBJ_RIGHT>",
"OBJ_POS")
for sentence in ["orange", "an orange", "it is an orange"]:
result = parser.parse(sentence)
self.assertIsNotNone(result)
self.assertEqual(result.rule_name, "OBJ")
self.assertEqual(result.slot_values, {"OBJ": "orange"})
for sentence in ["n orange", "it orange"]:
result = parser.parse(sentence)
self.assertIsNone(result)
for sentence in [
"a peach is on the right of an apple",
"an apple is on the left of a peach"
]:
result = parser.parse(sentence)
self.assertIsNotNone(result)
self.assertEqual(result.rule_name, "OBJ_POS")
self.assertEqual(result.slot_values, {
"OBJ_RIGHT": "peach",
"OBJ_LEFT": "apple"
})
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jialongshi1994/SQLAlchemy-challenge",
"score": 3
} |
#### File: jialongshi1994/SQLAlchemy-challenge/app.py
```python
from flask import Flask, render_template, jsonify
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Resources/hawaii.sqlite'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
db.reflect()
all_table = {table_obj.name: table_obj for table_obj in db.get_tables_for_bind()}
print(all_table)
@app.route('/')
def hello_world():
return render_template("home_page.html")
@app.route('/api/v1.0/precipitation', methods=['GET'])
def list_precipitation():
data = db.session.query(all_table["measurement"].c.date.label('date'),
all_table["measurement"].c.prcp.label("prcp")).all()
return render_template('list_precipitation.html', data=data)
@app.route('/api/v1.0/stations', methods=['GET'])
def list_stations():
data = db.session.query(all_table["station"]).all()
print(data)
return render_template('list_stations.html', data=data)
@app.route('/api/v1.0/tobs', methods=['GET'])
def list_tobs():
data_sub = db.session.query(all_table["measurement"].c.station.label('station'),
func.count(all_table["measurement"].c.station).label('station_count')).group_by(
all_table["measurement"].c.station).subquery()
data_2_sub = db.session.query(data_sub.c.station.label('station'), func.max(data_sub.c.station_count)).subquery()
data = db.session.query(all_table["measurement"].c.date, all_table['measurement'].c.tobs,
all_table['measurement'].c.station).filter(
all_table['measurement'].c.station == data_2_sub.c.station).all()
return render_template('list_tobs.html', data=data)
@app.route('/api/v1.0/<start>/<end>', methods=['GET'])
def get_data(start, end):
sel = [all_table['measurement'].c.station, func.min(all_table['measurement'].c.tobs),
func.avg(all_table['measurement'].c.tobs),
func.max(all_table['measurement'].c.tobs)]
data = db.session.query(*sel).filter(all_table["measurement"].c.date >= start,
all_table["measurement"].c.date >= end).group_by(
all_table["measurement"].c.station).all()
return render_template('static.html', data=data)
if __name__ == '__main__':
app.run(debug=True)
``` |
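With the app above running locally (for example via `python app.py`, which serves on Flask's default http://127.0.0.1:5000), the routes can be exercised with any HTTP client. A minimal sketch using `requests`; the start/end dates are placeholders and simply need to match the date format stored in the Hawaii SQLite file:
```python
import requests

base = "http://127.0.0.1:5000"
for route in ("/api/v1.0/precipitation", "/api/v1.0/stations", "/api/v1.0/tobs"):
    print(route, requests.get(base + route).status_code)

# start/end route: aggregates min/avg/max tobs per station over the date range
resp = requests.get(base + "/api/v1.0/2017-01-01/2017-01-07")
print(resp.status_code, resp.text[:200])
```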
{
"source": "jialong-w/gps_api",
"score": 4
} |
#### File: gps_api/gps_api/gps_api.py
```python
import serial
import serial.tools.list_ports
from haversine import haversine
from . import position
class GPS:
def __init__(self, port):
self.port = port
# list com ports
ports = list(serial.tools.list_ports.comports())
for p in ports:
# if the requested port is among the listed com ports, connect to it
if self.port in p:
self.ser = serial.Serial(port, baudrate=9600, timeout=0.5)
break
else:
# for/else: no listed port matched the requested one
raise Exception('Invalid port')
# change NMEA message type and frequency:
# set GLL, RMC, VTG and GGA output frequency to be outputting once every position fix
self.ser.write(b'$PMTK314,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0*28\r\n')
# initialisation of variables
self.nmea_msg = ""
self.position = position.Position()
self.distance = 0
self.another_location = None
def reboot(self):
"""
reboot involves a full cold start
FULL COLD START:
time, position, almanacs and ephemeris data will be redownloaded
system/user configurations will be cleared
process will take approximately 8 minutes, use with patience
instantiate GPS class after reboot to apply the
configuration of NMEA message type and frequency
"""
self.ser.write("\$PMTK104*37\r\n")
def clean_string(self):
"""
clear data held in nmea_msg
"""
self.nmea_msg = ""
def get_latitude(self):
"""
get latitude data from latest nmea message
:return: latitude data at current position
"""
self.clean_string()
while "GPGGA" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_latitude()
def get_longitude(self):
"""
get lonitude data from latest nmea message
:return: longitude data at current position
"""
self.clean_string()
while "GPGGA" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_longitude()
def get_altitude(self):
"""
get altitude data from latest nmea message
:return: altitude data at current position
"""
self.clean_string()
while "GPGGA" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_altitude()
def get_current_location(self):
"""
get current location data from latest nmea message
:return: current location data in the form (latitude, longitude)
"""
self.clean_string()
while "GPGGA" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_current_location()
def get_UTC_time(self):
"""
get UTC time data from latest nmea message
:return: UTC time at current position
"""
self.clean_string()
while "GPRMC" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_UTC_time()
def get_date(self):
"""
get UTC date data from latest nmea message
:return: UTC date at current position
"""
self.clean_string()
while "GPRMC" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_date()
def set_another_location(self, latitude, longitude):
"""
set a location
:param latitude: another location's latitude
:param longitude: another location's longitude
"""
self.another_location = (latitude, longitude)
def get_distance(self, latitude, longitude):
"""
get the distance to another location
:param latitude: another location's latitude
:param longitude: another location's longitude
:return: distance to the other position at (latitude, longitude)
"""
self.set_another_location(latitude, longitude)
distance = haversine(self.get_current_location(), self.another_location)
return distance
def get_speed(self):
"""
get speed data from latest nmea message
:return: current speed in km/h
"""
self.clean_string()
while "GPVTG" not in self.nmea_msg:
self.nmea_msg = self.ser.readline().decode("utf-8", "ignore")
self.position.update(self.nmea_msg)
return self.position.get_speed()
def get_time_of_arrival(self, latitude, longitude):
"""
get time of arrival to destination (latitude, longitude)
:param latitude: destination's latitude
:param longitude: destination's longitude
:return: estimated time of arrival to the destination
"""
speed = self.get_speed()
if speed == 0.0:
message = "You are stationary, time of arrival unknown"
return message
else:
time = float(self.get_distance(latitude,longitude))/float(speed)*3600
day = time // (24 * 3600)
time = time % (24 * 3600)
hour = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
# getting the current UTC time and splitting it into hours, minutes and seconds
current_time = str(self.get_UTC_time())
c_hour = int(current_time[0:2])
c_minutes = int(current_time[2:4])
c_seconds = int(current_time[4:6])
# calculating the arrival time
a_hours = c_hour+hour
a_minutes = c_minutes+minutes
a_seconds = c_seconds+seconds
arrival_time = ("%d:%d:%d" % (a_hours, a_minutes, a_seconds))
return arrival_time
``` |
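A minimal usage sketch of the `GPS` class above. The serial port name and the import path are assumptions (the port depends on the platform and adapter, and the import on how the package's `__init__` re-exports the class); a connected GPS module with a position fix is needed for the reads to return.
```python
from gps_api.gps_api import GPS   # import path assumed; adjust to the package layout

gps = GPS("/dev/ttyUSB0")          # e.g. "COM3" on Windows - placeholder port name
print("position:", gps.get_current_location())
print("UTC time:", gps.get_UTC_time())

# distance (km, via haversine) and ETA to an assumed destination (latitude, longitude)
dest = (51.5074, -0.1278)
print("distance:", gps.get_distance(*dest))
print("arrival:", gps.get_time_of_arrival(*dest))
```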
{
"source": "jialong-w/NN-Oleander-Detection",
"score": 3
} |
#### File: jialong-w/NN-Oleander-Detection/download_image.py
```python
import requests # to get image from the web
import shutil # to save it locally
import os
import sys
import csv
import cv2
import argparse
from ast import literal_eval
# global variables
project_name = ''
paths = []
label_list = []
image_select = False
ID_list = []
size = None
def set_paths():
global paths
for object_label in object_labels:
for item in ['Images', 'Labels', 'Masks']:
paths.append(os.path.join(project_name, object_label, item))
def make_directories():
for path in paths:
try:
os.makedirs(path) # make directories
except OSError as e:
print ("Creation of the directory %s failed: %s" % (path, format(e)))
else:
print ("Successfully created the directory %s " % path)
def get_IDs(txtfile):
try:
tf = open(txtfile, 'r')
line_list = [line.rstrip('\n') for line in tf]
tf.close()
except FileNotFoundError:
print('File {} does not exist'.format(txtfile))
sys.exit()
else:
s = set(line_list) # remove duplicates
return s
def download_images(csv_file):
global size
try:
# read data
with open(csv_file, 'rt') as file:
data = csv.reader(file)
# Sniffer class to deduce the format of a CSV file and detect whether a header row is present
# along with the built-in next() function to skip over the first row only when necessary
has_header = csv.Sniffer().has_header(file.read(1024))
file.seek(0)
if has_header: # skip header row
next(data)
# if size is given, download the limited number of images
# else, download all
if size is None:
data = list(data)
size = len(data)
number_of_downloaded = 1
image_no = [] # tracks the number of images under each label
for i in range(len(label_list)):
image_no.append(1)
for row in data:
# iterate through the rows
if number_of_downloaded <= size:
# load data
asset_ID = row[0] # load image ID
asset_url = row[2] # load original image url
labels = literal_eval(row[3]) # label entry converted to dictionary
if len(labels) == 0: # if no labels created
continue # skip the current row
else:
objects = labels['objects'] # load label instances
if image_select: # if textfile was passed to select image for download
if asset_ID in ID_list: # if current ID required
ID_list.remove(asset_ID) # remove the asset ID from the list and do the download
else: continue # else if not required, skip the current row
# iterate through labels to download images and masks of each label
for i in range(len(label_list)):
image_downloaded = False
n = image_no[i]
masks = []
mask_index = 1
result = 0
# iterate through the all the labels created on the asset image
for object in objects:
if object['title'] == label_list[i]:
if image_downloaded: pass
else:
# download original image if did not
dir_name = paths[3*i]
file_name = '{}{}{}.png'.format(project_name, '0'*(5-len(str(n))), n)
save_image(asset_url, dir_name, file_name)
image_no[i] += 1
image_downloaded = True
# download masks of labeled objects
mask_url = object['instanceURI'] # mask instance URI
dir_name = paths[3*i+1]
file_name = '{}{}{}_mask{}.png'.format(project_name, '0'*(5-len(str(n))), n, mask_index)
save_image(mask_url, dir_name, file_name)
mask_index += 1 # increment mask index
masks.append(file_name)
for mask in masks:
r = cv2.imread(os.path.join(dir_name, mask)).astype('float32')
result = result + r
if image_downloaded:
# overlay masks
result = 255*result
result = result.clip(0, 255).astype('uint8')
# save overlaid mask
dir_name = paths[3*i+2]
file_name = '{}{}{}_mask.png'.format(project_name, '0'*(5-len(str(n))), n)
cv2.imwrite(os.path.join(dir_name, file_name), result)
print('Mask successfully generated: ', os.path.join(dir_name, file_name))
number_of_downloaded += 1 # increment number of downloaded asset
else: break
except FileNotFoundError:
print('File {} does not exist'.format(csv_file))
sys.exit()
if not data:
raise ValueError('No data available')
def save_image(image_url, dir_name, file_name):
# Open the url image, set stream to True, this will return the stream content.
# stream = True to guarantee no interruptions
r = requests.get(image_url, stream = True)
# Check if the image was retrieved successfully
if r.status_code == 200:
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
# Open a local file with wb ( write binary ) permission.
with open(os.path.join(dir_name, file_name), 'wb') as f:
shutil.copyfileobj(r.raw, f)
print('Image successfully downloaded: ', os.path.join(dir_name, file_name))
else:
print("Image couldn\'t be retreived")
if __name__=='__main__':
# define the name of the directory to be created
parser = argparse.ArgumentParser()
# -p PROJECT_NAME -l LABELS -csv CSV_FILE -txt TXT_FILE -size NUMBER OF IMAGES
parser.add_argument('-p', '--projname', help = 'Project name')
parser.add_argument('-l', '--labels', help = 'Labels (separated by commas)')
parser.add_argument('-csv', '--csvfile', help = 'CSV filename')
parser.add_argument('-txt', '--txtfile', help = 'Text filename to download specified images')
parser.add_argument('-s', '--size', help = 'Number of images to be downloaded', type=int)
args = parser.parse_args()
if len(sys.argv) == 1:
print('-h for help')
else:
project_name = args.projname
project_labels = args.labels.split(',')
for label in project_labels:
label_list.append(label)
set_paths()
make_directories()
csv_file = args.csvfile
if args.txtfile is not None:
image_select = True
ID_list = get_IDs(args.txtfile)
if args.size is not None:
size = args.size
download_images(csv_file)
if ID_list: # if ID list still has items after the download
print('Images with the following IDs were not available:')
for item in ID_list:
print(item)
else: print('All completed')
``` |
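The downloader above expects a Labelbox-style export CSV in which column 0 is the asset ID, column 2 the original image URL, and column 3 a dict literal with an `objects` list whose entries carry a `title` and an `instanceURI` mask link. A minimal sketch of such a file, with every value a made-up placeholder, followed by the kind of command line the argparse setup accepts:
```python
import csv

label_entry = {"objects": [
    {"title": "oleander", "instanceURI": "https://example.com/mask1.png"},
]}
row = ["asset-0001", "unused", "https://example.com/image1.png", str(label_entry)]

with open("export.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["ID", "DataRow ID", "Labeled Data", "Label"])  # placeholder header
    writer.writerow(row)

# then, from the shell (project name and label are placeholders):
#   python download_image.py -p oleander -l oleander -csv export.csv -s 1
```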
{
"source": "JialongZhou666/Database-system-of-College-Students-restaurant-based-on-Python-GUI-library-Tkinter",
"score": 3
} |
#### File: Database-system-of-College-Students-restaurant-based-on-Python-GUI-library-Tkinter/src/identity1.py
```python
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
from tkinter.messagebox import *
from PIL import Image,ImageTk
import identity2 as iden2
import pymysql
class identity1(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master.title('身份验证界面')
self.master.geometry('409x300')
self.createWidgets()
def createWidgets(self):
self.top = self.winfo_toplevel()
self.style = Style()
global img0
self.style.configure('Label4.TLabel',anchor='center', justify='center')
photo = Image.open("E:/新桌面/ttttt/background2.png")
photo = photo.resize((409,300))
img0 = ImageTk.PhotoImage(photo)
self.Label4 = Label(self.top, image=img0, style='Label4.TLabel')
self.Label4.pack()
self.style.configure('Label1.TLabel',relief=SUNKEN, anchor='center', font=('楷体',16))
self.Label1 = Label(self.top, text='身份验证', style='Label1.TLabel')
self.Label1.place(relx=0.02, rely=0.027, relwidth=0.257, relheight=0.083)
self.Text1Var = StringVar()
self.Text1 = Entry(self.top, textvariable=self.Text1Var)
self.Text1.place(relx=0.372, rely=0.32, relwidth=0.374, relheight=0.083)
self.style.configure('Label2.TLabel',anchor='center', font=('楷体',15))
self.Label2 = Label(self.top, text='用户名:', style='Label2.TLabel')
self.Label2.place(relx=0.117, rely=0.32, relwidth=0.198, relheight=0.083)
self.Text2Var = StringVar()
self.Text2 = Entry(self.top, textvariable=self.Text2Var)
self.Text2.place(relx=0.372, rely=0.507, relwidth=0.374, relheight=0.083)
self.style.configure('Label3.TLabel',anchor='center', font=('楷体',15))
self.Label3 = Label(self.top, text='密码:', style='Label3.TLabel')
self.Label3.place(relx=0.156, rely=0.507, relwidth=0.159, relheight=0.083)
self.style.configure('Command1.TButton',font=('楷体',16))
self.Command1 = Button(self.top, text='登陆', command=self.Command1_Cmd, style='Command1.TButton')
self.Command1.place(relx=0.372, rely=0.72, relwidth=0.257, relheight=0.11)
def Command1_Cmd(self, event=None):
self.identity_in_db()
def identity_in_db(self):
global db
try:
db = pymysql.connect("172.16.58.3", "s2018302465", "GaussDB@123", "db_2018302465")
except:
showwarning('警告', '连接数据库失败!')
cursor = db.cursor()
cursor.execute("select username from usr where username={} and pw={}".format(self.Text1.get(),self.Text2.get()))
data = cursor.fetchone()
if data == None:
showwarning("警告","您不是餐厅管理人员!")
else:
showinfo("成功","已成功确认您的餐厅管理人员身份!")
self.top.destroy()
iden2.identity2()
db.close()
```
#### File: Database-system-of-College-Students-restaurant-based-on-Python-GUI-library-Tkinter/src/main.py
```python
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
from tkinter.messagebox import *
import identity1 as iden1
import pymysql
import customer_page as cp
class home_page(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master.title('大学餐厅菜品及服务管理系统首页')
self.master.geometry('820x538')
self.createWidgets()
def createWidgets(self):
self.top = self.winfo_toplevel()
self.style = Style()
global photo
self.style.configure('Label3.TLabel',anchor='center', justify='center')
photo = PhotoImage(file="E:/新桌面/ttttt/background1.png")
self.Label3 = Label(self.top, image=photo, style='Label3.TLabel')
self.Label3.pack()
self.style.configure('Command3.TButton',font=('楷体',16))
self.Command3 = Button(self.top, text='退出', command=self.Command3_Cmd, style='Command3.TButton')
self.Command3.place(relx=0.873, rely=0.917, relwidth=0.119, relheight=0.065)
self.style.configure('Command2.TButton',font=('楷体',18))
self.Command2 = Button(self.top, text='我是餐厅管理人员', command=self.Command2_Cmd, style='Command2.TButton')
self.Command2.place(relx=0.507, rely=0.669, relwidth=0.255, relheight=0.061)
self.style.configure('Command1.TButton',font=('楷体',18))
self.Command1 = Button(self.top, text='我是顾客', command=self.Command1_Cmd, style='Command1.TButton')
self.Command1.place(relx=0.234, rely=0.669, relwidth=0.255, relheight=0.061)
self.style.configure('Label1.TLabel',anchor='center', font=('楷体',18))
self.Label1 = Label(self.top, text='请选择您的身份?', style='Label1.TLabel')
self.Label1.place(relx=0.39, rely=0.461, relwidth=0.235, relheight=0.059)
self.style.configure('Label2.TLabel',anchor='center', font=('楷体',26))
self.Label2 = Label(self.top, text='大学餐厅菜品及服务管理系统', style='Label2.TLabel')
self.Label2.place(relx=0.205, rely=0.193, relwidth=0.572, relheight=0.095)
def Command3_Cmd(self, event=None):
if askokcancel("确认","是否退出?"):
top.destroy()
def Command2_Cmd(self, event=None):
top.destroy()
iden1.identity1()
def Command1_Cmd(self, event=None):
top.destroy()
cp.customer_page()
if __name__ == "__main__":
top = Tk()
home_page(top).mainloop()
```
#### File: Database-system-of-College-Students-restaurant-based-on-Python-GUI-library-Tkinter/src/select_sell_table.py
```python
from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
from tkinter.messagebox import *
import pymysql
class select_sell_table(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master.title('餐厅管理人员界面')
self.master.geometry('889x509')
self.createWidgets()
def createWidgets(self):
self.top = Tk()
self.top.title("售卖信息表")
self.style = Style()
self.tree = Treeview(self.top)
self.tree["columns"] = ("菜号", "菜名", "卖出数量")
self.tree.column("菜号", width=100)
self.tree.column("菜名", width=100)
self.tree.column("卖出数量", width=100)
self.tree.heading("菜号", text="菜号")
self.tree.heading("菜名", text="菜名")
self.tree.heading("卖出数量", text="卖出数量")
self.select_sell_table_in_db()
self.tree.pack()
self.top.mainloop()
def select_sell_table_in_db(self):
global db
try:
db = pymysql.connect("172.16.31.10", "s2018302465", "GaussDB@123", "db_2018302465")
except:
showwarning('警告', '连接数据库失败!')
cursor = db.cursor()
cursor.execute("select food.fno,fname,sell_num from food,sell where food.fno=sell.fno")
data = cursor.fetchall()
cnt=1
for row in data:
fno = row[0]
fname = row[1]
sell_num = row[2]
self.tree.insert("", cnt, text="line{}".format(cnt), values=(fno, fname, sell_num))
cnt=cnt+1
db.close()
``` |
{
"source": "jialrs/floris-enhanced",
"score": 3
} |
#### File: simulation/wake_velocity/jensen.py
```python
from ...utilities import setup_logger
from .base_velocity_deficit import VelocityDeficit
import numpy as np
class Jensen(VelocityDeficit):
"""
The Jensen model computes the wake velocity deficit based on the classic
Jensen/Park model :cite:`jvm-jensen1983note`.
References:
.. bibliography:: /source/zrefs.bib
:style: unsrt
:filter: docname in docnames
:keyprefix: jvm-
"""
default_parameters = {
"we": 0.05
}
def __init__(self, parameter_dictionary):
"""
Stores model parameters for use by methods.
Args:
parameter_dictionary (dict): Model-specific parameters.
Default values are used when a parameter is not included
in `parameter_dictionary`. Possible key-value pairs include:
- **we** (*float*): The linear wake decay constant that
defines the cone boundary for the wake as well as the
velocity deficit. D/2 +/- we*x is the cone boundary for the
wake.
"""
super().__init__(parameter_dictionary)
self.logger = setup_logger(name=__name__)
self.model_string = "jensen"
model_dictionary = self._get_model_dict(__class__.default_parameters)
self.we = float(model_dictionary["we"])
def function(self, x_locations, y_locations, z_locations, turbine,
turbine_coord, deflection_field, flow_field):
"""
Using the Jensen wake model, this method calculates and returns
the wake velocity deficits, caused by the specified turbine,
relative to the freestream velocities at the grid of points
comprising the wind farm flow field.
Args:
x_locations (np.array): An array of floats that contains the
streamwise direction grid coordinates of the flow field
domain (m).
y_locations (np.array): An array of floats that contains the grid
coordinates of the flow field domain in the direction normal to
x and parallel to the ground (m).
z_locations (np.array): An array of floats that contains the grid
coordinates of the flow field domain in the vertical
direction (m).
turbine (:py:obj:`floris.simulation.turbine`): Object that
represents the turbine creating the wake.
turbine_coord (:py:obj:`floris.utilities.Vec3`): Object containing
the coordinate of the turbine creating the wake (m).
deflection_field (np.array): An array of floats that contains the
amount of wake deflection in meters in the y direction at each
grid point of the flow field.
flow_field (:py:class:`floris.simulation.flow_field`): Object
containing the flow field information for the wind farm.
Returns:
np.array, np.array, np.array:
Three arrays of floats that contain the wake velocity
deficit in m/s created by the turbine relative to the freestream
velocities for the U, V, and W components, aligned with the
x, y, and z directions, respectively. The three arrays contain
the velocity deficits at each grid point in the flow field.
"""
# define the boundary of the wake model ... y = mx + b
m = self.we
x = x_locations - turbine_coord.x1
b = turbine.rotor_radius
boundary_line = m * x + b
y_upper = boundary_line + turbine_coord.x2 + deflection_field
y_lower = -1 * boundary_line + turbine_coord.x2 + deflection_field
z_upper = boundary_line + turbine.hub_height
z_lower = -1 * boundary_line + turbine.hub_height
# calculate the wake velocity
c = (turbine.rotor_diameter \
/ (2 * self.we * (x_locations - turbine_coord.x1) \
+ turbine.rotor_diameter))**2
# filter points upstream and beyond the upper and
# lower bounds of the wake
c[x_locations - turbine_coord.x1 < 0] = 0
c[y_locations > y_upper] = 0
c[y_locations < y_lower] = 0
c[z_locations > z_upper] = 0
c[z_locations < z_lower] = 0
return 2 * turbine.aI * c * flow_field.u_initial, \
np.zeros(np.shape(flow_field.u_initial)), \
np.zeros(np.shape(flow_field.u_initial))
@property
def we(self):
"""
The linear wake decay constant that defines the cone boundary for the
wake as well as the velocity deficit. D/2 +/- we*x is the cone boundary
for the wake.
**Note:** This is a virtual property used to "get" or "set" a value.
Args:
value (float): Value to set.
Returns:
float: Value currently set.
Raises:
ValueError: Invalid value.
"""
return self._we
@we.setter
def we(self, value):
if type(value) is not float:
err_msg = ('Invalid value type given for we: {}, ' + \
'expected float.').format(value)
self.logger.error(err_msg, stack_info=True)
raise ValueError(err_msg)
self._we = value
if value != __class__.default_parameters['we']:
self.logger.info(
('Current value of we, {0}, is not equal to tuned ' +
'value of {1}.').format(
value, __class__.default_parameters['we'])
)
```
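At a single downstream point the deficit computed by `function` above reduces to `2 * aI * (D / (D + 2*we*x))**2 * u_inf`, applied only inside the cone of half-width `D/2 + we*x`. A quick numeric sketch with made-up turbine values (D = 126 m, aI = 0.25, we = 0.05, u_inf = 8 m/s):
```python
# Scalar version of the Jensen deficit used by Jensen.function
D, aI, we, u_inf = 126.0, 0.25, 0.05, 8.0

for x in (200.0, 500.0, 1000.0):          # distance downstream of the rotor (m)
    c = (D / (D + 2.0 * we * x)) ** 2     # wake decay factor
    deficit = 2.0 * aI * c * u_inf        # velocity deficit (m/s)
    half_width = D / 2.0 + we * x         # wake cone half-width at x (m)
    print(f"x={x:6.0f} m  deficit={deficit:.2f} m/s  half-width={half_width:.1f} m")
```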
#### File: floris/tools/interface_utilities.py
```python
import inspect
def show_params(fi, params=None, verbose=False, wake_velocity_model=True,
wake_deflection_model=True, turbulence_model=True):
if wake_velocity_model is True:
obj = 'fi.floris.farm.wake.velocity_model'
props = get_props(obj, fi)
if verbose == True:
print('='.join(['=']*39))
else:
print('='.join(['=']*19))
print('Wake Velocity Model Parameters:', \
fi.floris.farm.wake.velocity_model.model_string, 'model')
if params is not None:
props_subset = get_props_subset(params, props)
if verbose is False:
print_props(obj, fi, props_subset)
else:
print_prop_docs(obj, fi, props_subset)
else:
if verbose is False:
print_props(obj, fi, props)
else:
print_prop_docs(obj, fi, props)
if wake_deflection_model is True:
obj = 'fi.floris.farm.wake.deflection_model'
props = get_props(obj, fi)
if verbose == True:
print('='.join(['=']*39))
else:
print('='.join(['=']*19))
print('Wake Deflection Model Parameters:', \
fi.floris.farm.wake.deflection_model.model_string, 'model')
if params is not None:
props_subset = get_props_subset(params, props)
if props_subset: # true if the subset is not empty
if verbose is False:
print_props(obj, fi, props_subset)
else:
print_prop_docs(obj, fi, props_subset)
else:
if verbose is False:
print_props(obj, fi, props)
else:
print_prop_docs(obj, fi, props)
if turbulence_model is True:
obj = 'fi.floris.farm.wake.turbulence_model'
props = get_props(obj, fi)
if verbose == True:
print('='.join(['=']*39))
else:
print('='.join(['=']*19))
print('Wake Turbulence Model Parameters:', \
fi.floris.farm.wake.turbulence_model.model_string, 'model')
if params is not None:
props_subset = get_props_subset(params, props)
if props_subset: # true if the subset is not empty
if verbose is False:
print_props(obj, fi, props_subset)
else:
print_prop_docs(obj, fi, props_subset)
else:
if verbose is False:
print_props(obj, fi, props)
else:
print_prop_docs(obj, fi, props)
def get_params(fi, params=None, wake_velocity_model=True,
wake_deflection_model=True, turbulence_model=True):
model_params = {}
if wake_velocity_model is True:
wake_vel_vals = {}
obj = 'fi.floris.farm.wake.velocity_model'
props = get_props(obj, fi)
if params is not None:
props_subset = get_props_subset(params, props)
wake_vel_vals = get_prop_values(obj, fi, props_subset)
else:
wake_vel_vals = get_prop_values(obj, fi, props)
model_params['Wake Velocity Parameters'] = wake_vel_vals
if wake_deflection_model is True:
wake_defl_vals = {}
obj = 'fi.floris.farm.wake.deflection_model'
props = get_props(obj, fi)
if params is not None:
props_subset = get_props_subset(params, props)
wake_defl_vals = get_prop_values(obj, fi, props_subset)
else:
wake_defl_vals = get_prop_values(obj, fi, props)
model_params['Wake Deflection Parameters'] = wake_defl_vals
if turbulence_model is True:
wake_defl_vals = {}
obj = 'fi.floris.farm.wake.turbulence_model'
props = get_props(obj, fi)
if params is not None:
props_subset = get_props_subset(params, props)
wake_defl_vals = get_prop_values(obj, fi, props_subset)
else:
wake_defl_vals = get_prop_values(obj, fi, props)
model_params['Wake Turbulence Parameters'] = wake_defl_vals
return model_params
def set_params(fi, params, verbose=True):
for param_dict in params:
if param_dict == 'Wake Velocity Parameters':
obj = 'fi.floris.farm.wake.velocity_model'
props = get_props(obj, fi)
for prop in params[param_dict]:
if prop in [val[0] for val in props]:
exec(obj + '.' + prop + ' = ' + \
str(params[param_dict][prop]))
if verbose is True:
print('Wake velocity parameter ' + prop + ' set to ' + \
str(params[param_dict][prop]))
else:
raise Exception(('Wake deflection parameter \'{}\' ' + \
'not part of current model. Value \'{}\' was not ' + \
'used.').format(prop, params[param_dict][prop]))
if param_dict == 'Wake Deflection Parameters':
obj = 'fi.floris.farm.wake.deflection_model'
props = get_props(obj, fi)
for prop in params[param_dict]:
if prop in [val[0] for val in props]:
exec(obj + '.' + prop + ' = ' + \
str(params[param_dict][prop]))
if verbose is True:
print('Wake deflection parameter ' + prop + \
' set to ' + str(params[param_dict][prop]))
else:
raise Exception(('Wake deflection parameter \'{}\' ' + \
'not part of current model. Value \'{}\' was not ' + \
'used.').format(prop, params[param_dict][prop]))
if param_dict == 'Wake Turbulence Parameters':
obj = 'fi.floris.farm.wake.turbulence_model'
props = get_props(obj, fi)
for prop in params[param_dict]:
if prop in [val[0] for val in props]:
exec(obj + '.' + prop + ' = ' + \
str(params[param_dict][prop]))
if verbose is True:
print('Wake turbulence parameter ' + prop + \
' set to ' + str(params[param_dict][prop]))
else:
raise Exception(('Wake turbulence parameter \'{}\' ' + \
'not part of current model. Value \'{}\' was not ' + \
'used.').format(prop, params[param_dict][prop]))
def get_props_subset(params, props):
prop_names = [prop[0] for prop in props]
try:
props_subset_inds = [prop_names.index(param) \
for param in params]
except:
props_subset_inds = []
print('Parameter(s)', ', '.join(params), 'does(do) not exist.')
props_subset = [props[i] for i in props_subset_inds]
return props_subset
def get_props(obj, fi):
return inspect.getmembers(eval(obj + '.__class__'), \
lambda obj: isinstance(obj, property))
def get_prop_values(obj, fi, props):
prop_val_dict = {}
for val in props:
prop_val_dict[val[0]] = eval(obj + '.' + val[0])
return prop_val_dict
def print_props(obj, fi, props):
print('-'.join(['-']*19))
for val in props:
print(val[0] + ' = ' + str(eval(obj + '.' + val[0])))
print('-'.join(['-']*19))
def print_prop_docs(obj, fi, props):
for val in props:
print('-'.join(['-']*39) + '\n', val[0] + ' = ' + str(eval(obj + '.' \
+ val[0])), '\n', eval(obj + '.__class__.' + val[0] + '.__doc__'))
print('-'.join(['-']*39))
```
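A usage sketch for the helpers above. It assumes a `FlorisInterface` built from an example input file (the path is a placeholder) and uses the Jensen `we` parameter shown earlier; the `FlorisInterface` import follows the `floris.tools.floris_interface` path referenced elsewhere in this package.
```python
from floris.tools.floris_interface import FlorisInterface   # import path assumed
from floris.tools.interface_utilities import show_params, get_params, set_params

fi = FlorisInterface("example_input.json")       # placeholder input file

show_params(fi, params=["we"])                   # print the current wake-velocity params
params = get_params(fi, params=["we"])           # {'Wake Velocity Parameters': {'we': ...}, ...}

params["Wake Velocity Parameters"]["we"] = 0.06  # tweak and push the value back
set_params(fi, params)
```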
#### File: optimization/scipy/optimization.py
```python
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import numpy as np
import matplotlib.pyplot as plt
try:
from mpi4py.futures import MPIPoolExecutor
except ImportError:
pass
class Optimization():
"""
Optimization is the base optimization class for
`~.tools.optimization.scipy` subclasses. Contains some common
methods and properties that can be used by the individual optimization
classes.
"""
def __init__(self, fi):
"""
Initializes an Optimization object by assigning a
FlorisInterface object.
Args:
fi (:py:class:`~.tools.floris_interface.FlorisInterface`):
Interface used to interact with the Floris object.
"""
self.fi = fi
# Private methods
def _reinitialize(self):
pass
def _norm(self, val, x1, x2):
return (val - x1)/(x2 - x1)
def _unnorm(self, val, x1, x2):
return np.array(val)*(x2 - x1) + x1
# Properties
@property
def nturbs(self):
"""
Number of turbines in the :py:class:`~.farm.Farm` object.
Returns:
int
"""
self._nturbs = len(self.fi.floris.farm.turbine_map.turbines)
return self._nturbs
```
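The `_norm`/`_unnorm` helpers above are plain min-max scaling between the variable bounds `x1` and `x2`; a short numeric sketch of the round trip (the bounds and yaw values are arbitrary):
```python
import numpy as np

x1, x2 = 0.0, 25.0                       # optimization variable bounds (e.g. yaw in degrees)
norm = lambda val: (val - x1) / (x2 - x1)
unnorm = lambda val: np.array(val) * (x2 - x1) + x1

yaw = [0.0, 12.5, 25.0]
scaled = [norm(v) for v in yaw]          # [0.0, 0.5, 1.0]
print(scaled, unnorm(scaled))            # round-trips back to the original angles
```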
#### File: preprocessor/src/v2_0_0.py
```python
from .version_class import VersionClass
from .data_transform import DataTransform
class V2_0_0(VersionClass, DataTransform):
version_string = "v2.0.0"
def __init__(self, meta_dict, turbine_dict, wake_dict, farm_dict):
self.base_meta = meta_dict
self.base_turbine = turbine_dict
self.base_wake = wake_dict
self.base_farm = farm_dict
self._meta_dict = self.build_meta_dict()
self._turbine_dict = self.build_turbine_dict()
self._wake_dict = self.build_wake_dict()
self._farm_dict = self.build_farm_dict()
def build_meta_dict(self):
self.base_meta["logging"] = {
"console": {
"enable": True,
"level": "INFO"
},
"file": {
"enable": False,
"level": "INFO"
}
}
self.base_meta["version"] = V2_0_0.version_string
return self.base_meta
def build_farm_dict(self):
# wind_speed, wind_direction, turbulence_intensity become lists
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "wind_speed"],
DataTransform.to_list(
DataTransform.deep_get(self.base_farm, ["farm", "properties", "wind_speed"])
)
)
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "wind_direction"],
DataTransform.to_list(
DataTransform.deep_get(self.base_farm, ["farm", "properties", "wind_direction"])
)
)
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "turbulence_intensity"],
DataTransform.to_list(
DataTransform.deep_get(self.base_farm, ["farm", "properties", "turbulence_intensity"])
)
)
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "wind_x"],
[0.0]
)
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "wind_y"],
[0.0]
)
DataTransform.deep_put(
self.base_farm,
["farm", "properties", "specified_wind_height"],
self.base_turbine["turbine"]["properties"]["hub_height"]
)
return self.base_farm
def build_wake_dict(self):
DataTransform.deep_put(
self.base_wake,
["wake", "properties", "turbulence_model"],
"crespo_hernandez"
)
del self.base_wake["wake"]["properties"]["parameters"]
return self.base_wake
def build_turbine_dict(self):
return self.base_turbine
@property
def meta_dict(self):
return self._meta_dict
@property
def turbine_dict(self):
return self._turbine_dict
@property
def wake_dict(self):
return self._wake_dict
@property
def farm_dict(self):
return self._farm_dict
```
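The transform above mostly wraps scalar wind inputs into lists, adds `wind_x`/`wind_y`, copies the turbine hub height into `specified_wind_height`, and swaps the wake parameters block for a `turbulence_model` entry. A stripped-down sketch of the list-wrapping part on placeholder dicts (only the keys the transform touches are shown; the values are made up):
```python
# Minimal stand-in dicts; only the keys touched by V2_0_0 are included.
farm = {"farm": {"properties": {
    "wind_speed": 8.0,
    "wind_direction": 270.0,
    "turbulence_intensity": 0.06,
}}}
turbine = {"turbine": {"properties": {"hub_height": 90.0}}}

props = farm["farm"]["properties"]
for key in ("wind_speed", "wind_direction", "turbulence_intensity"):
    if not isinstance(props[key], list):
        props[key] = [props[key]]          # scalar -> list, as build_farm_dict does
props["wind_x"] = [0.0]
props["wind_y"] = [0.0]
props["specified_wind_height"] = turbine["turbine"]["properties"]["hub_height"]

print(props)
# {'wind_speed': [8.0], 'wind_direction': [270.0], 'turbulence_intensity': [0.06],
#  'wind_x': [0.0], 'wind_y': [0.0], 'specified_wind_height': 90.0}
```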
#### File: preprocessor/src/version_class.py
```python
from .output import Output
from abc import ABC, abstractmethod
class VersionClass(ABC):
version_string = "v0.0.0"
@abstractmethod
def build_meta_dict(self):
pass
@abstractmethod
def build_farm_dict(self):
pass
@abstractmethod
def build_wake_dict(self):
pass
@abstractmethod
def build_turbine_dict(self):
pass
@abstractmethod
def meta_dict(self):
pass
@abstractmethod
def farm_dict(self):
pass
@abstractmethod
def turbine_dict(self):
pass
@abstractmethod
def wake_dict(self):
pass
def build_input_file_data(self):
return {
**self.meta_dict,
**self.farm_dict,
**self.turbine_dict,
**self.wake_dict
}
def export(self, filename=version_string + ".json"):
output = Output(filename)
file_data = self.build_input_file_data()
output.write_dictionary(file_data)
output.end()
def input_or_default(self, key, search_dictionary, default_dictionary):
if key in search_dictionary:
return search_dictionary[key]
else:
return default_dictionary[key]
``` |
{
"source": "jials/CS4243-project",
"score": 2
} |
#### File: jials/CS4243-project/changeDetection.py
```python
import numpy as np
import cv2
import imageMarker
lucas_kanade_params = dict(
winSize= (4, 4),
maxLevel= 3, #level of pyramids used
criteria= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)
def mark_features_on_all_images(images, features_coordinates):
marked_images = []
marked_frame_coordinates = []
last_gs_img = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
p0 = []
for coordinate in features_coordinates:
p0.append([coordinate,])
p0 = np.float32(p0)
mask = np.zeros_like(images[0])
status_arr = []
for fr in range(1, len(images)):
marked_coordinates = []
if images[fr] is None:
print('change detection problematic frame', fr)
print('len of given images', len(images))
frame = images[fr].copy()
gs_img = cv2.cvtColor(images[fr], cv2.COLOR_BGR2GRAY)
p1, st, err = cv2.calcOpticalFlowPyrLK(last_gs_img, gs_img, p0, None, **lucas_kanade_params)
status_arr.append(st)
if p1 is None:
marked_images.append(frame)
marked_frame_coordinates.append(features_coordinates if len(marked_frame_coordinates) == 0 else marked_frame_coordinates[-1])
continue
new_points = []
for index in range(len(p1)):
if st[index] == 1:
new_points.append(p1[index])
else:
new_points.append(p0[index])
new_points = np.array(new_points)
for index, point in enumerate(new_points):
x, y = point.ravel()
marked_coordinates.append([x,y])
imageMarker.mark_image_at_point(frame, int(y), int(x), 9, imageMarker.colors[index])
marked_frame_coordinates.append(marked_coordinates)
img = cv2.add(frame,mask)
marked_images.append(img)
# update last frame and point
last_gs_img = gs_img.copy()
p0 = new_points.reshape(-1,1,2)
return marked_images, marked_frame_coordinates, status_arr
```
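A usage sketch for `mark_features_on_all_images` above, assuming OpenCV can read a local clip (the filename is a placeholder), the repo's `imageMarker` module is importable, and the tracked features are given as `[x, y]` pixel coordinates in the first frame:
```python
import cv2
import changeDetection

# Load a short clip into a list of BGR frames (path is a placeholder)
cap = cv2.VideoCapture("match.mp4")
frames = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame)
cap.release()

# (x, y) pixel coordinates of the features to track in the first frame
features = [[320.0, 240.0], [400.0, 260.0]]

marked, coords_per_frame, status = changeDetection.mark_features_on_all_images(frames, features)
print(len(marked), "frames tracked; first-frame result:", coords_per_frame[0])
```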
#### File: jials/CS4243-project/convolution.py
```python
import numpy as np
def convolve(img, ff):
ff = np.flipud(ff)
result = np.zeros(img.shape)
for y_offset in range(len(img) - 2):
for x_offset in range(len(img[0]) - 2):
temp_arr = []
for row in img[y_offset : 3 + y_offset]:
temp_arr.append(row[x_offset : 3 + x_offset])
filtered_arr = np.array(temp_arr) * ff
filtered_sum = sum(map(sum, filtered_arr))
result[y_offset + 1][x_offset + 1] = filtered_sum
return result
def get_sobel_horizontal_edge_strength(img):
horizontal_sobel_filter = np.array([[-1, 0, 1],[-2, 0, 2],[-1, 0, 1]])
return convolve(img, horizontal_sobel_filter)
def get_sobel_vertical_edge_strength(img):
vertical_sobel_filter = np.array([[1, 2, 1],[0, 0, 0],[-1, -2, -1]])
return convolve(img, vertical_sobel_filter)
```
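A quick check of the hand-rolled convolution above on a tiny grayscale array: with a vertical intensity step, the horizontal Sobel response is large along the step and the vertical response stays at zero (borders are left untouched by `convolve`):
```python
import numpy as np
import convolution   # the repo's convolution.py

# 5x5 image with a vertical edge between columns 2 and 3
img = np.array([[0, 0, 0, 10, 10]] * 5, dtype=float)

gx = convolution.get_sobel_horizontal_edge_strength(img)
gy = convolution.get_sobel_vertical_edge_strength(img)

print(gx)   # strong response (40s) along the vertical edge in the interior rows
print(gy)   # all zeros: the image has no horizontal edges
```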
#### File: jials/CS4243-project/statistics.py
```python
import matplotlib
matplotlib.use('TkAgg')
import math
import numpy as np
import matplotlib.pyplot as plt
import util
def calculate_distance(pointA, pointB):
# length of an actual beach volleyball court (in meters)
standard_court_length = 16
length_pixel = 718
A_x, A_y = pointA[0], pointA[1]
B_x, B_y = pointB[0], pointB[1]
return math.sqrt((B_x - A_x) * (B_x - A_x) + (B_y - A_y) * (B_y - A_y)) / length_pixel * standard_court_length
def generate_statistics(all_selected_players_feet, all_is_jumping):
"""
Generate statistics of the match
Distance travelled by each player and number of jumps by each player
"""
# calculate the distance travelled by 4 different players
distance_travelled = [[0] for _ in range(4)]
for idx, selected_players_feet in enumerate(all_selected_players_feet[:-1]):
for i in range(min(4, len(selected_players_feet))):
next_frame_player_position = all_selected_players_feet[idx + 1][i]
distance = distance_travelled[i][-1] + calculate_distance(selected_players_feet[i],
next_frame_player_position)
distance_travelled[i].append(distance)
for i in range(len(selected_players_feet), 4):
distance_travelled[i].append(0)
# calculate the number of time each player jumps each frame
num_jumps_of_each_player = [[0] for _ in range(4)]
for idx, is_jumping in enumerate(all_is_jumping[:-1]):
for i in range(min(4, len(is_jumping))):
jump_cnt = num_jumps_of_each_player[i][-1]
if all_is_jumping[idx][i] is False and all_is_jumping[idx + 1][i] is True:
jump_cnt += 1
num_jumps_of_each_player[i].append(jump_cnt)
return distance_travelled, num_jumps_of_each_player
def draw_stats_table(distances, jumps, video_file_name):
statsImages = []
video_file_name = video_file_name + "_stats"
_, N = np.shape(distances)
for i in range(N):
fig = plt.figure()
plt.axis('off')
ax = plt.gca()
colLabels = ['Player', 'Distance(m)', 'Jump']
rowLabels = ['', '', '', '']
tableValues =[['',round(distances[0][i], 2),jumps[0][i]],
['',round(distances[1][i], 2),jumps[1][i]],
['',round(distances[2][i], 2),jumps[2][i]],
['',round(distances[3][i], 2),jumps[3][i]]]
# colours for each players in the following order: Red, Yellow, Green, Blue
colours = [[(0, 0, 0.8), (1, 1, 1), (1, 1, 1)],
[(0, 1, 1), (1, 1, 1), (1, 1, 1)],
[(0, 0.8, 0), (1, 1, 1), (1, 1, 1)],
[(0.8, 0, 0), (1, 1, 1), (1, 1, 1)]]
the_table = plt.table(cellText=tableValues, cellColours=colours, rowLoc='right', rowLabels=rowLabels,
colWidths=[.3]*3, colLoc='center', colLabels=colLabels, loc='center')
the_table.auto_set_font_size(False)
the_table.set_fontsize(20)
the_table.scale(1, 6)
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
statsImages.append(data)
plt.clf()
plt.close(fig)
util.images_to_video(statsImages, 60, video_file_name)
``` |
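A small sketch of `generate_statistics` above with made-up per-frame foot positions (pixel coordinates in the top-down court space, where 718 px corresponds to 16 m) and jump flags for four players; importing the module assumes the repo's `util` module and a matplotlib Tk backend are available:
```python
import statistics as stats   # the repo's statistics.py, not the stdlib module

# Two frames, four players; positions are (x, y) pixel coordinates on the court
feet = [
    [[100, 100], [200, 100], [300, 400], [400, 400]],
    [[110, 100], [200, 110], [300, 400], [400, 410]],
]
jumping = [
    [False, False, False, False],
    [True,  False, False, False],   # player 1 leaves the ground on frame 2
]

distances, jumps = stats.generate_statistics(feet, jumping)
print(distances[0])   # cumulative metres travelled by player 1, per frame
print(jumps[0])       # cumulative jump count for player 1, per frame
```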
{
"source": "jialuechen/augustus",
"score": 2
} |
#### File: builtin/backtest_stock/stock_broker.py
```python
from augustus.builtin.backtest_stock.stock_limit_filter_risk_manager import \
StockLimitFilterRiskManager
from augustus.system.base_broker import BrokerBase
from augustus.system.models.orders.general_order import MarketOrder
class StockBroker(BrokerBase):
def __init__(self):
super().__init__()
StockLimitFilterRiskManager()
@classmethod
def _required_cash_func(cls, order: MarketOrder) -> float:
# TODO
return order.size * order.execute_price
```
#### File: builtin/backtest_stock/stock_recorder.py
```python
from augustus.builtin.backtest_stock import stock_recorder_series
from augustus.builtin.backtest_stock.stock_bar import BarAshares
from augustus.builtin.backtest_stock.stock_log import StockTradeLog
from augustus.system.base_recorder import RecorderBase
from augustus.system.components.match_engine import MatchEngine
class StockRecorder(RecorderBase):
def __init__(self):
super().__init__()
def _update_cash(self, trading_date: str):
total_margin = self.margin.total_value()
total_market_value = self.market_value.total_value()
new_balance = self.balance.latest()
new_frozen_cash = total_margin + total_market_value  # update frozen_cash
new_cash = new_balance - new_frozen_cash  # update cash
self.frozen_cash.append(
{'date': trading_date, 'value': new_frozen_cash})
self.cash.append({'date': trading_date, 'value': new_cash})
def set_setting(self, initial_cash=100000,
comm=1, comm_pct=None, margin_rate=0.1):
self.initial_cash = initial_cash
self.per_comm = comm
self.per_comm_pct = comm_pct
self.margin_rate = margin_rate
def settle_match_engine_and_series(self):
self.match_engine = MatchEngine(StockTradeLog)
self.series = stock_recorder_series
@property
def bar_class(self):
return BarAshares
```
#### File: builtin/plotters/by_plotly.py
```python
import pandas as pd
import plotly
from plotly import offline as py
from augustus.system.metabase_env import augustusEnvBase
class PlotBase(augustusEnvBase):
def __init__(self):
super().__init__()
self.positions_df = self.env.recorder.position.dataframe()
self.holding_pnl_df = self.env.recorder.holding_pnl.dataframe()
self.commission_df = self.env.recorder.commission.dataframe()
self.margin_df = self.env.recorder.margin.dataframe()
self.data = []
self.updatemenus = []
self.balance_df = self.env.recorder.balance.dataframe()
self.cash_df = self.env.recorder.cash.dataframe()
self.balance_df.columns = ['balance']
self.cash_df.columns = ['cash']
@property
def realized_pnl_df(self):
trade_log = self.env.recorder.match_engine.generate_trade_log()
trade_log.dropna(inplace=True)
df = trade_log[['exit_date', 're_pnl']].copy()
df.rename(columns=dict(exit_date='date'), inplace=True)
df.set_index('date', drop=True, inplace=True)
df.index = pd.to_datetime(df.index)
return df
@property
def returns_df(self):
returns_df = self.balance_df.pct_change(
).dropna()
returns_df.columns = ['returns']
return returns_df
def ohlc_df(self, ticker):
ohlc = self.env.readers[ticker].load(
self.env.fromdate, self.env.todate, self.env.sys_frequency)
dataframe = pd.DataFrame((i for i in ohlc))
dataframe.set_index('date', inplace=True)
dataframe.index = pd.to_datetime(dataframe.index)
return dataframe
class Plotly(PlotBase):
"""
Depreciated
"""
def plot2(self, ticker=None, notebook=False):
returns_df = self.balance_df.pct_change(
).dropna()
returns_df.columns = ['returns']
fig = plotly.tools.make_subplots(
rows=5, cols=2,
shared_xaxes=True,
vertical_spacing=0.001)
fig['layout'].update(height=1500)
self.append_trace(fig, self.positions_df, 2, 1)
self.append_trace(fig, self.balance_df, 3, 1)
self.append_trace(fig, self.holding_pnl_df, 4, 1)
self.append_trace(fig, self.commission_df, 5, 1)
self.append_trace(fig, self.margin_df, 1, 1)
self.append_trace(fig, returns_df, 2, 2, 'bar')
# fig['layout']['showlegend'] = True
if notebook:
plotly.offline.init_notebook_mode()
py.iplot(fig, filename='augustus_plot.html', validate=False)
else:
py.plot(fig, filename='augustus_plot.html', validate=False)
def append_trace(self, figure, df_list, row, col, plot_type='scatter', legendly_visible: bool=False):
visible = True if legendly_visible is False else 'legendonly'
if not isinstance(df_list, list):
df_list = [df_list]
for dataframe in df_list:
dataframe.sort_index(inplace=True)
name = dataframe.columns[0]
series = dataframe[name]
result = dict(
x=series.index,
y=series.values,
name=name,
type=plot_type,
visible=visible,
legendgroup=f'{name[:4]}')
figure.append_trace(result, row, col)
def append_candlestick_trace(self, figure, dataframe, row, col, ticker):
dataframe.sort_index(inplace=True)
result = dict(
x=dataframe.index,
open=dataframe.open,
high=dataframe.high,
low=dataframe.low,
close=dataframe.close,
type='candlestick'
)
figure.append_trace(result, row, col)
def plot(self, ticker=None, notebook=False):
fig = plotly.tools.make_subplots(
rows=5, cols=1,
shared_xaxes=True,
vertical_spacing=0.001,
specs=[[{}],
[{}],
[{'rowspan': 2}],
[None],
[{}]],)
# fig['layout'].update(height=1500)
if isinstance(ticker, str):
ticker = [ticker]
for i in ticker:
close_df = self.ohlc_df(i)[['close']]
close_df.columns = [i]
# volume_df = self.ohlc_df(i)[['volume']]
# volume_df.columns = [i+' volume']
self.append_trace(fig, close_df, 3, 1)
# self.append_trace(fig, volume_df, 3, 1,
# plot_type='bar', legendly_visible=True)
# fig['data'][-1].update(dict(yaxis='y6', opacity=0.5))
# for i in ticker:
# self.append_candlestick_trace(fig, self.ohlc_df(i), 3, 1, i)
self.append_trace(fig, self.balance_df, 1, 1)
self.append_trace(fig, self.cash_df, 1, 1)
self.append_trace(fig, self.holding_pnl_df, 2, 1)
self.append_trace(fig, self.commission_df, 2,
1, legendly_visible=True)
self.append_trace(fig, self.positions_df, 5, 1)
total_holding_pnl = sum((i[i.columns[0]] for i in self.holding_pnl_df))
total_holding_pnl = pd.DataFrame(total_holding_pnl)
total_holding_pnl.columns = ['total_holding_pnl']
self.append_trace(fig, total_holding_pnl, 2, 1)
fig['layout']['yaxis'].update(
dict(overlaying='y3', side='right', showgrid=False))
# fig['layout']['xaxis']['type'] = 'category'
# fig['layout']['xaxis']['rangeslider']['visible'] = False
# fig['layout']['xaxis']['tickangle'] = 45
fig['layout']['xaxis']['visible'] = False
fig['layout']['hovermode'] = 'closest'
fig['layout']['xaxis']['rangeslider']['visible'] = False
if notebook:
plotly.offline.init_notebook_mode()
py.iplot(fig, filename='augustus_plot.html', validate=False)
else:
py.plot(fig, filename='augustus_plot.html', validate=False)
```
#### File: augustus/custom/cleaner_talib.py
```python
from collections import defaultdict, deque
from numpy import array, isnan
from talib.abstract import Function
from augustus.system.base_cleaner import CleanerBase
class Talib(CleanerBase):
def __init__(self, ind: str, params: dict, frequency: str=None,
buffer_day: int = 5) -> None:
super().__init__(None, buffer_day, frequency)
self.indicator = Function(ind)
self.indicator.set_parameters(params)
self.rolling_window = self.indicator.lookback+1
self.data = defaultdict(dict) # type:dict[str,dict[str,deque]]
@staticmethod
def _check_nan(value: float):
if isnan(value):
            raise Exception(
                'rolling_window should be longer: NaN was generated!')
def _data_proxy(self, ticker: str) -> dict:
key = f'{ticker}_{self.frequency}'
return {'open': array(self.data[key]['open']),
'high': array(self.data[key]['high']),
'low': array(self.data[key]['low']),
'close': array(self.data[key]['close']),
'volume': array(self.data[key]['volume'])}
def calculate(self, ticker: str) -> dict:
self.indicator.set_input_arrays(self._data_proxy(ticker))
value = list(self.indicator.outputs)
if len(self.indicator.output_names) > 1:
[self._check_nan(i[-1]) for i in value]
return {k: v[-1]
for k, v in zip(self.indicator.output_names, value)}
self._check_nan(value[-1])
return value[-1]
```
#### File: augustus/augustus/environment.py
```python
import logging
from collections import defaultdict
import arrow
import augustus as ag
from augustus.event_engine import EventEngine
from augustus.utils.easy_func import get_day_ratio
class Environment(object):
# general context
sys_date: str = None
sys_frequency: str = None
instrument: str = None
fromdate: str = None
todate: str = None
tickers: list = []
# general setting
execute_on_close_or_next_open: str = 'open'
is_save_original: bool = False
is_live_trading: bool = False
is_show_today_signals: bool = False
# backtest modules dict
readers: dict = {}
feeds: dict = {}
cleaners: dict = {}
cleaners_feeds: dict = {}
strategies: dict = {}
brokers: dict = {}
risk_managers: dict = {}
recorders: dict = {}
recorder = None # type: op.RecorderBase
# system memory
signals_normal: list = []
signals_pending: list = []
signals_trigger: list = []
signals_cancel: list = []
signals_normal_cur: list = []
signals_pending_cur: list = []
signals_trigger_cur: list = []
signals_cancel_cur: list = []
orders_mkt_normal_cur: list = []
orders_child_of_mkt_dict: dict = {}
orders_mkt_absolute_cur: list = []
orders_mkt_submitted_cur: list = []
orders_pending: list = []
orders_cancel_cur: list = []
orders_cancel_submitted_cur: list = []
cur_suspended_tickers: list = []
suspended_tickers_record: defaultdict = defaultdict(list)
# system modules
logger = logging.getLogger("augustus")
event_engine = EventEngine()
cache: dict = {}
@classmethod
def initialize_env(cls):
cls.signals_normal.clear()
cls.signals_pending.clear()
cls.signals_trigger.clear()
cls.signals_cancel.clear()
cls.signals_normal_cur.clear()
cls.signals_pending_cur.clear()
cls.signals_trigger_cur.clear()
cls.signals_cancel_cur.clear()
cls.orders_mkt_normal_cur.clear()
cls.orders_mkt_absolute_cur.clear()
cls.orders_mkt_submitted_cur.clear()
cls.orders_pending.clear()
cls.orders_child_of_mkt_dict.clear()
cls.orders_cancel_cur.clear()
cls.orders_cancel_submitted_cur.clear()
cls.tickers.clear()
cls.cur_suspended_tickers.clear()
cls.suspended_tickers_record.clear()
cls.cache.clear()
if not cls.is_live_trading:
ratio = get_day_ratio(cls.sys_frequency)
cls.sys_date = arrow.get(cls.fromdate).shift(
days=-ratio).format('YYYY-MM-DD HH:mm:ss')
cls.reset_all_counters()
@classmethod
def clear_modules(cls):
cls.sys_date: str = None
cls.sys_frequency: str = None
cls.instrument: str = None
cls.fromdate: str = None
cls.todate: str = None
cls.tickers: list = []
cls.cur_suspended_tickers: list = []
cls.suspended_tickers_record: defaultdict = defaultdict(list)
cls.market_maker = None
cls.readers: dict = {}
cls.feeds: dict = {}
cls.cleaners: dict = {}
cls.cleaners_feeds: dict = {}
cls.strategies: dict = {}
cls.brokers: dict = {}
cls.risk_managers: dict = {}
cls.recorders: dict = {}
cls.recorder = None # type: op.RecorderBase
cls.event_loop = None # type: List[Dict]
cls.cache = {}
cls.execute_on_close_or_next_open: str = 'open'
cls.is_save_original: bool = False
cls.is_live_trading: bool = False
cls.is_show_today_signals: bool = False
@classmethod
def reset_all_counters(cls):
from itertools import count
from augustus.system.models import signals
from augustus.system.base_cleaner import CleanerBase
from augustus.system.models.orders.base_order import OrderBase
from augustus.system.components.order_generator import OrderGenerator
CleanerBase.counter = count(1)
signals.Signal.counter = count(1)
signals.SignalByTrigger.counter = count(1)
signals.SignalForPending.counter = count(1)
signals.SignalCancelTST.counter = count(1)
signals.SignalCancelPending.counter = count(1)
OrderBase.counter = count(1)
OrderGenerator.counter = count(1)
```
#### File: augustus/system/base_broker.py
```python
import abc
from augustus.constants import ActionType
from augustus.system.components.order_checker import SubmitOrderChecker
from augustus.system.components.order_generator import OrderGenerator
from augustus.system.metabase_env import augustusEnvBase
from augustus.system.models.orders.general_order import (CancelPendingOrder,
CancelTSTOrder,
MarketOrder)
class BrokerBase(augustusEnvBase, abc.ABC):
def __init__(self):
self.env.brokers.update({self.__class__.__name__: self})
self._checker = SubmitOrderChecker(self._required_cash_func)
self._order_generator = OrderGenerator()
def _clear_submited_order(self):
self.env.orders_mkt_submitted_cur = []
self.env.orders_cancel_submitted_cur = []
def _generate_order(self):
self._order_generator.run()
def _check_order(self):
self._checker.run()
def _submit_order(self):
self._process_cancel_order()
def _judge_long_or_short(self, order):
if order.action_type in [ActionType.Buy, ActionType.Sell]:
return 'long'
elif order.action_type in [ActionType.Short, ActionType.Cover]:
return 'short'
def _process_cancel_order(self):
for cancel_order in self.env.orders_cancel_submitted_cur:
ticker = cancel_order.ticker
long_or_short = cancel_order.long_or_short
if isinstance(cancel_order, CancelPendingOrder):
for order in list(self.env.orders_pending):
confirm_ticker = order.ticker == ticker
confirm_long_short = self._judge_long_or_short(
order) == long_or_short
if confirm_ticker and confirm_long_short:
if cancel_order.is_target(order):
self.env.orders_pending.remove(order)
elif isinstance(cancel_order, CancelTSTOrder):
for order_list in self.env.orders_child_of_mkt_dict.values():
for order in list(order_list):
if cancel_order.is_target(order):
order_list.remove(order)
    @classmethod
    @abc.abstractmethod
def _required_cash_func(cls, order: MarketOrder):
raise NotImplementedError
def run(self):
self._clear_submited_order()
self._generate_order()
self._check_order()
self._submit_order()
```
#### File: system/components/signal_generator.py
```python
from typing import Optional
from augustus.constants import ActionType
from augustus.system.components.exceptions import OrderConflictError
from augustus.system.metabase_env import augustusEnvBase
from augustus.system.models.orders.base_order import PendingOrderBase
from augustus.system.models.signals import (Signal, SignalByTrigger,
SignalCancelPending,
SignalCancelTST, SignalForPending)
class SignalGenerator(augustusEnvBase):
def __init__(self, action_type, strategy_name) -> None:
self.action_type = action_type # type:ActionType
self.strategy_name = strategy_name
def settle_price_pct(self, ticker, price, price_pct):
if price and price_pct:
raise OrderConflictError("$ and pct can't be set together")
elif price_pct:
price = (price_pct+1) * self.env.feeds[ticker].cur_price
price_pct = None
return price, None
def get_signal(self, kwargs):
"""
发送信号分三种情况:
1. 挂单。直接通过。
2. 市价单。
1. 判断当前是否停牌,若停牌,则不生成信号
"""
ticker = kwargs['ticker']
if kwargs['price']:
return SignalForPending(**kwargs)
        if ticker in self.env.cur_suspended_tickers:  # no signal for suspended tickers
return
return Signal(**kwargs)
def buy_or_short(self, size: int, ticker: str,
takeprofit: float = None,
takeprofit_pct: float = None,
stoploss: float = None,
stoploss_pct: float = None,
trailingstop: float = None,
trailingstop_pct: float = None,
price: float = None,
price_pct: float = None) -> Signal:
"""
For Buy, ShortSell
size: int,
ticker: str,
takeprofit: float , 单位为元
takeprofit_pct: float , 范围为(-1,1)
stoploss: float , 单位为元
stoploss_pct: float , 范围为(-1,1)
trailingstop: float , 单位为元
trailingstop_pct: float , 范围为(-1,1)
price: float
price_pct: float , 范围为(-1,1)
"""
price, price_pct = self.settle_price_pct(ticker, price, price_pct)
kwargs = {
'strategy_name': self.strategy_name,
'action_type': self.action_type,
'size': size,
'ticker': ticker,
'takeprofit': takeprofit,
'takeprofit_pct': takeprofit_pct,
'stoploss': stoploss,
'stoploss_pct': stoploss_pct,
'trailingstop': trailingstop,
'trailingstop_pct': trailingstop_pct,
'price': price,
'price_pct': price_pct}
return self.get_signal(kwargs)
def sell_or_cover(self, size: int, ticker: str,
price: float = None, price_pct: float = None) -> Signal:
"""For Sell, ShortCover"""
price, price_pct = self.settle_price_pct(ticker, price, price_pct)
kwargs = {'strategy_name': self.strategy_name,
'action_type': self.action_type,
'size': size,
'ticker': ticker,
'price': price,
'price_pct': price_pct}
return self.get_signal(kwargs)
def cancel_tst(self, ticker: str, long_or_short: str,
takeprofit: bool = False, stoploss: bool = False,
trailingstop: bool = False):
if long_or_short not in ['long', 'short']:
raise ValueError("long_or_short should be long or short!")
kwargs = {'strategy_name': self.strategy_name,
'action_type': self.action_type,
'ticker': ticker,
'long_or_short': long_or_short,
'takeprofit': takeprofit,
'stoploss': stoploss,
'trailingstop': trailingstop}
return SignalCancelTST(**kwargs)
def cancel_pending(self, ticker: str, long_or_short: str,
below_price: float=None, above_price: float=None):
if long_or_short not in ['long', 'short']:
raise ValueError("long_or_short should be long or short!")
kwargs = {'strategy_name': self.strategy_name,
'action_type': self.action_type,
'ticker': ticker,
'long_or_short': long_or_short,
'below_price': below_price,
'above_price': above_price
}
return SignalCancelPending(**kwargs)
class TriggeredSignalGenerator(augustusEnvBase):
"""为触发的挂单生成挂单触发信号"""
@classmethod
def _generate_bare_signal(cls, order) -> SignalByTrigger:
kwargs = {'action_type': order.action_type,
'strategy_name': order.strategy_name,
'size': order.size,
'ticker': order.ticker,
'execute_price': order.target_price,
'first_cur_price': order.first_cur_price,
'mkt_id': order.mkt_id,
'order_type': order.order_type,
'parent_order': order}
return SignalByTrigger(**kwargs)
@classmethod
def _generate_full_signal(cls, order) -> SignalByTrigger:
kwargs = {'action_type': order.action_type,
'strategy_name': order.strategy_name,
'size': order.size,
'ticker': order.ticker,
'execute_price': order.target_price,
'price': None,
'price_pct': None,
'takeprofit': order.signal.takeprofit,
'takeprofit_pct': order.signal.takeprofit_pct,
'stoploss': order.signal.stoploss,
'stoploss_pct': order.signal.stoploss_pct,
'trailingstop': order.signal.trailingstop,
'trailingstop_pct': order.signal.trailingstop_pct,
'order_type': order.order_type}
return SignalByTrigger(**kwargs)
@classmethod
def generate_triggered_signal(cls, order: PendingOrderBase) -> Optional[Signal]:
if order.ticker not in cls.env.cur_suspended_tickers:
if order.is_triggered:
if order.is_with_mkt():
return cls._generate_bare_signal(order)
return cls._generate_full_signal(order)
```
#### File: system/models/calendar.py
```python
import arrow
from augustus.system.components.exceptions import BacktestFinished
from augustus.system.metabase_env import augustusEnvBase
from augustus.utils.easy_func import get_day_ratio
class Calendar(augustusEnvBase):
def __init__(self, instrument):
if instrument == 'A_shares':
self.is_trading_time = self._is_A_shares_trading_time
elif instrument == 'Forex':
self.is_trading_time = self._is_forex_trading_time
def _is_forex_trading_time(self, now: arrow.arrow.Arrow) -> bool:
weekday = now.isoweekday()
date = now.format('YYYY-MM-DD')
if weekday <= 4:
return True
elif weekday == 5:
if now < arrow.get(f'{date} 22:00'):
return True
elif weekday == 6:
return False
elif weekday == 7:
if now >= arrow.get(f'{date} 21:00'):
return True
return False
def _is_A_shares_trading_time(self, now: arrow.arrow.Arrow) -> bool:
weekday = now.isoweekday()
date = now.format('YYYY-MM-DD')
if self.env.sys_frequency == 'D':
if weekday <= 5:
return True
else:
if weekday <= 5:
left_1 = arrow.get(f'{date} 09:30')
right_1 = arrow.get(f'{date} 11:30')
left_2 = arrow.get(f'{date} 13:00')
right_2 = arrow.get(f'{date} 15:00')
if left_1 <= now <= right_1 or left_2 <= now <= right_2:
return True
return False
def update_calendar(self):
if self.env.is_live_trading:
self.env.sys_date = arrow.utcnow().format('YYYY-MM-DD HH:mm:ss')
else:
self._check_todate()
ratio = get_day_ratio(self.env.sys_frequency)
new_sys_date = arrow.get(self.env.sys_date).shift(days=ratio)
self.env.sys_date = new_sys_date.format('YYYY-MM-DD HH:mm:ss')
while not self.is_trading_time(new_sys_date):
self._check_todate()
new_sys_date = arrow.get(self.env.sys_date).shift(days=ratio)
self.env.sys_date = new_sys_date.format('YYYY-MM-DD HH:mm:ss')
def _check_todate(self):
if arrow.get(self.env.sys_date) >= arrow.get(self.env.todate):
raise BacktestFinished
``` |
{
"source": "jialuechen/quantorch",
"score": 2
} |
#### File: models/vasicek/vasicek.py
```python
import torch
class Vasicek:
    def __init__(self,alpha,sigma) -> None:
        self.alpha=alpha
        self.sigma=sigma
        self.init_r = torch.tensor(0.0)  # initial short rate stored as a tensor (assumed intent)
def get_params(self):
return torch.cat([self.alpha,self.sigma])
def step(self,dt,r0,defl0):
numSim=r0.size(0)
def simulate(self):
pass
def zcb_price(self,r,tenor,params=None):
pass
```
#### File: quantorch/quantorch/tensor.py
```python
import torch
def steps(end:float,steps=None,dtype=None,device=None)->torch.Tensor:
return torch.linspace(0.0,end,steps+1,dtype=dtype,device=device)[1:]
``` |
{
"source": "jialuechen/raptor",
"score": 2
} |
#### File: raptor/raptor/momentum.py
```python
import dask as da
from raptor.utils import BaseIndicator, _ema
class RSIIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 14, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
diff = self._close.diff(1)
up_direction = diff.where(diff > 0, 0.0)
down_direction = -diff.where(diff < 0, 0.0)
min_periods = 0 if self._fillna else self._window
emaup = up_direction.ewm(
alpha=1 / self._window, min_periods=min_periods, adjust=False
).mean()
emadn = down_direction.ewm(
alpha=1 / self._window, min_periods=min_periods, adjust=False
).mean()
relative_strength = emaup / emadn
self._rsi = da.Series(
da.where(emadn == 0, 100, 100 - (100 / (1 + relative_strength))),
index=self._close.index,
)
def rsi(self) -> da.Series:
rsi_series = self._check_fillna(self._rsi, value=50)
return da.Series(rsi_series, name="rsi")
class StochasticOscillator(BaseIndicator):
def __init__(
self,
high: da.Series,
low: da.Series,
close: da.Series,
window: int = 14,
smooth_window: int = 3,
fillna: bool = False,
):
self._close = close
self._high = high
self._low = low
self._window = window
self._smooth_window = smooth_window
self._fillna = fillna
self._run()
def _run(self):
min_periods = 0 if self._fillna else self._window
smin = self._low.rolling(self._window, min_periods=min_periods).min()
smax = self._high.rolling(self._window, min_periods=min_periods).max()
self._stoch_k = 100 * (self._close - smin) / (smax - smin)
def stoch(self) -> da.Series:
stoch_k = self._check_fillna(self._stoch_k, value=50)
return da.Series(stoch_k, name="stoch_k")
def stoch_signal(self) -> da.Series:
min_periods = 0 if self._fillna else self._smooth_window
stoch_d = self._stoch_k.rolling(
self._smooth_window, min_periods=min_periods
).mean()
stoch_d = self._check_fillna(stoch_d, value=50)
return da.Series(stoch_d, name="stoch_k_signal")
class KAMAIndicator(BaseIndicator):
def __init__(
self,
close: da.Series,
window: int = 10,
pow1: int = 2,
pow2: int = 30,
fillna: bool = False,
):
self._close = close
self._window = window
self._pow1 = pow1
self._pow2 = pow2
self._fillna = fillna
self._run()
def _run(self):
close_values = self._close.values
vol = da.Series(abs(self._close - da.roll(self._close, 1)))
min_periods = 0 if self._fillna else self._window
er_num = abs(close_values - da.roll(close_values, self._window))
er_den = vol.rolling(self._window, min_periods=min_periods).sum()
efficiency_ratio = er_num / er_den
smoothing_constant = (
(
efficiency_ratio * (2.0 / (self._pow1 + 1) - 2.0 / (self._pow2 + 1.0))
+ 2 / (self._pow2 + 1.0)
)
** 2.0
).values
self._kama = da.zeros(smoothing_constant.size)
len_kama = len(self._kama)
first_value = True
for i in range(len_kama):
if da.isnan(smoothing_constant[i]):
self._kama[i] = da.nan
elif first_value:
self._kama[i] = close_values[i]
first_value = False
else:
self._kama[i] = self._kama[i - 1] + smoothing_constant[i] * (
close_values[i] - self._kama[i - 1]
)
def kama(self) -> da.Series:
kama_series = da.Series(self._kama, index=self._close.index)
kama_series = self._check_fillna(kama_series, value=self._close)
return da.Series(kama_series, name="kama")
class ROCIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 12, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
self._roc = (
(self._close - self._close.shift(self._window))
/ self._close.shift(self._window)
) * 100
def roc(self) -> da.Series:
roc_series = self._check_fillna(self._roc)
return da.Series(roc_series, name="roc")
class StochRSIIndicator(BaseIndicator):
def __init__(
self,
close: da.Series,
window: int = 14,
smooth1: int = 3,
smooth2: int = 3,
fillna: bool = False,
):
self._close = close
self._window = window
self._smooth1 = smooth1
self._smooth2 = smooth2
self._fillna = fillna
self._run()
def _run(self):
self._rsi = RSIIndicator(
close=self._close, window=self._window, fillna=self._fillna
).rsi()
lowest_low_rsi = self._rsi.rolling(self._window).min()
self._stochrsi = (self._rsi - lowest_low_rsi) / (
self._rsi.rolling(self._window).max() - lowest_low_rsi
)
self._stochrsi_k = self._stochrsi.rolling(self._smooth1).mean()
def stochrsi(self):
stochrsi_series = self._check_fillna(self._stochrsi)
return da.Series(stochrsi_series, name="stochrsi")
def stochrsi_k(self):
stochrsi_k_series = self._check_fillna(self._stochrsi_k)
return da.Series(stochrsi_k_series, name="stochrsi_k")
def stochrsi_d(self):
stochrsi_d_series = self._stochrsi_k.rolling(self._smooth2).mean()
stochrsi_d_series = self._check_fillna(stochrsi_d_series)
return da.Series(stochrsi_d_series, name="stochrsi_d")
def rsi(close, window=14, fillna=False) -> da.Series:
return RSIIndicator(close=close, window=window, fillna=fillna).rsi()
def stoch(high, low, close, window=14, smooth_window=3, fillna=False) -> da.Series:
return StochasticOscillator(
high=high,
low=low,
close=close,
window=window,
smooth_window=smooth_window,
fillna=fillna,
).stoch()
def stoch_signal(
high, low, close, window=14, smooth_window=3, fillna=False
) -> da.Series:
return StochasticOscillator(
high=high,
low=low,
close=close,
window=window,
smooth_window=smooth_window,
fillna=fillna,
).stoch_signal()
def roc(close: da.Series, window: int = 12, fillna: bool = False) -> da.Series:
return ROCIndicator(close=close, window=window, fillna=fillna).roc()
def stochrsi(
close: da.Series,
window: int = 14,
smooth1: int = 3,
smooth2: int = 3,
fillna: bool = False,
) -> da.Series:
return StochRSIIndicator(
close=close, window=window, smooth1=smooth1, smooth2=smooth2, fillna=fillna
).stochrsi()
def stochrsi_k(
close: da.Series,
window: int = 14,
smooth1: int = 3,
smooth2: int = 3,
fillna: bool = False,
) -> da.Series:
return StochRSIIndicator(
close=close, window=window, smooth1=smooth1, smooth2=smooth2, fillna=fillna
).stochrsi_k()
def stochrsi_d(
close: da.Series,
window: int = 14,
smooth1: int = 3,
smooth2: int = 3,
fillna: bool = False,
) -> da.Series:
return StochRSIIndicator(
close=close, window=window, smooth1=smooth1, smooth2=smooth2, fillna=fillna
).stochrsi_d()
```
#### File: raptor/raptor/trend.py
```python
import dask as da
from raptor.utils import BaseIndicator, _ema, _sma, _get_min_or_max
class AroonIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 25, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
min_periods = 0 if self._fillna else self._window
rolling_close = self._close.rolling(self._window, min_periods=min_periods)
self._aroon_up = rolling_close.apply(
lambda x: float(da.argmax(x) + 1) / self._window * 100, raw=True
)
self._aroon_down = rolling_close.apply(
lambda x: float(da.argmin(x) + 1) / self._window * 100, raw=True
)
def aroon_up(self) -> da.Series:
aroon_up_series = self._check_fillna(self._aroon_up, value=0)
return da.Series(aroon_up_series, name=f"aroon_up_{self._window}")
def aroon_down(self) -> da.Series:
aroon_down_series = self._check_fillna(self._aroon_down, value=0)
return da.Series(aroon_down_series, name=f"aroon_down_{self._window}")
def aroon_indicator(self) -> da.Series:
aroon_diff = self._aroon_up - self._aroon_down
aroon_diff = self._check_fillna(aroon_diff, value=0)
return da.Series(aroon_diff, name=f"aroon_ind_{self._window}")
class MACD(BaseIndicator):
def __init__(
self,
close: da.Series,
window_slow: int = 26,
window_fast: int = 12,
window_sign: int = 9,
fillna: bool = False,
):
self._close = close
self._window_slow = window_slow
self._window_fast = window_fast
self._window_sign = window_sign
self._fillna = fillna
self._run()
def _run(self):
self._emafast = _ema(self._close, self._window_fast, self._fillna)
self._emaslow = _ema(self._close, self._window_slow, self._fillna)
self._macd = self._emafast - self._emaslow
self._macd_signal = _ema(self._macd, self._window_sign, self._fillna)
self._macd_diff = self._macd - self._macd_signal
def macd(self) -> da.Series:
macd_series = self._check_fillna(self._macd, value=0)
return da.Series(
macd_series, name=f"MACD_{self._window_fast}_{self._window_slow}"
)
def macd_signal(self) -> da.Series:
macd_signal_series = self._check_fillna(self._macd_signal, value=0)
return da.Series(
macd_signal_series,
name=f"MACD_sign_{self._window_fast}_{self._window_slow}",
)
def macd_diff(self) -> da.Series:
macd_diff_series = self._check_fillna(self._macd_diff, value=0)
return da.Series(
macd_diff_series, name=f"MACD_diff_{self._window_fast}_{self._window_slow}"
)
class EMAIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 14, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
def ema_indicator(self) -> da.Series:
ema_ = _ema(self._close, self._window, self._fillna)
return da.Series(ema_, name=f"ema_{self._window}")
class SMAIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
def sma_indicator(self) -> da.Series:
sma_ = _sma(self._close, self._window, self._fillna)
return da.Series(sma_, name=f"sma_{self._window}")
class WMAIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 9, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
_weight = da.Series(
[
i * 2 / (self._window * (self._window + 1))
for i in range(1, self._window + 1)
]
)
def weighted_average(weight):
def _weighted_average(x):
return (weight * x).sum()
return _weighted_average
self._wma = self._close.rolling(self._window).apply(
weighted_average(_weight), raw=True
)
def wma(self) -> da.Series:
wma = self._check_fillna(self._wma, value=0)
return da.Series(wma, name=f"wma_{self._window}")
class TRIXIndicator(BaseIndicator):
def __init__(self, close: da.Series, window: int = 15, fillna: bool = False):
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
ema1 = _ema(self._close, self._window, self._fillna)
ema2 = _ema(ema1, self._window, self._fillna)
ema3 = _ema(ema2, self._window, self._fillna)
self._trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(
1, fill_value=ema3.mean()
)
self._trix *= 100
def trix(self) -> da.Series:
trix_series = self._check_fillna(self._trix, value=0)
return da.Series(trix_series, name=f"trix_{self._window}")
class ADXIndicator(BaseIndicator):
def __init__(
self,
high: da.Series,
low: da.Series,
close: da.Series,
window: int = 14,
fillna: bool = False,
):
self._high = high
self._low = low
self._close = close
self._window = window
self._fillna = fillna
self._run()
def _run(self):
if self._window == 0:
raise ValueError("window may not be 0")
close_shift = self._close.shift(1)
pdm = _get_min_or_max(self._high, close_shift, "max")
pdn = _get_min_or_max(self._low, close_shift, "min")
diff_directional_movement = pdm - pdn
self._trs_initial = da.zeros(self._window - 1)
self._trs = da.zeros(len(self._close) - (self._window - 1))
self._trs[0] = diff_directional_movement.dropna()[0 : self._window].sum()
diff_directional_movement = diff_directional_movement.reset_index(drop=True)
for i in range(1, len(self._trs) - 1):
self._trs[i] = (
self._trs[i - 1]
- (self._trs[i - 1] / float(self._window))
+ diff_directional_movement[self._window + i]
)
diff_up = self._high - self._high.shift(1)
diff_down = self._low.shift(1) - self._low
pos = abs(((diff_up > diff_down) & (diff_up > 0)) * diff_up)
neg = abs(((diff_down > diff_up) & (diff_down > 0)) * diff_down)
self._dip = da.zeros(len(self._close) - (self._window - 1))
self._dip[0] = pos.dropna()[0 : self._window].sum()
pos = pos.reset_index(drop=True)
for i in range(1, len(self._dip) - 1):
self._dip[i] = (
self._dip[i - 1]
- (self._dip[i - 1] / float(self._window))
+ pos[self._window + i]
)
self._din = da.zeros(len(self._close) - (self._window - 1))
self._din[0] = neg.dropna()[0 : self._window].sum()
neg = neg.reset_index(drop=True)
for i in range(1, len(self._din) - 1):
self._din[i] = (
self._din[i - 1]
- (self._din[i - 1] / float(self._window))
+ neg[self._window + i]
)
def adx(self) -> da.Series:
dip = da.zeros(len(self._trs))
for i in range(len(self._trs)):
dip[i] = 100 * (self._dip[i] / self._trs[i])
din = da.zeros(len(self._trs))
for i in range(len(self._trs)):
din[i] = 100 * (self._din[i] / self._trs[i])
directional_index = 100 * da.abs((dip - din) / (dip + din))
adx_series = da.zeros(len(self._trs))
adx_series[self._window] = directional_index[0 : self._window].mean()
for i in range(self._window + 1, len(adx_series)):
adx_series[i] = (
(adx_series[i - 1] * (self._window - 1)) + directional_index[i - 1]
) / float(self._window)
adx_series = da.concatenate((self._trs_initial, adx_series), axis=0)
adx_series = da.Series(data=adx_series, index=self._close.index)
adx_series = self._check_fillna(adx_series, value=20)
return da.Series(adx_series, name="adx")
def adx_pos(self) -> da.Series:
dip = da.zeros(len(self._close))
for i in range(1, len(self._trs) - 1):
dip[i + self._window] = 100 * (self._dip[i] / self._trs[i])
adx_pos_series = self._check_fillna(
da.Series(dip, index=self._close.index), value=20
)
return da.Series(adx_pos_series, name="adx_pos")
def adx_neg(self) -> da.Series:
din = da.zeros(len(self._close))
for i in range(1, len(self._trs) - 1):
din[i + self._window] = 100 * (self._din[i] / self._trs[i])
adx_neg_series = self._check_fillna(
da.Series(din, index=self._close.index), value=20
)
return da.Series(adx_neg_series, name="adx_neg")
def ema_indicator(close, window=12, fillna=False):
return EMAIndicator(close=close, window=window, fillna=fillna).ema_indicator()
def sma_indicator(close, window=12, fillna=False):
return SMAIndicator(close=close, window=window, fillna=fillna).sma_indicator()
def wma_indicator(close, window=9, fillna=False):
return WMAIndicator(close=close, window=window, fillna=fillna).wma()
def macd(close, window_slow=26, window_fast=12, fillna=False):
return MACD(
close=close,
window_slow=window_slow,
window_fast=window_fast,
window_sign=9,
fillna=fillna,
).macd()
def macd_signal(close, window_slow=26, window_fast=12, window_sign=9, fillna=False):
return MACD(
close=close,
window_slow=window_slow,
window_fast=window_fast,
window_sign=window_sign,
fillna=fillna,
).macd_signal()
def macd_diff(close, window_slow=26, window_fast=12, window_sign=9, fillna=False):
return MACD(
close=close,
window_slow=window_slow,
window_fast=window_fast,
window_sign=window_sign,
fillna=fillna,
).macd_diff()
def adx(high, low, close, window=14, fillna=False):
return ADXIndicator(
high=high, low=low, close=close, window=window, fillna=fillna
).adx()
def adx_pos(high, low, close, window=14, fillna=False):
return ADXIndicator(
high=high, low=low, close=close, window=window, fillna=fillna
).adx_pos()
def adx_neg(high, low, close, window=14, fillna=False):
return ADXIndicator(
high=high, low=low, close=close, window=window, fillna=fillna
).adx_neg()
def trix(close, window=15, fillna=False):
return TRIXIndicator(close=close, window=window, fillna=fillna).trix()
def aroon_up(close, window=25, fillna=False):
return AroonIndicator(close=close, window=window, fillna=fillna).aroon_up()
def aroon_down(close, window=25, fillna=False):
return AroonIndicator(close=close, window=window, fillna=fillna).aroon_down()
```
#### File: raptor/raptor/volume.py
```python
import dask as da
from raptor.utils import BaseIndicator, _ema
class AccDistIndexIndicator(BaseIndicator):
def __init__(
self,
high: da.Series,
low: da.Series,
close: da.Series,
volume: da.Series,
fillna: bool = False,
):
self._high = high
self._low = low
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
clv = ((self._close - self._low) - (self._high - self._close)) / (
self._high - self._low
)
clv = clv.fillna(0.0) # float division by zero
adi = clv * self._volume
self._adi = adi.cumsum()
def acc_dist_index(self) -> da.Series:
adi = self._check_fillna(self._adi, value=0)
return da.Series(adi, name="adi")
class ForceIndexIndicator(BaseIndicator):
def __init__(
self,
close: da.Series,
volume: da.Series,
window: int = 13,
fillna: bool = False,
):
self._close = close
self._volume = volume
self._window = window
self._fillna = fillna
self._run()
def _run(self):
fi_series = (self._close - self._close.shift(1)) * self._volume
self._fi = _ema(fi_series, self._window, fillna=self._fillna)
def force_index(self) -> da.Series:
fi_series = self._check_fillna(self._fi, value=0)
return da.Series(fi_series, name=f"fi_{self._window}")
class VolumePriceTrendIndicator(BaseIndicator):
def __init__(self, close: da.Series, volume: da.Series, fillna: bool = False):
self._close = close
self._volume = volume
self._fillna = fillna
self._run()
def _run(self):
vpt = self._volume * (
(self._close - self._close.shift(1, fill_value=self._close.mean()))
/ self._close.shift(1, fill_value=self._close.mean())
)
self._vpt = vpt.shift(1, fill_value=vpt.mean()) + vpt
def volume_price_trend(self) -> da.Series:
vpt = self._check_fillna(self._vpt, value=0)
return da.Series(vpt, name="vpt")
def acc_dist_index(high, low, close, volume, fillna=False):
return AccDistIndexIndicator(
high=high, low=low, close=close, volume=volume, fillna=fillna
).acc_dist_index()
def force_index(close, volume, window=13, fillna=False):
return ForceIndexIndicator(
close=close, volume=volume, window=window, fillna=fillna
).force_index()
def volume_price_trend(close, volume, fillna=False):
return VolumePriceTrendIndicator(
close=close, volume=volume, fillna=fillna
).volume_price_trend()
``` |
{
"source": "Jialu-Huang/python",
"score": 4
} |
#### File: python/lab5/complex.py
```python
from clear_console import clean_the_console
"""
Name: Complex
Description: Generate a object to store, print, and calculate complex numbers
Format: (real +- imag * i)
"""
class Complex:
    #Constructor of this class; real and imag default to 0
def __init__(self, real = 0, imag = 0):
self.real = real
self.imag = imag
#define formatted print
def __repr__(self):
com_str = ""
com_str += "(" + "%s" % ("%g" % self.real) + num_to_str(self.imag) + "i)" #%+-g means display sign
return com_str
#define formatted string for print()
def __str__(self):
return self.__repr__()
    #Overload the binary + operator
def __add__(self, o): #(binary +)
        if isinstance(o, (float,int)): #Promote a plain number to Complex so both operands have the same type
o = Complex(o)
return Complex(self.real + o.real, self.imag + o.imag) #return as a new assigned complex object
    def __radd__(self, o): #Handle the case where the left operand is not a Complex (e.g. 1.5 + a)
return self.__add__(o)
def __sub__(self, o): #(binary -)
        if isinstance(o, (float,int)): #Promote a plain number to Complex so both operands have the same type
o = Complex(o)
return Complex((self.real - o.real),(self.imag - o.imag))
    def __rsub__(self,o): #Right-hand subtraction: computes o - self when the left operand is not a Complex
        return Complex(o).__sub__(self)
def __mul__(self, o): #(binary *)
        if isinstance(o, (float,int)): #Promote a plain number to Complex so both operands have the same type
o = Complex(o)
return Complex(((self.real * o.real)+ (-1)*(self.imag * o.imag)),(self.real * o.imag)+(self.imag * o.real))
def __rmul__(self, o):
return self.__mul__(o)
def __truediv__(self, o): #(binary /)
        if isinstance(o, (float,int)): #Promote a plain number to Complex so both operands have the same type
o = Complex(o)
a = (self.real * o.real) + (self.imag * o.imag)
b = pow((o.real), 2.0) + pow((o.imag), 2.0)
c = (self.imag * o.real) - (self.real * o.imag)
return Complex((a / b),(c / b))
def __rtruediv__(self, o): # self/o and o/self are different
temp = Complex(o)
return temp.__truediv__(self)
def __neg__(self): #Get negative complex number (unary -)
return Complex(0 - self.real, 0 - self.imag)
def __invert__(self): #Get conjugate complex number (unary ~)
return Complex(self.real, 0 - self.imag)
    def __eq__(self, o): #(Binary ==) Determine whether two Complex objects are equal
        return self.real == o.real and self.imag == o.imag
def num_to_str(num):
if(num >= 0):
return " + " + str(num)
else:
return " - " + str(abs(num))
def div_line():
print(50 * "/")
if __name__ == '__main__':
clean_the_console()
a = Complex (1,2)
b = Complex (3,-4)
print(a,b)
print(a, " + ", b," = ", a + b)
print(a, " - ", b," = ", a - b)
print(a, " * ", b," = ", a * b)
print(a, " / ", b," = ", a / b)
div_line()
print(a, " + ", 1.5, " = ", a + 1.5)
print(1.5, " + ", a, " = ", 1.5 + a)
print(a, " - ", 1.5, " = ", a - 1.5)
print(1.5, " - ", a, " = ", 1.5 - a)
print(a, " * ", 1.5, " = ", a * 1.5)
print(1.5, " * ", a, " = ", 1.5 * a)
print(a, " / ", 1.5, " = ", a / 1.5)
print(1.5, " / ", a, " = ", 1.5 / a)
div_line()
print("Negation",-a)
print("conjugate",~a)
print("a = b ? -> ",a == b)
div_line()
c = Complex()
d = Complex(3,)
print (c,d)
print(c, " + ", d," = ", c + d)
print(c, " - ", d," = ", c - d)
print(c, " * ", d," = ", c * d)
print(c, " / ", d," = ", c / d)
``` |
{
"source": "jialuli-luka/EnvEdit",
"score": 2
} |
#### File: EnvEdit/style_transfer/style_transfer.py
```python
from styleaug import StyleAugmentor
import torch
from torchvision.transforms import ToTensor, ToPILImage
from PIL import Image
import os
import torchvision.transforms as transforms
import argparse
# PyTorch Tensor <-> PIL Image transforms:
toTensor = ToTensor()
toPIL = ToPILImage()
loader = transforms.Compose([
transforms.Resize((480,640)), # scale imported image
transforms.ToTensor()]) # transform it into a torch tensor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def image_loader(image_name):
image = Image.open(image_name)
# print(image.size())
# fake batch dimension required to fit network's input dimensions
image = loader(image).unsqueeze(0)
return image.to(device, torch.float)
def environment_creation(args):
path = args.input
output_path = args.output
files = os.listdir(path)
augmentor = StyleAugmentor()
unloader = transforms.ToPILImage()
for j, scan in enumerate(files):
if os.path.isdir(path + "/" + scan):
print("scan:", scan, "progress:", j, "/", len(files))
views = os.listdir(path + "/" + scan)
# embedding = augmentor.sample_embedding(1) # Sample the same style embedding for all the views in a scan
for view in views:
print("view:", view)
imgs = os.listdir(path + "/" + scan + "/" + view)
embedding = augmentor.sample_embedding(1) # Sample the same style embedding for all discretized views in a panorama
for i, img in enumerate(imgs):
content_img = image_loader(path + "/" + scan + "/" + view + "/" + img)
im_restyled = augmentor(content_img, embedding=embedding)
image = im_restyled.squeeze(0).cpu().detach()
image = unloader(image)
dir = "%s/%s" % (output_path, scan+"/"+view)
if not os.path.exists(dir):
os.makedirs(dir)
image.save("%s/%s" % (output_path, scan+"/"+view+"/"+img))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input',
default="views_img")
parser.add_argument('--output', default='views_img_style_transfer')
args = parser.parse_args()
environment_creation(args)
``` |
{
"source": "jialuogao/ECE539FinalProject",
"score": 2
} |
#### File: ChipGAN/models/model.py
```python
import os
import sys
import torch
from torch.autograd import Variable
import shutil
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torchvision.models as models
class Hed(nn.Module):
def __init__(self):
super(Hed, self).__init__()
self.vgg16 = models.vgg16(pretrained=True)
self.vgg16 = self.vgg16.features
for param in self.vgg16.parameters():
param.requires_grad = False
self.score_dsn1 = nn.Conv2d(64, 1, kernel_size=1, stride=1, padding=0)
self.score_dsn2 = nn.Conv2d(128, 1, kernel_size=1, stride=1, padding=0)
self.score_dsn3 = nn.Conv2d(256, 1, kernel_size=1, stride=1, padding=0)
self.score_dsn4 = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
self.score_dsn5 = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
self.fuse = nn.Conv2d(5, 1, kernel_size=1, stride=1, padding=0)
self.upsample2 = nn.ConvTranspose2d(1, 1, kernel_size=3, stride=2, padding=1, output_padding=1)
self.upsample3 = nn.ConvTranspose2d(1, 1, kernel_size=6, stride=4, padding=1)
self.upsample4 = nn.ConvTranspose2d(1, 1, kernel_size=12, stride=8, padding=2)
self.upsample5 = nn.ConvTranspose2d(1, 1, kernel_size=24, stride=16, padding=4)
def forward(self, x):
cnt = 1
res = []
for l in self.vgg16:
x = l(x)
# print(cnt)
if cnt == 4:
y = self.score_dsn1(x)
res += [y]
elif cnt == 9:
y = self.score_dsn2(x)
y = self.upsample2(y)
res += [y]
elif cnt == 16:
y = self.score_dsn3(x)
y = self.upsample3(y)
res += [y]
elif cnt == 23:
y = self.score_dsn4(x)
y = self.upsample4(y)
res += [y]
elif cnt == 30:
y = self.score_dsn5(x)
y = self.upsample5(y)
res += [y]
cnt += 1
res = self.fuse(torch.cat(res, dim=1))
return res
```
#### File: ECE539FinalProject/models/xdog.py
```python
import cv2
import numpy as np
def dog(img,size=(0,0),k=1.6,sigma=0.5,gamma=1):
img1 = cv2.GaussianBlur(img,size,sigma)
print("img:")
print(np.max(img1))
img2 = cv2.GaussianBlur(img,size,sigma*k)
return (img1-gamma*img2)
def xdog(img,sigma=0.5,k=1.6, gamma=1,epsilon=1,phi=1):
img = dog(img,sigma=sigma,k=k,gamma=gamma)
for i in range(0,img.shape[0]):
for j in range(0,img.shape[1]):
if(img[i,j] < epsilon):
img[i,j] = 1
else:
img[i,j] = (1 + np.tanh(phi*(img[i,j])))
return img
def xdog_thresh(img, sigma=0.5,k=1.6, gamma=1,epsilon=1,phi=1,alpha=1):
img = xdog(img,sigma=sigma,k=k,gamma=gamma,epsilon=epsilon,phi=phi)
#cv2.imshow("1",np.uint8(img))
mean = np.mean(img)
max = np.max(img)
img = cv2.GaussianBlur(src=img,ksize=(0,0),sigmaX=sigma*3)
#cv2.imshow("2",np.uint8(img))
for i in range(0,img.shape[0]):
for j in range(0,img.shape[1]):
if(img[i,j] > mean):
img[i,j] = max
#cv2.imshow("3",np.uint8(img))
return img/max
if __name__ == '__main__':
# Open image in grayscale
#img = cv2.imread('imgs/lena.jpg',cv2.CV_LOAD_IMAGE_GRAYSCALE)
img = cv2.imread('./imgs/horse.png',cv2.IMREAD_GRAYSCALE)
print(img.shape)
img = cv2.resize(img,(400,400))
print(img.shape)
# k = 1.6 as proposed in the paper
k = 1.6
#cv2.imshow("Original in Grayscale", img)
#cv2.imshow("Edge DoG",edge_dog(img,sigma=0.5,k=200, gamma=0.98))
#cv2.imshow("XDoG GaryGrossi",np.uint8(xdog_garygrossi(img,sigma=0.5,k=200, gamma=0.98,epsilon=0.1,phi=10)))
#cv2.imshow("XDoG Project 1",np.uint8(xdog(img,sigma=0.4,k=1.6, gamma=0.5,epsilon=-0.5,phi=10)))
cv2.imshow("orig",img)
cv2.imshow("thres",np.uint8(255*xdog_thresh(img,sigma=0.5,k=1.6, gamma=0.98,epsilon=-0.1,phi=200)))
print(img)
print(255*xdog_thresh(img,sigma=0.5,k=1.6, gamma=0.98,epsilon=-0.1,phi=200))
#cv2.imshow("XDoG Project 2",np.uint8(xdog(img,sigma=1.6,k=1.6, gamma=0.5,epsilon=-1,phi=10)))
# Natural media (tried to follow parameters of article)
#cv2.imshow("XDoG Project 3 - Natural Media",np.uint8(xdog(img,sigma=1,k=1.6, gamma=0.5,epsilon=-0.5,phi=10)))
#cv2.imshow("XDoG Project 4 - Hatch",np.uint8(hatchBlend(img)))
cv2.waitKey(0)
``` |
{
"source": "JialuZhang/ConfigV",
"score": 3
} |
#### File: ConfigV/graphAnalysis/build_graph.py
```python
import argparse
import json
import operator
import pprint
import sys
parser = argparse.ArgumentParser(description=\
'Build VeriConf post-analysis rule graph')
parser.add_argument('input_file')
parser.add_argument('edge_file')
args = parser.parse_args()
with open(args.input_file) as data_file:
data = json.load(data_file)
# DEFINE RULE PARSING LOGIC
def parse_fine_l(x):
nodes = x[0]
sources = nodes[0:2]
targets = [nodes[2]]
vals = x[1]
label = max(vals.iteritems(), key=operator.itemgetter(1))[0]
tru = float(vals[label])
total = float(sum(vals.values()))
return {
'PARTICIPANT_A' : sources
,'PARTICIPANT_B' : targets
,'LABEL' : label
,'WEIGHT' : tru / total
}
def parse_order_l(x):
nodes = x[0]
sources = [nodes[0]]
targets = [nodes[1]]
vals = x[1]
label = 'order'
tru = float( vals['tru'] )
total = float( vals['tru'] + vals['fls'] )
return {
'PARTICIPANT_A' : sources
,'PARTICIPANT_B' : targets
,'LABEL' : label
,'WEIGHT' : tru / total
}
def parse_missing_l(x):
nodes = x[0]
sources = [nodes[0]]
targets = [nodes[1]]
vals = x[1]
label = 'missing'
tru = float( vals['tru'] )
total = float( vals['tru'] + vals['fls'] )
return {
'PARTICIPANT_A' : sources
,'PARTICIPANT_B' : targets
,'LABEL' : label
,'WEIGHT' : tru / total
}
def parse_int_rel_l(x):
nodes = x[0]
sources = [nodes[0]]
targets = [nodes[1]]
vals = x[1]
label = max(vals.iteritems(), key=operator.itemgetter(1))[0]
tru = float(vals[label])
total = float(sum(vals.values()))
return {
'PARTICIPANT_A' : sources
,'PARTICIPANT_B' : targets
,'LABEL' : label
,'WEIGHT' : tru / total
}
def parse_type_err_l(x):
node = x[0]
if not isinstance(node, basestring):
sys.exit('ERROR: malformed type rule found in graph construction')
sources = [node]
targets = [node]
vals = x[1]
label = max(vals.iteritems(), key=operator.itemgetter(1))[0]
tru = float(vals[label])
total = float(sum(vals.values()))
if total > 10:
return {
'PARTICIPANT_A' : sources
,'PARTICIPANT_B' : targets
,'LABEL' : label
,'WEIGHT' : tru / total
}
else:
return None
# add all rule types as targets
RULE_TYPES = [
('finel', parse_fine_l)
,('orderl', parse_order_l)
,('missingl', parse_missing_l)
,('intRell', parse_int_rel_l)
,('typeErrl', parse_type_err_l)
]
edges = []
for t, parser in RULE_TYPES:
for edge in data[t]:
parsed_edge = parser(edge)
if parsed_edge:
edges.append( parsed_edge )
with open(args.edge_file, 'w') as outfile:
json.dump(edges, outfile)
```
#### File: ConfigV/graphAnalysis/calc_complexity.py
```python
import argparse
import json
parser = argparse.ArgumentParser(description=\
'analyze complexity of configuration file')
parser.add_argument('config_opts')
parser.add_argument('edge_file')
args = parser.parse_args()
#
# determine the "total weight" between an option
# and a set of options
#
def weight_between(opt, opts, edges):
weight = 0
for edge in edges:
if (opt in edge['PARTICIPANT_A']
and set(edge['PARTICIPANT_B']) & set(opts)):
weight = weight + edge['WEIGHT']
return weight
def total_weight(opt, edges):
weight = 0
for edge in edges:
if (opt in edge['PARTICIPANT_A']):
weight = weight + edge['WEIGHT']
return weight
# CONFIG FILE COMPLEXITY(opts):
#
# candidate_complexity = 0
#
# for opt_a in opts:
# opt_weight = 0;
#
# all_local_weight = total_weight(opt_a)
#
# for opt_b in opts: # since self-edges allowed
# opt_weight = opt_weight + weight_between(opt_a, [opt_b])
#
#
# # now how related are they?
# # if A is heavily related to other options, count it less.
#
# candidate_complexity =
# candidate_complexity +
# 1 * (1 - (opt_weight / all_local_weight))
#
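# Worked example (illustrative numbers): if opt_a has a total local weight of 4.0
# and 3.0 of that weight connects to other options present in this config file,
# then opt_a contributes 1 * (1 - 3.0/4.0) = 0.25 to the candidate complexity;
# an option with no edge information at all contributes the full 1.0.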
with open(args.config_opts) as opts_file:
config_options = opts_file.readlines()
config_options = [x.strip() for x in config_options]
with open(args.edge_file) as edge_file:
edges = json.load(edge_file)
candidate_complexity = float(0)
for opt_a in config_options:
opt_weight = 0
all_local_weight = total_weight(opt_a, edges)
for opt_b in config_options:
opt_weight = opt_weight + weight_between(opt_a, [opt_b], edges)
if all_local_weight != 0:
candidate_complexity = \
candidate_complexity + \
1 * (1 - (opt_weight / all_local_weight))
else:
# no information available, use naive measure
candidate_complexity = candidate_complexity + 1
print ("INFO: config file processing completed")
print ("INFO: {} lines, {} total complexity score".format(
len(config_options), candidate_complexity))
``` |
{
"source": "jialvarez/persystems",
"score": 3
} |
#### File: jialvarez/persystems/test.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright 2011, <NAME> <<EMAIL>>"
__license__ = "GPL-2"
import csv
import time
import getopt
import imp
import os
# CSV to process
FILENAME = '/tmp/fichero1.csv'
class Test:
def __init__(self, csv_reader, module):
# import backend
f, filename, description = imp.find_module(module, ['backends'])
try:
moduleObj = imp.load_module(module, f, filename, description)
finally:
f.close()
# open test write database for chosen backend
test_file = '/tmp/test' + module + '.db'
test_db = moduleObj.TestBackend(test_file, "w")
# start write test to disk
start_write = time.time()
i = 0
for row in csv_reader:
test_db[str(i)] = str(row)
i += 1
test_db.close()
end_write = time.time()
print module + " writing time: " + str(end_write - start_write)
# start read test to memory from disk
start_read = time.time()
test_db = moduleObj.TestBackend(test_file, "r")
# Read records. PyTables uses a different way based on class description
for item in test_db.getTestDBItems():
if module == 'pytables':
a = item['key']
b = item['value']
else:
memvar = item
        #An alternative way to read records (currently unused)
#for (key, value) in test_db.iteritems():
# memvar = value
# i += 1
test_db.close()
end_read = time.time()
print module + " reading time: " + str(end_read - start_read) + "\n"
def getCSVReader():
# CSV dialect where lines end in "\n"
csv.register_dialect('endline', lineterminator='\n')
csv_reader = csv.reader(open(FILENAME,'r'), delimiter=',')
return csv_reader
# test experiment
tester = Test(getCSVReader(), "pytables")
tester = Test(getCSVReader(), "pybsddb")
tester = Test(getCSVReader(), "pyzodb")
tester = Test(getCSVReader(), "pydurus")
tester = Test(getCSVReader(), "pyredis")
``` |
{
"source": "JiamanZhang/Lab_TAD_intactness",
"score": 2
} |
#### File: Lab_TAD_intactness/codes/get.tad.intactness.value.py
```python
import sys
import os
import numpy as np
import gzip
def main():
tadfile = sys.argv[1]
chr = sys.argv[2]
contactfile = sys.argv[3]
faifile = sys.argv[4]
oup = sys.argv[5]
f1 = open(faifile,'r')
fai_dict = {}
for line1 in f1:
info1 = line1.strip().split('\t')
fai_dict[info1[0]] = int(info1[1])
f1.close()
chr_len = fai_dict[chr]
if chr_len%20000 == 0:
bin_num = chr_len / 20000
else:
bin_num = chr_len / 20000 + 1
bin1 = 0
f2 = gzip.open(contactfile,'r')
contact_dict ={}
for line2 in f2:
info2 = line2.strip().split('\t')
if len(info2) != bin_num:
            print('Error: contact matrix row length does not match the expected number of bins')
            sys.exit(1)
for bin2 in range(0,len(info2),1):
if bin1 > bin2:
continue
contact_dict[(bin1,bin2)] = float(info2[bin2])
bin1+=1
f2.close()
out = open(oup,'w')
f3 = open(tadfile,'r')
for line3 in f3:
info3 = line3.strip().split('\t')
if info3[0] != chr:
continue
start = int(info3[1])
end = int(info3[2])
if end - start <= 20000:
info_list = info3 + ['NA']
out.write('{0}\n'.format('\t'.join([str(i) for i in info_list])))
continue
bin_s = start / 20000
bin_e = end / 20000
lines = bin_e - bin_s
Dscore_dict = {}
for i in range(1,lines,1):
Dscore_dict[i] = [[],[],[]]
for bin1 in range(bin_s,bin_e,1):
for bin2 in range(bin_s-lines,bin_s,1):
if bin1-lines >= bin2:
continue
i = abs(bin2 - bin1)
try:
Dscore_dict[i][0].append(contact_dict[(bin2,bin1)])
except KeyError as reason:
continue
for bin1 in range(bin_s,bin_e,1):
for bin2 in range(bin_e,bin_e+lines,1):
if bin1 <= bin2-lines:
continue
i = abs(bin2 - bin1)
try:
Dscore_dict[i][1].append(contact_dict[(bin1,bin2)])
except KeyError as reason:
continue
for bin1 in range(bin_s,bin_e,1):
for bin2 in range(bin_s,bin_e,1):
if bin1 >= bin2:
continue
i = abs(bin2 - bin1)
Dscore_dict[i][2].append(contact_dict[(bin1,bin2)])
a_list = []
for i in range(1,lines,1):
# print(len(Dscore_dict[i][0]),len(Dscore_dict[i][1]),len(Dscore_dict[i][2]))
if len(Dscore_dict[i][0]) == 0:
mean_val = np.mean(Dscore_dict[i][1])
elif len(Dscore_dict[i][1]) ==0:
mean_val = np.mean(Dscore_dict[i][0])
else:
mean_val = np.mean([np.mean(Dscore_dict[i][0]),np.mean(Dscore_dict[i][1])])
a_list.append(np.median(Dscore_dict[i][2])/mean_val)
value = np.median(a_list)
info_list = info3 + [value]
out.write('{0}\n'.format('\t'.join([str(i) for i in info_list])))
out.close()
# write.close()
f3.close()
if __name__ == '__main__':
main()
``` |
{
"source": "jiamaozheng/imlab_merging_sqlite_db",
"score": 2
} |
#### File: imlab_merging_sqlite_db/src/merging_sqlites_v6p_old.py
```python
import os, pandas, glob, sqlite3, csv, sys, time, argparse
import urllib2, boto3, json, pandas, time, os, sys, logging, argparse
from datetime import datetime
import uuid as myuuid
from botocore.exceptions import ClientError
__author__ = "<NAME> <<EMAIL>>"
__version__ = "Revision: 0.0.1"
__date__ = "Date: 2017-11-28"
# usages:
# 1) python merging_sqlites_v6p_old.py -m DGN-HapMap-2015.sqlite -i DGN-HapMap-2015 -o DGN-HapMap-2015 -l DGN-HapMap-2015
# 2) python merging_sqlites_v6p_old.py -m GTEx-V6p-1KG-2016-11-16.sqlite -i GTEx-V6p-1KG-2016-11-16 -o GTEx-V6p-1KG-2016-11-16 -l GTEx-V6p-1KG-2016-11-16
# 3) python merging_sqlites_v6p_old.py -m GTEx-V6p-HapMap-2016-09-08.sqlite -l GTEx-V6p-HapMap-2016-09-08 -i GTEx-V6p-HapMap-2016-09-08 -o GTEx-V6p-HapMap-2016-09-08
class SqliteDBMerged(object):
    def __init__(self):
# logger
self.logger = ''
# input path
self.input_path = ''
# output path
self.output_path = ''
# log path
self.log_path = ''
# merged db name
self.merged_sqlite_db_name = ''
# Logging function
def getLog(self):
log_file_name = ''
if self.log_path != '':
if self.log_path[-1] != '/':
self.log_path = self.log_path + '/'
log_file_name = self.log_path + str(myuuid.uuid4()) + '.log'
self.logger = logging.getLogger()
fhandler = logging.FileHandler(filename=log_file_name, mode='w')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
self.logger.addHandler(fhandler)
self.logger.setLevel(logging.INFO)
    # Function to get a pretty string for a given number of seconds.
def timeString(self, seconds):
tuple = time.gmtime(seconds);
days = tuple[2] - 1;
hours = tuple[3];
mins = tuple[4];
secs = tuple[5];
if sum([days,hours,mins,secs]) == 0:
return "<1s";
else:
string = str(days) + "d";
string += ":" + str(hours) + "h";
string += ":" + str(mins) + "m";
string += ":" + str(secs) + "s";
return string;
# Get arguments
def get_args(self):
# setup commond line arguments
parser = argparse.ArgumentParser()
        # merged db name
        parser.add_argument('-m', '--merged_db_name', required=True, default='', type=str, help='e.g. gtex-v6p-1kg-2016-08-18.sqlite')
        # output path
        parser.add_argument('-o', '--output_path', required=True, default='', type=str, help='the directory in which to save the merged sqlite db output')
        # log path
        parser.add_argument('-l', '--log_path', required=True, default='', type=str, help='the directory in which to store the log')
        # input path
        parser.add_argument('-i', '--input_path', required=True, default='', type=str, help='the directory that holds all individual sqlite dbs')
# parse the arguments
args = parser.parse_args()
self.output_path = args.output_path.strip()
self.log_path = args.log_path.strip()
self.merged_db_name = args.merged_db_name.strip()
self.input_path = args.input_path.strip()
if self.output_path != '' and not os.path.exists(self.output_path):
os.makedirs(self.output_path)
if self.log_path != '' and not os.path.exists(self.log_path):
os.makedirs(self.log_path)
if self.output_path != '':
if self.output_path[-1] != '/':
self.output_path = self.output_path + '/'
if self.input_path != '':
if self.input_path[-1] != '/':
self.input_path = self.input_path + '/'
# merge
def merge(self):
# create a new database
predictdb_all = self.output_path + self.merged_db_name
connection = sqlite3.connect(predictdb_all)
ccc = connection.cursor()
ccc.execute("DROP TABLE IF EXISTS weights")
ccc.execute("CREATE TABLE weights (rsid text NOT NULL, gene text NOT NULL, weight real NULL, ref_allele text NULL, eff_allele text NULL, tissue text NOT NULL, PRIMARY KEY(rsid, gene, tissue))")
ccc.execute("DROP TABLE IF EXISTS extra")
ccc.execute("CREATE TABLE extra (gene text NOT NULL, genename text NOT NULL, pred_perf_R2 text NULL, n_snps_in_model integer NULL, pred_perf_pval real NULL, pred_perf_qval real NULL, tissue text NOT NULL, PRIMARY KEY(gene, tissue))")
ccc.execute("DROP TABLE IF EXISTS construction")
ccc.execute("CREATE TABLE construction (chr integer NOT NULL, cv_seed integer NOT NULL, tissue text NOT NULL, PRIMARY KEY (chr, tissue))")
ccc.execute("DROP TABLE IF EXISTS sample_info")
ccc.execute("CREATE TABLE sample_info (n_samples integer NOT NULL, tissue text NOT NULL, PRIMARY KEY (tissue))")
# merge all sqlite databases into one sqlite database
tableName = ['construction', 'extra', 'sample_info', 'weights']
dbFileList = glob.glob(self.input_path + "*.db")
database_names = []
for dbFilename in dbFileList:
database_names.append(dbFilename)
for i in range(len(database_names)):
print(database_names[i])
conn = sqlite3.connect(database_names[i])
c = conn.cursor()
tissue_name = database_names[i].split('.')[0][:-2]
if 'DGN-WB' in database_names[i].split('/')[-1]:
tissue_name = tissue_name.split('/')[len(tissue_name.split('/'))-1]
else:
tissue_name = tissue_name.split('/')[len(tissue_name.split('/'))-1][3:]
print(tissue_name)
for table_name in tableName:
try:
c.execute("alter table '%s' " %table_name + ' add column tissue TEXT')
c.execute('update %s' %table_name + " set tissue = '%s' " %tissue_name)
except Exception as e:
print(e)
c.execute('select * from %s' %table_name)
output = c.fetchall()
# csv
csv_writer = ''
if table_name == 'construction':
csv_writer = csv.writer(open(self.output_path + self.merged_db_name.split('.')[0] + "_" + tissue_name + "_" + table_name + ".csv", "w"))
csv_writer.writerow(['chr', 'cv.seed', 'tissue'])
elif table_name == 'extra':
csv_writer = csv.writer(open(self.output_path + self.merged_db_name.split('.')[0] + "_" + tissue_name + "_" + table_name + ".csv", "w"))
csv_writer.writerow(['gene', 'genename', 'pred.perf.R2', 'n.snps.in.model', 'pred.perf.pval', 'pred.perf.qval', 'tissue'])
elif table_name == 'weights':
csv_writer = csv.writer(open(self.output_path + self.merged_db_name.split('.')[0] + "_" + tissue_name + "_" + table_name + ".csv", "w"))
csv_writer.writerow(['rsid', 'gene', 'weight', 'ref_allele', 'eff_allele', 'tissue'])
else:
csv_writer = csv.writer(open(self.output_path + self.merged_db_name.split('.')[0] + "_" + tissue_name + "_" + table_name + ".csv", "w"))
csv_writer.writerow(['n.samples', 'tissue'])
csv_writer.writerows(output)
# sqlite db
for row in output:
if table_name == 'construction':
ccc.execute("insert into %s VALUES(?, ?, ?)" %table_name, row)
elif table_name == 'extra':
ccc.execute("insert into %s VALUES(?, ?, ?, ?, ?, ?, ?)" %table_name, row)
elif table_name == 'weights':
ccc.execute("insert into %s VALUES(?, ?, ?, ?, ?, ?)" %table_name, row)
else:
ccc.execute("insert into %s VALUES(?, ?)" %table_name, row)
# commit and close db
conn.commit()
conn.close()
# commit and close db
connection.commit()
connection.close()
# concat and output combined datasets
merged_extra = glob.glob(self.output_path + '*extra.csv')
merged_weights = glob.glob(self.output_path + '*weights.csv')
merged_sample_info = glob.glob(self.output_path + '*sample_info.csv')
merged_construction = glob.glob(self.output_path + '*construction.csv')
        for file_list in [merged_extra, merged_construction, merged_weights, merged_sample_info]:
            merged_final = ''
            merged = []
            for filename in file_list:
                merged.append(pandas.read_csv(filename))
                os.system('rm %s' % filename)
                print('remove %s' % filename)
            merged_final = pandas.concat(merged, axis=0)
            if 'extra' in file_list[0]:
                merged_final.to_csv(self.output_path + self.merged_db_name.split('.')[0] + "_" + 'extra_final.csv', index=None)
            elif 'weights' in file_list[0]:
                merged_final.to_csv(self.output_path + self.merged_db_name.split('.')[0] + "_" + 'weights_final.csv', index=None)
            elif 'construction' in file_list[0]:
                merged_final.to_csv(self.output_path + self.merged_db_name.split('.')[0] + "_" + 'construction_final.csv', index=None)
            else:
                merged_final.to_csv(self.output_path + self.merged_db_name.split('.')[0] + "_" + 'sample_info_final.csv', index=None)
def main():
    # Instantiate the class
start_time = time.time()
sqliteDBMerged = SqliteDBMerged()
sqliteDBMerged.get_args()
sqliteDBMerged.getLog()
# merge
sqliteDBMerged.merge()
msg = "\nElapsed Time: " + sqliteDBMerged.timeString(time.time() - start_time) # calculate how long the program is running
sqliteDBMerged.logger.info(msg)
print(msg)
msg = "\nDate: " + datetime.now().strftime('%Y-%m-%d') + "\n"
sqliteDBMerged.logger.info(msg)
print(msg)
# INITIALIZE
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "jiamaozheng/Imlab-syn-aws-s3-tool",
"score": 2
} |
#### File: jiamaozheng/Imlab-syn-aws-s3-tool/syn_aws_s3.py
```python
import sys, os, logging, argparse, json, time, boto3
from pprint import pprint
from datetime import datetime
import uuid as myuuid
from botocore.exceptions import ClientError
__author__ = "<NAME> <<EMAIL>>"
__version__ = "Revision: 0.0.1"
__date__ = "Date: 2017-09-28"
class BackupS3(object):
    def __init__(self):
# logger
self.logger = ' '
# bucket name
self.bucket_name = ''
# file name
self.file_name = ''
# output path
self.output_path = ''
# log path
self.log_path = ''
# Logging function
def getLog(self):
log_file_name = ''
if self.log_path != 'l':
if self.log_path[-1] != '/':
self.log_path = self.log_path + '/'
log_file_name = self.log_path + str(myuuid.uuid4()) + '.log'
else:
currentPath = os.path.abspath(os.path.abspath(sys.argv[0]))[:-13]
currentPath = currentPath[:-(len(currentPath.split('/')[-2]) + 1)]
log_file_name = currentPath + 'log/' + datetime.now().strftime('%Y-%m-%d')
if not os.path.exists(log_file_name):
os.makedirs(log_file_name)
log_file_name = log_file_name + '/' + str(myuuid.uuid4()) + '.log'
self.logger = logging.getLogger()
fhandler = logging.FileHandler(filename=log_file_name, mode='w')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
self.logger.addHandler(fhandler)
self.logger.setLevel(logging.INFO)
    # Function to get a pretty string for a given number of seconds.
    def timeString(self, seconds):
        tuple = time.gmtime(seconds)
        days = tuple[2] - 1
        hours = tuple[3]
        mins = tuple[4]
        secs = tuple[5]
        if sum([days, hours, mins, secs]) == 0:
            return "<1s"
        else:
            string = str(days) + "d"
            string += ":" + str(hours) + "h"
            string += ":" + str(mins) + "m"
            string += ":" + str(secs) + "s"
            return string
def get_args(self):
        # set up command line arguments
        parser = argparse.ArgumentParser()
        # bucket name if the user wants to back up a specific bucket
        parser.add_argument('-b', '--bucket_name', required=False, default='b', type=str, help='aws s3 bucket name or subfolder name e.g imlab-jiamao or imlab-jiamao/labfiles')
        # a specific file name if the user wants to back up a specific file
        parser.add_argument('-f', '--file_name', required=False, default='f', type=str, help='aws s3 file path e.g imlab-jiamao/jiamao.txt')
        # output path
        parser.add_argument('-o', '--output_path', required=False, default='o', type=str, help='a directory or an s3 bucket/subfolder you choose to back up aws s3 files to')
        # log path
        parser.add_argument('-l', '--log_path', required=False, default='l', type=str, help='a directory or an aws s3 bucket/subfolder you choose to store log files in')
# parse the arguments
args = parser.parse_args()
self.bucket_name = args.bucket_name.strip()
self.file_name = args.file_name.strip()
self.output_path = args.output_path.strip()
self.log_path = args.log_path.strip()
if self.output_path != 'o' and not os.path.exists(self.output_path):
os.makedirs(self.output_path)
if self.log_path != 'l' and not os.path.exists(self.log_path):
os.makedirs(self.log_path)
def synAllBuckets(self):
# current path
currentPath = os.path.abspath(os.path.abspath(sys.argv[0]))[:-13]
currentPath = currentPath[:-(len(currentPath.split('/')[-2]) + 1)]
bucket_name_json_file = currentPath + 'log/' + datetime.now().strftime('%Y-%m-%d')
if not os.path.exists(bucket_name_json_file):
os.makedirs(bucket_name_json_file)
bucket_name_json_file = bucket_name_json_file + '/' + str(myuuid.uuid4()) + '.json'
cmd = 'aws s3api list-buckets > %s' % bucket_name_json_file
msg = "Executing list-buckets s3api command: " + cmd
self.logger.info(msg)
print(msg)
os.system(cmd)
jdata = open(bucket_name_json_file)
data = json.load(jdata)
msg = "Reading a json file containing all aws s3 bucket names: " + bucket_name_json_file
self.logger.info(msg)
print(msg)
for x in range (0, len(data["Buckets"])):
cmd = ''
if self.output_path[-1] != '/':
self.output_path = self.output_path + '/'
if currentPath[-1] != '/':
currentPath = currentPath + '/'
if self.output_path[:-1] != 'o':
cmd = 'aws s3 sync s3://%s %s' %(data["Buckets"][x]['Name'], self.output_path + data["Buckets"][x]['Name'])
else:
cmd = 'aws s3 sync s3://%s %s' %(data["Buckets"][x]['Name'], currentPath + 'aws_s3_buckets/' + data["Buckets"][x]['Name'])
msg = "Executing sync s3 command: " + cmd
self.logger.info(msg)
print(msg)
os.system(cmd)
msg = "\n"
self.logger.info(msg)
print(msg)
def synOneBucket(self):
# current path
currentPath = os.path.abspath(os.path.abspath(sys.argv[0]))[:-13]
currentPath = currentPath[:-(len(currentPath.split('/')[-2]) + 1)]
bucket_name_json_file = currentPath + 'log/' + datetime.now().strftime('%Y-%m-%d')
if not os.path.exists(bucket_name_json_file):
os.makedirs(bucket_name_json_file)
bucket_name_json_file = bucket_name_json_file + '/' + str(myuuid.uuid4()) + '.json'
cmd = 'aws s3api list-buckets > %s' % bucket_name_json_file
msg = "Executing list-buckets s3api command: " + cmd
self.logger.info(msg)
print(msg)
os.system(cmd)
jdata = open(bucket_name_json_file)
data = json.load(jdata)
msg = "Reading a json file containing all aws s3 bucket names: " + bucket_name_json_file
self.logger.info(msg)
print(msg)
bucket_name = None
for x in range (0, len(data["Buckets"])):
if self.bucket_name.split('/')[0] == data["Buckets"][x]['Name']:
bucket_name = self.bucket_name
if bucket_name is None:
msg = "\nFAILED! Please check your bucket name - %s which matchs to that of the s3 bucket" % (self.bucket_name.split('/')[0])
self.logger.info(msg)
print(msg)
else:
if self.output_path[-1] != '/':
self.output_path = self.output_path + '/'
cmd = ''
if self.output_path != 'o':
cmd = 'aws s3 sync s3://%s %s' %(self.bucket_name, self.output_path + self.bucket_name)
os.system(cmd)
if int(os.path.getsize(self.output_path + self.bucket_name)) > 500:
msg = "\nGreat, you have successfully downloaded %s" % (self.output_path + self.bucket_name)
self.logger.info(msg)
print(msg)
else:
# os.system('rm -rf %s' %(self.output_path + self.bucket_name))
                    msg = '\nFAILED! Please use the -f option or check the existence of your bucket/subfolder name - %s!' % (self.bucket_name)
self.logger.info(msg)
print(msg)
else:
cmd = 'aws s3 sync s3://%s %s' %(self.bucket_name, currentPath + 'aws_s3_buckets/' + self.bucket_name)
os.system(cmd)
if int(os.path.getsize(currentPath + 'aws_s3_buckets/' + self.bucket_name)) > 500:
                    msg = '\nGreat, you have successfully downloaded %s' % (currentPath + 'aws_s3_buckets/' + self.bucket_name)
self.logger.info(msg)
print(msg)
else:
# os.system('rm -rf %s'%(self.bucket_name, currentPath + 'aws_s3_buckets/' + self.bucket_name))
msg = "\nFAILED! Please use usage -f or check the existence of your bucket/subfolder name - %s!" %(self.bucket_name)
self.logger.info(msg)
print(msg)
def synOneFile(self):
# current path
currentPath = os.path.abspath(os.path.abspath(sys.argv[0]))[:-13]
currentPath = currentPath[:-(len(currentPath.split('/')[-2]) + 1)]
bucket_name_json_file = currentPath + 'log/' + datetime.now().strftime('%Y-%m-%d')
if not os.path.exists(bucket_name_json_file):
os.makedirs(bucket_name_json_file)
bucket_name_json_file = bucket_name_json_file + '/' + str(myuuid.uuid4()) + '.json'
cmd = 'aws s3api list-buckets > %s' % bucket_name_json_file
msg = "Executing list-buckets s3api command: " + cmd
self.logger.info(msg)
print(msg)
os.system(cmd)
jdata = open(bucket_name_json_file)
data = json.load(jdata)
msg = "Reading a json file containing all aws s3 bucket names: " + bucket_name_json_file
self.logger.info(msg)
print(msg)
file_name = None
for x in range (0, len(data["Buckets"])):
if self.file_name.split('/')[0] == data["Buckets"][x]['Name']:
file_name = self.file_name
if file_name is None:
msg = "\nFAILED! Please check your bucket name - %s which matchs to that of the s3 bucket" % (self.file_name.split('/')[0])
self.logger.info(msg)
print(msg)
else:
# print(self.file_name.split('/')[0] == data["Buckets"][x]['Name'])
if self.output_path[-1] != '/':
self.output_path = self.output_path + '/'
# print(self.output_path)
cmd = ''
if self.output_path != 'o':
cmd = 'aws s3 cp s3://%s %s' %(self.file_name, self.output_path + self.file_name)
else:
cmd = 'aws s3 cp s3://%s %s' %(self.file_name, currentPath + 'aws_s3_buckets/' + self.file_name)
msg = "Executing cp s3 command: " + cmd
self.logger.info(msg)
print(msg)
os.system(cmd)
def main():
# Instantial class
start_time = time.time()
backupS3 = BackupS3()
backupS3.get_args()
backupS3.getLog()
msg = "\n"
backupS3.logger.info(msg)
print(msg)
# backup all buckets, one bucket or one file
if backupS3.bucket_name != 'b' and backupS3.file_name == 'f': # one bucket
# s3 = boto3.resource('s3')
# try:
# s3.Object(backupS3.bucket_name).load()
# print('success')
# except ClientError as e:
# print(e)
try:
backupS3.synOneBucket()
        except Exception as e:
msg = e
backupS3.logger.info(msg)
print(msg)
elif backupS3.bucket_name == 'b' and backupS3.file_name != 'f': # a single file
try:
backupS3.synOneFile()
        except Exception as e:
msg = e
backupS3.logger.info(msg)
print(msg)
elif backupS3.bucket_name == 'b' and backupS3.file_name == 'f': # all buckets
try:
backupS3.synAllBuckets()
        except Exception as e:
msg = e
backupS3.logger.info(msg)
print(msg)
msg = "\nElapsed Time: " + backupS3.timeString(time.time() - start_time) # calculate how long the program is running
backupS3.logger.info(msg)
print(msg)
msg = "\nDate: " + datetime.now().strftime('%Y-%m-%d') + "\n"
backupS3.logger.info(msg)
print(msg)
# INITIALIZE
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "jiameng1010/pointNet",
"score": 2
} |
#### File: jiameng1010/pointNet/train_gan.py
```python
import tensorflow as tf
tfgan = tf.contrib.gan
layers = tf.contrib.layers
import numpy as np
import argparse
import provider
import importlib
import h5py
import os
import sys
from tensorflow.python.ops import variable_scope
from tensorflow.contrib.gan.python.losses.python import tuple_losses_impl as tfgan_losses
from tensorflow.contrib.data import Dataset, Iterator
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import tf_util
from transform_nets import input_transform_net, feature_transform_net
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
FLAGS = parser.parse_args()
FLAGS.noise_dims = 128
FLAGS.max_number_of_steps = 120
MAX_NUM_POINT = 2048
NUM_CLASSES = 40
DECAY_STEP = FLAGS.decay_step
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
MODEL = importlib.import_module(FLAGS.model)
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
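# Worked example (illustrative numbers): with BN_INIT_DECAY = 0.5 and BN_DECAY_DECAY_RATE = 0.5 under
# staircase decay, bn_momentum is 0.5 before the first decay boundary and 0.25 after it, so bn_decay
# rises from 0.5 to 0.75 and is eventually capped at BN_DECAY_CLIP = 0.99.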
def provide_data():
while(True):
BATCH_SIZE = 32
current_data, current_label = provider.loadDataFile('./data/modelnet40_ply_hdf5_2048/train_all.h5')
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx + 1) * BATCH_SIZE
            # manipulate the data (rotation + jitter augmentation)
rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
jittered_data = provider.jitter_point_cloud(rotated_data)
            # manipulate the label (one-hot encoding)
one_hot_labe = np.zeros((BATCH_SIZE, 40))
one_hot_labe[np.arange(BATCH_SIZE), current_label[start_idx:end_idx]] = 1
#out['data'] = jittered_data
#out['labe'] = one_hot_labe
yield jittered_data, one_hot_labe
def get_model(point_cloud, is_training, one_hot_labels, bn_decay=None,):
""" Classification PointNet, input is BxNx3, output Bx40 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tfgan.features.condition_tensor_from_onehot(net, one_hot_labels)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2', reuse=tf.AUTO_REUSE) as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
net_transformed = tf.expand_dims(net_transformed, [2])
net = tf_util.conv2d(net_transformed, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
#net = tfgan.features.condition_tensor_from_onehot(net, one_hot_labels)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
    # Symmetric function: average pooling (the original PointNet uses max pooling here)
net = tf_util.avg_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
net = tf.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='fc1', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp1')
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='fc2', bn_decay=bn_decay)
net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
scope='dp2')
net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
return net, end_points
def conditional_discriminator(point_clouds, one_hot_labels):
batch = tf.constant([32])#tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', tf.squeeze(bn_decay))
is_training_pl = tf.constant([True])
# Get model and loss
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
pred, end_points = get_model(point_clouds, tf.squeeze(is_training_pl), one_hot_labels[1])
return layers.fully_connected(pred, 1, activation_fn=tf.nn.softmax)
def generate_cloud(feature, noise):
feature = tf.concat([feature, feature], axis=1)#2
feature = tf.concat([feature, feature], axis=1)#4
feature = tf.concat([feature, feature], axis=1)#8
feature = tf.concat([feature, feature], axis=1)#16
feature = tf.concat([feature, feature], axis=1)#32
feature = tf.concat([feature, feature], axis=1)#64
feature = tf.concat([feature, feature], axis=1)#128
feature = tf.concat([feature, feature], axis=1)#256
feature = tf.concat([feature, feature], axis=1)#512
feature = tf.concat([feature, feature], axis=1)#1024
#noise = tf.concat([noise, noise], axis=1)#2
#noise = tf.concat([noise, noise], axis=1)#4
#noise = tf.concat([noise, noise], axis=1)#8
#noise = tf.concat([noise, noise], axis=1)#16
#noise = tf.concat([noise, noise], axis=1)#32
#noise = tf.concat([noise, noise], axis=1)#64
#noise = tf.concat([noise, noise], axis=1)#128
#noise = tf.concat([noise, noise], axis=1)#256
#noise = tf.concat([noise, noise], axis=1)#512
#noise = tf.concat([noise, noise], axis=1)#1024
feature = tf.concat([feature, noise], axis=2)
point = layers.fully_connected(feature, 256)
point = layers.fully_connected(point, 64)
point = layers.fully_connected(point, 32)
point = layers.fully_connected(point, 16, activation_fn=tf.nn.softsign)
point = layers.fully_connected(point, 3, activation_fn=tf.nn.softsign)
return point
def conditional_generator(inputs):
noise, cloud_labels = inputs
#with tf.variable_scope('Generator', reuse=tf.AUTO_REUSE):
with tf.contrib.framework.arg_scope(
[layers.fully_connected, layers.conv2d_transpose],
activation_fn=tf.nn.relu, normalizer_fn=layers.batch_norm,
weights_regularizer=layers.l2_regularizer(2.5e-5)):
net = layers.fully_connected(noise, 256)
net = tfgan.features.condition_tensor_from_onehot(net, cloud_labels)
net = layers.fully_connected(net, 512)
feature = layers.fully_connected(net, 1024)
noise2 = tf.random_normal([32, 1024, 128])
cloud = generate_cloud(tf.expand_dims(feature, axis=1), noise2)
return cloud
######################################### main #############################################
######################################### main #############################################
######################################### main #############################################
######################################### main #############################################
######################################### main #############################################
cloud_provider = tf.data.Dataset.from_generator(provide_data, output_types=(tf.float32, tf.float32), \
output_shapes=(tf.TensorShape([32, 1024, 3]), tf.TensorShape([32,40])))
point_clouds, cloud_labels = cloud_provider.make_one_shot_iterator().get_next()
iterator = Iterator.from_structure(cloud_provider.output_types,
cloud_provider.output_shapes)
training_init_op = iterator.make_initializer(cloud_provider)
noise = tf.random_normal([FLAGS.batch_size, FLAGS.noise_dims])
#with tf.variable_scope("my_scope", reuse=tf.AUTO_REUSE):
# Build the generator and discriminator.
gan_model = tfgan.gan_model(
generator_fn=conditional_generator, # you define
discriminator_fn=conditional_discriminator, # you define
real_data=point_clouds,
generator_inputs=(noise, cloud_labels))
# Build the GAN loss.
gan_loss = tfgan.gan_loss(
gan_model,
#gradient_penalty_weight=1.0,
#mutual_information_penalty_weight=0.0,
generator_loss_fn=tfgan_losses.minimax_generator_loss,
discriminator_loss_fn=tfgan_losses.minimax_discriminator_loss,
add_summaries=True)
# Create the train ops, which calculate gradients and apply updates to weights.
gen_lr = 1e-5
dis_lr = 1e-4
train_ops = tfgan.gan_train_ops(
gan_model,
gan_loss,
generator_optimizer=tf.train.AdamOptimizer(gen_lr, 0.5),
discriminator_optimizer=tf.train.AdamOptimizer(dis_lr, 0.5))
status_message = tf.string_join(
['Starting train step: ',
tf.as_string(tf.train.get_or_create_global_step())],
name='status_message')
demo_hook = tf.train.FinalOpsHook(final_ops=gan_model.generated_data)
g_loss_hook = tf.train.FinalOpsHook(final_ops=gan_loss[0])
d_loss_hook = tf.train.FinalOpsHook(final_ops=gan_loss[1])
for i in range(500):
step_count = tfgan.gan_train(train_ops,
hooks=[tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),
demo_hook],
logdir='./log/gan_log/')
print(step_count)
generated_demos = demo_hook.final_ops_values
savefilename = './log/gan_log/demo' + str(i) + '.h5'
h5r = h5py.File(savefilename, 'w')
h5r.create_dataset('data', data=generated_demos)
h5r.close()
g_loss = g_loss_hook.final_ops_values
d_loss = d_loss_hook.final_ops_values
print(str(g_loss) + ' ' + str(d_loss))
#with tf.variable_scope('Generator'):
print('Done!')
``` |
{
"source": "Jiaming1999/ChainConsumer",
"score": 3
} |
#### File: ChainConsumer/examples/plot_summary.py
```python
import numpy as np
from chainconsumer import ChainConsumer
def get_instance():
np.random.seed(0)
c = ChainConsumer()
parameters = ["$x$", r"$\Omega_\epsilon$", "$r^2(x_0)$"]
for name in ["Ref. model", "Test A", "Test B", "Test C"]:
# Add some random data
mean = np.random.normal(loc=0, scale=3, size=3)
sigma = np.random.uniform(low=1, high=3, size=3)
data = np.random.multivariate_normal(mean=mean, cov=np.diag(sigma**2), size=100000)
c.add_chain(data, parameters=parameters, name=name)
return c
###############################################################################
# If we want the full shape of the distributions, well, that's the default
# behaviour!
c = get_instance()
c.configure(bar_shade=True)
c.plotter.plot_summary()
###############################################################################
# But let's make some changes. Say we don't like the colourful text. And we
# want errorbars, not distributions. And some fun truth values.
c = get_instance()
c.configure(legend_color_text=False)
c.configure_truth(ls=":", color="#FB8C00")
c.plotter.plot_summary(errorbar=True, truth=[[0], [-1, 1], [-2, 0, 2]])
###############################################################################
# Even better, let's use our reference model as the truth value and not plot
# it with the others
c = get_instance()
c.configure(legend_color_text=False)
c.configure_truth(ls="-", color="#555555")
c.plotter.plot_summary(errorbar=True, truth="Ref. model", include_truth_chain=False, extra_parameter_spacing=1.5)
``` |
{
"source": "Jiaming1999/dotty_dict",
"score": 2
} |
#### File: Jiaming1999/dotty_dict/setup.py
```python
import os
import re
import subprocess
from setuptools import find_packages, setup
from setuptools.command.develop import develop
from setuptools.command.install import install
from setuptools.command.test import test
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2018, Pawelzny'
with open('README.rst', 'r') as readme_file:
readme = readme_file.read()
def get_version(*file_paths):
"""Retrieves the version from project/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
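# For example (assuming dotty_dict/__init__.py contains a line like __version__ = '1.0.0'),
# get_version('dotty_dict', '__init__.py') would return '1.0.0'.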
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
subprocess.check_call(['pipenv', 'install', '--dev', '--deploy', '--system'])
develop.run(self)
class TestCommand(test):
"""Run tests"""
def run(self):
subprocess.check_call(['pytest'])
test.run(self)
setup(
name='dotty_dict',
version=get_version('dotty_dict', '__init__.py'),
description="Dictionary wrapper for quick access to deeply nested keys.",
long_description=readme,
license="MIT license",
author="<NAME> @pawelzny",
author_email='<EMAIL>',
url='https://github.com/pawelzny/dotty_dict',
packages=find_packages(exclude=('tests', 'docs', 'bin', 'example')),
package_dir={'dotty_dict': 'dotty_dict'},
include_package_data=True,
use_scm_version=True,
setup_requires=['setuptools_scm'],
zip_safe=False,
keywords='dot notation dict wrapper helper utils lib',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'License :: OSI Approved :: MIT License',
],
cmdclass={
'develop': PostDevelopCommand,
'test': TestCommand,
},
)
``` |
{
"source": "Jiaming1999/kool",
"score": 3
} |
#### File: contrib/auth/group.py
```python
from kool.db.models import Model, where, table
from .permission import Permission
class Group(Model):
"""Groups are used to cluster similar users.
Extends:
Model
"""
def __init__(self, name):
super().__init__()
self.name = name
self.permissions = []
def __str__(self):
return '{}'.format(self.name)
def __repr__(self):
return '{}'.format(self.name)
def add_permission(self, perm_id):
"""Receives a permission id, queries it and
if it exists, adds it to list of permissions of
a given group
:param perm_id:
"""
permission = table(Permission)
if permission.get(where('_id') == str(perm_id)):
if not perm_id in self.permissions:
self.permissions.append(perm_id)
else:
            raise ValueError('Permission id not found!')
self.update()
return self.permissions
def del_permission(self, perm_id):
"""Receives a permission id and deletes it from
a list of permissions for a given group
:param perm_id:
"""
if perm_id in self.permissions:
self.permissions.remove(perm_id)
self.update()
return self.permissions
```
#### File: kool/db/models.py
```python
from .flatfile import FlatFileDB, Query
from kool.utils import camel_to_snake, now
class Model(object):
db = None # database
def __init__(self, * args, ** kwargs):
"""
Model provides save, delete, purge operations to every
class that inherits it.
"""
# Get class name, so as to set the table name
cls_name = self.__class__.__name__
table_name = camel_to_snake(cls_name)
self._table = Model.db.create_table(name=table_name)
self.last_modified = None
self.date_created = None
self._id = None
def save(self, * args, ** kwargs):
"""
Saves current object to database.
It also updates the `last_modified` and `date_created` fields.
"""
data = {}
self.last_modified = '{}'.format(now())
if not self.date_created:
self.date_created = '{}'.format(now())
# Get objects dict
data = self.props()
if data:
# Creates a new instance
self._id = self._table.insert(data)
return self._id
def update(self, * args, ** kwargs):
"""
Update method provides a way of updating the values of an object.
"""
data = {}
self.last_modified = '{}'.format(now())
if not self.date_created:
self.date_created = '{}'.format(now())
# Get objects dict
data = self.props()
# Fetch exising object
obj = self._table.get(rid=self._id) if self._id else None
if obj and data:
# Updates an existing instance
ids = self._table.update(data, rids=[self._id])
self._id = ids[0]
return self._id
def delete(self, cond=None, rids=None, * args):
        # prefer this record's own id; otherwise fall back to the rids argument or positional args
        rids = ([self._id] if self._id else []) or rids or list(args)
if rids:
self._table.remove(cond=cond, rids=rids)
else:
raise ValueError('Record must be saved to delete')
def purge(self, confirm=False):
"""
Truncates the table. Operation is irreversible.
Keyword Arguments:
confirm {bool} -- user confirmation (default: {False})
"""
if confirm:
self._table.purge()
else:
raise ValueError('Confirm argument has to be set true')
def props(self):
"""Converts object to dictionary"""
return dict(
(key, value)
for (key, value) in self.__dict__.items()
if not (key.startswith('_') or key.startswith('__')))
def __getattr__(self, name):
"""
Forward all unknown attribute calls to the underlying standard table.
"""
return getattr(self._table, name)
# Instantiate database
Model.db = FlatFileDB()
def where(key):
return Query()[key]
def table(cls):
"""Returns a table object given a class"""
cls_name = cls.__name__
table_name = camel_to_snake(cls_name)
return Model().db.table(table_name)
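# A minimal usage sketch (illustrative assumption, not part of the library itself):
#
#   class Book(Model):
#       def __init__(self, title):
#           super().__init__()
#           self.title = title
#
#   book = Book('Dune')
#   book.save()                                  # persists the record and sets book._id
#   table(Book).get(where('title') == 'Dune')    # query the underlying flat-file table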
``` |
{
"source": "JiamingBai/InteractiveDataScience.github.io",
"score": 3
} |
#### File: JiamingBai/InteractiveDataScience.github.io/app.py
```python
from flask import Flask, render_template, Response, request, redirect, url_for
app = Flask(__name__)
@app.route("/")
def index():
return render_template('prediction.html')
@app.route("/forward/", methods=['POST'])
def predict():
#Moving forward code
forward_message = "Testing..."
    return render_template('prediction.html', message=forward_message)
@app.route('/json')
def json():
return render_template('prediction.html')
@app.route('/background_process_test')
def background_process_test():
print "Hello"
return "nothing"
``` |
{
"source": "JiamingHu121/Shadowrocket-Config",
"score": 2
} |
#### File: Shadowrocket-Config/src/srconfig.py
```python
import base64
import datetime
import urllib.request
import os
class SRConfig:
sep = os.sep
__DownloadFilePath = "third_party" + sep
__GFWListRuleFileName = "gfw.txt"
__BlockRuleFileName = "block.txt"
__BaseRuleFileName = "baserule.txt"
__RankRuleFileName = "rank.txt"
__OutputRuleName = "rule.txt"
__OutputSimplifyRuleName = "simplifyrule.txt"
__OutputRuleWithAdBlockName = "rulewithad.txt"
__SimplifyLength = 2000
__GFWListUrl = "https://raw.githubusercontent.com/gfwlist/gfwlist/master/gfwlist.txt"
__BlockRulesUrls = ["https://raw.githubusercontent.com/easylist/easylist/master/easylist/easylist_thirdparty.txt",
"https://raw.githubusercontent.com/easylist/easylist/master/easylist/easylist_adservers.txt"]
__ProxyList = []
__BlockList = []
__SimplifyList = []
def __init__(self):
self.__downloadFile(self.__GFWListUrl, self.__DownloadFilePath + self.__GFWListRuleFileName)
self.__downloadFiles(self.__BlockRulesUrls, self.__DownloadFilePath + self.__BlockRuleFileName)
def getRules(self):
baseRules = open(self.__DownloadFilePath + self.__BaseRuleFileName)
proxyRules = self.__getProxyRules(self.__DownloadFilePath + self.__GFWListRuleFileName)
blockRules = self.__getBlockRules(self.__DownloadFilePath + self.__BlockRuleFileName)
simplfyRules = self.__getSimplifyRules(self.__DownloadFilePath + self.__RankRuleFileName)
proxyFile = open(self.__OutputRuleName, 'w')
proxyWithAdBlockFile = open(self.__OutputRuleWithAdBlockName, 'w')
proxySimplifyFile = open(self.__OutputSimplifyRuleName, 'w')
proxyFile.write("# Shadowrocket: " + str(datetime.datetime.now()) + "\n")
proxyWithAdBlockFile.write("# Shadowrocket: " + str(datetime.datetime.now()) + "\n")
proxySimplifyFile.write("# Shadowrocket: " + str(datetime.datetime.now()) + "\n")
for line in baseRules:
proxyFile.write(line)
proxyWithAdBlockFile.write(line)
proxySimplifyFile.write(line)
if line == "[Rule]\n":
proxyFile.write(proxyRules)
proxyWithAdBlockFile.write(proxyRules)
proxyWithAdBlockFile.write(blockRules)
proxySimplifyFile.write(simplfyRules)
proxyFile.close()
proxyWithAdBlockFile.close()
proxySimplifyFile.close()
def __downloadFile(self, fileUrl, fileName):
response = urllib.request.urlopen(fileUrl)
data = response.read().decode('utf-8')
oFile = open(fileName, 'w')
oFile.write(data)
oFile.close()
def __downloadFiles(self, fileUrls, fileName):
writeData = ""
for url in fileUrls:
response = urllib.request.urlopen(url)
data = response.read().decode("utf-8")
writeData += data + "\n"
oFile = open(fileName, 'w')
oFile.write(writeData)
oFile.close()
def __getProxyRules(self, fileName):
proxyRules = ""
base64file = open(fileName)
pureRules = base64.b64decode(base64file.read()).decode('utf-8').split("\n")
for rule in pureRules:
newLine = self.__processProxyRule(rule)
if newLine in self.__ProxyList:
continue
else:
self.__ProxyList.append(newLine)
if newLine != "":
proxyRules += newLine + "\n"
return proxyRules
def __getSimplifyRules(self, fileName):
simplifyRules = ""
cnt = 0
f = open(fileName).readlines()
for line in f:
domain = line.split(" ")[0]
rank = line.split(" ")[1]
if rank != '-1\n':
rule = "DOMAIN-SUFFIX," + domain + ",Proxy"
simplifyRules += rule + "\n"
cnt = cnt + 1
if cnt > self.__SimplifyLength:
return simplifyRules
return simplifyRules
def __processProxyRule(self, rule):
if rule.startswith('!') or rule.startswith('[') or rule.startswith('@') or rule.startswith('/'):
return ""
elif rule.startswith('.'):
rule = rule[1:]
elif rule.startswith('||'):
rule = rule[2:]
elif rule.startswith('|https'):
rule = rule[9:]
elif rule.startswith('|http://'):
rule = rule[8:]
if rule.endswith('\n'):
rule = rule[:-1]
if "/" in rule:
rule = rule[:rule.index("/")]
if "*" in rule:
rule = rule[rule.index("*") + 1:]
if rule == "":
return ""
return "DOMAIN-SUFFIX," + rule + ",Proxy"
def __getBlockRules(self, fileName):
blockRules = ""
orignRules = open(fileName)
for rule in orignRules:
newLine = self.__processBlockRule(rule)
if newLine in self.__BlockList:
continue
else:
self.__BlockList.append(newLine)
if newLine != "":
blockRules += newLine + "\n"
return blockRules
def __processBlockRule(self, rule):
if rule.startswith('!') or rule.startswith('-'):
return ""
if "/" in rule:
return ""
if rule.startswith('||'):
rule = rule[2:]
if rule.endswith("\n"):
rule = rule[:-1]
if rule.endswith("^"):
rule = rule[:rule.index("^")]
if "^" in rule:
if "third-party" in rule:
rule = rule[:rule.index("^")]
else:
return ""
if "$" in rule:
rule = rule[:rule.index("$")]
if "/" in rule:
return ""
return "DOMAIN-SUFFIX," + rule + ",REJECT"
if __name__ == "__main__":
sConfig = SRConfig()
sConfig.getRules()
``` |
{
"source": "jiamingli9674/Intelligent-Checkout-System",
"score": 3
} |
#### File: scripts/utility/video_camera.py
```python
import cv2
from face_recog.face_id import FaceId
from anti_spoofing.anti_spoofing import check_authenticity
class VideoCamera(object):
def __init__(self):
#self.video = cv2.VideoCapture("../data/test_video_spoofing.mp4")
self.video = cv2.VideoCapture(0)
self.faceid = FaceId()
def __del__(self):
self.video.release()
def check_identity(self):
rtvl, frame = self.video.read()
is_real, frame_auth = check_authenticity(frame.copy())
if is_real:
frame = self.faceid.match_faces(frame.copy())
else:
frame = frame_auth
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
def encode_face(self):
rtvl, frame = self.video.read()
status, face_encoding, frame = self.faceid.encode_face(frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return status, face_encoding, jpeg.tobytes()
def read_frame(self):
rtvl, frame = self.video.read()
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
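# A minimal usage sketch (illustrative assumption, not part of the original module):
if __name__ == '__main__':
    camera = VideoCamera()
    frame_bytes = camera.read_frame()  # one JPEG-encoded frame from the default webcam
    print('captured %d bytes' % len(frame_bytes))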
```
#### File: Intelligent-Checkout-System/test/face_recognize.py
```python
import cv2
#Method for generating dataset to recognize a person
def generate_dataset(img, id, img_id):
#write the image in a data dir
cv2.imwrite("data/user." + str(id)+"."+str(img_id)+".jpg",img)
#Method that draws a boundary around the detected feature
def draw_boundary(img, classifier, scaleFactor, minNeighbors, color, text):
#Covert image to grayscale
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#detect features in gray-scale, return coordinates, width and height of features
features = classifier.detectMultiScale(gray_img, scaleFactor, minNeighbors)
coords = []
#draw rectangle around the feature and label it.
for (x,y,w,h) in features:
cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
cv2.putText(img, text, (x, y-4), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 1, cv2.LINE_AA)
coords = [x,y,w,h]
return coords
#Method for detecting the features
def detect(img, faceCascade, img_id):
color = {"blue":(255,0,0), "red":(0,0,255), "green":(0,255,0), "white":(255,255,255)}
coords = draw_boundary(img, faceCascade, 1.1, 10, color['blue'], "Face")
if len(coords)==4:
#Update region of interest by cropping the image
roi_img = img[coords[1]:coords[1]+coords[3],coords[0]:coords[0]+coords[2]]
#Assign a unique id to each user.
user_id = 1
#img_id for making name of each unique image.
generate_dataset(roi_img, user_id, img_id)
return img
#Load the classifiers
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#Capture real time video stream.
video_capture = cv2.VideoCapture(0)
#Initialize the img_id with 0
img_id = 0
while True:
if img_id % 50 == 0:
print("Collected ", img_id, " images")
#Read image from video stream
_, img = video_capture.read()
#Call the method defined above.
img = detect(img, faceCascade, img_id)
#Writing processed image in a new window
cv2.imshow("face detection", img)
img_id += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#Turn the webcam off
video_capture.release()
#Destroy the output window
cv2.destroyAllWindows()
``` |
{
"source": "JiaMingLin/caffe",
"score": 2
} |
#### File: caffe/act-detector-scripts/ACT_datalayer.py
```python
import sys
import os
import random
import numpy as np
import cv2
from ACT_utils import iou2d
from Dataset import GetDataset
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'python'))
import caffe
distort_params = {
'brightness_prob': 0.5,
'brightness_delta': 32,
'contrast_prob': 0.5,
'contrast_lower': 0.5,
'contrast_upper': 1.5,
'hue_prob': 0.5,
'hue_delta': 18,
'saturation_prob': 0.5,
'saturation_lower': 0.5,
'saturation_upper': 1.5,
'random_order_prob': 0.0,
}
expand_params = {
'expand_prob': 0.5,
'max_expand_ratio': 4.0,
}
batch_samplers = [{
'sampler': {},
'max_trials': 1,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'min_jaccard_overlap': 0.1, },
'max_trials': 50,
'max_sample': 1,
}, {
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'min_jaccard_overlap': 0.3,},
'max_trials': 50,
'max_sample': 1,
},{
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'min_jaccard_overlap': 0.5,},
'max_trials': 50,
'max_sample': 1,
},{
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'min_jaccard_overlap': 0.7,},
'max_trials': 50,
'max_sample': 1,
},{
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'min_jaccard_overlap': 0.9,},
'max_trials': 50,
'max_sample': 1,
},{
'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0,},
'sample_constraint': {'max_jaccard_overlap': 1.0,},
'max_trials': 50,
'max_sample': 1,
},]
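# Descriptive note (not part of the original file): the samplers above implement SSD-style crop
# sampling. The first entry keeps the full image; the next five draw random crops whose scale and
# aspect ratio fall in the given ranges and whose IoU with at least one ground-truth tube is at
# least 0.1, 0.3, 0.5, 0.7 or 0.9; the last one only caps the overlap at 1.0. sample_cuboids()
# below collects the candidate crops and crop_image() picks one of them at random.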
def random_brightness(imglist, brightness_prob, brightness_delta):
if random.random() < brightness_prob:
brig = random.uniform(-brightness_delta, brightness_delta)
for i in range(len(imglist)):
imglist[i] += brig
return imglist
def random_contrast(imglist, contrast_prob, contrast_lower, contrast_upper):
if random.random() < contrast_prob:
cont = random.uniform(contrast_lower, contrast_upper)
for i in range(len(imglist)):
imglist[i] *= cont
return imglist
def random_saturation(imglist, saturation_prob, saturation_lower, saturation_upper):
if random.random() < saturation_prob:
satu = random.uniform(saturation_lower, saturation_upper)
for i in range(len(imglist)):
hsv = cv2.cvtColor(imglist[i], cv2.COLOR_BGR2HSV)
hsv[:, :, 1] *= satu
imglist[i] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return imglist
def random_hue(imglist, hue_prob, hue_delta):
if random.random() < hue_prob:
hue = random.uniform(-hue_delta, hue_delta)
for i in range(len(imglist)):
hsv = cv2.cvtColor(imglist[i], cv2.COLOR_BGR2HSV)
hsv[:, :, 0] += hue
imglist[i] = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return imglist
def apply_distort(imglist, distort_param):
out_imglist = imglist
if distort_param['random_order_prob'] != 0: raise NotImplementedError
if random.random() > 0.5:
out_imglist = random_brightness(out_imglist, distort_param['brightness_prob'], distort_param['brightness_delta'])
out_imglist = random_contrast(out_imglist, distort_param['contrast_prob'], distort_param['contrast_lower'], distort_param['contrast_upper'])
out_imglist = random_saturation(out_imglist, distort_param['saturation_prob'], distort_param['saturation_lower'], distort_param['saturation_upper'])
out_imglist = random_hue(out_imglist, distort_param['hue_prob'], distort_param['hue_delta'])
else:
out_imglist = random_brightness(out_imglist, distort_param['brightness_prob'], distort_param['brightness_delta'])
out_imglist = random_saturation(out_imglist, distort_param['saturation_prob'], distort_param['saturation_lower'], distort_param['saturation_upper'])
out_imglist = random_hue(out_imglist, distort_param['hue_prob'], distort_param['hue_delta'])
out_imglist = random_contrast(out_imglist, distort_param['contrast_prob'], distort_param['contrast_lower'], distort_param['contrast_upper'])
return out_imglist
def apply_expand(imglist, tubes, expand_param, mean_values=None):
# Tubes: dict of label -> list of tubes with tubes being <x1> <y1> <x2> <y2>
out_imglist = imglist
out_tubes = tubes
if random.random() < expand_param['expand_prob']:
expand_ratio = random.uniform(1, expand_param['max_expand_ratio'])
oh,ow = imglist[0].shape[:2]
h = int(oh * expand_ratio)
w = int(ow * expand_ratio)
out_imglist = [np.zeros((h, w, 3), dtype=np.float32) for i in range(len(imglist))]
h_off = int(np.floor(h - oh))
w_off = int(np.floor(w - ow))
if mean_values is not None:
for i in range(len(imglist)):
out_imglist[i] += np.array(mean_values).reshape(1, 1, 3)
for i in range(len(imglist)):
out_imglist[i][h_off:h_off+oh, w_off:w_off+ow, :] = imglist[i]
# project boxes
for ilabel in tubes:
for itube in range(len(tubes[ilabel])):
out_tubes[ilabel][itube] += np.array([[w_off, h_off, w_off, h_off]], dtype=np.float32)
return out_imglist, out_tubes
def sample_cuboids(tubes, batch_samplers, imheight, imwidth):
sampled_cuboids = []
for batch_sampler in batch_samplers:
max_trials = batch_sampler['max_trials']
max_sample = batch_sampler['max_sample']
itrial = 0
isample = 0
sampler = batch_sampler['sampler']
min_scale = sampler['min_scale'] if 'min_scale' in sampler else 1
max_scale = sampler['max_scale'] if 'max_scale' in sampler else 1
min_aspect = sampler['min_aspect_ratio'] if 'min_aspect_ratio' in sampler else 1
max_aspect = sampler['max_aspect_ratio'] if 'max_aspect_ratio' in sampler else 1
while itrial < max_trials and isample < max_sample:
# sample a normalized box
scale = random.uniform(min_scale, max_scale)
aspect = random.uniform(min_aspect, max_aspect)
width = scale * np.sqrt(aspect)
height = scale / np.sqrt(aspect)
x = random.uniform(0, 1 - width)
y = random.uniform(0, 1 - height)
# rescale the box
sampled_cuboid = np.array([x*imwidth, y*imheight, (x+width)*imwidth, (y+height)*imheight], dtype=np.float32)
# check constraint
itrial += 1
if not 'sample_constraint' in batch_sampler:
sampled_cuboids.append(sampled_cuboid)
isample += 1
continue
constraints = batch_sampler['sample_constraint']
ious = np.array([np.mean(iou2d(t, sampled_cuboid)) for t in sum(tubes.values(),[])])
if ious.size == 0: # empty gt
isample += 1
continue
if 'min_jaccard_overlap' in constraints and ious.max() >= constraints['min_jaccard_overlap']:
sampled_cuboids.append( sampled_cuboid )
isample += 1
continue
if 'max_jaccard_overlap' in constraints and ious.min() >= constraints['max_jaccard_overlap']:
sampled_cuboids.append( sampled_cuboid )
isample += 1
continue
return sampled_cuboids
def crop_image(imglist, tubes, batch_samplers):
candidate_cuboids = sample_cuboids(tubes, batch_samplers, imglist[0].shape[0], imglist[0].shape[1])
if not candidate_cuboids:
return imglist, tubes
crop_cuboid = random.choice(candidate_cuboids)
x1, y1, x2, y2 = map(int, crop_cuboid.tolist())
for i in range(len(imglist)):
imglist[i] = imglist[i][y1:y2+1, x1:x2+1, :]
out_tubes = {}
wi = x2 - x1
hi = y2 - y1
for ilabel in tubes:
for itube in range(len(tubes[ilabel])):
t = tubes[ilabel][itube]
t -= np.array([[x1, y1, x1, y1]], dtype=np.float32)
# check if valid
cx = 0.5 * (t[:, 0] + t[:, 2])
cy = 0.5 * (t[:, 1] + t[:, 3])
if np.any(cx < 0) or np.any(cy < 0) or np.any(cx > wi) or np.any(cy > hi):
continue
if not ilabel in out_tubes:
out_tubes[ilabel] = []
# clip box
t[:, 0] = np.maximum(0, t[:, 0])
t[:, 1] = np.maximum(0, t[:, 1])
t[:, 2] = np.minimum(wi, t[:, 2])
t[:, 3] = np.minimum(hi, t[:, 3])
out_tubes[ilabel].append(t)
return imglist, out_tubes
# Assisting function for finding a good/bad tubelet
def tubelet_in_tube(tube, i, K):
# True if all frames from i to (i + K - 1) are inside tube
# it's sufficient to just check the first and last frame.
return (i in tube[: ,0] and i + K - 1 in tube[:, 0])
def tubelet_out_tube(tube, i, K):
# True if all frames between i and (i + K - 1) are outside of tube
return all([not j in tube[:, 0] for j in range(i, i + K)])
def tubelet_in_out_tubes(tube_list, i, K):
# Given a list of tubes: tube_list, return True if
# all frames from i to (i + K - 1) are either inside (tubelet_in_tube)
# or outside (tubelet_out_tube) the tubes.
return all([tubelet_in_tube(tube, i, K) or tubelet_out_tube(tube, i, K) for tube in tube_list])
def tubelet_has_gt(tube_list, i, K):
# Given a list of tubes: tube_list, return True if
# the tubelet starting spanning from [i to (i + K - 1)]
# is inside (tubelet_in_tube) at least a tube in tube_list.
return any([tubelet_in_tube(tube, i, K) for tube in tube_list])
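# Worked example (illustrative): for a tube whose first column covers frames 1..10 and K = 6,
# tubelet_in_tube(tube, 3, 6) is True (frames 3 and 8 are both inside), tubelet_in_tube(tube, 7, 6)
# is False (frame 12 is missing), and tubelet_out_tube(tube, 11, 6) is True (frames 11..16 all fall
# outside the tube).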
class MultiframesLayer(caffe.Layer):
def shuffle(self): # shuffle the list of possible starting frames
self._order = list(range(self._nseqs))
if self._shuffle:
# set seed like that to have exactly the same shuffle even if we restart from a caffemodel
random.seed(self._rand_seed + self._nshuffles)
random.shuffle(self._order)
self._nshuffles += 1
self._next = 0
def setup(self, bottom, top):
layer_params = eval(self.param_str)
assert 'dataset_name' in layer_params
dataset_name = layer_params['dataset_name']
self._dataset = GetDataset(dataset_name)
assert 'K' in layer_params
self._K = layer_params['K']
assert self._K > 0
# parse optional argument
default_values = {
'rand_seed': 0,
'shuffle': True,
'batch_size': 32 // self._K,
'mean_values': [104, 117, 123],
'resize_height': 300,
'resize_width': 300,
'restart_iter': 0,
'flow': False,
'ninput': 1,
}
for k in default_values.keys():
if k in layer_params:
lay_param = layer_params[k]
else:
lay_param = default_values[k]
setattr(self, '_' + k, lay_param)
if not self._flow and self._ninput > 1:
raise NotImplementedError("ACT-detector: Not implemented: ninput > 1 with rgb frames")
d = self._dataset
K = self._K
# build index (v,i) of valid starting chunk
self._indices = []
for v in d.train_vlist():
vtubes = sum(d.gttubes(v).values(), [])
self._indices += [(v,i) for i in range(1, d.nframes(v)+2-K) if tubelet_in_out_tubes(vtubes,i,K) and tubelet_has_gt(vtubes,i,K)]
# self._indices += [(v,i) for i in range(1, d.nframes(v)+2-K) if all([ (i in t[:,0] and i+K-1 in t[:,0]) or all([not j in t[:,0] for j in xrange(i,i+K)]) for t in vtubes]) and any([ (i in t[:,0] and i+K-1 in t[:,0]) for t in vtubes]) ]
self._nseqs = len(self._indices)
self._iter = 0
self._nshuffles = 0
self.shuffle()
if self._restart_iter > 0:
assert self._next == 0
self._iter = self._restart_iter
iimages = self._restart_iter * self._batch_size
while iimages > self._nseqs:
self.shuffle()
iimages -= self._nseqs
self._next = iimages
for i in range(K):
top[i].reshape(self._batch_size, 3 * self._ninput, self._resize_height, self._resize_width)
top[K].reshape(1, 1, 1, 8)
def prepare_blob(self):
d = self._dataset
K = self._K
# Have the same data augmentation, even if restarted
random.seed(self._rand_seed + self._iter)
data = [np.empty((self._batch_size, 3 * self._ninput, self._resize_height, self._resize_width), dtype=np.float32) for ii in range(K)]
alltubes = []
for i in range(self._batch_size):
if self._next == self._nseqs:
self.shuffle()
v,frame = self._indices[self._order[self._next]]
# flipping with probability 0.5
do_mirror = random.getrandbits(1) == 1
# load images and tubes and apply mirror
images = []
if self._flow:
images = [cv2.imread(d.flowfile(v, min(frame+ii, d.nframes(v)))).astype(np.float32) for ii in range(K + self._ninput - 1)]
else:
images = [cv2.imread(d.imfile(v, frame+ii)).astype(np.float32) for ii in range(K)]
if do_mirror:
images = [im[:, ::-1, :] for im in images]
# reverse the x component of the flow
if self._flow:
for ii in range(K + self._ninput - 1):
images[ii][:, :, 2] = 255 - images[ii][:, :, 2]
h, w = d.resolution(v)
TT = {}
for ilabel, tubes in d.gttubes(v).items():
for t in tubes:
if frame not in t[:, 0]:
continue
assert frame + K - 1 in t[:, 0]
if do_mirror:
# copy otherwise it will change the gt of the dataset also
t = t.copy()
xmin = w - t[:, 3]
t[:, 3] = w - t[:, 1]
t[:, 1] = xmin
boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + K) ,1:5]
assert boxes.shape[0] == K
if ilabel not in TT:
TT[ilabel] = []
TT[ilabel].append( boxes)
# apply data augmentation
images = apply_distort(images, distort_params)
images, TT = apply_expand(images, TT, expand_params, mean_values=self._mean_values)
images, TT = crop_image(images, TT, batch_samplers)
hi,wi = images[0].shape[:2]
# resize
images = [cv2.resize(im, (self._resize_width, self._resize_height), interpolation=cv2.INTER_LINEAR) for im in images]
for ii in range(K):
for iii in range(self._ninput):
data[ii][i, 3*iii:3*iii + 3, :, :] = np.transpose( images[ii + iii], (2, 0, 1))
idxtube = 0
for ilabel in TT:
for itube in range(len(TT[ilabel])):
for b in TT[ilabel][itube]:
alltubes.append([i, ilabel+1, idxtube, b[0]/wi, b[1]/hi, b[2]/wi, b[3]/hi, 0])
idxtube += 1
self._next += 1
self._iter += 1
for ii in range(K):
data[ii] -= np.tile(np.array(self._mean_values, dtype=np.float32)[None, :, None, None], (1, self._ninput, 1, 1))
label = np.array(alltubes, dtype=np.float32)
# label shape 1x1x1x8; if no boxes, then -1
if label.size == 0:
label = -np.ones((1, 1, 1, 8), dtype=np.float32)
else:
label = label.reshape(1, 1, -1, 8)
return data + [label]
def forward(self, bottom, top):
blobs = self.prepare_blob()
for ii in range(len(top) - 1):
top[ii].data[...] = blobs[ii].astype(np.float32, copy=False)
top[len(top) - 1].reshape(*(blobs[len(top) - 1].shape))
top[len(top) - 1].data[...] = blobs[len(top) - 1].astype(np.float32, copy=False)
def backward(self, bottom, propagate_down, top):
pass
def reshape(self, bottom, top):
# done in the forward
pass
```
#### File: caffe/act-detector-scripts/ACT.py
```python
import sys
import os
import pickle
import cv2
import numpy as np
CAFFE_PYTHON_PATH = os.path.join(os.path.dirname(__file__), "../python")
sys.path.insert(0, CAFFE_PYTHON_PATH)
import caffe
from Dataset import GetDataset
from ACT_utils import *
from copy import deepcopy
K = 6
IMGSIZE = 300
MEAN = np.array([[[104, 117, 123]]], dtype=np.float32)
NFLOWS = 5
def extract_tubelets(dname, gpu=-1, redo=False):
"""Extract the tubelets for a given dataset
args:
- dname: dataset name (example: 'JHMDB')
- gpu (default -1): use gpu given in argument, or use cpu if -1
    - redo: whether or not to recompute already computed files
save a pickle file for each frame
the file contains a tuple (dets, dets_all)
- dets is a numpy array with 2+4*K columns containing the tubelets starting at this frame after per-class nms at 0.45 and thresholding the scores at 0.01
the columns are <label> <score> and then <x1> <y1> <x2> <y2> for each of the frame in the tubelet
- dets_all contains the tubelets obtained after a global nms at 0.7 and thresholding the scores at 0.01
      it is a numpy array with 4*K + L + 1 columns containing the coordinates of the tubelets and the scores for all labels
note: this version is inefficient: it is better to estimate the per-frame features once
"""
d = GetDataset(dname)
if gpu >= 0:
caffe.set_mode_gpu()
caffe.set_device(gpu)
model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)
output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
# load the RGB network
rgb_proto = os.path.join(model_dir, "deploy_RGB.prototxt")
rgb_model = os.path.join(model_dir, "RGB.caffemodel")
net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)
# load the FLOW5 network
flo_proto = os.path.join(model_dir, "deploy_FLOW5.prototxt")
flo_model = os.path.join(model_dir, "FLOW5.caffemodel")
net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)
vlist = d.test_vlist()
for iv, v in enumerate(vlist):
print("Processing video {:d}/{:d}: {:s}".format( iv+1, len(vlist), v))
h, w = d.resolution(v)
# network output is normalized between 0,1 ; so we will multiply it by the following array
resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)
# now process each frame
for i in range(1, 1 + d.nframes(v) - K + 1):
outfile = os.path.join(output_dir, d.frame_format(v,i) + ".pkl")
# skip if already computed
if os.path.isfile(outfile) and not redo:
continue
# read the frames for the forward
kwargs_rgb = {}
kwargs_flo = {}
for j in range(K):
im = cv2.imread(d.imfile(v, i + j))
if im is None:
print("Image {:s} does not exist".format(d.imfile(v, i+j)))
return
imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)
kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]
imf = [cv2.imread(d.flowfile(v, min(d.nframes(v), i + j + iflow))) for iflow in range(NFLOWS)]
                if any(im is None for im in imf):
print("Flow image {:s} does not exist".format(d.flowfile(v, i+j)))
return
imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]
timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]
kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)
# compute rgb and flow scores
# two forward passes: one for the rgb and one for the flow
net_rgb.forward(end="mbox_conf_flatten", **kwargs_rgb) # forward of rgb with confidence and regression
net_flo.forward(end="mbox_conf_flatten", **kwargs_flo) # forward of flow5 with confidence and regression
# compute late fusion of rgb and flow scores (keep regression from rgb)
# use net_rgb for standard detections, net_flo for having all boxes
scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)
net_rgb.blobs['mbox_conf_flatten'].data[...] = scores
net_flo.blobs['mbox_conf_flatten'].data[...] = scores
net_flo.blobs['mbox_loc'].data[...] = net_rgb.blobs['mbox_loc'].data
# two forward passes, only for the last layer
            # dets is the detections after per-class NMS and thresholding (standard)
# dets_all contains all the scores and regressions for all tubelets
dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]
dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]
# parse detections with per-class NMS
if dets.shape[0] == 1 and np.all(dets == -1):
dets = np.empty((0, dets.shape[1]), dtype=np.float32)
dets[:, 2:] *= resolution_array # network output was normalized in [0..1]
dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]
dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))
dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))
# parse detections with global NMS at 0.7 (top 300)
# coordinates were normalized in [0..1]
dets_all[:, 0:4*K] *= resolution_array
dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))
dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))
idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)
dets_all = dets_all[idx, :]
# save file
if not os.path.isdir(os.path.dirname(outfile)):
os.system('mkdir -p ' + os.path.dirname(outfile))
with open(outfile, 'wb') as fid:
pickle.dump((dets, dets_all), fid)
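# Editor's note: small illustrative helper, not part of the original script, showing
# how the pickle written by extract_tubelets() can be read back. The column layout
# follows the docstring above: dets has <label> <score> followed by K boxes,
# dets_all has 4*K coordinates followed by one score per label.
def _read_tubelet_file(outfile):
    with open(outfile, 'rb') as fid:
        dets, dets_all = pickle.load(fid)
    labels = dets[:, 0]
    scores = dets[:, 1]
    boxes = dets[:, 2:].reshape(-1, K, 4)  # one (x1, y1, x2, y2) box per frame of the tubelet
    return labels, scores, boxes, dets_all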
def load_frame_detections(d, vlist, dirname, nms):
if isinstance(d, str):
d = GetDataset(d)
alldets = [] # list of numpy array with <video_index> <frame_index> <ilabel> <score> <x1> <y1> <x2> <y2>
for iv, v in enumerate(vlist):
h,w = d.resolution(v)
# aggregate the results for each frame
vdets = {i: np.empty((0,6), dtype=np.float32) for i in range(1, 1 + d.nframes(v))} # x1, y1, x2, y2, score, ilabel
# load results for each starting frame
for i in range(1, 1 + d.nframes(v) - K + 1):
resname = os.path.join(dirname, d.frame_format(v,i) + '.pkl')
if not os.path.isfile(resname):
print("ERROR: Missing extracted tubelets "+resname)
sys.exit()
with open(resname, 'rb') as fid:
dets, _ = pickle.load(fid)
if dets.size == 0:
continue
for k in range(K):
vdets[i+k] = np.concatenate( (vdets[i+k],dets[:,np.array([2+4*k,3+4*k,4+4*k,5+4*k,1,0])] ), axis=0)
# Perform NMS in each frame
for i in vdets:
idx = np.empty((0,), dtype=np.int32)
for ilabel in range(d.nlabels):
a = np.where(vdets[i][:,5] == ilabel)[0]
if a.size == 0:
continue
idx = np.concatenate((idx, a[nms2d(vdets[i][vdets[i][:, 5] == ilabel, :5], nms)]), axis=0)
if idx.size == 0:
continue
alldets.append(np.concatenate((iv * np.ones((idx.size, 1), dtype=np.float32), i * np.ones((idx.size, 1), dtype=np.float32), vdets[i][idx, :][:, np.array([5, 4, 0, 1, 2, 3], dtype=np.int32)]), axis=1))
return np.concatenate(alldets, axis=0)
def frameAP(dname, th=0.5, redo=False):
d = GetDataset(dname)
dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameAP{:g}.pkl".format(th))
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
res = pickle.load(fid)
else:
vlist = d.test_vlist()
# load per-frame detections
alldets = load_frame_detections(d, vlist, dirname, 0.3)
res = {}
# compute AP for each class
for ilabel,label in enumerate(d.labels):
# detections of this class
detections = alldets[alldets[:, 2] == ilabel, :]
# load ground-truth of this class
gt = {}
for iv, v in enumerate(vlist):
tubes = d.gttubes(v)
if not ilabel in tubes:
continue
for tube in tubes[ilabel]:
for i in range(tube.shape[0]):
k = (iv, int(tube[i, 0]))
if not k in gt:
gt[k] = []
gt[k].append(tube[i, 1:5].tolist())
for k in gt:
gt[k] = np.array( gt[k] )
# pr will be an array containing precision-recall values
pr = np.empty((detections.shape[0] + 1, 2), dtype=np.float32)# precision,recall
pr[0, 0] = 1.0
pr[0, 1] = 0.0
fn = sum([g.shape[0] for g in gt.values()]) # false negatives
fp = 0 # false positives
tp = 0 # true positives
for i, j in enumerate(np.argsort(-detections[:,3])):
k = (int(detections[j,0]), int(detections[j,1]))
box = detections[j, 4:8]
ispositive = False
if k in gt:
ious = iou2d(gt[k], box)
amax = np.argmax(ious)
if ious[amax] >= th:
ispositive = True
gt[k] = np.delete(gt[k], amax, 0)
if gt[k].size == 0:
del gt[k]
if ispositive:
tp += 1
fn -= 1
else:
fp += 1
pr[i+1, 0] = float(tp) / float(tp + fp)
pr[i+1, 1] = float(tp) / float(tp + fn)
res[label] = pr
# save results
with open(eval_file, 'wb') as fid:
pickle.dump(res, fid)
# display results
ap = 100*np.array([pr_to_ap(res[label]) for label in d.labels])
print("frameAP")
    for il, label in enumerate(d.labels):
        print("{:20s} {:8.2f}".format(label, ap[il]))
print("{:20s} {:8.2f}".format("mAP", np.mean(ap)))
print("")
def frameAP_error(dname, th=0.5, redo=False):
d = GetDataset(dname)
dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameAP{:g}ErrorAnalysis.pkl".format(th))
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
res = pickle.load(fid)
else:
vlist = d.test_vlist()
# load per-frame detections
alldets = load_frame_detections(d, vlist, dirname, 0.3)
res = {}
# compute AP for each class
for ilabel,label in enumerate(d.labels):
# detections of this class
detections = alldets[alldets[:, 2] == ilabel, :]
gt = {}
othergt = {}
labellist = {}
for iv, v in enumerate(vlist):
tubes = d.gttubes(v)
labellist[v] = tubes.keys()
for il in tubes:
for tube in tubes[il]:
for i in range(tube.shape[0]):
k = (iv, int(tube[i, 0]))
if il == ilabel:
if k not in gt:
gt[k] = []
gt[k].append(tube[i, 1:5].tolist())
else:
if k not in othergt:
othergt[k] = []
othergt[k].append(tube[i, 1:5].tolist())
for k in gt:
gt[k] = np.array(gt[k])
for k in othergt:
othergt[k] = np.array(othergt[k])
dupgt = deepcopy(gt)
# pr will be an array containing precision-recall values and 4 types of errors:
# localization, classification, timing, others
pr = np.empty((detections.shape[0] + 1, 6), dtype=np.float32)# precision, recall
pr[0, 0] = 1.0
pr[0, 1:] = 0.0
fn = sum([g.shape[0] for g in gt.values()]) # false negatives
fp = 0 # false positives
tp = 0 # true positives
EL = 0 # localization errors
            EC = 0  # classification error: overlap >= th with an object of another class
EO = 0 # other errors
ET = 0 # timing error: the video contains the action but not at this frame
for i, j in enumerate(np.argsort(-detections[:,3])):
k = (int(detections[j, 0]), int(detections[j,1]))
box = detections[j, 4:8]
ispositive = False
if k in dupgt:
if k in gt:
ious = iou2d(gt[k], box)
amax = np.argmax(ious)
if k in gt and ious[amax] >= th:
ispositive = True
gt[k] = np.delete(gt[k], amax, 0)
if gt[k].size == 0:
del gt[k]
else:
EL += 1
elif k in othergt:
ious = iou2d(othergt[k], box)
if np.max(ious) >= th:
EC += 1
else:
EO += 1
                elif ilabel in labellist[vlist[k[0]]]:  # k[0] is the video index; labellist is keyed by video name
ET += 1
else:
EO += 1
if ispositive:
tp += 1
fn -= 1
else:
fp += 1
pr[i+1, 0] = float(tp)/float(tp+fp)
pr[i+1, 1] = float(tp)/float(tp+fn)
pr[i+1, 2] = float(EL)/float(tp+fp)
pr[i+1, 3] = float(EC)/float(tp+fp)
pr[i+1, 4] = float(ET)/float(tp+fp)
pr[i+1, 5] = float(EO)/float(tp+fp)
res[label] = pr
# save results
with open(eval_file, 'wb') as fid:
pickle.dump(res, fid)
# display results
AP = 100*np.array([pr_to_ap(res[label][:,[0, 1]]) for label in d.labels])
othersap = [100*np.array([pr_to_ap(res[label][:,[j, 1]]) for label in d.labels]) for j in range(2, 6)]
EL = othersap[0]
EC = othersap[1]
ET = othersap[2]
EO = othersap[3]
EM = 100 - 100*np.array([res[label][-1, 1] for label in d.labels]) # missed detections = 1 - recall
LIST = [AP, EL, EC, ET, EO, EM]
print("Error Analysis")
print("")
print("{:20s} {:8s} {:8s} {:8s} {:8s} {:8s} {:8s}".format('label', ' AP ', ' Loc. ', ' Cls. ', ' Time ', ' Other ', ' missed '))
print("")
for il, label in enumerate(d.labels):
print("{:20s} ".format(label) + " ".join(["{:8.2f}".format(L[il]) for L in LIST]))
print("")
print("{:20s} ".format("mean") + " ".join(["{:8.2f}".format(np.mean(L)) for L in LIST]))
print("")
def frameMABO(dname, redo=False):
d = GetDataset(dname)
dirname = os.path.join( os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameMABO.pkl")
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
BO = pickle.load(fid)
else:
vlist = d.test_vlist()
BO = {l: [] for l in d.labels} # best overlap
for v in vlist:
gt = d.gttubes(v)
h, w = d.resolution(v)
# load per-frame detections
vdets = {i: np.empty((0,4), dtype=np.float32) for i in range(1, 1+d.nframes(v))}
# load results for each chunk
for i in range(1, 1 + d.nframes(v) - K + 1):
resname = os.path.join(dirname, d.frame_format(v,i) + '.pkl')
if not os.path.isfile(resname):
print("ERROR: Missing extracted tubelets " + resname)
sys.exit()
with open(resname, 'rb') as fid:
dets, _ = pickle.load(fid)
for k in range(K):
vdets[i+k] = np.concatenate((vdets[i + k], dets[:, 2+4*k:6+4*k]), axis=0)
# for each frame
for i in range(1, 1 + d.nframes(v)):
for ilabel in gt:
label = d.labels[ilabel]
for t in gt[ilabel]:
# the gt tube does not cover frame i
if not i in t[:,0]:
continue
gtbox = t[t[:,0] == i, 1:5] # box of gt tube at frame i
if vdets[i].size == 0: # we missed it
BO[label].append(0)
continue
ious = iou2d(vdets[i], gtbox)
BO[label].append( np.max(ious) )
# save file
with open(eval_file, 'wb') as fid:
pickle.dump( BO, fid)
# print MABO results
ABO = {la: 100 * np.mean(np.array(BO[la])) for la in d.labels} # average best overlap
for la in d.labels:
print("{:20s} {:6.2f}".format(la, ABO[la]))
print("{:20s} {:6.2f}".format("MABO", np.mean(np.array(ABO.values()))))
def frameCLASSIF(dname, redo=False):
d = GetDataset(dname)
dirname = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "frameCLASSIF.pkl")
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
CLASSIF = pickle.load(fid)
else:
vlist = d.test_vlist()
CORRECT = [0 for ilabel in range(d.nlabels)]
TOTAL = [0 for ilabel in range(d.nlabels)]
for v in vlist:
nframes = d.nframes(v)
# load all tubelets
VDets = {}
for startframe in range(1, nframes + 2 - K):
resname = os.path.join(dirname, d.frame_format(v, startframe) + '.pkl')
if not os.path.isfile(resname):
print("ERROR: Missing extracted tubelets " + resname)
sys.exit()
with open(resname, 'rb') as fid:
_, VDets[startframe] = pickle.load(fid)
# iterate over ground-truth
tubes = d.gttubes(v)
for ilabel in tubes:
for g in tubes[ilabel]:
for i in range(g.shape[0]):
frame = int(g[i, 0])
# just in case a tube is longer than the video
if frame > nframes:
continue
gtbox = g[i, 1:5]
scores = np.zeros((d.nlabels,), dtype=np.float32)
                        # sum the scores over the (up to K) tubelets that contain this frame
for sf in range(max(1, frame - K + 1), min(nframes - K + 1, frame) + 1):
overlaps = iou2d(VDets[sf][:, 4*(frame-sf):4*(frame-sf)+4], gtbox)
scores += np.sum(VDets[sf][overlaps >= 0.7, 4*K + 1:],axis=0)
# check classif
if np.argmax(scores) == ilabel:
CORRECT[ilabel] += 1
TOTAL[ilabel] += 1
CLASSIF = [float(CORRECT[ilabel]) / float(TOTAL[ilabel]) for ilabel in range(d.nlabels)]
with open(eval_file, 'wb') as fid:
pickle.dump(CLASSIF, fid)
# print classif results
for il, la in enumerate(d.labels):
print("{:20s} {:6.2f}".format(la, 100*CLASSIF[il]))
print("{:20s} {:6.2f}".format("CLASSIF", 100*np.mean(np.array(CLASSIF))))
def BuildTubes(dname, redo=False):
d = GetDataset(dname)
dirname = os.path.join( os.path.dirname(__file__), '../results/ACT-detector/', dname)
vlist = d.test_vlist()
for iv, v in enumerate(vlist):
print("Processing video {:d}/{:d}: {:s}".format(iv + 1, len(vlist), v))
outfile = os.path.join(dirname, v + "_tubes.pkl")
if os.path.isfile(outfile) and not redo:
continue
RES = {}
nframes = d.nframes(v)
# load detected tubelets
VDets = {}
for startframe in range(1, nframes + 2 - K):
resname = os.path.join(dirname, d.frame_format(v, startframe) + '.pkl')
if not os.path.isfile(resname):
print("ERROR: Missing extracted tubelets " + resname)
sys.exit()
with open(resname, 'rb') as fid:
_, VDets[startframe] = pickle.load(fid)
for ilabel in range(d.nlabels):
FINISHED_TUBES = []
CURRENT_TUBES = [] # tubes is a list of tuple (frame, lstubelets)
def tubescore(tt):
return np.mean(np.array([tt[i][1][-1] for i in range(len(tt))]))
for frame in range(1, d.nframes(v) + 2 - K):
# load boxes of the new frame and do nms while keeping Nkeep highest scored
                ltubelets = VDets[frame][:, list(range(4*K)) + [4*K + 1 + ilabel]]  # Nx(4K+1) with (x1 y1 x2 y2)*K ilabel-score
idx = nms_tubelets(ltubelets, 0.3, top_k=10)
ltubelets = ltubelets[idx,:]
# just start new tubes
if frame == 1:
for i in range(ltubelets.shape[0]):
CURRENT_TUBES.append( [(1,ltubelets[i,:])] )
continue
# sort current tubes according to average score
avgscore = [tubescore(t) for t in CURRENT_TUBES ]
argsort = np.argsort(-np.array(avgscore))
CURRENT_TUBES = [CURRENT_TUBES[i] for i in argsort]
# loop over tubes
finished = []
for it, t in enumerate(CURRENT_TUBES):
# compute ious between the last box of t and ltubelets
last_frame, last_tubelet = t[-1]
ious = []
offset = frame - last_frame
if offset < K:
nov = K - offset
ious = sum([iou2d(ltubelets[:, 4*iov:4*iov+4], last_tubelet[4*(iov+offset):4*(iov+offset+1)]) for iov in range(nov)])/float(nov)
else:
ious = iou2d(ltubelets[:, :4], last_tubelet[4*K-4:4*K])
valid = np.where(ious >= 0.2)[0]
if valid.size>0:
# take the one with maximum score
idx = valid[ np.argmax(ltubelets[valid, -1])]
CURRENT_TUBES[it].append((frame, ltubelets[idx,:]))
ltubelets = np.delete(ltubelets, idx, axis=0)
else:
# skip
if offset>=5:
finished.append(it)
                # move the tubes that terminated at this frame to the finished list
for it in finished[::-1]: # process in reverse order to delete them with the right index
FINISHED_TUBES.append( CURRENT_TUBES[it][:])
del CURRENT_TUBES[it]
# start new tubes
for i in range(ltubelets.shape[0]):
CURRENT_TUBES.append([(frame,ltubelets[i,:])])
            # tubes still active after the last frame are also kept
FINISHED_TUBES += CURRENT_TUBES
# build real tubes
output = []
for t in FINISHED_TUBES:
score = tubescore(t)
                # skip tubes with a low average score
                if score < 0.01:
continue
beginframe = t[0][0]
endframe = t[-1][0]+K-1
length = endframe+1-beginframe
                # delete tubes with short duration
if length < 15:
continue
                # build the final tube by averaging the overlapping tubelets
out = np.zeros((length, 6), dtype=np.float32)
out[:, 0] = np.arange(beginframe,endframe+1)
n_per_frame = np.zeros((length, 1), dtype=np.int32)
for i in range(len(t)):
frame, box = t[i]
for k in range(K):
out[frame-beginframe+k, 1:5] += box[4*k:4*k+4]
out[frame-beginframe+k, -1] += box[-1]
n_per_frame[frame-beginframe+k ,0] += 1
out[:,1:] /= n_per_frame
output.append((out, score))
RES[ilabel] = output
with open(outfile, 'wb') as fid:
pickle.dump(RES, fid)
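# Editor's note: illustrative helper, not in the original script, showing how the
# per-video tube file written by BuildTubes() can be consumed. RES maps each label
# index to a list of (tube, score) pairs, where tube is an array with one row per
# frame: <frame> <x1> <y1> <x2> <y2> <score>.
def _best_tube_for_label(tube_file, ilabel):
    with open(tube_file, 'rb') as fid:
        res = pickle.load(fid)
    tubes = res.get(ilabel, [])
    if len(tubes) == 0:
        return None
    return max(tubes, key=lambda ts: ts[1])  # the (tube, score) pair with the highest score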
def videoAP(dname, th=0.5, redo=False):
d = GetDataset(dname)
dirname = os.path.join( os.path.dirname(__file__), '../results/ACT-detector/', dname)
eval_file = os.path.join(dirname, "videoAP{:g}.pkl".format(th))
if os.path.isfile(eval_file) and not redo:
with open(eval_file, 'rb') as fid:
res = pickle.load(fid)
else:
vlist = d.test_vlist()
# load detections
        # alldets: for each label in 0..nlabels-1, a list of tuples (video, score, tube), where tube has one row per frame: <frame> <x1> <y1> <x2> <y2> <score>
alldets = {ilabel: [] for ilabel in range(d.nlabels)}
for v in vlist:
tubename = os.path.join(dirname, v + '_tubes.pkl')
if not os.path.isfile(tubename):
print("ERROR: Missing extracted tubes " + tubename)
sys.exit()
with open(tubename, 'rb') as fid:
tubes = pickle.load(fid)
for ilabel in range(d.nlabels):
ltubes = tubes[ilabel]
idx = nms3dt(ltubes, 0.3)
alldets[ilabel] += [(v,ltubes[i][1], ltubes[i][0]) for i in idx]
# compute AP for each class
res = {}
for ilabel in range(d.nlabels):
detections = alldets[ilabel]
# load ground-truth
gt = {}
for v in vlist:
tubes = d.gttubes(v)
if not ilabel in tubes:
continue
gt[v] = tubes[ilabel]
if len(gt[v])==0:
del gt[v]
# precision,recall
pr = np.empty((len(detections) + 1, 2), dtype=np.float32)
pr[0,0] = 1.0
pr[0,1] = 0.0
fn = sum([ len(g) for g in gt.values()]) # false negatives
fp = 0 # false positives
tp = 0 # true positives
for i, j in enumerate( np.argsort(-np.array([dd[1] for dd in detections]))):
v, score, tube = detections[j]
ispositive = False
if v in gt:
ious = [iou3dt(g, tube) for g in gt[v]]
amax = np.argmax(ious)
if ious[amax] >= th:
ispositive = True
del gt[v][amax]
if len(gt[v]) == 0:
del gt[v]
if ispositive:
tp += 1
fn -= 1
else:
fp += 1
pr[i+1,0] = float(tp) / float(tp + fp)
pr[i+1,1] = float(tp) / float(tp + fn)
res[d.labels[ilabel]] = pr
# save results
with open(eval_file, 'wb') as fid:
pickle.dump(res, fid)
# display results
ap = 100 * np.array([pr_to_ap(res[label]) for label in d.labels])
print("frameAP")
    for il, label in enumerate(d.labels):
        print("{:20s} {:8.2f}".format(label, ap[il]))
print("{:20s} {:8.2f}".format("mAP", np.mean(ap)))
print("")
if __name__=="__main__":
exec(sys.argv[1])
``` |
{
"source": "JiaMingLin/DesignSpaceExplore",
"score": 2
} |
#### File: DesignSpaceExplore/workloads/resnet18.py
```python
from .common import ops
def network(res, num_class = 101):
nix = res["nix"]
niy = res["niy"]
network_template = {
"conv1": {"nix": nix, "niy": niy, "nif": 3, "nof": 64, "stride":2, "kernel": 7, "type": "conv"},
############################# block 1 #############################
"conv2_1": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"},#1
"conv2_2": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"},#1
"conv3_1": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"},#1
"conv3_2": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"},#1
"conv3_3": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 128, "kernel": 3, "stride": 2, "type": "conv"},#1
############################# block 2 #############################
"conv4_1": {"nix": nix/16, "niy": niy/16, "nif": 128, "nof": 128, "kernel": 3, "type": "conv"},#1
"scale_conv_1": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 128, "kernel": 1, "stride": 2, "type": "conv"},#1
"conv5_1": {"nix": nix/16, "niy": niy/16, "nif": 128, "nof": 128, "kernel": 3, "type": "conv"},#1
"conv5_2": {"nix": nix/16, "niy": niy/16, "nif": 128, "nof": 128, "kernel": 3, "type": "conv"},#1
"conv5_3": {"nix": nix/16, "niy": niy/16, "nif": 128, "nof": 256, "kernel": 3, "stride": 2, "type": "conv"},#1
############################# block 3 #############################
"conv6_1": {"nix": nix/8, "niy": niy/8, "nif": 256, "nof": 256, "kernel": 3, "type": "conv"},#1
"conv6_2": {"nix": nix/8, "niy": niy/8, "nif": 256, "nof": 256, "kernel": 3, "type": "conv"},#1
"conv7_1": {"nix": nix/8, "niy": niy/8, "nif": 256, "nof": 256, "kernel": 3, "type": "conv"},#1
"conv7_2": {"nix": nix/8, "niy": niy/8, "nif": 256, "nof": 512, "kernel": 3, "stride": 2, "type": "conv"},#1
############################# block 4 #############################
"conv8_1": {"nix": nix/16, "niy": niy/16, "nif": 512, "nof": 512, "kernel": 3, "type": "conv"},#1
"conv8_2": {"nix": nix/16, "niy": niy/16, "nif": 512, "nof": 512, "kernel": 3, "type": "conv"},#1
"conv9_1": {"nix": nix/16, "niy": niy/16, "nif": 512, "nof": 512, "kernel": 3, "type": "conv"},#1
"conv9_2": {"nix": nix/16, "niy": niy/16, "nif": 512, "nof": 512, "kernel": 3, "stride": 2, "type": "conv"},#1
"fc1": {"nix": 1, "niy": 1, "nif": int(niy/32)*int(niy/32)*512, "nof": num_class, "kernel": 1, "type":"fc"}, #13
}
operations = ops(network_template)
print("ResNet-18 OPs = ", operations)
return network_template
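# Editor's note: the actual operation count comes from ops() in workloads/common.py,
# which is imported above but not shown here. The helper below is only an
# illustrative sketch, under the assumption that a conv/fc layer contributes
# nof * nif * kernel^2 * (nix/stride) * (niy/stride) multiply-accumulates.
def _layer_macs_sketch(layer):
    stride = layer.get("stride", 1)
    out_x = layer["nix"] / stride
    out_y = layer["niy"] / stride
    return layer["nof"] * layer["nif"] * layer["kernel"] * layer["kernel"] * out_x * out_y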
```
#### File: DesignSpaceExplore/workloads/simple_net.py
```python
def network(res, num_class = 101):
nix = res["nix"]
niy = res["niy"]
network_template = {
"conv1_1": {"nix": nix, "niy": niy, "nif": 3, "nof": 32, "kernel": 3, "type": "conv"}, #0
"conv1_2": {"nix": nix/2, "niy": niy/2, "nif": 32, "nof": 64, "kernel": 3, "type": "conv"},#1
"conv2_1": {"nix": nix/4, "niy": niy/4, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"}, #2
"conv2_2": {"nix": nix/8, "niy": niy/8, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"}, #3
"conv3_1": {"nix": nix/16, "niy": niy/16, "nif": 64, "nof": 64, "kernel": 3, "type": "conv"}, #4
# "fc1": {"nix": 1, "niy": 1, "nif": int(niy/32)*int(niy/32)*64, "nof": 512, "kernel": 1, "type":"fc"}, #13
# "fc2": {"nix": 1, "niy": 1, "nif": 512, "nof": 4096, "kernel": 1, "type":"fc"}, #14
}
return network_template
``` |
{
"source": "JiaMingLin/dlcv_adda",
"score": 3
} |
#### File: dlcv_adda/core/pretrain.py
```python
import os
import torch
import torch.nn as nn
import torch.optim as optim
import params
from utils import make_variable, save_model
from .test import evaluation
from tensorboardX import SummaryWriter
def train_src(exp, encoder, classifier, data_loader, data_loader_eval):
"""Train classifier for source domain."""
####################
# 1. setup network #
####################
src_acc = 0
# set train state for Dropout and BN layers
encoder.train()
classifier.train()
# setup criterion and optimizer
optimizer = optim.Adam(
list(encoder.parameters()) + list(classifier.parameters()),
lr=params.c_learning_rate,
betas=(params.beta1, params.beta2))
criterion = nn.CrossEntropyLoss()
####################
# 2. train network #
####################
writer = SummaryWriter(
log_dir = os.path.join('runs', exp)
)
for epoch in range(params.num_epochs_pre):
for step, (images, labels) in enumerate(data_loader):
# make images and labels variable
images = make_variable(images)
labels = make_variable(labels.squeeze_())
# zero gradients for optimizer
optimizer.zero_grad()
# compute loss for critic
preds = classifier(encoder(images))
loss = criterion(preds, labels)
# optimize source classifier
loss.backward()
optimizer.step()
# print step info
if ((step + 1) % params.log_step_pre == 0):
print("Epoch [{}/{}] Step [{}/{}]: loss={}"
.format(epoch + 1,
params.num_epochs_pre,
step + 1,
len(data_loader),
loss.item()))
# save model parameters
if ((epoch + 1) % params.save_step_pre == 0):
save_model(exp, encoder, "ADDA-source-encoder-{}.pt".format(epoch + 1))
save_model(exp, classifier, "ADDA-source-classifier-{}.pt".format(epoch + 1))
# eval model on test set
if ((epoch + 1) % params.eval_step_pre == 0):
acc = evaluation(encoder, classifier, data_loader_eval)
writer.add_scalar('src_acc', acc*100, (epoch + 1))
if acc > src_acc:
print("============== Save Best Model =============")
save_model(exp, encoder, "ADDA-source-encoder-best.pt")
save_model(exp, classifier, "ADDA-source-classifier-best.pt")
src_acc = acc
writer.close()
return encoder, classifier
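# Editor's note: hypothetical usage sketch (commented out; the encoder/classifier
# builders are placeholders, not names from this repository). train_src() only
# needs an encoder module, a classifier module and source train/eval loaders:
#
#   encoder, classifier = build_encoder(), build_classifier()
#   encoder, classifier = train_src('my_experiment', encoder, classifier,
#                                   src_train_loader, src_eval_loader)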
```
#### File: dlcv_adda/datasets/datagen.py
```python
import torch.utils.data as data
import os
from PIL import Image
class DataGenerator(data.Dataset):
def __init__(self, data_root, train = True, transform=None):
"""
Args:
1. image folder
2. data name, label list
3. if train, loading data from train folder, or test folder
4.
"""
self.root = data_root
self.transform = transform
self.train = train
if train:
data_list = os.path.join(self.root, 'train.csv')
else:
data_list = os.path.join(self.root, 'test.csv')
with open(data_list) as fin:
data_list = fin.readlines()
self.img_paths = []
self.img_labels = []
self.n_data = 0
for data in data_list[1:]:
data = data.strip('\n').split(',')
self.img_paths.append(data[0])
self.img_labels.append(data[1])
self.n_data += 1
def __getitem__(self, idx):
img_path, label = self.img_paths[idx], self.img_labels[idx]
if self.train is True:
img_path = os.path.join(self.root, 'train', img_path)
else:
img_path = os.path.join(self.root, 'test', img_path)
with open(img_path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = int(label)
return img, label
def __len__(self):
return len(self.img_paths)
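# Editor's note: illustrative usage (commented out so the module stays import-safe).
# The dataset expects <data_root>/train.csv and <data_root>/train/ to exist; the
# paths and transform below are placeholders:
#
#   from torch.utils.data import DataLoader
#   train_set = DataGenerator('path/to/dataset', train=True, transform=my_transform)
#   train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)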
``` |
{
"source": "JiaMingLin/residual_adapters",
"score": 2
} |
#### File: JiaMingLin/residual_adapters/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import config_task
import math
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1_fonc(in_planes, out_planes=None, stride=1, bias=False):
if out_planes is None:
return nn.Conv2d(in_planes, in_planes, kernel_size=1, stride=stride, padding=0, bias=bias)
else:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=bias)
class conv1x1(nn.Module):
def __init__(self, planes, out_planes=None, stride=1):
super(conv1x1, self).__init__()
if config_task.mode == 'series_adapters':
self.conv = nn.Sequential(nn.BatchNorm2d(planes), conv1x1_fonc(planes))
elif config_task.mode == 'parallel_adapters':
self.conv = conv1x1_fonc(planes, out_planes, stride)
else:
self.conv = conv1x1_fonc(planes)
def forward(self, x):
y = self.conv(x)
if config_task.mode == 'series_adapters':
y += x
return y
class conv_task(nn.Module):
def __init__(self, in_planes, planes, stride=1, nb_tasks=1, is_proj=1, second=0):
super(conv_task, self).__init__()
self.is_proj = is_proj
self.second = second
self.conv = conv3x3(in_planes, planes, stride)
if config_task.mode == 'series_adapters' and is_proj:
self.bns = nn.ModuleList([nn.Sequential(conv1x1(planes), nn.BatchNorm2d(planes)) for i in range(nb_tasks)])
elif config_task.mode == 'parallel_adapters' and is_proj:
self.parallel_conv = nn.ModuleList([conv1x1(in_planes, planes, stride) for i in range(nb_tasks)])
self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])
else:
self.bns = nn.ModuleList([nn.BatchNorm2d(planes) for i in range(nb_tasks)])
def forward(self, x):
task = config_task.task
y = self.conv(x)
if self.second == 0:
if config_task.isdropout1:
x = F.dropout2d(x, p=0.5, training = self.training)
else:
if config_task.isdropout2:
x = F.dropout2d(x, p=0.5, training = self.training)
if config_task.mode == 'parallel_adapters' and self.is_proj:
y = y + self.parallel_conv[task](x)
y = self.bns[task](y)
return y
# No projection: identity shortcut
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, shortcut=0, nb_tasks=1):
super(BasicBlock, self).__init__()
self.conv1 = conv_task(in_planes, planes, stride, nb_tasks, is_proj=int(config_task.proj[0]))
self.conv2 = nn.Sequential(nn.ReLU(True), conv_task(planes, planes, 1, nb_tasks, is_proj=int(config_task.proj[1]), second=1))
self.shortcut = shortcut
if self.shortcut == 1:
self.avgpool = nn.AvgPool2d(2)
def forward(self, x):
residual = x
y = self.conv1(x)
y = self.conv2(y)
if self.shortcut == 1:
residual = self.avgpool(x)
residual = torch.cat((residual, residual*0),1)
y += residual
y = F.relu(y)
return y
class ResNet(nn.Module):
def __init__(self, block, nblocks, num_classes=[10]):
super(ResNet, self).__init__()
nb_tasks = len(num_classes)
blocks = [block, block, block]
factor = config_task.factor
self.in_planes = int(32*factor)
self.pre_layers_conv = conv_task(3,int(32*factor), 1, nb_tasks)
self.layer1 = self._make_layer(blocks[0], int(64*factor), nblocks[0], stride=2, nb_tasks=nb_tasks)
self.layer2 = self._make_layer(blocks[1], int(128*factor), nblocks[1], stride=2, nb_tasks=nb_tasks)
self.layer3 = self._make_layer(blocks[2], int(256*factor), nblocks[2], stride=2, nb_tasks=nb_tasks)
self.end_bns = nn.ModuleList([nn.Sequential(nn.BatchNorm2d(int(256*factor)),nn.ReLU(True)) for i in range(nb_tasks)])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.linears = nn.ModuleList([nn.Linear(int(256*factor), num_classes[i]) for i in range(nb_tasks)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, nblocks, stride=1, nb_tasks=1):
shortcut = 0
if stride != 1 or self.in_planes != planes * block.expansion:
shortcut = 1
layers = []
layers.append(block(self.in_planes, planes, stride, shortcut, nb_tasks=nb_tasks))
self.in_planes = planes * block.expansion
for i in range(1, nblocks):
layers.append(block(self.in_planes, planes, nb_tasks=nb_tasks))
return nn.Sequential(*layers)
def forward(self, x):
x = self.pre_layers_conv(x)
task = config_task.task
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.end_bns[task](x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.linears[task](x)
return x
def resnet26(num_classes=10, blocks=BasicBlock):
return ResNet(blocks, [4,4,4],num_classes)
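# Editor's note: hypothetical instantiation sketch (commented out). The adapter
# behaviour is controlled by the global config_task module (mode, factor, proj,
# task, dropout flags), which the training script is expected to configure before
# the model is built:
#
#   net = resnet26(num_classes=[10, 100])   # one classification head per task
#   config_task.task = 0                    # select which task's adapters/head to use
#   logits = net(torch.randn(2, 3, 64, 64))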
``` |
{
"source": "JiaMingLin/tsn-pytorch",
"score": 3
} |
#### File: tsn-pytorch/customized_models/model_utils.py
```python
import os
import hashlib
import requests
from tqdm import tqdm
import torch
def deploy_model(model, cfg):
"""
Deploy model to multiple GPUs for DDP training.
"""
if cfg.DDP_CONFIG.DISTRIBUTED:
if cfg.DDP_CONFIG.GPU is not None:
torch.cuda.set_device(cfg.DDP_CONFIG.GPU)
model.cuda(cfg.DDP_CONFIG.GPU)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[cfg.DDP_CONFIG.GPU],
find_unused_parameters=True)
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
elif cfg.DDP_CONFIG.GPU is not None:
torch.cuda.set_device(cfg.DDP_CONFIG.GPU)
model.cuda(cfg.DDP_CONFIG.GPU)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
return model
def load_model(model, cfg, load_fc=True):
"""
Load pretrained model weights.
"""
if os.path.isfile(cfg.CONFIG.MODEL.PRETRAINED_PATH):
print("=> loading checkpoint '{}'".format(cfg.CONFIG.MODEL.PRETRAINED_PATH))
if cfg.DDP_CONFIG.GPU is None:
checkpoint = torch.load(cfg.CONFIG.MODEL.PRETRAINED_PATH)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(cfg.DDP_CONFIG.GPU)
checkpoint = torch.load(cfg.CONFIG.MODEL.PRETRAINED_PATH, map_location=loc)
model_dict = model.state_dict()
if not load_fc:
del model_dict['module.fc.weight']
del model_dict['module.fc.bias']
pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
unused_dict = {k: v for k, v in checkpoint['state_dict'].items() if not k in model_dict}
not_found_dict = {k: v for k, v in model_dict.items() if not k in checkpoint['state_dict']}
print("unused model layers:", unused_dict.keys())
print("not found layers:", not_found_dict.keys())
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print("=> loaded checkpoint '{}' (epoch {})"
.format(cfg.CONFIG.MODEL.PRETRAINED_PATH, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(cfg.CONFIG.MODEL.PRETRAINED_PATH))
return model, None
def save_model(model, optimizer, epoch, cfg):
# pylint: disable=line-too-long
"""
Save trained model weights.
"""
model_save_dir = os.path.join(cfg.CONFIG.LOG.BASE_PATH,
cfg.CONFIG.LOG.EXP_NAME,
cfg.CONFIG.LOG.SAVE_DIR)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
ckpt_name = "f{}_s{}_ckpt_epoch{}.pth".format(cfg.CONFIG.DATA.CLIP_LEN, cfg.CONFIG.DATA.FRAME_RATE, epoch)
checkpoint = os.path.join(model_save_dir, ckpt_name)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_acc1': None,
'optimizer': optimizer.state_dict(),
}, filename=checkpoint)
def save_checkpoint(state, filename='checkpoint.pth'):
torch.save(state, filename)
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
sha1_file = sha1.hexdigest()
l = min(len(sha1_file), len(sha1_hash))
    return sha1_file[0:l] == sha1_hash[0:l]
def download(url, path=None, overwrite=False, sha1_hash=None):
"""Download an given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
print('Downloading %s from %s...'%(fname, url))
r = requests.get(url, stream=True)
if r.status_code != 200:
raise RuntimeError("Failed downloading url %s"%url)
total_length = r.headers.get('content-length')
with open(fname, 'wb') as f:
if total_length is None: # no content length header
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
total_length = int(total_length)
for chunk in tqdm(r.iter_content(chunk_size=1024),
total=int(total_length / 1024. + 0.5),
unit='KB', unit_scale=False, dynamic_ncols=True):
f.write(chunk)
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning('File {} is downloaded but the content hash does not match. ' \
'The repo may be outdated or download may be incomplete. ' \
'If the "repo_url" is overridden, consider switching to ' \
'the default repo.'.format(fname))
return fname
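# Editor's note: illustrative usage (commented out to avoid network access on
# import); the URL and sha1 hash below are placeholders, not real artifacts:
#
#   fname = download('https://example.com/weights.pth', path='./pretrained',
#                    sha1_hash='0123456789abcdef0123456789abcdef01234567')
#   assert check_sha1(fname, '0123456789abcdef0123456789abcdef01234567')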
```
#### File: tsn-pytorch/multi-task/train_val.py
```python
import sys
sys.path.append("../")
import argparse
import os
import time
import shutil
import torch
import torchvision
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm
from dataset import TSNDataSet
from multi_task_model import TSN
from transforms import *
from opts import parser
from tensorboardX import SummaryWriter
from math import ceil
# import warnings
# warnings.filterwarnings("ignore")
best_prec1 = 0
def main():
global args, best_prec1, tb_writer_train, tb_writer_val, log_dir
args = parser.parse_args()
if args.dataset == 'ucf101':
num_class = 101
# elif args.dataset == 'hmdb51':
# num_class = 51
# elif args.dataset == 'kinetics':
# num_class = 400
else:
raise ValueError('Unknown dataset '+args.dataset)
log_dir = 'logs/{}'.format(args.save_path)
tb_writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
tb_writer_val = SummaryWriter(os.path.join(log_dir, 'val'))
model = TSN(num_class, args.num_segments,
base_model=args.arch, new_length=args.new_length,
consensus_type=args.consensus_type, dropout=args.dropout, partial_bn=not args.no_partialbn,
modality=args.modality, resume=args.resume, project_mode = args.project_mode)
crop_size = model.crop_size
scale_size = model.scale_size
input_mean = model.input_mean
input_std = model.input_std
policies = model.get_optim_policies()
# train_augmentation = model.get_augmentation()
device_ids = [int(id) for id in args.gpus.split(',')]
model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()
cudnn.benchmark = True
# Data loading code
if args.modality != 'RGBDiff':
normalize = GroupNormalize(input_mean, input_std)
else:
normalize = IdentityTransform()
naming_pattern = "frame{:06d}.jpg"
rgb_data_root_path = os.path.join(args.data_root_path, 'jpegs_256')
tvl1_data_root_path = os.path.join(args.data_root_path, 'tvl1_flow')
rgb_train_list = '../settings/ucf101/train_rgb_split1.txt'
tvl1_train_list = '../settings/ucf101/train_flow_split1.txt'
rgb_val_list = '../settings/ucf101/val_rgb_split1.txt'
tvl1_val_list = '../settings/ucf101/val_flow_split1.txt'
rgb_train_loader = torch.utils.data.DataLoader(
TSNDataSet(rgb_data_root_path, rgb_train_list, num_segments=args.num_segments,
new_length=4,
modality="RGB",
image_tmpl=naming_pattern,
transform=torchvision.transforms.Compose([
torchvision.transforms.Compose([GroupMultiScaleCrop(224, [1, .875, .75, .66]),
GroupRandomHorizontalFlip(is_flow=False)]),
Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=args.arch != 'BNInception'),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
tvl1_train_loader = torch.utils.data.DataLoader(
TSNDataSet(tvl1_data_root_path, tvl1_train_list, num_segments=args.num_segments,
new_length=6,
modality="tvl1",
image_tmpl=naming_pattern,
transform=torchvision.transforms.Compose([
torchvision.transforms.Compose([GroupMultiScaleCrop(224, [1, .875, .75]),
GroupRandomHorizontalFlip(is_flow=True)]),
Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=args.arch != 'BNInception'),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
rgb_val_loader = torch.utils.data.DataLoader(
TSNDataSet(rgb_data_root_path, rgb_val_list, num_segments=args.num_segments,
new_length=4,
modality="RGB",
image_tmpl=naming_pattern,
random_shift=False,
transform=torchvision.transforms.Compose([
GroupScale(int(scale_size)),
GroupCenterCrop(crop_size),
Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=args.arch != 'BNInception'),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
tvl1_val_loader = torch.utils.data.DataLoader(
TSNDataSet(tvl1_data_root_path, tvl1_val_list, num_segments=args.num_segments,
new_length=6,
modality="tvl1",
image_tmpl=naming_pattern,
random_shift=False,
transform=torchvision.transforms.Compose([
GroupScale(int(scale_size)),
GroupCenterCrop(crop_size),
Stack(roll=args.arch == 'BNInception'),
ToTorchFormatTensor(div=args.arch != 'BNInception'),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
train_loaders = [rgb_train_loader, tvl1_train_loader]
val_loaders = [rgb_val_loader, tvl1_val_loader]
# define loss function (criterion) and optimizer
if args.loss_type == 'nll':
criterion = torch.nn.CrossEntropyLoss().cuda()
else:
raise ValueError("Unknown loss type")
for group in policies:
print(('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
group['name'], len(group['params']), group['lr_mult'], group['decay_mult'])))
optimizer = torch.optim.SGD(policies,
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args.lr_steps)
# train for one epoch
avg_loss, avg_top1, avg_top5 = train(train_loaders, model, criterion, optimizer, epoch)
tb_writer_train.add_scalar('Monitor/RGB Training Loss', avg_loss[0], epoch)
tb_writer_train.add_scalar('Monitor/RGB Training Top1', avg_top1[0], epoch)
tb_writer_train.add_scalar('Monitor/RGB Training Top5', avg_top5[0], epoch)
tb_writer_train.add_scalar('Monitor/Flow Training Loss', avg_loss[1], epoch)
tb_writer_train.add_scalar('Monitor/Flow Training Top1', avg_top1[1], epoch)
tb_writer_train.add_scalar('Monitor/Flow Training Top5', avg_top5[1], epoch)
# evaluate on validation set
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
avg_loss, avg_top1, avg_top5 = validate(val_loaders, model, criterion)
tb_writer_val.add_scalar('Monitor/RGB Validation Loss', avg_loss[0], epoch)
tb_writer_val.add_scalar('Monitor/RGB Validation Top1', avg_top1[0], epoch)
tb_writer_val.add_scalar('Monitor/RGB Validation Top5', avg_top5[0], epoch)
tb_writer_val.add_scalar('Monitor/Flow Validation Loss', avg_loss[1], epoch)
tb_writer_val.add_scalar('Monitor/Flow Validation Top1', avg_top1[1], epoch)
tb_writer_val.add_scalar('Monitor/Flow Validation Top5', avg_top5[1], epoch)
# remember best prec@1 and save checkpoint
is_best = avg_top1[1] > best_prec1
best_prec1 = max(avg_top1[1], best_prec1)
save_checkpoint(log_dir, {
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.module.state_dict(),
'best_prec1': best_prec1,
}, is_best)
def train(train_loaders, model, criterion, optimizer, epoch):
batch_time = [AverageMeter(), AverageMeter()]
data_time = [AverageMeter(), AverageMeter()]
losses = [AverageMeter(), AverageMeter()]
top1 = [AverageMeter(), AverageMeter()]
top5 = [AverageMeter(), AverageMeter()]
model.module.partialBN(False)
# switch to train mode
model.train()
end = time.time()
train_loader_iter = [iter(loader) for loader in train_loaders]
data_length = len(train_loaders)*(len(train_loaders[0]))
for i in range(data_length):
domain = i%len(train_loaders)
data_time[domain].update(time.time() - end)
loader_iter = train_loader_iter[domain]
input_data, target = next(loader_iter)
target = target.cuda()
input_var = torch.autograd.Variable(input_data)
target_var = torch.autograd.Variable(target)
# compute output for a given domain
output = model([input_var, domain])
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1,5))
losses[domain].update(loss.item(), input_data.size(0))
top1[domain].update(prec1.item(), input_data.size(0))
top5[domain].update(prec5.item(), input_data.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
if args.clip_gradient is not None:
total_norm = clip_grad_norm(model.parameters(), args.clip_gradient)
#if total_norm > args.clip_gradient:
# print("clipping gradient: {} with coef {}".format(total_norm, args.clip_gradient / total_norm))
optimizer.step()
# measure elapsed time
batch_time[domain].update(time.time() - end)
end = time.time()
if ceil(i/len(train_loaders)) % args.print_freq == 0 and i > 0:
print(('Domain: {0}, Epoch: [{1}][{2}/{3}], lr: {lr:.5f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
"RGB" if domain == 0 else "tvl1",
epoch, ceil(i/len(train_loaders)), len(train_loaders[domain]), batch_time=batch_time[domain],
data_time=data_time[domain], loss=losses[domain], top1=top1[domain], top5=top5[domain], lr=optimizer.param_groups[-1]['lr'])))
return [loss.avg for loss in losses], [t1.avg for t1 in top1], [t5.avg for t5 in top5]
def validate(val_loaders, model, criterion, logger=None):
batch_time = [AverageMeter(), AverageMeter()]
losses = [AverageMeter(), AverageMeter()]
top1 = [AverageMeter(), AverageMeter()]
top5 = [AverageMeter(), AverageMeter()]
# switch to evaluate mode
model.eval()
val_loader_iters = [iter(loader) for loader in val_loaders]
data_length = len(val_loaders) * len(val_loaders[0])
end = time.time()
with torch.no_grad():
for domain, loader in enumerate(val_loaders):
for i, (input_data, target) in enumerate(loader):
target = target.cuda()
input_var = torch.autograd.Variable(input_data)
target_var = torch.autograd.Variable(target)
# compute output
output = model([input_var, domain])
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1,5))
losses[domain].update(loss.data.item(), input_data.size(0))
top1[domain].update(prec1.item(), input_data.size(0))
top5[domain].update(prec5.item(), input_data.size(0))
# measure elapsed time
batch_time[domain].update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(('Domain: {0},Test: [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
"RGB" if domain == 0 else "tvl1",
i, len(val_loaders[domain]), batch_time=batch_time[domain], loss=losses[domain],
top1=top1[domain], top5=top5[domain])))
print(('Domain: {0}, Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
.format("RGB" if domain == 0 else "tvl1", top1=top1[domain], top5=top5[domain], loss=losses[domain])))
return [loss.avg for loss in losses], [t1.avg for t1 in top1], [t5.avg for t5 in top5]
def save_checkpoint(log_dir, state, is_best, filename='checkpoint.pth.tar'):
filename = '_'.join(
(
'epoch_{}'.format(state['epoch']),
'{:0.2f}'.format(state['best_prec1']),
state['arch'],
args.modality.lower(),
filename)
)
save_path = os.path.join(log_dir, filename)
torch.save(state, save_path)
if is_best:
best_name = '_'.join(
(
'best',
'epoch_{}'.format(state['epoch']),
'{:0.2f}'.format(state['best_prec1']),
args.modality.lower(),
'.pth.tar')
)
best_path = os.path.join(log_dir, best_name)
shutil.copyfile(save_path, best_path)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
decay = args.weight_decay
for param_group in optimizer.param_groups:
param_group['lr'] = lr * param_group['lr_mult']
param_group['weight_decay'] = decay * param_group['decay_mult']
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
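# Editor's note: small worked example of accuracy() (commented out). It expects a
# (batch, num_classes) score tensor and a (batch,) label tensor and returns one
# percentage per requested k:
#
#   output = torch.tensor([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])
#   target = torch.tensor([1, 0])
#   accuracy(output, target, topk=(1,))   # -> [tensor(100.)], both top-1 predictions are correct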
if __name__ == '__main__':
## Log directory
main()
```
#### File: JiaMingLin/tsn-pytorch/utils.py
```python
import torch
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import average_precision_score, confusion_matrix
from matplotlib.pyplot import figure
def class_acc_mapping(acc, dataset):
with open(os.path.join('settings', dataset, 'actions.txt'), 'r') as f:
actions = f.readlines()
class_acc_map = [(item[0].strip(),item[1]) for item in zip(actions, acc)]
return class_acc_map
def eval_video(net, video_data, num_class, modality, args):
i, data, label = video_data
num_crop = args.test_crops
if modality == 'RGB':
length = 12
elif modality == 'Flow' or modality == 'tvl1':
length = 12
elif modality == 'RGBDiff':
length = 18
else:
raise ValueError("Unknown modality "+modality)
with torch.no_grad():
        input_var = torch.autograd.Variable(data.view(-1, length, data.size(2), data.size(3)))  # gradients already disabled by torch.no_grad()
rst = net(input_var).data.cpu().numpy().copy()
return i, rst.reshape((num_crop, args.test_segments, num_class)).mean(axis=0).reshape(
(args.test_segments, 1, num_class)
), label[0]
def save_output(output, video_labels, acc_class_map, args):
# reorder before saving
name_list = [x.strip().split()[0] for x in open(args.test_list)]
order_dict = {e:i for i, e in enumerate(sorted(name_list))}
reorder_output = [None] * len(output)
reorder_label = [None] * len(output)
for i in range(len(output)):
idx = order_dict[name_list[i]]
reorder_output[idx] = output[i]
reorder_label[idx] = video_labels[i]
np.savez(args.save_scores, scores=reorder_output, labels=reorder_label, acc_class_map=acc_class_map)
def softmax(raw_score, T=1):
exp_s = np.exp((raw_score - raw_score.max(axis=-1)[..., None])*T)
sum_s = exp_s.sum(axis=-1)
return exp_s / sum_s[..., None]
def default_aggregation_func(score_arr, normalization=True, crop_agg=None):
"""
This is the default function for make video-level prediction
:param score_arr: a 3-dim array with (frame, crop, class) layout
:return:
"""
crop_agg = np.mean if crop_agg is None else crop_agg
if normalization:
return softmax(crop_agg(score_arr, axis=1).mean(axis=0))
else:
return crop_agg(score_arr, axis=1).mean(axis=0)
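# Editor's note: illustrative call (commented out). For a score array of shape
# (num_frames, num_crops, num_classes) the function aggregates over crops, then
# averages over frames, and optionally applies a softmax; the shapes below are
# placeholders:
#
#   video_scores = default_aggregation_func(np.random.rand(25, 10, 101))
#   predicted_class = np.argmax(video_scores)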
def mean_class_accuracy(pred, labels):
cf = confusion_matrix(labels, pred).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
overall_acc = np.mean(cls_hit/cls_cnt)
class_acc = cls_hit/cls_cnt
return overall_acc, class_acc
def draw_summary(rgb_class_acc, flow_class_acc, fusion_class_acc, path):
print("Draw Summaries...")
acc_gain_fusion = [[item[1][0], (float(item[1][1]) - float(item[0][1]))] for item in zip(rgb_class_acc, fusion_class_acc)]
figure(figsize=(20, 24), dpi=80)
cnt = 0
for t in [rgb_class_acc, flow_class_acc, fusion_class_acc, acc_gain_fusion]:
actions = [item[0] for item in t]
acc = [float(item[1]) for item in t]
x = np.arange(len(actions))
plt.subplot(411+cnt)
plt.bar(x, acc); plt.xticks(x, actions)
plt.xlabel('Actions')
plt.ylabel('Accuracy')
plt.xticks(rotation=90)
cnt+=1
plt.savefig(os.path.join(path, 'summary.png'))
``` |
{
"source": "JiaMingLin/two-stream-pytorch",
"score": 3
} |
#### File: scripts/eval_ucf101_pytorch/VideoTemporalPrediction.py
```python
import glob
import os
import sys
import numpy as np
import math
import cv2
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
sys.path.insert(0, "../../")
import video_transforms
def VideoTemporalPrediction(
vid_name,
net,
num_categories,
start_frame=0,
num_frames=0,
num_samples=25,
optical_flow_frames=10
):
if num_frames == 0:
# print(vid_name)
imglist = glob.glob(os.path.join(vid_name, '*flow_x*.jpg'))
duration = len(imglist)
else:
duration = num_frames
clip_mean = [0.5] * 20
clip_std = [0.226] * 20
normalize = video_transforms.Normalize(mean=clip_mean,
std=clip_std)
val_transform = video_transforms.Compose([
video_transforms.ToTensor(),
normalize,
])
# selection
step = int(math.floor((duration-optical_flow_frames+1)/num_samples))
#dims = (256,340,optical_flow_frames*2,num_samples)
dims = (272,360,optical_flow_frames*2,num_samples)
flow = np.zeros(shape=dims, dtype=np.float64)
flow_flip = np.zeros(shape=dims, dtype=np.float64)
for i in range(num_samples):
for j in range(optical_flow_frames):
#flow_x_file = os.path.join(vid_name, 'flow_x_{0:04d}.jpg'.format(i*step+j+1 + start_frame))
flow_x_file = os.path.join(vid_name, 'flow_x_{0:05d}.jpg'.format(i*step+j+1 + start_frame))
#flow_y_file = os.path.join(vid_name, 'flow_y_{0:04d}.jpg'.format(i*step+j+1 + start_frame))
flow_y_file = os.path.join(vid_name, 'flow_y_{0:05d}.jpg'.format(i*step+j+1 + start_frame))
img_x = cv2.imread(flow_x_file, cv2.IMREAD_GRAYSCALE)
img_y = cv2.imread(flow_y_file, cv2.IMREAD_GRAYSCALE)
img_x = cv2.resize(img_x, dims[1::-1])
img_y = cv2.resize(img_y, dims[1::-1])
flow[:,:,j*2 ,i] = img_x
flow[:,:,j*2+1,i] = img_y
flow_flip[:,:,j*2 ,i] = 255 - img_x[:, ::-1]
flow_flip[:,:,j*2+1,i] = img_y[:, ::-1]
# crop
flow_1 = flow[:256, :256, :,:]
flow_2 = flow[:256, -256:, :,:]
flow_3 = flow[8:264, 52:308, :,:]
flow_4 = flow[-256:, :256, :,:]
flow_5 = flow[-256:, -256:, :,:]
flow_f_1 = flow_flip[:256, :256, :,:]
flow_f_2 = flow_flip[:256, -256:, :,:]
flow_f_3 = flow_flip[8:264, 52:308, :,:]
flow_f_4 = flow_flip[-256:, :256, :,:]
flow_f_5 = flow_flip[-256:, -256:, :,:]
# print(flow_1.shape,flow_2.shape,flow_3.shape,flow_4.shape,flow_5.shape,flow_f_1.shape,flow_f_2.shape,flow_f_3.shape,flow_f_4.shape,flow_f_5.shape)
flow = np.concatenate((flow_1,flow_2,flow_3,flow_4,flow_5,flow_f_1,flow_f_2,flow_f_3,flow_f_4,flow_f_5), axis=3)
_, _, _, c = flow.shape
flow_list = []
for c_index in range(c):
cur_img = flow[:,:,:,c_index].squeeze()
cur_img_tensor = val_transform(cur_img)
flow_list.append(np.expand_dims(cur_img_tensor.numpy(), 0))
flow_np = np.concatenate(flow_list,axis=0)
batch_size = 45 #25
prediction = np.zeros((num_categories,flow.shape[3]))
num_batches = int(math.ceil(float(flow.shape[3])/batch_size))
for bb in range(num_batches):
span = range(batch_size*bb, min(flow.shape[3],batch_size*(bb+1)))
input_data = flow_np[span,:,:,:]
imgDataTensor = torch.from_numpy(input_data).type(torch.FloatTensor).cuda()
imgDataVar = torch.autograd.Variable(imgDataTensor)
output = net(imgDataVar)
result = output.data.cpu().numpy()
prediction[:, span] = np.transpose(result)
return prediction
``` |
{
"source": "jiamings/d2c",
"score": 3
} |
#### File: diffusion/datasets/__init__.py
```python
import os
import torch
import numbers
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from torch.utils.data import Subset, TensorDataset
import numpy as np
def get_dataset(args, config):
if config.data.random_flip is False:
tran_transform = test_transform = transforms.Compose(
[transforms.Resize(config.data.image_size), transforms.ToTensor()]
)
else:
tran_transform = transforms.Compose(
[
transforms.Resize(config.data.image_size),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[transforms.Resize(config.data.image_size), transforms.ToTensor()]
)
train_samples = np.load(args.train_fname)
train_labels = np.zeros(len(train_samples))
data_mean = np.mean(train_samples, axis=(0, 2, 3), keepdims=True)
data_std = np.std(train_samples, axis=(0, 2, 3), keepdims=True)
train_samples = (train_samples - data_mean)/data_std
print("train data shape are - ", train_samples.shape, train_labels.shape)
print("train data stats are - ", np.mean(train_samples), np.std(train_samples),
np.min(train_samples), np.max(train_samples))
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(train_samples).float(), torch.from_numpy(train_labels).float())
return dataset
def logit_transform(image, lam=1e-6):
image = lam + (1 - 2 * lam) * image
return torch.log(image) - torch.log1p(-image)
def data_transform(config, X):
if config.data.uniform_dequantization:
X = X / 256.0 * 255.0 + torch.rand_like(X) / 256.0
if config.data.gaussian_dequantization:
X = X + torch.randn_like(X) * 0.01
if config.data.rescaled:
X = 2 * X - 1.0
elif config.data.logit_transform:
X = logit_transform(X)
if hasattr(config, "image_mean"):
return X - config.image_mean.to(X.device)[None, ...]
return X
def inverse_data_transform(config, X):
if hasattr(config, "image_mean"):
X = X + config.image_mean.to(X.device)[None, ...]
if config.data.logit_transform:
X = torch.sigmoid(X)
elif config.data.rescaled:
X = (X + 1.0) / 2.0
return torch.clamp(X, 0.0, 1.0)
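# Editor's note: data_transform() and inverse_data_transform() are intended to be
# inverses of each other for the simple rescaling path; a minimal commented check,
# assuming a config with data.rescaled = True and all other options disabled:
#
#   X = torch.rand(4, 3, 32, 32)
#   X_back = inverse_data_transform(config, data_transform(config, X))
#   assert torch.allclose(X, X_back, atol=1e-6)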
```
#### File: d2c/diffusion/main.py
```python
import argparse
import traceback
import shutil
import logging
import yaml
import sys
import os
import torch
import numpy as np
import torch.utils.tensorboard as tb
from runners.diffusion import Diffusion
torch.set_printoptions(sci_mode=False)
def parse_args_and_config():
parser = argparse.ArgumentParser(description=globals()["__doc__"])
parser.add_argument(
"--config", type=str, required=True, help="Path to the config file"
)
parser.add_argument("--seed", type=int, default=1234, help="Random seed")
parser.add_argument(
"--exp", type=str, default="exp", help="Path for saving running related data."
)
parser.add_argument(
"--ckpt_path", type=str, default="", help="The checkpoint path to load"
)
parser.add_argument(
"--doc",
type=str,
required=True,
help="A string for documentation purpose. "
"Will be the name of the log folder.",
)
parser.add_argument(
"--comment", type=str, default="", help="A string for experiment comment"
)
parser.add_argument(
"--verbose",
type=str,
default="info",
help="Verbose level: info | debug | warning | critical",
)
parser.add_argument("--test", action="store_true", help="Whether to test the model")
parser.add_argument(
"--sample",
action="store_true",
help="Whether to produce samples from the model",
)
parser.add_argument("--fid", action="store_true")
parser.add_argument("--interpolation", action="store_true")
parser.add_argument(
"--resume_training", action="store_true", help="Whether to resume training"
)
parser.add_argument(
"-i",
"--image_folder",
type=str,
default="images",
help="The folder name of samples",
)
parser.add_argument(
"--ni",
action="store_true",
help="No interaction. Suitable for Slurm Job launcher",
)
parser.add_argument("--use_pretrained", action="store_true")
parser.add_argument(
"--sample_type",
type=str,
default="generalized",
help="sampling approach (generalized or ddpm_noisy)",
)
parser.add_argument(
"--skip_type",
type=str,
default="uniform",
help="skip according to (uniform or quadratic)",
)
parser.add_argument(
"--timesteps", type=int, default=1000, help="number of steps involved"
)
parser.add_argument(
"--eta",
type=float,
default=0.0,
help="eta used to control the variances of sigma",
)
parser.add_argument('--train_fname', type=str, default="",
help='Training file')
parser.add_argument('--latent_fname', type=str, default="",
help='latent fname')
parser.add_argument(
"--out_fname", type=str, default="images/moco_to_ddim.png", help="output fname of samples"
)
parser.add_argument(
"--inp_fname", type=str, default="", help="the path of input images to read"
)
parser.add_argument(
"--mean_file", type=str, default="", help="the mean filename"
)
parser.add_argument(
"--std_file", type=str, default="", help="the std filename"
)
parser.add_argument('--n_samples', type=int, default=5000,
help='number of samples to generate')
parser.add_argument("--sequence", action="store_true")
parser.add_argument('--dataset', action='store_true')
parser.add_argument('--autoencoder', action='store_true')
parser.add_argument('--reconstruct', action='store_true')
parser.add_argument('--noise_add_steps', type=int, default=0)
parser.add_argument('--noise_skip_steps', type=int, default=1)
args = parser.parse_args()
args.log_path = os.path.join(args.exp, "logs", args.doc)
if not os.path.exists(args.log_path):
os.makedirs(args.log_path)
# parse config file
with open(os.path.join("configs", args.config), "r") as f:
config = yaml.safe_load(f)
new_config = dict2namespace(config)
tb_path = os.path.join(args.exp, "tensorboard", args.doc)
new_config.tb_logger = tb.SummaryWriter(log_dir=tb_path)
level = getattr(logging, args.verbose.upper(), None)
if not isinstance(level, int):
raise ValueError("level {} not supported".format(args.verbose))
handler1 = logging.StreamHandler()
formatter = logging.Formatter(
"%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
)
handler1.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler1)
logger.setLevel(level)
# add device
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
logging.info("Using device: {}".format(device))
new_config.device = device
# set random seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
return args, new_config
def dict2namespace(config):
namespace = argparse.Namespace()
for key, value in config.items():
if isinstance(value, dict):
new_value = dict2namespace(value)
else:
new_value = value
setattr(namespace, key, new_value)
return namespace
def main():
args, config = parse_args_and_config()
logging.info("Writing log file to {}".format(args.log_path))
logging.info("Exp instance id = {}".format(os.getpid()))
logging.info("Exp comment = {}".format(args.comment))
try:
runner = Diffusion(args, config)
if args.sample:
runner.sample()
elif args.test:
runner.test()
else:
runner.train()
except Exception:
logging.error(traceback.format_exc())
return 0
if __name__ == "__main__":
sys.exit(main())
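# Typical invocations (illustrative; the config and doc names are placeholders):
#   python main.py --config <config>.yml --doc <run_name>             # train
#   python main.py --config <config>.yml --doc <run_name> --sample    # draw samples
#   python main.py --config <config>.yml --doc <run_name> --test      # evaluate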
``` |
{
"source": "jiamings/lagvae",
"score": 3
} |
#### File: lagvae/utils/__init__.py
```python
import tensorflow as tf
import GPUtil
import numpy as np
import os
from collections import namedtuple
Datasets = namedtuple('datasets', ['train', 'test'])
PREFIX = os.environ['DATA_PREFIX']
def sample_binary(x):
r = tf.random_uniform(tf.shape(x), minval=0.0, maxval=1.0)
return tf.cast(x > r, tf.float32)
def sample_binary_label(x, y):
r = tf.random_uniform(tf.shape(x), minval=0.0, maxval=1.0)
return tf.cast(x > r, tf.float32), y
def read_mnist(path='mnist', label=False, binarized=True, batch_sizes=(250, 250)):
"""
Returns train and test sets for MNIST.
:param path: path to read / download MNIST dataset.
:param label: whether there is label or not.
:param binarized: if the dataset is stochastically binarized.
:param batch_sizes: batch sizes of train and test.
:return: Two tf.data.Datasets, train and test.
"""
# from .mnist import make_train, make_test
path = os.path.join(PREFIX, path)
# train, test = make_train(path, label), make_test(path, label)
fn = sample_binary_label if label else sample_binary
from tensorflow.examples.tutorials.mnist import input_data
mnists = input_data.read_data_sets(path)
if label:
train = tf.data.Dataset.from_tensor_slices((mnists.train.images, mnists.train.labels))
test = tf.data.Dataset.from_tensor_slices((mnists.test.images, mnists.test.labels))
else:
train = tf.data.Dataset.from_tensor_slices(mnists.train.images)
test = tf.data.Dataset.from_tensor_slices(mnists.test.images)
train, test = train.batch(batch_sizes[0]), test.batch(batch_sizes[1])
if binarized:
train, test = train.map(fn), test.map(fn)
return train, test
def find_avaiable_gpu(max_load=0.3, max_memory=0.5):
gpu_avail = GPUtil.getFirstAvailable(attempts=10000, maxLoad=max_load, maxMemory=max_memory, interval=199)
return gpu_avail[0]
def gpu_session():
gpu_options = tf.GPUOptions(allow_growth=True)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
def obtain_log_path(path):
import os
prefix = os.environ['EXP_LOG_PATH']
assert len(prefix) > 0, 'Please set environment variable EXP_LOG_PATH'
return os.path.join(prefix, path)
def compute_kernel(x, y):
x_size = tf.shape(x)[0]
y_size = tf.shape(y)[0]
dim = tf.shape(x)[1]
tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y):
"""
Compute kernel-estimated MMD between two variables, both with size [batch_size, dim]
:param x:
:param y:
:return:
"""
x_kernel = compute_kernel(x, x)
y_kernel = compute_kernel(y, y)
xy_kernel = compute_kernel(x, y)
return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
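# Illustrative usage (TF1-style, matching the rest of this module; shapes are hypothetical):
#   x = tf.random_normal([256, 16]); y = tf.random_normal([256, 16])
#   mmd = compute_mmd(x, y)  # scalar Tensor, close to 0 when x and y share a distribution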
``` |
{
"source": "jiaming-wang/MIP",
"score": 2
} |
#### File: MIP/model/base_net.py
```python
import torch
import math
import torch.optim as optim
import torch.nn as nn
from importlib import import_module
import torch.nn.functional as F
import numpy as np
from model.utils import *
######################################
# common model
######################################
class Upsampler(torch.nn.Module):
def __init__(self, scale, n_feat, bn=False, activation='prelu', bias=True):
super(Upsampler, self).__init__()
modules = []
if scale == 3:
modules.append(ConvBlock(n_feat, 9 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
modules.append(torch.nn.PixelShuffle(3))
if bn:
modules.append(torch.nn.BatchNorm2d(n_feat))
else:
for _ in range(int(math.log(scale, 2))):
modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
modules.append(torch.nn.PixelShuffle(2))
if bn:
modules.append(torch.nn.BatchNorm2d(n_feat))
self.up = torch.nn.Sequential(*modules)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU(init=0.5)
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
out = self.up(x)
if self.activation is not None:
out = self.act(out)
return out
class MeanShift(nn.Conv2d):
def __init__(
self, rgb_range,
rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
for p in self.parameters():
p.requires_grad = False
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None, pad_model=None):
super(ConvBlock, self).__init__()
self.pad_model = pad_model
self.norm = norm
if self.norm =='batch':
self.norm = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.norm = torch.nn.InstanceNorm2d(output_size)
else:
self.norm = None
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU(init=0.5)
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
else:
self.act = None
if self.pad_model == None:
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.padding = None
elif self.pad_model == 'reflection':
self.padding = nn.Sequential(nn.ReflectionPad2d(padding))
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, 0, bias=bias)
layers = filter(lambda x: x is not None, [self.padding, self.conv, self.norm, self.act])
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
######################################
# loss function
######################################
class TVLoss(nn.Module):
def __init__(self, TVLoss_weight = 1):
super(TVLoss, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, x):
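# x is expected to be a [B, C, H, W] tensor; the result is the weighted sum of the
# normalized squared differences between vertically and horizontally adjacent pixels
# (anisotropic total variation), averaged over the batch.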
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = (x.size()[2] - 1) * x.size()[3]
count_w = x.size()[2] * (x.size()[3] - 1)
h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()
w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()
return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size
class CycleLoss(nn.Module):
def __init__(self, scale = 1/2, loss_type = 'MSE'):
super(CycleLoss, self).__init__()
self.scale = scale
if loss_type == "MSE":
self.loss = nn.MSELoss()
elif loss_type == "L1":
self.loss = nn.L1Loss()
else:
raise ValueError("loss_type must be 'MSE' or 'L1'")
def forward(self, x_sr, x_lr):
downsampler = Downsampler(n_planes=3, factor=4, phase=0.5, preserve_size=True).cuda(0)
down_x = downsampler(x_sr)
# down_x = F.interpolate(x_hr, scale_factor=self.scale, mode='bicubic')
return self.loss(down_x, x_lr), down_x
######################################
# resnet_block
######################################
class ResnetBlock(torch.nn.Module):
def __init__(self, input_size, kernel_size=3, stride=1, padding=1, bias=True, scale=1, activation='prelu', norm='batch', pad_model=None):
super().__init__()
self.norm = norm
self.pad_model = pad_model
self.input_size = input_size
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.bias = bias
self.scale = scale
if self.norm =='batch':
self.normlayer = torch.nn.BatchNorm2d(input_size)
elif self.norm == 'instance':
self.normlayer = torch.nn.InstanceNorm2d(input_size)
else:
self.normlayer = None
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU(init=0.5)
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
else:
self.act = None
if self.pad_model == None:
self.conv1 = torch.nn.Conv2d(input_size, input_size, kernel_size, stride, padding, bias=bias)
self.conv2 = torch.nn.Conv2d(input_size, input_size, kernel_size, stride, padding, bias=bias)
self.pad = None
elif self.pad_model == 'reflection':
self.pad = nn.Sequential(nn.ReflectionPad2d(padding))
self.conv1 = torch.nn.Conv2d(input_size, input_size, kernel_size, stride, 0, bias=bias)
self.conv2 = torch.nn.Conv2d(input_size, input_size, kernel_size, stride, 0, bias=bias)
layers = filter(lambda x: x is not None, [self.pad, self.conv1, self.normlayer, self.act, self.pad, self.conv2, self.normlayer, self.act])
self.layers = nn.Sequential(*layers)
def forward(self, x):
residual = x
out = x
out = self.layers(x)
out = out * self.scale
out = torch.add(out, residual)
return out
class ResnetBlock_triple(ResnetBlock):
def __init__(self, *args, middle_size, output_size, **kwargs):
ResnetBlock.__init__(self, *args, **kwargs)
if self.norm =='batch':
self.normlayer1 = torch.nn.BatchNorm2d(middle_size)
self.normlayer2 = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.normlayer1 = torch.nn.InstanceNorm2d(middle_size)
self.normlayer2 = torch.nn.InstanceNorm2d(output_size)
else:
self.normlayer1 = None
self.normlayer2 = None
if self.pad_model == None:
self.conv1 = torch.nn.Conv2d(self.input_size, middle_size, self.kernel_size, self.stride, self.padding, bias=self.bias)
self.conv2 = torch.nn.Conv2d(middle_size, output_size, self.kernel_size, self.stride, self.padding, bias=self.bias)
self.pad = None
elif self.pad_model == 'reflection':
self.pad= nn.Sequential(nn.ReflectionPad2d(self.padding))
self.conv1 = torch.nn.Conv2d(self.input_size, middle_size, self.kernel_size, self.stride, 0, bias=self.bias)
self.conv2 = torch.nn.Conv2d(middle_size, output_size, self.kernel_size, self.stride, 0, bias=self.bias)
layers = filter(lambda x: x is not None, [self.pad, self.conv1, self.normlayer1, self.act, self.pad, self.conv2, self.normlayer2, self.act])
self.layers = nn.Sequential(*layers)
def forward(self, x):
residual = x
out = x
out= self.layers(x)
out = out * self.scale
out = torch.add(out, residual)
return out
```
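A minimal sketch (not part of the repository; it assumes the classes above have been imported) of how `ConvBlock` and `Upsampler` compose for a 2x feature upsampling path:
```python
import torch

# 3-channel input -> 64 feature maps, then 2x spatial upsampling via PixelShuffle.
block = ConvBlock(3, 64, kernel_size=3, stride=1, padding=1, activation='prelu', norm=None)
up = Upsampler(scale=2, n_feat=64, activation='prelu')

x = torch.randn(1, 3, 48, 48)
y = up(block(x))
print(y.shape)  # torch.Size([1, 64, 96, 96])
```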
#### File: MIP/model/utils.py
```python
import torch
import torch.nn as nn
import numpy as np
# from .downsampler import Downsampler
def add_module(self, module):
self.add_module(str(len(self) + 1), module)
torch.nn.Module.add = add_module
class Concat(nn.Module):
def __init__(self, dim, *args):
super(Concat, self).__init__()
self.dim = dim
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def forward(self, input):
inputs = []
for module in self._modules.values():
inputs.append(module(input))
inputs_shapes2 = [x.shape[2] for x in inputs]
inputs_shapes3 = [x.shape[3] for x in inputs]
if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
inputs_ = inputs
else:
target_shape2 = min(inputs_shapes2)
target_shape3 = min(inputs_shapes3)
inputs_ = []
for inp in inputs:
diff2 = (inp.size(2) - target_shape2) // 2
diff3 = (inp.size(3) - target_shape3) // 2
inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3:diff3 + target_shape3])
return torch.cat(inputs_, dim=self.dim)
def __len__(self):
return len(self._modules)
class GenNoise(nn.Module):
def __init__(self, dim2):
super(GenNoise, self).__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
# print (input.data.type())
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
def act(act_fun = 'LeakyReLU'):
'''
Either string defining an activation function or module (e.g. nn.ReLU)
'''
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun == 'ELU':
return nn.ELU()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
def bn(num_features):
return nn.BatchNorm2d(num_features)
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
downsampler = None
if stride != 1 and downsample_mode != 'stride':
if downsample_mode == 'avg':
downsampler = nn.AvgPool2d(stride, stride)
elif downsample_mode == 'max':
downsampler = nn.MaxPool2d(stride, stride)
elif downsample_mode in ['lanczos2', 'lanczos3']:
downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
else:
assert False
stride = 1
padder = None
to_pad = int((kernel_size - 1) / 2)
if pad == 'reflection':
padder = nn.ReflectionPad2d(to_pad)
to_pad = 0
convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
return nn.Sequential(*layers)
class Downsampler(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes, factor, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
super(Downsampler, self).__init__()
assert phase in [0, 0.5], 'phase should be 0 or 0.5'
support = 2
kernel_width = 4 * factor + 1
kernel_type_ = 'lanczos'
preserve_size= True
# note that `kernel width` will be different from the actual size for phase = 1/2
self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
downsampler.weight.data[:] = 0
downsampler.bias.data[:] = 0
kernel_torch = torch.from_numpy(self.kernel)
for i in range(n_planes):
downsampler.weight.data[i, i] = kernel_torch
self.downsampler_ = downsampler
if preserve_size:
if self.kernel.shape[0] % 2 == 1:
pad = int((self.kernel.shape[0] - 1) / 2.)
else:
pad = int((self.kernel.shape[0] - factor) / 2.)
self.padding = nn.ReplicationPad2d(pad)
self.preserve_size = preserve_size
def forward(self, input):
if self.preserve_size:
x = self.padding(input)
else:
x= input
self.x = x
return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
assert kernel_type in ['lanczos', 'gauss', 'box']
# factor = float(factor)
if phase == 0.5 and kernel_type != 'box':
kernel = np.zeros([kernel_width - 1, kernel_width - 1])
else:
kernel = np.zeros([kernel_width, kernel_width])
if kernel_type == 'box':
assert phase == 0.5, 'Box filter is always half-phased'
kernel[:] = 1./(kernel_width * kernel_width)
elif kernel_type == 'gauss':
assert sigma, 'sigma is not specified'
assert phase != 0.5, 'phase 1/2 for gauss not implemented'
center = (kernel_width + 1.)/2.
print(center, kernel_width)
sigma_sq = sigma * sigma
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
di = (i - center)/2.
dj = (j - center)/2.
kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
elif kernel_type == 'lanczos':
assert support, 'support is not specified'
center = (kernel_width + 1) / 2.
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
if phase == 0.5:
di = abs(i + 0.5 - center) / factor
dj = abs(j + 0.5 - center) / factor
else:
di = abs(i - center) / factor
dj = abs(j - center) / factor
pi_sq = np.pi * np.pi
val = 1
if di != 0:
val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
val = val / (np.pi * np.pi * di * di)
if dj != 0:
val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
val = val / (np.pi * np.pi * dj * dj)
kernel[i - 1][j - 1] = val
else:
assert False, 'wrong method name'
kernel /= kernel.sum()
return kernel
def fill_noise(x, noise_type):
"""Fills tensor `x` with noise of type `noise_type`."""
if noise_type == 'u':
x.uniform_()
elif noise_type == 'n':
x.normal_()
else:
assert False
def get_noise(input_depth, method, spatial_size, noise_type='u', var=1./10):
"""Returns a pytorch.Tensor of size (1 x `input_depth` x `spatial_size[0]` x `spatial_size[1]`)
initialized in a specific way.
Args:
input_depth: number of channels in the tensor
method: `noise` for filling the tensor with noise; `meshgrid` for np.meshgrid
spatial_size: spatial size of the tensor to initialize
noise_type: 'u' for uniform; 'n' for normal
var: a factor the noise is multiplied by; effectively a standard-deviation scale.
"""
if isinstance(spatial_size, int):
spatial_size = (spatial_size, spatial_size)
if method == 'noise':
shape = [1, input_depth, spatial_size[0], spatial_size[1]]
net_input = torch.zeros(shape)
fill_noise(net_input, noise_type)
net_input *= var
elif method == 'meshgrid':
assert input_depth == 2
X, Y = np.meshgrid(np.arange(0, spatial_size[1])/float(spatial_size[1]-1), np.arange(0, spatial_size[0])/float(spatial_size[0]-1))
meshgrid = np.concatenate([X[None,:], Y[None,:]])
net_input = torch.from_numpy(meshgrid)[None, :].float()  # float tensor with a leading batch dimension
else:
assert False
return net_input
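# e.g. get_noise(32, 'noise', (192, 192)) returns a [1, 32, 192, 192] tensor scaled by var (0.1 by default)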
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class Up_my(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1):
x1 = self.up(x1)
# input is CHW
# diffY = x2.size()[2] - x1.size()[2]
# diffX = x2.size()[3] - x1.size()[3]
# x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
# diffY // 2, diffY - diffY // 2])
# x = torch.cat([x2, x1], dim=1)
return self.conv(x1)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
```
#### File: MIP/solver/basesolver.py
```python
import os, torch, time
from utils.utils import draw_curve_and_save, save_config
from data.dataset import data
from data.data import get_data
from torch.utils.data import DataLoader
class BaseSolver:
def __init__(self, cfg):
self.cfg = cfg
self.nEpochs = cfg['nEpochs']
self.checkpoint_dir = cfg['checkpoint']
self.epoch = 1
self.timestamp = int(time.time())
if cfg['gpu_mode']:
self.num_workers = cfg['threads']
else:
self.num_workers = 0
self.records = {'Epoch': [], 'PSNR': [], 'SSIM': [], 'Loss': []}
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
def load_checkpoint(self, model_path):
if os.path.exists(model_path):
ckpt = torch.load(model_path)
self.epoch = ckpt['epoch']
self.records = ckpt['records']
else:
raise FileNotFoundError
def save_checkpoint(self):
self.ckp = {
'epoch': self.epoch,
'records': self.records,
}
def train(self):
raise NotImplementedError
def eval(self):
raise NotImplementedError
def run(self):
while self.epoch <= self.nEpochs:
self.train()
self.eval()
self.save_checkpoint()
self.save_records()
self.epoch += 1
#self.logger.log('Training done.')
```
#### File: MIP/solver/solver.py
```python
import os, importlib, torch, shutil, cv2
from solver.basesolver import BaseSolver
from utils.utils import maek_optimizer, make_loss, calculate_psnr, calculate_ssim, save_config, save_net_config
import torch.backends.cudnn as cudnn
from tqdm import tqdm
import numpy as np
from importlib import import_module
from torch.autograd import Variable
from data.data import DatasetFromHdf5
from torch.utils.data import DataLoader
import torch.nn as nn
from tensorboardX import SummaryWriter
from utils.config import save_yml
from model.base_net import CycleLoss
from model.utils import *
from PIL import Image
from pylab import *
from data.data import get_data
class Solver(BaseSolver):
def __init__(self, cfg, name):
super(Solver, self).__init__(cfg)
self.init_epoch = self.cfg['schedule']
net_name = self.cfg['algorithm'].lower()
lib = importlib.import_module('model.' + net_name)
net = lib.Net
self.model = net(
num_channels=self.cfg['data']['n_colors'],
base_filter=64,
scale_factor=self.cfg['data']['upsacle'],
args = self.cfg
)
self.train_dataset = get_data(self.cfg, str(self.cfg['train_dataset'])+'/'+str(name)+'.png', str(self.cfg['train_dataset'])+'/'+str(name)+'.png', self.cfg['data']['upsacle'])
self.train_loader = DataLoader(self.train_dataset, self.cfg['data']['batch_size'], shuffle=False,
num_workers=self.num_workers)
for iteration, batch in enumerate(self.train_loader, 1):
lr, hr, bic, hr_ref, bic_ref, file_name = Variable(batch[0]), Variable(batch[1]), Variable(batch[2]), Variable(batch[4]), Variable(batch[5]), (batch[6])
self.hr_ref = hr_ref
self.lr = lr
self.file_name = file_name
self.noise_init = get_noise(32, 'noise', (48*4, 48*4))
self.noise = self.noise_init.detach().clone()
self.optimizer = maek_optimizer(self.cfg['schedule']['optimizer'], cfg, self.model.parameters())
self.loss = CycleLoss(scale=1/4, loss_type = 'MSE')
self.log_name = self.cfg['algorithm'] + '_' + str(self.cfg['data']['upsacle']) + '_' + str(self.timestamp)
# save log
self.writer = SummaryWriter('log/' + str(self.log_name))
save_net_config(self.log_name, self.model)
save_yml(cfg, os.path.join('log/' + str(self.log_name), 'config.yml'))
def train(self):
epoch_loss = 0
if self.cuda:
self.noise = self.noise_init.cuda(self.gpu_ids[0]) + (self.noise.normal_() * 0.03).cuda(self.gpu_ids[0])
self.optimizer.zero_grad()
self.model.train()
self.sr, out = self.model(self.noise, self.hr_ref)
self.noise = out.detach()
loss, _ = self.loss(self.sr, self.lr)
epoch_loss = epoch_loss + loss.data
loss.backward()
self.optimizer.step()
self.records['Loss'].append(epoch_loss / len(self.train_loader))
self.writer.add_scalar('loss',self.records['Loss'][-1], self.epoch)
print(str(self.epoch) + '/'+str(self.nEpochs), self.file_name, self.records['Loss'][-1])
def save_img(self, img, img_name):
save_img = img.squeeze().clamp(0, 1).numpy().transpose(1,2,0)
# save img
save_dir=os.path.join('results/',self.cfg['test']['type'])
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_fn = save_dir + '/' + img_name
cv2.imwrite(save_fn, cv2.cvtColor(save_img*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])
def check_gpu(self):
self.cuda = self.cfg['gpu_mode']
torch.manual_seed(self.cfg['seed'])
if self.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
if self.cuda:
torch.cuda.manual_seed(self.cfg['seed'])
cudnn.benchmark = True
gpus_list = self.cfg['gpus']
self.gpu_ids = []
for str_id in gpus_list:
gid = int(str_id)
if gid >=0:
self.gpu_ids.append(gid)
torch.cuda.set_device(self.gpu_ids[0])
self.loss = self.loss.cuda(self.gpu_ids[0])
self.model = self.model.cuda(self.gpu_ids[0])
self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)
self.hr_ref = self.hr_ref.cuda(self.gpu_ids[0])
self.lr = self.lr.cuda(self.gpu_ids[0])
def check_pretrained(self):
checkpoint = os.path.join(self.cfg['pretrain']['pre_folder'], self.cfg['pretrain']['pre_sr'])
if os.path.exists(checkpoint):
self.model.load_state_dict(torch.load(checkpoint, map_location=lambda storage, loc: storage)['net'])
self.epoch = torch.load(checkpoint, map_location=lambda storage, loc: storage)['epoch']
if self.epoch > self.nEpochs:
raise Exception("Pretrain epoch must be less than the max epoch!")
else:
raise Exception("Pretrain path error!")
def save_checkpoint(self):
super(Solver, self).save_checkpoint()
if self.records['Loss'] != [] and self.records['Loss'][-1] == np.array(self.records['Loss']).min():
self.save_img(self.sr[0].cpu().data, self.file_name[0])
def run(self):
self.check_gpu()
while self.epoch <= self.nEpochs:
self.train()
self.epoch += 1
self.save_img(self.sr[0].cpu().data, self.file_name[0])
# save_config(self.log_name, 'Training done.')
``` |
{
"source": "jiaming-wang/N_SR",
"score": 2
} |
#### File: N_SR/data/data.py
```python
from os.path import join
from torchvision.transforms import Compose, ToTensor
from .dataset import Data, Data_test, Data_eval
from torchvision import transforms
import torch, h5py, numpy
import torch.utils.data as data
def transform():
return Compose([
ToTensor(),
])
def get_data(cfg, data_dir, upscale_factor):
data_dir = join(cfg['data_dir'], data_dir)
cfg = cfg
return Data(data_dir, upscale_factor, cfg, transform=transform())
class DatasetFromHdf5(data.Dataset):
def __init__(self, file_path):
super(DatasetFromHdf5, self).__init__()
hf = h5py.File(file_path, 'r')
self.data = numpy.array(hf.get('data'))
self.target = numpy.array(hf.get('label'))
self.data = numpy.transpose(self.data, (0, 3, 1, 2))
self.target = numpy.transpose(self.target, (0, 3, 1, 2))
def __getitem__(self, index):
return torch.from_numpy(self.data[index,:,:,:]).float(), torch.from_numpy(self.target[index,:,:,:]).float()
def __len__(self):
return self.data.shape[0]
def get_test_data(cfg, data_dir, upscale_factor):
data_dir = join(cfg['test']['data_dir'], data_dir)
return Data_test(data_dir, upscale_factor, cfg, transform=transform())
def get_eval_data(cfg, data_dir, upscale_factor):
data_dir = join(cfg['test']['data_dir'], data_dir)
return Data_eval(data_dir, upscale_factor, cfg, transform=transform())
```
#### File: N_SR/model/edsr.py
```python
import os
import torch
import torch.nn as nn
import torch.optim as optim
from model.base_net import *
# from torchvision.transforms import *
from torch.autograd import Variable
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.args = args
num_channels = self.args['data']['n_colors']
scale_factor = self.args['data']['upsacle']
base_filter = 256
n_resblocks = 32
# self.sub_mean = MeanShift(args['data']['rgb_range'])
# self.add_mean = MeanShift(args['data']['rgb_range'], sign=1)
self.head = ConvBlock(num_channels, base_filter, 3, 1, 1, activation='relu', norm=None)
body = [
ResnetBlock(base_filter, 3, 1, 1, 0.1, activation='relu', norm=None) for _ in range(n_resblocks)
]
body.append(ConvBlock(base_filter, base_filter, 3, 1, 1, activation='relu', norm=None))
self.up = Upsampler(scale_factor, base_filter, activation=None)
self.output_conv = ConvBlock(base_filter, num_channels, 3, 1, 1, activation='relu', norm=None)
self.body = nn.Sequential(*body)
# for m in self.modules():
# classname = m.__class__.__name__
# if classname.find('Conv2d') != -1:
# # torch.nn.init.kaiming_normal_(m.weight)
# torch.nn.init.xavier_uniform_(m.weight, gain=1)
# if m.bias is not None:
# m.bias.data.zero_()
# elif classname.find('ConvTranspose2d') != -1:
# # torch.nn.init.kaiming_normal_(m.weight)
# torch.nn.init.xavier_uniform_(m.weight, gain=1)
# if m.bias is not None:
# m.bias.data.zero_()
def forward(self, x):
#x = self.sub_mean(x)
x = self.head(x)
res = x
x = self.body(x)
x = res + x
x = self.up(x)
x = self.output_conv(x)
#x = self.add_mean(x)
return x
if __name__ == "__main__":
input = Variable(torch.FloatTensor(1, 3, 40, 40))
# Net reads everything it needs from a config-style dict (see __init__ above).
args = {'data': {'n_colors': 3, 'upsacle': 2}}  # 'upsacle' follows the key spelling used by this repo
model = Net(args)
out = model(input)
print(out.shape)
```
#### File: N_SR/solver/testsolver.py
```python
from solver.basesolver import BaseSolver
import os, torch, time, cv2, importlib
import torch.backends.cudnn as cudnn
from data.data import *
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
class Testsolver(BaseSolver):
def __init__(self, cfg):
super(Testsolver, self).__init__(cfg)
net_name = self.cfg['algorithm'].lower()
lib = importlib.import_module('model.' + net_name)
net = lib.Net
self.model = net(
args = self.cfg
)
self.fmap_block = list()
self.input_block = list()
## define hook
def forward_hook(self, module, data_input, data_output):
self.fmap_block.append(data_output)
self.input_block.append(data_input)
def check(self):
self.cuda = self.cfg['gpu_mode']
torch.manual_seed(self.cfg['seed'])
if self.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
if self.cuda:
torch.cuda.manual_seed(self.cfg['seed'])
cudnn.benchmark = True
gpus_list = self.cfg['gpus']
self.gpu_ids = []
for str_id in gpus_list:
gid = int(str_id)
if gid >=0:
self.gpu_ids.append(gid)
torch.cuda.set_device(self.gpu_ids[0])
self.model_path = os.path.join(self.cfg['checkpoint'], self.cfg['test']['model'])
self.model = self.model.cuda(self.gpu_ids[0])
self.model = torch.nn.DataParallel(self.model, device_ids=self.gpu_ids)
self.model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage)['net'])
def test(self):
self.model.eval()
avg_time= []
for batch in self.data_loader:
input, target, bicubic, name = Variable(batch[0]), Variable(batch[1]), Variable(batch[2]), batch[3]
if self.cuda:
input = input.cuda(self.gpu_ids[0])
target = target.cuda(self.gpu_ids[0])
bicubic = bicubic.cuda(self.gpu_ids[0])
if self.cfg['algorithm'] == 'VDSR' or self.cfg['algorithm'] == 'SRCNN':
input = bicubic
## hook
# if self.cuda:
# hadle_hook = self.model.module.res_b1.register_forward_hook(self.forward_hook)
# else:
# hadle_hook = self.model.res_b1.register_forward_hook(self.forward_hook)
t0 = time.time()
with torch.no_grad():
prediction = self.model(input)
t1 = time.time()
if self.cfg['data']['normalize'] :
target = (target+1) /2
prediction = (prediction+1) /2
bicubic = (bicubic+1) /2
## remove hook, save feature maps
# hadle_hook.remove()
# self.fmap_block = self.fmap_block[0].squeeze().detach().cpu()
# self.fmap_block = (self.fmap_block*255).numpy().astype(np.uint8)
# for i in range(0, self.fmap_block[0].shape[1]-1):
# plt.imsave('./1/{}.png'.format(str(i)), self.fmap_block[i,:,:], cmap = plt.cm.jet)
# self.fmap_block = list()
# self.input_block = list()
print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
avg_time.append(t1 - t0)
self.save_img(bicubic.cpu().data, name[0][0:-4]+'_bic.png')
self.save_img(target.cpu().data, name[0][0:-4]+'_gt.png')
self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')
print("===> AVG Timer: %.4f sec." % (np.mean(avg_time)))
def eval(self):
self.model.eval()
avg_time= []
for batch in self.data_loader:
input, bicubic, name = Variable(batch[0]), Variable(batch[1]), batch[2]
if self.cuda:
input = input.cuda(self.gpu_ids[0])
bicubic = bicubic.cuda(self.gpu_ids[0])
t0 = time.time()
with torch.no_grad():
prediction = self.model(input)
t1 = time.time()
print("===> Processing: %s || Timer: %.4f sec." % (name[0], (t1 - t0)))
avg_time.append(t1 - t0)
self.save_img(bicubic.cpu().data, name[0][0:-4]+'_Bic.png')
self.save_img(prediction.cpu().data, name[0][0:-4]+'.png')
print("===> AVG Timer: %.4f sec." % (np.mean(avg_time)))
def save_img(self, img, img_name):
save_img = img.squeeze().clamp(0, 1).numpy().transpose(1,2,0)
# save img
save_dir=os.path.join('results/',self.cfg['test']['type'])
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_fn = save_dir +'/'+ img_name
cv2.imwrite(save_fn, cv2.cvtColor(save_img*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])
def run(self):
self.check()
if self.cfg['test']['type'] == 'test':
self.dataset = get_test_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])
self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,
num_workers=self.cfg['threads'])
self.test()
elif self.cfg['test']['type'] == 'eval':
self.dataset = get_eval_data(self.cfg, self.cfg['test']['test_dataset'], self.cfg['data']['upsacle'])
self.data_loader = DataLoader(self.dataset, shuffle=False, batch_size=1,
num_workers=self.cfg['threads'])
self.eval()
else:
raise ValueError('Mode error!')
``` |
{
"source": "jiaming-wang/STP",
"score": 2
} |
#### File: STP/pytorch_code/two-stream.py
```python
import numpy as np
import os
import scipy.io as scio
import csv
import pandas as pd
def main():
path = './txt'
path1 = './txt-rgb'
all_num = 0
true_num = 0
for root, dirs, files in os.walk(path1):
for filespath in files:
txt_name = filespath
rgb_txt = path1 + '/' + txt_name
flow_txt = path + '/' + txt_name
rgb_list = []
flow_list = []
all_list = []
csv_rgb = pd.read_csv(rgb_txt)
csv_flow = pd.read_csv(flow_txt)
result = np.array(csv_rgb)[1:,:] + np.array(csv_flow)
probs = np.argmax(result/2, axis=1)
label = txt_name.split('_')[1]
if label == 'Inshore':
num_label = 0
elif label == 'Offshore':
num_label = 1
elif label == 'Neg':
num_label = 2
elif label == 'Traffic':
num_label = 3
probs = probs - num_label
all_num = all_num + len(probs)
true_num = true_num + np.sum(probs == 0)
print(true_num/all_num)
def load_mat():
path = './mat'
for root, dirs, files in os.walk(path):
for filespath in files:
all_list = scio.loadmat(path + '/' + filespath)['data']
rgb_list1 = all_list[0][0]
flow_list = all_list[0][1]
rgb_list = rgb_list1[1:]
for i in range(0, len(rgb_list)):
result = np.add(np.array(rgb_list[i]), np.array(flow_list[i]))
if __name__ == '__main__':
main()
# load_mat()
``` |
{
"source": "jiamin-he/CSE-258",
"score": 3
} |
#### File: ref/code/pca.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv as csv
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.cross_validation import train_test_split
from math import *
from datetime import datetime
import sklearn.cluster as cluster
from sklearn.decomposition import PCA
def split_list(alist, wanted_parts=1):
length = len(alist)
return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
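# e.g. split_list([1, 2, 3, 4, 5, 6], wanted_parts=3) -> [[1, 2], [3, 4], [5, 6]]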
def rmsle(predicted, actual):
error = 0
for i in range(len(actual)):
error += pow(log(actual[i]+1)-log(predicted[i]+1), 2)
return sqrt(error/len(actual))
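# i.e. RMSLE = sqrt( (1/N) * sum_i (log(predicted_i + 1) - log(actual_i + 1))^2 )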
def remove_negative(items):
newlist = []
for item in items:
if item>0:
newlist.append(item)
else:
newlist.append(0)
return newlist
def getTimeData(df):
datetime_values = df['datetime'].values
hour_values = []
for datetime_value in datetime_values:
datetime_object = datetime.strptime(datetime_value, '%Y-%m-%d %H:%M:%S')
hour_values.append(datetime_object.hour)
df['hour'] = hour_values
return df
def getMonthData(df):
datetime_values = df['datetime'].values
month_values = []
for datetime_value in datetime_values:
datetime_object = datetime.strptime(datetime_value, '%Y-%m-%d %H:%M:%S')
month_values.append(datetime_object.month)
df['month'] = month_values
return df
def transform_data(df):
epoch = datetime.utcfromtimestamp(0)
datetime_values = df['datetime'].values
time_values = []
date_values = []
month_values = []
year_values =[]
weekday_values = []
isSunday_values = []
time_since_epoch_values = []
hour_cluster_values = []
month_cluster_values = []
for datetime_value in datetime_values:
datetime_object = datetime.strptime(datetime_value, '%Y-%m-%d %H:%M:%S')
time_values.append(datetime_object.hour)
date_values.append(datetime_object.day)
month_values.append(datetime_object.month)
year_values.append(datetime_object.year-2011)
weekday_values.append(datetime_object.weekday())
isSunday_values.append(1 if datetime_object.weekday() == 6 else 0)
time_since_epoch_values.append(int((datetime_object-epoch).total_seconds()/3600))
hour_cluster_values.append(hour_clusters[datetime_object.hour])
month_cluster_values.append(month_clusters[datetime_object.month-1])
df['time'] = time_values
df['date'] = date_values
df['month'] = month_values
df['year'] = year_values
df['weekday'] = weekday_values
df['isSunday'] = isSunday_values
df['time_since_epoch'] = time_since_epoch_values
df['hourCluster'] = hour_cluster_values
df['monthCluster'] = month_cluster_values
return df
if __name__ == '__main__':
df = pd.read_csv('../data/train.csv')
test_df = pd.read_csv('../data/test.csv')
hour_df = getTimeData(df)
hour_cluster_data = hour_df.groupby(['hour']).agg(lambda x: x.mean())[['count']]
hour_clust = cluster.KMeans(n_clusters=6)
hour_clusters = np.array(hour_clust.fit_predict(split_list(hour_cluster_data.iloc[:,0].values,24)))
month_df = getMonthData(df)
month_cluster_data = month_df.groupby(['month']).agg(lambda x: x.mean())[['count']]
month_clust = cluster.KMeans(n_clusters=4)
month_clusters = np.array(month_clust.fit_predict(split_list(month_cluster_data.iloc[:,0].values,12)))
df = transform_data(df)
test_df = transform_data(test_df)
df['count'] = [log(1+x) for x in df['count']]
df['casual'] = [log(1+x) for x in df['casual']]
df['registered'] = [log(1+x) for x in df['registered']]
features = ['season','holiday','workingday','weather','temp','atemp','humidity','windspeed','time','weekday','year','monthCluster', 'hourCluster', 'isSunday', 'month', 'date']
X_train_date = df[['date']].values
X_train_data = df[features].values
y_train_data = df[['count', 'casual', 'registered']].values
test_data = test_df[features].values
pca = PCA(n_components=5)
pca.fit(X_train_data)
PCA(copy=True, n_components=2, whiten=False)
print(pca.explained_variance_ratio_)
``` |
{
"source": "jiamo/aiobloom",
"score": 2
} |
#### File: aiobloom/aiobloom/aiobloom.py
```python
import aioredis
import math
import hashlib
from struct import unpack, pack
# modify from https://github.com/jaybaird/python-bloomfilter/blob/master/pybloom/pybloom.py
import sys
try:
import StringIO
import cStringIO
except ImportError:
from io import BytesIO
running_python_3 = sys.version_info[0] == 3
def range_fn(*args):
return range(*args)
def is_string_io(instance):
return isinstance(instance, BytesIO)
def make_hashfuncs(num_slices, num_bits):
if num_bits >= (1 << 31):
fmt_code, chunk_size = 'Q', 8
elif num_bits >= (1 << 15):
fmt_code, chunk_size = 'I', 4
else:
fmt_code, chunk_size = 'H', 2
total_hash_bits = 8 * num_slices * chunk_size
if total_hash_bits > 384:
hashfn = hashlib.sha512
elif total_hash_bits > 256:
hashfn = hashlib.sha384
elif total_hash_bits > 160:
hashfn = hashlib.sha256
elif total_hash_bits > 128:
hashfn = hashlib.sha1
else:
hashfn = hashlib.md5
fmt = fmt_code * (hashfn().digest_size // chunk_size)
num_salts, extra = divmod(num_slices, len(fmt))
if extra:
num_salts += 1
salts = tuple(hashfn(hashfn(pack('I', i)).digest()) for i in range_fn(num_salts))
def _make_hashfuncs(key):
if isinstance(key, str):
key = key.encode('utf-8')
else:
key = str(key).encode('utf-8')
i = 0
for salt in salts:
h = salt.copy()
h.update(key)
for uint in unpack(fmt, h.digest()):
yield uint % num_bits
i += 1
if i >= num_slices:
return
return _make_hashfuncs
REDIS_MAX = 1 << 32
class BloomFilter(object):
def __init__(self, capacity, error_rate=0.001, bloom_key='bloom_key',
redis_pool=None):
if not (0 < error_rate < 1):
raise ValueError("Error_Rate must be between 0 and 1.")
if not capacity > 0:
raise ValueError("Capacity must be > 0")
# given M = num_bits, k = num_slices, P = error_rate, n = capacity
# k = log2(1/P)
# solving for m = bits_per_slice
# n ~= M * ((ln(2) ** 2) / abs(ln(P)))
# n ~= (k * m) * ((ln(2) ** 2) / abs(ln(P)))
# m ~= n * abs(ln(P)) / (k * (ln(2) ** 2))
num_slices = int(math.ceil(math.log(1.0 / error_rate, 2)))
bits_per_slice = int(math.ceil(
(capacity * abs(math.log(error_rate))) /
(num_slices * (math.log(2) ** 2))))
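# Worked example (approximate): capacity=100_000 and error_rate=0.001 give
# num_slices = ceil(log2(1000)) = 10 and bits_per_slice ≈ 143_776, i.e. about
# 1.44 million bits (~180 KB) in the Redis bitmap.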
self._setup(error_rate, num_slices, bits_per_slice, capacity, 0)
self.bloom_key = bloom_key
self.pool = redis_pool
def _setup(self, error_rate, num_slices, bits_per_slice, capacity, count):
self.error_rate = error_rate
self.num_slices = num_slices
self.bits_per_slice = bits_per_slice
self.capacity = capacity
self.num_bits = num_slices * bits_per_slice
if self.num_bits >= REDIS_MAX:
raise Exception("Can't use a number of bits bigger than 2^32; "
                "you may need to increase error_rate or reduce capacity")
self.count = count
self.make_hashes = make_hashfuncs(self.num_slices, self.bits_per_slice)
async def connect(self, redis_url='127.0.0.1:6379'):
if self.pool:
return
host, _, port = redis_url.partition(':')
if not port:
port = 6379
try:
port = int(port)
except ValueError:
raise ValueError(port)
self.pool = await aioredis.create_redis_pool(
(host, port), minsize=10, maxsize=60)
print("redis_pool", self.pool, id(self.pool))
async def exist(self, key):
hashes = self.make_hashes(key)
hashes = list(hashes)
offset = 0
with await self.pool as redis:
bloom_filter = await redis.get(self.bloom_key)
if not bloom_filter:
return False  # nothing has been added yet, so the key cannot be present
include = True
for hash_position in hashes:
index = offset + hash_position
hash_byte_index = index // 8
if hash_byte_index >= len(bloom_filter):
# the bit index falls outside the stored bitmap, so the key was never added:
return False
bit_is_set = bloom_filter[hash_byte_index] >> (7 - index % 8) & 1
if not bit_is_set:
include = False
break
offset += self.bits_per_slice
return include
async def add(self, key, skip_check=False):
hashes = self.make_hashes(key)
if self.count > self.capacity:
raise IndexError("BloomFilter is at capacity")
offset = 0
with await self.pool as redis:
pipe = redis.pipeline()
for hash_position in hashes:
index = offset + hash_position
pipe.setbit(self.bloom_key, index, 1)
offset += self.bits_per_slice
await pipe.execute()
def __getstate__(self):
d = self.__dict__.copy()
del d['make_hashes']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.make_hashes = make_hashfuncs(self.num_slices, self.bits_per_slice)
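# Illustrative usage (assumes a Redis server reachable at 127.0.0.1:6379):
#
#   import asyncio
#
#   async def demo():
#       bf = BloomFilter(capacity=100000, error_rate=0.001, bloom_key='demo_bloom')
#       await bf.connect('127.0.0.1:6379')
#       await bf.add('hello')
#       print(await bf.exist('hello'))   # True
#       print(await bf.exist('absent'))  # False (up to ~0.1% false positives)
#
#   asyncio.get_event_loop().run_until_complete(demo())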
``` |
{
"source": "JiamoLiu/DEAL_Twitter",
"score": 3
} |
#### File: DEAL_Twitter/baseline_code/create_dataset.py
```python
import json
import itertools
from typing import ValuesView
import pandas as pd
import sys
import numpy
from scipy import sparse
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
import scipy.sparse
import ELMO
import math
from_link_file = "bidirectional_test.txt"
from_data_file = "test_node_data.json"
adj_file_name = "A_sp.npz"
train_adj_file_name = "ind_train_A.npz"
train_attr_file_name = "ind_train_X.npz"
attr_file_name = "X_sp.npz"
nodes_file_name = "nodes_keep.npy"
ones_zeroes_file = "pv0.10_pt0.00_pn0.10_arrays.npz"
number_of_samples = 170
train_range = 0.72
val_range = 0.08
def read_json_as_dict(filename, items_needed):
with open(filename) as handle:
jsondict = json.loads(handle.read())
return dict(itertools.islice(jsondict.items(),items_needed))
def read_txt_pandas(filename,deli = " ",first_n = 100):
f = pd.read_csv(filename, delimiter = deli,names= ["A","B"]).head(first_n)
return f
def get_unique_node_ids(pandas_in):
column_values = pandas_in[["A","B"]].values.ravel()
unique_values = pd.unique(column_values)
#print(unique_values)
return unique_values
def get_number_of_unique_nodes(pandas_in):
column_values = pandas_in[["A","B"]].values.ravel()
unique_values = pd.unique(column_values)
return unique_values.shape[0]
def index_id_as_dict(input_array):
res = {}
for i in range(len(input_array)):
res[input_array[i]] = i
#print(res)
return res
def get_adj_as_sparse(node_index, pandas_in):
input_rows = pandas_in.shape[0]
unique_nodes_number = get_number_of_unique_nodes(pandas_in)
row_1s = numpy.zeros(input_rows*2)
col_1s = numpy.zeros(input_rows*2)
for index, row in pandas_in.iterrows():
row_1s[index] = node_index[row["A"]]
col_1s[index] = node_index[row["B"]]
row_1s[index +input_rows] = node_index[row["B"]]
col_1s[index + input_rows] = node_index[row["A"]]
values = numpy.ones(2*input_rows)
return coo_matrix((values, (row_1s, col_1s)), shape=(unique_nodes_number,unique_nodes_number)).tocsr()
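# e.g. edge rows (u, v) and (v, w) over nodes {u, v, w} yield a 3x3 symmetric CSR
# adjacency with ones at (u, v), (v, u), (v, w), (w, v) and zeros elsewhere.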
def get_node_attr_as_array(sparse_adj, node_index, attr_dict):
reverse_node_index = {value:key for key, value in node_index.items()}
res = []
for i in range(sparse_adj.shape[0]):
res.append(attr_dict[str(reverse_node_index[i])])
return res
def get_elmo_embedding_as_sparse(text_aray):
res = []
counter = 0
for node_data in text_aray:
res.append(ELMO.embed_sentence(node_data).detach().numpy())
counter = counter + 1
return csr_matrix(res)
def save_node_index(node_dict):
value = list(node_dict.values())
with open(nodes_file_name, 'wb') as f:
numpy.save(f, numpy.array(value))
def get_link_ones(pandas_in, node_index):
res = []
for index, row in pandas_in.iterrows():
res.append([node_index[row["A"]],node_index[row["B"]]])
return numpy.array(res)
def get_linked_nodes_and_links_from_sparse(sparse_adj):
res = []
connected = []
disconnected = []
for i in range(sparse_adj.shape[0]):
for j in range(sparse_adj.shape[1]):
if i == j:
continue
if (sparse_adj[i,j] == 1):
if i not in res:
res.append(i)
if j not in res:
res.append(j)
if ([i,j] not in connected and [j,i] not in connected):
connected.append([i,j])
if (sparse_adj[i,j] == 0):
if ([i,j] not in disconnected and [j,i] not in disconnected):
disconnected.append([i,j])
print(sparse_adj.shape)
return numpy.array(sorted(res)), numpy.array(connected), numpy.array(disconnected)
def generate_train_val_test_samples(sparse_adj,node_index,node_data):
number_of_nodes = sparse_adj.shape[0]
train_stop = math.floor(number_of_nodes * (train_range + val_range))
val_start = math.floor(number_of_nodes * (train_range))
#print(train_stop)
#print(val_start)
train_adj_matrix = sparse_adj[0:train_stop,0:train_stop]
train_linked_nodes,train_ones,train_zeroes = get_linked_nodes_and_links_from_sparse(train_adj_matrix)
val_adj_matrix = sparse_adj[val_start: train_stop, 0:train_stop]
linked_nodes,val_ones,val_zeroes = get_linked_nodes_and_links_from_sparse(val_adj_matrix)
#print(val_adj_matrix)
test_adj_matrix = sparse_adj[train_stop : sparse_adj.shape[0],:]
linked_nodes,test_ones,test_zeroes = get_linked_nodes_and_links_from_sparse(test_adj_matrix)
attr_arr = get_node_attr_as_array(train_adj_matrix, node_index, node_data)
train_sentence_embed_matrix = get_elmo_embedding_as_sparse(attr_arr)
print(train_sentence_embed_matrix.shape)
numpy.savez(ones_zeroes_file, train_ones,val_ones,val_zeroes,test_ones,test_zeroes)
scipy.sparse.save_npz(train_adj_file_name,train_adj_matrix)
scipy.sparse.save_npz(train_attr_file_name,train_sentence_embed_matrix)
#print(train_pandas)
if __name__ == "__main__":
node_data = read_json_as_dict(from_data_file, sys.maxsize)
links = read_txt_pandas(from_link_file,first_n= number_of_samples)
node_ids = get_unique_node_ids(links)
node_index = index_id_as_dict(node_ids)
adj_matrix = get_adj_as_sparse(node_index,links)
save_node_index(node_index)
generate_train_val_test_samples(adj_matrix, node_index, node_data)
#get_training_adj_attr(links, node_data, adj_matrix)
``` |
{
"source": "jiamo/pcc",
"score": 3
} |
#### File: pcc/ast/ast.py
```python
class ASTNode(object):
def dump(self, indent=0):
raise NotImplementedError
class ExprAST(ASTNode):
pass
class NumberExprAST(ExprAST):
def __init__(self, val):
self.val = val
def dump(self, indent=0):
return '{0}{1}[{2}]'.format(
' ' * indent, self.__class__.__name__, self.val)
class VariableExprAST(ExprAST):
def __init__(self, name):
self.name = name
def dump(self, indent=0):
return '{0}{1}[{2}]'.format(
' ' * indent, self.__class__.__name__, self.name)
class VarExprAST(ExprAST):
def __init__(self, vars, body):
# vars is a sequence of (name, init) pairs
self.vars = vars
self.body = body
def dump(self, indent=0):
prefix = ' ' * indent
s = '{0}{1}\n'.format(prefix, self.__class__.__name__)
for name, init in self.vars:
s += '{0} {1}'.format(prefix, name)
if init is None:
s += '\n'
else:
s += '=\n' + init.dump(indent+2) + '\n'
s += '{0} Body:\n'.format(prefix)
s += self.body.dump(indent + 2)
return s
class UnaryExprAST(ExprAST):
def __init__(self, op, operand):
self.op = op
self.operand = operand
def dump(self, indent=0):
s = '{0}{1}[{2}]\n'.format(
' ' * indent, self.__class__.__name__, self.op)
s += self.operand.dump(indent + 2)
return s
class BinaryExprAST(ExprAST):
def __init__(self, op, lhs, rhs):
self.op = op
self.lhs = lhs
self.rhs = rhs
def dump(self, indent=0):
s = '{0}{1}[{2}]\n'.format(
' ' * indent, self.__class__.__name__, self.op)
s += self.lhs.dump(indent + 2) + '\n'
s += self.rhs.dump(indent + 2)
return s
class IfExprAST(ExprAST):
def __init__(self, cond_expr, then_expr, else_expr):
self.cond_expr = cond_expr
self.then_expr = then_expr
self.else_expr = else_expr
def dump(self, indent=0):
prefix = ' ' * indent
s = '{0}{1}\n'.format(prefix, self.__class__.__name__)
s += '{0} Condition:\n{1}\n'.format(
prefix, self.cond_expr.dump(indent + 2))
s += '{0} Then:\n{1}\n'.format(
prefix, self.then_expr.dump(indent + 2))
s += '{0} Else:\n{1}'.format(
prefix, self.else_expr.dump(indent + 2))
return s
class ForExprAST(ExprAST):
def __init__(self, id_name, start_expr, end_expr, step_expr, body):
self.id_name = id_name
self.start_expr = start_expr
self.end_expr = end_expr
self.step_expr = step_expr
self.body = body
def dump(self, indent=0):
prefix = ' ' * indent
s = '{0}{1}\n'.format(prefix, self.__class__.__name__)
s += '{0} Start [{1}]:\n{2}\n'.format(
prefix, self.id_name, self.start_expr.dump(indent + 2))
s += '{0} End:\n{1}\n'.format(
prefix, self.end_expr.dump(indent + 2))
s += '{0} Step:\n{1}\n'.format(
prefix, self.step_expr.dump(indent + 2))
s += '{0} Body:\n{1}\n'.format(
prefix, self.body.dump(indent + 2))
return s
class CallExprAST(ExprAST):
def __init__(self, callee, args):
self.callee = callee
self.args = args
def dump(self, indent=0):
s = '{0}{1}[{2}]\n'.format(
' ' * indent, self.__class__.__name__, self.callee)
for arg in self.args:
s += arg.dump(indent + 2) + '\n'
return s[:-1] # snip out trailing '\n'
class PrototypeAST(ASTNode):
def __init__(self, name, argnames, isoperator=False, prec=0):
self.name = name
self.argnames = argnames
self.isoperator = isoperator
self.prec = prec
def is_unary_op(self):
return self.isoperator and len(self.argnames) == 1
def is_binary_op(self):
return self.isoperator and len(self.argnames) == 2
def get_op_name(self):
assert self.isoperator
return self.name[-1]
def dump(self, indent=0):
s = '{0}{1} {2}({3})'.format(
' ' * indent, self.__class__.__name__, self.name,
', '.join(self.argnames))
if self.isoperator:
s += '[operator with prec={0}]'.format(self.prec)
return s
class FunctionAST(ASTNode):
def __init__(self, proto, body):
self.proto = proto
self.body = body
_anonymous_function_counter = 0
@classmethod
def create_anonymous(klass, expr):
"""Create an anonymous function to hold an expression."""
klass._anonymous_function_counter += 1
return klass(
PrototypeAST('_anon{0}'.format(klass._anonymous_function_counter),
[]),
expr)
def is_anonymous(self):
return self.proto.name.startswith('_anon')
def dump(self, indent=0):
s = '{0}{1}[{2}]\n'.format(
' ' * indent, self.__class__.__name__, self.proto.dump())
s += self.body.dump(indent + 2) + '\n'
return s
```
#### File: pcc/codegen/c_codegen.py
```python
import llvmlite.ir as ir
from collections import ChainMap
from contextlib import contextmanager
from llvmlite.ir import IRBuilder
from ..ast import c_ast as c_ast
bool_t = ir.IntType(1)
int8_t = ir.IntType(8)
int32_t = ir.IntType(32)
int64_t = ir.IntType(64)
voidptr_t = int8_t.as_pointer()
int64ptr_t = int64_t.as_pointer()
true_bit = bool_t(1)
false_bit = bool_t(0)
true_byte = int8_t(1)
false_byte = int8_t(0)
cstring = voidptr_t
class CodegenError(Exception):
pass
def get_ir_type(type_str):
# only support int and double
if type_str == "int":
ir_type = int64_t
else:
ir_type = ir.DoubleType()
return ir_type
def get_ir_type_from_node(node):
if isinstance(node.type, c_ast.PtrDecl):
# only one level of pointer is supported, to keep things simple
return_type_str = node.type.type.type.names[0]
if return_type_str == "int":
data_ir_type = ir.IntType(64)
else:
data_ir_type = ir.DoubleType()
ir_type = ir.PointerType(data_ir_type)
else:
return_type_str = node.type.type.names[0]
if return_type_str == "int":
ir_type = ir.IntType(64)
else:
ir_type = ir.DoubleType()
return ir_type
class LLVMCodeGenerator(object):
def __init__(self):
self.module = ir.Module()
#
self.builder = None
self.func_symtab = {}
self.func_tyinfo = {}
self.global_symtab = {}
self.global_tyinfo = {}
self.global_builder:IRBuilder = ir.IRBuilder()
self.in_builder = None
self.env = ChainMap()
self.nlabels = 0
self.function = None
self.in_global = True
fnty = ir.FunctionType(int32_t, [cstring], var_arg=True)
callee_func = ir.Function(self.module, fnty, name="printf")
fnty1 = ir.FunctionType(int64ptr_t, [int64_t], var_arg=True)
callee_func1 = ir.Function(self.module, fnty1, name="malloc")
self.define('printf', (fnty, callee_func))
self.define('malloc', (fnty1, callee_func1))
def define(self, name, val):
self.env[name] = val
def lookup(self, name):
return self.env[name]
def new_label(self, name):
self.nlabels += 1
return f'label_{name}_{self.nlabels}'
@contextmanager
def new_scope(self):
self.env = self.env.new_child()
yield
self.env = self.env.parents
@contextmanager
def new_function(self):
oldfunc = self.function
oldbuilder = self.builder
self.in_global = False
try:
yield
finally:
self.function = oldfunc
self.builder = oldbuilder
self.in_global = True
def generate_code(self, node):
normal = self.codegen(node)
        # the final block may end without a terminator (for example after an
        # if/else with an empty branch); emit a default "return 0" then
if self.builder:
if not self.builder.block.is_terminated:
self.builder.ret(ir.Constant(ir.IntType(64), int(0)))
return normal
def create_entry_block_alloca(
self, name, type_str, size, array_list=None, point_level=0):
ir_type = None
if type_str == "int":
ir_type = ir.IntType(64)
elif type_str == "double":
ir_type = ir.DoubleType()
if array_list is not None:
reversed_list = reversed(array_list)
for dim in reversed_list:
ir_type = ir.ArrayType(ir_type, dim)
ir_type.dim_array = array_list
if point_level != 0:
for level in range(point_level):
ir_type = ir.PointerType(ir_type)
if not self.in_global:
ret = self.builder.alloca(ir_type, size=None, name=name)
self.define(name, (ir_type, ret))
else:
ret = ir.GlobalVariable(self.module, ir_type, name)
self.define(name, (ir_type, ret))
return ret, ir_type
def codegen(self, node):
method = 'codegen_' + node.__class__.__name__
return getattr(self, method)(node)
def codegen_FileAST(self, node):
for ext in node.ext:
self.codegen(ext)
def codegen_NumberExprAST(self, node):
return ir.values.Constant(ir.DoubleType(), float(node.val)), None
def codegen_Constant(self, node):
node.show()
if node.type == "int":
return ir.values.Constant(ir.IntType(64), int(node.value)), None
elif node.type == 'string':
b = bytearray(node.value[1:-1] + '\00', encoding='ascii')
n = len(b)
array = ir.ArrayType(ir.IntType(8), n)
tmp = ir.values.Constant(
array,
b)
            return tmp, None
else:
return ir.values.Constant(ir.DoubleType(), float(node.value)), None
def codegen_Assignment(self, node):
node.show()
lv, lv_addr = self.codegen(node.lvalue)
rv, _ = self.codegen(node.rvalue)
result = None
dispatch_type_double = 1
dispatch_type_int = 0
dispatch_dict = {
("+=", dispatch_type_double): self.builder.fadd,
("+=", dispatch_type_int): self.builder.add,
("-=", dispatch_type_double): self.builder.fsub,
("-=", dispatch_type_int): self.builder.sub,
}
if isinstance(lv.type, ir.IntType) and isinstance(rv.type, ir.IntType):
dispatch_type = dispatch_type_int
else:
dispatch_type = dispatch_type_double
dispatch = (node.op, dispatch_type)
handle = dispatch_dict.get(dispatch)
if node.op == '=':
result = self.builder.store(rv, lv_addr)
else:
addresult = handle(lv, rv, 'addtmp')
result = self.builder.store(addresult, lv_addr)
return result, None
def codegen_VariableExprAST(self, node):
var_addr = self.func_symtab[node.name]
return self.builder.load(var_addr, node.name), None
def codegen_UnaryExprAST(self, node):
operand, _ = self.codegen(node.operand)
func = self.module.globals.get('unary{0}'.format(node.op))
return self.builder.call(func, [operand], 'unop'), None
def codegen_UnaryOp(self, node):
node.show()
result = None
result_ptr = None
# import pdb;pdb.set_trace()
        # TODO: for now ++ and -- are only supported on integer variables
        if node.op == "p++":
            _, lv_addr = self.lookup(node.expr.name)
            lv = self.builder.load(lv_addr, node.expr.name)
            rv = ir.Constant(ir.IntType(64), 1)
            addresult = self.builder.add(lv, rv, 'addtmp')
            result = self.builder.store(addresult, lv_addr)
if node.op == "p--":
_, lv_addr = self.lookup(node.expr.name)
lv = self.builder.load(lv_addr, node.expr.name)
rv = ir.Constant(ir.IntType(64), 1)
addresult = self.builder.sub(lv, rv, 'addtmp')
result = self.builder.store(addresult, lv_addr)
if node.op == '*':
name_ir, name_ptr = self.codegen(node.expr)
# result_ptr = self.builder.load(name_ptr)
result_ptr = name_ir
result = self.builder.load(result_ptr)
if node.op == '&':
name_ir, name_ptr = self.codegen(node.expr)
result_ptr = name_ptr
result = result_ptr # got point from value is the result
return result, result_ptr
def codegen_BinaryOp(self, node):
lhs, _ = self.codegen(node.left)
rhs, _ = self.codegen(node.right)
# import pdb;pdb.set_trace()
dispatch_type_double = 1
dispatch_type_int = 0
dispatch_dict = {
("+", dispatch_type_double): self.builder.fadd,
("+", dispatch_type_int): self.builder.add,
("-", dispatch_type_double): self.builder.fsub,
("-", dispatch_type_int): self.builder.sub,
("*", dispatch_type_double): self.builder.fmul,
("*", dispatch_type_int): self.builder.mul,
}
if isinstance(lhs.type, ir.IntType) and isinstance(rhs.type,
ir.IntType):
dispatch_type = dispatch_type_int
else:
dispatch_type = dispatch_type_double
dispatch = (node.op, dispatch_type)
handle = dispatch_dict.get(dispatch)
# import pdb;pdb.set_trace()
if node.op in ['+', '-', '*']:
return handle(lhs, rhs, 'tmp'), None
elif node.op in [">", "<", ">=", "<=", "!=", "=="]:
if dispatch_type == dispatch_type_int:
cmp = self.builder.icmp_signed(node.op, lhs, rhs, 'cmptmp')
return self.builder.uitofp(cmp, ir.DoubleType(),
'booltmp'), None
else:
cmp = self.builder.fcmp_unordered(node.op, lhs, rhs, 'cmptmp')
return self.builder.uitofp(cmp, ir.DoubleType(),
'booltmp'), None
else:
func = self.module.globals.get('binary{0}'.format(node.op))
return self.builder.call(func, [lhs, rhs], 'binop'), None
def codegen_If(self, node):
node.show()
cond_val, _ = self.codegen(node.cond)
cmp = self.builder.fcmp_ordered(
'!=', cond_val, ir.Constant(ir.DoubleType(), 0.0))
then_bb = self.builder.function.append_basic_block('then')
else_bb = self.builder.function.append_basic_block('else')
merge_bb = self.builder.function.append_basic_block('ifend')
self.builder.cbranch(cmp, then_bb, else_bb)
with self.new_scope():
self.builder.position_at_end(then_bb)
then_val, _ = self.codegen(node.iftrue)
            # the 'then' block may already be terminated, e.g. by a
            # 'continue' inside a loop:
            #   while (1) { n = n + 1; if (n == 5) { continue; } }
            # nothing after the continue is reachable, so only branch to
            # the merge block when the block is still open
if not then_bb.is_terminated:
self.builder.branch(merge_bb)
with self.new_scope():
self.builder.position_at_end(else_bb)
if node.iffalse:
elseval, _ = self.codegen(node.iffalse)
if not else_bb.is_terminated:
self.builder.branch(merge_bb)
# context.builder.branch(merge)
self.builder.position_at_end(merge_bb) # begin at end to generate code
# self.builder.block = merge_bb
return None, None
def codegen_NoneType(self, node):
return None, None
def codegen_For(self, node):
        # make sure new instructions go to the end of the current block
        saved_block = self.builder.block
        self.builder.position_at_end(saved_block)
        start_val, _ = self.codegen(node.init)
        # blocks for the loop: condition test, body, and the increment step
        test_bb = self.builder.function.append_basic_block('test')
        loop_bb = self.builder.function.append_basic_block('loop')
        next_bb = self.builder.function.append_basic_block('next')
        # the after-loop block is created detached and appended later so it
        # ends up after the loop body in the emitted IR
        after_loop_label = self.new_label("afterloop")
        after_bb = ir.Block(self.builder.function, after_loop_label)
        # self.builder.function.append_basic_block('afterloop')
self.builder.branch(test_bb)
self.builder.position_at_end(test_bb)
endcond, _ = self.codegen(node.cond)
cmp = self.builder.fcmp_ordered(
'!=', endcond, ir.values.Constant(ir.DoubleType(), 0.0),
'loopcond')
self.builder.cbranch(cmp, loop_bb, after_bb)
with self.new_scope():
self.define('break', after_bb)
self.define('continue', next_bb)
self.builder.position_at_end(loop_bb)
            body_val, _ = self.codegen(node.stmt)  # generate the loop body
self.builder.branch(next_bb)
self.builder.position_at_end(next_bb)
self.codegen(node.next)
self.builder.branch(test_bb)
        # append_basic_block would generate a fresh label, so the detached
        # block is appended directly to keep after_loop_label:
# after_bb = self.builder.function.append_basic_block(after_loop_label)
self.builder.function.basic_blocks.append(after_bb)
self.builder.position_at_end(after_bb)
return ir.values.Constant(ir.DoubleType(), 0.0), None
def codegen_While(self, node):
node.show()
saved_block = self.builder.block
id_name = node.__class__.__name__
self.builder.position_at_end(saved_block)
        # blocks for the loop: condition test, body, and after-loop exit;
        # they are created empty here and filled in below
        test_bb = self.builder.function.append_basic_block('test')
loop_bb = self.builder.function.append_basic_block('loop')
after_bb = self.builder.function.append_basic_block('afterloop')
self.builder.branch(test_bb)
self.builder.position_at_start(test_bb)
endcond, _ = self.codegen(node.cond)
cmp = self.builder.fcmp_ordered(
'!=', endcond, ir.values.Constant(ir.DoubleType(), 0.0),
'loopcond')
self.builder.cbranch(cmp, loop_bb, after_bb)
with self.new_scope():
self.define('break', after_bb)
self.define('continue', test_bb)
self.builder.position_at_end(loop_bb)
body_val, _ = self.codegen(node.stmt)
            # after evaluating the body, jump back to the condition test;
            # code after the loop is emitted into after_bb
self.builder.branch(test_bb)
self.builder.position_at_end(after_bb)
        # the 'while' statement always evaluates to 0
        return ir.values.Constant(ir.DoubleType(), 0.0), None
def codegen_Break(self, node):
self.builder.branch(self.lookup('break'))
def codegen_Continue(self, node):
self.builder.branch(self.lookup('continue'))
def codegen_Cast(self, node):
node.show()
expr, ptr = self.codegen(node.expr)
dest_type_str = node.to_type.type.type.names[0]
match (type(expr.type), dest_type_str):
# ir.types different from ir.IntType
case (ir.types.DoubleType, "int"):
return self.builder.fptosi(expr, int64_t), None
def codegen_FuncCall(self, node):
node.show()
callee = None
if isinstance(node.name, c_ast.ID):
callee = node.name.name
_, callee_func = self.lookup(callee)
call_args = []
if node.args:
call_args = [self.codegen(arg)[0] for arg in node.args.exprs]
        # printf and malloc are special-cased (hard-coded) for now
if callee == "printf":
data_fmt = call_args[0]
global_fmt = ir.GlobalVariable(
self.module, data_fmt.type, "printf_format")
global_fmt.initializer = data_fmt
format_ptr = self.builder.bitcast(global_fmt, cstring)
return self.builder.call(
callee_func, [format_ptr]+call_args[1:], 'calltmp'), None
elif callee == 'malloc':
return self.builder.call(
callee_func, call_args[0:], 'calltmp'), None
else:
            if callee_func is None or not isinstance(callee_func, ir.Function):
                raise CodegenError('Call to unknown function', node.name)
            if node.args and len(callee_func.args) != len(node.args.exprs):
                raise CodegenError('Call argument length mismatch', node.name)
return self.builder.call(callee_func, call_args, 'calltmp'), None
def codegen_Decl(self, node):
if isinstance(node.type, c_ast.TypeDecl):
type_str = node.type.type.names[0]
# import pdb;pdb.set_trace()
if type_str == "int":
ir_type = ir.IntType(64)
init = 0
else:
ir_type = ir.DoubleType()
init = 0.0
# import pdb;pdb.set_trace()
if node.init is not None:
init_val, _ = self.codegen(node.init)
else:
init_val = ir.values.Constant(ir_type, init)
var_addr, var_ir_type = self.create_entry_block_alloca(
node.name, type_str, 1)
if isinstance(init_val.type, ir.IntType) and \
isinstance(ir_type, ir.DoubleType):
if self.builder:
init_val = self.builder.uitofp(init_val, ir.DoubleType(), 'booltmp')
if self.in_global:
var_addr.initializer = init_val
else:
self.builder.store(init_val, var_addr)
elif isinstance(node.type, c_ast.ArrayDecl):
            # for now only int element types are really supported here
array_list = []
array_node = node.type
while True:
array_next_type = array_node.type
if isinstance(array_next_type, c_ast.TypeDecl):
array_list.append(int(array_node.dim.value))
type_str = array_next_type.type.names[0]
break
elif isinstance(array_next_type, c_ast.ArrayDecl):
array_list.append(int(array_node.dim.value))
array_node = array_next_type
continue
pass
var_addr, var_ir_type = self.create_entry_block_alloca(
node.name, type_str, 1, array_list)
elif isinstance(node.type, c_ast.PtrDecl):
point_level = 1
# the type is recursive.
sub_node = node.type
while True:
sub_next_type = sub_node.type
if isinstance(sub_next_type, c_ast.TypeDecl):
# pointer_list.append(int(sub_node.dim.value))
#
type_str = sub_next_type.type.names[0]
break
elif isinstance(sub_next_type, c_ast.PtrDecl):
                    # only plain multi-level pointers (e.g. **p) are handled
                    # for now, not arrays of pointers like *a[4]
point_level += 1
sub_node = sub_next_type
continue
pass
var_addr, var_ir_type= self.create_entry_block_alloca(
node.name, type_str, 1, point_level=point_level)
else:
return None, None
return None, var_addr
def codegen_ID(self, node):
node.show()
valtype, var = self.lookup(node.name)
node.ir_type = valtype
return self.builder.load(var), var
def codegen_ArrayRef(self, node):
node.show()
name = node.name
subscript = node.subscript
name_ir, name_ptr = self.codegen(name)
value_ir_type = name.ir_type.element
subscript_ir, subscript_ptr = self.codegen(subscript)
        # scale the subscript by the width in bytes of one element (or one
        # row for a multi-dimensional array) and index via pointer arithmetic
        if len(name.ir_type.dim_array) > 1:
            level_length = name.ir_type.dim_array[-1] * 8
        else:
            level_length = 1 * 8
        dim_length = ir.Constant(ir.IntType(64), level_length)
        subscript_ir = self.builder.fptoui(subscript_ir, ir.IntType(64))
        subscript_value_in_array = self.builder.mul(
            dim_length, subscript_ir, "array_add")
name_ptr_int = self.builder.ptrtoint(name_ptr, ir.IntType(64))
value_ptr = self.builder.add(
subscript_value_in_array, name_ptr_int, 'addtmp')
# import pdb;pdb.set_trace()
value_ptr = self.builder.inttoptr(
value_ptr, ir.PointerType(value_ir_type))
value_result = self.builder.load(value_ptr)
# the node.ir_type should be used in somewhere
node.ir_type = name.ir_type.gep(ir.Constant(ir.IntType(64), 0))
node.ir_type.dim_array = name.ir_type.dim_array[:-1]
return value_result, value_ptr
def codegen_stmt(self, node):
typ = type(node)
if typ in (
c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
c_ast.StructRef):
return self.codegen(node)[0], None
        elif typ in (c_ast.Compound,):
            # a compound statement generates code for its own child
            # statements in codegen_Compound
            return self.codegen(node)[0], None
def codegen_Return(self, node):
node.show()
retval, _ = self.codegen(node.expr)
        return self.builder.ret(retval), None
def codegen_Compound(self, node):
node.show()
if node.block_items:
for stmt in node.block_items:
self.codegen(stmt)
return None, None
def codegen_FuncDecl(self, node):
node.show()
if isinstance(node.type, c_ast.PtrDecl):
            # only one level of pointer is supported, to keep things simple
return_type_str = node.type.type.type.names[0]
if return_type_str == "int":
data_ir_type = ir.IntType(64)
else:
data_ir_type = ir.DoubleType()
ir_type = ir.PointerType(data_ir_type)
else:
return_type_str = node.type.type.names[0]
if return_type_str == "int":
ir_type = ir.IntType(64)
else:
ir_type = ir.DoubleType()
return ir_type, None
def codegen_FuncDef(self, node):
node.show()
# import pdb;pdb.set_trace()
        # we are entering a (possibly nested) function body
        self.in_builder = False
        # take the return type straight from the declaration rather than
        # going through codegen_Decl
ir_type, _ = self.codegen(node.decl.type)
funcname = node.decl.name
if funcname == "main":
self.return_type = ir_type # for call in C
arg_types = []
if node.decl.type.args:
for arg_type in node.decl.type.args.params:
arg_types.append(get_ir_type_from_node(arg_type))
with self.new_function():
self.function = ir.Function(
self.module,
ir.FunctionType(ir_type, arg_types),
name=funcname)
self.block = self.function.append_basic_block()
self.builder = ir.IRBuilder(self.block)
self.define(funcname, (ir_type, self.function))
if node.decl.type.args:
for i, p in enumerate(node.decl.type.args.params):
arg_type = arg_types[i]
# breakpoint()
var = self.builder.alloca(arg_type, name=p.name)
self.define(p.name, (arg_type, var))
self.builder.store(self.function.args[i], var)
self.codegen(node.body)
if not self.builder.block.is_terminated:
self.builder.ret(ir.Constant(ir.IntType(64), int(0)))
return None, None
def codegen_FunctionAST(self, node):
self.func_symtab = {}
func, _ = self.codegen(node.proto)
bb_entry = func.append_basic_block('entry')
self.builder = ir.IRBuilder(bb_entry)
# Add all arguments to the symbol table and create their allocas
for i, arg in enumerate(func.args):
arg.name = node.proto.argnames[i]
alloca = self.builder.alloca(ir.DoubleType(), name=arg.name)
self.builder.store(arg, alloca)
self.func_symtab[arg.name] = alloca
retval, _ = self.codegen(node.body)
self.builder.ret(retval)
return func, None
```
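A sketch of how this code generator might be driven end to end. The repository's own parser/evaluator (`CEvaluator`, used in the tests below) is not shown here, so this example assumes a `pycparser` front end producing an AST compatible with the bundled `c_ast`; treat it as illustrative rather than the project's actual entry point.
```python
from pycparser import CParser   # assumed front end, not part of this module

source = """
int main() {
    int a = 3;
    int b = 4;
    return a + b;
}
"""

ast = CParser().parse(source)    # a FileAST
gen = LLVMCodeGenerator()
gen.generate_code(ast)           # walks the AST and fills gen.module
print(str(gen.module))           # textual LLVM IR (printf/malloc are pre-declared)
```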
#### File: pcc/tests/test_array.py
```python
import sys
import os
this_dir = os.path.dirname(__file__)
parent_dir = os.path.dirname(this_dir)
sys.path.insert(0, parent_dir)
from pcc.evaluater.c_evaluator import CEvaluator
import unittest
class TestArray(unittest.TestCase):
# def _assert_body(self, toplevel, expected):
# """Assert the flattened body of the given toplevel function"""
# self.assertIsInstance(toplevel, FunctionAST)
# self.assertEqual(self._flatten(toplevel.body), expected)
def test_array(self):
# Evaluate some code.
pcc = CEvaluator()
ret = pcc.evaluate('''
int main(){
int i = 1;
int j = 1;
int a[100];
int len = 100;
int len2 = 10;
int sum = 0 ;
for(i = 0; i < len ; i++){
a[i] = i + 1;
}
for(i = 0; i < len ; i++){
sum += a[i];
}
return sum ;
}
''', llvmdump=True)
print("The answer is %d"%ret)
assert (ret == 5050)
if __name__ == '__main__':
unittest.main()
```
#### File: pcc/tests/test_c_evluater.py
```python
import sys
import os
this_dir = os.path.dirname(__file__)
parent_dir = os.path.dirname(this_dir)
sys.path.insert(0, parent_dir)
from pcc.evaluater.c_evaluator import CEvaluator
import unittest
class TestCevluatar(unittest.TestCase):
def test_simple(self):
pcc = CEvaluator()
# kalei.evaluate('def binary: 1 (x y) y')
ret = pcc.evaluate('''
int add(int x, int y){
return x + y;
}
int main(){
int a = 3;
int b = 4;
return add(a, b);
}
''', llvmdump=True)
print("The answer is {}".format(ret))
assert (ret == 7)
        # from here main() could also be re-evaluated by hand:
        # print(pcc.evaluate('main()'))
if __name__ == '__main__':
# Evaluate some code
# if __name__ == '__main__':
unittest.main()
```
#### File: pcc/tests/test_simple_init_func_assign.py
```python
import os
import sys
this_dir = os.path.dirname(__file__)
parent_dir = os.path.dirname(this_dir)
sys.path.insert(0, parent_dir)
from pcc.evaluater.c_evaluator import CEvaluator
import unittest
class TestSimpleFunc(unittest.TestCase):
def test_init_assign_func_call(self):
pcc = CEvaluator()
ret = pcc.evaluate('''
int f(int x){
return 4;
}
int main(){
int a = 3;
int b = f(3);
if (b > a){
b += 3;
}
return b - a ;
}
''', llvmdump=True)
print("The answer is %d" % ret)
``` |
{
"source": "jiamo/pfcm",
"score": 2
} |
#### File: pfcm/tests/test_async_pfcm.py
```python
from pfcm.pfcm import Pfcm, FcmAPI
import os
import yaml
import datetime
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(cur_dir)
print(parent_dir)
private_file = os.path.join(parent_dir, 'service_token.json')
with open('config.yml') as f:
    config = yaml.safe_load(f)
print(config)
project_name = config["default"]["project_name"]
registration_id = config["default"]["one_token"]
@pytest.mark.asyncio
async def test_pfcm_send_one_device(event_loop):
message_title = "one device"
message_body = "{} body of message".format(datetime.datetime.now())
fsmapi = FcmAPI(project_name, private_file, event_loop)
pfcm = Pfcm(fsmapi)
for i in range(10):
results = await pfcm.send_msg_async(
registration_id=registration_id,
message_title=message_title,
message_body=message_body)
for result in results:
print(result)
```
#### File: pfcm/tests/test_pfcm.py
```python
from pfcm.pfcm import Pfcm, FcmAPI
import os
import yaml
import datetime
cur_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(cur_dir)
print(parent_dir)
private_file = os.path.join(parent_dir, 'service_token.json')
with open('config.yml') as f:
    config = yaml.safe_load(f)
print(config)
project_name = config["default"]["project_name"]
registration_id = config["default"]["one_token"]
def test_pfcm_send_one_device():
message_title = "one device"
message_body = "{} body of message".format(datetime.datetime.now())
fsmapi = FcmAPI(project_name, private_file)
pfcm = Pfcm(fsmapi)
for i in range(10):
results = pfcm.send_msg(
registration_id=registration_id,
message_title=message_title,
message_body=message_body)
for result in results:
print(result)
def test_pfcm_send_topic():
message_title = "topic"
message_body = "{} body of message".format(datetime.datetime.now())
fsmapi = FcmAPI(project_name, private_file)
pfcm = Pfcm(fsmapi)
topic = "Global_Topic_Dev"
results = pfcm.send_msg(
topic=topic,
message_title=message_title,
message_body=message_body)
for result in results:
print(result)
``` |
{
"source": "jiamo/polly_read",
"score": 3
} |
#### File: polly_read/polly_read/txt_to_ssml.py
```python
import boto3
from botocore.exceptions import BotoCoreError, ClientError
from contextlib import closing
from .ssml_builder import Paragraph
import argparse
import os
import io
import sys
import textwrap
import glob
polly = boto3.client("polly")
voices = ['Geraint', 'Gwyneth', 'Mads', 'Naja',
'Hans', 'Marlene', 'Nicole', 'Russell', 'Amy', 'Brian', 'Emma',
'Raveena', 'Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly',
'Matthew', 'Salli', 'Conchita', 'Enrique', 'Miguel', 'Penelope',
'Chantal', 'Celine', 'Mathieu', 'Dora', 'Karl', 'Carla', 'Giorgio',
'Mizuki', 'Liv', 'Lotte', 'Ruben', 'Ewa', 'Jacek', 'Jan', 'Maja',
'Ricardo', 'Vitoria', 'Cristiano', 'Ines', 'Carmen', 'Maxim',
'Tatyana', 'Astrid', 'Filiz', 'Vicki', 'Takumi', 'Seoyeon', 'Aditi']
default_voice = 'Joey'
class TextToMp3:
def __init__(self, text, out_bytesio, voice=default_voice):
"""
The main reason using io.BytesIO here is for aws service
and save directly to mp3 in s3
"""
self.out_bytesio = out_bytesio
self.rules = {}
self.voice = voice
text = text.replace("&", "&")
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("\n", "") # only remove \n in paragraph
self.pieces = text.split("\n")
def start(self):
for piece in self.pieces:
self.add_paragraph(piece)
self.close()
def close(self):
self.out_bytesio.seek(0)
def add_paragraph(self, text):
# build new p
if len(text) >= 1500: # the TextLengthExceededException of polly
# split into different paragraph
sentences = text.split(".")
print(sentences)
p = Paragraph([])
while sentences:
                # the '.' stripped by split() is added back below so the
                # sentence boundaries are preserved in the speech
                sentence = sentences.pop(0)
sentence = sentence.strip(" ")
if not sentence:
continue
if p.add_sentence(sentence + "."):
continue
else:
if not p.is_empty():
self.translate(p.to_ssml())
p.clean()
                        # put the sentence that did not fit back and retry it
                        # against the now-empty paragraph
                        # print("failed sentence {}".format(sentence))
                        sentences.insert(0, sentence)
                        continue
                    else:
                        # the sentence alone exceeds the 1500-char limit and
                        # needs to be split manually
                        print("This sentence is too long:\n{}".format(
                            sentence))
                        sys.exit()
if not p.is_empty():
self.translate(p.to_ssml())
else:
p = Paragraph.build_from_text(text)
self.translate(p.to_ssml())
def translate(self, ssml_string):
print("translate {} : {}".format(len(ssml_string), ssml_string))
try:
# Request speech synthesis
response = polly.synthesize_speech(
Text=ssml_string,
TextType="ssml",
OutputFormat="mp3",
VoiceId=self.voice)
except (BotoCoreError, ClientError) as error:
print(error)
sys.exit(-1)
# Access the audio stream from the response
if "AudioStream" in response:
with closing(response["AudioStream"]) as stream:
try:
self.out_bytesio.write(stream.read())
except IOError as error:
print(error)
sys.exit(-1)
else:
print("Could not stream audio")
sys.exit(-1)
print("translate finish")
class DirTranslate():
def __init__(self, indir, voice):
self.indir = indir
self.voice = voice
def start(self):
txts = glob.glob(self.indir + "/*.txt")
txts = sorted(txts)
for txt in txts:
print("handing txt {}".format(txt))
with open(txt, "r", errors='ignore') as f:
text = f.read()
mp3 = txt[:-3] + "mp3"
io_outfile = io.BytesIO()
txt2mp3 = TextToMp3(text, io_outfile, self.voice)
txt2mp3.start()
with open(mp3, "wb") as f:
f.write(io_outfile.read())
``` |
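A short usage sketch for the classes above (not from the repository). It assumes AWS credentials are configured for `boto3`, and the output path is hypothetical; `DirTranslate` does the same over every `.txt` file in a directory.
```python
import io

text = "Hello world. This is a short test paragraph."
buf = io.BytesIO()
TextToMp3(text, buf, voice="Joanna").start()   # synthesises speech via Amazon Polly
with open("hello.mp3", "wb") as f:             # hypothetical output file
    f.write(buf.read())
```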
{
"source": "jiamo/ppjson",
"score": 2
} |
#### File: ppjson/ppjson/ppjson.py
```python
from sly import Lexer, Parser
import sys
from copy import deepcopy
class JsonLexer(Lexer):
tokens = {
LSBRACKET,
RSBRACKET,
LBRACE,
RBRACE,
COLON,
STRING,
SINGLE_STRING,
CONSTANT,
COMMA,
INT,
FLOAT,
LITERRAL_VALUE,
TRUE,
FALSE,
NULL,
}
# WS = r'[ \t\n\r]+'
# todo how to do it
# literals = { '=', '+', '-', '*', '/', '(', ')' }
ignore = ' \t\n\r'
# Tokens
LITERRAL_VALUE = r'[a-zA-Z_][a-zA-Z0-9_]*'
LITERRAL_VALUE['true'] = TRUE
LITERRAL_VALUE['false'] = FALSE
LITERRAL_VALUE['null'] = NULL
LSBRACKET = r'\['
RSBRACKET = r'\]'
LBRACE = r'\{'
RBRACE = r'\}'
COLON = r':'
COMMA = r','
@_(r'"([ !#-\[\]-\U0010ffff]+|\\(["\/\\bfnrt]|u[0-9A-Fa-f]{4}))*"')
def STRING(self, t):
t.value = str(t.value[1:-1])
return t
    # a FLOAT must contain a fraction or an exponent; plain integers fall
    # through to the INT rule below
    @_(r'-?(0|[1-9][0-9]*)(\.[0-9]+([Ee][+-]?[0-9]+)?|[Ee][+-]?[0-9]+)')
    def FLOAT(self, t):
        t.value = float(t.value)
        return t
@_(r'-?(0|[1-9][0-9]*)')
def INT(self, t):
t.value = int(t.value)
return t
# @_(r"'([^'\n]|(\\'))*'")
# def STRING(self, t):
# t.value = str(t.value[1:-1])
# return t
@_(r'\n+')
def newline(self, t):
self.lineno += t.value.count('\n')
def error(self, t):
raise Exception(str(t))
class JsonParser(Parser):
debugfile = 'parser.out'
tokens = JsonLexer.tokens
ARRAY = 1
DICT = 2
def __init__(self):
self.names = {}
self.value = None
self.json_type = None
self.json_value = None
@_('value')
def json_text(self, p):
print("json_text")
self.json_value = p.value
print("self.json_value", p.value)
@_('')
def empty(self, p):
print("empty")
@_('object')
def value(self, p):
print("value-object:", p.object)
return p.object
@_('array')
def value(self, p):
print("value-array:", p.array)
return p.array
@_('STRING')
def value(self, p):
print("value-string")
return p.STRING
@_('TRUE')
def value(self, p):
print("LITERRAL_VALUE", p)
return True
@_('FALSE')
def value(self, p):
print("LITERRAL_VALUE", p)
return False
@_('NULL')
def value(self, p):
print("LITERRAL_VALUE", p)
return None
@_('INT')
def value(self, p):
return p.INT
@_('FLOAT')
def value(self, p):
return p.FLOAT
@_('LSBRACKET')
def begin_array(self, p):
print("begin_array")
@_('RSBRACKET')
def end_array(self, p):
print("end_array")
@_('LBRACE')
def begin_object(self, p):
print("begin_object")
@_('RBRACE')
def end_object(self, p):
print("end_object")
@_('begin_object [ member_list ] end_object')
def object(self, p):
        # TODO: this could be simplified by making member_list always
        # return a list
print("object --- is", p.member_list)
result = {}
if isinstance(p.member_list, list):
for value in p.member_list:
result.update(value)
elif p.member_list is not None:
result = p.member_list
return result
@_('begin_array [ value_list ] end_array')
def array(self, p):
        # value_list may be a single value rather than a list, so normalise
        # it into a list here
result = []
if isinstance(p.value_list, list):
result = p.value_list
elif p.value_list is not None:
result.append(p.value_list)
return result
@_('member')
def member_list(self, p):
print("member_list-member ---", p.member)
return p.member
@_('member_list COMMA member')
def member_list(self, p):
print("member_list - member")
result = []
if isinstance(p.member_list, list):
p.member_list.append(p.member)
result = p.member_list
else:
result = [p.member_list, p.member]
return result
# very same as member
@_('value')
def value_list(self, p):
print("array-array")
return p.value
@_('value_list COMMA value')
def value_list(self, p):
result = []
if isinstance(p.value_list, list):
p.value_list.append(p.value)
result = p.value_list
else:
result = [p.value_list, p.value]
print("array-list", p.value_list, p.value, 'r is ', result)
return result
@_('COLON')
def name_separator(self, p):
print("name_separator")
@_('STRING name_separator value')
def member(self, p):
print("member, ", type(p.STRING), " ", p.STRING)
return {
p.STRING: p.value
}
def error(self, p):
raise Exception(str(p))
def loads(s):
lexer = JsonLexer()
parser = JsonParser()
tokens = lexer.tokenize(s)
# print(list(tokens))
parser.parse(tokens)
return parser.json_value
if __name__ == '__main__':
lexer = JsonLexer()
parser = JsonParser()
while True:
try:
text = input('ppjson > ')
except EOFError:
break
if text:
tokens = lexer.tokenize(text)
# debug_tokens = list(tokens)
# for tok in debug_tokens:
# print(tok)
# sys.stdout.flush()
parser.parse(tokens)
print("value is {} and the python type is {}".format(
parser.json_value, type(parser.json_value) ))
```
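A small usage sketch (not part of the repository files): `loads()` returns plain Python objects according to the grammar rules above. Note that the lexer and parser print debug traces as they run.
```python
from ppjson import ppjson

doc = ppjson.loads('{"name": "pi", "value": 3.14, "tags": ["math", "constant"], "exact": false}')
# doc == {'name': 'pi', 'value': 3.14, 'tags': ['math', 'constant'], 'exact': False}
print(doc["tags"])   # ['math', 'constant']
```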
#### File: ppjson/tests/test_empty.py
```python
import simplejson as json
from ppjson import ppjson
def test_empty_dict():
assert json.loads('{}') == ppjson.loads('{}')
``` |
{
"source": "jian01/api-example-exercise",
"score": 3
} |
#### File: api-example-exercise/api/__main__.py
```python
from contact_databases.ram_database import ContactRamDatabase, UnexistentContact
from model.contact import Contact
from flask import Flask, render_template, url_for, request
import json
app = Flask(__name__)
contact_database = ContactRamDatabase()
@app.route('/contact_list', methods=['GET'])
def get_contact_list():
"""
Gets the whole contact list
Returns a json with the form of:
{"contacts": [{"contact_id": 4, "name": "Juanito", "number": "4888-8888"}, ...]}
"""
contact_list = []
contacts = contact_database.get_contacts()
for contact_id, contact in contacts.items():
contact_list.append({"contact_id": contact_id,
"name": contact.name,
"number": contact.number})
return json.dumps({'contacts': contact_list}), 200
@app.route('/contact/<contact_id>', methods=['GET'])
def get_contact(contact_id):
"""
Gets the contact data for the contact id queried
Returns a json with the form of:
{"contact_id": 4, "name": "Juanito", "number": "4888-8888"}
    If the contact does not exist, returns "Unexistent Contact" with code 404
:param contact_id: the contact id
"""
return "Not implemented", 500
@app.route('/contact', methods=['POST'])
def add_contact():
"""
Add the contact for the data sent
Body in json with the form of:
{"name": "Juanito", "number": "4888-8888"}
Returns a json with the form of:
{"contact_id": 4, "name": "Juanito", "number": "4888-8888"}
"""
body = request.json
return "Not implemented", 500
@app.route('/contact/<contact_id>', methods=['PUT'])
def modify_contact(contact_id):
"""
Modifies the contact for the id sent
Body in json with the form of:
{"name": "Juanito", "number": "4888-8888"}
Returns a json with the form of:
{"contact_id": contact_id, "name": "Juanito", "number": "4888-8888"}
    If the contact does not exist, returns "Unexistent Contact" with code 404
:param contact_id: the contact id
"""
body = request.json
return "Not implemented", 500
@app.route('/contact/<contact_id>', methods=['DELETE'])
def delete_contact(contact_id):
"""
Deletes the contact for the id sent
Returns an "OK" message with code 200.
    If the contact does not exist, returns "Unexistent Contact" with code 404
:param contact_id: the contact id
"""
return "Not implemented", 500
app.run(host='127.0.0.1', port=8080, debug=True)
``` |
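A quick manual check of the service above (assumptions: the app is running locally on port 8080 and the `requests` package is installed). Only `/contact_list` is implemented; the other routes are exercise stubs that currently answer "Not implemented" with status 500.
```python
import requests

resp = requests.get("http://127.0.0.1:8080/contact_list")
print(resp.status_code)   # 200
print(resp.json())        # {"contacts": [...]} as described in the docstring
```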