| column | type |
|---|---|
| id | int32 (0 to 252k) |
| repo | string (7 to 55 chars) |
| path | string (4 to 127 chars) |
| func_name | string (1 to 88 chars) |
| original_string | string (75 to 19.8k chars) |
| language | 1 class |
| code | string (51 to 19.8k chars) |
| code_tokens | sequence |
| docstring | string (3 to 17.3k chars) |
| docstring_tokens | sequence |
| sha | string (40 chars) |
| url | string (87 to 242 chars) |
id: 251,400 | repo: MillionIntegrals/vel | path: vel/metrics/accuracy.py | func_name: Accuracy._value_function | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/metrics/accuracy.py#L9-L14
original_string:

def _value_function(self, x_input, y_true, y_pred):
    """ Return classification accuracy of input """
    if len(y_true.shape) == 1:
        return y_pred.argmax(1).eq(y_true).double().mean().item()
    else:
        raise NotImplementedError
id: 251,401 | repo: MillionIntegrals/vel | path: vel/storage/streaming/visdom.py | func_name: VisdomStreaming.on_epoch_end | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/streaming/visdom.py#L22-L30
original_string:

def on_epoch_end(self, epoch_info):
    """ Update data in visdom on push """
    metrics_df = pd.DataFrame([epoch_info.result]).set_index('epoch_idx')

    visdom_append_metrics(
        self.vis,
        metrics_df,
        first_epoch=epoch_info.global_epoch_idx == 1
    )
id: 251,402 | repo: MillionIntegrals/vel | path: vel/storage/streaming/visdom.py | func_name: VisdomStreaming.on_batch_end | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/storage/streaming/visdom.py#L32-L48
original_string:

def on_batch_end(self, batch_info):
    """ Stream LR to visdom """
    if self.settings.stream_lr:
        iteration_idx = (
            float(batch_info.epoch_number) +
            float(batch_info.batch_number) / batch_info.batches_per_epoch
        )

        lr = batch_info.optimizer.param_groups[-1]['lr']

        metrics_df = pd.DataFrame([lr], index=[iteration_idx], columns=['lr'])

        visdom_append_metrics(
            self.vis,
            metrics_df,
            first_epoch=(batch_info.epoch_number == 1) and (batch_info.batch_number == 0)
        )
id: 251,403 | repo: MillionIntegrals/vel | path: vel/launcher.py | func_name: main | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/launcher.py#L10-L72
original_string:

def main():
    """ Paperboy entry point - parse the arguments and run a command """
    parser = argparse.ArgumentParser(description='Paperboy deep learning launcher')
    parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run')
    parser.add_argument('command', metavar='COMMAND', help='A command to run')
    parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command')
    parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number")
    parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on")
    parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project")
    parser.add_argument(
        '-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[],
        help="Configuration parameters"
    )
    parser.add_argument(
        '--continue', action='store_true', default=False, help="Continue previously started learning process"
    )
    parser.add_argument(
        '--profile', type=str, default=None, help="Profiler output"
    )

    args = parser.parse_args()

    model_config = ModelConfig.from_file(
        args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed,
        params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}
    )

    if model_config.project_dir not in sys.path:
        sys.path.append(model_config.project_dir)

    multiprocessing_setting = model_config.provide_with_default('multiprocessing', default=None)

    if multiprocessing_setting:
        # This needs to be called before any of PyTorch module is imported
        multiprocessing.set_start_method(multiprocessing_setting)

    # Set seed already in the launcher
    from vel.util.random import set_seed
    set_seed(model_config.seed)

    model_config.banner(args.command)

    if args.profile:
        print("[PROFILER] Running Vel in profiling mode, output filename={}".format(args.profile))
        import cProfile
        import pstats
        profiler = cProfile.Profile()
        profiler.enable()

        model_config.run_command(args.command, args.varargs)

        profiler.disable()
        profiler.dump_stats(args.profile)
        profiler.print_stats(sort='tottime')

        print("======================================================================")
        pstats.Stats(profiler).strip_dirs().sort_stats('tottime').print_stats(30)
        print("======================================================================")
        pstats.Stats(profiler).strip_dirs().sort_stats('cumtime').print_stats(30)
    else:
        model_config.run_command(args.command, args.varargs)

    model_config.quit_banner()
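The profiling branch of main() follows the standard cProfile/pstats recipe; a condensed, standalone sketch of the same pattern (the workload and the output filename are placeholders):

import cProfile
import pstats

def work():
    # stand-in for model_config.run_command(...)
    sum(i * i for i in range(10 ** 6))

profiler = cProfile.Profile()
profiler.enable()
work()
profiler.disable()

profiler.dump_stats('run.prof')  # the file the --profile flag would name
pstats.Stats(profiler).strip_dirs().sort_stats('tottime').print_stats(10)
pstats.Stats(profiler).strip_dirs().sort_stats('cumtime').print_stats(10)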
id: 251,404 | repo: MillionIntegrals/vel | path: vel/util/random.py | func_name: set_seed | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/random.py#L6-L10
original_string:

def set_seed(seed: int):
    """ Set random seed for python, numpy and pytorch RNGs """
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)
id: 251,405 | repo: MillionIntegrals/vel | path: vel/util/better.py | func_name: better | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/better.py#L4-L14
original_string:

def better(old_value, new_value, mode):
    """ Check if new value is better than the old value"""
    if (old_value is None or np.isnan(old_value)) and (new_value is not None and not np.isnan(new_value)):
        return True

    if mode == 'min':
        return new_value < old_value
    elif mode == 'max':
        return new_value > old_value
    else:
        raise RuntimeError(f"Mode '{mode}' value is not supported")
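The mode semantics of better(), sketched with the function above in scope (the metric values are illustrative):

import numpy as np

print(better(0.80, 0.75, mode='min'))    # True: for losses, lower is better
print(better(0.80, 0.75, mode='max'))    # False: for accuracy, higher is better
print(better(None, 0.75, mode='max'))    # True: any real value beats a missing one
print(better(np.nan, 0.75, mode='min'))  # True: NaN counts as missing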
id: 251,406 | repo: MillionIntegrals/vel | path: vel/rl/modules/deterministic_critic_head.py | func_name: DeterministicCriticHead.reset_weights | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/modules/deterministic_critic_head.py#L20-L23
original_string:

def reset_weights(self):
    """ Initialize weights to sane defaults """
    init.uniform_(self.linear.weight, -3e-3, 3e-3)
    init.zeros_(self.linear.bias)
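The narrow uniform range (plus or minus 3e-3) is the output-layer initialization commonly associated with DDPG-style critic heads; a standalone sketch on a bare linear layer:

import torch.nn as nn
from torch.nn import init

linear = nn.Linear(64, 1)  # stand-in critic output head
init.uniform_(linear.weight, -3e-3, 3e-3)
init.zeros_(linear.bias)
print(bool(linear.weight.abs().max() <= 3e-3))  # True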
id: 251,407 | repo: MillionIntegrals/vel | path: vel/rl/discount_bootstrap.py | func_name: discount_bootstrap | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/discount_bootstrap.py#L4-L15
original_string:

def discount_bootstrap(rewards_buffer, dones_buffer, final_values, discount_factor, number_of_steps):
    """ Calculate state values bootstrapping off the following state values """
    true_value_buffer = torch.zeros_like(rewards_buffer)

    # discount/bootstrap off value fn
    current_value = final_values
    for i in reversed(range(number_of_steps)):
        current_value = rewards_buffer[i] + discount_factor * current_value * (1.0 - dones_buffer[i])
        true_value_buffer[i] = current_value

    return true_value_buffer
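A small numeric check of the recursion, assuming only torch and the discount_bootstrap above (one row per step, one column per environment):

import torch

rewards = torch.tensor([[1.0], [1.0], [1.0]])
dones = torch.tensor([[0.0], [0.0], [1.0]])  # episode terminates at the last step
final_values = torch.tensor([10.0])          # bootstrap value, masked out by the 'done'

values = discount_bootstrap(rewards, dones, final_values, discount_factor=0.9, number_of_steps=3)
# step 2: 1.0                     (done=1 masks the bootstrap)
# step 1: 1.0 + 0.9 * 1.0 = 1.9
# step 0: 1.0 + 0.9 * 1.9 = 2.71
print(values)  # tensor([[2.7100], [1.9000], [1.0000]])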
id: 251,408 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.find_project_directory | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L18-L30
original_string:

def find_project_directory(start_path) -> str:
    """ Locate top-level project directory """
    start_path = os.path.realpath(start_path)
    possible_name = os.path.join(start_path, ModelConfig.PROJECT_FILE_NAME)

    if os.path.exists(possible_name):
        return start_path
    else:
        up_path = os.path.realpath(os.path.join(start_path, '..'))
        if os.path.realpath(start_path) == up_path:
            raise RuntimeError(f"Couldn't find project file starting from {start_path}")
        else:
            return ModelConfig.find_project_directory(up_path)
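The same upward-search pattern in isolation; a minimal standalone sketch (the marker filename and the error message are placeholders, not Vel's actual PROJECT_FILE_NAME):

import os

def find_upwards(start_path, marker='.project.yaml'):
    """Walk parent directories until a marker file is found."""
    path = os.path.realpath(start_path)
    while True:
        if os.path.exists(os.path.join(path, marker)):
            return path
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root
            raise RuntimeError(f"No {marker} found above {start_path}")
        path = parent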
id: 251,409 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.from_file | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L33-L58
original_string:

def from_file(cls, filename: str, run_number: int, continue_training: bool = False, seed: int = None,
              device: str = 'cuda', params=None):
    """ Create model config from file """
    with open(filename, 'r') as fp:
        model_config_contents = Parser.parse(fp)

    project_config_path = ModelConfig.find_project_directory(os.path.dirname(os.path.abspath(filename)))

    with open(os.path.join(project_config_path, cls.PROJECT_FILE_NAME), 'r') as fp:
        project_config_contents = Parser.parse(fp)

    aggregate_dictionary = {
        **project_config_contents,
        **model_config_contents
    }

    return ModelConfig(
        filename=filename,
        configuration=aggregate_dictionary,
        run_number=run_number,
        project_dir=project_config_path,
        continue_training=continue_training,
        seed=seed,
        device=device,
        parameters=params
    )
id: 251,410 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.from_memory | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L61-L73
original_string:

def from_memory(cls, model_data: dict, run_number: int, project_dir: str,
                continue_training=False, seed: int = None, device: str = 'cuda', params=None):
    """ Create model config from supplied data """
    return ModelConfig(
        filename="[memory]",
        configuration=model_data,
        run_number=run_number,
        project_dir=project_dir,
        continue_training=continue_training,
        seed=seed,
        device=device,
        parameters=params
    )
id: 251,411 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.run_command | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L109-L112
original_string:

def run_command(self, command_name, varargs):
    """ Instantiate model class """
    command_descriptor = self.get_command(command_name)
    return command_descriptor.run(*varargs)
id: 251,412 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.project_data_dir | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L128-L130
original_string:

def project_data_dir(self, *args) -> str:
    """ Directory where to store data """
    return os.path.normpath(os.path.join(self.project_dir, 'data', *args))
id: 251,413 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.output_dir | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L132-L134
original_string:

def output_dir(self, *args) -> str:
    """ Directory where to store output """
    return os.path.join(self.project_dir, 'output', *args)
id: 251,414 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.project_top_dir | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L136-L138
original_string:

def project_top_dir(self, *args) -> str:
    """ Project top-level directory """
    return os.path.join(self.project_dir, *args)
id: 251,415 | repo: MillionIntegrals/vel | path: vel/internals/model_config.py | func_name: ModelConfig.provide_with_default | language: python
sha: e0726e1f63742b728966ccae0c8b825ea0ba491a | url: https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/internals/model_config.py#L165-L167
original_string:

def provide_with_default(self, name, default=None):
    """ Return a dependency-injected instance """
    return self.provider.instantiate_by_name_with_default(name, default_value=default)
id: 251,416 | repo: douban/libmc | path: misc/runbench.py | func_name: benchmark_method | language: python
sha: 12e5528e55708d08003671c10267287ed77e4dc4 | url: https://github.com/douban/libmc/blob/12e5528e55708d08003671c10267287ed77e4dc4/misc/runbench.py#L118-L123
original_string:

def benchmark_method(f):
    "decorator to turn f into a factory of benchmarks"
    @wraps(f)
    def inner(name, *args, **kwargs):
        return Benchmark(name, f, args, kwargs)
    return inner
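benchmark_method is a decorator that turns a plain function into a factory of named benchmark records; a self-contained sketch with a stand-in Benchmark (the real class is defined elsewhere in runbench.py):

from collections import namedtuple
from functools import wraps

Benchmark = namedtuple('Benchmark', 'name fn args kwargs')  # stand-in

def benchmark_method(f):
    "decorator to turn f into a factory of benchmarks"
    @wraps(f)
    def inner(name, *args, **kwargs):
        return Benchmark(name, f, args, kwargs)
    return inner

@benchmark_method
def bench_set(mc, key, value):
    mc.set(key, value)

b = bench_set('small set', 'foo', 'x' * 100)
print(b.name, b.fn.__name__)  # small set bench_set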
id: 251,417 | repo: douban/libmc | path: misc/runbench.py | func_name: bench | language: python
sha: 12e5528e55708d08003671c10267287ed77e4dc4 | url: https://github.com/douban/libmc/blob/12e5528e55708d08003671c10267287ed77e4dc4/misc/runbench.py#L266-L297
original_string:

def bench(participants=participants, benchmarks=benchmarks,
          bench_time=BENCH_TIME):
    """Do you even lift?"""
    mcs = [p.factory() for p in participants]
    means = [[] for p in participants]
    stddevs = [[] for p in participants]

    # Have each lifter do one benchmark each
    last_fn = None
    for benchmark_name, fn, args, kwargs in benchmarks:
        logger.info('')
        logger.info('%s', benchmark_name)

        for i, (participant, mc) in enumerate(zip(participants, mcs)):

            # FIXME: set before bench for get
            if 'get' in fn.__name__:
                last_fn(mc, *args, **kwargs)

            sw = Stopwatch()
            while sw.total() < bench_time:
                with sw.timing():
                    fn(mc, *args, **kwargs)

            means[i].append(sw.mean())
            stddevs[i].append(sw.stddev())

            logger.info(u'%76s: %s', participant.name, sw)

        last_fn = fn

    return means, stddevs
id: 251,418 | repo: liampauling/betfair | path: betfairlightweight/resources/baseresource.py | func_name: BaseResource.strip_datetime | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/resources/baseresource.py#L26-L39
original_string:

def strip_datetime(value):
    """
    Converts value to datetime if string or int.
    """
    if isinstance(value, basestring):
        try:
            return parse_datetime(value)
        except ValueError:
            return
    elif isinstance(value, integer_types):
        try:
            return datetime.datetime.utcfromtimestamp(value / 1e3)
        except (ValueError, OverflowError, OSError):
            return
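Both accepted input types, sketched (this assumes strip_datetime is exposed as the static method shown; integers are read as epoch milliseconds, hence the / 1e3):

from betfairlightweight.resources.baseresource import BaseResource

print(BaseResource.strip_datetime('2018-07-01T10:00:00.000Z'))  # datetime for 2018-07-01 10:00 UTC
print(BaseResource.strip_datetime(1530439200000))               # same instant, from epoch milliseconds
print(BaseResource.strip_datetime('not a timestamp'))           # None: unparseable strings are swallowed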
id: 251,419 | repo: liampauling/betfair | path: betfairlightweight/baseclient.py | func_name: BaseClient.set_session_token | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L78-L85
original_string:

def set_session_token(self, session_token):
    """
    Sets session token and new login time.

    :param str session_token: Session token from request.
    """
    self.session_token = session_token
    self._login_time = datetime.datetime.now()
id: 251,420 | repo: liampauling/betfair | path: betfairlightweight/baseclient.py | func_name: BaseClient.get_password | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L87-L96
original_string:

def get_password(self):
    """
    If password is not provided will look in environment variables
    for username+'password'.
    """
    if self.password is None:
        if os.environ.get(self.username+'password'):
            self.password = os.environ.get(self.username+'password')
        else:
            raise PasswordError(self.username)
id: 251,421 | repo: liampauling/betfair | path: betfairlightweight/baseclient.py | func_name: BaseClient.get_app_key | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L98-L107
original_string:

def get_app_key(self):
    """
    If app_key is not provided will look in environment
    variables for username.
    """
    if self.app_key is None:
        if os.environ.get(self.username):
            self.app_key = os.environ.get(self.username)
        else:
            raise AppKeyError(self.username)
id: 251,422 | repo: liampauling/betfair | path: betfairlightweight/baseclient.py | func_name: BaseClient.session_expired | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/baseclient.py#L117-L123
original_string:

def session_expired(self):
    """
    Returns True if login_time not set or seconds since
    login time is greater than 200 mins.
    """
    if not self._login_time or (datetime.datetime.now()-self._login_time).total_seconds() > 12000:
        return True
id: 251,423 | repo: liampauling/betfair | path: betfairlightweight/utils.py | func_name: check_status_code | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/utils.py#L7-L17
original_string:

def check_status_code(response, codes=None):
    """
    Checks response.status_code is in codes.

    :param requests.request response: Requests response
    :param list codes: List of accepted codes or callable

    :raises: StatusCodeError if code invalid
    """
    codes = codes or [200]
    if response.status_code not in codes:
        raise StatusCodeError(response.status_code)
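The guard in use, sketched with stand-in response objects (StatusCodeError is assumed to be importable from betfairlightweight.exceptions, as raised in the module above):

from unittest import mock
from betfairlightweight.exceptions import StatusCodeError
from betfairlightweight.utils import check_status_code

check_status_code(mock.Mock(status_code=200))                    # default codes=[200], passes silently
check_status_code(mock.Mock(status_code=302), codes=[200, 302])  # custom accept list, passes
try:
    check_status_code(mock.Mock(status_code=500))
except StatusCodeError as exc:
    print(exc)  # raised: 500 is not in the default [200]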
id: 251,424 | repo: liampauling/betfair | path: betfairlightweight/endpoints/betting.py | func_name: Betting.list_runner_book | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/betting.py#L193-L226
original_string:

def list_runner_book(self, market_id, selection_id, handicap=None, price_projection=None, order_projection=None,
                     match_projection=None, include_overall_position=None, partition_matched_by_strategy_ref=None,
                     customer_strategy_refs=None, currency_code=None, matched_since=None, bet_ids=None, locale=None,
                     session=None, lightweight=None):
    """
    Returns a list of dynamic data about a market and a specified runner.
    Dynamic data includes prices, the status of the market, the status of selections,
    the traded volume, and the status of any orders you have placed in the market

    :param unicode market_id: The unique id for the market
    :param int selection_id: The unique id for the selection in the market
    :param double handicap: The projection of price data you want to receive in the response
    :param dict price_projection: The projection of price data you want to receive in the response
    :param str order_projection: The orders you want to receive in the response
    :param str match_projection: If you ask for orders, specifies the representation of matches
    :param bool include_overall_position: If you ask for orders, returns matches for each selection
    :param bool partition_matched_by_strategy_ref: If you ask for orders, returns the breakdown of matches
        by strategy for each selection
    :param list customer_strategy_refs: If you ask for orders, restricts the results to orders matching
        any of the specified set of customer defined strategies
    :param str currency_code: A Betfair standard currency code
    :param str matched_since: If you ask for orders, restricts the results to orders that have at
        least one fragment matched since the specified date
    :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
    :param str locale: The language used for the response
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: list[resources.MarketBook]
    """
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listRunnerBook')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.MarketBook, elapsed_time, lightweight)
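A hedged call sketch: it assumes an authenticated betfairlightweight APIClient named trading, and the credentials, market id, and selection id below are placeholders, not real identifiers:

import betfairlightweight

trading = betfairlightweight.APIClient('username', 'password', app_key='app_key')
trading.login()

runner_book = trading.betting.list_runner_book(
    market_id='1.135391020',  # placeholder market id
    selection_id=11982403,    # placeholder selection id
    price_projection={'priceData': ['EX_BEST_OFFERS']},
)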
id: 251,425 | repo: liampauling/betfair | path: betfairlightweight/endpoints/betting.py | func_name: Betting.list_current_orders | language: python
sha: 8479392eb4849c525d78d43497c32c0bb108e977 | url: https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/betting.py#L228-L255
original_string:

def list_current_orders(self, bet_ids=None, market_ids=None, order_projection=None, customer_order_refs=None,
                        customer_strategy_refs=None, date_range=time_range(), order_by=None, sort_dir=None,
                        from_record=None, record_count=None, session=None, lightweight=None):
    """
    Returns a list of your current orders.

    :param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
    :param list market_ids: One or more market ids
    :param str order_projection: Optionally restricts the results to the specified order status
    :param list customer_order_refs: Optionally restricts the results to the specified customer order references
    :param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
        references
    :param dict date_range: Optionally restricts the results to be from/to the specified date, these dates
        are contextual to the orders being returned and therefore the dates used to filter on will change
        to placed, matched, voided or settled dates depending on the orderBy
    :param str order_by: Specifies how the results will be ordered. If no value is passed in, it defaults to BY_BET
    :param str sort_dir: Specifies the direction the results will be sorted in
    :param int from_record: Specifies the first record that will be returned
    :param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: resources.CurrentOrders
    """
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listCurrentOrders')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.CurrentOrders, elapsed_time, lightweight)
251,426 | liampauling/betfair | betfairlightweight/endpoints/betting.py | Betting.list_cleared_orders | def list_cleared_orders(self, bet_status='SETTLED', event_type_ids=None, event_ids=None, market_ids=None,
runner_ids=None, bet_ids=None, customer_order_refs=None, customer_strategy_refs=None,
side=None, settled_date_range=time_range(), group_by=None, include_item_description=None,
locale=None, from_record=None, record_count=None, session=None, lightweight=None):
"""
Returns a list of settled bets based on the bet status,
ordered by settled date.
:param str bet_status: Restricts the results to the specified status
:param list event_type_ids: Optionally restricts the results to the specified Event Type IDs
:param list event_ids: Optionally restricts the results to the specified Event IDs
:param list market_ids: Optionally restricts the results to the specified market IDs
:param list runner_ids: Optionally restricts the results to the specified Runners
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param list customer_order_refs: Optionally restricts the results to the specified customer order references
:param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
references
:param str side: Optionally restricts the results to the specified side
:param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date
:param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned
:param bool include_item_description: If true then an ItemDescription object is included in the response
:param str locale: The language used for the response
:param int from_record: Specifies the first record that will be returned
:param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.ClearedOrders
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listClearedOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.ClearedOrders, elapsed_time, lightweight) | python | def list_cleared_orders(self, bet_status='SETTLED', event_type_ids=None, event_ids=None, market_ids=None,
runner_ids=None, bet_ids=None, customer_order_refs=None, customer_strategy_refs=None,
side=None, settled_date_range=time_range(), group_by=None, include_item_description=None,
locale=None, from_record=None, record_count=None, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listClearedOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.ClearedOrders, elapsed_time, lightweight) | [
"def",
"list_cleared_orders",
"(",
"self",
",",
"bet_status",
"=",
"'SETTLED'",
",",
"event_type_ids",
"=",
"None",
",",
"event_ids",
"=",
"None",
",",
"market_ids",
"=",
"None",
",",
"runner_ids",
"=",
"None",
",",
"bet_ids",
"=",
"None",
",",
"customer_order_refs",
"=",
"None",
",",
"customer_strategy_refs",
"=",
"None",
",",
"side",
"=",
"None",
",",
"settled_date_range",
"=",
"time_range",
"(",
")",
",",
"group_by",
"=",
"None",
",",
"include_item_description",
"=",
"None",
",",
"locale",
"=",
"None",
",",
"from_record",
"=",
"None",
",",
"record_count",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listClearedOrders'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"ClearedOrders",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of settled bets based on the bet status,
ordered by settled date.
:param str bet_status: Restricts the results to the specified status
:param list event_type_ids: Optionally restricts the results to the specified Event Type IDs
:param list event_ids: Optionally restricts the results to the specified Event IDs
:param list market_ids: Optionally restricts the results to the specified market IDs
:param list runner_ids: Optionally restricts the results to the specified Runners
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param list customer_order_refs: Optionally restricts the results to the specified customer order references
:param list customer_strategy_refs: Optionally restricts the results to the specified customer strategy
references
:param str side: Optionally restricts the results to the specified side
:param dict settled_date_range: Optionally restricts the results to be from/to the specified settled date
:param str group_by: How to aggregate the lines, if not supplied then the lowest level is returned
:param bool include_item_description: If true then an ItemDescription object is included in the response
:param str locale: The language used for the response
:param int from_record: Specifies the first record that will be returned
:param int record_count: Specifies how many records will be returned from the index position 'fromRecord'
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.ClearedOrders | [
"Returns",
"a",
"list",
"of",
"settled",
"bets",
"based",
"on",
"the",
"bet",
"status",
"ordered",
"by",
"settled",
"date",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/betting.py#L257-L289 |
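A minimal usage sketch for the list_cleared_orders record above, assuming betfairlightweight is installed; the credentials, app key and market id are placeholders, and the printed attribute names are assumptions based on the ClearedOrders resource:

import betfairlightweight

# placeholder credentials and app key; cert/login details omitted for brevity
trading = betfairlightweight.APIClient('username', 'password', app_key='app_key')
trading.login()

# settled bets on one market at the lowest aggregation level (no group_by)
cleared = trading.betting.list_cleared_orders(
    bet_status='SETTLED', market_ids=['1.234567890'], include_item_description=True
)
for order in cleared.orders:
    print(order.bet_id, order.profit)

Later sketches in this section reuse this authenticated trading client rather than repeating the setup.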
251,427 | liampauling/betfair | betfairlightweight/endpoints/betting.py | Betting.list_market_profit_and_loss | def list_market_profit_and_loss(self, market_ids, include_settled_bets=None, include_bsp_bets=None,
net_of_commission=None, session=None, lightweight=None):
"""
Retrieve profit and loss for a given list of OPEN markets.
:param list market_ids: List of markets to calculate profit and loss
:param bool include_settled_bets: Option to include settled bets (partially settled markets only)
:param bool include_bsp_bets: Option to include BSP bets
:param bool net_of_commission: Option to return profit and loss net of the user's current commission
rate for this market including any special tariffs
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketProfitLoss]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listMarketProfitAndLoss')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketProfitLoss, elapsed_time, lightweight) | python | def list_market_profit_and_loss(self, market_ids, include_settled_bets=None, include_bsp_bets=None,
net_of_commission=None, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listMarketProfitAndLoss')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketProfitLoss, elapsed_time, lightweight) | [
"def",
"list_market_profit_and_loss",
"(",
"self",
",",
"market_ids",
",",
"include_settled_bets",
"=",
"None",
",",
"include_bsp_bets",
"=",
"None",
",",
"net_of_commission",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listMarketProfitAndLoss'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"MarketProfitLoss",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Retrieve profit and loss for a given list of OPEN markets.
:param list market_ids: List of markets to calculate profit and loss
:param bool include_settled_bets: Option to include settled bets (partially settled markets only)
:param bool include_bsp_bets: Option to include BSP bets
:param bool net_of_commission: Option to return profit and loss net of the user's current commission
rate for this market including any special tariffs
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketProfitLoss] | [
"Retrieve",
"profit",
"and",
"loss",
"for",
"a",
"given",
"list",
"of",
"OPEN",
"markets",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/betting.py#L291-L309 |
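A companion sketch for list_market_profit_and_loss, reusing the authenticated trading client from the first sketch; the market id is a placeholder and the market_id attribute is an assumption based on the MarketProfitLoss resource:

# 'trading' is the authenticated APIClient from the first sketch
pnl = trading.betting.list_market_profit_and_loss(
    market_ids=['1.234567890'], net_of_commission=True
)
for market in pnl:
    print(market.market_id)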
251,428 | liampauling/betfair | betfairlightweight/endpoints/betting.py | Betting.place_orders | def place_orders(self, market_id, instructions, customer_ref=None, market_version=None,
customer_strategy_ref=None, async_=None, session=None, lightweight=None):
"""
Place new orders into market.
:param str market_id: The market id these orders are to be placed on
:param list instructions: The list of place instructions
:param str customer_ref: Optional parameter allowing the client to pass a unique string
(up to 32 chars) that is used to de-dupe mistaken re-submissions
:param dict market_version: Optional parameter allowing the client to specify which
version of the market the orders should be placed on, e.g. "{'version': 123456}"
:param str customer_strategy_ref: An optional reference customers can use to specify
which strategy has sent the order
:param bool async_: An optional flag (not setting equates to false) which specifies if
the orders should be placed asynchronously
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.PlaceOrders
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'placeOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.PlaceOrders, elapsed_time, lightweight) | python | def place_orders(self, market_id, instructions, customer_ref=None, market_version=None,
customer_strategy_ref=None, async_=None, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'placeOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.PlaceOrders, elapsed_time, lightweight) | [
"def",
"place_orders",
"(",
"self",
",",
"market_id",
",",
"instructions",
",",
"customer_ref",
"=",
"None",
",",
"market_version",
"=",
"None",
",",
"customer_strategy_ref",
"=",
"None",
",",
"async_",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'placeOrders'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"PlaceOrders",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Place new orders into market.
:param str market_id: The market id these orders are to be placed on
:param list instructions: The list of place instructions
:param str customer_ref: Optional parameter allowing the client to pass a unique string
(up to 32 chars) that is used to de-dupe mistaken re-submissions
:param dict market_version: Optional parameter allowing the client to specify which
version of the market the orders should be placed on, e.g. "{'version': 123456}"
:param str customer_strategy_ref: An optional reference customers can use to specify
which strategy has sent the order
:param bool async_: An optional flag (not setting equates to false) which specifies if
the orders should be placed asynchronously
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.PlaceOrders | [
"Place",
"new",
"orders",
"into",
"market",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/betting.py#L311-L334 |
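A hedged sketch of placing a single limit order via place_orders; the filters helpers ship with betfairlightweight, while the market id, selection id, price and size are illustrative:

from betfairlightweight import filters

# 'trading' is the authenticated APIClient from the first sketch
instruction = filters.place_instruction(
    order_type='LIMIT',
    selection_id=12345,  # illustrative runner
    side='BACK',
    limit_order=filters.limit_order(price=2.0, size=2.0, persistence_type='LAPSE'),
)
report = trading.betting.place_orders(
    market_id='1.234567890', instructions=[instruction], customer_ref='unique-ref-1'
)
print(report.status)  # status attribute assumed from the PlaceOrders resource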
251,429 | liampauling/betfair | betfairlightweight/streaming/cache.py | MarketBookCache.serialise | def serialise(self):
"""Creates standard market book json response,
will raise an error if EX_MARKET_DEF is not included.
"""
return {
'marketId': self.market_id,
'totalAvailable': None,
'isMarketDataDelayed': None,
'lastMatchTime': None,
'betDelay': self.market_definition.get('betDelay'),
'version': self.market_definition.get('version'),
'complete': self.market_definition.get('complete'),
'runnersVoidable': self.market_definition.get('runnersVoidable'),
'totalMatched': self.total_matched,
'status': self.market_definition.get('status'),
'bspReconciled': self.market_definition.get('bspReconciled'),
'crossMatching': self.market_definition.get('crossMatching'),
'inplay': self.market_definition.get('inPlay'),
'numberOfWinners': self.market_definition.get('numberOfWinners'),
'numberOfRunners': len(self.market_definition.get('runners')),
'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'),
'runners': [
runner.serialise(
self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
) for runner in self.runners
],
'publishTime': self.publish_time,
'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'),
'keyLineDescription': self.market_definition.get('keyLineDefinition'),
'marketDefinition': self.market_definition, # used in lightweight
} | python | def serialise(self):
return {
'marketId': self.market_id,
'totalAvailable': None,
'isMarketDataDelayed': None,
'lastMatchTime': None,
'betDelay': self.market_definition.get('betDelay'),
'version': self.market_definition.get('version'),
'complete': self.market_definition.get('complete'),
'runnersVoidable': self.market_definition.get('runnersVoidable'),
'totalMatched': self.total_matched,
'status': self.market_definition.get('status'),
'bspReconciled': self.market_definition.get('bspReconciled'),
'crossMatching': self.market_definition.get('crossMatching'),
'inplay': self.market_definition.get('inPlay'),
'numberOfWinners': self.market_definition.get('numberOfWinners'),
'numberOfRunners': len(self.market_definition.get('runners')),
'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'),
'runners': [
runner.serialise(
self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
) for runner in self.runners
],
'publishTime': self.publish_time,
'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'),
'keyLineDescription': self.market_definition.get('keyLineDefinition'),
'marketDefinition': self.market_definition, # used in lightweight
} | [
"def",
"serialise",
"(",
"self",
")",
":",
"return",
"{",
"'marketId'",
":",
"self",
".",
"market_id",
",",
"'totalAvailable'",
":",
"None",
",",
"'isMarketDataDelayed'",
":",
"None",
",",
"'lastMatchTime'",
":",
"None",
",",
"'betDelay'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'betDelay'",
")",
",",
"'version'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'version'",
")",
",",
"'complete'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'complete'",
")",
",",
"'runnersVoidable'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'runnersVoidable'",
")",
",",
"'totalMatched'",
":",
"self",
".",
"total_matched",
",",
"'status'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'status'",
")",
",",
"'bspReconciled'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'bspReconciled'",
")",
",",
"'crossMatching'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'crossMatching'",
")",
",",
"'inplay'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'inPlay'",
")",
",",
"'numberOfWinners'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'numberOfWinners'",
")",
",",
"'numberOfRunners'",
":",
"len",
"(",
"self",
".",
"market_definition",
".",
"get",
"(",
"'runners'",
")",
")",
",",
"'numberOfActiveRunners'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'numberOfActiveRunners'",
")",
",",
"'runners'",
":",
"[",
"runner",
".",
"serialise",
"(",
"self",
".",
"market_definition_runner_dict",
"[",
"(",
"runner",
".",
"selection_id",
",",
"runner",
".",
"handicap",
")",
"]",
")",
"for",
"runner",
"in",
"self",
".",
"runners",
"]",
",",
"'publishTime'",
":",
"self",
".",
"publish_time",
",",
"'priceLadderDefinition'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'priceLadderDefinition'",
")",
",",
"'keyLineDescription'",
":",
"self",
".",
"market_definition",
".",
"get",
"(",
"'keyLineDefinition'",
")",
",",
"'marketDefinition'",
":",
"self",
".",
"market_definition",
",",
"# used in lightweight",
"}"
] | Creates standard market book json response,
will raise an error if EX_MARKET_DEF is not included. | [
"Creates",
"standard",
"market",
"book",
"json",
"response",
"will",
"error",
"if",
"EX_MARKET_DEF",
"not",
"incl",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/cache.py#L223-L253 |
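serialise is called internally when the stream listener builds a market book snapshot, so the warning in its docstring translates into a subscription requirement: the market data filter must request EX_MARKET_DEF. A sketch of a compliant filter, using the streaming helpers that ship with betfairlightweight:

from betfairlightweight import filters

# without 'EX_MARKET_DEF' the cache holds no market definition and serialise fails
market_data_filter = filters.streaming_market_data_filter(
    fields=['EX_BEST_OFFERS', 'EX_MARKET_DEF'], ladder_levels=3
)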
251,430 | liampauling/betfair | betfairlightweight/endpoints/scores.py | Scores.list_race_details | def list_race_details(self, meeting_ids=None, race_ids=None, session=None, lightweight=None):
"""
Search for races to get their details.
:param list meeting_ids: Optionally restricts the results to the specified meeting IDs.
The unique Id for the meeting equivalent to the eventId for that specific race as
returned by listEvents
:param list race_ids: Optionally restricts the results to the specified race IDs. The
unique Id for the race in the format meetingid.raceTime (hhmm). raceTime is in GMT
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceDetail]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listRaceDetails')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.RaceDetails, elapsed_time, lightweight) | python | def list_race_details(self, meeting_ids=None, race_ids=None, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listRaceDetails')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.RaceDetails, elapsed_time, lightweight) | [
"def",
"list_race_details",
"(",
"self",
",",
"meeting_ids",
"=",
"None",
",",
"race_ids",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listRaceDetails'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"RaceDetails",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Search for races to get their details.
:param list meeting_ids: Optionally restricts the results to the specified meeting IDs.
The unique Id for the meeting equivalent to the eventId for that specific race as
returned by listEvents
:param list race_ids: Optionally restricts the results to the specified race IDs. The
unique Id for the race in the format meetingid.raceTime (hhmm). raceTime is in GMT
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceDetail] | [
"Search",
"for",
"races",
"to",
"get",
"their",
"details",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/scores.py#L13-L30 |
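A short sketch for list_race_details, reusing the trading client from the first sketch; the meeting id is illustrative and the printed attributes are assumptions based on the RaceDetails resource:

# 'trading' is the authenticated APIClient from the first sketch
details = trading.scores.list_race_details(meeting_ids=['27610231'])
for race in details:
    print(race.race_id, race.race_status)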
251,431 | liampauling/betfair | betfairlightweight/endpoints/scores.py | Scores.list_available_events | def list_available_events(self, event_ids=None, event_type_ids=None, event_status=None, session=None,
lightweight=None):
"""
Search for events that have live score data available.
:param list event_ids: Optionally restricts the results to the specified event IDs
:param list event_type_ids: Optionally restricts the results to the specified event type IDs
:param list event_status: Optionally restricts the results to the specified event status
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.AvailableEvent]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listAvailableEvents')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.AvailableEvent, elapsed_time, lightweight) | python | def list_available_events(self, event_ids=None, event_type_ids=None, event_status=None, session=None,
lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listAvailableEvents')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.AvailableEvent, elapsed_time, lightweight) | [
"def",
"list_available_events",
"(",
"self",
",",
"event_ids",
"=",
"None",
",",
"event_type_ids",
"=",
"None",
",",
"event_status",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listAvailableEvents'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"AvailableEvent",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Search for events that have live score data available.
:param list event_ids: Optionally restricts the results to the specified event IDs
:param list event_type_ids: Optionally restricts the results to the specified event type IDs
:param list event_status: Optionally restricts the results to the specified event status
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.AvailableEvent] | [
"Search",
"for",
"events",
"that",
"have",
"live",
"score",
"data",
"available",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/scores.py#L34-L50 |
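A sketch for list_available_events; event type '1' (soccer on Betfair) is used for illustration, and event_id is an assumption based on the AvailableEvent resource:

# 'trading' is the authenticated APIClient from the first sketch
events = trading.scores.list_available_events(event_type_ids=['1'])  # '1' is soccer
for event in events:
    print(event.event_id)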
251,432 | liampauling/betfair | betfairlightweight/endpoints/scores.py | Scores.list_scores | def list_scores(self, update_keys, session=None, lightweight=None):
"""
Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired events. All events that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listScores')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Score, elapsed_time, lightweight) | python | def list_scores(self, update_keys, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listScores')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Score, elapsed_time, lightweight) | [
"def",
"list_scores",
"(",
"self",
",",
"update_keys",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listScores'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"Score",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score] | [
"Returns",
"a",
"list",
"of",
"current",
"scores",
"for",
"the",
"given",
"events",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/scores.py#L52-L66 |
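A sketch for list_scores built from the docstring's own update_keys example; omitting lastUpdateSequenceProcessed on the first poll is an assumption:

# 'trading' is the authenticated APIClient from the first sketch
update_keys = [{'eventId': '28205674'}]  # per the docstring example
scores = trading.scores.list_scores(update_keys=update_keys)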
251,433 | liampauling/betfair | betfairlightweight/endpoints/scores.py | Scores.list_incidents | def list_incidents(self, update_keys, session=None, lightweight=None):
"""
Returns a list of incidents for the given events.
:param list update_keys: The filter to select desired events. All events that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Incidents]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listIncidents')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Incidents, elapsed_time, lightweight) | python | def list_incidents(self, update_keys, session=None, lightweight=None):
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listIncidents')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Incidents, elapsed_time, lightweight) | [
"def",
"list_incidents",
"(",
"self",
",",
"update_keys",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"URI",
",",
"'listIncidents'",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"Incidents",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of incidents for the given events.
:param list update_keys: The filter to select desired events. All events that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Incidents] | [
"Returns",
"a",
"list",
"of",
"incidents",
"for",
"the",
"given",
"events",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/scores.py#L68-L82 |
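list_incidents takes the same update_keys shape as list_scores above; a one-line sketch:

# 'trading' is the authenticated APIClient from the first sketch
incidents = trading.scores.list_incidents(update_keys=[{'eventId': '28205674'}])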
251,434 | liampauling/betfair | betfairlightweight/endpoints/inplayservice.py | InPlayService.get_event_timeline | def get_event_timeline(self, event_id, session=None, lightweight=None):
"""
Returns event timeline for event id provided.
:param int event_id: Event id to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.EventTimeline
"""
url = '%s%s' % (self.url, 'eventTimeline')
params = {
'eventId': event_id,
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | python | def get_event_timeline(self, event_id, session=None, lightweight=None):
url = '%s%s' % (self.url, 'eventTimeline')
params = {
'eventId': event_id,
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | [
"def",
"get_event_timeline",
"(",
"self",
",",
"event_id",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"url",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"url",
",",
"'eventTimeline'",
")",
"params",
"=",
"{",
"'eventId'",
":",
"event_id",
",",
"'alt'",
":",
"'json'",
",",
"'regionCode'",
":",
"'UK'",
",",
"'locale'",
":",
"'en_GB'",
"}",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"params",
"=",
"params",
",",
"session",
"=",
"session",
",",
"url",
"=",
"url",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"EventTimeline",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns event timeline for event id provided.
:param int event_id: Event id to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.EventTimeline | [
"Returns",
"event",
"timeline",
"for",
"event",
"id",
"provided",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/inplayservice.py#L18-L36 |
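A sketch for get_event_timeline; the endpoint being exposed as trading.in_play_service, and the event id, are assumptions:

# 'trading' is the authenticated APIClient from the first sketch
timeline = trading.in_play_service.get_event_timeline(event_id=28205674)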
251,435 | liampauling/betfair | betfairlightweight/endpoints/inplayservice.py | InPlayService.get_event_timelines | def get_event_timelines(self, event_ids, session=None, lightweight=None):
"""
Returns a list of event timelines based on the event IDs
supplied.
:param list event_ids: List of event IDs to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTimeline]
"""
url = '%s%s' % (self.url, 'eventTimelines')
params = {
'eventIds': ','.join(str(x) for x in event_ids),
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | python | def get_event_timelines(self, event_ids, session=None, lightweight=None):
url = '%s%s' % (self.url, 'eventTimelines')
params = {
'eventIds': ','.join(str(x) for x in event_ids),
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | [
"def",
"get_event_timelines",
"(",
"self",
",",
"event_ids",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"url",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"url",
",",
"'eventTimelines'",
")",
"params",
"=",
"{",
"'eventIds'",
":",
"','",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"event_ids",
")",
",",
"'alt'",
":",
"'json'",
",",
"'regionCode'",
":",
"'UK'",
",",
"'locale'",
":",
"'en_GB'",
"}",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"params",
"=",
"params",
",",
"session",
"=",
"session",
",",
"url",
"=",
"url",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"EventTimeline",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of event timelines based on the event IDs
supplied.
:param list event_ids: List of event IDs to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTimeline] | [
"Returns",
"a",
"list",
"of",
"event",
"timelines",
"based",
"on",
"event",
"id",
"s",
"supplied",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/inplayservice.py#L38-L57 |
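get_event_timelines joins the supplied ids into a comma-separated eventIds parameter, as the body above shows; a sketch with illustrative ids:

# 'trading' is the authenticated APIClient from the first sketch
timelines = trading.in_play_service.get_event_timelines(event_ids=[28205674, 28205675])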
251,436 | liampauling/betfair | betfairlightweight/endpoints/inplayservice.py | InPlayService.get_scores | def get_scores(self, event_ids, session=None, lightweight=None):
"""
Returns a list of scores based on the event IDs
supplied.
:param list event_ids: List of event IDs to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Scores]
"""
url = '%s%s' % (self.url, 'scores')
params = {
'eventIds': ','.join(str(x) for x in event_ids),
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.Scores, elapsed_time, lightweight) | python | def get_scores(self, event_ids, session=None, lightweight=None):
url = '%s%s' % (self.url, 'scores')
params = {
'eventIds': ','.join(str(x) for x in event_ids),
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.Scores, elapsed_time, lightweight) | [
"def",
"get_scores",
"(",
"self",
",",
"event_ids",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"url",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"url",
",",
"'scores'",
")",
"params",
"=",
"{",
"'eventIds'",
":",
"','",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"event_ids",
")",
",",
"'alt'",
":",
"'json'",
",",
"'regionCode'",
":",
"'UK'",
",",
"'locale'",
":",
"'en_GB'",
"}",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"params",
"=",
"params",
",",
"session",
"=",
"session",
",",
"url",
"=",
"url",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"Scores",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of scores based on the event IDs
supplied.
:param list event_ids: List of event IDs to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Scores] | [
"Returns",
"a",
"list",
"of",
"scores",
"based",
"on",
"event",
"id",
"s",
"supplied",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/inplayservice.py#L59-L78 |
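get_scores mirrors get_event_timelines, differing only in the scores path and resource; a matching sketch:

# 'trading' is the authenticated APIClient from the first sketch
scores = trading.in_play_service.get_scores(event_ids=[28205674])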
251,437 | liampauling/betfair | betfairlightweight/endpoints/streaming.py | Streaming.create_stream | def create_stream(self, unique_id=0, listener=None, timeout=11, buffer_size=1024, description='BetfairSocket',
host=None):
"""
Creates BetfairStream.
:param int unique_id: Id used to start the unique IDs of the stream (+1 before every request)
:param resources.Listener listener: Listener class to use
:param float timeout: Socket timeout
:param int buffer_size: Socket buffer size
:param str description: Betfair stream description
:param str host: Host endpoint (prod (default) or integration)
:rtype: BetfairStream
"""
listener = listener if listener else BaseListener()
return BetfairStream(
unique_id,
listener,
app_key=self.client.app_key,
session_token=self.client.session_token,
timeout=timeout,
buffer_size=buffer_size,
description=description,
host=host,
) | python | def create_stream(self, unique_id=0, listener=None, timeout=11, buffer_size=1024, description='BetfairSocket',
host=None):
listener = listener if listener else BaseListener()
return BetfairStream(
unique_id,
listener,
app_key=self.client.app_key,
session_token=self.client.session_token,
timeout=timeout,
buffer_size=buffer_size,
description=description,
host=host,
) | [
"def",
"create_stream",
"(",
"self",
",",
"unique_id",
"=",
"0",
",",
"listener",
"=",
"None",
",",
"timeout",
"=",
"11",
",",
"buffer_size",
"=",
"1024",
",",
"description",
"=",
"'BetfairSocket'",
",",
"host",
"=",
"None",
")",
":",
"listener",
"=",
"listener",
"if",
"listener",
"else",
"BaseListener",
"(",
")",
"return",
"BetfairStream",
"(",
"unique_id",
",",
"listener",
",",
"app_key",
"=",
"self",
".",
"client",
".",
"app_key",
",",
"session_token",
"=",
"self",
".",
"client",
".",
"session_token",
",",
"timeout",
"=",
"timeout",
",",
"buffer_size",
"=",
"buffer_size",
",",
"description",
"=",
"description",
",",
"host",
"=",
"host",
",",
")"
] | Creates BetfairStream.
:param int unique_id: Id used to start the unique IDs of the stream (+1 before every request)
:param resources.Listener listener: Listener class to use
:param float timeout: Socket timeout
:param int buffer_size: Socket buffer size
:param str description: Betfair stream description
:param str host: Host endpoint (prod (default) or integration)
:rtype: BetfairStream | [
"Creates",
"BetfairStream",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/streaming.py#L19-L43 |
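A sketch wiring create_stream to a listener and a market subscription; StreamListener and the streaming filters ship with betfairlightweight, while the market id is illustrative:

from betfairlightweight import StreamListener, filters

# 'trading' is the authenticated APIClient from the first sketch
listener = StreamListener()  # or a subclass, see the on_data sketch further down
stream = trading.streaming.create_stream(listener=listener, description='MarketStream')
stream.subscribe_to_markets(
    market_filter=filters.streaming_market_filter(market_ids=['1.234567890']),
    market_data_filter=filters.streaming_market_data_filter(fields=['EX_BEST_OFFERS', 'EX_MARKET_DEF']),
)
stream.start()  # runs the read loop; run in a thread if it must not block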
251,438 | liampauling/betfair | betfairlightweight/endpoints/historic.py | Historic.get_my_data | def get_my_data(self, session=None):
"""
Returns a list of data descriptions for data which has been purchased by the signed in user.
:param requests.session session: Requests session object
:rtype: dict
"""
params = clean_locals(locals())
method = 'GetMyData'
(response, elapsed_time) = self.request(method, params, session)
return response | python | def get_my_data(self, session=None):
params = clean_locals(locals())
method = 'GetMyData'
(response, elapsed_time) = self.request(method, params, session)
return response | [
"def",
"get_my_data",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'GetMyData'",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"response"
] | Returns a list of data descriptions for data which has been purchased by the signed in user.
:param requests.session session: Requests session object
:rtype: dict | [
"Returns",
"a",
"list",
"of",
"data",
"descriptions",
"for",
"data",
"which",
"has",
"been",
"purchased",
"by",
"the",
"signed",
"in",
"user",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/historic.py#L22-L33 |
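get_my_data returns the raw dict rather than a resource; a one-line sketch, assuming the endpoint is exposed as trading.historic:

# 'trading' is the authenticated APIClient from the first sketch
purchased = trading.historic.get_my_data()  # plain dict, no resource wrapping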
251,439 | liampauling/betfair | betfairlightweight/endpoints/historic.py | Historic.get_data_size | def get_data_size(self, sport, plan, from_day, from_month, from_year, to_day, to_month, to_year, event_id=None,
event_name=None, market_types_collection=None, countries_collection=None,
file_type_collection=None, session=None):
"""
Returns a dictionary of the file count and combined size of the files.
:param sport: sport to filter data for.
:param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan.
:param from_day: day of month to start data from.
:param from_month: month to start data from.
:param from_year: year to start data from.
:param to_day: day of month to end data at.
:param to_month: month to end data at.
:param to_year: year to end data at.
:param event_id: id of a specific event to get data for.
:param event_name: name of a specific event to get data for.
:param market_types_collection: list of specific marketTypes to filter for.
:param countries_collection: list of countries to filter for.
:param file_type_collection: list of file types.
:param requests.session session: Requests session object
:rtype: dict
"""
params = clean_locals(locals())
method = 'GetAdvBasketDataSize'
(response, elapsed_time) = self.request(method, params, session)
return response | python | def get_data_size(self, sport, plan, from_day, from_month, from_year, to_day, to_month, to_year, event_id=None,
event_name=None, market_types_collection=None, countries_collection=None,
file_type_collection=None, session=None):
params = clean_locals(locals())
method = 'GetAdvBasketDataSize'
(response, elapsed_time) = self.request(method, params, session)
return response | [
"def",
"get_data_size",
"(",
"self",
",",
"sport",
",",
"plan",
",",
"from_day",
",",
"from_month",
",",
"from_year",
",",
"to_day",
",",
"to_month",
",",
"to_year",
",",
"event_id",
"=",
"None",
",",
"event_name",
"=",
"None",
",",
"market_types_collection",
"=",
"None",
",",
"countries_collection",
"=",
"None",
",",
"file_type_collection",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"params",
"=",
"clean_locals",
"(",
"locals",
"(",
")",
")",
"method",
"=",
"'GetAdvBasketDataSize'",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"method",
",",
"params",
",",
"session",
")",
"return",
"response"
] | Returns a dictionary of the file count and combined size of the files.
:param sport: sport to filter data for.
:param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan.
:param from_day: day of month to start data from.
:param from_month: month to start data from.
:param from_year: year to start data from.
:param to_day: day of month to end data at.
:param to_month: month to end data at.
:param to_year: year to end data at.
:param event_id: id of a specific event to get data for.
:param event_name: name of a specific event to get data for.
:param market_types_collection: list of specific marketTypes to filter for.
:param countries_collection: list of countries to filter for.
:param file_type_collection: list of file types.
:param requests.session session: Requests session object
:rtype: dict | [
"Returns",
"a",
"dictionary",
"of",
"file",
"count",
"and",
"combines",
"size",
"files",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/historic.py#L63-L89 |
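A sketch for get_data_size with a one-month window; the sport and plan strings follow the docstring's wording, and the shape of the returned dict is not assumed beyond it being a dict:

# 'trading' is the authenticated APIClient from the first sketch
size = trading.historic.get_data_size(
    sport='Horse Racing', plan='Basic Plan',
    from_day=1, from_month=1, from_year=2018,
    to_day=31, to_month=1, to_year=2018,
)
print(size)  # dict describing file count and combined size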
251,440 | liampauling/betfair | betfairlightweight/endpoints/racecard.py | RaceCard.login | def login(self, session=None):
"""
Parses app key from betfair exchange site.
:param requests.session session: Requests session object
"""
session = session or self.client.session
try:
response = session.get(self.login_url)
except ConnectionError:
raise APIError(None, self.login_url, None, 'ConnectionError')
except Exception as e:
raise APIError(None, self.login_url, None, e)
app_key = re.findall(r'''"appKey":\s"(.*?)"''', response.text)
if app_key:
self.app_key = app_key[0]
else:
raise RaceCardError("Unable to find appKey") | python | def login(self, session=None):
session = session or self.client.session
try:
response = session.get(self.login_url)
except ConnectionError:
raise APIError(None, self.login_url, None, 'ConnectionError')
except Exception as e:
raise APIError(None, self.login_url, None, e)
app_key = re.findall(r'''"appKey":\s"(.*?)"''', response.text)
if app_key:
self.app_key = app_key[0]
else:
raise RaceCardError("Unable to find appKey") | [
"def",
"login",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"session",
"=",
"session",
"or",
"self",
".",
"client",
".",
"session",
"try",
":",
"response",
"=",
"session",
".",
"get",
"(",
"self",
".",
"login_url",
")",
"except",
"ConnectionError",
":",
"raise",
"APIError",
"(",
"None",
",",
"self",
".",
"login_url",
",",
"None",
",",
"'ConnectionError'",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"APIError",
"(",
"None",
",",
"self",
".",
"login_url",
",",
"None",
",",
"e",
")",
"app_key",
"=",
"re",
".",
"findall",
"(",
"r'''\"appKey\":\\s\"(.*?)\"'''",
",",
"response",
".",
"text",
")",
"if",
"app_key",
":",
"self",
".",
"app_key",
"=",
"app_key",
"[",
"0",
"]",
"else",
":",
"raise",
"RaceCardError",
"(",
"\"Unable to find appKey\"",
")"
] | Parses app key from betfair exchange site.
:param requests.session session: Requests session object | [
"Parses",
"app",
"key",
"from",
"betfair",
"exchange",
"site",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/racecard.py#L22-L39 |
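login scrapes the app key from the exchange page rather than using the API session, so it must be called once before any race card request; a sketch:

# 'trading' is the authenticated APIClient from the first sketch
trading.race_card.login()  # parses the appKey out of the exchange HTML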
251,441 | liampauling/betfair | betfairlightweight/endpoints/racecard.py | RaceCard.get_race_card | def get_race_card(self, market_ids, data_entries=None, session=None, lightweight=None):
"""
Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard]
"""
if not self.app_key:
raise RaceCardError("You need to login before requesting a race_card\n"
"APIClient.race_card.login()")
params = self.create_race_card_req(market_ids, data_entries)
(response, elapsed_time) = self.request(params=params, session=session)
return self.process_response(response, resources.RaceCard, elapsed_time, lightweight) | python | def get_race_card(self, market_ids, data_entries=None, session=None, lightweight=None):
if not self.app_key:
raise RaceCardError("You need to login before requesting a race_card\n"
"APIClient.race_card.login()")
params = self.create_race_card_req(market_ids, data_entries)
(response, elapsed_time) = self.request(params=params, session=session)
return self.process_response(response, resources.RaceCard, elapsed_time, lightweight) | [
"def",
"get_race_card",
"(",
"self",
",",
"market_ids",
",",
"data_entries",
"=",
"None",
",",
"session",
"=",
"None",
",",
"lightweight",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"app_key",
":",
"raise",
"RaceCardError",
"(",
"\"You need to login before requesting a race_card\\n\"",
"\"APIClient.race_card.login()\"",
")",
"params",
"=",
"self",
".",
"create_race_card_req",
"(",
"market_ids",
",",
"data_entries",
")",
"(",
"response",
",",
"elapsed_time",
")",
"=",
"self",
".",
"request",
"(",
"params",
"=",
"params",
",",
"session",
"=",
"session",
")",
"return",
"self",
".",
"process_response",
"(",
"response",
",",
"resources",
".",
"RaceCard",
",",
"elapsed_time",
",",
"lightweight",
")"
] | Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard] | [
"Returns",
"a",
"list",
"of",
"race",
"cards",
"based",
"on",
"market",
"ids",
"provided",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/endpoints/racecard.py#L41-L57 |
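A sketch for get_race_card, which raises RaceCardError unless the login above has run; the market id is illustrative:

# 'trading' is the authenticated APIClient from the first sketch
trading.race_card.login()  # once per client, see the previous record
race_cards = trading.race_card.get_race_card(market_ids=['1.234567890'])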
251,442 | liampauling/betfair | betfairlightweight/streaming/listener.py | StreamListener.on_data | def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data
:param raw_data: Received raw data
:return: Return False to stop stream and close connection
"""
try:
data = json.loads(raw_data)
except ValueError:
logger.error('value error: %s' % raw_data)
return
unique_id = data.get('id')
if self._error_handler(data, unique_id):
return False
operation = data['op']
if operation == 'connection':
self._on_connection(data, unique_id)
elif operation == 'status':
self._on_status(data, unique_id)
elif operation in ['mcm', 'ocm']:
# historic data does not contain unique_id
if self.stream_unique_id not in [unique_id, 'HISTORICAL']:
logger.warning('Unwanted data received from uniqueId: %s, expecting: %s' %
(unique_id, self.stream_unique_id))
return
self._on_change_message(data, unique_id) | python | def on_data(self, raw_data):
try:
data = json.loads(raw_data)
except ValueError:
logger.error('value error: %s' % raw_data)
return
unique_id = data.get('id')
if self._error_handler(data, unique_id):
return False
operation = data['op']
if operation == 'connection':
self._on_connection(data, unique_id)
elif operation == 'status':
self._on_status(data, unique_id)
elif operation in ['mcm', 'ocm']:
# historic data does not contain unique_id
if self.stream_unique_id not in [unique_id, 'HISTORICAL']:
logger.warning('Unwanted data received from uniqueId: %s, expecting: %s' %
(unique_id, self.stream_unique_id))
return
self._on_change_message(data, unique_id) | [
"def",
"on_data",
"(",
"self",
",",
"raw_data",
")",
":",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"raw_data",
")",
"except",
"ValueError",
":",
"logger",
".",
"error",
"(",
"'value error: %s'",
"%",
"raw_data",
")",
"return",
"unique_id",
"=",
"data",
".",
"get",
"(",
"'id'",
")",
"if",
"self",
".",
"_error_handler",
"(",
"data",
",",
"unique_id",
")",
":",
"return",
"False",
"operation",
"=",
"data",
"[",
"'op'",
"]",
"if",
"operation",
"==",
"'connection'",
":",
"self",
".",
"_on_connection",
"(",
"data",
",",
"unique_id",
")",
"elif",
"operation",
"==",
"'status'",
":",
"self",
".",
"_on_status",
"(",
"data",
",",
"unique_id",
")",
"elif",
"operation",
"in",
"[",
"'mcm'",
",",
"'ocm'",
"]",
":",
"# historic data does not contain unique_id",
"if",
"self",
".",
"stream_unique_id",
"not",
"in",
"[",
"unique_id",
",",
"'HISTORICAL'",
"]",
":",
"logger",
".",
"warning",
"(",
"'Unwanted data received from uniqueId: %s, expecting: %s'",
"%",
"(",
"unique_id",
",",
"self",
".",
"stream_unique_id",
")",
")",
"return",
"self",
".",
"_on_change_message",
"(",
"data",
",",
"unique_id",
")"
] | Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data
:param raw_data: Received raw data
:return: Return False to stop stream and close connection | [
"Called",
"when",
"raw",
"data",
"is",
"received",
"from",
"connection",
".",
"Override",
"this",
"method",
"if",
"you",
"wish",
"to",
"manually",
"handle",
"the",
"stream",
"data"
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/listener.py#L85-L115 |
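on_data is the documented override point; a hedged sketch of a subclass that inspects each raw message before delegating, returning whatever the base handler returns so False still stops the stream:

from betfairlightweight import StreamListener

class LoggingListener(StreamListener):
    def on_data(self, raw_data):
        print('raw: %s' % raw_data[:80])  # peek at the payload before parsing
        return super(LoggingListener, self).on_data(raw_data)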
251,443 | liampauling/betfair | betfairlightweight/streaming/listener.py | StreamListener._on_connection | def _on_connection(self, data, unique_id):
"""Called on collection operation
:param data: Received data
"""
if unique_id is None:
unique_id = self.stream_unique_id
self.connection_id = data.get('connectionId')
logger.info('[Connect: %s]: connection_id: %s' % (unique_id, self.connection_id)) | python | def _on_connection(self, data, unique_id):
if unique_id is None:
unique_id = self.stream_unique_id
self.connection_id = data.get('connectionId')
logger.info('[Connect: %s]: connection_id: %s' % (unique_id, self.connection_id)) | [
"def",
"_on_connection",
"(",
"self",
",",
"data",
",",
"unique_id",
")",
":",
"if",
"unique_id",
"is",
"None",
":",
"unique_id",
"=",
"self",
".",
"stream_unique_id",
"self",
".",
"connection_id",
"=",
"data",
".",
"get",
"(",
"'connectionId'",
")",
"logger",
".",
"info",
"(",
"'[Connect: %s]: connection_id: %s'",
"%",
"(",
"unique_id",
",",
"self",
".",
"connection_id",
")",
")"
] | Called on connection operation
:param data: Received data | [
"Called",
"on",
"collection",
"operation"
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/listener.py#L117-L125 |
251,444 | liampauling/betfair | betfairlightweight/streaming/listener.py | StreamListener._on_status | def _on_status(data, unique_id):
"""Called on status operation
:param data: Received data
"""
status_code = data.get('statusCode')
logger.info('[Subscription: %s]: %s' % (unique_id, status_code)) | python | def _on_status(data, unique_id):
status_code = data.get('statusCode')
logger.info('[Subscription: %s]: %s' % (unique_id, status_code)) | [
"def",
"_on_status",
"(",
"data",
",",
"unique_id",
")",
":",
"status_code",
"=",
"data",
".",
"get",
"(",
"'statusCode'",
")",
"logger",
".",
"info",
"(",
"'[Subscription: %s]: %s'",
"%",
"(",
"unique_id",
",",
"status_code",
")",
")"
] | Called on status operation
:param data: Received data | [
"Called",
"on",
"status",
"operation"
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/listener.py#L128-L134 |
251,445 | liampauling/betfair | betfairlightweight/streaming/listener.py | StreamListener._error_handler | def _error_handler(data, unique_id):
"""Called when data first received
:param data: Received data
:param unique_id: Unique id
:return: True if error present
"""
if data.get('statusCode') == 'FAILURE':
logger.error('[Subscription: %s] %s: %s' % (unique_id, data.get('errorCode'), data.get('errorMessage')))
if data.get('connectionClosed'):
return True
if data.get('status'):
# Clients shouldn't disconnect if status 503 is returned; when the stream
# recovers updates will be sent containing the latest data
logger.warning('[Subscription: %s] status: %s' % (unique_id, data['status'])) | python | def _error_handler(data, unique_id):
if data.get('statusCode') == 'FAILURE':
logger.error('[Subscription: %s] %s: %s' % (unique_id, data.get('errorCode'), data.get('errorMessage')))
if data.get('connectionClosed'):
return True
if data.get('status'):
# Clients shouldn't disconnect if status 503 is returned; when the stream
# recovers updates will be sent containing the latest data
logger.warning('[Subscription: %s] status: %s' % (unique_id, data['status'])) | [
"def",
"_error_handler",
"(",
"data",
",",
"unique_id",
")",
":",
"if",
"data",
".",
"get",
"(",
"'statusCode'",
")",
"==",
"'FAILURE'",
":",
"logger",
".",
"error",
"(",
"'[Subscription: %s] %s: %s'",
"%",
"(",
"unique_id",
",",
"data",
".",
"get",
"(",
"'errorCode'",
")",
",",
"data",
".",
"get",
"(",
"'errorMessage'",
")",
")",
")",
"if",
"data",
".",
"get",
"(",
"'connectionClosed'",
")",
":",
"return",
"True",
"if",
"data",
".",
"get",
"(",
"'status'",
")",
":",
"# Clients shouldn't disconnect if status 503 is returned; when the stream",
"# recovers updates will be sent containing the latest data",
"logger",
".",
"warning",
"(",
"'[Subscription: %s] status: %s'",
"%",
"(",
"unique_id",
",",
"data",
"[",
"'status'",
"]",
")",
")"
] | Called when data is first received
:param data: Received data
:param unique_id: Unique id
:return: True if error present | [
"Called",
"when",
"data",
"first",
"received"
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/listener.py#L157-L171 |
251,446 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream.stop | def stop(self):
"""Stops read loop and closes socket if it has been created.
"""
self._running = False
if self._socket is None:
return
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except socket.error:
pass
self._socket = None | python | def stop(self):
self._running = False
if self._socket is None:
return
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except socket.error:
pass
self._socket = None | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"_running",
"=",
"False",
"if",
"self",
".",
"_socket",
"is",
"None",
":",
"return",
"try",
":",
"self",
".",
"_socket",
".",
"shutdown",
"(",
"socket",
".",
"SHUT_RDWR",
")",
"self",
".",
"_socket",
".",
"close",
"(",
")",
"except",
"socket",
".",
"error",
":",
"pass",
"self",
".",
"_socket",
"=",
"None"
] | Stops read loop and closes socket if it has been created. | [
"Stops",
"read",
"loop",
"and",
"closes",
"socket",
"if",
"it",
"has",
"been",
"created",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L62-L74 |
251,447 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream.authenticate | def authenticate(self):
"""Authentication request.
"""
unique_id = self.new_unique_id()
message = {
'op': 'authentication',
'id': unique_id,
'appKey': self.app_key,
'session': self.session_token,
}
self._send(message)
return unique_id | python | def authenticate(self):
unique_id = self.new_unique_id()
message = {
'op': 'authentication',
'id': unique_id,
'appKey': self.app_key,
'session': self.session_token,
}
self._send(message)
return unique_id | [
"def",
"authenticate",
"(",
"self",
")",
":",
"unique_id",
"=",
"self",
".",
"new_unique_id",
"(",
")",
"message",
"=",
"{",
"'op'",
":",
"'authentication'",
",",
"'id'",
":",
"unique_id",
",",
"'appKey'",
":",
"self",
".",
"app_key",
",",
"'session'",
":",
"self",
".",
"session_token",
",",
"}",
"self",
".",
"_send",
"(",
"message",
")",
"return",
"unique_id"
] | Authentication request. | [
"Authentication",
"request",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L76-L87 |
251,448 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream.heartbeat | def heartbeat(self):
"""Heartbeat request to keep session alive.
"""
unique_id = self.new_unique_id()
message = {
'op': 'heartbeat',
'id': unique_id,
}
self._send(message)
return unique_id | python | def heartbeat(self):
unique_id = self.new_unique_id()
message = {
'op': 'heartbeat',
'id': unique_id,
}
self._send(message)
return unique_id | [
"def",
"heartbeat",
"(",
"self",
")",
":",
"unique_id",
"=",
"self",
".",
"new_unique_id",
"(",
")",
"message",
"=",
"{",
"'op'",
":",
"'heartbeat'",
",",
"'id'",
":",
"unique_id",
",",
"}",
"self",
".",
"_send",
"(",
"message",
")",
"return",
"unique_id"
] | Heartbeat request to keep session alive. | [
"Heartbeat",
"request",
"to",
"keep",
"session",
"alive",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L89-L98 |
251,449 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream.subscribe_to_markets | def subscribe_to_markets(self, market_filter, market_data_filter, initial_clk=None, clk=None,
conflate_ms=None, heartbeat_ms=None, segmentation_enabled=True):
"""
Market subscription request.
:param dict market_filter: Market filter
:param dict market_data_filter: Market data filter
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block
"""
unique_id = self.new_unique_id()
message = {
'op': 'marketSubscription',
'id': unique_id,
'marketFilter': market_filter,
'marketDataFilter': market_data_filter,
'initialClk': initial_clk,
'clk': clk,
'conflateMs': conflate_ms,
'heartbeatMs': heartbeat_ms,
'segmentationEnabled': segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.stream_unique_id = unique_id
else:
self.listener.register_stream(unique_id, 'marketSubscription')
self._send(message)
return unique_id | python | def subscribe_to_markets(self, market_filter, market_data_filter, initial_clk=None, clk=None,
conflate_ms=None, heartbeat_ms=None, segmentation_enabled=True):
unique_id = self.new_unique_id()
message = {
'op': 'marketSubscription',
'id': unique_id,
'marketFilter': market_filter,
'marketDataFilter': market_data_filter,
'initialClk': initial_clk,
'clk': clk,
'conflateMs': conflate_ms,
'heartbeatMs': heartbeat_ms,
'segmentationEnabled': segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.stream_unique_id = unique_id
else:
self.listener.register_stream(unique_id, 'marketSubscription')
self._send(message)
return unique_id | [
"def",
"subscribe_to_markets",
"(",
"self",
",",
"market_filter",
",",
"market_data_filter",
",",
"initial_clk",
"=",
"None",
",",
"clk",
"=",
"None",
",",
"conflate_ms",
"=",
"None",
",",
"heartbeat_ms",
"=",
"None",
",",
"segmentation_enabled",
"=",
"True",
")",
":",
"unique_id",
"=",
"self",
".",
"new_unique_id",
"(",
")",
"message",
"=",
"{",
"'op'",
":",
"'marketSubscription'",
",",
"'id'",
":",
"unique_id",
",",
"'marketFilter'",
":",
"market_filter",
",",
"'marketDataFilter'",
":",
"market_data_filter",
",",
"'initialClk'",
":",
"initial_clk",
",",
"'clk'",
":",
"clk",
",",
"'conflateMs'",
":",
"conflate_ms",
",",
"'heartbeatMs'",
":",
"heartbeat_ms",
",",
"'segmentationEnabled'",
":",
"segmentation_enabled",
",",
"}",
"if",
"initial_clk",
"and",
"clk",
":",
"# if resubscribe only update unique_id",
"self",
".",
"listener",
".",
"stream_unique_id",
"=",
"unique_id",
"else",
":",
"self",
".",
"listener",
".",
"register_stream",
"(",
"unique_id",
",",
"'marketSubscription'",
")",
"self",
".",
"_send",
"(",
"message",
")",
"return",
"unique_id"
] | Market subscription request.
:param dict market_filter: Market filter
:param dict market_data_filter: Market data filter
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block | [
"Market",
"subscription",
"request",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L100-L132 |
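Hedged usage sketch: the filter dictionaries below are illustrative values (horse-racing WIN markets, best-offer ladder), not a tested configuration, and `stream` is an assumed connected instance.

market_filter = {'eventTypeIds': ['7'], 'marketTypes': ['WIN']}
market_data_filter = {'fields': ['EX_BEST_OFFERS'], 'ladderLevels': 3}

unique_id = stream.subscribe_to_markets(
    market_filter=market_filter,
    market_data_filter=market_data_filter,
    conflate_ms=1000,      # within the documented 0 to 120000 bounds
    heartbeat_ms=5000,     # within the documented 500 to 5000 bounds
)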
251,450 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream.subscribe_to_orders | def subscribe_to_orders(self, order_filter=None, initial_clk=None, clk=None, conflate_ms=None,
heartbeat_ms=None, segmentation_enabled=True):
"""
Order subscription request.
:param dict order_filter: Order filter to be applied
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block
"""
unique_id = self.new_unique_id()
message = {
'op': 'orderSubscription',
'id': unique_id,
'orderFilter': order_filter,
'initialClk': initial_clk,
'clk': clk,
'conflateMs': conflate_ms,
'heartbeatMs': heartbeat_ms,
'segmentationEnabled': segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.stream_unique_id = unique_id
else:
self.listener.register_stream(unique_id, 'orderSubscription')
self._send(message)
return unique_id | python | def subscribe_to_orders(self, order_filter=None, initial_clk=None, clk=None, conflate_ms=None,
heartbeat_ms=None, segmentation_enabled=True):
unique_id = self.new_unique_id()
message = {
'op': 'orderSubscription',
'id': unique_id,
'orderFilter': order_filter,
'initialClk': initial_clk,
'clk': clk,
'conflateMs': conflate_ms,
'heartbeatMs': heartbeat_ms,
'segmentationEnabled': segmentation_enabled,
}
if initial_clk and clk:
# if resubscribe only update unique_id
self.listener.stream_unique_id = unique_id
else:
self.listener.register_stream(unique_id, 'orderSubscription')
self._send(message)
return unique_id | [
"def",
"subscribe_to_orders",
"(",
"self",
",",
"order_filter",
"=",
"None",
",",
"initial_clk",
"=",
"None",
",",
"clk",
"=",
"None",
",",
"conflate_ms",
"=",
"None",
",",
"heartbeat_ms",
"=",
"None",
",",
"segmentation_enabled",
"=",
"True",
")",
":",
"unique_id",
"=",
"self",
".",
"new_unique_id",
"(",
")",
"message",
"=",
"{",
"'op'",
":",
"'orderSubscription'",
",",
"'id'",
":",
"unique_id",
",",
"'orderFilter'",
":",
"order_filter",
",",
"'initialClk'",
":",
"initial_clk",
",",
"'clk'",
":",
"clk",
",",
"'conflateMs'",
":",
"conflate_ms",
",",
"'heartbeatMs'",
":",
"heartbeat_ms",
",",
"'segmentationEnabled'",
":",
"segmentation_enabled",
",",
"}",
"if",
"initial_clk",
"and",
"clk",
":",
"# if resubscribe only update unique_id",
"self",
".",
"listener",
".",
"stream_unique_id",
"=",
"unique_id",
"else",
":",
"self",
".",
"listener",
".",
"register_stream",
"(",
"unique_id",
",",
"'orderSubscription'",
")",
"self",
".",
"_send",
"(",
"message",
")",
"return",
"unique_id"
] | Order subscription request.
:param dict order_filter: Order filter to be applied
:param str initial_clk: Sequence token for reconnect
:param str clk: Sequence token for reconnect
:param int conflate_ms: conflation rate (bounds are 0 to 120000)
:param int heartbeat_ms: heartbeat rate (500 to 5000)
:param bool segmentation_enabled: allow the server to send large sets of data
in segments, instead of a single block | [
"Order",
"subscription",
"request",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L134-L164 |
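Reconnection sketch: passing both initial_clk and clk takes the resubscribe branch above, which reuses the listener's existing stream instead of registering a new one. The saved_* names are assumptions for sequence tokens captured from an earlier session.

unique_id = stream.subscribe_to_orders(
    order_filter={'includeOverallPosition': True},   # illustrative filter
    initial_clk=saved_initial_clk,                   # token saved earlier
    clk=saved_clk,                                   # token saved earlier
)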
251,451 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream._create_socket | def _create_socket(self):
"""Creates ssl socket, connects to stream api and
sets timeout.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(s)
s.connect((self.host, self.__port))
s.settimeout(self.timeout)
return s | python | def _create_socket(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(s)
s.connect((self.host, self.__port))
s.settimeout(self.timeout)
return s | [
"def",
"_create_socket",
"(",
"self",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"s",
"=",
"ssl",
".",
"wrap_socket",
"(",
"s",
")",
"s",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"__port",
")",
")",
"s",
".",
"settimeout",
"(",
"self",
".",
"timeout",
")",
"return",
"s"
] | Creates ssl socket, connects to stream api and
sets timeout. | [
"Creates",
"ssl",
"socket",
"connects",
"to",
"stream",
"api",
"and",
"sets",
"timeout",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L176-L184 |
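Note that ssl.wrap_socket is deprecated and was removed in Python 3.12; a hedged SSLContext-based equivalent of the function above would look like this sketch (which, unlike wrap_socket, also verifies the server certificate):

import socket
import ssl

def create_socket(host, port, timeout):
    ctx = ssl.create_default_context()
    sock = socket.create_connection((host, port), timeout=timeout)
    sock = ctx.wrap_socket(sock, server_hostname=host)  # verifies the cert
    sock.settimeout(timeout)
    return sock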
251,452 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream._read_loop | def _read_loop(self):
"""Read loop, splits by CRLF and pushes received data
to _data.
"""
while self._running:
received_data_raw = self._receive_all()
if self._running:
self.receive_count += 1
self.datetime_last_received = datetime.datetime.utcnow()
received_data_split = received_data_raw.split(self.__CRLF)
for received_data in received_data_split:
if received_data:
self._data(received_data) | python | def _read_loop(self):
while self._running:
received_data_raw = self._receive_all()
if self._running:
self.receive_count += 1
self.datetime_last_received = datetime.datetime.utcnow()
received_data_split = received_data_raw.split(self.__CRLF)
for received_data in received_data_split:
if received_data:
self._data(received_data) | [
"def",
"_read_loop",
"(",
"self",
")",
":",
"while",
"self",
".",
"_running",
":",
"received_data_raw",
"=",
"self",
".",
"_receive_all",
"(",
")",
"if",
"self",
".",
"_running",
":",
"self",
".",
"receive_count",
"+=",
"1",
"self",
".",
"datetime_last_received",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"received_data_split",
"=",
"received_data_raw",
".",
"split",
"(",
"self",
".",
"__CRLF",
")",
"for",
"received_data",
"in",
"received_data_split",
":",
"if",
"received_data",
":",
"self",
".",
"_data",
"(",
"received_data",
")"
] | Read loop, splits by CRLF and pushes received data
to _data. | [
"Read",
"loop",
"splits",
"by",
"CRLF",
"and",
"pushes",
"received",
"data",
"to",
"_data",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L186-L198 |
251,453 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream._receive_all | def _receive_all(self):
"""Whilst socket is running receives data from socket,
till CRLF is detected.
"""
(data, part) = ('', '')
if is_py3:
crlf_bytes = bytes(self.__CRLF, encoding=self.__encoding)
else:
crlf_bytes = self.__CRLF
while self._running and part[-2:] != crlf_bytes:
try:
part = self._socket.recv(self.buffer_size)
except (socket.timeout, socket.error) as e:
if self._running:
self.stop()
raise SocketError('[Connect: %s]: Socket %s' % (self._unique_id, e))
else:
return # 133, prevents error if stop is called mid recv
# an empty string indicates the server shutdown the socket
if len(part) == 0:
self.stop()
raise SocketError('Connection closed by server')
data += part.decode(self.__encoding)
return data | python | def _receive_all(self):
(data, part) = ('', '')
if is_py3:
crlf_bytes = bytes(self.__CRLF, encoding=self.__encoding)
else:
crlf_bytes = self.__CRLF
while self._running and part[-2:] != crlf_bytes:
try:
part = self._socket.recv(self.buffer_size)
except (socket.timeout, socket.error) as e:
if self._running:
self.stop()
raise SocketError('[Connect: %s]: Socket %s' % (self._unique_id, e))
else:
return # 133, prevents error if stop is called mid recv
# an empty string indicates the server shutdown the socket
if len(part) == 0:
self.stop()
raise SocketError('Connection closed by server')
data += part.decode(self.__encoding)
return data | [
"def",
"_receive_all",
"(",
"self",
")",
":",
"(",
"data",
",",
"part",
")",
"=",
"(",
"''",
",",
"''",
")",
"if",
"is_py3",
":",
"crlf_bytes",
"=",
"bytes",
"(",
"self",
".",
"__CRLF",
",",
"encoding",
"=",
"self",
".",
"__encoding",
")",
"else",
":",
"crlf_bytes",
"=",
"self",
".",
"__CRLF",
"while",
"self",
".",
"_running",
"and",
"part",
"[",
"-",
"2",
":",
"]",
"!=",
"crlf_bytes",
":",
"try",
":",
"part",
"=",
"self",
".",
"_socket",
".",
"recv",
"(",
"self",
".",
"buffer_size",
")",
"except",
"(",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"if",
"self",
".",
"_running",
":",
"self",
".",
"stop",
"(",
")",
"raise",
"SocketError",
"(",
"'[Connect: %s]: Socket %s'",
"%",
"(",
"self",
".",
"_unique_id",
",",
"e",
")",
")",
"else",
":",
"return",
"# 133, prevents error if stop is called mid recv",
"# an empty string indicates the server shutdown the socket",
"if",
"len",
"(",
"part",
")",
"==",
"0",
":",
"self",
".",
"stop",
"(",
")",
"raise",
"SocketError",
"(",
"'Connection closed by server'",
")",
"data",
"+=",
"part",
".",
"decode",
"(",
"self",
".",
"__encoding",
")",
"return",
"data"
] | Whilst socket is running receives data from socket,
till CRLF is detected. | [
"Whilst",
"socket",
"is",
"running",
"receives",
"data",
"from",
"socket",
"till",
"CRLF",
"is",
"detected",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L200-L226 |
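The framing rule here is that each batch of messages ends with CRLF. A standalone sketch of the same loop on a plain socket (the names and buffer size are assumptions):

def recv_until_crlf(sock, buffer_size=1024):
    data = b''
    while not data.endswith(b'\r\n'):
        part = sock.recv(buffer_size)
        if not part:                 # empty read: peer closed the connection
            raise ConnectionError('Connection closed by server')
        data += part
    return data.decode('utf-8')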
251,454 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream._data | def _data(self, received_data):
"""Sends data to listener, if False is returned; socket
is closed.
:param received_data: Decoded data received from socket.
"""
if self.listener.on_data(received_data) is False:
self.stop()
raise ListenerError(self.listener.connection_id, received_data) | python | def _data(self, received_data):
if self.listener.on_data(received_data) is False:
self.stop()
raise ListenerError(self.listener.connection_id, received_data) | [
"def",
"_data",
"(",
"self",
",",
"received_data",
")",
":",
"if",
"self",
".",
"listener",
".",
"on_data",
"(",
"received_data",
")",
"is",
"False",
":",
"self",
".",
"stop",
"(",
")",
"raise",
"ListenerError",
"(",
"self",
".",
"listener",
".",
"connection_id",
",",
"received_data",
")"
] | Sends data to listener; if False is returned, socket
is closed.
:param received_data: Decoded data received from socket. | [
"Sends",
"data",
"to",
"listener",
"if",
"False",
"is",
"returned",
";",
"socket",
"is",
"closed",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L228-L236 |
251,455 | liampauling/betfair | betfairlightweight/streaming/betfairstream.py | BetfairStream._send | def _send(self, message):
"""If not running connects socket and
authenticates. Adds CRLF and sends message
to Betfair.
:param message: Data to be sent to Betfair.
"""
if not self._running:
self._connect()
self.authenticate()
message_dumped = json.dumps(message) + self.__CRLF
try:
self._socket.send(message_dumped.encode())
except (socket.timeout, socket.error) as e:
self.stop()
raise SocketError('[Connect: %s]: Socket %s' % (self._unique_id, e)) | python | def _send(self, message):
if not self._running:
self._connect()
self.authenticate()
message_dumped = json.dumps(message) + self.__CRLF
try:
self._socket.send(message_dumped.encode())
except (socket.timeout, socket.error) as e:
self.stop()
raise SocketError('[Connect: %s]: Socket %s' % (self._unique_id, e)) | [
"def",
"_send",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"self",
".",
"_running",
":",
"self",
".",
"_connect",
"(",
")",
"self",
".",
"authenticate",
"(",
")",
"message_dumped",
"=",
"json",
".",
"dumps",
"(",
"message",
")",
"+",
"self",
".",
"__CRLF",
"try",
":",
"self",
".",
"_socket",
".",
"send",
"(",
"message_dumped",
".",
"encode",
"(",
")",
")",
"except",
"(",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"self",
".",
"stop",
"(",
")",
"raise",
"SocketError",
"(",
"'[Connect: %s]: Socket %s'",
"%",
"(",
"self",
".",
"_unique_id",
",",
"e",
")",
")"
] | If not running connects socket and
authenticates. Adds CRLF and sends message
to Betfair.
:param message: Data to be sent to Betfair. | [
"If",
"not",
"running",
"connects",
"socket",
"and",
"authenticates",
".",
"Adds",
"CRLF",
"and",
"sends",
"message",
"to",
"Betfair",
"."
] | 8479392eb4849c525d78d43497c32c0bb108e977 | https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/streaming/betfairstream.py#L238-L253 |
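What goes down the wire is simply the JSON payload terminated by CRLF, for example:

import json

message = {'op': 'heartbeat', 'id': 1}
wire = (json.dumps(message) + '\r\n').encode()
# b'{"op": "heartbeat", "id": 1}\r\n'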
251,456 | dmbee/seglearn | seglearn/pipe.py | Pype.fit_transform | def fit_transform(self, X, y=None, **fit_params):
"""
Fit the model and transform with the final estimator
Fits all the transforms one after the other and transforms the
data, then uses fit_transform on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
Transformed samples
yt : array-like, shape = [n_samples]
Transformed target
"""
Xt, yt, fit_params = self._fit(X, y, **fit_params)
if isinstance(self._final_estimator, XyTransformerMixin):
Xt, yt, _ = self._final_estimator.fit_transform(Xt, yt)
else:
if hasattr(self._final_estimator, 'fit_transform'):
Xt = self._final_estimator.fit_transform(Xt, yt)
else:
self._final_estimator.fit(Xt, yt)
Xt = self._final_estimator.transform(Xt)
self.N_fit = len(yt)
return Xt, yt | python | def fit_transform(self, X, y=None, **fit_params):
Xt, yt, fit_params = self._fit(X, y, **fit_params)
if isinstance(self._final_estimator, XyTransformerMixin):
Xt, yt, _ = self._final_estimator.fit_transform(Xt, yt)
else:
if hasattr(self._final_estimator, 'fit_transform'):
Xt = self._final_estimator.fit_transform(Xt, yt)
else:
self._final_estimator.fit(Xt, yt)
Xt = self._final_estimator.transform(Xt)
self.N_fit = len(yt)
return Xt, yt | [
"def",
"fit_transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"*",
"*",
"fit_params",
")",
":",
"Xt",
",",
"yt",
",",
"fit_params",
"=",
"self",
".",
"_fit",
"(",
"X",
",",
"y",
",",
"*",
"*",
"fit_params",
")",
"if",
"isinstance",
"(",
"self",
".",
"_final_estimator",
",",
"XyTransformerMixin",
")",
":",
"Xt",
",",
"yt",
",",
"_",
"=",
"self",
".",
"_final_estimator",
".",
"fit_transform",
"(",
"Xt",
",",
"yt",
")",
"else",
":",
"if",
"hasattr",
"(",
"self",
".",
"_final_estimator",
",",
"'fit_transform'",
")",
":",
"Xt",
"=",
"self",
".",
"_final_estimator",
".",
"fit_transform",
"(",
"Xt",
",",
"yt",
")",
"else",
":",
"self",
".",
"_final_estimator",
".",
"fit",
"(",
"Xt",
",",
"yt",
")",
"Xt",
"=",
"self",
".",
"_final_estimator",
".",
"transform",
"(",
"Xt",
")",
"self",
".",
"N_fit",
"=",
"len",
"(",
"yt",
")",
"return",
"Xt",
",",
"yt"
] | Fit the model and transform with the final estimator
Fits all the transforms one after the other and transforms the
data, then uses fit_transform on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
Transformed samples
yt : array-like, shape = [n_samples]
Transformed target | [
"Fit",
"the",
"model",
"and",
"transform",
"with",
"the",
"final",
"estimator",
"Fits",
"all",
"the",
"transforms",
"one",
"after",
"the",
"other",
"and",
"transforms",
"the",
"data",
"then",
"uses",
"fit_transform",
"on",
"transformed",
"data",
"with",
"the",
"final",
"estimator",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L173-L214 |
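Hedged usage sketch, assuming seglearn's SegmentX and FeatureRep transformers and caller-supplied time series data X, y:

from sklearn.ensemble import RandomForestClassifier
from seglearn.pipe import Pype
from seglearn.transform import FeatureRep, SegmentX

pipe = Pype([('seg', SegmentX(width=100, overlap=0.5)),
             ('features', FeatureRep()),
             ('rf', RandomForestClassifier())])

# unlike sklearn's Pipeline.fit_transform, both Xt and the transformed
# target yt are returned, because segmentation changes n_samples
Xt, yt = pipe.fit_transform(X, y)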
251,457 | dmbee/seglearn | seglearn/pipe.py | Pype.predict | def predict(self, X):
"""
Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
yp : array-like
Predicted transformed target
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict(Xt) | python | def predict(self, X):
Xt, _, _ = self._transform(X)
return self._final_estimator.predict(Xt) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"_",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
")",
"return",
"self",
".",
"_final_estimator",
".",
"predict",
"(",
"Xt",
")"
] | Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
yp : array-like
Predicted transformed target | [
"Apply",
"transforms",
"to",
"the",
"data",
"and",
"predict",
"with",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L216-L232 |
251,458 | dmbee/seglearn | seglearn/pipe.py | Pype.transform_predict | def transform_predict(self, X, y):
"""
Apply transforms to the data, and predict with the final estimator.
Unlike predict, this also returns the transformed target
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : array-like
target
Returns
-------
yt : array-like
Transformed target
yp : array-like
Predicted transformed target
"""
Xt, yt, _ = self._transform(X, y)
yp = self._final_estimator.predict(Xt)
return yt, yp | python | def transform_predict(self, X, y):
Xt, yt, _ = self._transform(X, y)
yp = self._final_estimator.predict(Xt)
return yt, yp | [
"def",
"transform_predict",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"Xt",
",",
"yt",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
",",
"y",
")",
"yp",
"=",
"self",
".",
"_final_estimator",
".",
"predict",
"(",
"Xt",
")",
"return",
"yt",
",",
"yp"
] | Apply transforms to the data, and predict with the final estimator.
Unlike predict, this also returns the transformed target
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : array-like
target
Returns
-------
yt : array-like
Transformed target
yp : array-like
Predicted transformed target | [
"Apply",
"transforms",
"to",
"the",
"data",
"and",
"predict",
"with",
"the",
"final",
"estimator",
".",
"Unlike",
"predict",
"this",
"also",
"returns",
"the",
"transformed",
"target"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L234-L256 |
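Because segmentation changes n_samples, ordinary metrics need the segment-aligned target; a sketch continuing the pipeline above (X_test, y_test are assumed held-out data):

from sklearn.metrics import f1_score

yt, yp = pipe.transform_predict(X_test, y_test)
print(f1_score(yt, yp, average='macro'))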
251,459 | dmbee/seglearn | seglearn/pipe.py | Pype.score | def score(self, X, y=None, sample_weight=None):
"""
Apply transforms, and score with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float
"""
Xt, yt, swt = self._transform(X, y, sample_weight)
self.N_test = len(yt)
score_params = {}
if swt is not None:
score_params['sample_weight'] = swt
if self.scorer is None:
return self._final_estimator.score(Xt, yt, **score_params)
return self.scorer(self._final_estimator, Xt, yt, **score_params) | python | def score(self, X, y=None, sample_weight=None):
Xt, yt, swt = self._transform(X, y, sample_weight)
self.N_test = len(yt)
score_params = {}
if swt is not None:
score_params['sample_weight'] = swt
if self.scorer is None:
return self._final_estimator.score(Xt, yt, **score_params)
return self.scorer(self._final_estimator, Xt, yt, **score_params) | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"Xt",
",",
"yt",
",",
"swt",
"=",
"self",
".",
"_transform",
"(",
"X",
",",
"y",
",",
"sample_weight",
")",
"self",
".",
"N_test",
"=",
"len",
"(",
"yt",
")",
"score_params",
"=",
"{",
"}",
"if",
"swt",
"is",
"not",
"None",
":",
"score_params",
"[",
"'sample_weight'",
"]",
"=",
"swt",
"if",
"self",
".",
"scorer",
"is",
"None",
":",
"return",
"self",
".",
"_final_estimator",
".",
"score",
"(",
"Xt",
",",
"yt",
",",
"*",
"*",
"score_params",
")",
"return",
"self",
".",
"scorer",
"(",
"self",
".",
"_final_estimator",
",",
"Xt",
",",
"yt",
",",
"*",
"*",
"score_params",
")"
] | Apply transforms, and score with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float | [
"Apply",
"transforms",
"and",
"score",
"with",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L258-L290 |
251,460 | dmbee/seglearn | seglearn/pipe.py | Pype.predict_proba | def predict_proba(self, X):
"""
Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_proba(Xt) | python | def predict_proba(self, X):
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_proba(Xt) | [
"def",
"predict_proba",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"_",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
")",
"return",
"self",
".",
"_final_estimator",
".",
"predict_proba",
"(",
"Xt",
")"
] | Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
Predicted probability of each class | [
"Apply",
"transforms",
"and",
"predict_proba",
"of",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L292-L308 |
251,461 | dmbee/seglearn | seglearn/pipe.py | Pype.decision_function | def decision_function(self, X):
"""
Apply transforms, and decision_function of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.decision_function(Xt) | python | def decision_function(self, X):
Xt, _, _ = self._transform(X)
return self._final_estimator.decision_function(Xt) | [
"def",
"decision_function",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"_",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
")",
"return",
"self",
".",
"_final_estimator",
".",
"decision_function",
"(",
"Xt",
")"
] | Apply transforms, and decision_function of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes] | [
"Apply",
"transforms",
"and",
"decision_function",
"of",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L310-L325 |
251,462 | dmbee/seglearn | seglearn/pipe.py | Pype.predict_log_proba | def predict_log_proba(self, X):
"""
Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_log_proba(Xt) | python | def predict_log_proba(self, X):
Xt, _, _ = self._transform(X)
return self._final_estimator.predict_log_proba(Xt) | [
"def",
"predict_log_proba",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"_",
",",
"_",
"=",
"self",
".",
"_transform",
"(",
"X",
")",
"return",
"self",
".",
"_final_estimator",
".",
"predict_log_proba",
"(",
"Xt",
")"
] | Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes] | [
"Apply",
"transforms",
"and",
"predict_log_proba",
"of",
"the",
"final",
"estimator"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L327-L342 |
251,463 | dmbee/seglearn | seglearn/feature_functions.py | base_features | def base_features():
''' Returns dictionary of some basic features that can be calculated for segmented time
series data '''
features = {'mean': mean,
'median': median,
'abs_energy': abs_energy,
'std': std,
'var': var,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mse': mse,
'mnx': mean_crossings}
return features | python | def base_features():
''' Returns dictionary of some basic features that can be calculated for segmented time
series data '''
features = {'mean': mean,
'median': median,
'abs_energy': abs_energy,
'std': std,
'var': var,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mse': mse,
'mnx': mean_crossings}
return features | [
"def",
"base_features",
"(",
")",
":",
"features",
"=",
"{",
"'mean'",
":",
"mean",
",",
"'median'",
":",
"median",
",",
"'abs_energy'",
":",
"abs_energy",
",",
"'std'",
":",
"std",
",",
"'var'",
":",
"var",
",",
"'min'",
":",
"minimum",
",",
"'max'",
":",
"maximum",
",",
"'skew'",
":",
"skew",
",",
"'kurt'",
":",
"kurt",
",",
"'mse'",
":",
"mse",
",",
"'mnx'",
":",
"mean_crossings",
"}",
"return",
"features"
] | Returns dictionary of some basic features that can be calculated for segmented time
series data | [
"Returns",
"dictionary",
"of",
"some",
"basic",
"features",
"that",
"can",
"be",
"calculated",
"for",
"segmented",
"time",
"series",
"data"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L37-L51 |
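These dictionaries are plain dicts mapping names to callables, so the feature set can be customised before handing it to a feature transformer; a sketch assuming seglearn's FeatureRep:

from seglearn.feature_functions import base_features, means_abs_diff
from seglearn.transform import FeatureRep

fts = base_features()
fts['mean_abs_diff'] = means_abs_diff   # add a feature
del fts['mnx']                          # or drop one
features = FeatureRep(features=fts)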
251,464 | dmbee/seglearn | seglearn/feature_functions.py | all_features | def all_features():
''' Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute
'''
features = {'mean': mean,
'median': median,
'gmean': gmean,
'hmean': hmean,
'vec_sum': vec_sum,
'abs_sum': abs_sum,
'abs_energy': abs_energy,
'std': std,
'var': var,
'variation': variation,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mean_diff': mean_diff,
'mean_abs_diff': means_abs_diff,
'mse': mse,
'mnx': mean_crossings,
'hist4': hist(),
'corr': corr2,
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(),
'slope_sign_changes': slope_sign_changes(),
'waveform_length': waveform_length,
'emg_var': emg_var,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude()}
return features | python | def all_features():
''' Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute
'''
features = {'mean': mean,
'median': median,
'gmean': gmean,
'hmean': hmean,
'vec_sum': vec_sum,
'abs_sum': abs_sum,
'abs_energy': abs_energy,
'std': std,
'var': var,
'variation': variation,
'min': minimum,
'max': maximum,
'skew': skew,
'kurt': kurt,
'mean_diff': mean_diff,
'mean_abs_diff': means_abs_diff,
'mse': mse,
'mnx': mean_crossings,
'hist4': hist(),
'corr': corr2,
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(),
'slope_sign_changes': slope_sign_changes(),
'waveform_length': waveform_length,
'emg_var': emg_var,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude()}
return features | [
"def",
"all_features",
"(",
")",
":",
"features",
"=",
"{",
"'mean'",
":",
"mean",
",",
"'median'",
":",
"median",
",",
"'gmean'",
":",
"gmean",
",",
"'hmean'",
":",
"hmean",
",",
"'vec_sum'",
":",
"vec_sum",
",",
"'abs_sum'",
":",
"abs_sum",
",",
"'abs_energy'",
":",
"abs_energy",
",",
"'std'",
":",
"std",
",",
"'var'",
":",
"var",
",",
"'variation'",
":",
"variation",
",",
"'min'",
":",
"minimum",
",",
"'max'",
":",
"maximum",
",",
"'skew'",
":",
"skew",
",",
"'kurt'",
":",
"kurt",
",",
"'mean_diff'",
":",
"mean_diff",
",",
"'mean_abs_diff'",
":",
"means_abs_diff",
",",
"'mse'",
":",
"mse",
",",
"'mnx'",
":",
"mean_crossings",
",",
"'hist4'",
":",
"hist",
"(",
")",
",",
"'corr'",
":",
"corr2",
",",
"'mean_abs_value'",
":",
"mean_abs",
",",
"'zero_crossings'",
":",
"zero_crossing",
"(",
")",
",",
"'slope_sign_changes'",
":",
"slope_sign_changes",
"(",
")",
",",
"'waveform_length'",
":",
"waveform_length",
",",
"'emg_var'",
":",
"emg_var",
",",
"'root_mean_square'",
":",
"root_mean_square",
",",
"'willison_amplitude'",
":",
"willison_amplitude",
"(",
")",
"}",
"return",
"features"
] | Returns dictionary of all features in the module
.. note:: Some of the features (hist4, corr) are relatively expensive to compute | [
"Returns",
"dictionary",
"of",
"all",
"features",
"in",
"the",
"module"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L54-L86 |
251,465 | dmbee/seglearn | seglearn/feature_functions.py | emg_features | def emg_features(threshold=0):
'''Return a dictionary of popular features used for EMG time series classification.'''
return {
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(threshold),
'slope_sign_changes': slope_sign_changes(threshold),
'waveform_length': waveform_length,
'integrated_emg': abs_sum,
'emg_var': emg_var,
'simple square integral': abs_energy,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude(threshold),
} | python | def emg_features(threshold=0):
'''Return a dictionary of popular features used for EMG time series classification.'''
return {
'mean_abs_value': mean_abs,
'zero_crossings': zero_crossing(threshold),
'slope_sign_changes': slope_sign_changes(threshold),
'waveform_length': waveform_length,
'integrated_emg': abs_sum,
'emg_var': emg_var,
'simple square integral': abs_energy,
'root_mean_square': root_mean_square,
'willison_amplitude': willison_amplitude(threshold),
} | [
"def",
"emg_features",
"(",
"threshold",
"=",
"0",
")",
":",
"return",
"{",
"'mean_abs_value'",
":",
"mean_abs",
",",
"'zero_crossings'",
":",
"zero_crossing",
"(",
"threshold",
")",
",",
"'slope_sign_changes'",
":",
"slope_sign_changes",
"(",
"threshold",
")",
",",
"'waveform_length'",
":",
"waveform_length",
",",
"'integrated_emg'",
":",
"abs_sum",
",",
"'emg_var'",
":",
"emg_var",
",",
"'simple square integral'",
":",
"abs_energy",
",",
"'root_mean_square'",
":",
"root_mean_square",
",",
"'willison_amplitude'",
":",
"willison_amplitude",
"(",
"threshold",
")",
",",
"}"
] | Return a dictionary of popular features used for EMG time series classification. | [
"Return",
"a",
"dictionary",
"of",
"popular",
"features",
"used",
"for",
"EMG",
"time",
"series",
"classification",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L99-L111 |
251,466 | dmbee/seglearn | seglearn/feature_functions.py | means_abs_diff | def means_abs_diff(X):
''' mean absolute temporal derivative '''
return np.mean(np.abs(np.diff(X, axis=1)), axis=1) | python | def means_abs_diff(X):
''' mean absolute temporal derivative '''
return np.mean(np.abs(np.diff(X, axis=1)), axis=1) | [
"def",
"means_abs_diff",
"(",
"X",
")",
":",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")"
] | mean absolute temporal derivative | [
"mean",
"absolute",
"temporal",
"derivative"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L189-L191 |
251,467 | dmbee/seglearn | seglearn/feature_functions.py | mse | def mse(X):
''' computes mean spectral energy for each variable in a segmented time series '''
return np.mean(np.square(np.abs(np.fft.fft(X, axis=1))), axis=1) | python | def mse(X):
''' computes mean spectral energy for each variable in a segmented time series '''
return np.mean(np.square(np.abs(np.fft.fft(X, axis=1))), axis=1) | [
"def",
"mse",
"(",
"X",
")",
":",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"square",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"fft",
".",
"fft",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
")",
",",
"axis",
"=",
"1",
")"
] | computes mean spectral energy for each variable in a segmented time series | [
"computes",
"mean",
"spectral",
"energy",
"for",
"each",
"variable",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L194-L196 |
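Worked example: a constant segment of ones with width w has DFT [w, 0, ..., 0], so the mean spectral energy is w**2 / w = w:

import numpy as np
from seglearn.feature_functions import mse

X = np.ones((1, 4))   # one segment, width 4
print(mse(X))         # [4.]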
251,468 | dmbee/seglearn | seglearn/feature_functions.py | mean_crossings | def mean_crossings(X):
''' Computes number of mean crossings for each variable in a segmented time series '''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
mnx = np.zeros((N, D))
for i in range(D):
pos = X[:, :, i] > 0
npos = ~pos
c = (pos[:, :-1] & npos[:, 1:]) | (npos[:, :-1] & pos[:, 1:])
mnx[:, i] = np.count_nonzero(c, axis=1)
return mnx | python | def mean_crossings(X):
''' Computes number of mean crossings for each variable in a segmented time series '''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
mnx = np.zeros((N, D))
for i in range(D):
pos = X[:, :, i] > 0
npos = ~pos
c = (pos[:, :-1] & npos[:, 1:]) | (npos[:, :-1] & pos[:, 1:])
mnx[:, i] = np.count_nonzero(c, axis=1)
return mnx | [
"def",
"mean_crossings",
"(",
"X",
")",
":",
"X",
"=",
"np",
".",
"atleast_3d",
"(",
"X",
")",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"D",
"=",
"X",
".",
"shape",
"[",
"2",
"]",
"mnx",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"D",
")",
")",
"for",
"i",
"in",
"range",
"(",
"D",
")",
":",
"pos",
"=",
"X",
"[",
":",
",",
":",
",",
"i",
"]",
">",
"0",
"npos",
"=",
"~",
"pos",
"c",
"=",
"(",
"pos",
"[",
":",
",",
":",
"-",
"1",
"]",
"&",
"npos",
"[",
":",
",",
"1",
":",
"]",
")",
"|",
"(",
"npos",
"[",
":",
",",
":",
"-",
"1",
"]",
"&",
"pos",
"[",
":",
",",
"1",
":",
"]",
")",
"mnx",
"[",
":",
",",
"i",
"]",
"=",
"np",
".",
"count_nonzero",
"(",
"c",
",",
"axis",
"=",
"1",
")",
"return",
"mnx"
] | Computes number of mean crossings for each variable in a segmented time series | [
"Computes",
"number",
"of",
"mean",
"crossings",
"for",
"each",
"variable",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L199-L210 |
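Note that the implementation tests X > 0, i.e. it counts zero crossings, so the name is accurate only for mean-subtracted segments. A worked example:

import numpy as np
from seglearn.feature_functions import mean_crossings

X = np.array([[1., -1., 2., -2.]])   # one segment, one variable
print(mean_crossings(X))             # [[3.]]  three sign changes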
251,469 | dmbee/seglearn | seglearn/feature_functions.py | corr2 | def corr2(X):
''' computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series
'''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
if D == 1:
return np.zeros(N, dtype=np.float)
trii = np.triu_indices(D, k=1)
DD = len(trii[0])
r = np.zeros((N, DD))
for i in np.arange(N):
rmat = np.corrcoef(X[i]) # get the ith window from each signal, result will be DxD
r[i] = rmat[trii]
return r | python | def corr2(X):
''' computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series
'''
X = np.atleast_3d(X)
N = X.shape[0]
D = X.shape[2]
if D == 1:
return np.zeros(N, dtype=np.float)
trii = np.triu_indices(D, k=1)
DD = len(trii[0])
r = np.zeros((N, DD))
for i in np.arange(N):
rmat = np.corrcoef(X[i]) # get the ith window from each signal, result will be DxD
r[i] = rmat[trii]
return r | [
"def",
"corr2",
"(",
"X",
")",
":",
"X",
"=",
"np",
".",
"atleast_3d",
"(",
"X",
")",
"N",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"D",
"=",
"X",
".",
"shape",
"[",
"2",
"]",
"if",
"D",
"==",
"1",
":",
"return",
"np",
".",
"zeros",
"(",
"N",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"trii",
"=",
"np",
".",
"triu_indices",
"(",
"D",
",",
"k",
"=",
"1",
")",
"DD",
"=",
"len",
"(",
"trii",
"[",
"0",
"]",
")",
"r",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"DD",
")",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
":",
"rmat",
"=",
"np",
".",
"corrcoef",
"(",
"X",
"[",
"i",
"]",
")",
"# get the ith window from each signal, result will be DxD",
"r",
"[",
"i",
"]",
"=",
"rmat",
"[",
"trii",
"]",
"return",
"r"
] | computes correlations between all variable pairs in a segmented time series
.. note:: this feature is expensive to compute with the current implementation, and cannot be
used with univariate time series | [
"computes",
"correlations",
"between",
"all",
"variable",
"pairs",
"in",
"a",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L241-L260 |
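Two caveats worth flagging: np.corrcoef treats rows as variables by default, so obtaining the DxD channel matrix the inline comment describes from a (width, D) window requires rowvar=False (or a transpose); and np.float was removed in NumPy 1.24 (plain float works instead). A standalone hedged sketch of per-window channel correlations:

import numpy as np

def window_channel_corr(X):
    X = np.atleast_3d(X)                 # (n_segments, width, D)
    N, _, D = X.shape
    trii = np.triu_indices(D, k=1)       # D*(D-1)/2 unordered channel pairs
    r = np.zeros((N, len(trii[0])))
    for i in range(N):
        r[i] = np.corrcoef(X[i], rowvar=False)[trii]
    return r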
251,470 | dmbee/seglearn | seglearn/feature_functions.py | waveform_length | def waveform_length(X):
''' cumulative length of the waveform over a segment for each variable in the segmented time
series '''
return np.sum(np.abs(np.diff(X, axis=1)), axis=1) | python | def waveform_length(X):
''' cumulative length of the waveform over a segment for each variable in the segmented time
series '''
return np.sum(np.abs(np.diff(X, axis=1)), axis=1) | [
"def",
"waveform_length",
"(",
"X",
")",
":",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"X",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
")"
] | cumulative length of the waveform over a segment for each variable in the segmented time
series | [
"cumulative",
"length",
"of",
"the",
"waveform",
"over",
"a",
"segment",
"for",
"each",
"variable",
"in",
"the",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L299-L302 |
251,471 | dmbee/seglearn | seglearn/feature_functions.py | root_mean_square | def root_mean_square(X):
''' root mean square for each variable in the segmented time series '''
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width) | python | def root_mean_square(X):
''' root mean square for each variable in the segmented time series '''
segment_width = X.shape[1]
return np.sqrt(np.sum(X * X, axis=1) / segment_width) | [
"def",
"root_mean_square",
"(",
"X",
")",
":",
"segment_width",
"=",
"X",
".",
"shape",
"[",
"1",
"]",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"X",
"*",
"X",
",",
"axis",
"=",
"1",
")",
"/",
"segment_width",
")"
] | root mean square for each variable in the segmented time series | [
"root",
"mean",
"square",
"for",
"each",
"variable",
"in",
"the",
"segmented",
"time",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/feature_functions.py#L305-L308 |
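A quick numerical check of the two waveform one-liners above:

import numpy as np
from seglearn.feature_functions import root_mean_square, waveform_length

X = np.array([[3., 4., 3., 4.]])   # one segment, width 4
print(root_mean_square(X))         # [3.53553391]  = sqrt((9+16+9+16)/4)
print(waveform_length(X))          # [3.]          = |4-3| + |3-4| + |4-3|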
251,472 | dmbee/seglearn | seglearn/split.py | TemporalKFold.split | def split(self, X, y):
'''
Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [n_splits, 2]
Splitting indices
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
Ns = len(Xt)
Xt_new, y_new = self._ts_slice(Xt, y)
if Xc is not None:
Xc_new = np.concatenate([Xc] * self.n_splits)
X_new = TS_Data(Xt_new, Xc_new)
else:
X_new = np.array(Xt_new)
cv = self._make_indices(Ns)
return X_new, y_new, cv | python | def split(self, X, y):
'''
Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [n_splits, 2]
Splitting indices
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
Ns = len(Xt)
Xt_new, y_new = self._ts_slice(Xt, y)
if Xc is not None:
Xc_new = np.concatenate([Xc] * self.n_splits)
X_new = TS_Data(Xt_new, Xc_new)
else:
X_new = np.array(Xt_new)
cv = self._make_indices(Ns)
return X_new, y_new, cv | [
"def",
"split",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"Ns",
"=",
"len",
"(",
"Xt",
")",
"Xt_new",
",",
"y_new",
"=",
"self",
".",
"_ts_slice",
"(",
"Xt",
",",
"y",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xc_new",
"=",
"np",
".",
"concatenate",
"(",
"[",
"Xc",
"]",
"*",
"self",
".",
"n_splits",
")",
"X_new",
"=",
"TS_Data",
"(",
"Xt_new",
",",
"Xc_new",
")",
"else",
":",
"X_new",
"=",
"np",
".",
"array",
"(",
"Xt_new",
")",
"cv",
"=",
"self",
".",
"_make_indices",
"(",
"Ns",
")",
"return",
"X_new",
",",
"y_new",
",",
"cv"
] | Splits time series data and target arrays, and generates splitting indices
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ]
target vector
Returns
-------
X : array-like, shape [n_series * n_splits, ]
Split time series data and contextual data
y : array-like, shape [n_series * n_splits]
Split target data
cv : list, shape [n_splits, 2]
Splitting indices | [
"Splits",
"time",
"series",
"data",
"and",
"target",
"arrays",
"and",
"generates",
"splitting",
"indices"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L56-L90 |
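Hedged usage sketch: the returned cv, a list of (train, test) index pairs, can be passed straight to sklearn's model-selection utilities. Here `pipe` is an assumed seglearn Pype estimator and X, y are caller-supplied series.

from sklearn.model_selection import cross_validate
from seglearn.split import TemporalKFold

splitter = TemporalKFold(n_splits=3)
X_new, y_new, cv = splitter.split(X, y)
results = cross_validate(pipe, X_new, y_new, cv=cv)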
251,473 | dmbee/seglearn | seglearn/split.py | TemporalKFold._ts_slice | def _ts_slice(self, Xt, y):
''' takes time series data, and splits each series into temporal folds '''
Ns = len(Xt)
Xt_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(Xt[j]) / self.n_splits)
Xt_new.append(Xt[j][(Njs * i):(Njs * (i + 1))])
Xt_new = np.array(Xt_new)
if len(np.atleast_1d(y[0])) == len(Xt[0]):
# y is a time series
y_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(y[j]) / self.n_splits)
y_new.append(y[j][(Njs * i):(Njs * (i + 1))])
y_new = np.array(y_new)
else:
# y is contextual to each series
y_new = np.concatenate([y for i in range(self.n_splits)])
return Xt_new, y_new | python | def _ts_slice(self, Xt, y):
''' takes time series data, and splits each series into temporal folds '''
Ns = len(Xt)
Xt_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(Xt[j]) / self.n_splits)
Xt_new.append(Xt[j][(Njs * i):(Njs * (i + 1))])
Xt_new = np.array(Xt_new)
if len(np.atleast_1d(y[0])) == len(Xt[0]):
# y is a time series
y_new = []
for i in range(self.n_splits):
for j in range(Ns):
Njs = int(len(y[j]) / self.n_splits)
y_new.append(y[j][(Njs * i):(Njs * (i + 1))])
y_new = np.array(y_new)
else:
# y is contextual to each series
y_new = np.concatenate([y for i in range(self.n_splits)])
return Xt_new, y_new | [
"def",
"_ts_slice",
"(",
"self",
",",
"Xt",
",",
"y",
")",
":",
"Ns",
"=",
"len",
"(",
"Xt",
")",
"Xt_new",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"for",
"j",
"in",
"range",
"(",
"Ns",
")",
":",
"Njs",
"=",
"int",
"(",
"len",
"(",
"Xt",
"[",
"j",
"]",
")",
"/",
"self",
".",
"n_splits",
")",
"Xt_new",
".",
"append",
"(",
"Xt",
"[",
"j",
"]",
"[",
"(",
"Njs",
"*",
"i",
")",
":",
"(",
"Njs",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
")",
"Xt_new",
"=",
"np",
".",
"array",
"(",
"Xt_new",
")",
"if",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"y",
"[",
"0",
"]",
")",
")",
"==",
"len",
"(",
"Xt",
"[",
"0",
"]",
")",
":",
"# y is a time series",
"y_new",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"for",
"j",
"in",
"range",
"(",
"Ns",
")",
":",
"Njs",
"=",
"int",
"(",
"len",
"(",
"y",
"[",
"j",
"]",
")",
"/",
"self",
".",
"n_splits",
")",
"y_new",
".",
"append",
"(",
"y",
"[",
"j",
"]",
"[",
"(",
"Njs",
"*",
"i",
")",
":",
"(",
"Njs",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
")",
"y_new",
"=",
"np",
".",
"array",
"(",
"y_new",
")",
"else",
":",
"# y is contextual to each series",
"y_new",
"=",
"np",
".",
"concatenate",
"(",
"[",
"y",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
")",
"return",
"Xt_new",
",",
"y_new"
] | takes time series data, and splits each series into temporal folds | [
"takes",
"time",
"series",
"data",
"and",
"splits",
"each",
"series",
"into",
"temporal",
"folds"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L92-L114 |
251,474 | dmbee/seglearn | seglearn/split.py | TemporalKFold._make_indices | def _make_indices(self, Ns):
''' makes indices for cross validation '''
N_new = int(Ns * self.n_splits)
test = [np.full(N_new, False) for i in range(self.n_splits)]
for i in range(self.n_splits):
test[i][np.arange(Ns * i, Ns * (i + 1))] = True
train = [np.logical_not(test[i]) for i in range(self.n_splits)]
test = [np.arange(N_new)[test[i]] for i in range(self.n_splits)]
train = [np.arange(N_new)[train[i]] for i in range(self.n_splits)]
cv = list(zip(train, test))
return cv | python | def _make_indices(self, Ns):
''' makes indices for cross validation '''
N_new = int(Ns * self.n_splits)
test = [np.full(N_new, False) for i in range(self.n_splits)]
for i in range(self.n_splits):
test[i][np.arange(Ns * i, Ns * (i + 1))] = True
train = [np.logical_not(test[i]) for i in range(self.n_splits)]
test = [np.arange(N_new)[test[i]] for i in range(self.n_splits)]
train = [np.arange(N_new)[train[i]] for i in range(self.n_splits)]
cv = list(zip(train, test))
return cv | [
"def",
"_make_indices",
"(",
"self",
",",
"Ns",
")",
":",
"N_new",
"=",
"int",
"(",
"Ns",
"*",
"self",
".",
"n_splits",
")",
"test",
"=",
"[",
"np",
".",
"full",
"(",
"N_new",
",",
"False",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
":",
"test",
"[",
"i",
"]",
"[",
"np",
".",
"arange",
"(",
"Ns",
"*",
"i",
",",
"Ns",
"*",
"(",
"i",
"+",
"1",
")",
")",
"]",
"=",
"True",
"train",
"=",
"[",
"np",
".",
"logical_not",
"(",
"test",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"test",
"=",
"[",
"np",
".",
"arange",
"(",
"N_new",
")",
"[",
"test",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"train",
"=",
"[",
"np",
".",
"arange",
"(",
"N_new",
")",
"[",
"train",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_splits",
")",
"]",
"cv",
"=",
"list",
"(",
"zip",
"(",
"train",
",",
"test",
")",
")",
"return",
"cv"
] | makes indices for cross validation | [
"makes",
"indices",
"for",
"cross",
"validation"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/split.py#L116-L129 |
251,475 | dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder.transform | def transform(self, X, y, sample_weight=None):
'''
Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights
'''
check_ts_data_with_ts_target(X, y)
Xt, Xc = get_ts_data_parts(X)
N = len(Xt) # number of time series
# transformed data
yt = []
Xtt = []
swt = sample_weight
Nt = []
for i in range(N):
Xi, yi = self._transform(Xt[i], y[i])
yt += yi
Xtt += Xi
Nt.append(len(yi)) # number of contiguous class instances
if Xc is not None:
Xct = expand_variables_to_segments(Xc, Nt)
Xtt = TS_Data(Xtt, Xct)
if sample_weight is not None:
swt = expand_variables_to_segments(sample_weight, Nt)
return Xtt, yt, swt | python | def transform(self, X, y, sample_weight=None):
'''
Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights
'''
check_ts_data_with_ts_target(X, y)
Xt, Xc = get_ts_data_parts(X)
N = len(Xt) # number of time series
# transformed data
yt = []
Xtt = []
swt = sample_weight
Nt = []
for i in range(N):
Xi, yi = self._transform(Xt[i], y[i])
yt += yi
Xtt += Xi
Nt.append(len(yi)) # number of contiguous class instances
if Xc is not None:
Xct = expand_variables_to_segments(Xc, Nt)
Xtt = TS_Data(Xtt, Xct)
if sample_weight is not None:
swt = expand_variables_to_segments(sample_weight, Nt)
return Xtt, yt, swt | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data_with_ts_target",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"N",
"=",
"len",
"(",
"Xt",
")",
"# number of time series",
"# transformed data",
"yt",
"=",
"[",
"]",
"Xtt",
"=",
"[",
"]",
"swt",
"=",
"sample_weight",
"Nt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"Xi",
",",
"yi",
"=",
"self",
".",
"_transform",
"(",
"Xt",
"[",
"i",
"]",
",",
"y",
"[",
"i",
"]",
")",
"yt",
"+=",
"yi",
"Xtt",
"+=",
"Xi",
"Nt",
".",
"append",
"(",
"len",
"(",
"yi",
")",
")",
"# number of contiguous class instances",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xct",
"=",
"expand_variables_to_segments",
"(",
"Xc",
",",
"Nt",
")",
"Xtt",
"=",
"TS_Data",
"(",
"Xtt",
",",
"Xct",
")",
"if",
"sample_weight",
"is",
"not",
"None",
":",
"swt",
"=",
"expand_variables_to_segments",
"(",
"sample_weight",
",",
"Nt",
")",
"return",
"Xtt",
",",
"yt",
",",
"swt"
] | Transforms the time series data with run length encoding of the target variable
Note this transformation changes the number of samples in the data
If sample_weight is provided, it is transformed to align to the new target encoding
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series, ...]
target variable encoded as a time series
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_rle_series, ]
transformed time series data
yt : array-like, shape [n_rle_series]
target values for each series
sample_weight_new : array-like shape [n_rle_series]
sample weights | [
"Transforms",
"the",
"time",
"series",
"data",
"with",
"run",
"length",
"encoding",
"of",
"the",
"target",
"variable",
"Note",
"this",
"transformation",
"changes",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"If",
"sample_weight",
"is",
"provided",
"it",
"is",
"transformed",
"to",
"align",
"to",
"the",
"new",
"target",
"encoding"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L66-L115 |
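Worked example: one series whose per-sample labels form three runs; runs shorter than min_length are dropped (the min_length=3 value is illustrative):

import numpy as np
from seglearn.preprocessing import TargetRunLengthEncoder

X = [np.arange(10).reshape(-1, 1)]              # one series, 10 samples
y = [np.array([0, 0, 0, 0, 1, 1, 2, 2, 2, 2])]  # runs of length 4, 2, 4

rle = TargetRunLengthEncoder(min_length=3)
Xt, yt, _ = rle.transform(X, y)
print(yt)                      # [0, 2]  the length-2 run of 1s is discarded
print([len(x) for x in Xt])    # [4, 4]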
251,476 | dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder._rle | def _rle(self, a):
'''
rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run
'''
ia = np.asarray(a)
n = len(ia)
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element position
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i]) | python | def _rle(self, a):
'''
rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run
'''
ia = np.asarray(a)
n = len(ia)
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element position
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return (z, p, ia[i]) | [
"def",
"_rle",
"(",
"self",
",",
"a",
")",
":",
"ia",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"n",
"=",
"len",
"(",
"ia",
")",
"y",
"=",
"np",
".",
"array",
"(",
"ia",
"[",
"1",
":",
"]",
"!=",
"ia",
"[",
":",
"-",
"1",
"]",
")",
"# pairwise unequal (string safe)",
"i",
"=",
"np",
".",
"append",
"(",
"np",
".",
"where",
"(",
"y",
")",
",",
"n",
"-",
"1",
")",
"# must include last element posi",
"z",
"=",
"np",
".",
"diff",
"(",
"np",
".",
"append",
"(",
"-",
"1",
",",
"i",
")",
")",
"# run lengths",
"p",
"=",
"np",
".",
"cumsum",
"(",
"np",
".",
"append",
"(",
"0",
",",
"z",
")",
")",
"[",
":",
"-",
"1",
"]",
"# positions",
"return",
"(",
"z",
",",
"p",
",",
"ia",
"[",
"i",
"]",
")"
] | rle implementation credit to Thomas Browne from his SOF post Sept 2015
Parameters
----------
a : array, shape[n,]
input vector
Returns
-------
z : array, shape[nt,]
run lengths
p : array, shape[nt,]
start positions of each run
ar : array, shape[nt,]
values for each run | [
"rle",
"implementation",
"credit",
"to",
"Thomas",
"Browne",
"from",
"his",
"SOF",
"post",
"Sept",
"2015"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L117-L141 |
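Usage note: the run-length encoding above maps a label vector to run lengths, run start positions, and run values. A minimal standalone sketch of the same logic, assuming only numpy (the helper name `rle` is illustrative):

import numpy as np

def rle(a):
    ia = np.asarray(a)
    y = ia[1:] != ia[:-1]                    # run boundaries
    i = np.append(np.where(y), len(ia) - 1)  # last index of each run
    z = np.diff(np.append(-1, i))            # run lengths
    p = np.cumsum(np.append(0, z))[:-1]      # run start positions
    return z, p, ia[i]

z, p, v = rle([0, 0, 0, 1, 1, 2])
# z -> [3 2 1], p -> [0 3 5], v -> [0 1 2]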
251,477 | dmbee/seglearn | seglearn/preprocessing.py | TargetRunLengthEncoder._transform | def _transform(self, X, y):
'''
Transforms single series
'''
z, p, y_rle = self._rle(y)
p = np.append(p, len(y))
big_enough = p[1:] - p[:-1] >= self.min_length
Xt = []
for i in range(len(y_rle)):
if (big_enough[i]):
Xt.append(X[p[i]:p[i+1]])
yt = y_rle[big_enough].tolist()
return Xt, yt | python | def _transform(self, X, y):
'''
Transforms single series
'''
z, p, y_rle = self._rle(y)
p = np.append(p, len(y))
big_enough = p[1:] - p[:-1] >= self.min_length
Xt = []
for i in range(len(y_rle)):
if (big_enough[i]):
Xt.append(X[p[i]:p[i+1]])
yt = y_rle[big_enough].tolist()
return Xt, yt | [
"def",
"_transform",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"z",
",",
"p",
",",
"y_rle",
"=",
"self",
".",
"_rle",
"(",
"y",
")",
"p",
"=",
"np",
".",
"append",
"(",
"p",
",",
"len",
"(",
"y",
")",
")",
"big_enough",
"=",
"p",
"[",
"1",
":",
"]",
"-",
"p",
"[",
":",
"-",
"1",
"]",
">=",
"self",
".",
"min_length",
"Xt",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"y_rle",
")",
")",
":",
"if",
"(",
"big_enough",
"[",
"i",
"]",
")",
":",
"Xt",
".",
"append",
"(",
"X",
"[",
"p",
"[",
"i",
"]",
":",
"p",
"[",
"i",
"+",
"1",
"]",
"]",
")",
"yt",
"=",
"y_rle",
"[",
"big_enough",
"]",
".",
"tolist",
"(",
")",
"return",
"Xt",
",",
"yt"
] | Transforms single series | [
"Transforms",
"single",
"series"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/preprocessing.py#L143-L157 |
251,478 | dmbee/seglearn | seglearn/util.py | get_ts_data_parts | def get_ts_data_parts(X):
'''
Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_context_variables]
contextual variables
'''
if not isinstance(X, TS_Data):
return X, None
return X.ts_data, X.context_data | python | def get_ts_data_parts(X):
'''
Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_context_variables]
contextual variables
'''
if not isinstance(X, TS_Data):
return X, None
return X.ts_data, X.context_data | [
"def",
"get_ts_data_parts",
"(",
"X",
")",
":",
"if",
"not",
"isinstance",
"(",
"X",
",",
"TS_Data",
")",
":",
"return",
"X",
",",
"None",
"return",
"X",
".",
"ts_data",
",",
"X",
".",
"context_data"
] | Separates time series data object into time series variables and contextual variables
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_series, ]
Time series data
Xs : array-like, shape [n_series, n_context_variables]
contextual variables | [
"Separates",
"time",
"series",
"data",
"object",
"into",
"time",
"series",
"variables",
"and",
"contextual",
"variables"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L13-L32 |
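For illustration, a hedged usage sketch (assumes TS_Data is importable from seglearn.base; the shapes are arbitrary):

import numpy as np
from seglearn.base import TS_Data  # assumed import path

Xt = [np.random.rand(100, 3), np.random.rand(80, 3)]  # two multivariate series
Xc = np.array([[0], [1]])                              # one contextual variable each
ts, context = get_ts_data_parts(TS_Data(Xt, Xc))       # -> (Xt, Xc)
ts, context = get_ts_data_parts(Xt)                    # plain input -> (Xt, None)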
251,479 | dmbee/seglearn | seglearn/util.py | check_ts_data_with_ts_target | def check_ts_data_with_ts_target(X, y=None):
'''
Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data
'''
if y is not None:
Nx = len(X)
Ny = len(y)
if Nx != Ny:
raise ValueError("Number of time series different in X (%d) and y (%d)"
% (Nx, Ny))
Xt, _ = get_ts_data_parts(X)
Ntx = np.array([len(Xt[i]) for i in np.arange(Nx)])
Nty = np.array([len(np.atleast_1d(y[i])) for i in np.arange(Nx)])
if np.count_nonzero(Nty == Ntx) == Nx:
return
else:
raise ValueError("Invalid time series lengths.\n"
"Ns: ", Nx,
"Ntx: ", Ntx,
"Nty: ", Nty) | python | def check_ts_data_with_ts_target(X, y=None):
'''
Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data
'''
if y is not None:
Nx = len(X)
Ny = len(y)
if Nx != Ny:
raise ValueError("Number of time series different in X (%d) and y (%d)"
% (Nx, Ny))
Xt, _ = get_ts_data_parts(X)
Ntx = np.array([len(Xt[i]) for i in np.arange(Nx)])
Nty = np.array([len(np.atleast_1d(y[i])) for i in np.arange(Nx)])
if np.count_nonzero(Nty == Ntx) == Nx:
return
else:
raise ValueError("Invalid time series lengths.\n"
"Ns: ", Nx,
"Ntx: ", Ntx,
"Nty: ", Nty) | [
"def",
"check_ts_data_with_ts_target",
"(",
"X",
",",
"y",
"=",
"None",
")",
":",
"if",
"y",
"is",
"not",
"None",
":",
"Nx",
"=",
"len",
"(",
"X",
")",
"Ny",
"=",
"len",
"(",
"y",
")",
"if",
"Nx",
"!=",
"Ny",
":",
"raise",
"ValueError",
"(",
"\"Number of time series different in X (%d) and y (%d)\"",
"%",
"(",
"Nx",
",",
"Ny",
")",
")",
"Xt",
",",
"_",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"Ntx",
"=",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"Xt",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"Nx",
")",
"]",
")",
"Nty",
"=",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"y",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"Nx",
")",
"]",
")",
"if",
"np",
".",
"count_nonzero",
"(",
"Nty",
"==",
"Ntx",
")",
"==",
"Nx",
":",
"return",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid time series lengths.\\n\"",
"\"Ns: \"",
",",
"Nx",
",",
"\"Ntx: \"",
",",
"Ntx",
",",
"\"Nty: \"",
",",
"Nty",
")"
] | Checks time series data with time series target is good. If not raises value error.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series, ...]
target data | [
"Checks",
"time",
"series",
"data",
"with",
"time",
"series",
"target",
"is",
"good",
".",
"If",
"not",
"raises",
"value",
"error",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L67-L96 |
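Illustration of the check (shapes are arbitrary; the second call is expected to raise):

import numpy as np
X = [np.random.rand(50, 2), np.random.rand(30, 2)]
y = [np.zeros(50), np.ones(30)]          # targets aligned sample-for-sample
check_ts_data_with_ts_target(X, y)       # passes silently
check_ts_data_with_ts_target(X, y[:1])   # raises ValueError: series counts differ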
251,480 | dmbee/seglearn | seglearn/util.py | ts_stats | def ts_stats(Xt, y, fs=1.0, class_labels=None):
'''
Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segregated by target class
'''
check_ts_data(Xt)
Xt, Xs = get_ts_data_parts(Xt)
if Xs is not None:
S = len(np.atleast_1d(Xs[0]))
else:
S = 0
C = np.max(y) + 1 # number of classes
if class_labels is None:
class_labels = np.arange(C)
N = len(Xt)
if Xt[0].ndim > 1:
D = Xt[0].shape[1]
else:
D = 1
Ti = np.array([Xt[i].shape[0] for i in range(N)], dtype=np.float64) / fs
ic = np.array([y == i for i in range(C)])
Tic = [Ti[ic[i]] for i in range(C)]
T = np.sum(Ti)
total = {"n_series": N, "n_classes": C, "n_TS_vars": D, "n_context_vars": S, "Total_Time": T,
"Series_Time_Mean": np.mean(Ti),
"Series_Time_Std": np.std(Ti),
"Series_Time_Range": (np.min(Ti), np.max(Ti))}
by_class = {"Class_labels": class_labels,
"n_series": np.array([len(Tic[i]) for i in range(C)]),
"Total_Time": np.array([np.sum(Tic[i]) for i in range(C)]),
"Series_Time_Mean": np.array([np.mean(Tic[i]) for i in range(C)]),
"Series_Time_Std": np.array([np.std(Tic[i]) for i in range(C)]),
"Series_Time_Min": np.array([np.min(Tic[i]) for i in range(C)]),
"Series_Time_Max": np.array([np.max(Tic[i]) for i in range(C)])}
results = {'total': total,
'by_class': by_class}
return results | python | def ts_stats(Xt, y, fs=1.0, class_labels=None):
'''
Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segragated by target class
'''
check_ts_data(Xt)
Xt, Xs = get_ts_data_parts(Xt)
if Xs is not None:
S = len(np.atleast_1d(Xs[0]))
else:
S = 0
C = np.max(y) + 1 # number of classes
if class_labels is None:
class_labels = np.arange(C)
N = len(Xt)
if Xt[0].ndim > 1:
D = Xt[0].shape[1]
else:
D = 1
Ti = np.array([Xt[i].shape[0] for i in range(N)], dtype=np.float64) / fs
ic = np.array([y == i for i in range(C)])
Tic = [Ti[ic[i]] for i in range(C)]
T = np.sum(Ti)
total = {"n_series": N, "n_classes": C, "n_TS_vars": D, "n_context_vars": S, "Total_Time": T,
"Series_Time_Mean": np.mean(Ti),
"Series_Time_Std": np.std(Ti),
"Series_Time_Range": (np.min(Ti), np.max(Ti))}
by_class = {"Class_labels": class_labels,
"n_series": np.array([len(Tic[i]) for i in range(C)]),
"Total_Time": np.array([np.sum(Tic[i]) for i in range(C)]),
"Series_Time_Mean": np.array([np.mean(Tic[i]) for i in range(C)]),
"Series_Time_Std": np.array([np.std(Tic[i]) for i in range(C)]),
"Series_Time_Min": np.array([np.min(Tic[i]) for i in range(C)]),
"Series_Time_Max": np.array([np.max(Tic[i]) for i in range(C)])}
results = {'total': total,
'by_class': by_class}
return results | [
"def",
"ts_stats",
"(",
"Xt",
",",
"y",
",",
"fs",
"=",
"1.0",
",",
"class_labels",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"Xt",
")",
"Xt",
",",
"Xs",
"=",
"get_ts_data_parts",
"(",
"Xt",
")",
"if",
"Xs",
"is",
"not",
"None",
":",
"S",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"Xs",
"[",
"0",
"]",
")",
")",
"else",
":",
"S",
"=",
"0",
"C",
"=",
"np",
".",
"max",
"(",
"y",
")",
"+",
"1",
"# number of classes",
"if",
"class_labels",
"is",
"None",
":",
"class_labels",
"=",
"np",
".",
"arange",
"(",
"C",
")",
"N",
"=",
"len",
"(",
"Xt",
")",
"if",
"Xt",
"[",
"0",
"]",
".",
"ndim",
">",
"1",
":",
"D",
"=",
"Xt",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"else",
":",
"D",
"=",
"1",
"Ti",
"=",
"np",
".",
"array",
"(",
"[",
"Xt",
"[",
"i",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"N",
")",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"/",
"fs",
"ic",
"=",
"np",
".",
"array",
"(",
"[",
"y",
"==",
"i",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
"Tic",
"=",
"[",
"Ti",
"[",
"ic",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
"T",
"=",
"np",
".",
"sum",
"(",
"Ti",
")",
"total",
"=",
"{",
"\"n_series\"",
":",
"N",
",",
"\"n_classes\"",
":",
"C",
",",
"\"n_TS_vars\"",
":",
"D",
",",
"\"n_context_vars\"",
":",
"S",
",",
"\"Total_Time\"",
":",
"T",
",",
"\"Series_Time_Mean\"",
":",
"np",
".",
"mean",
"(",
"Ti",
")",
",",
"\"Series_Time_Std\"",
":",
"np",
".",
"std",
"(",
"Ti",
")",
",",
"\"Series_Time_Range\"",
":",
"(",
"np",
".",
"min",
"(",
"Ti",
")",
",",
"np",
".",
"max",
"(",
"Ti",
")",
")",
"}",
"by_class",
"=",
"{",
"\"Class_labels\"",
":",
"class_labels",
",",
"\"n_series\"",
":",
"np",
".",
"array",
"(",
"[",
"len",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Total_Time\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sum",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Mean\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"mean",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Std\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"std",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Min\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"min",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
",",
"\"Series_Time_Max\"",
":",
"np",
".",
"array",
"(",
"[",
"np",
".",
"max",
"(",
"Tic",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"C",
")",
"]",
")",
"}",
"results",
"=",
"{",
"'total'",
":",
"total",
",",
"'by_class'",
":",
"by_class",
"}",
"return",
"results"
] | Generates some helpful statistics about the data X
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like, shape [n_series]
target data
fs : float
sampling frequency
class_labels : list of strings, default None
List of target class names
Returns
-------
results : dict
| Dictionary of relevant statistics for the time series data
| results['total'] has stats for the whole data set
| results['by_class'] has stats segragated by target class | [
"Generates",
"some",
"helpful",
"statistics",
"about",
"the",
"data",
"X"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/util.py#L99-L163 |
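Worked example, assuming valid seglearn-style input (series lengths, sampling rate, and class labels are illustrative):

import numpy as np
X = [np.random.rand(n, 3) for n in (100, 150, 200)]   # three series, 3 variables
y = np.array([0, 0, 1])
stats = ts_stats(X, y, fs=50.0, class_labels=['walk', 'run'])
stats['total']['Total_Time']      # -> 9.0 seconds (450 samples / 50 Hz)
stats['by_class']['n_series']     # -> array([2, 1])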
251,481 | dmbee/seglearn | seglearn/datasets.py | load_watch | def load_watch():
'''
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys())
'''
module_path = dirname(__file__)
data = np.load(module_path + "/data/watch_dataset.npy").item()
return data | python | def load_watch():
'''
Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys())
'''
module_path = dirname(__file__)
data = np.load(module_path + "/data/watch_dataset.npy").item()
return data | [
"def",
"load_watch",
"(",
")",
":",
"module_path",
"=",
"dirname",
"(",
"__file__",
")",
"data",
"=",
"np",
".",
"load",
"(",
"module_path",
"+",
"\"/data/watch_dataset.npy\"",
")",
".",
"item",
"(",
")",
"return",
"data"
] | Loads some of the 6-axis inertial sensor data from my smartwatch project. The sensor data was
recorded as study subjects performed sets of 20 shoulder exercise repetitions while wearing a
smartwatch. It is a multivariate time series.
The study can be found here: https://arxiv.org/abs/1802.01489
Returns
-------
data : dict
data['X'] : list, length 140
| inertial sensor data, each element with shape [n_samples, 6]
| sampled at 50 Hz
data['y'] : array, length 140
target vector (exercise type)
data['side'] : array, length 140
the extremity side, 1 = right, 0 = left
data['subject'] : array, length 140
the subject (participant) number
data['X_labels'] : str list, length 6
ordered labels for the sensor data variables
data['y_labels'] :str list, length 7
ordered labels for the target (exercise type)
Examples
--------
>>> from seglearn.datasets import load_watch
>>> data = load_watch()
>>> print(data.keys()) | [
"Loads",
"some",
"of",
"the",
"6",
"-",
"axis",
"inertial",
"sensor",
"data",
"from",
"my",
"smartwatch",
"project",
".",
"The",
"sensor",
"data",
"was",
"recorded",
"as",
"study",
"subjects",
"performed",
"sets",
"of",
"20",
"shoulder",
"exercise",
"repetitions",
"while",
"wearing",
"a",
"smartwatch",
".",
"It",
"is",
"a",
"multivariate",
"time",
"series",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/datasets.py#L13-L46 |
251,482 | dmbee/seglearn | seglearn/transform.py | shuffle_data | def shuffle_data(X, y=None, sample_weight=None):
''' Shuffles indices X, y, and sample_weight together'''
if len(X) > 1:
ind = np.arange(len(X), dtype=np.int)
np.random.shuffle(ind)
Xt = X[ind]
yt = y
swt = sample_weight
if yt is not None:
yt = yt[ind]
if swt is not None:
swt = swt[ind]
return Xt, yt, swt
else:
return X, y, sample_weight | python | def shuffle_data(X, y=None, sample_weight=None):
''' Shuffles indices X, y, and sample_weight together'''
if len(X) > 1:
ind = np.arange(len(X), dtype=np.int)
np.random.shuffle(ind)
Xt = X[ind]
yt = y
swt = sample_weight
if yt is not None:
yt = yt[ind]
if swt is not None:
swt = swt[ind]
return Xt, yt, swt
else:
return X, y, sample_weight | [
"def",
"shuffle_data",
"(",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"len",
"(",
"X",
")",
">",
"1",
":",
"ind",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"X",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"ind",
")",
"Xt",
"=",
"X",
"[",
"ind",
"]",
"yt",
"=",
"y",
"swt",
"=",
"sample_weight",
"if",
"yt",
"is",
"not",
"None",
":",
"yt",
"=",
"yt",
"[",
"ind",
"]",
"if",
"swt",
"is",
"not",
"None",
":",
"swt",
"=",
"swt",
"[",
"ind",
"]",
"return",
"Xt",
",",
"yt",
",",
"swt",
"else",
":",
"return",
"X",
",",
"y",
",",
"sample_weight"
] | Shuffles indices X, y, and sample_weight together | [
"Shuffles",
"indices",
"X",
"y",
"and",
"sample_weight",
"together"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L70-L86 |
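Note that the fancy indexing X[ind] requires array inputs. A small self-checking sketch:

import numpy as np
X = np.arange(10).reshape(5, 2)   # row i is [2i, 2i+1]
y = np.arange(5)
Xs, ys, _ = shuffle_data(X, y)
assert np.array_equal(Xs[:, 0] // 2, ys)   # rows and targets permuted together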
251,483 | dmbee/seglearn | seglearn/transform.py | expand_variables_to_segments | def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))]) | python | def expand_variables_to_segments(v, Nt):
''' expands contextual variables v, by repeating each instance as specified in Nt '''
N_v = len(np.atleast_1d(v[0]))
return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))]) | [
"def",
"expand_variables_to_segments",
"(",
"v",
",",
"Nt",
")",
":",
"N_v",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"v",
"[",
"0",
"]",
")",
")",
"return",
"np",
".",
"concatenate",
"(",
"[",
"np",
".",
"full",
"(",
"(",
"Nt",
"[",
"i",
"]",
",",
"N_v",
")",
",",
"v",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"v",
")",
")",
"]",
")"
] | expands contextual variables v, by repeating each instance as specified in Nt | [
"expands",
"contextual",
"variables",
"v",
"by",
"repeating",
"each",
"instance",
"as",
"specified",
"in",
"Nt"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L549-L552 |
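Concretely, each contextual value is repeated once per segment produced from its series:

import numpy as np
v = np.array([10, 20])    # one contextual value per original series
Nt = [3, 2]               # number of segments produced per series
expand_variables_to_segments(v, Nt)
# -> array([[10], [10], [10], [20], [20]])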
251,484 | dmbee/seglearn | seglearn/transform.py | sliding_window | def sliding_window(time_series, width, step, order='F'):
'''
Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments
'''
w = np.hstack([time_series[i:1 + i - width or None:step] for i in range(0, width)])
result = w.reshape((int(len(w) / width), width), order='F')
if order == 'F':
return result
else:
return np.ascontiguousarray(result) | python | def sliding_window(time_series, width, step, order='F'):
'''
Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments
'''
w = np.hstack([time_series[i:1 + i - width or None:step] for i in range(0, width)])
result = w.reshape((int(len(w) / width), width), order='F')
if order == 'F':
return result
else:
return np.ascontiguousarray(result) | [
"def",
"sliding_window",
"(",
"time_series",
",",
"width",
",",
"step",
",",
"order",
"=",
"'F'",
")",
":",
"w",
"=",
"np",
".",
"hstack",
"(",
"[",
"time_series",
"[",
"i",
":",
"1",
"+",
"i",
"-",
"width",
"or",
"None",
":",
"step",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"width",
")",
"]",
")",
"result",
"=",
"w",
".",
"reshape",
"(",
"(",
"int",
"(",
"len",
"(",
"w",
")",
"/",
"width",
")",
",",
"width",
")",
",",
"order",
"=",
"'F'",
")",
"if",
"order",
"==",
"'F'",
":",
"return",
"result",
"else",
":",
"return",
"np",
".",
"ascontiguousarray",
"(",
"result",
")"
] | Segments univariate time series with sliding window
Parameters
----------
time_series : array like shape [n_samples]
time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
w : array like shape [n_segments, width]
resampled time series segments | [
"Segments",
"univariate",
"time",
"series",
"with",
"sliding",
"window"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L555-L578 |
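Worked example of the windowing (width 4, step 2 over 10 samples yields 4 overlapping segments):

import numpy as np
sliding_window(np.arange(10), width=4, step=2)
# -> [[0 1 2 3]
#     [2 3 4 5]
#     [4 5 6 7]
#     [6 7 8 9]]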
251,485 | dmbee/seglearn | seglearn/transform.py | sliding_tensor | def sliding_tensor(mv_time_series, width, step, order='F'):
'''
segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data
'''
D = mv_time_series.shape[1]
data = [sliding_window(mv_time_series[:, j], width, step, order) for j in range(D)]
return np.stack(data, axis=2) | python | def sliding_tensor(mv_time_series, width, step, order='F'):
'''
segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data
'''
D = mv_time_series.shape[1]
data = [sliding_window(mv_time_series[:, j], width, step, order) for j in range(D)]
return np.stack(data, axis=2) | [
"def",
"sliding_tensor",
"(",
"mv_time_series",
",",
"width",
",",
"step",
",",
"order",
"=",
"'F'",
")",
":",
"D",
"=",
"mv_time_series",
".",
"shape",
"[",
"1",
"]",
"data",
"=",
"[",
"sliding_window",
"(",
"mv_time_series",
"[",
":",
",",
"j",
"]",
",",
"width",
",",
"step",
",",
"order",
")",
"for",
"j",
"in",
"range",
"(",
"D",
")",
"]",
"return",
"np",
".",
"stack",
"(",
"data",
",",
"axis",
"=",
"2",
")"
] | segments multivariate time series with sliding window
Parameters
----------
mv_time_series : array like shape [n_samples, n_variables]
multivariate time series or sequence
width : int > 0
segment width in samples
step : int > 0
stepsize for sliding in samples
Returns
-------
data : array like shape [n_segments, width, n_variables]
segmented multivariate time series data | [
"segments",
"multivariate",
"time",
"series",
"with",
"sliding",
"window"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L581-L601 |
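The multivariate case simply stacks one sliding_window result per variable along a third axis:

import numpy as np
mv = np.column_stack([np.arange(10), np.arange(10) * 10])  # 2 variables
segments = sliding_tensor(mv, width=4, step=2)
segments.shape      # -> (4, 4, 2): 4 segments, width 4, 2 variables
segments[0, :, 1]   # -> [ 0 10 20 30]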
251,486 | dmbee/seglearn | seglearn/transform.py | SegmentXY.transform | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width, self._step, self.order)
for i in np.arange(N)])
yt = np.concatenate(yt)
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None | python | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
N = len(Xt) # number of time series
if Xt[0].ndim > 1:
Xt = np.array([sliding_tensor(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
else:
Xt = np.array([sliding_window(Xt[i], self.width, self._step, self.order)
for i in np.arange(N)])
Nt = [len(Xt[i]) for i in np.arange(len(Xt))]
Xt = np.concatenate(Xt)
if Xc is not None:
Xc = expand_variables_to_segments(Xc, Nt)
Xt = TS_Data(Xt, Xc)
if yt is not None:
yt = np.array([sliding_window(yt[i], self.width, self._step, self.order)
for i in np.arange(N)])
yt = np.concatenate(yt)
yt = self.y_func(yt)
if self.shuffle is True:
check_random_state(self.random_state)
Xt, yt, _ = shuffle_data(Xt, yt)
return Xt, yt, None | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"yt",
"=",
"y",
"N",
"=",
"len",
"(",
"Xt",
")",
"# number of time series",
"if",
"Xt",
"[",
"0",
"]",
".",
"ndim",
">",
"1",
":",
"Xt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_tensor",
"(",
"Xt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"else",
":",
"Xt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_window",
"(",
"Xt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"Nt",
"=",
"[",
"len",
"(",
"Xt",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"Xt",
")",
")",
"]",
"Xt",
"=",
"np",
".",
"concatenate",
"(",
"Xt",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xc",
"=",
"expand_variables_to_segments",
"(",
"Xc",
",",
"Nt",
")",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"if",
"yt",
"is",
"not",
"None",
":",
"yt",
"=",
"np",
".",
"array",
"(",
"[",
"sliding_window",
"(",
"yt",
"[",
"i",
"]",
",",
"self",
".",
"width",
",",
"self",
".",
"_step",
",",
"self",
".",
"order",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N",
")",
"]",
")",
"yt",
"=",
"np",
".",
"concatenate",
"(",
"yt",
")",
"yt",
"=",
"self",
".",
"y_func",
"(",
"yt",
")",
"if",
"self",
".",
"shuffle",
"is",
"True",
":",
"check_random_state",
"(",
"self",
".",
"random_state",
")",
"Xt",
",",
"yt",
",",
"_",
"=",
"shuffle_data",
"(",
"Xt",
",",
"yt",
")",
"return",
"Xt",
",",
"yt",
",",
"None"
] | Transforms the time series data into segments
Note this transformation changes the number of samples in the data
If y is provided, it is segmented and transformed to align to the new samples as per
``y_func``
Currently sample weights always returned as None
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
Xt : array-like, shape [n_segments, ]
transformed time series data
yt : array-like, shape [n_segments]
expanded target vector
sample_weight_new : None | [
"Transforms",
"the",
"time",
"series",
"data",
"into",
"segments",
"Note",
"this",
"transformation",
"changes",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"If",
"y",
"is",
"provided",
"it",
"is",
"segmented",
"and",
"transformed",
"to",
"align",
"to",
"the",
"new",
"samples",
"as",
"per",
"y_func",
"Currently",
"sample",
"weights",
"always",
"returned",
"as",
"None"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L329-L385 |
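End-to-end sketch; the constructor arguments width/overlap/y_func and the `last` helper are assumptions about this version of seglearn.transform, not confirmed by the record:

import numpy as np
from seglearn.transform import SegmentXY, last  # assumed imports

X = [np.random.rand(200, 3)]     # one multivariate series
y = [np.arange(200)]             # time-series target aligned to X
seg = SegmentXY(width=50, overlap=0.5, y_func=last)
Xs, ys, _ = seg.fit(X, y).transform(X, y)
# Xs.shape -> (n_segments, 50, 3); ys has one target per segment (its last sample)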
251,487 | dmbee/seglearn | seglearn/transform.py | PadTrunc.transform | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into fixed length segments using padding and/or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
Xt = self._mv_resize(Xt)
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
yt = self._mv_resize(yt)
swt = None
elif yt is not None:
# todo: is this needed?
yt = np.array(yt)
return Xt, yt, swt | python | def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data into fixed length segments using padding and/or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None
'''
check_ts_data(X, y)
Xt, Xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
Xt = self._mv_resize(Xt)
if Xc is not None:
Xt = TS_Data(Xt, Xc)
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
yt = self._mv_resize(yt)
swt = None
elif yt is not None:
# todo: is this needed?
yt = np.array(yt)
return Xt, yt, swt | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"check_ts_data",
"(",
"X",
",",
"y",
")",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"yt",
"=",
"y",
"swt",
"=",
"sample_weight",
"Xt",
"=",
"self",
".",
"_mv_resize",
"(",
"Xt",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"if",
"yt",
"is",
"not",
"None",
"and",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"yt",
"[",
"0",
"]",
")",
")",
">",
"1",
":",
"# y is a time series",
"yt",
"=",
"self",
".",
"_mv_resize",
"(",
"yt",
")",
"swt",
"=",
"None",
"elif",
"yt",
"is",
"not",
"None",
":",
"# todo: is this needed?",
"yt",
"=",
"np",
".",
"array",
"(",
"yt",
")",
"return",
"Xt",
",",
"yt",
",",
"swt"
] | Transforms the time series data into fixed length segments using padding and/or truncation
If y is a time series and passed, it will be transformed as well
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : None | [
"Transforms",
"the",
"time",
"series",
"data",
"into",
"fixed",
"length",
"segments",
"using",
"padding",
"and",
"or",
"truncation",
"If",
"y",
"is",
"a",
"time",
"series",
"and",
"passed",
"it",
"will",
"be",
"transformed",
"as",
"well"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L655-L696 |
251,488 | dmbee/seglearn | seglearn/transform.py | InterpLongToWide._check_data | def _check_data(self, X):
'''
Checks that unique identifiers var_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
'''
if len(X) > 1:
sval = np.unique(X[0][:, 1])
if np.all([np.all(np.unique(X[i][:, 1]) == sval) for i in range(1, len(X))]):
pass
else:
raise ValueError("Unique identifier var_types not consistent between time series") | python | def _check_data(self, X):
'''
Checks that unique identifiers var_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
'''
if len(X) > 1:
sval = np.unique(X[0][:, 1])
if np.all([np.all(np.unique(X[i][:, 1]) == sval) for i in range(1, len(X))]):
pass
else:
raise ValueError("Unique identifier var_types not consistent between time series") | [
"def",
"_check_data",
"(",
"self",
",",
"X",
")",
":",
"if",
"len",
"(",
"X",
")",
">",
"1",
":",
"sval",
"=",
"np",
".",
"unique",
"(",
"X",
"[",
"0",
"]",
"[",
":",
",",
"1",
"]",
")",
"if",
"np",
".",
"all",
"(",
"[",
"np",
".",
"all",
"(",
"np",
".",
"unique",
"(",
"X",
"[",
"i",
"]",
"[",
":",
",",
"1",
"]",
")",
"==",
"sval",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"X",
")",
")",
"]",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unique identifier var_types not consistent between time series\"",
")"
] | Checks that unique identifiers var_types are consistent between time series.
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data | [
"Checks",
"that",
"unique",
"identifiers",
"vaf_types",
"are",
"consistent",
"between",
"time",
"series",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L900-L915 |
251,489 | dmbee/seglearn | seglearn/transform.py | FeatureRep._check_features | def _check_features(self, features, Xti):
'''
tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function
'''
N = Xti.shape[0]
N_fts = len(features)
fshapes = np.zeros((N_fts, 2), dtype=np.int)
keys = [key for key in features]
for i in np.arange(N_fts):
fshapes[i] = np.row_stack(features[keys[i]](Xti)).shape
# make sure each feature returns an array shape [N, ]
if not np.all(fshapes[:, 0] == N):
raise ValueError("feature function returned array with invalid length, ",
np.array(features.keys())[fshapes[:, 0] != N])
return {keys[i]: fshapes[i, 1] for i in range(N_fts)} | python | def _check_features(self, features, Xti):
'''
tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function
'''
N = Xti.shape[0]
N_fts = len(features)
fshapes = np.zeros((N_fts, 2), dtype=np.int)
keys = [key for key in features]
for i in np.arange(N_fts):
fshapes[i] = np.row_stack(features[keys[i]](Xti)).shape
# make sure each feature returns an array shape [N, ]
if not np.all(fshapes[:, 0] == N):
raise ValueError("feature function returned array with invalid length, ",
np.array(features.keys())[fshapes[:, 0] != N])
return {keys[i]: fshapes[i, 1] for i in range(N_fts)} | [
"def",
"_check_features",
"(",
"self",
",",
"features",
",",
"Xti",
")",
":",
"N",
"=",
"Xti",
".",
"shape",
"[",
"0",
"]",
"N_fts",
"=",
"len",
"(",
"features",
")",
"fshapes",
"=",
"np",
".",
"zeros",
"(",
"(",
"N_fts",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"keys",
"=",
"[",
"key",
"for",
"key",
"in",
"features",
"]",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"N_fts",
")",
":",
"fshapes",
"[",
"i",
"]",
"=",
"np",
".",
"row_stack",
"(",
"features",
"[",
"keys",
"[",
"i",
"]",
"]",
"(",
"Xti",
")",
")",
".",
"shape",
"# make sure each feature returns an array shape [N, ]",
"if",
"not",
"np",
".",
"all",
"(",
"fshapes",
"[",
":",
",",
"0",
"]",
"==",
"N",
")",
":",
"raise",
"ValueError",
"(",
"\"feature function returned array with invalid length, \"",
",",
"np",
".",
"array",
"(",
"features",
".",
"keys",
"(",
")",
")",
"[",
"fshapes",
"[",
":",
",",
"0",
"]",
"!=",
"N",
"]",
")",
"return",
"{",
"keys",
"[",
"i",
"]",
":",
"fshapes",
"[",
"i",
",",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"N_fts",
")",
"}"
] | tests output of each feature against a segmented time series X
Parameters
----------
features : dict
feature function dictionary
Xti : array-like, shape [n_samples, segment_width, n_variables]
segmented time series (instance)
Returns
-------
ftr_sizes : dict
number of features output by each feature function | [
"tests",
"output",
"of",
"each",
"feature",
"against",
"a",
"segmented",
"time",
"series",
"X"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1137-L1165 |
251,490 | dmbee/seglearn | seglearn/transform.py | FeatureRep._generate_feature_labels | def _generate_feature_labels(self, X):
'''
Generates string feature labels
'''
Xt, Xc = get_ts_data_parts(X)
ftr_sizes = self._check_features(self.features, Xt[0:3])
f_labels = []
# calculated features
for key in ftr_sizes:
for i in range(ftr_sizes[key]):
f_labels += [key + '_' + str(i)]
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
s_labels = ["context_" + str(i) for i in range(Ns)]
f_labels += s_labels
return f_labels | python | def _generate_feature_labels(self, X):
'''
Generates string feature labels
'''
Xt, Xc = get_ts_data_parts(X)
ftr_sizes = self._check_features(self.features, Xt[0:3])
f_labels = []
# calculated features
for key in ftr_sizes:
for i in range(ftr_sizes[key]):
f_labels += [key + '_' + str(i)]
# contextual features
if Xc is not None:
Ns = len(np.atleast_1d(Xc[0]))
s_labels = ["context_" + str(i) for i in range(Ns)]
f_labels += s_labels
return f_labels | [
"def",
"_generate_feature_labels",
"(",
"self",
",",
"X",
")",
":",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"ftr_sizes",
"=",
"self",
".",
"_check_features",
"(",
"self",
".",
"features",
",",
"Xt",
"[",
"0",
":",
"3",
"]",
")",
"f_labels",
"=",
"[",
"]",
"# calculated features",
"for",
"key",
"in",
"ftr_sizes",
":",
"for",
"i",
"in",
"range",
"(",
"ftr_sizes",
"[",
"key",
"]",
")",
":",
"f_labels",
"+=",
"[",
"key",
"+",
"'_'",
"+",
"str",
"(",
"i",
")",
"]",
"# contextual features",
"if",
"Xc",
"is",
"not",
"None",
":",
"Ns",
"=",
"len",
"(",
"np",
".",
"atleast_1d",
"(",
"Xc",
"[",
"0",
"]",
")",
")",
"s_labels",
"=",
"[",
"\"context_\"",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"Ns",
")",
"]",
"f_labels",
"+=",
"s_labels",
"return",
"f_labels"
] | Generates string feature labels | [
"Generates",
"string",
"feature",
"labels"
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1167-L1187 |
251,491 | dmbee/seglearn | seglearn/transform.py | FeatureRepMix._retrieve_indices | def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
            raise TypeError('No valid column specifier. Only a scalar, list or slice of all '
                            'integers or a boolean mask are allowed.')
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
            raise TypeError('No valid column specifier. Only a scalar, list or slice of all '
                            'integers or a boolean mask are allowed.')
"def",
"_retrieve_indices",
"(",
"cols",
")",
":",
"if",
"isinstance",
"(",
"cols",
",",
"int",
")",
":",
"return",
"[",
"cols",
"]",
"elif",
"isinstance",
"(",
"cols",
",",
"slice",
")",
":",
"start",
"=",
"cols",
".",
"start",
"if",
"cols",
".",
"start",
"else",
"0",
"stop",
"=",
"cols",
".",
"stop",
"step",
"=",
"cols",
".",
"step",
"if",
"cols",
".",
"step",
"else",
"1",
"return",
"list",
"(",
"range",
"(",
"start",
",",
"stop",
",",
"step",
")",
")",
"elif",
"isinstance",
"(",
"cols",
",",
"list",
")",
"and",
"cols",
":",
"if",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"bool",
")",
":",
"return",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"asarray",
"(",
"cols",
")",
")",
"elif",
"isinstance",
"(",
"cols",
"[",
"0",
"]",
",",
"int",
")",
":",
"return",
"cols",
"else",
":",
"raise",
"TypeError",
"(",
"'No valid column specifier. Only a scalar, list or slice of all'",
"'integers or a boolean mask are allowed.'",
")"
] | Retrieve a list of indices corresponding to the provided column specification. | [
"Retrieve",
"a",
"list",
"of",
"indices",
"corresponding",
"to",
"the",
"provided",
"column",
"specification",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1301-L1319 |
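The three accepted specifier forms and what they resolve to (shown against the private helper purely for illustration; the import path follows the record's module):

from seglearn.transform import FeatureRepMix  # assumed import path

FeatureRepMix._retrieve_indices(2)                     # int -> [2]
FeatureRepMix._retrieve_indices(slice(0, 6, 2))        # slice -> [0, 2, 4]
FeatureRepMix._retrieve_indices([True, False, True])   # bool mask -> array([0, 2])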
251,492 | dmbee/seglearn | seglearn/transform.py | FeatureRepMix._validate | def _validate(self):
'''
Internal function to validate the transformer before applying all internal transformers.
'''
if self.f_labels is None:
raise NotFittedError('FeatureRepMix')
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate transformers
for trans in transformers:
if not isinstance(trans, FeatureRep):
raise TypeError("All transformers must be an instance of FeatureRep."
" '%s' (type %s) doesn't." % (trans, type(trans))) | python | def _validate(self):
'''
Internal function to validate the transformer before applying all internal transformers.
'''
if self.f_labels is None:
raise NotFittedError('FeatureRepMix')
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate transformers
for trans in transformers:
if not isinstance(trans, FeatureRep):
raise TypeError("All transformers must be an instance of FeatureRep."
" '%s' (type %s) doesn't." % (trans, type(trans))) | [
"def",
"_validate",
"(",
"self",
")",
":",
"if",
"self",
".",
"f_labels",
"is",
"None",
":",
"raise",
"NotFittedError",
"(",
"'FeatureRepMix'",
")",
"if",
"not",
"self",
".",
"transformers",
":",
"return",
"names",
",",
"transformers",
",",
"_",
"=",
"zip",
"(",
"*",
"self",
".",
"transformers",
")",
"# validate names",
"self",
".",
"_validate_names",
"(",
"names",
")",
"# validate transformers",
"for",
"trans",
"in",
"transformers",
":",
"if",
"not",
"isinstance",
"(",
"trans",
",",
"FeatureRep",
")",
":",
"raise",
"TypeError",
"(",
"\"All transformers must be an instance of FeatureRep.\"",
"\" '%s' (type %s) doesn't.\"",
"%",
"(",
"trans",
",",
"type",
"(",
"trans",
")",
")",
")"
] | Internal function to validate the transformer before applying all internal transformers. | [
"Internal",
"function",
"to",
"validate",
"the",
"transformer",
"before",
"applying",
"all",
"internal",
"transformers",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1355-L1374 |
251,493 | dmbee/seglearn | seglearn/transform.py | FunctionTransformer.transform | def transform(self, X):
'''
Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data
'''
if self.func is None:
return X
else:
Xt, Xc = get_ts_data_parts(X)
n_samples = len(Xt)
Xt = self.func(Xt, **self.func_kwargs)
if len(Xt) != n_samples:
raise ValueError("FunctionTransformer changes sample number (not supported).")
if Xc is not None:
Xt = TS_Data(Xt, Xc)
return Xt | python | def transform(self, X):
'''
Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data
'''
if self.func is None:
return X
else:
Xt, Xc = get_ts_data_parts(X)
n_samples = len(Xt)
Xt = self.func(Xt, **self.func_kwargs)
if len(Xt) != n_samples:
raise ValueError("FunctionTransformer changes sample number (not supported).")
if Xc is not None:
Xt = TS_Data(Xt, Xc)
return Xt | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"if",
"self",
".",
"func",
"is",
"None",
":",
"return",
"X",
"else",
":",
"Xt",
",",
"Xc",
"=",
"get_ts_data_parts",
"(",
"X",
")",
"n_samples",
"=",
"len",
"(",
"Xt",
")",
"Xt",
"=",
"self",
".",
"func",
"(",
"Xt",
",",
"*",
"*",
"self",
".",
"func_kwargs",
")",
"if",
"len",
"(",
"Xt",
")",
"!=",
"n_samples",
":",
"raise",
"ValueError",
"(",
"\"FunctionTransformer changes sample number (not supported).\"",
")",
"if",
"Xc",
"is",
"not",
"None",
":",
"Xt",
"=",
"TS_Data",
"(",
"Xt",
",",
"Xc",
")",
"return",
"Xt"
] | Transforms the time series data based on the provided function. Note this transformation
must not change the number of samples in the data.
Parameters
----------
X : array-like, shape [n_samples, ...]
time series data and (optionally) contextual data
Returns
-------
Xt : array-like, shape [n_samples, ...]
transformed time series data | [
"Transforms",
"the",
"time",
"series",
"data",
"based",
"on",
"the",
"provided",
"function",
".",
"Note",
"this",
"transformation",
"must",
"not",
"change",
"the",
"number",
"of",
"samples",
"in",
"the",
"data",
"."
] | d8d7039e92c4c6571a70350c03298aceab8dbeec | https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1465-L1491 |
251,494 | SAP/PyHDB | pyhdb/protocol/segments.py | RequestSegment.build_payload | def build_payload(self, payload):
"""Build payload of all parts and write them into the payload buffer"""
remaining_size = self.MAX_SEGMENT_PAYLOAD_SIZE
for part in self.parts:
part_payload = part.pack(remaining_size)
payload.write(part_payload)
remaining_size -= len(part_payload) | python | def build_payload(self, payload):
remaining_size = self.MAX_SEGMENT_PAYLOAD_SIZE
for part in self.parts:
part_payload = part.pack(remaining_size)
payload.write(part_payload)
remaining_size -= len(part_payload) | [
"def",
"build_payload",
"(",
"self",
",",
"payload",
")",
":",
"remaining_size",
"=",
"self",
".",
"MAX_SEGMENT_PAYLOAD_SIZE",
"for",
"part",
"in",
"self",
".",
"parts",
":",
"part_payload",
"=",
"part",
".",
"pack",
"(",
"remaining_size",
")",
"payload",
".",
"write",
"(",
"part_payload",
")",
"remaining_size",
"-=",
"len",
"(",
"part_payload",
")"
] | Build payload of all parts and write them into the payload buffer | [
"Build",
"payload",
"of",
"all",
"parts",
"and",
"write",
"them",
"into",
"the",
"payload",
"buffer"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/segments.py#L75-L82 |
251,495 | SAP/PyHDB | pyhdb/protocol/types.py | escape | def escape(value):
"""
Escape a single value.
"""
if isinstance(value, (tuple, list)):
return "(" + ", ".join([escape(arg) for arg in value]) + ")"
else:
typ = by_python_type.get(value.__class__)
if typ is None:
raise InterfaceError(
"Unsupported python input: %s (%s)" % (value, value.__class__)
)
return typ.to_sql(value) | python | def escape(value):
if isinstance(value, (tuple, list)):
return "(" + ", ".join([escape(arg) for arg in value]) + ")"
else:
typ = by_python_type.get(value.__class__)
if typ is None:
raise InterfaceError(
"Unsupported python input: %s (%s)" % (value, value.__class__)
)
return typ.to_sql(value) | [
"def",
"escape",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"\"(\"",
"+",
"\", \"",
".",
"join",
"(",
"[",
"escape",
"(",
"arg",
")",
"for",
"arg",
"in",
"value",
"]",
")",
"+",
"\")\"",
"else",
":",
"typ",
"=",
"by_python_type",
".",
"get",
"(",
"value",
".",
"__class__",
")",
"if",
"typ",
"is",
"None",
":",
"raise",
"InterfaceError",
"(",
"\"Unsupported python input: %s (%s)\"",
"%",
"(",
"value",
",",
"value",
".",
"__class__",
")",
")",
"return",
"typ",
".",
"to_sql",
"(",
"value",
")"
] | Escape a single value. | [
"Escape",
"a",
"single",
"value",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L555-L569 |
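Hedged usage sketch (exact literal formatting is delegated to each type's to_sql, so the commented results are indicative only):

from pyhdb.protocol.types import escape, escape_values

escape(42)                            # scalar -> its SQL literal, e.g. '42'
escape((1, 2, 3))                     # sequence -> parenthesized list, e.g. '(1, 2, 3)'
escape_values({'name': "O'Reilly"})   # dict -> same keys, values escaped
escape(object())                      # unsupported type -> raises InterfaceError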
251,496 | SAP/PyHDB | pyhdb/protocol/types.py | escape_values | def escape_values(values):
"""
Escape multiple values from a list, tuple or dict.
"""
if isinstance(values, (tuple, list)):
return tuple([escape(value) for value in values])
elif isinstance(values, dict):
return dict([
(key, escape(value)) for (key, value) in values.items()
])
else:
raise InterfaceError("escape_values expects list, tuple or dict") | python | def escape_values(values):
if isinstance(values, (tuple, list)):
return tuple([escape(value) for value in values])
elif isinstance(values, dict):
return dict([
(key, escape(value)) for (key, value) in values.items()
])
else:
raise InterfaceError("escape_values expects list, tuple or dict") | [
"def",
"escape_values",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"tuple",
"(",
"[",
"escape",
"(",
"value",
")",
"for",
"value",
"in",
"values",
"]",
")",
"elif",
"isinstance",
"(",
"values",
",",
"dict",
")",
":",
"return",
"dict",
"(",
"[",
"(",
"key",
",",
"escape",
"(",
"value",
")",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"values",
".",
"items",
"(",
")",
"]",
")",
"else",
":",
"raise",
"InterfaceError",
"(",
"\"escape_values expects list, tuple or dict\"",
")"
] | Escape multiple values from a list, tuple or dict. | [
"Escape",
"multiple",
"values",
"from",
"a",
"list",
"tuple",
"or",
"dict",
"."
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L572-L583 |
251,497 | SAP/PyHDB | pyhdb/protocol/types.py | Date.prepare | def prepare(cls, value):
"""Pack datetime value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
value = datetime.datetime.strptime(value, "%Y-%m-%d")
year = value.year | 0x8000 # for some unknown reasons year has to be bit-or'ed with 0x8000
month = value.month - 1 # for some unknown reasons HANA counts months starting from zero
pfield += cls._struct.pack(year, month, value.day)
return pfield | python | def prepare(cls, value):
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
value = datetime.datetime.strptime(value, "%Y-%m-%d")
year = value.year | 0x8000 # for some unknown reasons year has to be bit-or'ed with 0x8000
month = value.month - 1 # for some unknown reasons HANA counts months starting from zero
pfield += cls._struct.pack(year, month, value.day)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
")",
":",
"pfield",
"=",
"struct",
".",
"pack",
"(",
"'b'",
",",
"cls",
".",
"type_code",
")",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%Y-%m-%d\"",
")",
"year",
"=",
"value",
".",
"year",
"|",
"0x8000",
"# for some unknown reasons year has to be bit-or'ed with 0x8000",
"month",
"=",
"value",
".",
"month",
"-",
"1",
"# for some unknown reasons HANA counts months starting from zero",
"pfield",
"+=",
"cls",
".",
"_struct",
".",
"pack",
"(",
"year",
",",
"month",
",",
"value",
".",
"day",
")",
"return",
"pfield"
] | Pack datetime value into proper binary format | [
"Pack",
"datetime",
"value",
"into",
"proper",
"binary",
"format"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L368-L376 |
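A small sketch of the wire encoding; only the quirks documented above are shown, and the exact layout of Date._struct is assumed rather than confirmed:

import datetime
from pyhdb.protocol.types import Date

pfield = Date.prepare(datetime.date(2024, 1, 31))
# byte 0: Date.type_code, then (2024 | 0x8000, 1 - 1, 31) packed via Date._struct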
251,498 | SAP/PyHDB | pyhdb/protocol/types.py | Time.prepare | def prepare(cls, value):
"""Pack time value into proper binary format"""
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
if "." in value:
value = datetime.datetime.strptime(value, "%H:%M:%S.%f")
else:
value = datetime.datetime.strptime(value, "%H:%M:%S")
millisecond = value.second * 1000 + value.microsecond // 1000
hour = value.hour | 0x80 # for some unknown reasons hour has to be bit-or'ed with 0x80
pfield += cls._struct.pack(hour, value.minute, millisecond)
return pfield | python | def prepare(cls, value):
pfield = struct.pack('b', cls.type_code)
if isinstance(value, string_types):
if "." in value:
value = datetime.datetime.strptime(value, "%H:%M:%S.%f")
else:
value = datetime.datetime.strptime(value, "%H:%M:%S")
millisecond = value.second * 1000 + value.microsecond // 1000
hour = value.hour | 0x80 # for some unknown reasons hour has to be bit-or'ed with 0x80
pfield += cls._struct.pack(hour, value.minute, millisecond)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
")",
":",
"pfield",
"=",
"struct",
".",
"pack",
"(",
"'b'",
",",
"cls",
".",
"type_code",
")",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
":",
"if",
"\".\"",
"in",
"value",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%H:%M:%S.%f\"",
")",
"else",
":",
"value",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"value",
",",
"\"%H:%M:%S\"",
")",
"millisecond",
"=",
"value",
".",
"second",
"*",
"1000",
"+",
"value",
".",
"microsecond",
"//",
"1000",
"hour",
"=",
"value",
".",
"hour",
"|",
"0x80",
"# for some unknown reasons hour has to be bit-or'ed with 0x80",
"pfield",
"+=",
"cls",
".",
"_struct",
".",
"pack",
"(",
"hour",
",",
"value",
".",
"minute",
",",
"millisecond",
")",
"return",
"pfield"
] | Pack time value into proper binary format | [
"Pack",
"time",
"value",
"into",
"proper",
"binary",
"format"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L439-L450 |
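
The same millisecond and hour math from Time.prepare above, worked through on a concrete value:

import datetime

value = datetime.datetime.strptime("13:45:07.250000", "%H:%M:%S.%f")
millisecond = value.second * 1000 + value.microsecond // 1000  # 7 * 1000 + 250 == 7250
hour = value.hour | 0x80                                       # 13 | 0x80 == 141
# Time.prepare packs (hour, minute, millisecond) after the type code byte.
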
251,499 | SAP/PyHDB | pyhdb/protocol/types.py | MixinLobType.prepare | def prepare(cls, value, length=0, position=0, is_last_data=True):
"""Prepare Lob header.
Note that the actual lob data is NOT written here but appended after the parameter block for each row!
"""
hstruct = WriteLobHeader.header_struct
lob_option_dataincluded = WriteLobHeader.LOB_OPTION_DATAINCLUDED if length > 0 else 0
lob_option_lastdata = WriteLobHeader.LOB_OPTION_LASTDATA if is_last_data else 0
options = lob_option_dataincluded | lob_option_lastdata
pfield = hstruct.pack(cls.type_code, options, length, position)
return pfield | python | def prepare(cls, value, length=0, position=0, is_last_data=True):
hstruct = WriteLobHeader.header_struct
lob_option_dataincluded = WriteLobHeader.LOB_OPTION_DATAINCLUDED if length > 0 else 0
lob_option_lastdata = WriteLobHeader.LOB_OPTION_LASTDATA if is_last_data else 0
options = lob_option_dataincluded | lob_option_lastdata
pfield = hstruct.pack(cls.type_code, options, length, position)
return pfield | [
"def",
"prepare",
"(",
"cls",
",",
"value",
",",
"length",
"=",
"0",
",",
"position",
"=",
"0",
",",
"is_last_data",
"=",
"True",
")",
":",
"hstruct",
"=",
"WriteLobHeader",
".",
"header_struct",
"lob_option_dataincluded",
"=",
"WriteLobHeader",
".",
"LOB_OPTION_DATAINCLUDED",
"if",
"length",
">",
"0",
"else",
"0",
"lob_option_lastdata",
"=",
"WriteLobHeader",
".",
"LOB_OPTION_LASTDATA",
"if",
"is_last_data",
"else",
"0",
"options",
"=",
"lob_option_dataincluded",
"|",
"lob_option_lastdata",
"pfield",
"=",
"hstruct",
".",
"pack",
"(",
"cls",
".",
"type_code",
",",
"options",
",",
"length",
",",
"position",
")",
"return",
"pfield"
] | Prepare Lob header.
Note that the actual lob data is NOT written here but appended after the parameter block for each row! | [
"Prepare",
"Lob",
"header",
".",
"Note",
"that",
"the",
"actual",
"lob",
"data",
"is",
"NOT",
"written",
"here",
"but",
"appended",
"after",
"the",
"parameter",
"block",
"for",
"each",
"row!"
] | 826539d06b8bcef74fe755e7489b8a8255628f12 | https://github.com/SAP/PyHDB/blob/826539d06b8bcef74fe755e7489b8a8255628f12/pyhdb/protocol/types.py#L501-L510 |
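
A sketch of how the two option flags in MixinLobType.prepare combine — the flag values below are assumptions for illustration only, since WriteLobHeader's constants are not shown in this excerpt:

LOB_OPTION_DATAINCLUDED = 0x02  # assumed value, not confirmed by the excerpt
LOB_OPTION_LASTDATA = 0x04      # assumed value, not confirmed by the excerpt

length, is_last_data = 1024, True
options = (LOB_OPTION_DATAINCLUDED if length > 0 else 0) | \
          (LOB_OPTION_LASTDATA if is_last_data else 0)
# options == 0x06: lob data is included in this packet and it is the final chunk
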