import os
from pathlib import Path
from vehicle.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops
from vehicle.utils.torch_utils import model_info_for_loggers
try:
import comet_ml
assert not TESTS_RUNNING # do not log pytest
assert hasattr(comet_ml, '__version__') # verify package is not directory
assert SETTINGS['comet'] is True # verify integration is enabled
except (ImportError, AssertionError):
comet_ml = None
# Ensures certain logging functions only run for supported tasks
COMET_SUPPORTED_TASKS = ['detect']
# Names of plots created by YOLOv8 that are logged to Comet
EVALUATION_PLOT_NAMES = 'F1_curve', 'P_curve', 'R_curve', 'PR_curve', 'confusion_matrix'
LABEL_PLOT_NAMES = 'labels', 'labels_correlogram'
_comet_image_prediction_count = 0
def _get_comet_mode():
return os.getenv('COMET_MODE', 'online')
def _get_comet_model_name():
return os.getenv('COMET_MODEL_NAME', 'YOLOv8')
def _get_eval_batch_logging_interval():
return int(os.getenv('COMET_EVAL_BATCH_LOGGING_INTERVAL', 1))
def _get_max_image_predictions_to_log():
return int(os.getenv('COMET_MAX_IMAGE_PREDICTIONS', 100))
def _scale_confidence_score(score):
scale = float(os.getenv('COMET_MAX_CONFIDENCE_SCORE', 100.0))
return score * scale
def _should_log_confusion_matrix():
return os.getenv('COMET_EVAL_LOG_CONFUSION_MATRIX', 'false').lower() == 'true'
def _should_log_image_predictions():
return os.getenv('COMET_EVAL_LOG_IMAGE_PREDICTIONS', 'true').lower() == 'true'
def _get_experiment_type(mode, project_name):
"""Return an experiment based on mode and project name."""
if mode == 'offline':
return comet_ml.OfflineExperiment(project_name=project_name)
return comet_ml.Experiment(project_name=project_name)
def _create_experiment(args):
"""Ensures that the experiment object is only created in a single process during distributed training."""
if RANK not in (-1, 0):
return
try:
comet_mode = _get_comet_mode()
_project_name = os.getenv('COMET_PROJECT_NAME', args.project)
experiment = _get_experiment_type(comet_mode, _project_name)
experiment.log_parameters(vars(args))
experiment.log_others({
'eval_batch_logging_interval': _get_eval_batch_logging_interval(),
'log_confusion_matrix_on_eval': _should_log_confusion_matrix(),
'log_image_predictions': _should_log_image_predictions(),
'max_image_predictions': _get_max_image_predictions_to_log(), })
experiment.log_other('Created from', 'yolov8')
except Exception as e:
LOGGER.warning(f'WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}')
def _fetch_trainer_metadata(trainer):
"""Returns metadata for YOLO training including epoch and asset saving status."""
curr_epoch = trainer.epoch + 1
train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
curr_step = curr_epoch * train_num_steps_per_epoch
final_epoch = curr_epoch == trainer.epochs
save = trainer.args.save
save_period = trainer.args.save_period
save_interval = curr_epoch % save_period == 0
save_assets = save and save_period > 0 and save_interval and not final_epoch
return dict(
curr_epoch=curr_epoch,
curr_step=curr_step,
save_assets=save_assets,
final_epoch=final_epoch,
)
def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad):
"""YOLOv8 resizes images during training and the label values
are normalized based on this resized shape. This function rescales the
bounding box labels to the original image shape.
"""
resized_image_height, resized_image_width = resized_image_shape
# Convert normalized xywh format predictions to xyxy in resized scale format
box = ops.xywhn2xyxy(box, h=resized_image_height, w=resized_image_width)
# Scale box predictions from resized image scale back to original image scale
box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad)
# Convert bounding box format from xyxy to xywh for Comet logging
box = ops.xyxy2xywh(box)
# Adjust xy center to correspond top-left corner
box[:2] -= box[2:] / 2
box = box.tolist()
return box
def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None):
"""Format ground truth annotations for detection."""
indices = batch['batch_idx'] == img_idx
bboxes = batch['bboxes'][indices]
if len(bboxes) == 0:
LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes labels')
return None
cls_labels = batch['cls'][indices].squeeze(1).tolist()
if class_name_map:
cls_labels = [str(class_name_map[label]) for label in cls_labels]
original_image_shape = batch['ori_shape'][img_idx]
resized_image_shape = batch['resized_shape'][img_idx]
ratio_pad = batch['ratio_pad'][img_idx]
data = []
for box, label in zip(bboxes, cls_labels):
box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad)
data.append({
'boxes': [box],
'label': f'gt_{label}',
'score': _scale_confidence_score(1.0), })
return {'name': 'ground_truth', 'data': data}
def _format_prediction_annotations_for_detection(image_path, metadata, class_label_map=None):
"""Format YOLO predictions for object detection visualization."""
stem = image_path.stem
image_id = int(stem) if stem.isnumeric() else stem
predictions = metadata.get(image_id)
if not predictions:
LOGGER.debug(f'COMET WARNING: Image: {image_path} has no bounding boxes predictions')
return None
data = []
for prediction in predictions:
boxes = prediction['bbox']
score = _scale_confidence_score(prediction['score'])
cls_label = prediction['category_id']
if class_label_map:
cls_label = str(class_label_map[cls_label])
data.append({'boxes': [boxes], 'label': cls_label, 'score': score})
return {'name': 'prediction', 'data': data}
def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map):
"""Join the ground truth and prediction annotations if they exist."""
ground_truth_annotations = _format_ground_truth_annotations_for_detection(img_idx, image_path, batch,
class_label_map)
prediction_annotations = _format_prediction_annotations_for_detection(image_path, prediction_metadata_map,
class_label_map)
annotations = [
annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None]
return [annotations] if annotations else None
def _create_prediction_metadata_map(model_predictions):
"""Create metadata map for model predictions by groupings them based on image ID."""
pred_metadata_map = {}
for prediction in model_predictions:
pred_metadata_map.setdefault(prediction['image_id'], [])
pred_metadata_map[prediction['image_id']].append(prediction)
return pred_metadata_map
def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch):
"""Log the confusion matrix to Comet experiment."""
conf_mat = trainer.validator.confusion_matrix.matrix
names = list(trainer.data['names'].values()) + ['background']
experiment.log_confusion_matrix(
matrix=conf_mat,
labels=names,
max_categories=len(names),
epoch=curr_epoch,
step=curr_step,
)
def _log_images(experiment, image_paths, curr_step, annotations=None):
"""Logs images to the experiment with optional annotations."""
if annotations:
for image_path, annotation in zip(image_paths, annotations):
experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)
else:
for image_path in image_paths:
experiment.log_image(image_path, name=image_path.stem, step=curr_step)
def _log_image_predictions(experiment, validator, curr_step):
"""Logs predicted boxes for a single image during training."""
global _comet_image_prediction_count
task = validator.args.task
if task not in COMET_SUPPORTED_TASKS:
return
jdict = validator.jdict
if not jdict:
return
predictions_metadata_map = _create_prediction_metadata_map(jdict)
dataloader = validator.dataloader
class_label_map = validator.names
batch_logging_interval = _get_eval_batch_logging_interval()
max_image_predictions = _get_max_image_predictions_to_log()
for batch_idx, batch in enumerate(dataloader):
if (batch_idx + 1) % batch_logging_interval != 0:
continue
image_paths = batch['im_file']
for img_idx, image_path in enumerate(image_paths):
if _comet_image_prediction_count >= max_image_predictions:
return
image_path = Path(image_path)
annotations = _fetch_annotations(
img_idx,
image_path,
batch,
predictions_metadata_map,
class_label_map,
)
_log_images(
experiment,
[image_path],
curr_step,
annotations=annotations,
)
_comet_image_prediction_count += 1
def _log_plots(experiment, trainer):
"""Logs evaluation plots and label plots for the experiment."""
plot_filenames = [trainer.save_dir / f'{plots}.png' for plots in EVALUATION_PLOT_NAMES]
_log_images(experiment, plot_filenames, None)
label_plot_filenames = [trainer.save_dir / f'{labels}.jpg' for labels in LABEL_PLOT_NAMES]
_log_images(experiment, label_plot_filenames, None)
def _log_model(experiment, trainer):
"""Log the best-trained model to Comet.ml."""
model_name = _get_comet_model_name()
experiment.log_model(
model_name,
file_or_folder=str(trainer.best),
file_name='best.pt',
overwrite=True,
)
def on_pretrain_routine_start(trainer):
"""Creates or resumes a CometML experiment at the start of a YOLO pre-training routine."""
experiment = comet_ml.get_global_experiment()
is_alive = getattr(experiment, 'alive', False)
if not experiment or not is_alive:
_create_experiment(trainer.args)
def on_train_epoch_end(trainer):
"""Log metrics and save batch images at the end of training epochs."""
experiment = comet_ml.get_global_experiment()
if not experiment:
return
metadata = _fetch_trainer_metadata(trainer)
curr_epoch = metadata['curr_epoch']
curr_step = metadata['curr_step']
experiment.log_metrics(
trainer.label_loss_items(trainer.tloss, prefix='train'),
step=curr_step,
epoch=curr_epoch,
)
if curr_epoch == 1:
_log_images(experiment, trainer.save_dir.glob('train_batch*.jpg'), curr_step)
def on_fit_epoch_end(trainer):
"""Logs model assets at the end of each epoch."""
experiment = comet_ml.get_global_experiment()
if not experiment:
return
metadata = _fetch_trainer_metadata(trainer)
curr_epoch = metadata['curr_epoch']
curr_step = metadata['curr_step']
save_assets = metadata['save_assets']
experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch)
experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch)
if curr_epoch == 1:
experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch)
if not save_assets:
return
_log_model(experiment, trainer)
if _should_log_confusion_matrix():
_log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
if _should_log_image_predictions():
_log_image_predictions(experiment, trainer.validator, curr_step)
def on_train_end(trainer):
"""Perform operations at the end of training."""
experiment = comet_ml.get_global_experiment()
if not experiment:
return
metadata = _fetch_trainer_metadata(trainer)
curr_epoch = metadata['curr_epoch']
curr_step = metadata['curr_step']
plots = trainer.args.plots
_log_model(experiment, trainer)
if plots:
_log_plots(experiment, trainer)
_log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
_log_image_predictions(experiment, trainer.validator, curr_step)
experiment.end()
global _comet_image_prediction_count
_comet_image_prediction_count = 0
callbacks = {
'on_pretrain_routine_start': on_pretrain_routine_start,
'on_train_epoch_end': on_train_epoch_end,
'on_fit_epoch_end': on_fit_epoch_end,
    'on_train_end': on_train_end} if comet_ml else {}

| zipdetr | /zipdetr-2.0.10.tar.gz/zipdetr-2.0.10/vehicle/utils/callbacks/comet.py | comet.py |
import re
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from vehicle.utils import LOGGER, SETTINGS, TESTS_RUNNING
from vehicle.utils.torch_utils import model_info_for_loggers
try:
import clearml
from clearml import Task
from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from clearml.binding.matplotlib_bind import PatchedMatplotlib
assert hasattr(clearml, '__version__') # verify package is not directory
assert not TESTS_RUNNING # do not log pytest
assert SETTINGS['clearml'] is True # verify integration is enabled
except (ImportError, AssertionError):
clearml = None
def _log_debug_samples(files, title='Debug Samples') -> None:
"""
Log files (images) as debug samples in the ClearML task.
Args:
files (list): A list of file paths in PosixPath format.
title (str): A title that groups together images with the same values.
"""
task = Task.current_task()
if task:
for f in files:
if f.exists():
it = re.search(r'_batch(\d+)', f.name)
iteration = int(it.groups()[0]) if it else 0
task.get_logger().report_image(title=title,
series=f.name.replace(it.group(), ''),
local_path=str(f),
iteration=iteration)
def _log_plot(title, plot_path) -> None:
"""
Log an image as a plot in the plot section of ClearML.
Args:
title (str): The title of the plot.
plot_path (str): The path to the saved image file.
"""
img = mpimg.imread(plot_path)
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks
ax.imshow(img)
Task.current_task().get_logger().report_matplotlib_figure(title=title,
series='',
figure=fig,
report_interactive=False)
def on_pretrain_routine_start(trainer):
"""Runs at start of pretraining routine; initializes and connects/ logs task to ClearML."""
try:
task = Task.current_task()
if task:
# Make sure the automatic pytorch and matplotlib bindings are disabled!
# We are logging these plots and model files manually in the integration
PatchPyTorchModelIO.update_current_task(None)
PatchedMatplotlib.update_current_task(None)
else:
task = Task.init(project_name=trainer.args.project or 'YOLOv8',
task_name=trainer.args.name,
tags=['YOLOv8'],
output_uri=True,
reuse_last_task_id=False,
auto_connect_frameworks={
'pytorch': False,
'matplotlib': False})
LOGGER.warning('ClearML Initialized a new task. If you want to run remotely, '
'please add clearml-init and connect your arguments before initializing YOLO.')
task.connect(vars(trainer.args), name='General')
except Exception as e:
LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}')
def on_train_epoch_end(trainer):
task = Task.current_task()
if task:
"""Logs debug samples for the first epoch of YOLO training."""
if trainer.epoch == 1:
_log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic')
"""Report the current training progress."""
for k, v in trainer.validator.metrics.results_dict.items():
task.get_logger().report_scalar('train', k, v, iteration=trainer.epoch)
def on_fit_epoch_end(trainer):
"""Reports model information to logger at the end of an epoch."""
task = Task.current_task()
if task:
# You should have access to the validation bboxes under jdict
task.get_logger().report_scalar(title='Epoch Time',
series='Epoch Time',
value=trainer.epoch_time,
iteration=trainer.epoch)
if trainer.epoch == 0:
for k, v in model_info_for_loggers(trainer).items():
task.get_logger().report_single_value(k, v)
def on_val_end(validator):
"""Logs validation results including labels and predictions."""
if Task.current_task():
# Log val_labels and val_pred
_log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation')
def on_train_end(trainer):
"""Logs final model and its name on training completion."""
task = Task.current_task()
if task:
# Log final results, CM matrix + PR plots
files = [
'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png',
*(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter
for f in files:
_log_plot(title=f.stem, plot_path=f)
# Report final metrics
for k, v in trainer.validator.metrics.results_dict.items():
task.get_logger().report_single_value(k, v)
# Log the final model
task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)
callbacks = {
'on_pretrain_routine_start': on_pretrain_routine_start,
'on_train_epoch_end': on_train_epoch_end,
'on_fit_epoch_end': on_fit_epoch_end,
'on_val_end': on_val_end,
    'on_train_end': on_train_end} if clearml else {}

| zipdetr | /zipdetr-2.0.10.tar.gz/zipdetr-2.0.10/vehicle/utils/callbacks/clearml.py | clearml.py |
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import os
import binascii
import struct
import datetime
if sys.version_info[0] == 2:
import scandir
os.scandir = scandir.scandir
class EntryBase(object):
""" base class for PK headers """
def loaditems(self, fh):
""" loads any items refered to by the header """
pass
######################################################
# Decoder classes
######################################################
class CentralDirEntry(EntryBase):
HeaderSize = 42
MagicNumber = b'\x01\x02'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.createVersion, self.neededVersion, self.flags, self.method, self.timestamp, \
self.crc32, self.compressedSize, self.originalSize, self.nameLength, self.extraLength, \
self.commentLength, self.diskNrStart, self.zipAttrs, self.osAttrs, self.dataOfs = \
struct.unpack_from("<4H4L5HLL", data, ofs)
ofs += self.HeaderSize
self.nameOffset = baseofs + ofs
ofs += self.nameLength
self.extraOffset = baseofs + ofs
ofs += self.extraLength
self.commentOffset = baseofs + ofs
ofs += self.commentLength
self.endOffset = baseofs + ofs
self.name = None
self.extra = None
self.comment = None
def loaditems(self, fh):
fh.seek(self.nameOffset)
self.name = fh.read(self.nameLength).decode("utf-8", "ignore")
fh.seek(self.extraOffset)
self.extra = fh.read(self.extraLength)
fh.seek(self.commentOffset)
self.comment = fh.read(self.commentLength).decode("utf-8", "ignore")
def summary(self):
def flagdesc(fl):
if fl&64: return "AES"
if fl&1: return "CRYPT"
return ""
return "%10d (%5.1f%%) %s %08x [%5s] %s" % (
self.originalSize,
100.0*self.compressedSize/self.originalSize if self.originalSize else 0,
datetime.datetime.utcfromtimestamp(self.timestamp),
self.crc32,
flagdesc(self.flags),
self.name
)
def __repr__(self):
r = "PK.0102: %04x %04x %04x %04x %08x %08x %08x %08x %04x %04x %04x %04x %04x %08x %08x | %08x %08x %08x %08x" % (
self.createVersion, self.neededVersion, self.flags, self.method, self.timestamp,
self.crc32, self.compressedSize, self.originalSize, self.nameLength, self.extraLength,
self.commentLength, self.diskNrStart, self.zipAttrs, self.osAttrs, self.dataOfs,
self.nameOffset, self.extraOffset, self.commentOffset, self.endOffset)
if self.name:
r += " - " + self.name
if self.comment:
r += "\n" + self.comment
return r
class LocalFileHeader(EntryBase):
HeaderSize = 26
MagicNumber = b'\x03\x04'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.neededVersion, self.flags, self.method, self.timestamp, self.crc32, \
self.compressedSize, self.originalSize, self.nameLength, self.extraLength = \
struct.unpack_from("<3H4LHH", data, ofs)
ofs += self.HeaderSize
self.nameOffset = baseofs + ofs
ofs += self.nameLength
self.extraOffset = baseofs + ofs
ofs += self.extraLength
self.dataOffset = baseofs + ofs
ofs += self.compressedSize
self.endOffset = baseofs + ofs
self.name = None
self.extra = None
self.data = None
def loaditems(self, fh):
fh.seek(self.nameOffset)
self.name = fh.read(self.nameLength).decode("utf-8", "ignore")
fh.seek(self.extraOffset)
self.extra = fh.read(self.extraLength)
# not loading data
def __repr__(self):
r = "PK.0304: %04x %04x %04x %08x %08x %08x %08x %04x %04x | %08x %08x %08x %08x" % (
self.neededVersion, self.flags, self.method, self.timestamp, self.crc32,
self.compressedSize, self.originalSize, self.nameLength, self.extraLength,
self.nameOffset, self.extraOffset, self.dataOffset, self.endOffset)
if self.name:
r += " - " + self.name
return r
class EndOfCentralDir(EntryBase):
HeaderSize = 18
MagicNumber = b'\x05\x06'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.thisDiskNr, self.startDiskNr, self.thisEntries, self.totalEntries, self.dirSize, self.dirOffset, self.commentLength = \
struct.unpack_from("<4HLLH", data, ofs)
ofs += self.HeaderSize
self.commentOffset = baseofs + ofs
ofs += self.commentLength
self.endOffset = baseofs + ofs
self.comment = None
def loaditems(self, fh):
fh.seek(self.commentOffset)
self.comment = fh.read(self.commentLength)
if self.comment.startswith(b'signed by SignApk'):
self.comment = repr(self.comment[:17]) + str(binascii.b2a_hex(self.comment[18:]), 'ascii')
else:
self.comment = self.comment.decode('utf-8', 'ignore')
def __repr__(self):
r = "PK.0506: %04x %04x %04x %04x %08x %08x %04x | %08x %08x" % (
self.thisDiskNr, self.startDiskNr, self.thisEntries, self.totalEntries, self.dirSize, self.dirOffset, self.commentLength,
self.commentOffset, self.endOffset)
if self.comment:
r += "\n" + self.comment
return r
class DataDescriptor(EntryBase):
HeaderSize = 12
MagicNumber = b'\x07\x08'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
self.crc, self.compSize, self.uncompSize = \
struct.unpack_from("<3L", data, ofs)
ofs += self.HeaderSize
self.endOffset = baseofs + ofs
def __repr__(self):
return "PK.0708: %08x %08x %08x | %08x" % (
self.crc, self.compSize, self.uncompSize,
self.endOffset)
# todo
class Zip64EndOfDir(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x06'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class Zip64EndOfDirLocator(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x07'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class ExtraEntry(EntryBase):
HeaderSize = 0
MagicNumber = b'\x06\x08'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class SpannedArchive(EntryBase):
HeaderSize = 0
MagicNumber = b'\x03\x03'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
class ArchiveSignature(EntryBase):
HeaderSize = 0
MagicNumber = b'\x05\x05'
def __init__(self, baseofs, data, ofs):
self.pkOffset = baseofs + ofs - 4
def getDecoderClass(typ):
""" return Decoder class for the PK type """
for cls in (CentralDirEntry, LocalFileHeader, EndOfCentralDir, DataDescriptor, Zip64EndOfDir, Zip64EndOfDirLocator, ExtraEntry, SpannedArchive, ArchiveSignature):
if cls.MagicNumber == typ:
return cls
def findPKHeaders(args, fh):
""" scan entire file for PK headers """
def processchunk(o, chunk):
n = -1
while True:
n = chunk.find(b'PK', n+1)
if n == -1 or n+4 > len(chunk):
break
cls = getDecoderClass(chunk[n+2:n+4])
if cls:
hdrEnd = n+4+cls.HeaderSize
if hdrEnd > len(chunk):
continue
# todo: skip entries entirely within repeated chunk
# if n<64 and hdrEnd>64:
# continue
yield cls(o, chunk, n+4)
prev = b''
o = 0
if args.offset:
fh.seek(args.offset, os.SEEK_SET if args.offset >= 0 else os.SEEK_END)
o = args.offset
while args.length is None or o < args.length:
want = args.chunksize
if args.length is not None and want > args.length - o:
want = args.length - o
fh.seek(o)
chunk = fh.read(want)
if len(chunk) == 0:
break
for ch in processchunk(o-len(prev), prev+chunk):
yield ch
# 64 so all header types would fit, exclusive their variable size parts
prev = chunk[-64:]
o += len(chunk)
def scanzip(args, fh):
""" do a quick scan of the .zip file, starting by locating the EOD marker """
# 100 bytes is the smallest .zip possible
fh.seek(-100, 2)
eoddata = fh.read()
iEND = eoddata.find(b'PK\x05\x06')
if iEND==-1:
# try with larger chunk
ofs = max(fh.tell()-0x10100, 0)
fh.seek(ofs, 0)
eoddata = fh.read()
iEND = eoddata.find(b'PK\x05\x06')
if iEND==-1:
print("expected PK0506 - probably not a PKZIP file")
return
else:
        ofs = fh.tell()-100  # eoddata was read starting 100 bytes before EOF (see the seek above)
eod = EndOfCentralDir(ofs, eoddata, iEND+4)
dirofs = eod.dirOffset
print(repr(eod))
for _ in range(eod.thisEntries):
fh.seek(dirofs)
dirdata = fh.read(46)
if dirdata[:4] != b'PK\x01\x02':
print("expected PK0102")
return
d = CentralDirEntry(dirofs, dirdata, 4)
fh.seek(d.nameOffset)
d.name = fh.read(d.nameLength).decode("utf-8", "ignore")
if args.verbose:
print(repr(d))
else:
print(d.summary())
dirofs = d.endOffset
def processfile(args, fh):
""" process one opened file / url """
if args.quick:
scanzip(args, fh)
else:
for entry in findPKHeaders(args, fh):
entry.loaditems(fh)
print("%08x: %s" % (entry.pkOffset, entry))
def DirEnumerator(args, path):
"""
Enumerate all files / links in a directory,
optionally recursing into subdirectories,
or ignoring links.
"""
for d in os.scandir(path):
try:
if d.name == '.' or d.name == '..':
pass
elif d.is_symlink() and args.skiplinks:
pass
elif d.is_file():
yield d.path
elif d.is_dir() and args.recurse:
for f in DirEnumerator(args, d.path):
yield f
except Exception as e:
print("EXCEPTION %s accessing %s/%s" % (e, path, d.name))
def EnumeratePaths(args, paths):
"""
Enumerate all urls, paths, files from the commandline
optionally recursing into subdirectories.
"""
for fn in paths:
try:
# 3 - for ftp://, 4 for http://, 5 for https://
if fn.find("://") in (3,4,5):
yield fn
if os.path.islink(fn) and args.skiplinks:
pass
elif os.path.isdir(fn) and args.recurse:
for f in DirEnumerator(args, fn):
yield f
elif os.path.isfile(fn):
yield fn
except Exception as e:
print("EXCEPTION %s accessing %s" % (e, fn))
def main():
import argparse
parser = argparse.ArgumentParser(description='zipdump - scan file contents for PKZIP data',
epilog='zipdump can quickly scan a zip from an URL without downloading the complete archive')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('--cat', '-c', type=str, help='decompress file to stdout')
parser.add_argument('--print', '-p', type=str, help='print raw file data to stdout')
parser.add_argument('--recurse', '-r', action='store_true', help='recurse into directories')
parser.add_argument('--skiplinks', '-L', action='store_true', help='skip symbolic links')
parser.add_argument('--offset', '-o', type=int, help='start processing at offset')
parser.add_argument('--length', '-l', type=int, help='max length of data to process')
parser.add_argument('--quick', '-q', action='store_true', help='Quick dir scan. This is quick with URLs as well.')
parser.add_argument('--chunksize', type=int, default=1024*1024)
parser.add_argument('FILES', type=str, nargs='*', help='Files or URLs')
args = parser.parse_args()
if args.FILES:
for fn in EnumeratePaths(args, args.FILES):
print("==> ", fn, " <==")
try:
if fn.find("://") in (3,4,5):
# when argument looks like a url, use urlstream to open
import urlstream
with urlstream.open(fn) as fh:
processfile(args, fh)
else:
with open(fn, "rb") as fh:
processfile(args, fh)
except Exception as e:
print("ERROR: %s" % e)
raise
else:
processfile(args, sys.stdin.buffer)
if __name__ == '__main__':
    main()

| zipdump | /zipdump-0.1.zip/zipdump-0.1/zipdump.py | zipdump.py |
# zipdump
Analyze zipfile, either local, or from url
`zipdump` can either do a full scan, finding all PK-like headers, or it can do a quick scan ( like the usual `zip -v` type output ).
`zipdump -q` works equally quick on web based resources as on local files.
This makes it quite easy to quickly investigate a large number of large .zip files without actually needing to download them.
I wrote this tool because I wanted to look at the contents of lots of Apple iOS firmware files without downloading hundreds of gigabytes of data.
For instance:
python zipdump.py -q http://appldnld.apple.com/ios10.0/031-64655-20160705-A371AD14-3E3F-11E6-A58B-C84C60941A1E/com_apple_MobileAsset_SoftwareUpdate/d75e3af423ae0308a8b9e0847292375ba02e3b11.zip
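The same quick scan works on a local archive as well; for instance (the archive name here is only illustrative):

    python zipdump.py -q firmware.zip

Without -q, zipdump instead scans the whole file for PK-like headers.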
`zipdump` works with both Python 2 and Python 3.
(c) 2016 Willem Hengeveld <[email protected]>
| zipdump | /zipdump-0.1.zip/zipdump-0.1/README.md | README.md |
import sys
import re
from errno import EINVAL, ENOENT
if sys.version_info[0] == 3:
import urllib.request
from urllib.request import Request
urllib2 = urllib.request
else:
import urllib2
from urllib2 import Request
# set this to True when debugging this module
debuglog = False
def open(url, mode=None):
"""
Use urlstream.open for doing a simple request, without customizing request headers
'mode' is ignored, it is there to be argument compatible with file.open()
"""
return urlstream(Request(url))
class urlstream(object):
""" urlstream requests chunks from a web resource as directed by read + seek requests """
def __init__(self, req):
self.req = req
self.absolutepos = 0
self.buffer = None
self.bufferstart = None # position of start of buffer
def clearrange(self):
""" remove Range header from request """
if hasattr(self.req, 'remove_header'):
# python3
self.req.remove_header('Range')
else:
# python2
self.req.headers.pop('Range',None)
def next(self, size):
""" download next chunk """
# Retrieve anywhere between 64k and 1M byte
if size is not None:
size = min(max(size, 0x10000), 0x100000)
if self.absolutepos < 0:
# relative to the end of the file
self.req.headers['Range'] = "bytes=%d" % self.absolutepos
elif size is None:
# open ended range
self.req.headers['Range'] = "bytes=%d-" % (self.absolutepos)
else:
# fixed absolute range
self.req.headers['Range'] = "bytes=%d-%d" % (self.absolutepos, self.absolutepos+size-1)
if debuglog: print("next: ", self.req.headers['Range'])
f = self.doreq()
# note: Content-Range header has actual resulting range.
# the format of the Content-Range header:
#
# Content-Range: bytes (\d+)-(\d+)/(\d+)
#
if self.absolutepos < 0:
crange = f.headers.get('Content-Range')
if crange:
m = re.match(r'bytes\s+(\d+)-', crange)
if m:
self.absolutepos = int(m.group(1))
return f.read()
def read(self, size=None):
""" read bytes from stream """
if size is None:
if self.absolutepos==0:
self.clearrange()
if debuglog: print("read: entire file")
f = self.doreq()
return f.read()
# read until end of file
return self.next(None)
# read chunk until size bytes received
data = b""
while size > 0:
if self.buffer is None:
self.buffer = self.next(size)
self.bufferstart = self.absolutepos
if self.buffer is None:
return data
slicestart = self.absolutepos - self.bufferstart
want = min(size, len(self.buffer)-slicestart)
sliceend = slicestart+want
data += self.buffer[slicestart:sliceend]
if sliceend == len(self.buffer):
self.buffer = None
self.bufferstart = None
self.absolutepos += want
size -= want
return data
def seek(self, size, whence=0):
""" seek to a different offset """
if debuglog: print("seek", size, whence)
if whence == 0 and size>=0:
self.absolutepos = size
elif whence == 1:
self.absolutepos += size
elif whence == 2 and size<0:
self.absolutepos = size
else:
raise IOError(EINVAL, "Invalid seek arguments")
if self.buffer and not self.bufferstart <= self.absolutepos < self.bufferstart+len(self.buffer):
self.buffer = None
self.bufferstart = None
def tell(self):
""" return current absolute position """
if self.absolutepos>=0:
if debuglog: print("tell -> ", self.absolutepos)
return self.absolutepos
# note: with python3 i could have used the 'method' property
saved_method = self.req.get_method
self.req.get_method = lambda : 'HEAD'
if debuglog: print("tell: HEAD")
self.clearrange()
try:
head_response = self.doreq()
result = head_response.getcode()
except urllib2.HTTPError as err:
self.req.get_method = saved_method
raise
self.req.get_method = saved_method
self.contentLength = int(head_response.headers.get("Content-Length"))
self.absolutepos += self.contentLength
return self.absolutepos
def doreq(self):
""" do the actual http request, translating 404 into ENOENT """
try:
return urllib2.urlopen(self.req)
except urllib2.HTTPError as err:
if err.code!=404:
raise
raise IOError(ENOENT, "Not found")
# for supporting 'with'
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
        pass

| zipdump | /zipdump-0.1.zip/zipdump-0.1/urlstream.py | urlstream.py |
See the help of each command for details.
zipe only supports cp932 and utf-8 as target encodings, and creating encrypted zip archives is not supported for now.
::
usage: zipe [-h] [-F ENCODING] -T ENCODING [-r] [-v]
[-x EXCLUDE | -i INCLUDE]
ZIP_FILE ENTRY [ENTRY ...]
Zipper for not native encoded filename file
positional arguments:
ZIP_FILE ZIP archive
ENTRY file entries
optional arguments:
-h, --help show this help message and exit
-F ENCODING, --from ENCODING
filename encoding from(Default
sys.getfilesystemencoding())
-T ENCODING, --to ENCODING
filename encoding to
-r, --recursive archive recursively
-v, --verbose verbose mode
-x EXCLUDE, --exclude EXCLUDE
exclude file pattern in RegExp
-i INCLUDE, --include INCLUDE
include file pattern in RegExp
::
usage: unzipe [-h] [-l] [-P PASSWORD] [--force] -F ENCODING [-T ENCODING]
[-x EXCLUDE | -i INCLUDE]
ZIP_FILE [ENTRY [ENTRY ...]]
Unzipper for not native encoded filename file
positional arguments:
ZIP_FILE ZIP archive
ENTRY can specify file entries
optional arguments:
-h, --help show this help message and exit
-l, --list list entries instead of extracting
-P PASSWORD, --password PASSWORD
password for encrypted
--force extract file even if its absolute path
-v, --verbose verbose mode
-F ENCODING, --from ENCODING
filename encoding from
-T ENCODING, --to ENCODING
filename encoding to(Default
sys.getfilesystemencoding())
-x EXCLUDE, --exclude EXCLUDE
exclude filename pattern in RegExp
-i INCLUDE, --include INCLUDE
include filename pattern in RegExp
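For example, to create an archive whose entry names are stored in cp932 on a
UTF-8 system, and to extract it again (archive and directory names below are
only illustrative):
::

    zipe -T cp932 -r archive.zip somedir
    unzipe -F cp932 archive.zip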
| zipe | /zipe-0.1.3.tar.gz/zipe-0.1.3/README.rst | README.rst |
zipencrypt
==========
zipencrypt is a Python module to provide *weak* password-based
encryption for zipfiles. It is meant as a drop-in replacement for
zipfile from the standard lib and provides the counterpart to the
decryption implemented there.
It is implemented in pure python and does not rely on any 3rd party libraries.
Compatible with Python 2.7 and 3.4+
Installation
------------
::
pip install zipencrypt
The code
--------
.. code:: python
from zipencrypt import ZipFile
with ZipFile("file.zip", mode="w") as f:
f.writestr("file1.txt", "plaintext", pwd=b"password")
Do not use this!
----------------
The standard encryption of ZIP is known to be seriously flawed (see
`here <https://en.wikipedia.org/wiki/Zip_(file_format)#Encryption>`_).
That is probably the reason why it is not implemented in zipfile in the
standard lib. There are however legitimate use cases.
| zipencrypt | /zipencrypt-0.3.1.tar.gz/zipencrypt-0.3.1/README.rst | README.rst |
# zipexec is a pure python alternative to zipgrep
zipexec allows you to specify any command line utility you would like to run against a group of ```.zip``` files. While zcat, zgrep and other utilities work well on gzip compressed files, I needed similar functionality on pkzip compressed files (with .zip extensions) and was unable to find a tool I liked.
Execute command line utilities of your choice against the contents of zip files.
## Help
```
$ zipexec -h
usage: zipexec [-h] [-c CMD_TO_RUN] path_to_zips
positional arguments:
path_to_zips Where are the zips to search
optional arguments:
-h, --help show this help message and exit
-c CMD_TO_RUN, --cmd_to_run CMD_TO_RUN
Command line to run on each zip
```
NOTE: When specifying file names and commands, take care that your local Linux shell does not expand the asterisks.
## Example Usage
### If you only pass a path to a folder containing zip files, zipexec will prompt you for a command
```
$ zipexec /home/uploads/zipfiles/
What command would you like to run?: grep -R -C2 -n "search string" *
```
### If you do specify a command with -c or --cmd_to_run, it will execute that
Example 1: grep through the content of all the zip files:
```
$ zipexec -c 'grep -R -n -C2 "import os" *.py' /home/uploads/zipfiles/
```
Example 2: Do a directory listing of all of the zip files that are named file*.zip
```
$ zipexec -c 'ls -laR' /home/uploads/zipfiles/files\*.zip
```

| zipexec | /zipexec-1.0.tar.gz/zipexec-1.0/README.md | README.md |
====
ZIPF
====
|travis| |coveralls| |sonar_quality| |sonar_maintainability| |code_climate_maintainability| |pip|
--------------------------------------
What does it do?
--------------------------------------
The zipf package was created to simplify the creation of, and operations on, zipf distributions: addition, subtraction, multiplication, division, statistical operations such as mean and variance, and much more.
--------------------------------------
How do I get it?
--------------------------------------
Just type into your terminal:
.. code:: shell
pip install zipf
--------------------------------------
Calculating distances and divergence
--------------------------------------
I wrote another package called `dictances`_ which calculates various distances and divergences between discrete distributions such as zipf. Here's an example:
.. code:: python
from zipf import Zipf
from dictances import *
a = zipf.load("my_first_zipf.json")
b = zipf.load("my_second_zipf.json")
euclidean(a, b)
chebyshev(a, b)
hamming(a, b)
kullback_leibler(a, b)
jensen_shannon(a, b)
--------------------------------------
Creating a zipf using a zipf_factory
--------------------------------------
Here's a couple of examples:
Zipf from a list
-------------------------
.. code:: python
from zipf.factories import ZipfFromList
my_factory = ZipfFromList()
my_zipf = my_factory.run(["one", "one", "two", "my", "oh", "my", 1, 2, 3])
print(my_zipf)
'''
{
"one": 0.22222222222222215,
"my": 0.22222222222222215,
"two": 0.11111111111111108,
"oh": 0.11111111111111108,
"1": 0.11111111111111108,
"2": 0.11111111111111108,
"3": 0.11111111111111108
}
'''
Zipf from a text
-------------------------
.. code:: python
from zipf.factories import ZipfFromText
my_factory = ZipfFromText()
my_factory.set_word_filter(lambda w: len(w) > 3)
my_zipf = my_factory.run(
"""You've got to find what you love.
And that is as true for your work as it is for your lovers.
Keep looking. Don't settle.""")
print(my_zipf)
'''
{
"your": 0.16666666666666666,
"find": 0.08333333333333333,
"what": 0.08333333333333333,
"love": 0.08333333333333333,
"that": 0.08333333333333333,
"true": 0.08333333333333333,
"work": 0.08333333333333333,
"lovers": 0.08333333333333333,
"Keep": 0.08333333333333333,
"looking": 0.08333333333333333,
"settle": 0.08333333333333333
}
'''
Zipf from a k-sequence
-------------------------
.. code:: python
from zipf.factories import ZipfFromKSequence
sequence_fraction_len = 5
my_factory = ZipfFromKSequence(sequence_fraction_len)
my_zipf = my_factory.run(
"ACTGGAAATGATGGDTGATDGATGAGTDGATGGGGGAAAGDTGATDGATDGATGDTGGGGADDDGATAGDTAGTDGAGAGAGDTGATDGAAAGDTG")
print(my_zipf)
'''
{
"TGGGG": 0.1,
"ACTGG": 0.05,
"AAATG": 0.05,
"ATGGD": 0.05,
"TGATD": 0.05,
"GATGA": 0.05,
"GTDGA": 0.05,
"GAAAG": 0.05,
"DTGAT": 0.05,
"DGATD": 0.05,
"GATGD": 0.05,
"ADDDG": 0.05,
"ATAGD": 0.05,
"TAGTD": 0.05,
"GAGAG": 0.05,
"AGDTG": 0.05,
"ATDGA": 0.05,
"AAGDT": 0.05,
"G": 0.05
}
'''
Zipf from a text file
-------------------------
.. code:: python
from zipf.factories import ZipfFromFile
my_factory = ZipfFromFile()
my_factory.set_word_filter(lambda w: w != "brown")
my_zipf = my_factory.run()
print(my_zipf)
'''
{
"The": 0.125,
"quick": 0.125,
"fox": 0.125,
"jumps": 0.125,
"over": 0.125,
"the": 0.125,
"lazy": 0.125,
"dog": 0.125
}
'''
Zipf from webpage
-------------------------
.. code:: python
from zipf.factories import ZipfFromUrl
import json
my_factory = ZipfFromUrl()
my_factory.set_word_filter(lambda w: int(w) > 100)
my_factory.set_interface(lambda r: json.loads(r.text)["ip"])
my_zipf = my_factory.run("https://api.ipify.org/?format=json")
print(my_zipf)
'''
{
"134": 0.5,
"165": 0.5
}
'''
Zipf from directory
-------------------------
.. code:: python
from zipf.factories import ZipfFromDir
import json
my_factory = ZipfFromDir(use_cli=True)
my_factory.set_word_filter(lambda w: len(w) > 4)
my_zipf = my_factory.run("path/to/my/directory", ["txt"])
'''
My directory contains 2 files with the following texts:
- You must not lose faith in humanity.
Humanity is an ocean; if a few drops of the ocean are dirty,
the ocean does not become dirty.
- Try not to become a man of success,
but rather try to become a man of value.
'''
print(my_zipf)
'''
{
"ocean": 0.20000000000000004,
"become": 0.20000000000000004,
"dirty": 0.13333333333333336,
"faith": 0.06666666666666668,
"humanity": 0.06666666666666668,
"Humanity": 0.06666666666666668,
"drops": 0.06666666666666668,
"success": 0.06666666666666668,
"rather": 0.06666666666666668,
"value": 0.06666666666666668
}
'''
--------------------------------------
Options in creating a zipf
--------------------------------------
Some built in options are available, and you can read the options of any factory object by printing it:
.. code:: python
from zipf.zipf.factories import ZipfFromList
print(ZipfFromList())
'''
{
"remove_stop_words": false, # Removes stop words (currently only Italian's)
"minimum_count": 0, # Removes words that appear less than 'minimum_count'
"chain_min_len": 1, # Chains up words, starting by a min of 'chain_min_len'
"chain_max_len": 1, # and ending to a maximum of 'chain_max_len'
"chaining_character": " ", # The character to interpose between words
"chain_after_filter": false, # The chaining is done after filtering
"chain_after_clean": false # The chaining is done after cleaning
}
'''
--------------------------------------
License
--------------------------------------
This library is released under MIT license.
.. |travis| image:: https://travis-ci.org/LucaCappelletti94/zipf.png
:target: https://travis-ci.org/LucaCappelletti94/zipf
.. |coveralls| image:: https://coveralls.io/repos/github/LucaCappelletti94/zipf/badge.svg?branch=master
:target: https://coveralls.io/github/LucaCappelletti94/zipf
.. |sonar_quality| image:: https://sonarcloud.io/api/project_badges/measure?project=zipf.lucacappelletti&metric=alert_status
:target: https://sonarcloud.io/dashboard/index/zipf.lucacappelletti
.. |sonar_maintainability| image:: https://sonarcloud.io/api/project_badges/measure?project=zipf.lucacappelletti&metric=sqale_rating
:target: https://sonarcloud.io/dashboard/index/zipf.lucacappelletti
.. |pip| image:: https://badge.fury.io/py/zipf.svg
:target: https://badge.fury.io/py/zipf
.. |code_climate_maintainability| image:: https://api.codeclimate.com/v1/badges/c758496736a2c9cecbff/maintainability
:target: https://codeclimate.com/github/LucaCappelletti94/zipf/maintainability
:alt: Maintainability
.. _dictances: https://github.com/LucaCappelletti94/dictances

| zipf | /zipf-1.6.0.tar.gz/zipf-1.6.0/README.rst | README.rst |
================
Zipf classifier
================
|pip|
Introduction
-------------
ZipfClassifier is a classifier that, *even though in principle usable on any distribution*, leverages the assumption that some kinds of datasets, such as:
- texts
- `images (paper here)`_
- `spoken language (paper here)`_
follow the `Zipf law`_.
Installation
------------
.. code:: shell
pip install zipf_classifier
Working examples and explanation
--------------------------------
`Documentation`_ is available with a full explanation of how the classifier works.
License
===================
This package is licensed under MIT license.
.. |pip| image:: https://badge.fury.io/py/zipf_classifier.svg
:target: https://badge.fury.io/py/zipf_classifier
.. _dictances: https://github.com/LucaCappelletti94/dictances
.. _zipf: https://github.com/LucaCappelletti94/zipf
.. _images (paper here): http://www.dcs.warwick.ac.uk/bmvc2007/proceedings/CD-ROM/papers/paper-288.pdf
.. _spoken language (paper here): http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0033993
.. _Zipf law: https://en.wikipedia.org/wiki/Zipf%27s_law
.. _documentation: https://github.com/LucaCappelletti94/zipf_classifier/blob/master/documentation/documentation/Documentazione%20progetto/main.pdf

| zipf_classifier | /zipf_classifier-1.6.2.tar.gz/zipf_classifier-1.6.2/README.rst | README.rst |
============
zipfanalysis
============
Tools in python for analysing Zipf's law from text samples.
This can be installed as a package from the python3 package library using the terminal command:
::
>>> pip3 install zipfanalysis
-----
Usage
-----
The package can be used from within python scripts to estimate Zipf exponents, assuming a simple power law model for
word frequencies and ranks. To use the pacakge import it using
::
import zipfanalysis
-------------
Simple Method
-------------
The easiest way to carry out an analysis on a book or text file, using different estimators, is:
::
alpha_clauset = zipfanalysis.clauset("path_to_book.txt")
alpha_pdf = zipfanalysis.ols_pdf("path_to_book.txt", min_frequency=3)
alpha_cdf = zipfanalysis.ols_cdf("path_to_book.txt", min_frequency=3)
alpha_abc = zipfanalysis.abc("path_to_book.txt")
---------------
In Depth Method
---------------
Convert a book or text file to the frequency of words, ranked from highest to lowest:
::
word_counts = zipfanalysis.preprocessing.preprocessing.get_rank_frequency_from_text("path_to_book.txt")
Carry out different types of analysis to fit a power law to the data:
::
# Clauset et al estimator
alpha_clauset = zipfanalysis.estimators.clauset.clauset_estimator(word_counts)
# Ordinary Least Squares regression on log(rank) ~ log(frequency)
# Optional low frequency cut-off
alpha_pdf = zipfanalysis.estimators.ols_regression_pdf.ols_regression_pdf_estimator(word_counts, min_frequency=2)
# Ordinary least squares regression on the complemantary cumulative distribution function of ranks
# OLS on log(P(R>rank)) ~ log(rank)
# Optional low frequency cut-off
alpha_cdf = zipfanalysis.estimators.ols_regression_cdf.ols_regression_cdf_estimator(word_counts)
# Approximate Bayesian computation (regression method)
# Assumes model of p(rank) = C prob_rank^(-alpha)
# prob_rank is a word's rank in an underlying probability distribution
alpha_abc = zipfanalysis.estimators.approximate_bayesian_computation.abc_estimator(word_counts)
------------------------
Development - Next Steps
------------------------
1. Speed up abc. The current bottleneck is sampling from an infinite power law. This could be sped up by noting that we only need the frequency vector of ranks, not the whole sample: for example, sample from a uniform distribution and then drop values into integer rank buckets based on the inverse CDF (see the sketch after this list).
2. Build in frequency rank analysis. Convert to frequency counts representation, then carry out fit on that.
3. Add significance testing
4. Add ability to calculate x_min and truncated power laws.
5. Speed up OLS on the cdf
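A minimal sketch of the sampling idea in point 1, assuming a continuous-Pareto
approximation of the discrete power law (the function name and its parameters are
illustrative and not part of the package):
::

    import numpy as np

    def sampled_rank_frequencies(alpha, n_words, max_rank=10**6, rng=None):
        # Approximate rank-frequency vector for p(rank) ~ rank**(-alpha), alpha > 1.
        # Inverse-CDF sample of a continuous Pareto (x_min = 1), floored into integer rank buckets.
        rng = np.random.default_rng() if rng is None else rng
        u = 1.0 - rng.random(n_words)                          # Uniform(0, 1], avoids u == 0
        ranks = np.floor(u ** (-1.0 / (alpha - 1.0)))
        ranks = np.minimum(ranks, max_rank).astype(np.int64)   # cap rare extreme draws
        return np.bincount(ranks)                              # counts indexed by rank (index 0 unused)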
| zipfanalysis | /zipfanalysis-0.5.tar.gz/zipfanalysis-0.5/README.rst | README.rst |
# zipfile-dclimplode
[](https://pypi.org/project/zipfile-dclimplode/)
Monkey patch the standard `zipfile` module to enable DCL Implode support.
DCL stands for `PKWARE(R) Data Compression Library`.
Based on [`zipfile-deflate64`](https://github.com/brianhelba/zipfile-deflate64) and [`zipfile-zstandard`](https://github.com/taisei-project/python-zipfile-zstd), which provides similar functionality but for the `deflate64` algorithm. Unlike `zipfile-deflate64`, this package supports both compression and decompression.
Requires [`dclimplode`](https://github.com/cielavenir/dclimplode) for dclimplode bindings.
Note: if you need Python2, use [zipfile39](https://github.com/cielavenir/zipfile39) instead (it is also compatible with Python3).
## Installation
```bash
pip install zipfile-dclimplode
```
## Usage
Anywhere in a Python codebase:
```python
import zipfile_dclimplode # This has the side effect of patching the zipfile module to support DCL Implode
```
Alternatively, `zipfile_dclimplode` re-exports the `zipfile` API, as a convenience:
```python
import zipfile_dclimplode as zipfile
zipfile.ZipFile(...)
```
Compression example:
```python
import zipfile_dclimplode as zipfile
zf = zipfile.ZipFile('/tmp/test.zip', 'w', zipfile.ZIP_DCLIMPLODED, compresslevel=3)
zf.write('large_file.img')
```
compresslevel: 1,2,3 (binary) 11,12,13 (ascii)
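Decompression goes through the same patched `zipfile` API; for example, extracting the archive created above (the output directory is arbitrary):

```python
import zipfile_dclimplode as zipfile

with zipfile.ZipFile('/tmp/test.zip') as zf:
    zf.extractall('/tmp/extracted')
```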
| zipfile-dclimplode | /zipfile-dclimplode-0.0.3.1.tar.gz/zipfile-dclimplode-0.0.3.1/README.md | README.md |
import zipfile
import dclimplode
import threading
import inspect
import struct
from ._patcher import patch
zipfile.ZIP_DCLIMPLODED = 10
zipfile.ZIP_PKIMPLODED = 10
if zipfile.ZIP_DCLIMPLODED not in zipfile.compressor_names:
zipfile.compressor_names[zipfile.ZIP_DCLIMPLODED] = 'dclimplode'
zipfile.DCLIMPLODED_VERSION = 25
@patch(zipfile, '_check_compression')
def zstd_check_compression(compression):
if compression == zipfile.ZIP_DCLIMPLODED:
pass
else:
patch.originals['_check_compression'](compression)
@patch(zipfile, '_get_decompressor')
def zstd_get_decompressor(compress_type):
if compress_type == zipfile.ZIP_DCLIMPLODED:
return dclimplode.decompressobj()
else:
return patch.originals['_get_decompressor'](compress_type)
if 'compresslevel' in inspect.signature(zipfile._get_compressor).parameters:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_DCLIMPLODED:
if compresslevel is None:
compresslevel = 3
compressmethod = compresslevel//10
compresslevel = compresslevel%10
return dclimplode.compressobj(compressmethod, 1<<(9+compresslevel))
else:
return patch.originals['_get_compressor'](compress_type, compresslevel=compresslevel)
else:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_DCLIMPLODED:
if compresslevel is None:
compresslevel = 3
compressmethod = compresslevel//10
compresslevel = compresslevel%10
return dclimplode.compressobj(compressmethod, 1<<(9+compresslevel))
else:
return patch.originals['_get_compressor'](compress_type)
@patch(zipfile.ZipInfo, 'FileHeader')
def zstd_FileHeader(self, zip64=None):
if self.compress_type == zipfile.ZIP_DCLIMPLODED:
self.create_version = max(self.create_version, zipfile.DCLIMPLODED_VERSION)
self.extract_version = max(self.extract_version, zipfile.DCLIMPLODED_VERSION)
    return patch.originals['FileHeader'](self, zip64=zip64)

| zipfile-dclimplode | /zipfile-dclimplode-0.0.3.1.tar.gz/zipfile-dclimplode-0.0.3.1/zipfile_dclimplode/_zipfile.py | _zipfile.py |
# zipfile-isal
[](https://pypi.org/project/zipfile-isal/)
Monkey patch the standard `zipfile` module to enable accelerated deflate support via isal.
Based on [`zipfile-deflate64`](https://github.com/brianhelba/zipfile-deflate64) and [`zipfile-zstandard`](https://github.com/taisei-project/python-zipfile-zstd), which provides similar functionality but for the `deflate64` algorithm. Unlike `zipfile-deflate64`, this package supports both compression and decompression.
Requires [`isal`](https://github.com/pycompression/python-isal) (and [`slz`](https://github.com/cielavenir/python-slz) optionally).
Note: if you need Python2, use [zipfile39](https://github.com/cielavenir/zipfile39) instead (it is also compatible with Python3).
## Installation
```bash
pip install zipfile-isal
```
## Usage
Anywhere in a Python codebase:
```python
import zipfile_isal # This has the side effect of patching the zipfile module to support isal
```
Alternatively, `zipfile_isal` re-exports the `zipfile` API, as a convenience:
```python
import zipfile_isal as zipfile
zipfile.ZipFile(...)
```
Compression example:
```python
import zipfile_isal as zipfile
zf = zipfile.ZipFile('/tmp/test.zip', 'w', zipfile.ZIP_DEFLATED, compresslevel=-12)
zf.write('large_file.img')
```
compresslevel:
- -10, -11, -12, -13 correspond to isal 0, 1, 2, 3
- -21 corresponds to slz (see the sketch below)
- 11 to 19 correspond to 7-zip 1-9
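As a sketch of the slz mapping above (assumes the optional `slz` package is installed; file names are illustrative):

```python
import zipfile_isal as zipfile

zf = zipfile.ZipFile('/tmp/test_slz.zip', 'w', zipfile.ZIP_DEFLATED, compresslevel=-21)
zf.write('large_file.img')
```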
| zipfile-isal | /zipfile-isal-0.0.3.3.tar.gz/zipfile-isal-0.0.3.3/README.md | README.md |
import zipfile
import zlib
import threading
import inspect
import struct
from ._patcher import patch
try:
from isal import isal_zlib
zipfile.crc32 = isal_zlib.crc32
except ImportError:
isal_zlib = None
try:
import slz
except ImportError:
slz = None
try:
import codecs7z
except ImportError:
codecs7z = None
#@patch(zipfile, '_check_compression')
#def zstd_check_compression(compression):
# if compression == zipfile.ZIP_DEFLATED:
# pass
# else:
# patch.originals['_check_compression'](compression)
@patch(zipfile, '_get_decompressor')
def zstd_get_decompressor(compress_type):
if compress_type == zipfile.ZIP_DEFLATED:
if isal_zlib is not None:
return isal_zlib.decompressobj(-15)
return zlib.decompressobj(-15)
else:
return patch.originals['_get_decompressor'](compress_type)
if 'compresslevel' in inspect.signature(zipfile._get_compressor).parameters:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_DEFLATED:
if compresslevel is None:
compresslevel = 6
if compresslevel < -20:
assert slz is not None
return slz.compressobj()
if compresslevel <= -10:
assert isal_zlib is not None
return isal_zlib.compressobj(-(compresslevel+10), isal_zlib.DEFLATED, -15, 9)
if compresslevel >= 10:
assert codecs7z is not None
return codecs7z.deflate_compressobj(compresslevel-10)
return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
else:
return patch.originals['_get_compressor'](compress_type, compresslevel=compresslevel)
else:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_DEFLATED:
if compresslevel is None:
compresslevel = 6
if compresslevel < -20:
assert slz is not None
return slz.compressobj()
if compresslevel <= -10:
assert isal_zlib is not None
return isal_zlib.compressobj(-(compresslevel+10), isal_zlib.DEFLATED, -15, 9)
return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
else:
            return patch.originals['_get_compressor'](compress_type)

| zipfile-isal | /zipfile-isal-0.0.3.3.tar.gz/zipfile-isal-0.0.3.3/zipfile_isal/_zipfile.py | _zipfile.py |
For zipfile-xz information, see xz branch.
# zipfile-ppmd
[](https://pypi.org/project/zipfile-ppmd/)
Monkey patch the standard `zipfile` module to enable PPMd support.
Based on [`zipfile-deflate64`](https://github.com/brianhelba/zipfile-deflate64) and [`zipfile-zstandard`](https://github.com/taisei-project/python-zipfile-zstd), which provides similar functionality but for the `deflate64` algorithm. Unlike `zipfile-deflate64`, this package supports both compression and decompression.
Requires [`pyppmd`](https://github.com/miurahr/pyppmd) for ppmd bindings. Note that 0.16.0+ is required, which is not released yet. Please do `python3 -m pip install git+https://github.com/miurahr/pyppmd`.
Note: if you need Python2, use [zipfile39](https://github.com/cielavenir/zipfile39) instead (it is also compatible with Python3).
## Installation
```bash
pip install zipfile-ppmd
```
## Usage
Anywhere in a Python codebase:
```python
import zipfile_ppmd # This has the side effect of patching the zipfile module to support PPMd
```
Alternatively, `zipfile_ppmd` re-exports the `zipfile` API, as a convenience:
```python
import zipfile_ppmd as zipfile
zipfile.ZipFile(...)
```
Compression example:
```python
import zipfile_ppmd as zipfile
zf = zipfile.ZipFile('/tmp/test.zip', 'w', zipfile.ZIP_PPMD, compresslevel=5)
zf.write('large_file.img')
```
| zipfile-ppmd | /zipfile-ppmd-0.0.3.3.tar.gz/zipfile-ppmd-0.0.3.3/README.md | README.md |
import zipfile
import pyppmd
import threading
import inspect
import struct
from ._patcher import patch
zipfile.ZIP_PPMD = 98
zipfile.compressor_names[zipfile.ZIP_PPMD] = 'ppmd'
zipfile.PPMD_VERSION = 63
class PPMDCompressor(object):
def __init__(self, level=None):
self._comp = None
self._level = level
if self._level is None:
self._level = 5
if self._level < 1:
self._level = 1
if self._level > 9:
self._level = 9
def _init(self):
### level interpretation ###
# https://github.com/jinfeihan57/p7zip/blob/v17.04/CPP/7zip/Compress/PpmdZip.cpp#L155
# by the way, using numeric parameter is not covered by LGPL (according to clause 3).
order = 3 + self._level
sasize = 1 << min(self._level, 8)
restore_method = 0 if self._level < 7 else 1
### level interpretation end ###
prop = (order-1) | (sasize-1)<<4 | (restore_method)<<12;
self._comp = pyppmd.Ppmd8Encoder(order, sasize<<20, restore_method=restore_method, endmark=False)
return struct.pack('<H', prop)
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.encode(data)
return self._comp.encode(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class PPMDDecompressor(object):
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 2:
return b''
prop, = struct.unpack('<H', self._unconsumed[:2])
order = (prop&0x000f)+1
sasize = ((prop&0x0ff0)>>4)+1
restore_method = (prop&0xf000)>>12
self._decomp = pyppmd.Ppmd8Decoder(order, sasize<<20, restore_method=restore_method, endmark=False)
data = self._unconsumed[2:]
del self._unconsumed
result = self._decomp.decode(data)
self.eof = self._decomp.eof
return result
@patch(zipfile, '_check_compression')
def zstd_check_compression(compression):
if compression == zipfile.ZIP_PPMD:
pass
else:
patch.originals['_check_compression'](compression)
@patch(zipfile, '_get_decompressor')
def zstd_get_decompressor(compress_type):
if compress_type == zipfile.ZIP_PPMD:
return PPMDDecompressor()
else:
return patch.originals['_get_decompressor'](compress_type)
if 'compresslevel' in inspect.signature(zipfile._get_compressor).parameters:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_PPMD:
if compresslevel is None:
compresslevel = 5
return PPMDCompressor(compresslevel)
else:
return patch.originals['_get_compressor'](compress_type, compresslevel=compresslevel)
else:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_PPMD:
if compresslevel is None:
compresslevel = 5
return PPMDCompressor(compresslevel)
else:
return patch.originals['_get_compressor'](compress_type)
@patch(zipfile.ZipInfo, 'FileHeader')
def zstd_FileHeader(self, zip64=None):
if self.compress_type == zipfile.ZIP_PPMD:
self.create_version = max(self.create_version, zipfile.PPMD_VERSION)
self.extract_version = max(self.extract_version, zipfile.PPMD_VERSION)
return patch.originals['FileHeader'](self, zip64=zip64) | zipfile-ppmd | /zipfile-ppmd-0.0.3.3.tar.gz/zipfile-ppmd-0.0.3.3/zipfile_ppmd/_zipfile.py | _zipfile.py |
# zipfile-xz
[](https://pypi.org/project/zipfile-xz/)
Monkey patch the standard `zipfile` module to enable XZ support.
Based on [`zipfile-deflate64`](https://github.com/brianhelba/zipfile-deflate64) and [`zipfile-zstandard`](https://github.com/taisei-project/python-zipfile-zstd), which provide similar functionality for the `deflate64` and `zstandard` algorithms, respectively. Unlike `zipfile-deflate64`, this package supports both compression and decompression.
Note: if you need Python 2, use [zipfile39](https://github.com/cielavenir/zipfile39) instead (it is also compatible with Python 3).
Note: XZ is based on LZMA2, so the compression ratio will be similar to ZIP_LZMA.
## Installation
```bash
pip install zipfile-xz
```
## Usage
Anywhere in a Python codebase:
```python
import zipfile_xz # This has the side effect of patching the zipfile module to support XZ
```
Alternatively, `zipfile_xz` re-exports the `zipfile` API, as a convenience:
```python
import zipfile_xz as zipfile
zipfile.ZipFile(...)
```
Compression example:
```python
import zipfile_xz as zipfile
zf = zipfile.ZipFile('/tmp/test.zip', 'w', zipfile.ZIP_XZ, compresslevel=6)
zf.write('large_file.img')
zf.close()  # finalize the archive (writes the central directory)
```
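Reading works through the regular `zipfile` APIs as well. A minimal sketch, reusing the archive created above:
```python
import zipfile_xz as zipfile

with zipfile.ZipFile('/tmp/test.zip') as zf:
    data = zf.read('large_file.img')  # XZ members are decompressed transparently
```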
| zipfile-xz | /zipfile-xz-0.0.3.1.tar.gz/zipfile-xz-0.0.3.1/README.md | README.md |
import zipfile
import zstandard as zstd
import threading
import inspect
from ._patcher import patch
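# 93 is the compression method id the ZIP APPNOTE assigns to Zstandard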
zipfile.ZIP_ZSTANDARD = 93
zipfile.compressor_names[zipfile.ZIP_ZSTANDARD] = 'zstandard'
zipfile.ZSTANDARD_VERSION = 20
@patch(zipfile, '_check_compression')
def zstd_check_compression(compression):
if compression == zipfile.ZIP_ZSTANDARD:
pass
else:
patch.originals['_check_compression'](compression)
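# zipfile's read path checks the decompressor's `eof` attribute, which the decompressobj
# returned by python-zstandard does not provide; this wrapper reports eof=False so that
# end-of-member detection falls back on the remaining compressed-byte count.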
class ZstdDecompressObjWrapper:
def __init__(self, o):
self.o = o
def __getattr__(self, attr):
if attr == 'eof':
return False
return getattr(self.o, attr)
@patch(zipfile, '_get_decompressor')
def zstd_get_decompressor(compress_type):
if compress_type == zipfile.ZIP_ZSTANDARD:
return ZstdDecompressObjWrapper(zstd.ZstdDecompressor().decompressobj())
else:
return patch.originals['_get_decompressor'](compress_type)
if 'compresslevel' in inspect.signature(zipfile._get_compressor).parameters:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_ZSTANDARD:
if compresslevel is None:
compresslevel = 3
return zstd.ZstdCompressor(level=compresslevel, threads=12).compressobj()
else:
return patch.originals['_get_compressor'](compress_type, compresslevel=compresslevel)
else:
@patch(zipfile, '_get_compressor')
def zstd_get_compressor(compress_type, compresslevel=None):
if compress_type == zipfile.ZIP_ZSTANDARD:
if compresslevel is None:
compresslevel = 3
return zstd.ZstdCompressor(level=compresslevel, threads=12).compressobj()
else:
return patch.originals['_get_compressor'](compress_type)
@patch(zipfile.ZipInfo, 'FileHeader')
def zstd_FileHeader(self, zip64=None):
if self.compress_type == zipfile.ZIP_ZSTANDARD:
self.create_version = max(self.create_version, zipfile.ZSTANDARD_VERSION)
self.extract_version = max(self.extract_version, zipfile.ZSTANDARD_VERSION)
return patch.originals['FileHeader'](self, zip64=zip64) | zipfile-zstd | /zipfile_zstd-0.0.4-py3-none-any.whl/zipfile_zstd/_zipfile.py | _zipfile.py |
.. image:: https://travis-ci.org/cournape/zipfile2.png?branch=master
:target: https://travis-ci.org/cournape/zipfile2
zipfile2 contains an improved ZipFile class that may be used as a 100%
backward-compatible replacement for the standard library's `zipfile.ZipFile`.
Improvements compared to the upstream zipfile stdlib:
* Handling of symlinks (read and write)
* Compatible with Python 2.6 onwards (including 3.x), including context manager support
* Raises an exception by default when duplicate members are detected.
* Special class `LeanZipFile` to avoid using too much memory when handling
  zip files with a large number of members. Contrary to the stdlib
  ZipFile, it does not build the full list of members when opening the
  file. This can save hundreds of MB for zipfiles with a large number of
  members.
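Because the class is a drop-in replacement for the stdlib ZipFile, basic usage
looks the same. A minimal sketch (it assumes `ZipFile` is importable from the
top-level `zipfile2` package; `archive.zip` and `extracted` are placeholders)::

    from zipfile2 import ZipFile

    with ZipFile("archive.zip") as zf:   # context manager support
        print(zf.namelist())
        zf.extractall("extracted")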
| zipfile2 | /zipfile2-0.0.12.tar.gz/zipfile2-0.0.12/README.rst | README.rst |
import io
import os
import re
import importlib.util
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import threading
except ImportError:
import dummy_threading as threading
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__version__ = "0.1.3"
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except OSError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except OSError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except OSError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def __repr__(self):
result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
if self.compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self.compress_type,
self.compress_type))
hi = self.external_attr >> 16
lo = self.external_attr & 0xFFFF
if hi:
result.append(' filemode=%r' % stat.filemode(hi))
if lo:
result.append(' external_attr=%#x' % lo)
isdir = self.is_dir()
if not isdir or self.file_size:
result.append(' file_size=%r' % self.file_size)
if ((not isdir or self.compress_size) and
(self.compress_type != ZIP_STORED or
self.file_size != self.compress_size)):
result.append(' compress_size=%r' % self.compress_size)
result.append('>')
return ''.join(result)
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
@classmethod
def from_file(cls, filename, arcname=None):
"""Construct an appropriate ZipInfo for a file on the filesystem.
filename should be the path to a file or directory on the filesystem.
arcname is the name which it will have within the archive (by default,
this will be the same as filename, but without a drive letter and with
leading path separators removed).
"""
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = cls(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
def is_dir(self):
"""Return True if this archive member is a directory."""
return self.filename[-1] == '/'
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = None
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
if _ZipDecrypter.crctable is None:
_ZipDecrypter.crctable = _ZipDecrypter._GenerateCRCTable()
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
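# Adapters between lzma's raw LZMA1 codec and the ZIP container: the compressor
# prepends a small version/properties header on first use, and the decompressor
# parses that header before feeding the remaining data to the underlying codec.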
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise NotImplementedError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
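# Wrapper that lets several readers share one underlying file object: each instance
# remembers its own position, takes the archive lock around reads, and refuses to
# read while a write handle is open on the ZipFile.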
class _SharedFile:
def __init__(self, file, pos, close, lock, writing):
self._file = file
self._pos = pos
self._close = close
self._lock = lock
self._writing = writing
def read(self, n=-1):
with self._lock:
if self._writing():
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
self._file.seek(self._pos)
data = self._file.read(n)
self._pos = self._file.tell()
return data
def close(self):
if self._file is not None:
fileobj = self._file
self._file = None
self._close(fileobj)
# Provide the tell method for unseekable stream
class _Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
def close(self):
self.fp.close()
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'')
else:
self._expected_crc = None
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
return io.BufferedIOBase.readline(self, limit)
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc)
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
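# File-like object returned by ZipFile.open(name, 'w'): it compresses written data,
# tracks CRC and sizes, and on close() either seeks back to rewrite the local file
# header or, for unseekable output, appends a data descriptor (flag bit 0x08).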
class _ZipWriteFile(io.BufferedIOBase):
def __init__(self, zf, zinfo, zip64):
self._zinfo = zinfo
self._zip64 = zip64
self._zipfile = zf
self._compressor = _get_compressor(zinfo.compress_type)
self._file_size = 0
self._compress_size = 0
self._crc = 0
@property
def _fileobj(self):
return self._zipfile.fp
def writable(self):
return True
def write(self, data):
nbytes = len(data)
self._file_size += nbytes
self._crc = crc32(data, self._crc)
if self._compressor:
data = self._compressor.compress(data)
self._compress_size += len(data)
self._fileobj.write(data)
return nbytes
def close(self):
super().close()
# Flush any data from the compressor, and update header info
if self._compressor:
buf = self._compressor.flush()
self._compress_size += len(buf)
self._fileobj.write(buf)
self._zinfo.compress_size = self._compress_size
else:
self._zinfo.compress_size = self._file_size
self._zinfo.CRC = self._crc
self._zinfo.file_size = self._file_size
# Write updated header info
if self._zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if self._zip64 else '<LLL'
self._fileobj.write(struct.pack(fmt, self._zinfo.CRC,
self._zinfo.compress_size, self._zinfo.file_size))
self._zipfile.start_dir = self._fileobj.tell()
else:
if not self._zip64:
if self._file_size > ZIP64_LIMIT:
raise RuntimeError('File size unexpectedly exceeded ZIP64 '
'limit')
if self._compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size unexpectedly exceeded '
'ZIP64 limit')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
# Preserve current position in file
self._zipfile.start_dir = self._fileobj.tell()
self._fileobj.seek(self._zinfo.header_offset)
self._fileobj.write(self._zinfo.FileHeader(self._zip64))
self._fileobj.seek(self._zipfile.start_dir)
self._zipfile._writing = False
# Successfully written: Add file to our caches
self._zipfile.filelist.append(self._zinfo)
self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
self._writing = False
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self._start_disk = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = self._start_disk = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self._start_disk = self.fp.tell()
else:
raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
except:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# self._start_disk: Position of the start of ZIP archive
# It is zero, unless ZIP was concatenated to another file
self._start_disk = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
self._start_disk -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = self._start_disk + offset_cd
print("given, inferred, offset", offset_cd, inferred, self._start_disk)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + self._start_disk
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + self._start_disk
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment).__name__)
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None, *, force_zip64=False):
"""Return file-like object for 'name'.
name is a string for the file name within the ZIP file, or a ZipInfo
object.
mode should be 'r' to read a file already in the ZIP file, or 'w' to
write to a file newly added to the archive.
pwd is the password to decrypt files (only used for reading).
When writing, if the file size is not known in advance but may exceed
2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
files. If the size is known in advance, it is best to pass a ZipInfo
instance for name, with zinfo.file_size set.
"""
if mode not in {"r", "w"}:
raise ValueError('open() requires mode "r" or "w"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd and (mode == "w"):
raise ValueError("pwd is only supported for reading files")
if not self.fp:
raise ValueError(
"Attempt to use ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
elif mode == 'w':
zinfo = ZipInfo(name)
zinfo.compress_type = self.compression
else:
# Get info object for name
zinfo = self.getinfo(name)
if mode == 'w':
return self._open_to_write(zinfo, force_zip64=force_zip64)
if self._writing:
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
# Open for reading:
self._fileRefCnt += 1
zef_file = _SharedFile(self.fp, zinfo.header_offset,
self._fpclose, self._lock, lambda: self._writing)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %r is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file %r" % name)
return ZipExtFile(zef_file, mode, zinfo, zd, True)
except:
zef_file.close()
raise
def _open_to_write(self, zinfo, force_zip64=False):
if force_zip64 and not self._allowZip64:
raise ValueError(
"force_zip64 is True, but allowZip64 was False when opening "
"the ZIP file."
)
if self._writing:
raise ValueError("Can't write to the ZIP file while there is "
"another write handle open on it. "
"Close the first handle before opening another.")
# Sizes and CRC are overwritten with correct data after processing the file
if not hasattr(zinfo, 'file_size'):
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.flag_bits = 0x00
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
if not self._seekable:
zinfo.flag_bits |= 0x08
if not zinfo.external_attr:
zinfo.external_attr = 0o600 << 16 # permissions: ?rw-------
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
(force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT)
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell()
self._writecheck(zinfo)
self._didModify = True
self.fp.write(zinfo.FileHeader(zip64))
self._writing = True
return _ZipWriteFile(self, zinfo, zip64)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.is_dir():
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists"
)
zinfo = ZipInfo.from_file(filename, arcname)
if zinfo.is_dir():
zinfo.compress_size = 0
zinfo.CRC = 0
else:
if compress_type is not None:
zinfo.compress_type = compress_type
else:
zinfo.compress_type = self.compression
if zinfo.is_dir():
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
else:
with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
shutil.copyfileobj(src, dest, 1024*8)
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists."
)
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
with self.open(zinfo, mode='w') as dest:
dest.write(data)
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
if self._writing:
raise ValueError("Can't close the ZIP file while there is "
"an open writing handle on it. "
"Close the writing handle before closing the zip.")
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
header_offset = zinfo.header_offset - self._start_disk
if header_offset > ZIP64_LIMIT:
extra.append(header_offset)
header_offset = 0xffffffff
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir - self._start_disk
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=True, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename="", filterfunc=None):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyc.
This method will compile the module.py into module.pyc if
necessary.
        If filterfunc(pathname) is given, it is called for every path that
        would be added. When it returns False, the file or directory is skipped.
"""
if filterfunc and not filterfunc(pathname):
if self.debug:
label = 'path' if os.path.isdir(pathname) else 'file'
print('%s %r skipped by filterfunc' % (label, pathname))
return
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename,
filterfunc=filterfunc) # Recursive call
elif ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
if sys.version_info >= (3, 5):
pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
else:
pycache_opt0 = importlib.util.cache_from_source(file_py,
debug_override=True)
pycache_opt1 = importlib.util.cache_from_source(file_py,
debug_override=False)
pycache_opt2 = None
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_opt0) and
os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt0
arcname = file_pyc
elif (os.path.isfile(pycache_opt1) and
os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt1
arcname = file_pyc
elif pycache_opt2 and (os.path.isfile(pycache_opt2) and
os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt2
arcname = file_pyc
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
if sys.flags.optimize == 0:
fname = pycache_opt0
elif (sys.flags.optimize == 1) or not pycache_opt2:
fname = pycache_opt1
else:
fname = pycache_opt2
arcname = file_pyc
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_opt0
arcname = file_pyc
else:
arcname = file_pyc
if self._optimize == 1:
fname = pycache_opt1
elif self._optimize == 2:
fname = pycache_opt2 or pycache_opt1
else:
msg = "invalid value for 'optimize': {!r}".format(self._optimize)
raise ValueError(msg)
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
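# Illustrative usage sketch (not part of the original module): bundling a
# package's compiled modules with PyZipFile.writepy().  The package directory
# "mypkg" and the archive name "mypkg.zip" are hypothetical examples.
def _example_writepy():
    with PyZipFile("mypkg.zip", mode="w", optimize=2) as zf:
        # Recurse through the package, skipping anything under a test directory.
        zf.writepy("mypkg", filterfunc=lambda p: "test" not in p)
        return zf.namelist()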
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE, file=sys.stderr)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE, file=sys.stderr)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE, file=sys.stderr)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE, file=sys.stderr)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.extractall(args[2])
elif args[0] == '-c':
if len(args) < 3:
print(USAGE, file=sys.stderr)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
if zippath:
zf.write(path, zippath)
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w') as zf:
for path in args[2:]:
zippath = os.path.basename(path)
if not zippath:
zippath = os.path.basename(os.path.dirname(path))
if zippath in ('', os.curdir, os.pardir):
zippath = ''
addToZip(zf, path, zippath)
if __name__ == "__main__":
    main()
# --- zipfile36 | /zipfile36-0.1.3.tar.gz/zipfile36-0.1.3/zipfile36.py | zipfile36.py ---
import collections
import importlib
import sys
import os
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
import zipfile
from importlib.util import source_from_cache
from test.support import make_legacy_pyc, strip_python_stderr
# Cached result of the expensive test performed in the function below.
__cached_interp_requires_environment = None
def interpreter_requires_environment():
"""
Returns True if our sys.executable interpreter requires environment
variables in order to be able to run at all.
This is designed to be used with @unittest.skipIf() to annotate tests
that need to use an assert_python*() function to launch an isolated
mode (-I) or no environment mode (-E) sub-interpreter process.
A normal build & test does not run into this situation but it can happen
when trying to run the standard library test suite from an interpreter that
doesn't have an obvious home with Python's current home finding logic.
Setting PYTHONHOME is one way to get most of the testsuite to run in that
situation. PYTHONPATH or PYTHONUSERSITE are other common environment
variables that might impact whether or not the interpreter can start.
"""
global __cached_interp_requires_environment
if __cached_interp_requires_environment is None:
# Try running an interpreter with -E to see if it works or not.
try:
subprocess.check_call([sys.executable, '-E',
'-c', 'import sys; sys.exit(0)'])
except subprocess.CalledProcessError:
__cached_interp_requires_environment = True
else:
__cached_interp_requires_environment = False
return __cached_interp_requires_environment
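# Illustrative sketch (not part of the original module): a test module would
# typically combine the helper above with unittest.skipIf so that subprocess
# tests using -E/-I are skipped on interpreters that cannot run without
# environment variables.  The test class and method names are hypothetical.
def _example_skip_if_env_required():
    import unittest
    class _ExampleTests(unittest.TestCase):
        @unittest.skipIf(interpreter_requires_environment(),
                         'interpreter cannot run without environment variables')
        def test_isolated_child(self):
            assert_python_ok('-c', 'import sys; assert sys.flags.isolated')
    return _ExampleTests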
_PythonRunResult = collections.namedtuple("_PythonRunResult",
("rc", "out", "err"))
# Executing the interpreter in a subprocess
def run_python_until_end(*args, **env_vars):
env_required = interpreter_requires_environment()
if '__isolated' in env_vars:
isolated = env_vars.pop('__isolated')
else:
isolated = not env_vars and not env_required
cmd_line = [sys.executable, '-X', 'faulthandler']
if isolated:
# isolated mode: ignore Python environment variables, ignore user
# site-packages, and don't add the current directory to sys.path
cmd_line.append('-I')
elif not env_vars and not env_required:
# ignore Python environment variables
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# set TERM='' unless the TERM environment variable is passed explicitly
# see issues #11390 and #18300
if 'TERM' not in env_vars:
env['TERM'] = ''
# But a special flag that can be set to override -- in this case, the
# caller is responsible to pass the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
proc = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
with proc:
try:
out, err = proc.communicate()
finally:
proc.kill()
subprocess._cleanup()
rc = proc.returncode
err = strip_python_stderr(err)
return _PythonRunResult(rc, out, err), cmd_line
def _assert_python(expected_success, *args, **env_vars):
res, cmd_line = run_python_until_end(*args, **env_vars)
if (res.rc and expected_success) or (not res.rc and not expected_success):
        # Limit the captured output to roughly 100 lines of 80 ASCII characters
maxlen = 80 * 100
out, err = res.out, res.err
if len(out) > maxlen:
out = b'(... truncated stdout ...)' + out[-maxlen:]
if len(err) > maxlen:
err = b'(... truncated stderr ...)' + err[-maxlen:]
out = out.decode('ascii', 'replace').rstrip()
err = err.decode('ascii', 'replace').rstrip()
raise AssertionError("Process return code is %d\n"
"command line: %r\n"
"\n"
"stdout:\n"
"---\n"
"%s\n"
"---\n"
"\n"
"stderr:\n"
"---\n"
"%s\n"
"---"
% (res.rc, cmd_line,
out,
err))
return res
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
stderr) tuple.
If the __cleanenv keyword is set, env_vars is used as a fresh environment.
Python is started in isolated mode (command line option -I),
except if the __isolated keyword is set to False.
"""
return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails (rc != 0) and return a (return code, stdout,
stderr) tuple.
See assert_python_ok() for more options.
"""
return _assert_python(False, *args, **env_vars)
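# Minimal usage sketch (not part of the original module): run child
# interpreters and check their exit status and captured output; the captured
# streams are bytes.  The one-liner scripts are hypothetical examples.
def _example_assert_python():
    rc, out, err = assert_python_ok('-c', 'print("hello")')
    assert rc == 0 and out.strip() == b'hello'
    rc, out, err = assert_python_failure('-c', 'import sys; sys.exit(2)')
    assert rc == 2
    return out, err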
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
"""Run a Python subprocess with the given arguments.
kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
object.
"""
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
# Under Fedora (?), GNU readline can output junk on stderr when initialized,
# depending on the TERM setting. Setting TERM=vt100 is supposed to disable
# that. References:
# - http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
# - http://stackoverflow.com/questions/15760712/python-readline-module-prints-escape-character-during-import
# - http://lists.gnu.org/archive/html/bug-readline/2007-08/msg00004.html
env = kw.setdefault('env', dict(os.environ))
env['TERM'] = 'vt100'
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=stdout, stderr=stderr,
**kw)
def kill_python(p):
"""Run the given Popen process until completion and return stdout."""
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
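# Illustrative sketch (not part of the original module): spawn a child
# interpreter, feed it one line on stdin, then let kill_python() collect its
# stdout and reap the process.
def _example_spawn_python():
    p = spawn_python('-c', 'print(input())')
    p.stdin.write(b'ping\n')
    p.stdin.flush()
    return kill_python(p)  # the child's stdout, as bytes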
def make_script(script_dir, script_basename, source, omit_suffix=False):
script_filename = script_basename
if not omit_suffix:
script_filename += os.extsep + 'py'
script_name = os.path.join(script_dir, script_filename)
# The script should be encoded to UTF-8, the default string encoding
script_file = open(script_name, 'w', encoding='utf-8')
script_file.write(source)
script_file.close()
importlib.invalidate_caches()
return script_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
parts = script_name.split(os.sep)
if len(parts) >= 2 and parts[-2] == '__pycache__':
legacy_pyc = make_legacy_pyc(source_from_cache(script_name))
name_in_zip = os.path.basename(legacy_pyc)
script_name = legacy_pyc
else:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
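# Minimal sketch (not part of the original module): create a throwaway script
# and wrap it in a zip archive inside a temporary directory.  The script and
# archive names are hypothetical; both are removed when the directory goes away.
def _example_make_zip_script():
    with tempfile.TemporaryDirectory() as tmp:
        script = make_script(tmp, 'hello', 'print("hello from zip")')
        zip_name, run_name = make_zip_script(tmp, 'hello_zip', script)
        # run_name points at the script inside the archive and could be passed
        # to the interpreter on the command line.
        return zip_name, run_name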
def make_pkg(pkg_dir, init_source=''):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', init_source)
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = py_compile.compile(init_name, doraise=True)
script_name = py_compile.compile(script_name, doraise=True)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip) | zipfile36 | /zipfile36-0.1.3.tar.gz/zipfile36-0.1.3/test/support/script_helper.py | script_helper.py |
import collections.abc
import contextlib
import errno
import faulthandler
import fnmatch
import functools
import gc
import importlib
import importlib.util
import logging.handlers
import nntplib
import os
import platform
import re
import shutil
import socket
import stat
import struct
import subprocess
import sys
import sysconfig
import tempfile
import time
import types
import unittest
import urllib.error
import warnings
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import gzip
except ImportError:
gzip = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
try:
import resource
except ImportError:
resource = None
__all__ = [
# globals
"PIPE_MAX_SIZE", "verbose", "max_memuse", "use_resources", "failfast",
# exceptions
"Error", "TestFailed", "ResourceDenied",
# imports
"import_module", "import_fresh_module", "CleanImport",
# modules
"unload", "forget",
# io
"record_original_stdout", "get_original_stdout", "captured_stdout",
"captured_stdin", "captured_stderr",
# filesystem
"TESTFN", "SAVEDCWD", "unlink", "rmtree", "temp_cwd", "findfile",
"create_empty_file", "can_symlink", "fs_is_case_insensitive",
# unittest
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "check_syntax_error",
"TransientResource", "time_out", "socket_peer_reset", "ioerror_peer_reset",
"transient_internet", "BasicTestRunner", "run_unittest", "run_doctest",
"skip_unless_symlink", "requires_gzip", "requires_bz2", "requires_lzma",
"bigmemtest", "bigaddrspacetest", "cpython_only", "get_attribute",
"requires_IEEE_754", "skip_unless_xattr", "requires_zlib",
"anticipate_failure", "load_package_tests", "detect_api_mismatch",
"check__all__",
# sys
"is_jython", "is_android", "check_impl_detail", "unix_shell",
# network
"HOST", "IPV6_ENABLED", "find_unused_port", "bind_port", "open_urlresource",
# processes
'temp_umask', "reap_children",
# logging
"TestHandler",
# threads
"threading_setup", "threading_cleanup", "reap_threads", "start_threads",
# miscellaneous
"check_warnings", "check_no_resource_warning", "EnvironmentVarGuard",
"run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
"run_with_tz", "PGO",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect.
"""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False, *, required_on=()):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed. If a module is required on a platform but optional for
others, set required_on to an iterable of platform prefixes which will be
compared against sys.platform.
"""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
if sys.platform.startswith(tuple(required_on)):
raise
raise unittest.SkipTest(str(msg))
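# Illustrative sketch (not part of the original module): a test file usually
# calls import_module() at import time so the whole test module is skipped
# when an optional dependency is unavailable.  The module names below are
# examples only.
def _example_import_module():
    ssl = import_module('ssl')  # raises unittest.SkipTest if ssl is missing
    ctypes = import_module('ctypes', required_on=['win'])
    return ssl, ctypes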
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported.
"""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise.
"""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def load_package_tests(pkg_dir, loader, standard_tests, pattern):
"""Generic load_tests implementation for simple test packages.
Most packages can implement load_tests using this function as follows:
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
"""
if pattern is None:
pattern = "test*"
top_dir = os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))) # support
package_tests = loader.discover(start_dir=pkg_dir,
top_level_dir=top_dir,
pattern=pattern)
standard_tests.addTests(package_tests)
return standard_tests
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Import and return a module, deliberately bypassing sys.modules.
This function imports and returns a fresh copy of the named Python module
by removing the named module from sys.modules before doing the import.
Note that unlike reload, the original module is not affected by
this operation.
*fresh* is an iterable of additional module names that are also removed
from the sys.modules cache before doing the import.
*blocked* is an iterable of module names that are replaced with None
in the module cache during the import to ensure that attempts to import
them raise ImportError.
The named module and any modules named in the *fresh* and *blocked*
parameters are saved before starting the import and then reinserted into
sys.modules when the fresh import is complete.
Module and package deprecation messages are suppressed during this import
if *deprecated* is True.
This function will raise ImportError if the named module cannot be
imported.
"""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
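# Minimal sketch (not part of the original module): obtain the C-accelerated
# and the pure-Python flavours of heapq side by side, as described above.
def _example_import_fresh_module():
    c_heapq = import_fresh_module('heapq', fresh=['_heapq'])
    py_heapq = import_fresh_module('heapq', blocked=['_heapq'])
    return c_heapq, py_heapq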
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except OSError as exc:
print("support.rmtree(): os.lstat(%r) failed with %s" % (fullname, exc),
file=sys.__stderr__)
mode = 0
if stat.S_ISDIR(mode):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except (FileNotFoundError, NotADirectoryError):
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except FileNotFoundError:
pass
def rmtree(path):
try:
_rmtree(path)
except FileNotFoundError:
pass
def make_legacy_pyc(source):
"""Move a PEP 3147/488 pyc file to its legacy pyc location.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147/488 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = importlib.util.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + 'c')
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147/488 or
legacy .pyc files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147/488 and legacy pyc files.
unlink(source + 'c')
for opt in ('', 1, 2):
unlink(importlib.util.cache_from_source(source, optimization=opt))
# Check whether a gui is actually available
def _is_gui_available():
if hasattr(_is_gui_available, 'result'):
return _is_gui_available.result
reason = None
if sys.platform.startswith('win'):
# if Python is running as a service (such as the buildbot service),
# gui interaction may be disallowed
import ctypes
import ctypes.wintypes
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
if not bool(uof.dwFlags & WSF_VISIBLE):
reason = "gui not available (WSF_VISIBLE flag not set)"
elif sys.platform == 'darwin':
# The Aqua Tk implementations on OS X can abort the process if
# being called in an environment where a window server connection
# cannot be made, for instance when invoked by a buildbot or ssh
# process not running under the same user id as the current console
# user. To avoid that, raise an exception if the window manager
# connection is not available.
from ctypes import cdll, c_int, pointer, Structure
from ctypes.util import find_library
app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
if app_services.CGMainDisplayID() == 0:
reason = "gui tests cannot run without OS X window manager"
else:
class ProcessSerialNumber(Structure):
_fields_ = [("highLongOfPSN", c_int),
("lowLongOfPSN", c_int)]
psn = ProcessSerialNumber()
psn_p = pointer(psn)
if ( (app_services.GetCurrentProcess(psn_p) < 0) or
(app_services.SetFrontProcess(psn_p) < 0) ):
reason = "cannot run without OS X gui process"
# check on every platform whether tkinter can actually do anything
if not reason:
try:
from tkinter import Tk
root = Tk()
root.withdraw()
root.update()
root.destroy()
except Exception as e:
err_string = str(e)
if len(err_string) > 50:
err_string = err_string[:50] + ' [...]'
reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
err_string)
_is_gui_available.reason = reason
_is_gui_available.result = not reason
return _is_gui_available.result
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
Known resources are set by regrtest.py. If not running under regrtest.py,
all resources are assumed enabled unless use_resources has been set.
"""
return use_resources is None or resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available."""
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
if resource == 'gui' and not _is_gui_available():
raise ResourceDenied(_is_gui_available.reason)
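# Illustrative sketch (not part of the original module): guard expensive test
# code behind a regrtest resource.  'network' and 'cpu' are regrtest resource
# names; the guarded bodies are hypothetical.
def _example_requires():
    requires('network')  # ResourceDenied unless enabled with -u network/-u all
    if is_resource_enabled('cpu'):
        pass  # also run the CPU-heavy variant of the test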
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
    version is less than min_version.
    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
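# Minimal sketch (not part of the original module): bind a listening TCP
# socket to an ephemeral port for the duration of a test, as recommended in
# the docstring above.
def _example_bind_port():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        port = bind_port(sock, HOST)
        sock.listen()
        return port  # the socket is closed when the with-block exits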
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def system_must_validate_cert(f):
"""Skip the test on TLS certificate validation failures."""
@functools.wraps(f)
def dec(*args, **kwargs):
try:
f(*args, **kwargs)
except IOError as e:
if "CERTIFICATE_VERIFY_FAILED" in str(e):
raise unittest.SkipTest("system does not contain "
"necessary certificates")
raise
return dec
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
# A constant likely larger than the underlying OS socket buffer size, to make
# writes blocking.
# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
# for a discussion of this number.
SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_gzip = unittest.skipUnless(gzip, 'requires gzip')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
is_android = bool(sysconfig.get_config_var('ANDROID_API_LEVEL'))
if sys.platform != 'win32':
unix_shell = '/system/bin/sh' if is_android else '/bin/sh'
else:
unix_shell = None
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
# For each character, the encoding list are just example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name == 'nt':
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
for name in (
    # b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
    # accepts it when creating a file or a directory, but then refuses to enter
    # that directory when the bytes name is used. So test b'\xe7' first: it is
    # not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
# Set by libregrtest/main.py so we can skip tests that are not
# useful for PGO
PGO = False
@contextlib.contextmanager
def temp_dir(path=None, quiet=False):
"""Return a context manager that creates a temporary directory.
Arguments:
path: the directory to create temporarily. If omitted or None,
defaults to creating a temporary directory using tempfile.mkdtemp.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, if the path is specified and cannot be
created, only a warning is issued.
"""
dir_created = False
if path is None:
path = tempfile.mkdtemp()
dir_created = True
path = os.path.realpath(path)
else:
try:
os.mkdir(path)
dir_created = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp dir: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield path
finally:
if dir_created:
rmtree(path)
@contextlib.contextmanager
def change_cwd(path, quiet=False):
"""Return a context manager that changes the current working directory.
Arguments:
path: the directory to use as the temporary current working directory.
quiet: if False (the default), the context manager raises an exception
on error. Otherwise, it issues only a warning and keeps the current
working directory the same.
"""
saved_dir = os.getcwd()
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change CWD to: ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
Context manager that temporarily creates and changes the CWD.
The function temporarily changes the current working directory
after creating a temporary directory in the current directory with
name *name*. If *name* is None, the temporary directory is
created using tempfile.mkdtemp.
If *quiet* is False (default) and it is not possible to
create or change the CWD, an error is raised. If *quiet* is True,
only a warning is raised and the original CWD is used.
"""
with temp_dir(path=name, quiet=quiet) as temp_path:
with change_cwd(temp_path, quiet=quiet) as cwd_dir:
yield cwd_dir
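# Illustrative sketch (not part of the original module): run filesystem code
# in a scratch working directory that is removed again afterwards.  The file
# name is a hypothetical example.
def _example_temp_cwd():
    with temp_cwd() as cwd:
        create_empty_file('scratch.txt')
        return os.path.exists(os.path.join(cwd, 'scratch.txt'))  # True here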
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)
# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
def findfile(filename, subdir=None):
"""Try to find a file on sys.path or in the test directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path).
Setting *subdir* indicates a relative path to use to find the file
rather than looking directly in the path directories.
"""
if os.path.isabs(filename):
return filename
if subdir is not None:
filename = os.path.join(subdir, filename)
path = [TEST_HOME_DIR] + sys.path
for dn in path:
fn = os.path.join(dn, filename)
if os.path.exists(fn): return fn
return filename
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(TEST_DATA_DIR, filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
if verbose:
print('\tfetching %s ...' % url, file=get_original_stdout())
opener = urllib.request.build_opener()
if gzip:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
f = opener.open(url, timeout=15)
if gzip and f.headers.get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=f)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
        # test_warnings swaps the module, we need to look it up in
        # the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
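# Minimal sketch (not part of the original module): assert that one specific
# DeprecationWarning is emitted.  The warning text is a hypothetical example.
def _example_check_warnings():
    with check_warnings(('spam is deprecated', DeprecationWarning),
                        quiet=False) as recorder:
        warnings.warn('spam is deprecated', DeprecationWarning)
    return recorder.warnings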
@contextlib.contextmanager
def check_no_resource_warning(testcase):
"""Context manager to check that no ResourceWarning is emitted.
Usage:
with check_no_resource_warning(self):
f = open(...)
...
del f
You must remove the object which may emit ResourceWarning before
the end of the context manager.
"""
with warnings.catch_warnings(record=True) as warns:
warnings.filterwarnings('always', category=ResourceWarning)
yield
gc_collect()
testcase.assertEqual(warns, [])
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
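# Illustrative sketch (not part of the original module): temporarily override
# environment variables; every change is rolled back when the block exits.
# The variable names are examples only.
def _example_environment_var_guard():
    with EnvironmentVarGuard() as env:
        env.set('PYTHONDONTWRITEBYTECODE', '1')
        env.unset('PYTHONWARNINGS')
        return os.environ.get('PYTHONDONTWRITEBYTECODE')  # '1' inside the block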
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(OSError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(OSError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
(isinstance(err, urllib.error.HTTPError) and
500 <= err.code <= 599) or
(isinstance(err, urllib.error.URLError) and
(("ConnectionRefusedError" in err.reason) or
("TimeoutError" in err.reason) or
("EOFError" in err.reason))) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except nntplib.NNTPTemporaryError as err:
if verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
except OSError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], OSError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise OSError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], OSError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt not in ('', '-O0', '-Og')
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
import _testcapi
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
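# Illustrative usage sketch (not part of the original module): run a test
# method under a specific locale, trying the candidates in order and silently
# leaving the locale unchanged if none of them can be set.
#
#     @run_with_locale('LC_ALL', 'de_DE', 'fr_FR', '')
#     def test_locale_aware_formatting(self):
#         ...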
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
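# Illustrative usage sketch (not part of the original module): force a test to
# run with a known TZ value; the decorator raises SkipTest where time.tzset()
# is unavailable (e.g. on Windows).
#
#     @run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
#     def test_localtime(self):
#         ...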
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
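# Illustrative usage sketch (not part of the original module): the test runner
# passes its memory-limit option here; limits below 2 GiB are rejected as not
# useful for bigmem tests.
#
#     set_memlimit('4g')      # allow bigmem tests to use up to 4 GiB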
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is a requested size for the test (in arbitrary, test-interpreted
units.) 'memuse' is the number of bytes per unit for the test, or a good
estimate of it. For example, a test that needs two byte buffers, of 4 GiB
each, could be decorated with @bigmemtest(size=_4G, memuse=2).
The 'size' argument is normally passed to the decorated test method as an
extra argument. If 'dry_run' is true, the value passed to the test method
may be less than the requested value. If 'dry_run' is false, it means the
test doesn't support dummy runs when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
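# Illustrative usage sketch (not part of the original module): a test that
# builds one ~2 GiB bytes object declares its per-unit memory use so it is
# skipped when the configured limit (see set_memlimit) is too low.
#
#     @bigmemtest(size=_2G, memuse=1)
#     def test_large_bytes(self, size):
#         data = b'x' * size
#         self.assertEqual(len(data), size)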
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
    To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
for name in test.id().split("."):
if fnmatch.fnmatchcase(name, match_tests):
return True
return False
_filter_suite(suite, case_pred)
_run_suite(suite)
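# Illustrative usage sketch (not part of the original module): a typical
# test_main() collects its TestCase classes and hands them to run_unittest()
# (the class names below are hypothetical).
#
#     def test_main():
#         run_unittest(MyFeatureTests, MyRegressionTests)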
#=======================================================================
# Check for the presence of docstrings.
# Rather than trying to enumerate all the cases where docstrings may be
# disabled, we just check for that directly
def _check_docstrings():
"""Just used to check if docstrings are enabled"""
MISSING_C_DOCSTRINGS = (check_impl_detail() and
sys.platform != 'win32' and
not sysconfig.get_config_var('WITH_DOC_STRINGS'))
HAVE_DOCSTRINGS = (_check_docstrings.__doc__ is not None and
not MISSING_C_DOCSTRINGS)
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if _thread:
return _thread._count(), threading._dangling.copy()
else:
return 1, ()
def threading_cleanup(*original_values):
if not _thread:
return
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
time.sleep(0.01)
gc_collect()
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not _thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
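# Illustrative usage sketch (not part of the original module): wrap any test
# that starts threads so dangling threads are waited on even when it fails.
#
#     @reap_threads
#     def test_server_shutdown(self):
#         ...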
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def start_threads(threads, unlock=None):
threads = list(threads)
started = []
try:
try:
for t in threads:
t.start()
started.append(t)
except:
if verbose:
print("Can't start %d threads, only %d threads started" %
(len(threads), len(started)))
raise
yield
finally:
try:
if unlock:
unlock()
endtime = starttime = time.time()
for timeout in range(1, 16):
endtime += 60
for t in started:
t.join(max(endtime - time.time(), 0.01))
                started = [t for t in started if t.is_alive()]
if not started:
break
if verbose:
print('Unable to join %d threads during a period of '
'%d minutes' % (len(started), timeout))
finally:
            started = [t for t in started if t.is_alive()]
if started:
faulthandler.dump_traceback(sys.stdout)
raise AssertionError('Unable to join %d threads' % len(started))
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip()
return stderr
requires_type_collecting = unittest.skipIf(hasattr(sys, 'getcounts'),
'types are immortal if COUNT_ALLOCS is defined')
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
def optim_args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
optimization settings in sys.flags."""
return subprocess._optim_args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
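# Illustrative usage sketch (not part of the original module): attach a
# TestHandler to a logger, exercise the code under test, then assert on the
# captured records ('spam' is just an example logger name).
#
#     handler = TestHandler(Matcher())
#     logger = logging.getLogger('spam')
#     logger.addHandler(handler)
#     logger.warning('disk %s is full', 'sda1')
#     assert handler.matches(levelname='WARNING', message='disk')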
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_fp, tmp_name = tempfile.mkstemp()
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match(r"2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
def fs_is_case_insensitive(directory):
"""Detects if the file system for the specified directory is case-insensitive."""
with tempfile.NamedTemporaryFile(dir=directory) as base:
base_path = base.name
case_path = base_path.upper()
if case_path == base_path:
case_path = base_path.lower()
try:
return os.path.samefile(base_path, case_path)
except FileNotFoundError:
return False
def detect_api_mismatch(ref_api, other_api, *, ignore=()):
"""Returns the set of items in ref_api not in other_api, except for a
defined list of items to be ignored in this check.
By default this skips private attributes beginning with '_' but
includes all magic methods, i.e. those starting and ending in '__'.
"""
missing_items = set(dir(ref_api)) - set(dir(other_api))
if ignore:
missing_items -= set(ignore)
missing_items = set(m for m in missing_items
if not m.startswith('_') or m.endswith('__'))
return missing_items
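# Illustrative usage sketch (not part of the original module): compare a pure
# Python implementation against its C accelerator and assert that nothing
# public is missing (the module names are just an example).
#
#     missing = detect_api_mismatch(py_heapq, c_heapq, ignore={'__about__'})
#     self.assertEqual(missing, set())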
def check__all__(test_case, module, name_of_module=None, extra=(),
blacklist=()):
"""Assert that the __all__ variable of 'module' contains all public names.
The module's public names (its API) are detected automatically based on
whether they match the public name convention and were defined in
'module'.
The 'name_of_module' argument can specify (as a string or tuple thereof)
what module(s) an API could be defined in in order to be detected as a
public API. One case for this is when 'module' imports part of its public
API from other modules, possibly a C backend (like 'csv' and its '_csv').
The 'extra' argument can be a set of names that wouldn't otherwise be
automatically detected as "public", like objects without a proper
    '__module__' attribute. If provided, it will be added to the
automatically detected ones.
The 'blacklist' argument can be a set of names that must not be treated
as part of the public API even though their names indicate otherwise.
Usage:
import bar
import foo
import unittest
from test import support
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, foo)
class OtherTestCase(unittest.TestCase):
def test__all__(self):
extra = {'BAR_CONST', 'FOO_CONST'}
blacklist = {'baz'} # Undocumented name.
# bar imports part of its API from _bar.
support.check__all__(self, bar, ('bar', '_bar'),
extra=extra, blacklist=blacklist)
"""
if name_of_module is None:
name_of_module = (module.__name__, )
elif isinstance(name_of_module, str):
name_of_module = (name_of_module, )
expected = set(extra)
for name in dir(module):
if name.startswith('_') or name in blacklist:
continue
obj = getattr(module, name)
if (getattr(obj, '__module__', None) in name_of_module or
(not hasattr(obj, '__module__') and
not isinstance(obj, types.ModuleType))):
expected.add(name)
test_case.assertCountEqual(module.__all__, expected)
class SuppressCrashReport:
"""Try to prevent a crash report from popping up.
On Windows, don't display the Windows Error Reporting dialog. On UNIX,
    disable the creation of a coredump file.
"""
old_value = None
old_modes = None
def __enter__(self):
"""On Windows, disable Windows Error Reporting dialogs using
SetErrorMode.
On UNIX, try to save the previous core file size limit, then set
soft limit to 0.
"""
if sys.platform.startswith('win'):
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
self._k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
# Suppress assert dialogs in debug builds
# (see http://bugs.python.org/issue23314)
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
self.old_modes = {}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
old_mode = msvcrt.CrtSetReportMode(report_type,
msvcrt.CRTDBG_MODE_FILE)
old_file = msvcrt.CrtSetReportFile(report_type,
msvcrt.CRTDBG_FILE_STDERR)
self.old_modes[report_type] = old_mode, old_file
else:
if resource is not None:
try:
self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE,
(0, self.old_value[1]))
except (ValueError, OSError):
pass
if sys.platform == 'darwin':
# Check if the 'Crash Reporter' on OSX was configured
# in 'Developer' mode and warn that it will get triggered
# when it is.
#
# This assumes that this context manager is used in tests
# that might trigger the next manager.
value = subprocess.Popen(['/usr/bin/defaults', 'read',
'com.apple.CrashReporter', 'DialogType'],
stdout=subprocess.PIPE).communicate()[0]
if value.strip() == b'developer':
print("this test triggers the Crash Reporter, "
"that is intentional", end='', flush=True)
return self
def __exit__(self, *ignore_exc):
"""Restore Windows ErrorMode or core file behavior to initial value."""
if self.old_value is None:
return
if sys.platform.startswith('win'):
self._k32.SetErrorMode(self.old_value)
if self.old_modes:
import msvcrt
for report_type, (old_mode, old_file) in self.old_modes.items():
msvcrt.CrtSetReportMode(report_type, old_mode)
msvcrt.CrtSetReportFile(report_type, old_file)
else:
if resource is not None:
try:
resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
except (ValueError, OSError):
pass
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
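# Illustrative usage sketch (not part of the original module): temporarily
# replace an attribute for the duration of a test; the cleanup registered on
# the test instance restores the original value.
#
#     def test_quiet_output(self):
#         patch(self, sys, 'stdout', io.StringIO())
#         ...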
def run_in_subinterp(code):
"""
Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc
module is enabled.
"""
# Issue #10915, #15751: PyGILState_*() functions don't work with
# sub-interpreters, the tracemalloc module uses these functions internally
try:
import tracemalloc
except ImportError:
pass
else:
if tracemalloc.is_tracing():
raise unittest.SkipTest("run_in_subinterp() cannot be used "
"if tracemalloc module is tracing "
"memory allocations")
import _testcapi
return _testcapi.run_in_subinterp(code)
def check_free_after_iterating(test, iter, cls, args=()):
class A(cls):
def __del__(self):
nonlocal done
done = True
try:
next(it)
except StopIteration:
pass
done = False
it = iter(A(*args))
# Issue 26494: Shouldn't crash
test.assertRaises(StopIteration, next, it)
# The sequence should be deallocated just after the end of iterating
gc_collect()
    test.assertTrue(done)
import io
import os
import importlib.util
import sys
import time
import stat
import shutil
import struct
import binascii
import threading
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
    Raised when a zipfile being written requires ZIP64 extensions
    but those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
_DD_SIGNATURE = 0x08074b50
_EXTRA_FIELD_STRUCT = struct.Struct('<HH')
def _strip_extra(extra, xids):
# Remove Extra Fields with specified IDs.
unpack = _EXTRA_FIELD_STRUCT.unpack
modified = False
buffer = []
start = i = 0
while i + 4 <= len(extra):
xid, xlen = unpack(extra[i : i + 4])
j = i + 4 + xlen
if xid in xids:
if i != start:
buffer.append(extra[start : i])
start = j
modified = True
i = j
    if not modified:
        return extra
    # Preserve any remaining data (unstripped fields and trailing bytes)
    # that follows the last removed field.
    buffer.append(extra[start:])
    return b''.join(buffer)
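# Illustrative example (not part of the original module): stripping the ZIP64
# extra record (header ID 0x0001) from a raw extra field while keeping the
# other records intact.
#
#     extra = (struct.pack('<HH', 1, 8) + b'\x00' * 8 +
#              struct.pack('<HH', 0x7075, 0))
#     _strip_extra(extra, (1,))   # -> only the empty 0x7075 record remains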
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except OSError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
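# Illustrative usage sketch (not part of the original module); 'archive.zip'
# is just an example path, and an open binary file object works as well.
#
#     if is_zipfile('archive.zip'):
#         with ZipFile('archive.zip') as zf:
#             print(zf.namelist())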
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except OSError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks > 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except OSError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'_compresslevel',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self._compresslevel = None # Level for the compressor
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def __repr__(self):
result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
if self.compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self.compress_type,
self.compress_type))
hi = self.external_attr >> 16
lo = self.external_attr & 0xFFFF
if hi:
result.append(' filemode=%r' % stat.filemode(hi))
if lo:
result.append(' external_attr=%#x' % lo)
isdir = self.is_dir()
if not isdir or self.file_size:
result.append(' file_size=%r' % self.file_size)
if ((not isdir or self.compress_size) and
(self.compress_type != ZIP_STORED or
self.file_size != self.compress_size)):
result.append(' compress_size=%r' % self.compress_size)
result.append('>')
return ''.join(result)
def FileHeader(self, zip64=None):
"""Return the per-file header as a bytes object."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if ln+4 > len(extra):
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
if tp == 0x0001:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
@classmethod
def from_file(cls, filename, arcname=None):
"""Construct an appropriate ZipInfo for a file on the filesystem.
filename should be the path to a file or directory on the filesystem.
arcname is the name which it will have within the archive (by default,
this will be the same as filename, but without a drive letter and with
leading path separators removed).
"""
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = cls(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
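    # Illustrative usage sketch (not part of the original module): build a
    # ZipInfo from an existing file, tweak its metadata, then hand it to
    # ZipFile.open()/writestr() ('data.txt' is just an example path).
    #
    #     zi = ZipInfo.from_file('data.txt', arcname='data.txt')
    #     zi.compress_type = ZIP_DEFLATED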
def is_dir(self):
"""Return True if this archive member is a directory."""
return self.filename[-1] == '/'
# ZIP encryption uses the CRC32 one-byte primitive for scrambling some
# internal keys. We noticed that a direct implementation is faster than
# relying on binascii.crc32().
_crctable = None
def _gen_crc(crc):
for j in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0xEDB88320
else:
crc >>= 1
return crc
# ZIP supports a password-based form of encryption. Even though known
# plaintext attacks have been found against it, it is still useful
# to be able to get data out of such a file.
#
# Usage:
# zd = _ZipDecrypter(mypwd)
# plain_bytes = zd(cypher_bytes)
def _ZipDecrypter(pwd):
key0 = 305419896
key1 = 591751049
key2 = 878082192
global _crctable
if _crctable is None:
_crctable = list(map(_gen_crc, range(256)))
crctable = _crctable
def crc32(ch, crc):
"""Compute the CRC32 primitive on one byte."""
return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
def update_keys(c):
nonlocal key0, key1, key2
key0 = crc32(c, key0)
key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
key2 = crc32(key1 >> 24, key2)
for p in pwd:
update_keys(p)
def decrypter(data):
"""Decrypt a bytes object."""
result = bytearray()
append = result.append
for c in data:
k = key2 | 2
c ^= ((k * (k^1)) >> 8) & 0xFF
update_keys(c)
append(c)
return bytes(result)
return decrypter
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise NotImplementedError("That compression method is not supported")
def _get_compressor(compress_type, compresslevel=None):
if compress_type == ZIP_DEFLATED:
if compresslevel is not None:
return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
if compresslevel is not None:
return bz2.BZ2Compressor(compresslevel)
return bz2.BZ2Compressor()
# compresslevel is ignored for ZIP_LZMA
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class _SharedFile:
def __init__(self, file, pos, close, lock, writing):
self._file = file
self._pos = pos
self._close = close
self._lock = lock
self._writing = writing
self.seekable = file.seekable
self.tell = file.tell
def seek(self, offset, whence=0):
with self._lock:
if self._writing():
raise ValueError("Can't reposition in the ZIP file while "
"there is an open writing handle on it. "
"Close the writing handle before trying to read.")
self._file.seek(offset, whence)
self._pos = self._file.tell()
return self._pos
def read(self, n=-1):
with self._lock:
if self._writing():
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
self._file.seek(self._pos)
data = self._file.read(n)
self._pos = self._file.tell()
return data
def close(self):
if self._file is not None:
fileobj = self._file
self._file = None
self._close(fileobj)
# Provide the tell method for unseekable stream
class _Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
def close(self):
self.fp.close()
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Chunk size to read during seek
MAX_SEEK_READ = 1 << 24
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'')
else:
self._expected_crc = None
self._seekable = False
try:
if fileobj.seekable():
self._orig_compress_start = fileobj.tell()
self._orig_compress_size = zipinfo.compress_size
self._orig_file_size = zipinfo.file_size
self._orig_start_crc = self._running_crc
self._seekable = True
except AttributeError:
pass
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
return io.BufferedIOBase.readline(self, limit)
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc)
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
if self._decrypter is not None:
data = self._decrypter(data)
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
def seekable(self):
return self._seekable
def seek(self, offset, whence=0):
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
curr_pos = self.tell()
if whence == 0: # Seek from start of file
new_pos = offset
elif whence == 1: # Seek from current position
new_pos = curr_pos + offset
elif whence == 2: # Seek from EOF
new_pos = self._orig_file_size + offset
else:
raise ValueError("whence must be os.SEEK_SET (0), "
"os.SEEK_CUR (1), or os.SEEK_END (2)")
if new_pos > self._orig_file_size:
new_pos = self._orig_file_size
if new_pos < 0:
new_pos = 0
read_offset = new_pos - curr_pos
buff_offset = read_offset + self._offset
if buff_offset >= 0 and buff_offset < len(self._readbuffer):
# Just move the _offset index if the new position is in the _readbuffer
self._offset = buff_offset
read_offset = 0
elif read_offset < 0:
# Position is before the current position. Reset the ZipExtFile
self._fileobj.seek(self._orig_compress_start)
self._running_crc = self._orig_start_crc
self._compress_left = self._orig_compress_size
self._left = self._orig_file_size
self._readbuffer = b''
self._offset = 0
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
read_offset = new_pos
while read_offset > 0:
read_len = min(self.MAX_SEEK_READ, read_offset)
self.read(read_len)
read_offset -= read_len
return self.tell()
def tell(self):
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
return filepos
class _ZipWriteFile(io.BufferedIOBase):
def __init__(self, zf, zinfo, zip64):
self._zinfo = zinfo
self._zip64 = zip64
self._zipfile = zf
self._compressor = _get_compressor(zinfo.compress_type,
zinfo._compresslevel)
self._file_size = 0
self._compress_size = 0
self._crc = 0
@property
def _fileobj(self):
return self._zipfile.fp
def writable(self):
return True
def write(self, data):
if self.closed:
raise ValueError('I/O operation on closed file.')
nbytes = len(data)
self._file_size += nbytes
self._crc = crc32(data, self._crc)
if self._compressor:
data = self._compressor.compress(data)
self._compress_size += len(data)
self._fileobj.write(data)
return nbytes
def close(self):
if self.closed:
return
try:
super().close()
# Flush any data from the compressor, and update header info
if self._compressor:
buf = self._compressor.flush()
self._compress_size += len(buf)
self._fileobj.write(buf)
self._zinfo.compress_size = self._compress_size
else:
self._zinfo.compress_size = self._file_size
self._zinfo.CRC = self._crc
self._zinfo.file_size = self._file_size
# Write updated header info
if self._zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LLQQ' if self._zip64 else '<LLLL'
self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
self._zinfo.compress_size, self._zinfo.file_size))
self._zipfile.start_dir = self._fileobj.tell()
else:
if not self._zip64:
if self._file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size unexpectedly exceeded ZIP64 limit')
if self._compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size unexpectedly exceeded ZIP64 limit')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
# Preserve current position in file
self._zipfile.start_dir = self._fileobj.tell()
self._fileobj.seek(self._zinfo.header_offset)
self._fileobj.write(self._zinfo.FileHeader(self._zip64))
self._fileobj.seek(self._zipfile.start_dir)
# Successfully written: Add file to our caches
self._zipfile.filelist.append(self._zinfo)
self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
finally:
self._zipfile._writing = False
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
compresslevel=None)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
compresslevel: None (default for the given compression type) or an integer
specifying the level to pass to the compressor.
When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
When using ZIP_DEFLATED integers 0 through 9 are accepted.
When using ZIP_BZIP2 integers 1 through 9 are accepted.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
compresslevel=None):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.compresslevel = compresslevel
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, os.PathLike):
file = os.fspath(file)
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
self._writing = False
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
except:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment).__name__)
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None, *, force_zip64=False):
"""Return file-like object for 'name'.
name is a string for the file name within the ZIP file, or a ZipInfo
object.
mode should be 'r' to read a file already in the ZIP file, or 'w' to
write to a file newly added to the archive.
pwd is the password to decrypt files (only used for reading).
When writing, if the file size is not known in advance but may exceed
2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
files. If the size is known in advance, it is best to pass a ZipInfo
instance for name, with zinfo.file_size set.
"""
if mode not in {"r", "w"}:
raise ValueError('open() requires mode "r" or "w"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd and (mode == "w"):
raise ValueError("pwd is only supported for reading files")
if not self.fp:
raise ValueError(
"Attempt to use ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
elif mode == 'w':
zinfo = ZipInfo(name)
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
else:
# Get info object for name
zinfo = self.getinfo(name)
if mode == 'w':
return self._open_to_write(zinfo, force_zip64=force_zip64)
if self._writing:
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
# Open for reading:
self._fileRefCnt += 1
zef_file = _SharedFile(self.fp, zinfo.header_offset,
self._fpclose, self._lock, lambda: self._writing)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %r is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = zd(header[0:12])
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file %r" % name)
return ZipExtFile(zef_file, mode, zinfo, zd, True)
except:
zef_file.close()
raise
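    # Illustrative sketch of open(); the member names and 'payload' are
    # placeholders:
    #
    #   with zf.open('big.bin') as src:                  # streamed read
    #       chunk = src.read(4096)
    #   with zf.open('new.bin', 'w', force_zip64=True) as dst:
    #       dst.write(payload)                           # size unknown upfront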
def _open_to_write(self, zinfo, force_zip64=False):
if force_zip64 and not self._allowZip64:
raise ValueError(
"force_zip64 is True, but allowZip64 was False when opening "
"the ZIP file."
)
if self._writing:
raise ValueError("Can't write to the ZIP file while there is "
"another write handle open on it. "
"Close the first handle before opening another.")
# Sizes and CRC are overwritten with correct data after processing the file
if not hasattr(zinfo, 'file_size'):
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.flag_bits = 0x00
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
if not self._seekable:
zinfo.flag_bits |= 0x08
if not zinfo.external_attr:
zinfo.external_attr = 0o600 << 16 # permissions: ?rw-------
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
(force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT)
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell()
self._writecheck(zinfo)
self._didModify = True
self.fp.write(zinfo.FileHeader(zip64))
self._writing = True
return _ZipWriteFile(self, zinfo, zip64)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
for zipinfo in members:
self._extract_member(zipinfo, path, pwd)
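    # Illustrative sketch; the archive members and the output directory are
    # placeholders:
    #
    #   zf.extract('docs/readme.txt', path='out')        # single member
    #   zf.extractall('out', members=['a.txt', 'b.txt']) # chosen subset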
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.is_dir():
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None,
compress_type=None, compresslevel=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists"
)
zinfo = ZipInfo.from_file(filename, arcname)
if zinfo.is_dir():
zinfo.compress_size = 0
zinfo.CRC = 0
else:
if compress_type is not None:
zinfo.compress_type = compress_type
else:
zinfo.compress_type = self.compression
if compresslevel is not None:
zinfo._compresslevel = compresslevel
else:
zinfo._compresslevel = self.compresslevel
if zinfo.is_dir():
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
else:
with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
shutil.copyfileobj(src, dest, 1024*8)
def writestr(self, zinfo_or_arcname, data,
compress_type=None, compresslevel=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists."
)
if compress_type is not None:
zinfo.compress_type = compress_type
if compresslevel is not None:
zinfo._compresslevel = compresslevel
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
with self.open(zinfo, mode='w') as dest:
dest.write(data)
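    # Illustrative sketch: writestr() also accepts a ZipInfo, which lets the
    # caller control per-member metadata (names and timestamp are placeholders):
    #
    #   zi = ZipInfo('logs/run.txt', date_time=(2020, 1, 1, 0, 0, 0))
    #   zi.compress_type = ZIP_DEFLATED
    #   zf.writestr(zi, 'log body')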
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
if self._writing:
raise ValueError("Can't close the ZIP file while there is "
"an open writing handle on it. "
"Close the writing handle before closing the zip.")
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = _strip_extra(extra_data, (1,))
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=True, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename="", filterfunc=None):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyc.
This method will compile the module.py into module.pyc if
necessary.
        If filterfunc(pathname) is given, it is called with the pathname of
        each file or directory before it is added. When it returns a false
        value, that file or directory is skipped.
"""
pathname = os.fspath(pathname)
if filterfunc and not filterfunc(pathname):
if self.debug:
label = 'path' if os.path.isdir(pathname) else 'file'
print('%s %r skipped by filterfunc' % (label, pathname))
return
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = sorted(os.listdir(pathname))
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename,
filterfunc=filterfunc) # Recursive call
elif ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in sorted(os.listdir(pathname)):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
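    # Illustrative sketch; 'mypkg' and 'extra.py' are placeholder paths:
    #
    #   with PyZipFile('lib.zip', 'w', optimize=2) as pzf:
    #       pzf.writepy('mypkg')                        # package dir -> mypkg/*.pyc
    #       pzf.writepy('extra.py', basename='tools')   # -> tools/extra.pyc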
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_opt0) and
os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt0
arcname = file_pyc
elif (os.path.isfile(pycache_opt1) and
os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt1
arcname = file_pyc
elif (os.path.isfile(pycache_opt2) and
os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt2
arcname = file_pyc
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
if sys.flags.optimize == 0:
fname = pycache_opt0
elif sys.flags.optimize == 1:
fname = pycache_opt1
else:
fname = pycache_opt2
arcname = file_pyc
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_opt0
arcname = file_pyc
else:
arcname = file_pyc
if self._optimize == 1:
fname = pycache_opt1
elif self._optimize == 2:
fname = pycache_opt2
else:
msg = "invalid value for 'optimize': {!r}".format(self._optimize)
raise ValueError(msg)
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args=None):
import argparse
description = 'A simple command-line interface for zipfile module.'
parser = argparse.ArgumentParser(description=description)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--list', metavar='<zipfile>',
help='Show listing of a zipfile')
group.add_argument('-e', '--extract', nargs=2,
metavar=('<zipfile>', '<output_dir>'),
help='Extract zipfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create zipfile from sources')
group.add_argument('-t', '--test', metavar='<zipfile>',
help='Test if a zipfile is valid')
args = parser.parse_args(args)
if args.test is not None:
src = args.test
with ZipFile(src, 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args.list is not None:
src = args.list
with ZipFile(src, 'r') as zf:
zf.printdir()
elif args.extract is not None:
src, curdir = args.extract
with ZipFile(src, 'r') as zf:
zf.extractall(curdir)
elif args.create is not None:
zip_name = args.create.pop(0)
files = args.create
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
if zippath:
zf.write(path, zippath)
for nm in sorted(os.listdir(path)):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(zip_name, 'w') as zf:
for path in files:
zippath = os.path.basename(path)
if not zippath:
zippath = os.path.basename(os.path.dirname(path))
if zippath in ('', os.curdir, os.pardir):
zippath = ''
addToZip(zf, path, zippath)
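# Illustrative command-line sketch, assuming this module is saved as
# zipfile37.py (archive and path names are placeholders):
#
#   python zipfile37.py -l archive.zip            # list contents
#   python zipfile37.py -t archive.zip            # test CRCs
#   python zipfile37.py -e archive.zip out_dir    # extract into out_dir
#   python zipfile37.py -c new.zip file1 some_dir # create from sources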
if __name__ == "__main__":
    main()
import binascii
import functools
import importlib.util
import io
import itertools
import os
import posixpath
import shutil
import stat
import struct
import sys
import threading
import time
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
_DD_SIGNATURE = 0x08074b50
_EXTRA_FIELD_STRUCT = struct.Struct('<HH')
def _strip_extra(extra, xids):
# Remove Extra Fields with specified IDs.
unpack = _EXTRA_FIELD_STRUCT.unpack
modified = False
buffer = []
start = i = 0
while i + 4 <= len(extra):
xid, xlen = unpack(extra[i : i + 4])
j = i + 4 + xlen
if xid in xids:
if i != start:
buffer.append(extra[start : i])
start = j
modified = True
i = j
    if not modified:
        return extra
    if start < len(extra):
        # Preserve any remaining extra-field data after the last stripped
        # field; otherwise it would be silently dropped.
        buffer.append(extra[start:])
    return b''.join(buffer)
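# Illustrative sketch: drop a ZIP64 extra block (header ID 0x0001) from a
# member's extra data while keeping whatever other fields surround it:
#
#   cleaned = _strip_extra(zinfo.extra, (1,))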
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except OSError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
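# Illustrative sketch; the path is a placeholder, and file-like objects are
# accepted as well:
#
#   if is_zipfile('maybe.zip'):
#       ...
#   with open('maybe.zip', 'rb') as fp:
#       ok = is_zipfile(fp)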
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except OSError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks > 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except OSError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
        endrec = list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'_compresslevel',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self._compresslevel = None # Level for the compressor
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def __repr__(self):
result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
if self.compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self.compress_type,
self.compress_type))
hi = self.external_attr >> 16
lo = self.external_attr & 0xFFFF
if hi:
result.append(' filemode=%r' % stat.filemode(hi))
if lo:
result.append(' external_attr=%#x' % lo)
isdir = self.is_dir()
if not isdir or self.file_size:
result.append(' file_size=%r' % self.file_size)
if ((not isdir or self.compress_size) and
(self.compress_type != ZIP_STORED or
self.file_size != self.compress_size)):
result.append(' compress_size=%r' % self.compress_size)
result.append('>')
return ''.join(result)
def FileHeader(self, zip64=None):
"""Return the per-file header as a bytes object."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if ln+4 > len(extra):
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
if tp == 0x0001:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
if len(counts) <= idx:
raise BadZipFile(
"Corrupt zip64 extra field. File size not found."
)
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
if len(counts) <= idx:
raise BadZipFile(
"Corrupt zip64 extra field. Compress size not found."
)
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
if len(counts) <= idx:
raise BadZipFile(
"Corrupt zip64 extra field. Header offset not found."
)
                    self.header_offset = counts[idx]
                    idx += 1
extra = extra[ln+4:]
@classmethod
def from_file(cls, filename, arcname=None, *, strict_timestamps=True):
"""Construct an appropriate ZipInfo for a file on the filesystem.
filename should be the path to a file or directory on the filesystem.
arcname is the name which it will have within the archive (by default,
this will be the same as filename, but without a drive letter and with
leading path separators removed).
"""
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
if not strict_timestamps and date_time[0] < 1980:
date_time = (1980, 1, 1, 0, 0, 0)
elif not strict_timestamps and date_time[0] > 2107:
date_time = (2107, 12, 31, 23, 59, 59)
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = cls(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.file_size = 0
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.file_size = st.st_size
return zinfo
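    # Illustrative sketch; 'data.bin' is a placeholder and zf is assumed to be
    # a ZipFile opened for writing:
    #
    #   zi = ZipInfo.from_file('data.bin', arcname='payload/data.bin')
    #   with zf.open(zi, 'w') as dst, open('data.bin', 'rb') as src:
    #       shutil.copyfileobj(src, dst)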
def is_dir(self):
"""Return True if this archive member is a directory."""
return self.filename[-1] == '/'
# ZIP encryption uses the CRC32 one-byte primitive for scrambling some
# internal keys. We noticed that a direct implementation is faster than
# relying on binascii.crc32().
_crctable = None
def _gen_crc(crc):
for j in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0xEDB88320
else:
crc >>= 1
return crc
# ZIP supports a password-based form of encryption. Even though known
# plaintext attacks have been found against it, it is still useful
# to be able to get data out of such a file.
#
# Usage:
# zd = _ZipDecrypter(mypwd)
# plain_bytes = zd(cypher_bytes)
def _ZipDecrypter(pwd):
key0 = 305419896
key1 = 591751049
key2 = 878082192
global _crctable
if _crctable is None:
_crctable = list(map(_gen_crc, range(256)))
crctable = _crctable
def crc32(ch, crc):
"""Compute the CRC32 primitive on one byte."""
return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]
def update_keys(c):
nonlocal key0, key1, key2
key0 = crc32(c, key0)
key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
key2 = crc32(key1 >> 24, key2)
for p in pwd:
update_keys(p)
def decrypter(data):
"""Decrypt a bytes object."""
result = bytearray()
append = result.append
for c in data:
k = key2 | 2
c ^= ((k * (k^1)) >> 8) & 0xFF
update_keys(c)
append(c)
return bytes(result)
return decrypter
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise NotImplementedError("That compression method is not supported")
def _get_compressor(compress_type, compresslevel=None):
if compress_type == ZIP_DEFLATED:
if compresslevel is not None:
return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
if compresslevel is not None:
return bz2.BZ2Compressor(compresslevel)
return bz2.BZ2Compressor()
# compresslevel is ignored for ZIP_LZMA
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
_check_compression(compress_type)
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class _SharedFile:
def __init__(self, file, pos, close, lock, writing):
self._file = file
self._pos = pos
self._close = close
self._lock = lock
self._writing = writing
self.seekable = file.seekable
self.tell = file.tell
def seek(self, offset, whence=0):
with self._lock:
if self._writing():
raise ValueError("Can't reposition in the ZIP file while "
"there is an open writing handle on it. "
"Close the writing handle before trying to read.")
self._file.seek(offset, whence)
self._pos = self._file.tell()
return self._pos
def read(self, n=-1):
with self._lock:
if self._writing():
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
self._file.seek(self._pos)
data = self._file.read(n)
self._pos = self._file.tell()
return data
def close(self):
if self._file is not None:
fileobj = self._file
self._file = None
self._close(fileobj)
# Provide the tell method for unseekable stream
class _Tellable:
def __init__(self, fp):
self.fp = fp
self.offset = 0
def write(self, data):
n = self.fp.write(data)
self.offset += n
return n
def tell(self):
return self.offset
def flush(self):
self.fp.flush()
def close(self):
self.fp.close()
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Chunk size to read during seek
MAX_SEEK_READ = 1 << 24
def __init__(self, fileobj, mode, zipinfo, pwd=None,
close_fileobj=False):
self._fileobj = fileobj
self._pwd = pwd
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self.newlines = None
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'')
else:
self._expected_crc = None
self._seekable = False
try:
if fileobj.seekable():
self._orig_compress_start = fileobj.tell()
self._orig_compress_size = zipinfo.compress_size
self._orig_file_size = zipinfo.file_size
self._orig_start_crc = self._running_crc
self._seekable = True
except AttributeError:
pass
self._decrypter = None
if pwd:
if zipinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zipinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zipinfo.CRC >> 24) & 0xff
h = self._init_decrypter()
if h != check_byte:
raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename)
def _init_decrypter(self):
self._decrypter = _ZipDecrypter(self._pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = self._fileobj.read(12)
self._compress_left -= 12
return self._decrypter(header)[11]
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if not self.closed:
result.append(' name=%r mode=%r' % (self.name, self.mode))
if self._compress_type != ZIP_STORED:
result.append(' compress_type=%s' %
compressor_names.get(self._compress_type,
self._compress_type))
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
return io.BufferedIOBase.readline(self, limit)
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc)
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if not data:
raise EOFError
if self._decrypter is not None:
data = self._decrypter(data)
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
def seekable(self):
return self._seekable
def seek(self, offset, whence=0):
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
curr_pos = self.tell()
if whence == 0: # Seek from start of file
new_pos = offset
elif whence == 1: # Seek from current position
new_pos = curr_pos + offset
elif whence == 2: # Seek from EOF
new_pos = self._orig_file_size + offset
else:
raise ValueError("whence must be os.SEEK_SET (0), "
"os.SEEK_CUR (1), or os.SEEK_END (2)")
if new_pos > self._orig_file_size:
new_pos = self._orig_file_size
if new_pos < 0:
new_pos = 0
read_offset = new_pos - curr_pos
buff_offset = read_offset + self._offset
if buff_offset >= 0 and buff_offset < len(self._readbuffer):
# Just move the _offset index if the new position is in the _readbuffer
self._offset = buff_offset
read_offset = 0
elif read_offset < 0:
# Position is before the current position. Reset the ZipExtFile
self._fileobj.seek(self._orig_compress_start)
self._running_crc = self._orig_start_crc
self._compress_left = self._orig_compress_size
self._left = self._orig_file_size
self._readbuffer = b''
self._offset = 0
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
read_offset = new_pos
if self._decrypter is not None:
self._init_decrypter()
while read_offset > 0:
read_len = min(self.MAX_SEEK_READ, read_offset)
self.read(read_len)
read_offset -= read_len
return self.tell()
def tell(self):
if not self._seekable:
raise io.UnsupportedOperation("underlying stream is not seekable")
filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
return filepos
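# File-like object returned by ZipFile.open(name, 'w'): data written to it is
# compressed on the fly, and the final sizes/CRC are recorded on close() (by
# rewriting the local header, or via a data descriptor for unseekable output).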
class _ZipWriteFile(io.BufferedIOBase):
def __init__(self, zf, zinfo, zip64):
self._zinfo = zinfo
self._zip64 = zip64
self._zipfile = zf
self._compressor = _get_compressor(zinfo.compress_type,
zinfo._compresslevel)
self._file_size = 0
self._compress_size = 0
self._crc = 0
@property
def _fileobj(self):
return self._zipfile.fp
def writable(self):
return True
def write(self, data):
if self.closed:
raise ValueError('I/O operation on closed file.')
nbytes = len(data)
self._file_size += nbytes
self._crc = crc32(data, self._crc)
if self._compressor:
data = self._compressor.compress(data)
self._compress_size += len(data)
self._fileobj.write(data)
return nbytes
def close(self):
if self.closed:
return
try:
super().close()
# Flush any data from the compressor, and update header info
if self._compressor:
buf = self._compressor.flush()
self._compress_size += len(buf)
self._fileobj.write(buf)
self._zinfo.compress_size = self._compress_size
else:
self._zinfo.compress_size = self._file_size
self._zinfo.CRC = self._crc
self._zinfo.file_size = self._file_size
# Write updated header info
if self._zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LLQQ' if self._zip64 else '<LLLL'
self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
self._zinfo.compress_size, self._zinfo.file_size))
self._zipfile.start_dir = self._fileobj.tell()
else:
if not self._zip64:
if self._file_size > ZIP64_LIMIT:
raise RuntimeError(
'File size unexpectedly exceeded ZIP64 limit')
if self._compress_size > ZIP64_LIMIT:
raise RuntimeError(
'Compressed size unexpectedly exceeded ZIP64 limit')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
# Preserve current position in file
self._zipfile.start_dir = self._fileobj.tell()
self._fileobj.seek(self._zinfo.header_offset)
self._fileobj.write(self._zinfo.FileHeader(self._zip64))
self._fileobj.seek(self._zipfile.start_dir)
# Successfully written: Add file to our caches
self._zipfile.filelist.append(self._zinfo)
self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
finally:
self._zipfile._writing = False
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
compresslevel=None)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read 'r', write 'w', exclusive create 'x',
or append 'a'.
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
compresslevel: None (default for the given compression type) or an integer
specifying the level to pass to the compressor.
When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
When using ZIP_DEFLATED integers 0 through 9 are accepted.
When using ZIP_BZIP2 integers 1 through 9 are accepted.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
compresslevel=None, *, strict_timestamps=True):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.compresslevel = compresslevel
self.mode = mode
self.pwd = None
self._comment = b''
self._strict_timestamps = strict_timestamps
# Check if we were passed a file-like object
if isinstance(file, os.PathLike):
file = os.fspath(file)
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
self.fp = io.open(file, filemode)
except OSError:
if filemode in modeDict:
filemode = modeDict[filemode]
continue
raise
break
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
self._fileRefCnt = 1
self._lock = threading.RLock()
self._seekable = True
self._writing = False
try:
if mode == 'r':
self._RealGetContents()
elif mode in ('w', 'x'):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
try:
self.start_dir = self.fp.tell()
except (AttributeError, OSError):
self.fp = _Tellable(self.fp)
self.start_dir = 0
self._seekable = False
else:
# Some file-like objects can provide tell() but not seek()
try:
self.fp.seek(self.start_dir)
except (AttributeError, OSError):
self._seekable = False
elif mode == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
self.start_dir = self.fp.tell()
else:
raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
except:
fp = self.fp
self.fp = None
self._fpclose(fp)
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __repr__(self):
result = ['<%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)]
if self.fp is not None:
if self._filePassed:
result.append(' file=%r' % self.fp)
elif self.filename is not None:
result.append(' filename=%r' % self.filename)
result.append(' mode=%r' % self.mode)
else:
result.append(' [closed]')
result.append('>')
return ''.join(result)
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except OSError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment).__name__)
# check for valid comment length
if len(comment) > ZIP_MAX_COMMENT:
import warnings
warnings.warn('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT, stacklevel=2)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None, *, force_zip64=False):
"""Return file-like object for 'name'.
name is a string for the file name within the ZIP file, or a ZipInfo
object.
mode should be 'r' to read a file already in the ZIP file, or 'w' to
write to a file newly added to the archive.
pwd is the password to decrypt files (only used for reading).
When writing, if the file size is not known in advance but may exceed
2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
files. If the size is known in advance, it is best to pass a ZipInfo
instance for name, with zinfo.file_size set.
"""
if mode not in {"r", "w"}:
raise ValueError('open() requires mode "r" or "w"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
if pwd and (mode == "w"):
raise ValueError("pwd is only supported for reading files")
if not self.fp:
raise ValueError(
"Attempt to use ZIP archive that was already closed")
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
elif mode == 'w':
zinfo = ZipInfo(name)
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
else:
# Get info object for name
zinfo = self.getinfo(name)
if mode == 'w':
return self._open_to_write(zinfo, force_zip64=force_zip64)
if self._writing:
raise ValueError("Can't read from the ZIP file while there "
"is an open writing handle on it. "
"Close the writing handle before trying to read.")
# Open for reading:
self._fileRefCnt += 1
zef_file = _SharedFile(self.fp, zinfo.header_offset,
self._fpclose, self._lock, lambda: self._writing)
try:
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %r is encrypted, password "
"required for extraction" % name)
else:
pwd = None
return ZipExtFile(zef_file, mode, zinfo, pwd, True)
except:
zef_file.close()
raise
def _open_to_write(self, zinfo, force_zip64=False):
if force_zip64 and not self._allowZip64:
raise ValueError(
"force_zip64 is True, but allowZip64 was False when opening "
"the ZIP file."
)
if self._writing:
raise ValueError("Can't write to the ZIP file while there is "
"another write handle open on it. "
"Close the first handle before opening another.")
# Sizes and CRC are overwritten with correct data after processing the file
if not hasattr(zinfo, 'file_size'):
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
zinfo.flag_bits = 0x00
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
if not self._seekable:
zinfo.flag_bits |= 0x08
if not zinfo.external_attr:
zinfo.external_attr = 0o600 << 16 # permissions: ?rw-------
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
(force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT)
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell()
self._writecheck(zinfo)
self._didModify = True
self.fp.write(zinfo.FileHeader(zip64))
self._writing = True
return _ZipWriteFile(self, zinfo, zip64)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
if path is None:
path = os.getcwd()
else:
path = os.fspath(path)
for zipinfo in members:
self._extract_member(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.is_dir():
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
import warnings
warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ('w', 'x', 'a'):
raise ValueError("write() requires mode 'w', 'x', or 'a'")
if not self.fp:
raise ValueError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if not self._allowZip64:
requires_zip64 = None
if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif zinfo.file_size > ZIP64_LIMIT:
requires_zip64 = "Filesize"
elif zinfo.header_offset > ZIP64_LIMIT:
requires_zip64 = "Zipfile size"
if requires_zip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
def write(self, filename, arcname=None,
compress_type=None, compresslevel=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists"
)
zinfo = ZipInfo.from_file(filename, arcname,
strict_timestamps=self._strict_timestamps)
if zinfo.is_dir():
zinfo.compress_size = 0
zinfo.CRC = 0
else:
if compress_type is not None:
zinfo.compress_type = compress_type
else:
zinfo.compress_type = self.compression
if compresslevel is not None:
zinfo._compresslevel = compresslevel
else:
zinfo._compresslevel = self.compresslevel
if zinfo.is_dir():
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
self.start_dir = self.fp.tell()
else:
with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
shutil.copyfileobj(src, dest, 1024*8)
def writestr(self, zinfo_or_arcname, data,
compress_type=None, compresslevel=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo._compresslevel = self.compresslevel
if zinfo.filename[-1] == '/':
zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x
zinfo.external_attr |= 0x10 # MS-DOS directory flag
else:
zinfo.external_attr = 0o600 << 16 # ?rw-------
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise ValueError(
"Attempt to write to ZIP archive that was already closed")
if self._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists."
)
if compress_type is not None:
zinfo.compress_type = compress_type
if compresslevel is not None:
zinfo._compresslevel = compresslevel
zinfo.file_size = len(data) # Uncompressed size
with self._lock:
with self.open(zinfo, mode='w') as dest:
dest.write(data)
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
if self._writing:
raise ValueError("Can't close the ZIP file while there is "
"an open writing handle on it. "
"Close the writing handle before closing the zip.")
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
def _write_end_record(self):
for zinfo in self.filelist: # write central directory
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = _strip_extra(extra_data, (1,))
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - self.start_dir
centDirOffset = self.start_dir
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
elif centDirOffset > ZIP64_LIMIT:
requires_zip64 = "Central directory offset"
elif centDirSize > ZIP64_LIMIT:
requires_zip64 = "Central directory size"
if requires_zip64:
# Need to write the ZIP64 end-of-archive records
if not self._allowZip64:
raise LargeZipFile(requires_zip64 +
" would require ZIP64 extensions")
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
def _fpclose(self, fp):
assert self._fileRefCnt > 0
self._fileRefCnt -= 1
if not self._fileRefCnt and not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=True, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename="", filterfunc=None):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyc.
This method will compile the module.py into module.pyc if
necessary.
If filterfunc(pathname) is given, it is called with every argument.
When it is False, the file or directory is skipped.
"""
pathname = os.fspath(pathname)
if filterfunc and not filterfunc(pathname):
if self.debug:
label = 'path' if os.path.isdir(pathname) else 'file'
print('%s %r skipped by filterfunc' % (label, pathname))
return
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = sorted(os.listdir(pathname))
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename,
filterfunc=filterfunc) # Recursive call
elif ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in sorted(os.listdir(pathname)):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
if filterfunc and not filterfunc(path):
if self.debug:
print('file %r skipped by filterfunc' % path)
continue
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_opt0) and
os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt0
arcname = file_pyc
elif (os.path.isfile(pycache_opt1) and
os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt1
arcname = file_pyc
elif (os.path.isfile(pycache_opt2) and
os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_opt2
arcname = file_pyc
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
if sys.flags.optimize == 0:
fname = pycache_opt0
elif sys.flags.optimize == 1:
fname = pycache_opt1
else:
fname = pycache_opt2
arcname = file_pyc
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_opt0
arcname = file_pyc
else:
arcname = file_pyc
if self._optimize == 1:
fname = pycache_opt1
elif self._optimize == 2:
fname = pycache_opt2
else:
msg = "invalid value for 'optimize': {!r}".format(self._optimize)
raise ValueError(msg)
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def _parents(path):
"""
Given a path with elements separated by
posixpath.sep, generate all parents of that path.
>>> list(_parents('b/d'))
['b']
>>> list(_parents('/b/d/'))
['/b']
>>> list(_parents('b/d/f/'))
['b/d', 'b']
>>> list(_parents('b'))
[]
>>> list(_parents(''))
[]
"""
return itertools.islice(_ancestry(path), 1, None)
def _ancestry(path):
"""
Given a path with elements separated by
posixpath.sep, generate all elements of that path
>>> list(_ancestry('b/d'))
['b/d', 'b']
>>> list(_ancestry('/b/d/'))
['/b/d', '/b']
>>> list(_ancestry('b/d/f/'))
['b/d/f', 'b/d', 'b']
>>> list(_ancestry('b'))
['b']
>>> list(_ancestry(''))
[]
"""
path = path.rstrip(posixpath.sep)
while path and path != posixpath.sep:
yield path
path, tail = posixpath.split(path)
class Path:
"""
A pathlib-compatible interface for zip files.
Consider a zip file with this structure::
.
├── a.txt
└── b
├── c.txt
└── d
└── e.txt
>>> data = io.BytesIO()
>>> zf = ZipFile(data, 'w')
>>> zf.writestr('a.txt', 'content of a')
>>> zf.writestr('b/c.txt', 'content of c')
>>> zf.writestr('b/d/e.txt', 'content of e')
>>> zf.filename = 'abcde.zip'
Path accepts the zipfile object itself or a filename
>>> root = Path(zf)
From there, several path operations are available.
Directory iteration (including the zip file itself):
>>> a, b = root.iterdir()
>>> a
Path('abcde.zip', 'a.txt')
>>> b
Path('abcde.zip', 'b/')
name property:
>>> b.name
'b'
join with divide operator:
>>> c = b / 'c.txt'
>>> c
Path('abcde.zip', 'b/c.txt')
>>> c.name
'c.txt'
Read text:
>>> c.read_text()
'content of c'
existence:
>>> c.exists()
True
>>> (b / 'missing.txt').exists()
False
Coercion to string:
>>> str(c)
'abcde.zip/b/c.txt'
"""
__repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
def __init__(self, root, at=""):
self.root = root if isinstance(root, ZipFile) else ZipFile(root)
self.at = at
@property
def open(self):
return functools.partial(self.root.open, self.at)
@property
def name(self):
return posixpath.basename(self.at.rstrip("/"))
def read_text(self, *args, **kwargs):
with self.open() as strm:
return io.TextIOWrapper(strm, *args, **kwargs).read()
def read_bytes(self):
with self.open() as strm:
return strm.read()
def _is_child(self, path):
return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
def _next(self, at):
return Path(self.root, at)
def is_dir(self):
return not self.at or self.at.endswith("/")
def is_file(self):
return not self.is_dir()
def exists(self):
return self.at in self._names()
def iterdir(self):
if not self.is_dir():
raise ValueError("Can't listdir a file")
subs = map(self._next, self._names())
return filter(self._is_child, subs)
def __str__(self):
return posixpath.join(self.root.filename, self.at)
def __repr__(self):
return self.__repr.format(self=self)
def joinpath(self, add):
next = posixpath.join(self.at, add)
next_dir = posixpath.join(self.at, add, "")
names = self._names()
return self._next(next_dir if next not in names and next_dir in names else next)
__truediv__ = joinpath
@staticmethod
def _implied_dirs(names):
return _unique_everseen(
parent + "/"
for name in names
for parent in _parents(name)
if parent + "/" not in names
)
@classmethod
def _add_implied_dirs(cls, names):
return names + list(cls._implied_dirs(names))
@property
def parent(self):
parent_at = posixpath.dirname(self.at.rstrip('/'))
if parent_at:
parent_at += '/'
return self._next(parent_at)
def _names(self):
return self._add_implied_dirs(self.root.namelist())
def main(args=None):
import argparse
description = 'A simple command-line interface for zipfile module.'
parser = argparse.ArgumentParser(description=description)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-l', '--list', metavar='<zipfile>',
help='Show listing of a zipfile')
group.add_argument('-e', '--extract', nargs=2,
metavar=('<zipfile>', '<output_dir>'),
help='Extract zipfile into target dir')
group.add_argument('-c', '--create', nargs='+',
metavar=('<name>', '<file>'),
help='Create zipfile from sources')
group.add_argument('-t', '--test', metavar='<zipfile>',
help='Test if a zipfile is valid')
args = parser.parse_args(args)
if args.test is not None:
src = args.test
with ZipFile(src, 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args.list is not None:
src = args.list
with ZipFile(src, 'r') as zf:
zf.printdir()
elif args.extract is not None:
src, curdir = args.extract
with ZipFile(src, 'r') as zf:
zf.extractall(curdir)
elif args.create is not None:
zip_name = args.create.pop(0)
files = args.create
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
if zippath:
zf.write(path, zippath)
for nm in sorted(os.listdir(path)):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(zip_name, 'w') as zf:
for path in files:
zippath = os.path.basename(path)
if not zippath:
zippath = os.path.basename(os.path.dirname(path))
if zippath in ('', os.curdir, os.pardir):
zippath = ''
addToZip(zf, path, zippath)
if __name__ == "__main__":
main() | zipfile38 | /zipfile38-0.0.3.tar.gz/zipfile38-0.0.3/zipfile38.py | zipfile38.py |
|pypi|
A backport of the zipfile module from Python 3.8, which adds notable improvements such as the "strict_timestamps" keyword argument for creating reproducible zip archives.
installation::
pip install zipfile38
usage:
.. code:: python

    import sys
if sys.version_info >= (3, 8):
import zipfile
else:
import zipfile38 as zipfile
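
A minimal sketch of the ``strict_timestamps`` flag mentioned above; ``False`` clamps out-of-range timestamps (for example a forced epoch mtime) to the ZIP minimum of 1980 instead of raising, which is what makes reproducible archives practical:

.. code:: python

    # Assumes 'zipfile' is imported as in the snippet above.
    with zipfile.ZipFile("out.zip", "w", strict_timestamps=False) as zf:
        zf.write("some_file.txt")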
----
shout out to `Thomas Kluyver's backport for 3.6 <https://gitlab.com/takluyver/zipfile36>`_ and `Markus Scheidgen's backport for 3.7 <https://github.com/markus1978/zipfile37>`_.
.. |pypi| image:: https://img.shields.io/pypi/v/zipfile38.svg
:target: https://pypi.org/project/zipfile38/
| zipfile38 | /zipfile38-0.0.3.tar.gz/zipfile38-0.0.3/README.rst | README.rst |
[](https://pypi.org/project/zipfile39/)
## zipfile39
- Backport of the Python 3.9 zipfile module (especially caae717) to older Python versions, including **Python 2.7**.
- This means Python 2.7 can use `zf.open(name, 'w')`.
- Uses backports.lzma for ZIP_LZMA (method 14) Python2 handler.
- Introduces ZIP_DEFLATED64 (method 9), ZIP_DCLIMPLODED (method 10), ZIP_ZSTANDARD (method 93), ZIP_XZ (method 95) and ZIP_PPMD (method 98) handlers (see the usage sketch after this list).
- ZIP_ZSTANDARD Python2 uses zstandard 0.14.1 (the last compatible version).
- isal / pyppmd / zipfile_deflate64 Python2 use my own backport.
- If isal is installed:
- crc32 and inflation are accelerated automatically.
- ZIP_DEFLATED compresslevel -10, -11, -12 and -13 are available, which correspond to isal compression level 0, 1, 2 and 3.
- If slz is installed:
- ZIP_DEFLATED compresslevel -21 is available.
- If codecs7z is installed:
- ZIP_DEFLATED/ZIP_BZIP2 compresslevel 11 - 19 are available. Enjoy 7-zip's ultimate compression on Python.
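
A minimal usage sketch (assuming the module exposes the method constants listed above and that a zstandard backend from the Requisites below is installed; file names are placeholders):

```python
import zipfile39 as zipfile

# ZIP_ZSTANDARD is one of the extra method constants introduced above.
with zipfile.ZipFile("out.zip", "w", compression=zipfile.ZIP_ZSTANDARD) as zf:
    zf.writestr("hello.txt", b"hello world")
    # open(name, 'w') is part of the backport, so this also works on Python 2.7.
    with zf.open("streamed.bin", "w") as f:
        f.write(b"streamed data")
```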
### Requisites
- Installation requisites:
- [pathlib2](https://pypi.org/project/pathlib2/) (Python2 only)
- [contextlib2](https://pypi.org/project/contextlib2/) (Python2 only)
- Optional requisites:
- [backports.lzma](https://pypi.org/project/backports.lzma/) (Python2 only)
- [dclimplode](https://pypi.org/project/dclimplode/)
- [zstandard](https://pypi.org/project/zstandard/) or [pyzstd](https://pypi.org/project/pyzstd/) (Py2 unavailable)
- [isal](https://pypi.org/project/isal/)
- Python2 need `python -m pip install git+https://github.com/cielavenir/[email protected]`
- (now this branch support macOS as well)
- Also see https://github.com/cielavenir/python-isal-py2/releases/tag/v0.11.1-py2
- [slz](https://pypi.org/project/slz/)
- [codecs7z](https://pypi.org/project/codecs7z/)
- [pyppmd](https://pypi.org/project/pyppmd/)
- Python2 need `python -m pip install git+https://github.com/cielavenir/pyppmd-py2@py2`
- Also see https://github.com/cielavenir/pyppmd-py2/releases/tag/v0.17.0.1
- [zipfile_deflate64](https://pypi.org/project/zipfile_deflate64/)
- Need 0.2.0 or later.
- Python2 need `python -m pip install git+https://github.com/cielavenir/zipfile-deflate64@py2`
- Also see https://github.com/cielavenir/zipfile-deflate64/releases/tag/v0.2.0.4
- or [inflate64](https://pypi.org/project/inflate64/) (Py2 unavailable)
- Test requisites:
- All optional requisites
- [backports.tempfile](https://pypi.org/project/backports.tempfile/) (Python2 only)
- [funcsigs](https://pypi.org/project/funcsigs/) (Python2 only)
### Wheels
Some dependencies need complex build procedures. For convenience, prebuilt wheels are published in the GitHub Actions CI.
### Legal
- I'm not sure about the license terms when pyppmd / codecs7z / inflate64 is loaded (I'm not a lawyer, though).
- For pyppmd, note that PPMd code itself is public domain. See https://github.com/miurahr/pyppmd/issues/5#issuecomment-892280467 for detail.
| zipfile39 | /zipfile39-0.0.8.0.tar.gz/zipfile39-0.0.8.0/README.md | README.md |
## 0.0.8.0
- Fixed zipfile.Path.open encoding
## 0.0.7.0
- Introduced \_\_version\_\_
## 0.0.6.0
- Support inflate64/pyzstd modules (they are fallbacks, the feature does not change)
## 0.0.5.3
- Fixed pypi desc display
## 0.0.5.2
- Fixed Python 3.4/3.5 compatibility
## 0.0.5.1
- Added bzip2 compress level 11-19
## 0.0.5.0
- Added deflate64 compression support
## 0.0.4.2
- Fixed pyppmd 0.17.0 compatibility
## 0.0.4.1
- Fixed dclimplode version header
## 0.0.4
- Added dclimplode support
## 0.0.3.1
- Fixed extracting to file was not working on Python2
## 0.0.3
- add slz support
- changed compresslevel scheme
## 0.0.2.1
- fixed a typo in ppmd handler
## 0.0.2
- simplified xz handler
## 0.0.1.3
- finalized ppmd handler
## 0.0.1.2
- related to ppmd
## 0.0.1.1
- integrated isal
## 0.0.1
- add deflate64 decompression support
- add ppmd handler
- add compression level support for LZMA
## 0.0.0.5
- add threads= to zstandard
## 0.0.0.4
- fixed open('r') on Python2
## 0.0.0.3
- fixed Python3 compatibility
## 0.0.0.2
- fixed Python2
## 0.0.0.1
- initial version
- add xz handler
| zipfile39 | /zipfile39-0.0.8.0.tar.gz/zipfile39-0.0.8.0/CHANGELOG.md | CHANGELOG.md |
# git zipfix
git-zipfix is a tool to make it easier and faster to perform modifications on
historical commits within large repositories.
The command `git zipfix $1` is, in effect, a more efficient version of the
following common snippet:
```bash
$ TARGET=$(git rev-parse --validate $1)
$ git commit --fixup=$TARGET
$ EDITOR=true git rebase -i --autosquash $TARGET^
```
> **NOTE** This hasn't been tested nor reviewed much yet. Use my personal
> scripts at your own risk :-).
## Usage
Stage changes, and call git-zipfix to apply them to a commit
```bash
$ git add ...
$ git zipfix HEAD^
```
With the `-e` and `-m` flags, `git zipfix` quickly edits commit messages.
```bash
$ git zipfix -e HEAD^ # Opens an editor for message
$ git zipfix -m "New message" HEAD^ # Takes message from cmdline
```
### Conflicts
When conflicts occur, `git zipfix` will attempt to resolve them
automatically. If it fails, it will either prompt the user for a resolution,
or start the `kdiff3` tool to resolve the conflict. Other difftools are not
currently handled.
### Working Directory Changes
`git zipfix` makes no effort to update the index or working directory after
applying changes; however, it will emit a warning if the final state of the
repository after the rebase does not match the initial state.
Differences in state should be easy to spot, as the index and working
directory will still reflect the initial state.
## Performance
With large repositories such as mozilla-central, git-zipfix is often
significantly faster at applying incremental, targeted changes, because it
does not need to update either the index or the working directory during rebases.
I did a simple test, applying a single-line change to a commit 11 patches up
the stack. The following are my extremely non-scientific time measurements:
| Command | Real Time |
| ---------------------------- | --------- |
| `git rebase -i --autosquash` | 16.931s |
| `git zipfix` | 0.541s |
The following are the commands I ran:
```bash
# Apply changes with git rebase -i --autosquash
$ git reset 6fceb7da316d && git add .
$ time bash -c 'TARGET=14f1c85bf60d; git commit --fixup=$TARGET; EDITOR=true git rebase -i --autosquash $TARGET~'
[mybranch 286c7cff7330] fixup! Bug ...
1 file changed, 1 insertion(+)
Successfully rebased and updated refs/heads/mybranch.
real 0m16.931s
user 0m15.289s
sys 0m3.579s
# Apply changes with git zipfix
$ git reset 6fceb7da316d && git add .
$ time git zipfix 14f1c85bf60d
Applying staged changes to '14f1c85bf60d'
Reparenting commit 1/10: 23a741dff61496ba929d979942e0ab590db1fece
Reparenting commit 2/10: 8376b15993e506883a54c5d1b75becd083224eb7
<snip>
Reparenting commit 10/10: 6fceb7da316dbf4fedb5360ed09bd7b03f28bc6a
Updating HEAD (6fceb7da316dbf4fedb5360ed09bd7b03f28bc6a => 996ec1a718bad36edab0e7c1129d698d29cdcdfc)
real 0m0.541s
user 0m0.354s
sys 0m0.150s
```
### How is it faster?
1. To avoid spawning unnecessary subprocesses and hitting disk too
frequently, `git zipfix` uses an in-memory cache of objects in the ODB
which it has already seen.
2. Intermediate git trees, blobs, and commits created during processing are
   held exclusively in-memory, and only persisted when necessary.
3. A custom implementation of the merge algorithm is used which directly
merges trees rather than using the index. This ends up being faster on
large repositories, as only the subset of modified files and directories
need to be examined when merging.
Currently this algorithm is incapable of handling copy and rename
operations correctly, instead treating them as file creation and deletion
   actions. This may be resolvable in the future.
| zipfix | /zipfix-0.1.tar.gz/zipfix-0.1/README.md | README.md |
[](https://app.circleci.com/pipelines/github/sandes/zipfly)

[](https://pepy.tech/project/zipfly)
# ZipFly
ZipFly is a zip archive generator based on zipfile.py.
It was created to generate very large ZIP archives for immediate sending out to clients, or for writing large ZIP archives without memory inflation.
# Requirements
Python 3.6+, which added <a href="https://docs.python.org/3/library/zipfile.html#zipfile-objects" target="blank">support</a> for writing to unseekable streams.
# Install
pip3 install zipfly
# Basic usage, compress on-the-fly during writes
Using this library saves you from having to write the ZIP to disk first. Some data will be buffered by the zipfile deflater, but memory use stays tightly constrained. By default, data is written to the destination in regular 32 KB chunks.
`ZipFly` default attributes:<br>
- <b>paths:</b> [ ] <br/>
- <b>mode:</b> (write) w <br/>
- <b>chunksize:</b> (bytes) 32768 <br/>
- <b>compression:</b> Stored <br/>
- <b>allowZip64:</b> True <br/>
- <b>compresslevel:</b> None <br/>
- <b>storesize:</b> (bytes) 0 <br/>
- <b>encode:</b> utf-8 <br/>
<br/>
`paths` <b>list of dictionaries:</b>
| Key | Description
|---------------- |-------------------------------
|**fs** |Should be the path to a file on the filesystem
|**n** *(Optional)* |Is the name which it will have within the archive <br> (by default, this will be the same as **fs**)
<br>
```python
import zipfly
paths = [
{
'fs': '/path/to/large/file'
},
]
zfly = zipfly.ZipFly(paths = paths)
generator = zfly.generator()
print (generator)
# <generator object ZipFly.generator at 0x7f74d52bcc50>
with open("large.zip", "wb") as f:
for i in generator:
f.write(i)
```
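
The optional **n** key from the table above controls the name each file gets inside the archive; a small sketch (the paths are placeholders):

```python
import zipfly

paths = [
    # 'fs' is the real path on disk, 'n' is the name stored inside the archive.
    {'fs': '/path/to/large/file', 'n': 'data/large_file.bin'},
]

zfly = zipfly.ZipFly(paths=paths)

with open("named.zip", "wb") as f:
    for chunk in zfly.generator():
        f.write(chunk)
```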
# Examples
> <b>Streaming multiple files in a zip with <a href="https://github.com/sandes/zipfly/blob/master/examples/streaming_django.py" target="_blank">Django</a> or <a href="https://github.com/sandes/zipfly/blob/master/examples/streaming_flask.py" target="_blank">Flask</a></b>
Send forth large files to clients with the most popular frameworks (a minimal Flask sketch is included after this list)
> <b>Create paths</b>
Easy way to create the array `paths` from a parent folder.
> <b>Predict the size of the zip file before creating it</b>
Use the `BufferPredictionSize` to compute the correct size of the resulting archive before creating it.
> <b>Streaming a large file</b>
Efficient way to read a single very large binary file in python
> <b>Set a comment</b>
Your own comment in the zip file
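
A minimal Flask sketch of the streaming idea linked above (the route, paths and app setup are illustrative only, not part of ZipFly itself):

```python
from flask import Flask, Response
import zipfly

app = Flask(__name__)

@app.route("/download")
def download():
    paths = [{'fs': '/path/to/large/file'}]
    zfly = zipfly.ZipFly(paths=paths)
    # Stream the generator directly; each chunk is sent as soon as it is produced.
    return Response(zfly.generator(), mimetype='application/zip')
```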
| zipfly | /zipfly-6.0.5.tar.gz/zipfly-6.0.5/README.md | README.md |
===============================
zipfreeinfo
===============================
.. image:: https://img.shields.io/pypi/v/zipfreeinfo.svg
:target: https://pypi.python.org/pypi/zipfreeinfo
.. image:: https://img.shields.io/travis/marcelluseasley/zipfreeinfo.svg
:target: https://travis-ci.org/marcelluseasley/zipfreeinfo
.. image:: https://readthedocs.org/projects/zipfreeinfo/badge/?version=latest
:target: https://zipfreeinfo.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://pyup.io/repos/github/marcelluseasley/zipfreeinfo/shield.svg
:target: https://pyup.io/repos/github/marcelluseasley/zipfreeinfo/
:alt: Updates
Given a zip code, you are provided with information about that location.
* Free software: MIT license
* Documentation: https://zipfreeinfo.readthedocs.io.
Features
--------
* Provides city, state, latitude/longitude data for a given zip code
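
A quick example (the credentials are placeholders; the full API is shown in the usage documentation)::

    from zipfreeinfo import zipfreeinfo

    zipfreeinfo.set_auth_credentials('your auth id', 'your auth token')
    zipfreeinfo.get_zip_info("30141")
    print(zipfreeinfo.get_city(), zipfreeinfo.get_state())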
Credits
---------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| zipfreeinfo | /zipfreeinfo-1.0.0.tar.gz/zipfreeinfo-1.0.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/marcelluseasley/zipfreeinfo/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zipfreeinfo could always use more documentation, whether as part of the
official zipfreeinfo docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/marcelluseasley/zipfreeinfo/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zipfreeinfo` for local development.
1. Fork the `zipfreeinfo` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zipfreeinfo.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zipfreeinfo
$ cd zipfreeinfo/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 zipfreeinfo tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
https://travis-ci.org/marcelluseasley/zipfreeinfo/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_zipfreeinfo
| zipfreeinfo | /zipfreeinfo-1.0.0.tar.gz/zipfreeinfo-1.0.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
=====
Usage
=====
To use zipfreeinfo in a project::
from zipfreeinfo import zipfreeinfo
zipfreeinfo.set_auth_credentials('your auth id','your auth token')
# Returns a dictionary with nested data structures containing zip code data
# Also sets internal zipdata structure with zip code data, which can be returned with function calls
print(zipfreeinfo.get_zip_info("30141"))
print(zipfreeinfo.get_zipcode()) # returns string
print(zipfreeinfo.get_city()) # returns string
print(zipfreeinfo.get_state()) # returns string
print(zipfreeinfo.get_state_abbreviation()) # returns string
print(zipfreeinfo.get_county_fips()) # returns string
print(zipfreeinfo.get_county_name()) # returns string
print(zipfreeinfo.get_latitude()) # returns float
print(zipfreeinfo.get_longitude()) # returns float
| zipfreeinfo | /zipfreeinfo-1.0.0.tar.gz/zipfreeinfo-1.0.0/docs/usage.rst | usage.rst |
.. zipfreeinfo documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to zipfreeinfo's documentation!
======================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
   authors
   history
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zipfreeinfo | /zipfreeinfo-1.0.0.tar.gz/zipfreeinfo-1.0.0/docs/index.rst | index.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zipfreeinfo, run this command in your terminal:
.. code-block:: console
$ pip install zipfreeinfo
This is the preferred method to install zipfreeinfo, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zipfreeinfo can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/marcelluseasley/zipfreeinfo
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/marcelluseasley/zipfreeinfo/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/marcelluseasley/zipfreeinfo
.. _tarball: https://github.com/marcelluseasley/zipfreeinfo/tarball/master
| zipfreeinfo | /zipfreeinfo-1.0.0.tar.gz/zipfreeinfo-1.0.0/docs/installation.rst | installation.rst |
# zipf
CLI tool to directly zip several files/folders or an existing folder
Make a zip from a list of files/folders, with or without specifying the archive name ("%Y_%m_%d-%H_%M_%S.zip" if not specified), or from an existing folder.
# Installation
```sh
# With pip:
sudo pip3 install zipfs
# With yay:
yay -a zipf
# With yaourt:
yaourt -a zipf
```
# Compatibility
python >= 3
# Usage
<pre>
<b>zipf / zipper</b> [<b>F_PATH_01 F_PATH_02 ...</b>] [<b>ARCHIVE_NAME</b>]
<b>options:</b>
<!-- --> <b>-h, --help</b> show this help message and exit
</pre>
# Examples
For **help**:<br/>
```sh
zipf -h
or
zipf --help
```
<br/>Make a zip archive from 3 files:<br/>
```sh
zipf titi/toto.jpg titi/tutu.jpg tata.txt
```
Giving the **2019_05_15-17_25_44.zip** archive<br/><br/>
Make a zip archive from 2 files and one folder, specifying the archive name (work):<br/>
```sh
zipf titi/toto.jpg titi/ tata.txt work
```
Giving the **work.zip** archive
| zipfs | /zipfs-1.0.5.tar.gz/zipfs-1.0.5/README.md | README.md |
import sys
import os
import subprocess
import shutil
from subprocess import call
from datetime import datetime
CBRED = '\033[38;5;196;1m'
CBORANGE = '\033[38;5;202;1m'
CBGREEN = '\033[38;5;40;1m'
CBWHITE = '\033[1;37m'
CBBLUE = '\033[1;34m'
CBASE = '\033[0m'
def _help_requested(args):
if len(args) == 1 and (args[0] == "-h" or args[0] == "--help"):
readme_path = "/usr/lib/zipf/README.md"
f = open(readme_path, 'r')
print(CBBLUE + "\n\t####### zipf documentation #######\n" + CBWHITE)
for line in f:
if line == "```sh\n" or line == "```\n" or line == "<pre>\n" or line == "</pre>\n":
continue
            line = line.replace('```sh', '').replace('```', '').replace('<pre>', '').replace('</b>', ''). \
                replace('<b>', '').replace('<!-- -->', '').replace('<br/>', ''). \
                replace('***', '').replace('**', '').replace('*', '')
print(" " + line, end='')
print(CBASE)
exit()
def _ok(msg=""):
print(CBGREEN + "\n\t[OK] " + CBASE + msg)
# def _info(msg=""):
# print(CBWHITE + "\n\t[INFO] " + CBASE + msg)
def _warning(msg=""):
print(CBORANGE + "\n\t[WARNING] " + CBASE + msg)
def _error(msg=""):
print(CBRED + "\n\t[ERROR] " + CBASE + msg)
def _skipped():
print(CBBLUE + "\n\t\t\tskipped\n\n" + CBASE)
def _path_exists(path):
if not os.path.exists(path):
return False
return True
def _get_abs_path(f_path):
return os.path.normpath((os.path.join(os.getcwd(), os.path.expanduser(f_path))))
def _error_man(init_msg, err_msg, folder_path, moved_folder_path):
_error(init_msg + " error:\n\t\t" + str(err_msg))
sudo_conf = input(CBWHITE + "\n\t\tuse sudo?\n\t\t\t[Enter] to proceed\t\t[any case] to skip\n")
if sudo_conf == "":
subprocess.check_call(['sudo', "mv", folder_path, moved_folder_path])
else:
_skipped()
def _check_f_moved(folder_path, moved_folder_path):
if os.path.exists(folder_path) or not os.path.exists(moved_folder_path):
if os.path.exists(folder_path):
_warning(CBBLUE + "%s" % folder_path + CBASE + " still exists")
if not os.path.exists(moved_folder_path):
_warning(CBBLUE + "%s" % moved_folder_path + CBASE + " doesn't exist")
return False
return True
def _check_archive_created(archive_name):
archive_path = os.getcwd() + "/" + archive_name + ".zip"
if os.path.isfile(archive_path):
_ok(CBBLUE + "%s" % archive_path + CBASE + " created")
return True
else:
_error(CBBLUE + "%s" % archive_path + CBASE + " not created")
return False
def _zip_folder(folder_path, archive_name=None):
if not archive_name:
cdatetime = datetime.now()
archive_name = cdatetime.strftime("%Y_%m_%d-%H_%M_%S")
shutil.make_archive(archive_name, 'zip', folder_path)
_check_archive_created(archive_name)
# shutil.rmtree(folder_path)
def _zip_files(flist, archive_name=None):
cdatetime = datetime.now()
ctime = cdatetime.strftime("%Y_%m_%d-%H_%M_%S")
if not archive_name:
archive_name = ctime
cpath = os.getcwd()
folder_path = cpath + "/" + archive_name
if _path_exists(folder_path):
_warning(CBBLUE + "%s" % folder_path + CBASE + " already exists" + CBASE)
moved_folder_path = folder_path + "_" + ctime
try:
shutil.move(folder_path, moved_folder_path)
except PermissionError as err_msg:
_error_man("permission", err_msg, folder_path, moved_folder_path)
except OSError as err_msg:
_error_man("os", err_msg, folder_path, moved_folder_path)
except Exception as err_msg:
_error_man("", err_msg, folder_path, moved_folder_path)
if _check_f_moved(folder_path, moved_folder_path):
_ok(CBBLUE + "%s" % folder_path + CBASE + " moved" + CBASE)
else:
_error("an issue occurred when moving file " + CBBLUE + "%s" % folder_path + CBASE)
raise ValueError("not able to rename " + CBBLUE + "%s" % folder_path + CBASE)
try:
os.mkdir(folder_path)
except Exception as err_msg:
_error("an issue occurred when creating folder " + CBBLUE + "%s" % folder_path + CBASE + "\n%s" % err_msg)
raise ValueError("not able to create " + CBBLUE + "%s" % folder_path + CBASE + " folder")
for f in flist:
f_path = _get_abs_path(f)
if os.path.isdir(f_path):
call(['cp', '-a', f_path, folder_path])
elif os.path.isfile(f_path):
shutil.copy(f_path, folder_path)
shutil.make_archive(archive_name, 'zip', folder_path)
_check_archive_created(archive_name)
shutil.rmtree(folder_path)
def main():
inputs = sys.argv[1:]
_help_requested(inputs)
if len(inputs) == 0:
_error("needs at least one argument being a folder name or a list of files/folders")
raise ValueError("no input ... no zip ...")
elif len(inputs) == 1:
f_path = _get_abs_path(inputs[0])
print(f_path)
if _path_exists(f_path):
if os.path.isdir(f_path):
_zip_folder(f_path)
else:
_zip_files([f_path])
else:
_error(CBBLUE + "%s" % f_path + CBASE + " path doesn't exist")
raise ValueError(
"needs at least one existing path in argument being a folder name or a list of files/folders")
elif len(inputs) == 2 and not _path_exists(_get_abs_path(inputs[1])) and os.path.isdir(_get_abs_path(inputs[0])):
folder_path = _get_abs_path(inputs[0])
archive_name = inputs[1]
_zip_folder(folder_path, archive_name)
else:
last_arg = inputs[-1]
if _path_exists(_get_abs_path(last_arg)):
_zip_files(inputs)
else:
_zip_files(inputs[:-1], last_arg)
if __name__ == "__main__":
    main()
| zipfs | /zipfs-1.0.5.tar.gz/zipfs-1.0.5/zipf/zipf.py | zipf.py
===========
mini-ziphmm
===========
.. |ci-status| image:: https://img.shields.io/travis/mailund/mini-ziphmm.svg
:target: https://travis-ci.org/mailund/mini-ziphmm
:alt: Build status
.. |coveralls| image:: https://img.shields.io/coveralls/mailund/mini-ziphmm.svg
:target: https://coveralls.io/github/mailund/mini-ziphmm
:alt: Coverage
.. |versions| image:: https://img.shields.io/pypi/v/ziphmm.svg
:target: https://pypi.python.org/pypi/ziphmm
   :alt: Package version
.. |status| image:: https://img.shields.io/pypi/status/ziphmm.svg
:target: https://pypi.python.org/pypi/ziphmm
:alt: Package stability
This is a minimal reimplementation of zipHMM in Python.
It also contains Cython and weave implementations of standard hidden Markov models
for comparison.
The package is undocumented and has no install procedure - check test.py for usage.
To run::

    make
    python test.py
| ziphmm | /ziphmm-0.1.1b2.tar.gz/ziphmm-0.1.1b2/README.rst | README.rst |
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Self, TypeAlias
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from starlette import status
from starlette.requests import Request
from starlette.responses import JSONResponse
from ziphy.fastapi_utils import jsonify
# :)
def cls_name_to_snake(obj: type[Any] | Any) -> str:
name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
parts, upper = [], False
for i in range(len(name)):
if name[i].isupper():
if not upper and i > 0:
parts.append("_")
parts.append(name[i].lower())
upper = True
else:
if upper and len(parts) > 2 and parts[-2] != "_":
parts.append("_" + parts.pop())
parts.append(name[i])
upper = False
return "".join(parts)
@dataclass
class ExceptionHandler:
status_code: int
error_name: str | None = None
detail: Any = None
def __call__(self, _request: Request, exception: Exception) -> JSONResponse:
error_name = self.error_name or cls_name_to_snake(exception)
content = {"error": error_name}
if self.detail is not False:
content["detail"] = jsonify(self.detail) if self.detail else str(exception)
return JSONResponse(content=content, status_code=self.status_code)
class HTTPError(Exception):
status_code: int = 400
error_name: str | None = None
detail: Any | None = None
def __init__(
self,
status_code: int | None = None,
error_name: str | None = None,
detail: Any = None,
) -> None:
self.status_code = status_code or self.status_code
self.error_name = error_name or self.error_name
self.detail = detail or self.detail
@classmethod
def fastapi_handler(cls, request: Request, exception: Self) -> JSONResponse:
handler = ExceptionHandler(
exception.status_code, exception.error_name, exception.detail
)
return handler(request, exception)
def pydantic_validation_exception_handler(
_request: Request, exception: RequestValidationError
) -> JSONResponse:
errors = jsonable_encoder(exception.errors())
return JSONResponse(
content={"error": "validation_error", "detail": errors},
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
)
ExceptionHandlersAlias: TypeAlias = dict[
int | type[Exception], Callable[[Request, Any], Any]
]
| ziphy-fastapi-utils | /ziphy_fastapi_utils-2.0.4.tar.gz/ziphy_fastapi_utils-2.0.4/ziphy/fastapi_utils/exceptions.py | exceptions.py
zipimportx: faster zipfile imports for frozen python apps
==========================================================
This package aims to speed up imports from zipfiles for frozen python apps (and
other scenarios where the zipfile is assumed not to change) by taking several
shortcuts that aren't available to the standard zipimport module.
It exports a single useful name, "zipimporter", which is a drop-in replacement
for the standard zipimporter class. To replace the builtin zipimport mechanism
with zipimportx, do the following::
import zipimportx
zipimportx.zipimporter.install()
With no additional work you may already find a small speedup when importing
from a zipfile. Since zipimportx assumes that the zipfile will not change or
go missing, it does fewer stat() calls and integrity checks than the standard
zipimport implementation.
To further speed up the loading of a zipfile, you can pre-compute the zipimport
"directory information" dictionary and store it in a separate index file. This
will reduce the time spent parsing information out of the zipfile. Create an
index file like this::
from zipimportx import zipimporter
zipimporter("mylib.zip").write_index()
This will create the file "mylib.zip.idx" containing the pre-parsed zipfile
directory information. Specifically, it will contain a marshalled dictionary
object with the same structure as those in zipimport._zip_directory_cache.
In my tests, use of these indexes speeds up the initial loading of a zipfile by
about a factor of 3 on Linux, and a factor of 5 on Windows.
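For illustration only, the index can be inspected with the standard marshal
module (a sketch that assumes the .idx file is a bare marshal dump with no
extra header)::

    import marshal

    with open("mylib.zip.idx", "rb") as f:
        directory_info = marshal.load(f)
    print(len(directory_info), "entries")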
To further speed up the loading of a collection of modules, you can "preload"
the actual module data by including it directly in the index. This allows the
data for several modules to be loaded in a single sequential read rather than
requiring a separate read for each module. Preload module data like this::
from zipimportx import zipimporter
zipimporter("mylib.zip").write_index(preload=["mymod*","mypkg*"])
Each entry in the "preload" list is a filename pattern. Files from the zipfile
that match any of these patterns will be preloaded when the zipfile is first
accessed for import. You may want to remove them from the actual zipfile in
order to save space.
Finally, it's possible to convert a zipfile into inline python code and include
that code directly in your frozen application. This can simulate the effect
of having that zipfile on sys.path, while avoiding any file IO during the import
process. To get the necessary sourcecode, do the following::
from zipimportx import zipimporter
code = zipimporter("mylib.zip").get_inline_code()
It's worth re-iterating the big assumption made by this module: the
zipfile must never change or go missing. If the data in the index does not
reflect the actual contents of the zipfile, imports will break in unspecified
and probably disastrous ways.
Note also that this package uses nothing but builtin modules. To bootstrap
zipfile imports for a frozen application, you can inline this module's code
directly into your application's startup script. Simply do something like
this in your build process::
import zipimportx
import inspect
SCRIPT = '''
%s
zipimporter.install()
import myapp
myapp.main()
''' % (inspect.getsource(zipimportx),)
freeze_this_script_somehow(SCRIPT)
zipimportx.zipimporter("path/to/frozen/library.zip").write_index()
| zipimportx | /zipimportx-0.3.2.tar.gz/zipimportx-0.3.2/README.txt | README.txt |
======
zipind
======
.. image:: https://img.shields.io/pypi/v/zipind.svg
:target: https://pypi.python.org/pypi/zipind
.. image:: https://readthedocs.org/projects/zipind/badge/?version=latest
:target: https://zipind.readthedocs.io/en/latest/?version=latest
:alt: Documentation Status
zipind - From a folder, make a splitted ZIP with INDependent parts
* Free software: MIT license
* Documentation: https://zipind.readthedocs.io.
Features
--------
- Compress a folder to .zip or .rar, dividing it into independent parts and grouping your files in alphanumeric order.
- Preserve the ordering of folders and files.
- Preserve the internal structure of folders.
- If any file exceeds the defined maximum size, that specific file is split in dependent mode.
- Set the file types to be ignored in compression (config/ignore_extensions.txt)
- Verify that the file path length is less than the specified limit (default 250 characters).
- Sanitize folder and file name characters to ensure compatibility with UTF-8 encoding, by auto-renaming.
Requirements
------------
- To compress to Zip format, it is necessary to have the 7Zip_ app installed and added to the system path variables
- To compress to Rar format, it is necessary to have the Winrar_ app installed and added to the system path variables
Usage
-----
Let's zip a folder, with a maximum of 100MB per file, in zip mode and ignoring 'ISO' extension files.
**Through python script importation**
.. code-block:: python
import zipind
path_folder = r'c://my_project'
    zipind.run(path_folder, mode='zip', mb_perfile=100, ignore_extensions=['iso'])
**Through terminal in chatbot-like style**
.. code-block:: text
$ zipind
Zipind will start by responding:
.. code-block:: text
Zipind - From a folder, make a splitted ZIP with INDependent parts
>> github.com/apenasrr/zipind <<
Paste the folder path to be compressed:
Now paste the folder path to be compressed:
.. code-block:: text
Paste the folder path to be compressed: c://my_project
Answer the questions to customize the parameters and your project will be processed.
**CLI Mode**
Soon...
We recommend
------------
`mises.org`_ - Educate yourself about economic and political freedom
`lbry.tv`_ - Store files and videos on blockchain ensuring free speech
`A Cypherpunk's Manifesto`_ - How encryption is essential to Free Speech and Privacy
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
.. _`7Zip`: https://www.7-zip.org/download.html
.. _`Winrar`: https://www.win-rar.com/download.html
.. _`mises.org`: https://mises.org/
.. _`lbry.tv`: http://lbry.tv/
.. _`A Cypherpunk's Manifesto`: https://www.activism.net/cypherpunk/manifesto.html
| zipind | /zipind-1.1.3.tar.gz/zipind-1.1.3/README.rst | README.rst
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/apenasrr/zipind/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zipind could always use more documentation, whether as part of the
official zipind docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/apenasrr/zipind/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zipind` for local development.
1. Fork the `zipind` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zipind.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zipind
$ cd zipind/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zipind tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_zipind
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
| zipind | /zipind-1.1.3.tar.gz/zipind-1.1.3/CONTRIBUTING.rst | CONTRIBUTING.rst |
zipind package
==============
Submodules
----------
zipind.cli module
-----------------
.. automodule:: zipind.cli
:members:
:undoc-members:
:show-inheritance:
zipind.zipind module
--------------------
.. automodule:: zipind.zipind
:members:
:undoc-members:
:show-inheritance:
zipind.zipind\_cli module
-------------------------
.. automodule:: zipind.zipind_cli
:members:
:undoc-members:
:show-inheritance:
zipind.zipind\_core module
--------------------------
.. automodule:: zipind.zipind_core
:members:
:undoc-members:
:show-inheritance:
zipind.zipind\_utils module
---------------------------
.. automodule:: zipind.zipind_utils
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: zipind
:members:
:undoc-members:
:show-inheritance:
| zipind | /zipind-1.1.3.tar.gz/zipind-1.1.3/docs/zipind.rst | zipind.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zipind, run this command in your terminal:
.. code-block:: console
$ pip install zipind
This is the preferred method to install zipind, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zipind can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/apenasrr/zipind
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/apenasrr/zipind/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/apenasrr/zipind
.. _tarball: https://github.com/apenasrr/zipind/tarball/master
| zipind | /zipind-1.1.3.tar.gz/zipind-1.1.3/docs/installation.rst | installation.rst |
# zipjson - a simple tool to create and read compressed JSON files
## Installation
```bash
pip install git+https://github.com/vguzov/zipjson.git
```
## Usage:
The following code creates a .zip archive with a `data.json` file inside it containing the serialized data, then reads it back:
```python
import zipjson
any_jsonable_data = {"something":42}
file_object = open("test.json.zip", "wb") # Mind the additional 'b' flag
zipjson.dump(any_jsonable_data, file_object)
loaded_data = zipjson.load(open("test.json.zip", "rb"))
print(loaded_data) # {'something': 42}
```
In-memory methods `dumps` and `loads` are supported as well
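A minimal sketch of the in-memory variants, assuming `dumps` returns the compressed archive as bytes and `loads` accepts those bytes:

```python
import zipjson

payload = zipjson.dumps({"something": 42})  # bytes of a zip archive holding data.json
print(zipjson.loads(payload))               # {'something': 42}
```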
| zipjson | /zipjson-0.0.1.tar.gz/zipjson-0.0.1/README.md | README.md |
from functools import wraps
import inspect
from zipkin_agent import Layer, Component
from opencensus.trace.execution_context import get_opencensus_tracer
def trace(
op: str = None,
layer: Layer = Layer.Unknown,
component: Component = Component.Unknown,
tags: dict = None,
):
def decorator(func):
_op = op or func.__name__
if inspect.iscoroutinefunction(func):
@wraps(func)
async def wrapper(*args, **kwargs):
_tracer = get_opencensus_tracer()
span = _tracer.span(name=_op)
span.add_attribute('layer', layer.name)
span.add_attribute('component', component.name)
if tags:
for tag_k, tag_v in tags.items():
span.add_attribute(tag_k, tag_v)
with span:
return await func(*args, **kwargs)
return wrapper
else:
@wraps(func)
def wrapper(*args, **kwargs):
_tracer = get_opencensus_tracer()
span = _tracer.span(name=_op)
span.add_attribute('layer', layer.name)
span.add_attribute('component', component.name)
if tags:
for tag_k, tag_v in tags.items():
span.add_attribute(tag_k, tag_v)
with span:
return func(*args, **kwargs)
return wrapper
return decorator
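# Illustrative usage of the decorator above (the function name, layer, component
# and tag values below are examples only):
#
#   @trace(op='load_profile', layer=Layer.Http, component=Component.Flask, tags={'cache': 'miss'})
#   def load_profile(user_id):
#       ...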
def runnable(
op: str = None,
layer: Layer = Layer.Unknown,
component: Component = Component.Unknown,
tags: dict = None,
):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
_op = op or "Thread/"+func.__name__
_tracer = get_opencensus_tracer()
with _tracer.new_local_span(name=_op) as span:
span.add_attribute('layer', layer.name)
span.add_attribute('component', component.name)
if tags:
for tag_k, tag_v in tags.items():
span.add_attribute(tag_k, tag_v)
func(*args, **kwargs)
return wrapper
    return decorator
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/decorators.py | decorators.py
import os
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import List
QUEUE_TIMEOUT = 1 # type: int
RE_IGNORE_PATH = re.compile('^$') # type: re.Pattern
options = None # here to include 'options' in globals
options = globals().copy() # THIS MUST PRECEDE DIRECTLY BEFORE LIST OF CONFIG OPTIONS!
service_name = os.getenv('AGENT_NAME') or 'Python Service Name' # type: str
zipkin_host = os.getenv('ZIPKIN_HOST') or 'localhost'
zipkin_port = os.getenv('ZIPKIN_PORT') or 9411
force_tls = os.getenv('AGENT_FORCE_TLS', '').lower() == 'true' # type: bool
protocol = (os.getenv('AGENT_PROTOCOL') or 'grpc').lower() # type: str
authentication = os.getenv('AGENT_AUTHENTICATION') # type: str
logging_level = os.getenv('AGENT_LOGGING_LEVEL') or 'INFO' # type: str
disable_plugins = (os.getenv('AGENT_DISABLE_PLUGINS') or '').split(',') # type: List[str]
max_buffer_size = int(os.getenv('AGENT_MAX_BUFFER_SIZE', '1000')) # type: int
ignore_suffix = os.getenv('IGNORE_SUFFIX') or '.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,' \
                                              '.mp4,.html,.svg'  # type: str
flask_collect_http_params = True if os.getenv('FLASK_COLLECT_HTTP_PARAMS') and \
os.getenv('FLASK_COLLECT_HTTP_PARAMS') == 'True' else False # type: bool
http_params_length_threshold = int(os.getenv('HTTP_PARAMS_LENGTH_THRESHOLD') or '1024') # type: int
trace_ignore_path = os.getenv('TRACE_IGNORE_PATH') or '' # type: str
profile_active = True if os.getenv('AGENT_PROFILE_ACTIVE') and \
os.getenv('AGENT_PROFILE_ACTIVE') == 'True' else False # type: bool
profile_task_query_interval = int(os.getenv('PROFILE_TASK_QUERY_INTERVAL') or '20')
options = {key for key in globals() if key not in options} # THIS MUST FOLLOW DIRECTLY AFTER LIST OF CONFIG OPTIONS!
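# Illustrative sketch: defaults can be overridden programmatically before plugins
# are installed (the values below are examples only); unknown keys raise KeyError.
#   config.init(service_name='my-service', zipkin_host='zipkin.internal', zipkin_port=9411)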
def init(**kwargs):
glob = globals()
for key, val in kwargs.items():
if key not in options:
raise KeyError('invalid config option %s' % key)
        glob[key] = val
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/config.py | config.py
from zipkin_agent import Layer, Component, config
from zipkin_agent.agent import init_zk_tracer
from zipkin_agent.trace import tags
from opencensus.trace.propagation.trace_context_http_header_format import TraceContextPropagator
from opencensus.trace.status import Status
from opencensus.trace.execution_context import get_opencensus_tracer, get_current_span
def install():
from flask import Flask
_full_dispatch_request = Flask.full_dispatch_request
_handle_user_exception = Flask.handle_user_exception
_handle_exception = Flask.handle_exception
def params_tostring(params):
return "\n".join([k + '=[' + ",".join(params.getlist(k)) + ']' for k, _ in params.items()])
def _zk_full_dispatch_request(this: Flask):
import flask
req = flask.request
if 'traceparent' in req.headers:
init_zk_tracer(span_context=TraceContextPropagator().from_headers(req.headers))
resp = _full_dispatch_request(this)
return resp
else:
init_zk_tracer()
with get_opencensus_tracer().span(name=req.url.split("?")[0]) as span:
span.add_attribute('kind', 'client')
span.add_attribute('layer', Layer.Http.name)
span.add_attribute('component', Component.Flask.name)
span.add_attribute('peer', '%s:%s' % (req.environ["REMOTE_ADDR"], req.environ["REMOTE_PORT"]))
span.add_attribute(tags.HttpMethod, req.method)
span.add_attribute(tags.HttpUrl, req.url.split("?")[0])
span.add_attribute(tags.HttpParams, params_tostring(req.values)[0:config.http_params_length_threshold])
resp = _full_dispatch_request(this)
span.set_status(Status(code=resp.status_code))
span.add_attribute(tags.HttpStatus, resp.status_code)
return resp
def _zk_handle_user_exception(this: Flask, e):
if e is not None:
entry_span = get_current_span()
if entry_span is not None:
entry_span.set_status(Status.from_exception(e))
return _handle_user_exception(this, e)
def _zk_handle_exception(this: Flask, e):
if e is not None:
entry_span = get_current_span()
if entry_span is not None:
entry_span.set_status(Status.from_exception(e))
return _handle_exception(this, e)
Flask.full_dispatch_request = _zk_full_dispatch_request
Flask.handle_user_exception = _zk_handle_user_exception
    Flask.handle_exception = _zk_handle_exception
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/plugins/trace_flask.py | trace_flask.py
import inspect
import logging
from zipkin_agent.loggings import logger
import pkgutil
import re
import traceback
import pkg_resources
from packaging import version
from zipkin_agent import config
import zipkin_agent
def install():
disable_patterns = config.disable_plugins
if isinstance(disable_patterns, str):
disable_patterns = [re.compile(p.strip()) for p in disable_patterns.split(',') if p.strip()]
else:
disable_patterns = [re.compile(p.strip()) for p in disable_patterns if p.strip()]
for importer, modname, ispkg in pkgutil.iter_modules(zipkin_agent.plugins.__path__):
if any(pattern.match(modname) for pattern in disable_patterns):
logger.info('plugin %s is disabled and thus won\'t be installed', modname)
continue
logger.debug('installing plugin %s', modname)
plugin = importer.find_module(modname).load_module(modname)
supported = pkg_version_check(plugin)
if not supported:
logger.debug('check version for plugin %s\'s corresponding package failed, thus '
'won\'t be installed', modname)
continue
if not hasattr(plugin, 'install') or inspect.ismethod(getattr(plugin, 'install')):
logger.warning('no `install` method in plugin %s, thus the plugin won\'t be installed', modname)
continue
# noinspection PyBroadException
try:
plugin.install()
except Exception:
logger.warning('failed to install plugin %s', modname)
traceback.print_exc() if logger.isEnabledFor(logging.DEBUG) else None
_operators = {
'<': lambda cv, ev: cv < ev,
'<=': lambda cv, ev: cv < ev or cv == ev,
'==': lambda cv, ev: cv == ev,
'>=': lambda cv, ev: cv > ev or cv == ev,
'>': lambda cv, ev: cv > ev,
'!=': lambda cv, ev: cv != ev
}
class VersionRuleException(Exception):
def __init__(self, message):
self.message = message
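# Illustrative version_rule declaration inside a plugin module (the package name
# and versions below are hypothetical). Entries in "rules" are OR-ed together,
# while space-separated units within a single rule are AND-ed:
#   version_rule = {
#       "name": "flask",
#       "rules": [">=1.0 <2.0", "==0.12.4"],
#   }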
def pkg_version_check(plugin):
supported = True
# no version rules was set, no checks
if not hasattr(plugin, "version_rule"):
return supported
pkg_name = plugin.version_rule.get("name")
rules = plugin.version_rule.get("rules")
try:
current_pkg_version = pkg_resources.get_distribution(pkg_name).version
except pkg_resources.DistributionNotFound:
# when failed to get the version, we consider it as supported.
return supported
current_version = version.parse(current_pkg_version)
# pass one rule in rules (OR)
for rule in rules:
if rule.find(" ") == -1:
if check(rule, current_version):
return supported
else:
# have to pass all rule_uint in this rule (AND)
rule_units = rule.split(" ")
results = [check(unit, current_version) for unit in rule_units]
if False in results:
# check failed, try to check next rule
continue
else:
return supported
supported = False
return supported
def check(rule_unit, current_version):
idx = 2 if rule_unit[1] == '=' else 1
symbol = rule_unit[0:idx]
expect_pkg_version = rule_unit[idx:]
expect_version = version.parse(expect_pkg_version)
f = _operators.get(symbol) or None
if not f:
raise VersionRuleException("version rule {} error. only allow >,>=,==,<=,<,!= symbols".format(rule_unit))
    return f(current_version, expect_version)
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/plugins/__init__.py | __init__.py
from zipkin_agent import Layer, Component, config
from zipkin_agent.agent import init_zk_tracer
from zipkin_agent.trace import tags
from opencensus.trace.propagation.trace_context_http_header_format import TraceContextPropagator
from opencensus.trace.status import Status
from opencensus.trace.execution_context import get_opencensus_tracer, get_current_span
def install():
from flask import Flask
_full_dispatch_request = Flask.full_dispatch_request
_handle_user_exception = Flask.handle_user_exception
_handle_exception = Flask.handle_exception
def params_tostring(params):
return "\n".join([k + '=[' + ",".join(params.getlist(k)) + ']' for k, _ in params.items()])
def _zk_full_dispatch_request(this: Flask):
import flask
req = flask.request
if 'traceparent' in req.headers:
init_zk_tracer(span_context=TraceContextPropagator().from_headers(req.headers))
resp = _full_dispatch_request(this)
return resp
else:
init_zk_tracer()
with get_opencensus_tracer().span(name=req.url.split("?")[0]) as span:
span.add_attribute('kind', 'service')
span.add_attribute('layer', Layer.Http.name)
span.add_attribute('component', Component.Flask.name)
span.add_attribute('peer', '%s:%s' % (req.environ["REMOTE_ADDR"], req.environ["REMOTE_PORT"]))
span.add_attribute(tags.HttpMethod, req.method)
span.add_attribute(tags.HttpUrl, req.url.split("?")[0])
span.add_attribute(tags.HttpParams, params_tostring(req.values)[0:config.http_params_length_threshold])
resp = _full_dispatch_request(this)
span.set_status(Status(code=resp.status_code))
span.add_attribute(tags.HttpStatus, resp.status_code)
return resp
def _zk_handle_user_exception(this: Flask, e):
if e is not None:
entry_span = get_current_span()
if entry_span is not None:
entry_span.set_status(Status.from_exception(e))
return _handle_user_exception(this, e)
def _zk_handle_exception(this: Flask, e):
if e is not None:
entry_span = get_current_span()
if entry_span is not None:
entry_span.set_status(Status.from_exception(e))
return _handle_exception(this, e)
Flask.full_dispatch_request = _zk_full_dispatch_request
Flask.handle_user_exception = _zk_handle_user_exception
    Flask.handle_exception = _zk_handle_exception
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/build/lib/plugins/sw_flask.py | sw_flask.py
import inspect
import logging
from zipkin_agent.loggings import logger
import pkgutil
import re
import traceback
import pkg_resources
from packaging import version
from zipkin_agent import config
import zipkin_agent
def install():
disable_patterns = config.disable_plugins
if isinstance(disable_patterns, str):
disable_patterns = [re.compile(p.strip()) for p in disable_patterns.split(',') if p.strip()]
else:
disable_patterns = [re.compile(p.strip()) for p in disable_patterns if p.strip()]
for importer, modname, ispkg in pkgutil.iter_modules(zipkin_agent.plugins.__path__):
if any(pattern.match(modname) for pattern in disable_patterns):
logger.info('plugin %s is disabled and thus won\'t be installed', modname)
continue
logger.debug('installing plugin %s', modname)
plugin = importer.find_module(modname).load_module(modname)
supported = pkg_version_check(plugin)
if not supported:
logger.debug('check version for plugin %s\'s corresponding package failed, thus '
'won\'t be installed', modname)
continue
if not hasattr(plugin, 'install') or inspect.ismethod(getattr(plugin, 'install')):
logger.warning('no `install` method in plugin %s, thus the plugin won\'t be installed', modname)
continue
# noinspection PyBroadException
try:
plugin.install()
except Exception:
logger.warning('failed to install plugin %s', modname)
traceback.print_exc() if logger.isEnabledFor(logging.DEBUG) else None
_operators = {
'<': lambda cv, ev: cv < ev,
'<=': lambda cv, ev: cv < ev or cv == ev,
'==': lambda cv, ev: cv == ev,
'>=': lambda cv, ev: cv > ev or cv == ev,
'>': lambda cv, ev: cv > ev,
'!=': lambda cv, ev: cv != ev
}
class VersionRuleException(Exception):
def __init__(self, message):
self.message = message
def pkg_version_check(plugin):
supported = True
# no version rules was set, no checks
if not hasattr(plugin, "version_rule"):
return supported
pkg_name = plugin.version_rule.get("name")
rules = plugin.version_rule.get("rules")
try:
current_pkg_version = pkg_resources.get_distribution(pkg_name).version
except pkg_resources.DistributionNotFound:
# when failed to get the version, we consider it as supported.
return supported
current_version = version.parse(current_pkg_version)
# pass one rule in rules (OR)
for rule in rules:
if rule.find(" ") == -1:
if check(rule, current_version):
return supported
else:
# have to pass all rule_uint in this rule (AND)
rule_units = rule.split(" ")
results = [check(unit, current_version) for unit in rule_units]
if False in results:
# check failed, try to check next rule
continue
else:
return supported
supported = False
return supported
def check(rule_unit, current_version):
idx = 2 if rule_unit[1] == '=' else 1
symbol = rule_unit[0:idx]
expect_pkg_version = rule_unit[idx:]
expect_version = version.parse(expect_pkg_version)
f = _operators.get(symbol) or None
if not f:
raise VersionRuleException("version rule {} error. only allow >,>=,==,<=,<,!= symbols".format(rule_unit))
    return f(current_version, expect_version)
| zipkin-agent | /zipkin_agent-0.1.1-py3-none-any.whl/zipkin_agent/build/lib/plugins/__init__.py | __init__.py
python-zipkin
=============
|Build Status|
*python-zipkin* is an api for recording and sending
messages to `Zipkin <http://twitter.github.io/zipkin/>`_. Why use it?
From the http://twitter.github.io/zipkin/:
"Collecting traces helps developers gain deeper knowledge about how
certain requests perform in a distributed system. Let's say we're having
problems with user requests timing out. We can look up traced requests
that timed out and display it in the web UI. We'll be able to quickly
find the service responsible for adding the unexpected response time. If
the service has been annotated adequately we can also find out where in
that service the issue is happening."
Supported versions
------------------
**Python**: ``2.6``, ``2.7`` (the current Python Thrift release doesn't
support Python 3)
Recording annotations
~~~~~~~~~~~~~~~~~~~~~
``python-zipkin`` creates a single span per served requests. It
automatically adds a number of annotations (see below). You can also add
your own annotations from anywhere in your code:
.. code:: python
from zipkin.api import api as zipkin_api
zipkin_api.record_event('MySQL: "SELECT * FROM auth_users"', duration=15000) # Note duration is in microseconds, as defined by Zipkin
zipkin_api.record_key_value('Cache misses', 15) # You can use string, int, long and bool values
Hacking
-------
See
`CONTRIBUTING.md <https://github.com/prezi/python-zipkin/blob/master/CONTRIBUTING.md>`_
for guidelines.
You can start hacking on ``python-zipkin`` with:
.. code:: sh
git clone https://github.com/prezi/python-zipkin.git
cd python-zipkin
git remote rename origin upstream
virtualenv virtualenv
. virtualenv/bin/activate
python setup.py test
.. |Build Status| image:: https://travis-ci.org/prezi/python-zipkin.svg?branch=master
:target: https://travis-ci.org/prezi/python-zipkin
| zipkin | /zipkin-0.1.0.tar.gz/zipkin-0.1.0/README.rst | README.rst |
import logging
import os
import click
import zipline.repo.extract_objects as eo
from zipline.repo import JOIN_FOLDER_NAME, \
GROUP_BY_FOLDER_NAME, STAGING_QUERY_FOLDER_NAME
from zipline.repo.validator import ZiplineRepoValidator
from zipline.schema.serializer import thrift_simple_json_protected
from zipline.schema.thrift.ttypes import GroupBy, LeftOuterJoin, StagingQuery
# This is set in the main function -
# from command line or from env variable during invocation
FOLDER_NAME_TO_CLASS = {
GROUP_BY_FOLDER_NAME: GroupBy,
JOIN_FOLDER_NAME: LeftOuterJoin,
STAGING_QUERY_FOLDER_NAME: StagingQuery,
}
def get_folder_name_from_class_name(class_name):
return {v.__name__: k for k, v in FOLDER_NAME_TO_CLASS.items()}[class_name]
@click.command()
@click.option(
'--zipline_root',
envvar='ZIPLINE_ROOT',
help='Path to the root zipline folder',
default=False)
@click.option(
'--input_path',
help='Relative Path to the root zipline folder, which contains the objects to be serialized',
required=True)
@click.option(
'--output_root',
help='Relative Path to the root zipline folder, to where the serialized output should be written',
default="production")
@click.option(
'--debug',
help='debug mode',
is_flag=True)
@click.option(
'--force-overwrite',
help='Force overwriting existing materialized conf.',
is_flag=True)
def extract_and_convert(zipline_root, input_path, output_root, debug, force_overwrite):
"""
CLI tool to convert Python zipline GroupBy's, Joins and Staging queries into their thrift representation.
The materialized objects are what will be submitted to spark jobs - driven by airflow, or by manual user testing.
"""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
if not zipline_root:
zipline_root = os.getcwd()
_print_highlighted("Using zipline root path", zipline_root)
zipline_root_path = os.path.expanduser(zipline_root)
obj_folder_name = input_path.split('/', 1)[0]
obj_class = FOLDER_NAME_TO_CLASS[obj_folder_name]
full_input_path = os.path.join(zipline_root_path, input_path)
_print_highlighted(f"Input {obj_folder_name} from", full_input_path)
assert os.path.exists(full_input_path), f"Input Path: {full_input_path} doesn't exist"
if os.path.isdir(full_input_path):
results = eo.from_folder(zipline_root_path, full_input_path, obj_class, log_level=log_level)
elif os.path.isfile(full_input_path):
assert full_input_path.endswith(".py"), f"Input Path: {input_path} isn't a python file"
results = eo.from_file(zipline_root_path, full_input_path, obj_class, log_level=log_level)
else:
raise Exception(f"Input Path: {full_input_path}, isn't a file or a folder")
validator = ZiplineRepoValidator(zipline_root_path, output_root, log_level=log_level)
extra_online_group_bys = {}
num_written_objs = 0
full_output_root = os.path.join(zipline_root_path, output_root)
for name, obj in results.items():
if _write_obj(full_output_root, validator, name, obj, log_level, force_overwrite):
num_written_objs += 1
# In case of online join, we need to materialize the underlying online group_bys.
if obj_class is LeftOuterJoin and obj.online:
online_group_bys = {rt.groupBy.name: rt.groupBy for rt in obj.rightParts}
extra_online_group_bys.update(online_group_bys)
if extra_online_group_bys:
num_written_group_bys = 0
# load materialized joins to validate the additional group_bys against.
validator.load_objs()
for name, obj in extra_online_group_bys.items():
if _write_obj(full_output_root, validator, name, obj, log_level, force_overwrite):
num_written_group_bys += 1
print(f"Successfully wrote {num_written_group_bys} online GroupBy objects to {full_output_root}")
print(f"Successfully wrote {num_written_objs} {(obj_class).__name__} objects to {full_output_root}")
def _write_obj(full_output_root: str,
validator: ZiplineRepoValidator,
name: str,
obj: object,
log_level: int,
force_overwrite: bool) -> bool:
"""
Returns True if the object is successfully written.
"""
team_name = name.split(".")[0]
obj_class = type(obj)
class_name = obj_class.__name__
name = name.split('.', 1)[1]
_print_highlighted(f"{class_name} Team", team_name)
_print_highlighted(f"{class_name} Name", name)
obj_folder_name = get_folder_name_from_class_name(class_name)
output_path = os.path.join(full_output_root, obj_folder_name, team_name)
output_file = os.path.join(output_path, name)
skip_reasons = validator.can_skip_materialize(obj)
if skip_reasons:
reasons = ', '.join(skip_reasons)
_print_warning(f"Skipping {class_name} {name}: {reasons}")
if os.path.exists(output_file):
_print_warning(f"old file exists for skipped config: {output_file}")
return False
validation_errors = validator.validate_obj(obj)
if validation_errors:
_print_error(f"Could not write {class_name} {name}",
', '.join(validation_errors))
return False
if force_overwrite:
_print_warning(f"force overwrite {class_name} {name}")
elif not validator.safe_to_overwrite(obj):
_print_warning(f"cannot overwrite {class_name} {name} with existing online conf")
return False
_write_obj_as_json(name, obj, output_file, obj_class)
return True
def _write_obj_as_json(name: str, obj: object, output_file: str, obj_class: type):
class_name = obj_class.__name__
output_folder = os.path.dirname(output_file)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
assert os.path.isdir(output_folder), f"{output_folder} isn't a folder."
assert hasattr(obj, "name"), f"Can't serialize objects without the name attribute for object {name}"
with open(output_file, "w") as f:
_print_highlighted(f"Writing {class_name} to", output_file)
f.write(thrift_simple_json_protected(obj, obj_class))
def _print_highlighted(left, right):
# print in blue and bold
print(f"{left:>25} - \033[34m\033[1m{right}\033[00m")
def _print_error(left, right):
# print in red.
print(f"\033[91m{left:>25} - \033[1m{right}\033[00m")
def _print_warning(string):
# print in yellow.
print(f"\033[93m{string}\033[00m")
if __name__ == '__main__':
    extract_and_convert()
| zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline_ai-0.0.19.data/scripts/materialize.py | materialize.py
import copy
import importlib
import json
import logging
import re
from typing import List, Dict, Union, Optional, Iterable
import zipline.group_by as gb
import zipline.schema.thrift.ttypes as ttypes
import zipline.utils as utils
from zipline.metadata_helper import get_dependencies
from zipline.schema.serializer import thrift_simple_json
logging.basicConfig(level=logging.INFO)
# Regex for matching table name in check_consistency args in LeftOuterJoin.
TABLE_NAME_REGEX = re.compile(r"^\w+\.\w+$")
def _expand_selectors(group_by: ttypes.GroupBy,
selectors: Optional[List[Union[ttypes.AggregationSelector, str]]]):
if selectors is None:
if group_by.aggregations:
for aggregation in group_by.aggregations:
if aggregation.windows:
yield ttypes.AggregationSelector(
name=aggregation.name,
windows=aggregation.windows
)
else:
yield ttypes.AggregationSelector(
name=aggregation.name
)
else:
for column in gb.get_columns(group_by.sources[0]):
yield ttypes.AggregationSelector(name=column)
else:
valid_names: Optional[Iterable[str]] = None
aggregation_map: Dict[str, ttypes.Aggregation]
if group_by.aggregations:
aggregation_map = {
aggregation.name: aggregation
for aggregation in group_by.aggregations
}
valid_names = aggregation_map.keys()
else: # pre-aggregated
valid_names = set([column for column in gb.get_columns(group_by.sources[0])])
for selector in selectors:
if isinstance(selector, ttypes.AggregationSelector):
utils.check_contains(selector.name,
valid_names,
"aggregation",
group_by.name)
if selector.windows:
assert group_by.aggregations, f"""
group_by:{group_by.name} doesn't have windows, and is pre-aggregated.
You requested: {selector}
"""
utils.check_contains(selector.windows,
aggregation_map[selector.name].windows,
"window",
f"{group_by.name}:{selector.name}",
gb.window_to_str_pretty)
yield selector
else:
# selector is a string name
utils.check_contains(selector, valid_names, "aggregation", group_by.name)
yield ttypes.AggregationSelector(
name=selector
)
def JoinPart(group_by: ttypes.GroupBy,
keyMapping: Dict[str, str] = None, # mapping of key columns from the join
selectors: Optional[List[Union[ttypes.AggregationSelector, str]]] = None,
prefix: str = None # all aggregations will be prefixed with that name
) -> ttypes.JoinPart:
# used for reset for next run
import_copy = __builtins__['__import__']
group_by_module_name = utils.get_mod_name_from_gc(group_by, "group_bys")
logging.debug("group_by's module info from garbage collector {}".format(group_by_module_name))
group_by_module = importlib.import_module(group_by_module_name)
__builtins__['__import__'] = utils.import_module_set_name(group_by_module, ttypes.GroupBy)
if keyMapping:
utils.check_contains(keyMapping.values(),
group_by.keyColumns,
"key",
group_by.name)
join_part = ttypes.JoinPart(
groupBy=group_by,
keyMapping=keyMapping,
selectors=list(_expand_selectors(group_by, selectors)),
prefix=prefix
)
# reset before next run
__builtins__['__import__'] = import_copy
return join_part
def LeftOuterJoin(left: ttypes.Source,
rightParts: List[ttypes.JoinPart],
check_consistency: bool = False,
check_consistency_table: str = "",
check_consistency_source: ttypes.EventSource = None,
additional_args: List[str] = None,
additional_env: List[str] = None,
user_var = None,
online: bool = False,
production: bool = False,
frontfill: bool = True) -> ttypes.LeftOuterJoin:
"""
defines the primary keys and timestamps for which Zipline is to compute
features and a list of Aggregations to combine into a single data source
(usually for training data).
    Note: users can set at most one of the check_consistency, check_consistency_table,
    and check_consistency_source options.
:param left: The primary keys and timestamps for the driver.
:param rightParts: a list of JoinParts(aggregations) to select.
    :param check_consistency: If True, the default Zipline online/offline consistency check is used.
    :param check_consistency_table: If set, the table name in the format of
        <namespace>.<table> will be used in the online/offline consistency check.
    :param check_consistency_source: If set as an EventSource, its table and query
        with select keys_json, values_json, timestamp will be used to run
        the consistency check.
:param additional_args: Additional args passed to JoinJob.
:param additional_env: Additional environment variable passed to Spark submit.
:param user_var: users can assign any variables and later be json serialized to the metadata
:param online: Set to True if the Join is served online.
:param production: Set to True if the Join is used in the production environment.
:param frontfill: Set to True if the JoinJob for daily frontfill should be created.
"""
# create a deep copy for case: multiple LeftOuterJoin use the same left,
# validation will fail after the first iteration
updated_left = copy.deepcopy(left)
if left.events:
if left.events.query.select:
assert "ts" not in left.events.query.select.keys(), "'ts' is a reserved key word for Zipline," \
" please specify the expression in timeColumn"
# mapping ts to query.timeColumn to events only
updated_left.events.query.select.update({"ts": updated_left.events.query.timeColumn})
# name is set externally, cannot be set here.
root_base_source = updated_left.entities if updated_left.entities else updated_left.events
# todo: validation if select is blank
if root_base_source.query.select:
root_keys = set(root_base_source.query.select.keys())
# JoinJob will compute a 128 bit row hash as the row_id
assert "row_id" not in root_keys, "'row_id' is a reserved key word for Zipline"
for joinPart in rightParts:
mapping = joinPart.keyMapping if joinPart.keyMapping else {}
utils.check_contains(mapping.keys(), root_keys, "root key", "")
uncovered_keys = set(joinPart.groupBy.keyColumns) - set(mapping.values()) - root_keys
assert not uncovered_keys, f"""
Not all keys columns needed to join with GroupBy:{joinPart.groupBy.name} are present.
Missing keys are: {uncovered_keys},
Missing keys should be either mapped or selected in root.
KeyMapping only mapped: {mapping.values()}
Root only selected: {root_keys}
"""
dependencies = get_dependencies(updated_left)
right_sources = [joinPart.groupBy.sources for joinPart in rightParts]
# flattening
right_sources = [source for source_list in right_sources for source in source_list]
right_dependencies = [dep for source in right_sources for dep in get_dependencies(source)]
dependencies.extend(right_dependencies)
metadata_map = {
"dependencies": json.dumps(list({frozenset(item.items()): item for item in dependencies}.values()))
}
if additional_args:
metadata_map["additional_args"] = additional_args
if additional_env:
metadata_map["additional_env"] = additional_env
if user_var:
metadata_map["user_json"] = json.dumps(user_var)
check_consistency_options = [
'check_consistency', 'check_consistency_table', 'check_consistency_source']
scope = locals()
set_options = [option for option in check_consistency_options if eval(option, scope)]
    assert len(set_options) < 2, "at most one of ({}) options may be set: {} are set".format(
        ", ".join(check_consistency_options), ", ".join(set_options))
if check_consistency is True:
metadata_map["check_consistency"] = "default"
elif check_consistency_table:
assert TABLE_NAME_REGEX.match(check_consistency_table), \
f"check_consistency_table {check_consistency_table} is invalid table name"
metadata_map["check_consistency_table"] = check_consistency_table
if check_consistency_source:
assert isinstance(check_consistency_source, ttypes.EventSource), \
"check_consistency source should be a EventSource object."
error_msg = utils.validate_check_consistency_source(check_consistency_source)
assert not error_msg, f"check_consistency_source is invalid: {error_msg}"
metadata_map["check_consistency_source"] = json.loads(
thrift_simple_json(check_consistency_source))
metadata = json.dumps(metadata_map)
return ttypes.LeftOuterJoin(
left=updated_left,
rightParts=rightParts,
metadata=metadata,
online=online,
production=production,
frontfill=frontfill
    )
| zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/join.py | join.py
import gc
import importlib
from collections.abc import Iterable
from typing import List
from zipline.group_by import GroupBy
from zipline.schema.thrift.ttypes import Source, EventSource, StagingQuery
# Required fields in check consistency select statement.
REQUIRED_CHECK_CONSITENCY_FIELDS = frozenset(['keys_json', 'values_json', 'timestamp'])
def edit_distance(str1, str2):
m = len(str1) + 1
n = len(str2) + 1
dp = [[0 for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif str1[i - 1] == str2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + min(dp[i][j - 1],
dp[i - 1][j],
dp[i - 1][j - 1])
return dp[m - 1][n - 1]
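# Illustrative check of the recurrence above: edit_distance("kitten", "sitting") == 3.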
def check_contains_single(candidate, valid_items, type_name, name, print_function=repr):
name_suffix = f"for {name}" if name else ""
candidate_str = print_function(candidate)
if not valid_items:
assert f"{candidate_str}, is not a valid {type_name} because no {type_name}s are specified {name_suffix}"
elif candidate not in valid_items:
sorted_items = sorted(map(print_function, valid_items),
key=lambda item: edit_distance(candidate_str, item))
printed_items = '\n '.join(sorted_items)
assert candidate in valid_items, f"""{candidate_str}, is not a valid {type_name} {name_suffix}
Please pick one from:
{printed_items}
"""
def check_contains(candidates, *args):
if isinstance(candidates, Iterable) and not isinstance(candidates, str):
for candidate in candidates:
check_contains_single(candidate, *args)
else:
check_contains_single(candidates, *args)
def get_streaming_sources(group_by: GroupBy) -> List[Source]:
"""Checks if the group by has a source with streaming enabled."""
return [source for source in group_by.sources if is_streaming(source)]
def is_streaming(source: Source) -> bool:
"""Checks if the source has streaming enabled."""
return (source.entities and source.entities.mutationTopic is not None) or \
(source.events and source.events.topic is not None)
def import_module_set_name(module, cls):
"""evaluate imported modules to assign object name"""
for name, obj in list(module.__dict__.items()):
if isinstance(obj, cls):
# the name would be `team_name.python_script_name.[group_by_name|join_name|staging_query_name]`
# real world case: psx.reservation_status.v1
obj.name = module.__name__.partition(".")[2] + "." + name
return module
def get_mod_name_from_gc(obj, mod_prefix):
"""get an object's module information from garbage collector"""
mod_name = None
# get obj's module info from garbage collector
gc.collect()
for ref in gc.get_referrers(obj):
if '__name__' in ref and ref['__name__'].startswith(mod_prefix):
mod_name = ref['__name__']
break
return mod_name
def get_staging_query_output_table_name(staging_query: StagingQuery):
"""generate output table name for staging query job"""
staging_query_module = importlib.import_module(get_mod_name_from_gc(staging_query, "staging_queries"))
import_module_set_name(staging_query_module, StagingQuery)
return staging_query.name.replace('.', '_')
def validate_check_consistency_source(check_consistency_source: EventSource) -> str:
"""Validate check_consistency_source object and returns error_msg if there is any errors.
"""
if not isinstance(check_consistency_source, EventSource):
return "check_consistency_source should be a EventSource object."
if not check_consistency_source.table:
return "check_consistency_source should have table."
if not check_consistency_source.query or not check_consistency_source.query.select:
return "check_consistency_source must have query with select."
keys_not_present = REQUIRED_CHECK_CONSITENCY_FIELDS - check_consistency_source.query.select.keys()
if keys_not_present:
keys_not_present_str = ", ".join(keys_not_present)
return f"required fields {keys_not_present} are not present in Select." | zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/utils.py | utils.py |
import copy
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import zipline.schema.thrift.ttypes as ttypes
from zipline.metadata_helper import get_dependencies, get_underlying_source
OperationType = int # type(zthrift.Operation.FIRST)
# The GroupBy's default online/production status is None and it will inherit
# online/production status from the Joins it is included.
# If it is included in multiple joins, it is considered online/production
# if any of the joins are online/production. Otherwise it is not online/production
# unless it is explicitly marked as online/production on the GroupBy itself.
DEFAULT_ONLINE = None
DEFAULT_PRODUCTION = None
# Fields allowed to be specified separated in streaming_query.
STREAMING_QUERY_FIELDS_ALLOWED = frozenset([
'select',
'where',
'timeColumn',
])
@dataclass
class DefaultAggregation:
operation: int = ttypes.Operation.LAST
windows: Optional[List[ttypes.Window]] = None
def window_to_str_pretty(window: ttypes.Window):
unit = ttypes.TimeUnit._VALUES_TO_NAMES[window.timeUnit].lower()
return f"{window.length} {unit}"
def op_to_str(operation: OperationType):
return ttypes.Operation._VALUES_TO_NAMES[operation].lower()
def _expand_aggregations(columns: List[str],
defaultAggregation: DefaultAggregation):
"""
used to aggregate all the columns in the query using the same operation.
"""
operation_name = op_to_str(defaultAggregation.operation)
for column in columns:
yield ttypes.Aggregation(
name=f"{column}_{operation_name}",
inputColumn=column,
operation=defaultAggregation.operation,
windows=defaultAggregation.windows,
)
def Select(**kwargs):
"""
Convenience function to convert kwargs into a map.
A map from alias to expression is what the underlying thrift object expects
"""
return kwargs
def Aggregations(**kwargs):
"""
fills in missing arguments of the aggregation object.
default operation is LAST
default name is {arg_name}_{operation_name}
default input column is {arg_name}
"""
aggs = []
for name, aggregation in kwargs.items():
assert isinstance(aggregation, ttypes.Aggregation), \
f"argument for {name}, {aggregation} is not instance of Aggregation"
if not aggregation.name:
aggregation.name = name
        if aggregation.operation is None:  # default operation is LAST (Operation.MIN == 0, so check for None explicitly)
            aggregation.operation = ttypes.Operation.LAST
if not aggregation.inputColumn: # Default input column is the variable name
aggregation.inputColumn = name
aggs.append(aggregation)
return aggs
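# Usage sketch (editorial addition; the column names are hypothetical). It shows how Select
# maps output aliases to expressions and how Aggregations fills in the defaults listed above.
def _example_select_and_aggregations():
    selected = Select(user="user_id", amount="CAST(price AS DOUBLE)")
    aggs = Aggregations(
        amount=ttypes.Aggregation(
            operation=ttypes.Operation.SUM,
            windows=[ttypes.Window(length=7, timeUnit=ttypes.TimeUnit.DAYS)]),
        last_price=ttypes.Aggregation(inputColumn="amount"),  # operation defaults to LAST
    )
    assert selected["amount"] == "CAST(price AS DOUBLE)"
    assert aggs[0].name == "amount" and aggs[0].inputColumn == "amount"
    assert aggs[1].operation == ttypes.Operation.LAST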
def get_query(source: ttypes.Source):
return get_underlying_source(source).query
def get_streaming_query(source: ttypes.Source):
return get_underlying_source(source).streamingQuery
def get_columns(source: ttypes.Source):
query = get_query(source)
columns = query.select.keys()
return columns
def contains_windowed_aggregation(aggregations: Optional[Union[List[ttypes.Aggregation], DefaultAggregation]]) -> bool:
if not aggregations:
return False
if isinstance(aggregations, DefaultAggregation):
if aggregations.windows:
return True
else:
for agg in aggregations:
if agg.windows:
return True
return False
def set_ts_as_time_column(query: ttypes.Query):
query.select.update({"ts": query.timeColumn})
def get_topic(source: ttypes.Source) -> str:
if source.events:
return source.events.topic
else:
return source.entities.mutationTopic
def contains_realtime_source(sources: List[ttypes.Source]) -> bool:
return any(get_topic(source) for source in sources)
def validate_streaming_query(query: ttypes.Query,
streaming_query: ttypes.Query):
default_query = ttypes.Query()
query_keys = query.select.keys()
streaming_query_keys = streaming_query.select.keys()
assert len(query_keys - streaming_query_keys) == 0, \
"streaming query select keys ({}) and query select keys ({}) do not match".format(
query_keys, streaming_query_keys)
    # non-empty fields whose values differ from the defaults.
nonempty_field_keys = set([k for (k, v) in streaming_query.__dict__.items()
if v is not None and len(v) > 0
and getattr(default_query, k) != v])
disallowed_keys = nonempty_field_keys - STREAMING_QUERY_FIELDS_ALLOWED
assert len(disallowed_keys) == 0, \
"streaming query cannot specify the following fields: {}".format(", ".join(disallowed_keys))
def validate_group_by(sources: List[ttypes.Source],
keys: List[str],
has_aggregations: bool,
is_realtime):
# check ts is not included in query.select
first_source_columns = set(get_columns(sources[0]))
assert "ts" not in first_source_columns, "'ts' is a reserved key word for Zipline," \
" please specify the expression in timeColumn"
for src in sources:
query = get_query(src)
if src.events:
assert query.ingestionTimeColumn is None, "ingestionTimeColumn should not be specified for " \
"event source as it should be the same with timeColumn"
assert query.reversalColumn is None, "reversalColumn should not be specified for event source " \
"as it won't have mutations"
if has_aggregations:
assert query.timeColumn, "Please specify timeColumn for source's query with windowed aggregations"
streaming_query = get_streaming_query(src)
if streaming_query:
validate_streaming_query(query, streaming_query)
else:
streaming_query = query
if is_realtime:
assert streaming_query.timeColumn, \
"Please specify timeColumn for source's streaming query with realtime streaming enabled"
# all sources should select the same columns
for i, source in enumerate(sources[1:]):
column_set = set(get_columns(source))
column_diff = column_set ^ first_source_columns
assert not column_diff, f"""
Mismatched columns among sources [1, {i+2}], Difference: {column_diff}
"""
# all keys should be present in the selected columns
unselected_keys = set(keys) - first_source_columns
assert not unselected_keys, f"""
Keys {unselected_keys}, are unselected in source
"""
def GroupBy(sources: Union[List[ttypes.Source], ttypes.Source],
keys: List[str],
aggregations: Optional[Union[List[ttypes.Aggregation], DefaultAggregation]],
# env variables - to control params for underlying compute jobs
# STREAMING_PARALLELISM, UPLOAD_PARALLELISM etc
env: Dict[str, str] = {},
online: bool = DEFAULT_ONLINE,
production: bool = DEFAULT_PRODUCTION) -> ttypes.GroupBy:
assert sources, "Sources are not specified"
if isinstance(sources, ttypes.Source):
sources = [sources]
has_aggregations = contains_windowed_aggregation(aggregations)
is_realtime = online and contains_realtime_source(sources)
validate_group_by(sources, keys, has_aggregations, is_realtime)
    # Create a deep copy for the case where multiple group_bys share the same sources;
    # otherwise source validation would fail after the first group_by mutates them.
updated_sources = copy.deepcopy(sources)
# mapping ts with query.timeColumn
for src in updated_sources:
query = get_query(src)
streaming_query = get_streaming_query(src)
if src.events:
set_ts_as_time_column(query)
if is_realtime and streaming_query:
set_ts_as_time_column(streaming_query)
# entity source
elif query.timeColumn:
# timeColumn for entity source is optional
set_ts_as_time_column(query)
if is_realtime and streaming_query:
set_ts_as_time_column(streaming_query)
query = get_query(updated_sources[0])
columns = get_columns(updated_sources[0])
expanded_aggregations = aggregations
# expand default aggregation to actual aggregations
if isinstance(aggregations, DefaultAggregation):
# TODO: validate that all timeColumns and partitionColumns
# are the same in all the sources
# column names that need to be excluded from aggregation
non_aggregate_columns = keys + [
"ts",
query.timeColumn,
query.partitionColumn
]
aggregate_columns = [
column
for column in columns
if column not in non_aggregate_columns
]
expanded_aggregations = list(_expand_aggregations(
aggregate_columns,
aggregations
))
    # flatten the dependencies across all sources
    dependencies = [dep for source in updated_sources for dep in get_dependencies(source)]
metadata = json.dumps({"dependencies": dependencies, "env": env})
return ttypes.GroupBy(
sources=updated_sources,
keyColumns=keys,
aggregations=expanded_aggregations,
metadata=metadata,
online=online,
production=production
) | zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/group_by.py | group_by.py |
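# Usage sketch (editorial addition; the table, topic and column names are hypothetical, and it
# assumes get_dependencies can derive dependencies for a plain event table). It builds a
# realtime GroupBy from an event source with a windowed default aggregation.
def _example_group_by():
    source = ttypes.Source(events=ttypes.EventSource(
        table="db.purchase_events",
        topic="purchase_events_topic",
        query=ttypes.Query(
            select=Select(user="user_id", amount="price"),
            timeColumn="created_at"),
    ))
    return GroupBy(
        sources=source,
        keys=["user"],
        aggregations=DefaultAggregation(
            operation=ttypes.Operation.SUM,
            windows=[ttypes.Window(length=30, timeUnit=ttypes.TimeUnit.DAYS)]),
        online=True,
    )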
from collections import defaultdict
import json
import logging
import os
import shutil
import subprocess
import tempfile
from typing import List, Optional
from zipline.repo import JOIN_FOLDER_NAME, \
GROUP_BY_FOLDER_NAME
from zipline.logger import get_logger
import zipline.repo.extract_objects as eo
from zipline.schema.thrift.ttypes import \
GroupBy, LeftOuterJoin
from zipline.schema.serializer import \
thrift_simple_json, file2thrift
from typing import Dict
# Fields that are skipped for diff check.
SKIPPED_FIELDS = frozenset(['production', 'metadata', 'frontfill'])
def _filter_skipped_fields_from_joinparts(json_obj: Dict, skipped_fields):
for join_part in json_obj['rightParts']:
group_by = join_part['groupBy']
for field in skipped_fields:
group_by.pop(field, None)
def is_valid_conf(conf: object):
return conf.name is not None
def extract_json_confs(obj_class: type, path: str):
if os.path.isfile(path):
conf = file2thrift(path, obj_class)
return [conf] if is_valid_conf(conf) else []
result = []
for sub_root, sub_dirs, sub_files in os.walk(path):
for f in sub_files:
obj = file2thrift(os.path.join(sub_root, f), obj_class)
if is_valid_conf(obj):
result.append(obj)
return result
class ZiplineRepoValidator(object):
def __init__(self, zipline_root_path: str, output_root: str, log_level=logging.INFO):
        self.logger = get_logger(log_level)
        self.old_objs = defaultdict(dict)
        self.zipline_root_path = zipline_root_path
        self.output_root = output_root
        self.log_level = log_level
        self.load_objs()
def load_objs(self):
        # We keep the objs in a list rather than a set since thrift ttypes
        # objects do not implement __hash__.
self.old_group_bys = extract_json_confs(
GroupBy,
os.path.join(self.zipline_root_path, self.output_root, GROUP_BY_FOLDER_NAME))
self.old_joins = extract_json_confs(
LeftOuterJoin,
os.path.join(self.zipline_root_path, self.output_root, JOIN_FOLDER_NAME))
self.old_objs['GroupBy'] = self.old_group_bys
self.old_objs['LeftOuterJoin'] = self.old_joins
def _get_old_obj(self, obj_class: type, obj_name: str) -> object:
"""
returns:
materialized version of the obj given the object's name.
"""
return next(
(x for x in self.old_objs[obj_class.__name__] if x.name == obj_name),
None
)
def _get_old_joins_with_group_by(self, group_by: GroupBy) -> List[LeftOuterJoin]:
"""
returns:
materialized joins including the group_by as dicts.
"""
return [join for join in self.old_joins if join.rightParts is not None and
group_by.name in [rp.groupBy.name for rp in join.rightParts]]
def can_skip_materialize(self, obj: object) -> List[str]:
"""
Check if the object can be skipped to be materialized and return reasons
if it can be.
"""
reasons = []
if isinstance(obj, GroupBy):
            # GroupBys explicitly marked as offline should not be materialized.
            if obj.online is False:
                reasons.append("is explicitly marked as offline")
            # Otherwise a group_by is materialized only if it is included in an
            # online join or is itself explicitly marked online.
            elif not any(join.online for join in self._get_old_joins_with_group_by(obj)) \
                    and not obj.online:
                reasons.append("is not marked online nor is included in any online join")
return reasons
def validate_obj(self, obj: object) -> List[str]:
"""
Validate Zipline API obj against other entities in the repo.
returns:
list of errors.
"""
if isinstance(obj, GroupBy):
return self._validate_group_by(obj)
elif isinstance(obj, LeftOuterJoin):
return self._validate_join(obj)
return []
def _has_diff(
self,
obj: object,
old_obj: object,
            skipped_fields=SKIPPED_FIELDS) -> bool:
new_json = {k: v for k, v in json.loads(thrift_simple_json(obj)).items()
if k not in skipped_fields}
old_json = {k: v for k, v in json.loads(thrift_simple_json(old_obj)).items()
if k not in skipped_fields}
if isinstance(obj, LeftOuterJoin):
_filter_skipped_fields_from_joinparts(new_json, skipped_fields)
_filter_skipped_fields_from_joinparts(old_json, skipped_fields)
return new_json != old_json
def safe_to_overwrite(self, obj: object) -> bool:
"""When an object is already materialized as online, it is no more safe
to materialize and overwrite the old conf.
"""
obj_class = type(obj).__name__
old_obj = self._get_old_obj(type(obj), obj.name)
return not old_obj or not self._has_diff(obj, old_obj) or not old_obj.online
def _validate_join(self, join: LeftOuterJoin) -> List[str]:
"""
Validate join's status with materialized versions of group_bys
included by the join.
Returns:
list of validation errors.
"""
included_group_bys = [rp.groupBy for rp in join.rightParts]
offline_included_group_bys = [gb.name for gb in included_group_bys
if gb.online is False]
errors = []
old_group_bys = [group_by for group_by in included_group_bys
if self._get_old_obj(GroupBy, group_by.name)]
non_prod_old_group_bys = [group_by.name for group_by in old_group_bys
if group_by.production is False]
if join.production and non_prod_old_group_bys:
errors.append("join {} is production but includes "
"the following non production group_bys: {}".format(
join.name, ', '.join(non_prod_old_group_bys)))
if join.online:
if offline_included_group_bys:
errors.append("join {} is online but includes "
"the following offline group_bys: {}".format(
join.name, ', '.join(offline_included_group_bys)))
            # If the join is online, the underlying group_bys are materialized as well,
            # so we need to check that they are valid.
group_by_errors = [self._validate_group_by(group_by) for group_by in included_group_bys]
errors += [f"join {join.name}'s underlying {error}"
for errors in group_by_errors for error in errors]
return errors
def _validate_group_by(self, group_by: GroupBy) -> List[str]:
"""
Validate group_by's status with materialized versions of joins
including the group_by.
Return:
List of validation errors.
"""
joins = self._get_old_joins_with_group_by(group_by)
online_joins = [join.name for join in joins if join.online is True]
prod_joins = [join.name for join in joins if join.production is True]
errors = []
        # group_bys that are explicitly marked offline should not be present in
        # materialized online joins.
if group_by.online is False and online_joins:
errors.append(
"group_by {} is explicitly marked offline but included in "
"the following online joins: {}".format(
group_by.name, ", ".join(online_joins)))
        # group_bys that are explicitly marked non-production should not be
        # present in materialized production joins.
if prod_joins:
if group_by.production is False:
errors.append("group_by {} is explicitly marked as non-production "
"but included in the following production joins: {}".format(
group_by.name, ', '.join(prod_joins)))
            # If the group_by is included in any materialized production join,
            # set it to production in the materialized output.
else:
group_by.production = True
return errors | zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/repo/validator.py | validator.py |
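# Usage sketch (editorial addition; the paths are hypothetical). The validator is built against
# a materialized repo and consulted per object before anything is written out.
def _example_validator_usage(group_by: GroupBy) -> List[str]:
    validator = ZiplineRepoValidator("/path/to/zipline_repo", "production", log_level=logging.DEBUG)
    if validator.can_skip_materialize(group_by):
        return []
    return validator.validate_obj(group_by)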
import logging
import os
import click
import zipline.repo.extract_objects as eo
from zipline.repo import JOIN_FOLDER_NAME, \
GROUP_BY_FOLDER_NAME, STAGING_QUERY_FOLDER_NAME
from zipline.repo.validator import ZiplineRepoValidator
from zipline.schema.serializer import thrift_simple_json_protected
from zipline.schema.thrift.ttypes import GroupBy, LeftOuterJoin, StagingQuery
# The zipline root is resolved in the main function, either from the
# command line or from the ZIPLINE_ROOT env variable during invocation.
FOLDER_NAME_TO_CLASS = {
GROUP_BY_FOLDER_NAME: GroupBy,
JOIN_FOLDER_NAME: LeftOuterJoin,
STAGING_QUERY_FOLDER_NAME: StagingQuery,
}
def get_folder_name_from_class_name(class_name):
return {v.__name__: k for k, v in FOLDER_NAME_TO_CLASS.items()}[class_name]
@click.command()
@click.option(
'--zipline_root',
envvar='ZIPLINE_ROOT',
help='Path to the root zipline folder',
default=False)
@click.option(
'--input_path',
    help='Path, relative to the zipline root folder, containing the objects to be serialized',
required=True)
@click.option(
'--output_root',
    help='Path, relative to the zipline root folder, where the serialized output should be written',
default="production")
@click.option(
'--debug',
help='debug mode',
is_flag=True)
@click.option(
'--force-overwrite',
help='Force overwriting existing materialized conf.',
is_flag=True)
def extract_and_convert(zipline_root, input_path, output_root, debug, force_overwrite):
"""
    CLI tool to convert Python zipline GroupBys, Joins and StagingQueries into their thrift representation.
    The materialized objects are what will be submitted to spark jobs - driven by airflow, or by manual user testing.
"""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
if not zipline_root:
zipline_root = os.getcwd()
_print_highlighted("Using zipline root path", zipline_root)
zipline_root_path = os.path.expanduser(zipline_root)
obj_folder_name = input_path.split('/', 1)[0]
obj_class = FOLDER_NAME_TO_CLASS[obj_folder_name]
full_input_path = os.path.join(zipline_root_path, input_path)
_print_highlighted(f"Input {obj_folder_name} from", full_input_path)
assert os.path.exists(full_input_path), f"Input Path: {full_input_path} doesn't exist"
if os.path.isdir(full_input_path):
results = eo.from_folder(zipline_root_path, full_input_path, obj_class, log_level=log_level)
elif os.path.isfile(full_input_path):
assert full_input_path.endswith(".py"), f"Input Path: {input_path} isn't a python file"
results = eo.from_file(zipline_root_path, full_input_path, obj_class, log_level=log_level)
else:
raise Exception(f"Input Path: {full_input_path}, isn't a file or a folder")
validator = ZiplineRepoValidator(zipline_root_path, output_root, log_level=log_level)
extra_online_group_bys = {}
num_written_objs = 0
full_output_root = os.path.join(zipline_root_path, output_root)
for name, obj in results.items():
if _write_obj(full_output_root, validator, name, obj, log_level, force_overwrite):
num_written_objs += 1
# In case of online join, we need to materialize the underlying online group_bys.
if obj_class is LeftOuterJoin and obj.online:
online_group_bys = {rt.groupBy.name: rt.groupBy for rt in obj.rightParts}
extra_online_group_bys.update(online_group_bys)
if extra_online_group_bys:
num_written_group_bys = 0
# load materialized joins to validate the additional group_bys against.
validator.load_objs()
for name, obj in extra_online_group_bys.items():
if _write_obj(full_output_root, validator, name, obj, log_level, force_overwrite):
num_written_group_bys += 1
print(f"Successfully wrote {num_written_group_bys} online GroupBy objects to {full_output_root}")
print(f"Successfully wrote {num_written_objs} {(obj_class).__name__} objects to {full_output_root}")
def _write_obj(full_output_root: str,
validator: ZiplineRepoValidator,
name: str,
obj: object,
log_level: int,
force_overwrite: bool) -> bool:
"""
Returns True if the object is successfully written.
"""
team_name = name.split(".")[0]
obj_class = type(obj)
class_name = obj_class.__name__
name = name.split('.', 1)[1]
_print_highlighted(f"{class_name} Team", team_name)
_print_highlighted(f"{class_name} Name", name)
obj_folder_name = get_folder_name_from_class_name(class_name)
output_path = os.path.join(full_output_root, obj_folder_name, team_name)
output_file = os.path.join(output_path, name)
skip_reasons = validator.can_skip_materialize(obj)
if skip_reasons:
reasons = ', '.join(skip_reasons)
_print_warning(f"Skipping {class_name} {name}: {reasons}")
if os.path.exists(output_file):
_print_warning(f"old file exists for skipped config: {output_file}")
return False
validation_errors = validator.validate_obj(obj)
if validation_errors:
_print_error(f"Could not write {class_name} {name}",
', '.join(validation_errors))
return False
if force_overwrite:
_print_warning(f"force overwrite {class_name} {name}")
elif not validator.safe_to_overwrite(obj):
_print_warning(f"cannot overwrite {class_name} {name} with existing online conf")
return False
_write_obj_as_json(name, obj, output_file, obj_class)
return True
def _write_obj_as_json(name: str, obj: object, output_file: str, obj_class: type):
class_name = obj_class.__name__
output_folder = os.path.dirname(output_file)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
assert os.path.isdir(output_folder), f"{output_folder} isn't a folder."
assert hasattr(obj, "name"), f"Can't serialize objects without the name attribute for object {name}"
with open(output_file, "w") as f:
_print_highlighted(f"Writing {class_name} to", output_file)
f.write(thrift_simple_json_protected(obj, obj_class))
def _print_highlighted(left, right):
# print in blue and bold
print(f"{left:>25} - \033[34m\033[1m{right}\033[00m")
def _print_error(left, right):
# print in red.
print(f"\033[91m{left:>25} - \033[1m{right}\033[00m")
def _print_warning(string):
# print in yellow.
print(f"\033[93m{string}\033[00m")
if __name__ == '__main__':
extract_and_convert() | zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/repo/materialize.py | materialize.py |
import json
from thrift.Thrift import TType
from thrift.protocol.TJSONProtocol import TSimpleJSONProtocolFactory, TJSONProtocolFactory
from thrift import TSerialization
class ThriftJSONDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
self._thrift_class = kwargs.pop('thrift_class')
super(ThriftJSONDecoder, self).__init__(*args, **kwargs)
def decode(self, json_str):
if isinstance(json_str, dict):
dct = json_str
else:
dct = super(ThriftJSONDecoder, self).decode(json_str)
return self._convert(dct, TType.STRUCT,
(self._thrift_class, self._thrift_class.thrift_spec))
def _convert(self, val, ttype, ttype_info):
if ttype == TType.STRUCT:
(thrift_class, thrift_spec) = ttype_info
ret = thrift_class()
for field in thrift_spec:
if field is None:
continue
(_, field_ttype, field_name, field_ttype_info, dummy) = field
if field_name not in val:
continue
converted_val = self._convert(val[field_name], field_ttype, field_ttype_info)
setattr(ret, field_name, converted_val)
elif ttype == TType.LIST:
(element_ttype, element_ttype_info, _) = ttype_info
ret = [self._convert(x, element_ttype, element_ttype_info) for x in val]
elif ttype == TType.SET:
(element_ttype, element_ttype_info) = ttype_info
ret = set([self._convert(x, element_ttype, element_ttype_info) for x in val])
elif ttype == TType.MAP:
(key_ttype, key_ttype_info, val_ttype, val_ttype_info, _) = ttype_info
ret = dict([(self._convert(k, key_ttype, key_ttype_info),
self._convert(v, val_ttype, val_ttype_info)) for (k, v) in val.items()])
elif ttype == TType.STRING:
ret = str(val)
elif ttype == TType.DOUBLE:
ret = float(val)
elif ttype == TType.I64:
ret = int(val)
elif ttype == TType.I32 or ttype == TType.I16 or ttype == TType.BYTE:
ret = int(val)
elif ttype == TType.BOOL:
ret = bool(val)
else:
raise TypeError('Unrecognized thrift field type: %d' % ttype)
return ret
def json2thrift(json_str, thrift_class):
return json.loads(json_str, cls=ThriftJSONDecoder, thrift_class=thrift_class)
def file2thrift(path, thrift_class):
with open(path, 'r') as file:
return json2thrift(file.read(), thrift_class)
def thrift_json(obj):
return TSerialization.serialize(obj, protocol_factory=TJSONProtocolFactory())
def thrift_simple_json(obj):
simple = TSerialization.serialize(obj, protocol_factory=TSimpleJSONProtocolFactory())
parsed = json.loads(simple)
return json.dumps(parsed, indent=2)
def thrift_simple_json_protected(obj, obj_type):
serialized = thrift_simple_json(obj)
# ensure that reversal works - we will use this reversal during deployment
thrift_obj = json.loads(serialized, cls=ThriftJSONDecoder, thrift_class=obj_type)
actual = thrift_simple_json(thrift_obj)
assert actual == serialized, f"""Serialization can't be reversed
Expected:
{serialized}
***************
Actual:
{actual}
"""
return serialized | zipline-ai | /zipline_ai-0.0.19-py3-none-any.whl/zipline/schema/serializer.py | serializer.py |
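# Usage sketch (editorial addition): round-trips a small thrift object through the
# simple-JSON helpers to show that serialization is reversible.
def _example_round_trip():
    from zipline.schema.thrift.ttypes import TimeUnit, Window
    window = Window(length=7, timeUnit=TimeUnit.DAYS)
    json_str = thrift_simple_json_protected(window, Window)
    assert json2thrift(json_str, Window) == window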
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class PartitionStrategy(object):
INCREMENTAL = 0
CUMULATIVE = 1
_VALUES_TO_NAMES = {
0: "INCREMENTAL",
1: "CUMULATIVE",
}
_NAMES_TO_VALUES = {
"INCREMENTAL": 0,
"CUMULATIVE": 1,
}
class Operation(object):
MIN = 0
MAX = 1
FIRST = 2
LAST = 3
TOP = 4
BOTTOM = 5
APPROX_UNIQUE_COUNT = 6
COUNT = 7
SUM = 8
MEAN = 9
VARIANCE = 10
SKEW = 11
KURTOSIS = 12
APPROX_PERCENTILE = 13
_VALUES_TO_NAMES = {
0: "MIN",
1: "MAX",
2: "FIRST",
3: "LAST",
4: "TOP",
5: "BOTTOM",
6: "APPROX_UNIQUE_COUNT",
7: "COUNT",
8: "SUM",
9: "MEAN",
10: "VARIANCE",
11: "SKEW",
12: "KURTOSIS",
13: "APPROX_PERCENTILE",
}
_NAMES_TO_VALUES = {
"MIN": 0,
"MAX": 1,
"FIRST": 2,
"LAST": 3,
"TOP": 4,
"BOTTOM": 5,
"APPROX_UNIQUE_COUNT": 6,
"COUNT": 7,
"SUM": 8,
"MEAN": 9,
"VARIANCE": 10,
"SKEW": 11,
"KURTOSIS": 12,
"APPROX_PERCENTILE": 13,
}
class TimeUnit(object):
SECONDS = 0
MINUTES = 1
HOURS = 2
DAYS = 3
WEEKS = 4
YEARS = 5
_VALUES_TO_NAMES = {
0: "SECONDS",
1: "MINUTES",
2: "HOURS",
3: "DAYS",
4: "WEEKS",
5: "YEARS",
}
_NAMES_TO_VALUES = {
"SECONDS": 0,
"MINUTES": 1,
"HOURS": 2,
"DAYS": 3,
"WEEKS": 4,
"YEARS": 5,
}
class Query(object):
"""
Attributes:
- select
- where
- startPartition
- endPartition
- timeColumn
- partitionColumn
- partitionFormat
- setups
- ingestionTimeColumn
- reversalColumn
- dependencies
"""
def __init__(self, select=None, where=None, startPartition=None, endPartition=None, timeColumn=None, partitionColumn="ds", partitionFormat="yyyy-MM-dd", setups=[
], ingestionTimeColumn=None, reversalColumn=None, dependencies=[
],):
self.select = select
self.where = where
self.startPartition = startPartition
self.endPartition = endPartition
self.timeColumn = timeColumn
self.partitionColumn = partitionColumn
self.partitionFormat = partitionFormat
if setups is self.thrift_spec[8][4]:
setups = [
]
self.setups = setups
self.ingestionTimeColumn = ingestionTimeColumn
self.reversalColumn = reversalColumn
if dependencies is self.thrift_spec[11][4]:
dependencies = [
]
self.dependencies = dependencies
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.select = {}
(_ktype1, _vtype2, _size0) = iprot.readMapBegin()
for _i4 in range(_size0):
_key5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.select[_key5] = _val6
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.where = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.startPartition = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.endPartition = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.timeColumn = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.partitionColumn = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.partitionFormat = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.setups = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.setups.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.STRING:
self.ingestionTimeColumn = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.STRING:
self.reversalColumn = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.LIST:
self.dependencies = []
(_etype16, _size13) = iprot.readListBegin()
for _i17 in range(_size13):
_elem18 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.dependencies.append(_elem18)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Query')
if self.select is not None:
oprot.writeFieldBegin('select', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.select))
for kiter19, viter20 in self.select.items():
oprot.writeString(kiter19.encode('utf-8') if sys.version_info[0] == 2 else kiter19)
oprot.writeString(viter20.encode('utf-8') if sys.version_info[0] == 2 else viter20)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.where is not None:
oprot.writeFieldBegin('where', TType.STRING, 2)
oprot.writeString(self.where.encode('utf-8') if sys.version_info[0] == 2 else self.where)
oprot.writeFieldEnd()
if self.startPartition is not None:
oprot.writeFieldBegin('startPartition', TType.STRING, 3)
oprot.writeString(self.startPartition.encode('utf-8') if sys.version_info[0] == 2 else self.startPartition)
oprot.writeFieldEnd()
if self.endPartition is not None:
oprot.writeFieldBegin('endPartition', TType.STRING, 4)
oprot.writeString(self.endPartition.encode('utf-8') if sys.version_info[0] == 2 else self.endPartition)
oprot.writeFieldEnd()
if self.timeColumn is not None:
oprot.writeFieldBegin('timeColumn', TType.STRING, 5)
oprot.writeString(self.timeColumn.encode('utf-8') if sys.version_info[0] == 2 else self.timeColumn)
oprot.writeFieldEnd()
if self.partitionColumn is not None:
oprot.writeFieldBegin('partitionColumn', TType.STRING, 6)
oprot.writeString(self.partitionColumn.encode('utf-8') if sys.version_info[0] == 2 else self.partitionColumn)
oprot.writeFieldEnd()
if self.partitionFormat is not None:
oprot.writeFieldBegin('partitionFormat', TType.STRING, 7)
oprot.writeString(self.partitionFormat.encode('utf-8') if sys.version_info[0] == 2 else self.partitionFormat)
oprot.writeFieldEnd()
if self.setups is not None:
oprot.writeFieldBegin('setups', TType.LIST, 8)
oprot.writeListBegin(TType.STRING, len(self.setups))
for iter21 in self.setups:
oprot.writeString(iter21.encode('utf-8') if sys.version_info[0] == 2 else iter21)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ingestionTimeColumn is not None:
oprot.writeFieldBegin('ingestionTimeColumn', TType.STRING, 9)
oprot.writeString(self.ingestionTimeColumn.encode('utf-8') if sys.version_info[0] == 2 else self.ingestionTimeColumn)
oprot.writeFieldEnd()
if self.reversalColumn is not None:
oprot.writeFieldBegin('reversalColumn', TType.STRING, 10)
oprot.writeString(self.reversalColumn.encode('utf-8') if sys.version_info[0] == 2 else self.reversalColumn)
oprot.writeFieldEnd()
if self.dependencies is not None:
oprot.writeFieldBegin('dependencies', TType.LIST, 11)
oprot.writeListBegin(TType.STRING, len(self.dependencies))
for iter22 in self.dependencies:
oprot.writeString(iter22.encode('utf-8') if sys.version_info[0] == 2 else iter22)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class StagingQuery(object):
"""
StagingQuery can be used to express free form ETL (including joins/group by) within Zipline. Simple column level
transformations that don't require joins or aggregations should be expressed as a `Query`.
Attributes:
- name
- query
- startDate
- dependencies
- querySetupCommands
"""
def __init__(self, name=None, query=None, startDate=None, dependencies=None, querySetupCommands=None,):
self.name = name
self.query = query
self.startDate = startDate
self.dependencies = dependencies
self.querySetupCommands = querySetupCommands
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.query = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.startDate = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.dependencies = []
(_etype26, _size23) = iprot.readListBegin()
for _i27 in range(_size23):
_elem28 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.dependencies.append(_elem28)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.querySetupCommands = []
(_etype32, _size29) = iprot.readListBegin()
for _i33 in range(_size29):
_elem34 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.querySetupCommands.append(_elem34)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('StagingQuery')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRING, 2)
oprot.writeString(self.query.encode('utf-8') if sys.version_info[0] == 2 else self.query)
oprot.writeFieldEnd()
if self.startDate is not None:
oprot.writeFieldBegin('startDate', TType.STRING, 3)
oprot.writeString(self.startDate.encode('utf-8') if sys.version_info[0] == 2 else self.startDate)
oprot.writeFieldEnd()
if self.dependencies is not None:
oprot.writeFieldBegin('dependencies', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.dependencies))
for iter35 in self.dependencies:
oprot.writeString(iter35.encode('utf-8') if sys.version_info[0] == 2 else iter35)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.querySetupCommands is not None:
oprot.writeFieldBegin('querySetupCommands', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.querySetupCommands))
for iter36 in self.querySetupCommands:
oprot.writeString(iter36.encode('utf-8') if sys.version_info[0] == 2 else iter36)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class EventSource(object):
"""
The underlying data that generates your feature is an event
with a logical timestamp for when it occurred in the real world,
and the event is immutable in the underlying source.
Attributes:
- table
- topic
- query
- partitionStrategy
- streamingQuery
"""
def __init__(self, table=None, topic=None, query=None, partitionStrategy=0, streamingQuery=None,):
self.table = table
self.topic = topic
self.query = query
self.partitionStrategy = partitionStrategy
self.streamingQuery = streamingQuery
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.table = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.topic = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.query = Query()
self.query.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.partitionStrategy = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.streamingQuery = Query()
self.streamingQuery.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('EventSource')
if self.table is not None:
oprot.writeFieldBegin('table', TType.STRING, 1)
oprot.writeString(self.table.encode('utf-8') if sys.version_info[0] == 2 else self.table)
oprot.writeFieldEnd()
if self.topic is not None:
oprot.writeFieldBegin('topic', TType.STRING, 2)
oprot.writeString(self.topic.encode('utf-8') if sys.version_info[0] == 2 else self.topic)
oprot.writeFieldEnd()
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRUCT, 3)
self.query.write(oprot)
oprot.writeFieldEnd()
if self.partitionStrategy is not None:
oprot.writeFieldBegin('partitionStrategy', TType.I32, 4)
oprot.writeI32(self.partitionStrategy)
oprot.writeFieldEnd()
if self.streamingQuery is not None:
oprot.writeFieldBegin('streamingQuery', TType.STRUCT, 5)
self.streamingQuery.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class EntitySource(object):
"""
The underlying data that generates your feature is an entity
that exists across time - as opposed to an event that occurs
at a point in time. Each partition of underlying data contains
a snapshot of these entities and their values as of that day.
"Dim" DB snapshots are the Quintessential use-case for this source.
Attributes:
- snapshotTable
- mutationTable
- mutationTopic
- query
- streamingQuery
"""
def __init__(self, snapshotTable=None, mutationTable=None, mutationTopic=None, query=None, streamingQuery=None,):
self.snapshotTable = snapshotTable
self.mutationTable = mutationTable
self.mutationTopic = mutationTopic
self.query = query
self.streamingQuery = streamingQuery
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.snapshotTable = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.mutationTable = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.mutationTopic = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.query = Query()
self.query.read(iprot)
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRUCT:
self.streamingQuery = Query()
self.streamingQuery.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('EntitySource')
if self.snapshotTable is not None:
oprot.writeFieldBegin('snapshotTable', TType.STRING, 1)
oprot.writeString(self.snapshotTable.encode('utf-8') if sys.version_info[0] == 2 else self.snapshotTable)
oprot.writeFieldEnd()
if self.mutationTable is not None:
oprot.writeFieldBegin('mutationTable', TType.STRING, 2)
oprot.writeString(self.mutationTable.encode('utf-8') if sys.version_info[0] == 2 else self.mutationTable)
oprot.writeFieldEnd()
if self.mutationTopic is not None:
oprot.writeFieldBegin('mutationTopic', TType.STRING, 3)
oprot.writeString(self.mutationTopic.encode('utf-8') if sys.version_info[0] == 2 else self.mutationTopic)
oprot.writeFieldEnd()
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRUCT, 4)
self.query.write(oprot)
oprot.writeFieldEnd()
if self.streamingQuery is not None:
oprot.writeFieldBegin('streamingQuery', TType.STRUCT, 5)
self.streamingQuery.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Source(object):
"""
Attributes:
- events
- entities
"""
def __init__(self, events=None, entities=None,):
self.events = events
self.entities = entities
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.events = EventSource()
self.events.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.entities = EntitySource()
self.entities.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Source')
if self.events is not None:
oprot.writeFieldBegin('events', TType.STRUCT, 1)
self.events.write(oprot)
oprot.writeFieldEnd()
if self.entities is not None:
oprot.writeFieldBegin('entities', TType.STRUCT, 2)
self.entities.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Window(object):
"""
Attributes:
- length
- timeUnit
"""
def __init__(self, length=None, timeUnit=None,):
self.length = length
self.timeUnit = timeUnit
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.length = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.timeUnit = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Window')
if self.length is not None:
oprot.writeFieldBegin('length', TType.I32, 1)
oprot.writeI32(self.length)
oprot.writeFieldEnd()
if self.timeUnit is not None:
oprot.writeFieldBegin('timeUnit', TType.I32, 2)
oprot.writeI32(self.timeUnit)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Aggregation(object):
"""
Attributes:
- name
- inputColumn
- operation
- constructorJson
- bucketColumns
- windows
- documentation
"""
def __init__(self, name=None, inputColumn=None, operation=None, constructorJson=None, bucketColumns=None, windows=None, documentation=None,):
self.name = name
self.inputColumn = inputColumn
self.operation = operation
self.constructorJson = constructorJson
self.bucketColumns = bucketColumns
self.windows = windows
self.documentation = documentation
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.inputColumn = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.operation = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.constructorJson = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.bucketColumns = []
(_etype40, _size37) = iprot.readListBegin()
for _i41 in range(_size37):
_elem42 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.bucketColumns.append(_elem42)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.windows = []
(_etype46, _size43) = iprot.readListBegin()
for _i47 in range(_size43):
_elem48 = Window()
_elem48.read(iprot)
self.windows.append(_elem48)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.documentation = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Aggregation')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.inputColumn is not None:
oprot.writeFieldBegin('inputColumn', TType.STRING, 2)
oprot.writeString(self.inputColumn.encode('utf-8') if sys.version_info[0] == 2 else self.inputColumn)
oprot.writeFieldEnd()
if self.operation is not None:
oprot.writeFieldBegin('operation', TType.I32, 3)
oprot.writeI32(self.operation)
oprot.writeFieldEnd()
if self.constructorJson is not None:
oprot.writeFieldBegin('constructorJson', TType.STRING, 4)
oprot.writeString(self.constructorJson.encode('utf-8') if sys.version_info[0] == 2 else self.constructorJson)
oprot.writeFieldEnd()
if self.bucketColumns is not None:
oprot.writeFieldBegin('bucketColumns', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.bucketColumns))
for iter49 in self.bucketColumns:
oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.windows is not None:
oprot.writeFieldBegin('windows', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.windows))
for iter50 in self.windows:
iter50.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.documentation is not None:
oprot.writeFieldBegin('documentation', TType.STRING, 7)
oprot.writeString(self.documentation.encode('utf-8') if sys.version_info[0] == 2 else self.documentation)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class GroupBy(object):
"""
Attributes:
- name
- sources
- keyColumns
- aggregations
- metadata
- online
- production
"""
def __init__(self, name=None, sources=None, keyColumns=None, aggregations=None, metadata=None, online=None, production=None,):
self.name = name
self.sources = sources
self.keyColumns = keyColumns
self.aggregations = aggregations
self.metadata = metadata
self.online = online
self.production = production
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.sources = []
(_etype54, _size51) = iprot.readListBegin()
for _i55 in range(_size51):
_elem56 = Source()
_elem56.read(iprot)
self.sources.append(_elem56)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.keyColumns = []
(_etype60, _size57) = iprot.readListBegin()
for _i61 in range(_size57):
_elem62 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.keyColumns.append(_elem62)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.aggregations = []
(_etype66, _size63) = iprot.readListBegin()
for _i67 in range(_size63):
_elem68 = Aggregation()
_elem68.read(iprot)
self.aggregations.append(_elem68)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.metadata = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.online = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.production = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('GroupBy')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.sources is not None:
oprot.writeFieldBegin('sources', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.sources))
for iter69 in self.sources:
iter69.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.keyColumns is not None:
oprot.writeFieldBegin('keyColumns', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.keyColumns))
for iter70 in self.keyColumns:
oprot.writeString(iter70.encode('utf-8') if sys.version_info[0] == 2 else iter70)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.aggregations is not None:
oprot.writeFieldBegin('aggregations', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.aggregations))
for iter71 in self.aggregations:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.metadata is not None:
oprot.writeFieldBegin('metadata', TType.STRING, 5)
oprot.writeString(self.metadata.encode('utf-8') if sys.version_info[0] == 2 else self.metadata)
oprot.writeFieldEnd()
if self.online is not None:
oprot.writeFieldBegin('online', TType.BOOL, 6)
oprot.writeBool(self.online)
oprot.writeFieldEnd()
if self.production is not None:
oprot.writeFieldBegin('production', TType.BOOL, 7)
oprot.writeBool(self.production)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AggregationSelector(object):
"""
Attributes:
- name
- windows
"""
def __init__(self, name=None, windows=None,):
self.name = name
self.windows = windows
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.windows = []
(_etype75, _size72) = iprot.readListBegin()
for _i76 in range(_size72):
_elem77 = Window()
_elem77.read(iprot)
self.windows.append(_elem77)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('AggregationSelector')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.windows is not None:
oprot.writeFieldBegin('windows', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.windows))
for iter78 in self.windows:
iter78.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JoinPart(object):
"""
Attributes:
- groupBy
- keyMapping
- selectors
- prefix
"""
def __init__(self, groupBy=None, keyMapping=None, selectors=None, prefix=None,):
self.groupBy = groupBy
self.keyMapping = keyMapping
self.selectors = selectors
self.prefix = prefix
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.groupBy = GroupBy()
self.groupBy.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.keyMapping = {}
(_ktype80, _vtype81, _size79) = iprot.readMapBegin()
for _i83 in range(_size79):
_key84 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val85 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.keyMapping[_key84] = _val85
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.selectors = []
(_etype89, _size86) = iprot.readListBegin()
for _i90 in range(_size86):
_elem91 = AggregationSelector()
_elem91.read(iprot)
self.selectors.append(_elem91)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.prefix = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('JoinPart')
if self.groupBy is not None:
oprot.writeFieldBegin('groupBy', TType.STRUCT, 1)
self.groupBy.write(oprot)
oprot.writeFieldEnd()
if self.keyMapping is not None:
oprot.writeFieldBegin('keyMapping', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.keyMapping))
for kiter92, viter93 in self.keyMapping.items():
oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92)
oprot.writeString(viter93.encode('utf-8') if sys.version_info[0] == 2 else viter93)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.selectors is not None:
oprot.writeFieldBegin('selectors', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.selectors))
for iter94 in self.selectors:
iter94.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.prefix is not None:
oprot.writeFieldBegin('prefix', TType.STRING, 4)
oprot.writeString(self.prefix.encode('utf-8') if sys.version_info[0] == 2 else self.prefix)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class LeftOuterJoin(object):
"""
Attributes:
- name
- left
- rightParts
- metadata
- online
- production
- frontfill
"""
def __init__(self, name=None, left=None, rightParts=None, metadata=None, online=False, production=False, frontfill=True,):
self.name = name
self.left = left
self.rightParts = rightParts
self.metadata = metadata
self.online = online
self.production = production
self.frontfill = frontfill
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.left = Source()
self.left.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.rightParts = []
(_etype98, _size95) = iprot.readListBegin()
for _i99 in range(_size95):
_elem100 = JoinPart()
_elem100.read(iprot)
self.rightParts.append(_elem100)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.metadata = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.online = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.production = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.frontfill = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('LeftOuterJoin')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.left is not None:
oprot.writeFieldBegin('left', TType.STRUCT, 2)
self.left.write(oprot)
oprot.writeFieldEnd()
if self.rightParts is not None:
oprot.writeFieldBegin('rightParts', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.rightParts))
for iter101 in self.rightParts:
iter101.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.metadata is not None:
oprot.writeFieldBegin('metadata', TType.STRING, 4)
oprot.writeString(self.metadata.encode('utf-8') if sys.version_info[0] == 2 else self.metadata)
oprot.writeFieldEnd()
if self.online is not None:
oprot.writeFieldBegin('online', TType.BOOL, 5)
oprot.writeBool(self.online)
oprot.writeFieldEnd()
if self.production is not None:
oprot.writeFieldBegin('production', TType.BOOL, 6)
oprot.writeBool(self.production)
oprot.writeFieldEnd()
if self.frontfill is not None:
oprot.writeFieldBegin('frontfill', TType.BOOL, 7)
oprot.writeBool(self.frontfill)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(Query)
Query.thrift_spec = (
None, # 0
(1, TType.MAP, 'select', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 1
(2, TType.STRING, 'where', 'UTF8', None, ), # 2
(3, TType.STRING, 'startPartition', 'UTF8', None, ), # 3
(4, TType.STRING, 'endPartition', 'UTF8', None, ), # 4
(5, TType.STRING, 'timeColumn', 'UTF8', None, ), # 5
(6, TType.STRING, 'partitionColumn', 'UTF8', "ds", ), # 6
(7, TType.STRING, 'partitionFormat', 'UTF8', "yyyy-MM-dd", ), # 7
(8, TType.LIST, 'setups', (TType.STRING, 'UTF8', False), [
], ), # 8
(9, TType.STRING, 'ingestionTimeColumn', 'UTF8', None, ), # 9
(10, TType.STRING, 'reversalColumn', 'UTF8', None, ), # 10
(11, TType.LIST, 'dependencies', (TType.STRING, 'UTF8', False), [
], ), # 11
)
all_structs.append(StagingQuery)
StagingQuery.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.STRING, 'query', 'UTF8', None, ), # 2
(3, TType.STRING, 'startDate', 'UTF8', None, ), # 3
(4, TType.LIST, 'dependencies', (TType.STRING, 'UTF8', False), None, ), # 4
(5, TType.LIST, 'querySetupCommands', (TType.STRING, 'UTF8', False), None, ), # 5
)
all_structs.append(EventSource)
EventSource.thrift_spec = (
None, # 0
(1, TType.STRING, 'table', 'UTF8', None, ), # 1
(2, TType.STRING, 'topic', 'UTF8', None, ), # 2
(3, TType.STRUCT, 'query', [Query, None], None, ), # 3
(4, TType.I32, 'partitionStrategy', None, 0, ), # 4
(5, TType.STRUCT, 'streamingQuery', [Query, None], None, ), # 5
)
all_structs.append(EntitySource)
EntitySource.thrift_spec = (
None, # 0
(1, TType.STRING, 'snapshotTable', 'UTF8', None, ), # 1
(2, TType.STRING, 'mutationTable', 'UTF8', None, ), # 2
(3, TType.STRING, 'mutationTopic', 'UTF8', None, ), # 3
(4, TType.STRUCT, 'query', [Query, None], None, ), # 4
(5, TType.STRUCT, 'streamingQuery', [Query, None], None, ), # 5
)
all_structs.append(Source)
Source.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'events', [EventSource, None], None, ), # 1
(2, TType.STRUCT, 'entities', [EntitySource, None], None, ), # 2
)
all_structs.append(Window)
Window.thrift_spec = (
None, # 0
(1, TType.I32, 'length', None, None, ), # 1
(2, TType.I32, 'timeUnit', None, None, ), # 2
)
all_structs.append(Aggregation)
Aggregation.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.STRING, 'inputColumn', 'UTF8', None, ), # 2
(3, TType.I32, 'operation', None, None, ), # 3
(4, TType.STRING, 'constructorJson', 'UTF8', None, ), # 4
(5, TType.LIST, 'bucketColumns', (TType.STRING, 'UTF8', False), None, ), # 5
(6, TType.LIST, 'windows', (TType.STRUCT, [Window, None], False), None, ), # 6
(7, TType.STRING, 'documentation', 'UTF8', None, ), # 7
)
all_structs.append(GroupBy)
GroupBy.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.LIST, 'sources', (TType.STRUCT, [Source, None], False), None, ), # 2
(3, TType.LIST, 'keyColumns', (TType.STRING, 'UTF8', False), None, ), # 3
(4, TType.LIST, 'aggregations', (TType.STRUCT, [Aggregation, None], False), None, ), # 4
(5, TType.STRING, 'metadata', 'UTF8', None, ), # 5
(6, TType.BOOL, 'online', None, None, ), # 6
(7, TType.BOOL, 'production', None, None, ), # 7
)
all_structs.append(AggregationSelector)
AggregationSelector.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.LIST, 'windows', (TType.STRUCT, [Window, None], False), None, ), # 2
)
all_structs.append(JoinPart)
JoinPart.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'groupBy', [GroupBy, None], None, ), # 1
(2, TType.MAP, 'keyMapping', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2
(3, TType.LIST, 'selectors', (TType.STRUCT, [AggregationSelector, None], False), None, ), # 3
(4, TType.STRING, 'prefix', 'UTF8', None, ), # 4
)
all_structs.append(LeftOuterJoin)
LeftOuterJoin.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'left', [Source, None], None, ), # 2
(3, TType.LIST, 'rightParts', (TType.STRUCT, [JoinPart, None], False), None, ), # 3
(4, TType.STRING, 'metadata', 'UTF8', None, ), # 4
(5, TType.BOOL, 'online', None, False, ), # 5
(6, TType.BOOL, 'production', None, False, ), # 6
(7, TType.BOOL, 'frontfill', None, True, ), # 7
)
fix_spec(all_structs)
del all_structs
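# Illustrative only, not part of the generated module: a minimal sketch of
# round-tripping one of the structs above through Thrift's standard binary
# protocol. TMemoryBuffer and TBinaryProtocol are stock Thrift classes; the
# field values below ('clicks_sum', a 7-unit window) are made up.
if __name__ == '__main__':
    from thrift.protocol import TBinaryProtocol
    selector = AggregationSelector(name='clicks_sum', windows=[Window(length=7, timeUnit=0)])
    write_buffer = TTransport.TMemoryBuffer()
    selector.write(TBinaryProtocol.TBinaryProtocol(write_buffer))
    payload = write_buffer.getvalue()
    decoded = AggregationSelector()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))
    print(decoded)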
Zipline BitMEX
==============
BitMEX bundle for `Zipline <https://github.com/quantopian/zipline>`_
**[WARNING]** There is a bug in this repo. It can ingest data from the BitMEX API into the Zipline data folder, but somehow I can't run an algorithm against it. Any PRs or advice would be appreciated!
Usage
-----
1. Install this package with pip:
::
pip install zipline-bitmex
You may want to run this command with the ``--user`` parameter.
2. Register this package to Zipline by writing following content to
``$HOME/.zipline/extension.py``:
.. code:: python
from zipline.data.bundles import register
from zipline_bitmex import bitmex_bundle
import pandas as pd
start = pd.Timestamp('2019-01-01', tz='utc')
end = pd.Timestamp('2019-01-07', tz='utc')
register(
'bitmex',
bitmex_bundle(['XBTUSD']),
calendar_name='bitmex',
start_session=start,
end_session=end,
minutes_per_day=24*60,
)
3. Ingest the data bundle with:
::
zipline ingest -b bitmex
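Once ingestion succeeds, a backtest would normally be launched against the
bundle roughly as sketched below. This is an untested sketch (see the warning
above); the dates, capital base and no-op algorithm are placeholders:

.. code:: python

    import pandas as pd
    import zipline_bitmex.bitmex_bundle  # noqa: F401 -- registers the 'bitmex' calendar
    from zipline import run_algorithm
    from zipline.utils.calendars import get_calendar

    def initialize(context):
        pass  # placeholder: look up assets, schedule rebalances, etc.

    def handle_data(context, data):
        pass  # placeholder: trading logic goes here

    run_algorithm(
        start=pd.Timestamp('2019-01-02', tz='utc'),
        end=pd.Timestamp('2019-01-06', tz='utc'),
        initialize=initialize,
        handle_data=handle_data,
        capital_base=10000,
        bundle='bitmex',
        data_frequency='daily',
        trading_calendar=get_calendar('bitmex'),
    )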
from logging import getLogger
from typing import Callable, Dict, Iterator, List, Mapping, NoReturn, Text, Tuple
from requests import get
from zipline.assets import AssetDBWriter
from zipline.data.minute_bars import BcolzMinuteBarWriter
from zipline.data.us_equity_pricing import BcolzDailyBarWriter, SQLiteAdjustmentWriter
from zipline.utils.cache import dataframe_cache
from zipline.utils.calendars import register_calendar, TradingCalendar
import numpy as np
import pandas as pd
from .bitmex_calendar import BitmexCalendar
LOGGER = getLogger()
def _bitmex_rest(operation: Text, params: Dict) -> List:
assert operation[0] == '/'
res = get('https://www.bitmex.com/api/v1' + operation, params=params)
if not res.ok:
raise Exception(res)
res = res.json()
if not isinstance(res, list):
raise Exception(res)
return res
def _get_metadata(sid_map: List[Tuple[int, Text]]) -> pd.DataFrame:
metadata = pd.DataFrame(
np.empty(
len(sid_map),
dtype=[
('symbol', 'str'),
('root_symbol', 'str'),
('asset_name', 'str'),
('expiration_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('tick_size', 'float'),
('multiplier', 'float'),
],
),
)
for sid, symbol in sid_map:
res = _bitmex_rest('/instrument', {'symbol': symbol})
assert len(res) == 1
res = res[0]
metadata.loc[sid, 'symbol'] = symbol
metadata.loc[sid, 'root_symbol'] = res['rootSymbol']
metadata.loc[sid, 'asset_name'] = res['underlying']
metadata.loc[sid, 'expiration_date'] = pd.to_datetime(res['expiry'])
metadata.loc[sid, 'auto_close_date'] = pd.to_datetime(res['settle'])
metadata.loc[sid, 'tick_size'] = res['tickSize']
metadata.loc[sid, 'multiplier'] = res['lotSize']
metadata['exchange'] = 'bitmex'
return metadata
def _get_bars(
sid_map: List[Tuple[int, Text]],
start_session: pd.Timestamp,
end_session: pd.Timestamp,
cache: dataframe_cache,
bin_size: Text,
) -> Iterator[Tuple[int, pd.DataFrame]]:
for sid, symbol in sid_map:
key = symbol + '-' + bin_size
if key not in cache:
cache[key] = pd.DataFrame()
while cache[key].empty or cache[key].index[-1] < end_session:
cursor = start_session if cache[key].empty else cache[key].index[-1]
_res = _bitmex_rest(
'/trade/bucketed',
{
'binSize': bin_size,
'count': 500,
'symbol': symbol,
'startTime': cursor.isoformat(),
'endTime': end_session.isoformat(),
},
)
if not _res:
break
res = pd.DataFrame.from_dict(_res)
res.drop('symbol', axis=1, inplace=True)
res['timestamp'] = res['timestamp'].map(lambda x: pd.to_datetime(x, utc=True))
res.set_index('timestamp', inplace=True)
if not cache[key].empty:
cache[key] = cache[key].drop(index=cache[key].index[-1])
cache[key] = pd.concat([cache[key], res])
yield sid, cache[key]
def _get_minute_bars(
sid_map: List[Tuple[int, Text]],
start_session: pd.Timestamp,
end_session: pd.Timestamp,
cache: dataframe_cache,
) -> Iterator[Tuple[int, pd.DataFrame]]:
return _get_bars(sid_map, start_session, end_session, cache, '1m')
def _get_daily_bars(
sid_map: List[Tuple[int, Text]],
start_session: pd.Timestamp,
end_session: pd.Timestamp,
cache: dataframe_cache,
) -> Iterator[Tuple[int, pd.DataFrame]]:
return _get_bars(sid_map, start_session, end_session, cache, '1d')
def bitmex_bundle(symbols: List[Text]) -> Callable:
def ingest(
environ: Mapping,
asset_db_writer: AssetDBWriter,
minute_bar_writer: BcolzMinuteBarWriter,
daily_bar_writer: BcolzDailyBarWriter,
adjustment_writer: SQLiteAdjustmentWriter,
calendar: TradingCalendar,
start_session: pd.Timestamp,
end_session: pd.Timestamp,
cache: dataframe_cache,
show_progress: bool,
output_dir: Text,
) -> NoReturn:
sid_map = list(zip(range(len(symbols)), symbols))
asset_db_writer.write(
futures=_get_metadata(sid_map),
exchanges=pd.DataFrame(data=[['bitmex', 'UTC']], columns=['exchange', 'timezone']),
)
minute_bar_writer.write(
_get_minute_bars(sid_map, start_session, end_session, cache),
show_progress=show_progress,
)
daily_bar_writer.write(
_get_daily_bars(sid_map, start_session, end_session, cache),
show_progress=show_progress,
)
# adjustment_writer.write()
return ingest
register_calendar('bitmex', BitmexCalendar())
import argparse
import io
import os
import random
import re
import requests
import string
import sys
from decouple import config
from dotenv import find_dotenv, load_dotenv
from pathlib import Path
from typing import Any, Dict, List, Optional, TextIO
class ZipURL(object):
"""
Zipline URL Object
:param file_url: str: Zipline File Display URL
"""
__slots__ = ['url', 'raw']
def __init__(self, file_url: str):
self.url: str = file_url
self.raw: str = self._get_raw(file_url)
def __repr__(self):
return f'<url={self.url} raw={self.raw}>'
def __str__(self):
return self.url
@staticmethod
def _get_raw(url: str) -> str:
try:
s = url.split('/', 4)
return f"{s[0]}//{s[2]}/r/{s[4]}"
except Exception:
return ''
class Zipline(object):
"""
Zipline Python API
:param base_url: str: Zipline URL
:param kwargs: Zipline Headers
"""
allowed_headers = ['format', 'image_compression_percent', 'expires_at',
'password', 'zws', 'embed', 'max_views', 'uploadtext',
'authorization', 'no_json', 'x_zipline_filename',
'original_name', 'override_domain']
def __init__(self, base_url: str, **kwargs):
self.base_url: str = base_url.rstrip('/')
self._headers: Dict[str, str] = {}
for header, value in kwargs.items():
if header.lower() not in self.allowed_headers:
continue
if value is None:
continue
key = header.replace('_', '-').title()
self._headers[key] = str(value)
def send_file(self, file_name: str, file_object: TextIO,
overrides: Optional[dict] = None) -> ZipURL:
"""
Send File to Zipline
:param file_name: str: Name of File for files tuple
:param file_object: TextIO: File to Upload
:param overrides: dict: Header Overrides
:return: str: File URL
"""
url = self.base_url + '/api/upload'
files = {'file': (file_name, file_object)}
headers = self._headers | overrides if overrides else self._headers
r = requests.post(url, headers=headers, files=files)
r.raise_for_status()
return ZipURL(r.json()['files'][0])
def format_output(filename: str, url: ZipURL) -> str:
"""
Format URL Output
:param filename: str: Original or File Name
:param url: ZipURL: ZipURL to Format
:return: str: Formatted Output
"""
zipline_format = config('ZIPLINE_FORMAT', '{filename}\n{url}\n{raw_url}')
return zipline_format.format(filename=filename, url=url, raw_url=url.raw)
def gen_rand(length: Optional[int] = 4) -> str:
"""
    Generate a random string of the given length
:param length: int: Length of Random String
:return: str: Random String
"""
length: int = length if not length < 0 else 4
return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def get_default(values: List[str], default: Optional[Any] = None,
cast: Optional[type] = str, pre: Optional[str] = 'ZIPLINE_',
suf: Optional[str] = '') -> Optional[str]:
"""
Get Default Environment Variable from List of values
:param values: list: List of Values to Check
:param default: any: Default Value if None
:param cast: type: Type to Cast Value
:param pre: str: Environment Variable Prefix
:param suf: str: Environment Variable Suffix
:return: str: Environment Variable or None
"""
for value in values:
result = config(f'{pre}{value.upper()}{suf}', '', cast)
if result:
return result
return default
def setup(env_file: Path) -> None:
print('Setting up Environment File...')
url = input('Zipline URL: ').strip()
token = input('Zipline Authorization Token: ').strip()
if not url or not token:
raise ValueError('Missing URL or Token.')
output = f'ZIPLINE_URL={url}\nZIPLINE_TOKEN={token}\n'
embed = input('Enabled Embed? [Yes]/No: ').strip()
if not embed or embed.lower() not in ['n', 'o', 'no', 'noo']:
output += 'ZIPLINE_EMBED=true\n'
expire = input('Default Expire? [Blank for None]: ').strip().lower()
if expire:
match = re.search(r'^(\d+)(?:ms|s|m|h|d|w|y)$', expire)
if not match:
print(f'Warning: invalid expire format: {expire}. See --help')
else:
output += f'ZIPLINE_EXPIRE={expire}\n'
with open(env_file, 'w') as f:
f.write(output)
print(f'Setup Complete. Variables Saved to: {env_file}')
sys.exit(0)
def run() -> None:
zipline_file = '.zipline'
env_file = Path(os.path.expanduser('~')) / zipline_file
dotenv_path = env_file if os.path.isfile(env_file) else find_dotenv(filename=zipline_file)
env = load_dotenv(dotenv_path=dotenv_path)
parser = argparse.ArgumentParser(description='Zipline CLI.')
parser.add_argument('files', metavar='Files', type=str, nargs='*', help='Files to Upload.')
parser.add_argument('-u', '--url', type=str, default=get_default(['url']), help='Zipline URL.')
parser.add_argument('-a', '-t', '--authorization', '--token', type=str,
default=get_default(['token', 'authorization']),
help='Zipline Access Token for Authorization or ZIPLINE_TOKEN.')
parser.add_argument('-e', '-x', '--expires_at', '--expire', type=str, default=get_default(['expire', 'expire_at']),
help='Ex: 1d, 2w. See: https://zipline.diced.tech/docs/guides/upload-options#image-expiration')
parser.add_argument('--embed', action='store_true', default=get_default(['embed'], False, bool),
help='Enable Embeds on Uploads.')
parser.add_argument('-s', '--setup', action='store_true', default=False,
help='Automatic Setup of Environment Variables.')
args = parser.parse_args()
if args.setup:
setup(env_file)
if not env and not args.url and not args.authorization and not os.path.isfile(env_file):
env_file.touch()
print('First Run Detected, Entering Setup.')
setup(env_file)
if not args.url:
parser.print_help()
raise ValueError('Missing URL. Use --setup or specify --url')
if not args.authorization:
parser.print_help()
raise ValueError('Missing Token. Use --setup or specify --token')
if args.expires_at:
args.expires_at = args.expires_at.strip().lower()
match = re.search(r'^(\d+)(?:ms|s|m|h|d|w|y)$', args.expires_at)
if not match:
parser.print_help()
raise ValueError(f'Invalid Expire Format: {args.expires_at}.')
zipline = Zipline(args.url, **vars(args))
if not args.files:
content: str = sys.stdin.read().rstrip('\n') + '\n'
text_f: TextIO = io.StringIO(content)
name = f'{gen_rand(8)}.txt'
url: ZipURL = zipline.send_file(name, text_f)
print(format_output(name, url))
sys.exit(0)
exit_code = 1
for name in args.files:
if not os.path.isfile(name):
print(f'Warning: File Not Found: {name}')
continue
with open(name) as f:
# name, ext = os.path.splitext(os.path.basename(filename))
# ext = f'.{ext}' if ext else ''
# name = f'{name}-{gen_rand(8)}{ext}'
# url: str = zipline.send_file(name, f)
url: ZipURL = zipline.send_file(name, f)
print(format_output(name, url))
exit_code = 0
sys.exit(exit_code)
def main() -> None:
try:
run()
except KeyboardInterrupt:
sys.exit(1)
except Exception as error:
print('\nError: {}'.format(str(error)))
sys.exit(1)
if __name__ == '__main__':
    main()
[](https://discord.gg/wXy6m2X8wY)
[](https://github.com/cssnr/zipline-cli/issues)
[](https://app.codacy.com/gh/cssnr/zipline-cli/dashboard)
[](https://pypi.org/project/zipline-cli/)
[](https://drone.hosted-domains.com/cssnr/zipline-cli)
[](https://github.com/cssnr/zipline-cli)
# Zipline CLI
Python 3 CLI Uploader for Zipline.
Zipline CLI is currently functional and **Under Active Development**.
Please open a [Feature Request](https://github.com/cssnr/zipline-cli/discussions/new?category=feature-requests)
for new features and submit an [Issue](https://github.com/cssnr/zipline-cli/issues)
for any bugs you find.
* Zipline Docs: [https://zipline.diced.tech/](https://zipline.diced.tech/)
## Table of Contents
* [Quick Start](#quick-start)
* [Install](#install)
* [CLI Usage](#cli-usage)
* [Environment Variables](#environment-variables)
* [Python API Reference](#python-api-reference)
* [Additional Information](#additional-information)
## Quick Start
```bash
python3 -m pip install zipline-cli
zipline --setup
```
## Install
From PyPi using pip:
```bash
python3 -m pip install zipline-cli
```
From GitHub using pip:
```bash
python3 -m pip install git+https://github.com/cssnr/zipline-cli.git
```
From Source using pip:
```bash
git clone https://github.com/cssnr/zipline-cli.git
python3 -m pip install -e zipline-cli
```
From Source using setuptools:
```bash
git clone https://github.com/cssnr/zipline-cli.git
cd zipline-cli
python3 setup.py install
```
### Uninstall
To completely remove from any above install methods:
```bash
python3 -m pip uninstall zipline-cli
```
## CLI Usage
Setup Zipline URL and Token:
```bash
zipline --setup
```
Upload a File:
```bash
zipline test.txt
```
Upload Multiple Files:
```bash
zipline file1.txt file2.txt
```
Create Text File from Input:
```bash
cat test.txt | zipline
```
Create Text File from Clipboard:
```bash
zipline
# Paste or Type contents, followed by a newline, then Ctrl+D (Ctrl+Z on Windows)
```
## Environment Variables
Environment Variables are stored in the `.zipline` file in your home directory.
* Location: `~/.zipline` or `$HOME/.zipline`
| Variable | Description |
|----------------|------------------------------------------------------------------------------|
| ZIPLINE_URL | URL to your Zipline Instance |
| ZIPLINE_TOKEN | Authorization Token from Zipline |
| ZIPLINE_EMBED | Set this enable Embed on your uploads |
| ZIPLINE_FORMAT | Output Format after upload. Variables: `{filename}`, `{url}` and `{raw_url}` |
| ZIPLINE_EXPIRE | See: https://zipline.diced.tech/docs/guides/upload-options#image-expiration |
See [.zipline.example](.zipline.example) for an example `.zipline` file.
You may override them by exporting the variables in your current environment
or using the corresponding command line arguments. See `-h` for more info.
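
For reference, `ZIPLINE_FORMAT` is a plain Python `str.format` template. A quick illustration of how the default template renders (the URLs below are placeholders):

```python
# Mirrors what format_output() does with the default ZIPLINE_FORMAT template.
zipline_format = '{filename}\n{url}\n{raw_url}'
print(zipline_format.format(
    filename='test.txt',
    url='https://example.com/u/abcd.txt',      # placeholder display URL
    raw_url='https://example.com/r/abcd.txt',  # placeholder raw URL
))
```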
## Python API Reference
Initialize the class with your Zipline URL.
Everything else is a header passed as a kwarg.
The API does not yet support environment variables.
Zipline Token/Authorization is a header kwarg and can be passed as follows:
```python
from zipline import Zipline
zipline = Zipline('ZIPLINE_URL', authorization='ZIPLINE_TOKEN')
```
Upload a File
```python
from zipline import Zipline
zipline = Zipline('ZIPLINE_URL', authorization='ZIPLINE_TOKEN')
with open('text.txt') as f:
url = zipline.send_file('test.txt', f)
print(url)
```
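
`send_file` also accepts an optional `overrides` dict that is merged directly into the request headers for that single upload, so keys should be the final header names. A minimal sketch (the `Expires-At` value is only an example):

```python
from zipline import Zipline

zipline = Zipline('ZIPLINE_URL', authorization='ZIPLINE_TOKEN')
with open('test.txt') as f:
    # Headers passed here apply to this upload only and are used verbatim.
    url = zipline.send_file('test.txt', f, overrides={'Expires-At': '1d'})
print(url)
```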
## Additional Information
Still have questions, concerns, or comments?
* [Feature Requests](https://github.com/cssnr/zipline-cli/discussions/categories/feature-requests)
* [Helpdesk Q&A](https://github.com/cssnr/zipline-cli/discussions/categories/helpdesk-q-a)
* [Discord](https://discord.gg/wXy6m2X8wY)
> Zipline Guide: Hit That Fresh Nar Nar: [youtube.com/watch?v=bJHYo2aGWgE](https://www.youtube.com/watch?v=bJHYo2aGWgE)
from zipline.data.bundles import register, yahoo_equities
import requests
import os
from pandas_datareader.data import DataReader
"""
For ingest chinese history day bar from Yahoo
From both Shenzhen And Shanghai Stock Exchange
"""
from .all_stocks import get_all_stocks, get_cache_dir
def get_all_yahoo_stock_names(cache=True):
if cache==False:
print('Get All Stock List.....')
all_stocks = get_all_stocks(cache=cache)
return [full_code(code) for code in all_stocks.index]
def full_code(code):
if int(code[0]) >= 6:
return "%s.SS" % code
else:
return '%s.SZ' % code
def register_cn_bundle_from_yahoo(name, cache=True):
"""
register a new bundle of stocks from chinese market from yahoo
:param name: the name of bundle
:return: register result
"""
symbol_list = get_filtered_symbols(cache)
return register(
name,
yahoo_equities(dict(list(zip(symbol_list, symbol_list)))),
)
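# Illustrative usage only (a sketch): from ~/.zipline/extension.py one would
# typically register the bundle like this (the bundle name 'cn_yahoo' is
# arbitrary):
#
#     from zipline_cn_databundle.yahoo import register_cn_bundle_from_yahoo
#     register_cn_bundle_from_yahoo('cn_yahoo')
#
# after which `zipline ingest -b cn_yahoo` will ingest it.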
def check_code(code):
"""
    XXX: deprecated, use check_data_reader instead
:param code:
:return:
"""
checkurl = r'http://finance.yahoo.com/_finance_doubledown/api/resource/searchassist;gossipConfig=%7B%22url%22%3A%7B%22host%22%3A%22s.yimg.com%22%2C%22path%22%3A%22%2Fxb%2Fv6%2Ffinance%2Fautocomplete%22%2C%22query%22%3A%7B%22appid%22%3A%22yahoo.com%22%2C%22nresults%22%3A10%2C%22output%22%3A%22yjsonp%22%2C%22region%22%3A%22US%22%2C%22lang%22%3A%22en-US%22%7D%2C%22protocol%22%3A%22https%22%7D%2C%22isJSONP%22%3Atrue%2C%22queryKey%22%3A%22query%22%2C%22resultAccessor%22%3A%22ResultSet.Result%22%2C%22suggestionTitleAccessor%22%3A%22symbol%22%2C%22suggestionMeta%22%3A%5B%22symbol%22%2C%22name%22%2C%22exch%22%2C%22type%22%2C%22exchDisp%22%2C%22typeDisp%22%5D%7D;searchTerm={{CODE}}?bkt=3E0%2507canary&dev_info=0&device=desktop&intl=us&lang=en-US&partner=none®ion=US&site=finance&tz=America%2FLos_Angeles&ver=0.101.302&returnMeta=true'
referer = 'http://finance.yahoo.com/'
url = checkurl.replace('{{CODE}}', code)
response = requests.get(url, headers={
'Referer' : 'http://finance.yahoo.com/',
'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
})
try:
data = response.json()
items = data['data']['items']
if len(items) > 0:
for item in items:
if item['symbol'].upper() == code.upper():
print('.')
return True
except Exception as e:
print(str(e))
return False
print('x')
return False
def check_data_reader(code):
    """Keep only symbols whose daily data is actually available via pandas-datareader."""
    try:
        DataReader(code, 'yahoo')
    except Exception:
        print('%s not ok!' % code)
        return False
    print('%s ok!' % code)
    return True
def get_filtered_symbols(cache=True):
cache_dir = get_cache_dir()
file_path = os.path.join(cache_dir, 'symbols.txt')
if cache and os.path.isfile(file_path):
with open(file_path, 'r') as f:
content = f.read()
if content:
return content.split("\n")
symbols = get_all_yahoo_stock_names(cache)
if cache==False:
print('Check availablity from Yahoo...')
filtered_symbols = list(filter(check_data_reader, symbols))
print('cache output to %s' % file_path)
with open(file_path, 'w') as f:
f.write("\n".join(filtered_symbols))
print('done!')
return filtered_symbols
def zipline_cn_databundle_update():
print('Start to fetch data and update cache')
    get_filtered_symbols(cache=False)
import click
import tushare as ts
import pandas as pd
import os
from .squant_source import load_splits_and_dividends, zipline_splits_and_dividends
"""
Fetch stock information from tushare.
Among other things,
reading DataYes (通联) data requires setting the environment variable
ZIPLINE_TL_TOKEN
which holds the DataYes token.
"""
"""
ZIPLINE_TL_TOKEN = os.environ.get('ZIPLINE_TL_TOKEN')
if not ZIPLINE_TL_TOKEN:
raise Exception("no datayes token in envirionment ZIPLINE_TL_TOKEN, we need this token to fetch ajustments data")
ts.set_token('ZIPLINE_TL_TOKEN')
"""
def tushare_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
metadata, histories, symbol_map = get_basic_info()
    # Write basic asset metadata
asset_db_writer.write(metadata)
    # Prepare and write the daily bars
daily_bar_writer.write(get_hist_data(symbol_map, histories, start_session, end_session), show_progress=show_progress)
    # Splits and dividends, fetched from the squant source
splits, dividends = zipline_splits_and_dividends(symbol_map)
adjustment_writer.write(
splits=pd.concat(splits, ignore_index=True),
dividends=pd.concat(dividends, ignore_index=True),
)
def get_basic_info(show_progress=True):
    # First, fetch the stock list
if show_progress:
click.echo("获取股票基础信息")
ts_symbols = ts.get_stock_basics()
if show_progress:
click.echo("写入股票列表")
symbols = []
histories = {}
    # Fetch data for each stock
i = 0
total = len(ts_symbols)
for index, row in ts_symbols.iterrows():
i = i +1
if i > 10:
break
srow = {}
        # Fetch historical quotes for this symbol
click.echo("正在获取代码%s(%s)的历史行情信息 (%d/%d)" % (index, row['name'], i, total))
histories[index] = ts.get_hist_data(index)
srow['start_date'] = histories[index].index[-1]
srow['end_date'] = histories[index].index[0]
srow['symbol'] = index
srow['asset_name'] = row['name']
symbols.append(srow)
df_symbols = pd.DataFrame(data=symbols).sort_values('symbol')
symbol_map = pd.DataFrame.copy(df_symbols.symbol)
# fix the symbol exchange info
df = df_symbols.apply(func=convert_symbol_series, axis=1)
return df, histories, symbol_map
def symbol_to_exchange(symbol):
isymbol = int(symbol)
if (isymbol>=600000):
return symbol + ".SS", "SSE"
else:
return symbol + ".SZ", "SZSE"
def convert_symbol_series(s):
symbol, e = symbol_to_exchange(s['symbol'])
s['symbol'] = symbol
s['exchange'] = e
return s
def get_hist_data(symbol_map, histories, start_session, end_session):
for sid, index in symbol_map.iteritems():
history = histories[index]
"""
writer needs format with
[index], open, close, high, low, volume
so we do not need to change the format from tushare
but we need resort it
"""
yield sid, history.sort_index()
if __name__ == '__main__':
df_symbols, histories, symbol_map = get_basic_info()
print(df_symbols)
"""
for h,df in histories.items():
print(df)
""" | zipline-cn-databundle | /zipline-cn-databundle-0.5.tar.gz/zipline-cn-databundle-0.5/zipline_cn_databundle/tushare_source.py | tushare_source.py |
from squant.data.stock import file_parser
from squant.zipline.datasource import get_symbol_list
import os
import datetime
from .tdx.reader import TdxReader, TdxFileNotFoundException
import pandas as pd
"""
Squant is a private library that parses data from our private source.
It reads data from binary files.
via RainX<[email protected]>
Python 3 only
"""
# The Shanghai (CQCX_SH) and Shenzhen (CQCX_SZ) CQCX file paths must be configured
CQCX_SH = os.environ.get("CQCX_SH")
CQCX_SZ = os.environ.get("CQCX_SZ")
TDX_DIR = os.environ.get("TDX_DIR")
if not CQCX_SH or not CQCX_SZ:
raise Exception("need set cqcx file on CQCX_SH CQCX_SZ")
if not os.path.isfile(CQCX_SH) \
or not os.path.isfile(CQCX_SZ):
raise Exception("setting CQCX_SH, CQCX_SZ path is not correct")
if not TDX_DIR:
raise Exception("Please Setting TDX data dir")
CQCX_LIST = (CQCX_SH, CQCX_SZ)
def load_splits_and_dividends():
"""
    Get all ex-rights / ex-dividend information. Given how the zipline platform models adjustments, rights-issue (配股) records are ignored.
:return:
"""
splits = {}
dividends = {}
for CQCX in CQCX_LIST:
cqcx_data = file_parser.get_cqcx(CQCX.encode("utf-8"))
for row in cqcx_data:
code = str(row['stock']).zfill(6)
            # sgVal: number of bonus shares granted per 1000 shares held
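            # e.g. sgVal == 300 means 300 bonus shares per 1,000 held, so the
            # split ratio written to zipline is 1000 / (1000 + 300) ≈ 0.769
            # (one old share becomes 1.3 shares).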
if row['sgVal'] != 0:
if code not in splits.keys():
splits[code] = []
splits[code].append({
'effective_date' : int_to_date(row['date']),
'ratio' : 1000 / (1000 + row['sgVal']),
})
if row['pxVal'] != 0:
if code not in dividends.keys():
dividends[code] = []
dividends[code].append({
'amount' : row['pxVal'] / 1000,
'ex_date' : int_to_date(row['date']),
})
return splits, dividends
def zipline_splits_and_dividends(symbol_map):
raw_splits, raw_dividends = load_splits_and_dividends()
splits = []
dividends = []
for sid, code in symbol_map.iteritems():
if code in raw_splits:
split = pd.DataFrame(data=raw_splits[code])
split['sid'] = sid
split.index = split['effective_date'] = pd.DatetimeIndex(split['effective_date'])
splits.append(split)
if code in raw_dividends:
dividend = pd.DataFrame(data = raw_dividends[code])
dividend['sid'] = sid
dividend['record_date'] = dividend['declared_date'] = dividend['pay_date'] = pd.NaT
dividend.index = dividend['ex_date'] = pd.DatetimeIndex(dividend['ex_date'])
dividends.append(dividend)
return splits, dividends
def int_to_date(d):
d = str(d)
return datetime.date(int(d[:4]), int(d[4:6]), int(d[6:]))
def squant_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
tdx_reader = TdxReader(TDX_DIR)
symbol_df = get_symbol_list()
    # Keep only stocks that are not suspended
symbol_df = symbol_df[symbol_df['status'] == False]
    # The metadata, split/dividend and quote data come from different sources and may disagree, so everything below is keyed off a single symbol map
symbol_map = symbol_df.simplesymbol
    # Update the start/end date info
def update_start_and_end_date(s):
start_date = start_session.replace(tzinfo=None)
end_date = end_session.replace(tzinfo=None)
if s.start_date < start_date:
s.start_date = start_date
if s.end_date == pd.Timestamp('1900-01-01') or s.end_date is pd.NaT:
s.end_date = end_date
return s
symbol_df = symbol_df.apply(func=update_start_and_end_date, axis=1)
    # Write asset metadata
asset_db_writer.write(symbol_df)
    # Write the daily bar data files
daily_bar_writer.write(get_hist_data(symbol_df, symbol_map, tdx_reader, start_session, end_session, calendar),
show_progress=show_progress)
    # splits and dividends
splits, dividends = zipline_splits_and_dividends(symbol_map)
    # hack for tdx data: the tdx source for the Shenzhen market has no data before 1991-12-23
splits_df = pd.concat(splits, ignore_index=True)
dividends_df = pd.concat(dividends, ignore_index=True)
splits_df= splits_df.loc[splits_df['effective_date'] > start_session]
dividends_df = dividends_df.loc[dividends_df['ex_date'] > start_session]
adjustment_writer.write(
splits=splits_df,
dividends=dividends_df,
)
def get_hist_data(symbol_df, symbol_map, tdx_reader, start_session, end_session, calendar):
for sid, index in symbol_map.iteritems():
exchagne = ''
if symbol_df.loc[sid]['exchange'] == 'SZSE':
exchagne = 'sz'
elif symbol_df.loc[sid]['exchange'] == 'SSE':
exchagne = 'sh'
try:
history = tdx_reader.get_df(index, exchagne)
#print('max-min for %s is %s : %s', (index, history.index[0], history.index[-1]))
            # Skip symbols that have no quotes within the requested range
if history.index[0] > pd.Timestamp((end_session.date())):
continue
except TdxFileNotFoundException as e:
#print('symbol %s file no found, ignore it ' % index)
continue
# history.to_pickle('/tmp/debug.pickle')
#reindex
sessions = calendar.sessions_in_range(start_session, end_session)
history = history.reindex(
sessions.tz_localize(None),
copy=False,
).fillna(0.0)
yield sid, history.sort_index()
pass
if __name__ == '__main__':
import tushare as ts
import pandas as pd
ts_symbols = ts.get_stock_basics()
symbols = []
    # Fetch stock data
i = 0
total = len(ts_symbols)
for index, row in ts_symbols.iterrows():
i = i +1
if i > 10:
break
srow = {}
srow['t'] = 1
srow['symbol'] = index
srow['asset_name'] = row['name']
symbols.append(srow)
df_symbols = pd.DataFrame(data=symbols).sort_values('symbol')
symbol_map = pd.DataFrame.copy(df_symbols.symbol)
raw_splits, raw_dividends = load_splits_and_dividends()
splits = []
dividends = []
for sid, code in symbol_map.iteritems():
if code in raw_splits:
split = pd.DataFrame(data=raw_splits[code])
split['sid'] = sid
split.index = split['effective_date'] = pd.DatetimeIndex(split['effective_date'])
splits.append(split)
if code in raw_dividends:
dividend = pd.DataFrame(data = raw_dividends[code])
dividend['sid'] = sid
dividend['record_date'] = dividend['declared_date'] = dividend['pay_date'] = pd.NaT
dividend.index = dividend['ex_date'] = pd.DatetimeIndex(dividend['ex_date'])
dividends.append(dividend)
print(pd.concat(splits, ignore_index=True))
    print(pd.concat(dividends, ignore_index=True))
import os
import logbook
import pandas as pd
import pytz
from six import iteritems
from .index_list import get_list
from zipline.utils.paths import (
cache_root,
data_root,
)
import pandas as pd
from cn_stock_holidays.zipline.default_calendar import shsz_calendar
from cn_treasury_curve.data import get_zipline_format
import requests
import shutil
logger = logbook.Logger('Loader')
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
#ignore first date check
return last >= last_date
def load_market_data(trading_day=None, trading_days=None, bm_symbol='000001.SS', trading_day_before=2):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
        Symbol for the benchmark index to load. Defaults to '000001.SS',
        the ticker for the SSE Composite Index.
    trading_day_before : int, optional
        Number of trading days to look back from the most recent completed
        session when deciding how fresh the cached data must be. Defaults to 2.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
if trading_day is None:
trading_day = shsz_calendar.trading_day
if trading_days is None:
trading_days = shsz_calendar.all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - trading_day_before]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
try:
symbol_list = get_list().symbol.values
if str(symbol).upper() in symbol_list:
get_url = 'https://raw.githubusercontent.com/rainx/cn_index_benchmark_for_zipline/master/data/%s_benchmark.csv' % str(symbol).upper()
print("fetch data via url : %s " % get_url)
response = requests.get(get_url)
with open(path, 'wb') as fileobj:
fileobj.write(response.content)
print("length of response is : %s" % len(response.content))
data = pd.Series.from_csv(path).tz_localize('UTC')
else:
#logger.exception('your bm_symbol not in existing symbol list')
raise Exception('your bm_symbol not in existing symbol list')
except (OSError, IOError):
logger.exception('failed to cache the new benchmark returns')
raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
filename = "cn_treasury_curves.csv"
path = get_data_filepath(filename)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
try:
data = get_zipline_format()
data.to_csv(path)
#reload it and convert to UTC tz
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
except (OSError, IOError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
    return data
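# Illustrative only (not part of the original module): a minimal smoke test of
# the loader, assuming the benchmark CSV on GitHub and the cn_treasury_curve
# data source are both reachable from this machine.
if __name__ == '__main__':
    benchmark_returns, treasury_curves = load_market_data()
    print(benchmark_returns.tail())
    print(treasury_curves.tail())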
import pandas as pd
from io import StringIO
import click
import os
import sys
JSON_DATA = """
[
{"name": "上证指数", "symbol": "000001.XSHG"},
{"name": "A股指数", "symbol": "000002.XSHG"},
{"name": "B股指数", "symbol": "000003.XSHG"},
{"name": "工业指数", "symbol": "000004.XSHG"},
{"name": "商业指数", "symbol": "000005.XSHG"},
{"name": "地产指数", "symbol": "000006.XSHG"},
{"name": "公用指数", "symbol": "000007.XSHG"},
{"name": "综合指数", "symbol": "000008.XSHG"},
{"name": "上证380", "symbol": "000009.XSHG"},
{"name": "上证180", "symbol": "000010.XSHG"},
{"name": "基金指数", "symbol": "000011.XSHG"},
{"name": "国债指数", "symbol": "000012.XSHG"},
{"name": "上证企业债指数", "symbol": "000013.XSHG"},
{"name": "红利指数", "symbol": "000015.XSHG"},
{"name": "上证50", "symbol": "000016.XSHG"},
{"name": "新综指", "symbol": "000017.XSHG"},
{"name": "180金融", "symbol": "000018.XSHG"},
{"name": "治理指数", "symbol": "000019.XSHG"},
{"name": "中型综指", "symbol": "000020.XSHG"},
{"name": "180治理", "symbol": "000021.XSHG"},
{"name": "上证公司债指数", "symbol": "000022.XSHG"},
{"name": "180基建", "symbol": "000025.XSHG"},
{"name": "180资源", "symbol": "000026.XSHG"},
{"name": "180运输", "symbol": "000027.XSHG"},
{"name": "180成长", "symbol": "000028.XSHG"},
{"name": "180价值", "symbol": "000029.XSHG"},
{"name": "180R成长", "symbol": "000030.XSHG"},
{"name": "180R价值", "symbol": "000031.XSHG"},
{"name": "上证能源", "symbol": "000032.XSHG"},
{"name": "上证材料", "symbol": "000033.XSHG"},
{"name": "上证工业", "symbol": "000034.XSHG"},
{"name": "上证可选", "symbol": "000035.XSHG"},
{"name": "上证消费", "symbol": "000036.XSHG"},
{"name": "上证医药", "symbol": "000037.XSHG"},
{"name": "上证金融", "symbol": "000038.XSHG"},
{"name": "上证信息", "symbol": "000039.XSHG"},
{"name": "上证电信", "symbol": "000040.XSHG"},
{"name": "上证公用", "symbol": "000041.XSHG"},
{"name": "上证央企", "symbol": "000042.XSHG"},
{"name": "超大盘", "symbol": "000043.XSHG"},
{"name": "上证中盘", "symbol": "000044.XSHG"},
{"name": "上证小盘", "symbol": "000045.XSHG"},
{"name": "上证中小", "symbol": "000046.XSHG"},
{"name": "上证全指", "symbol": "000047.XSHG"},
{"name": "责任指数", "symbol": "000048.XSHG"},
{"name": "上证民企", "symbol": "000049.XSHG"},
{"name": "50等权", "symbol": "000050.XSHG"},
{"name": "180等权", "symbol": "000051.XSHG"},
{"name": "50基本", "symbol": "000052.XSHG"},
{"name": "180基本", "symbol": "000053.XSHG"},
{"name": "上证海外", "symbol": "000054.XSHG"},
{"name": "上证地企", "symbol": "000055.XSHG"},
{"name": "上证国企", "symbol": "000056.XSHG"},
{"name": "全指成长", "symbol": "000057.XSHG"},
{"name": "全指价值", "symbol": "000058.XSHG"},
{"name": "全R成长", "symbol": "000059.XSHG"},
{"name": "全R价值", "symbol": "000060.XSHG"},
{"name": "沪企债30", "symbol": "000061.XSHG"},
{"name": "上证沪企", "symbol": "000062.XSHG"},
{"name": "上证周期", "symbol": "000063.XSHG"},
{"name": "非周期", "symbol": "000064.XSHG"},
{"name": "上证龙头", "symbol": "000065.XSHG"},
{"name": "上证商品", "symbol": "000066.XSHG"},
{"name": "上证新兴", "symbol": "000067.XSHG"},
{"name": "上证资源", "symbol": "000068.XSHG"},
{"name": "消费80", "symbol": "000069.XSHG"},
{"name": "能源等权", "symbol": "000070.XSHG"},
{"name": "材料等权", "symbol": "000071.XSHG"},
{"name": "工业等权", "symbol": "000072.XSHG"},
{"name": "可选等权", "symbol": "000073.XSHG"},
{"name": "消费等权", "symbol": "000074.XSHG"},
{"name": "医药等权", "symbol": "000075.XSHG"},
{"name": "金融等权", "symbol": "000076.XSHG"},
{"name": "信息等权", "symbol": "000077.XSHG"},
{"name": "电信等权", "symbol": "000078.XSHG"},
{"name": "公用等权", "symbol": "000079.XSHG"},
{"name": "上证流通", "symbol": "000090.XSHG"},
{"name": "沪财中小", "symbol": "000091.XSHG"},
{"name": "资源50", "symbol": "000092.XSHG"},
{"name": "180分层", "symbol": "000093.XSHG"},
{"name": "上证上游", "symbol": "000094.XSHG"},
{"name": "上证中游", "symbol": "000095.XSHG"},
{"name": "上证下游", "symbol": "000096.XSHG"},
{"name": "高端装备", "symbol": "000097.XSHG"},
{"name": "上证F200", "symbol": "000098.XSHG"},
{"name": "上证F300", "symbol": "000099.XSHG"},
{"name": "上证F500", "symbol": "000100.XSHG"},
{"name": "5年信用", "symbol": "000101.XSHG"},
{"name": "沪投资品", "symbol": "000102.XSHG"},
{"name": "沪消费品", "symbol": "000103.XSHG"},
{"name": "380能源", "symbol": "000104.XSHG"},
{"name": "380材料", "symbol": "000105.XSHG"},
{"name": "380工业", "symbol": "000106.XSHG"},
{"name": "380可选", "symbol": "000107.XSHG"},
{"name": "380消费", "symbol": "000108.XSHG"},
{"name": "380医药", "symbol": "000109.XSHG"},
{"name": "380金融", "symbol": "000110.XSHG"},
{"name": "380信息", "symbol": "000111.XSHG"},
{"name": "380电信", "symbol": "000112.XSHG"},
{"name": "380公用", "symbol": "000113.XSHG"},
{"name": "持续产业", "symbol": "000114.XSHG"},
{"name": "380等权", "symbol": "000115.XSHG"},
{"name": "信用100", "symbol": "000116.XSHG"},
{"name": "380成长", "symbol": "000117.XSHG"},
{"name": "380价值", "symbol": "000118.XSHG"},
{"name": "380R成长", "symbol": "000119.XSHG"},
{"name": "380R价值", "symbol": "000120.XSHG"},
{"name": "医药主题", "symbol": "000121.XSHG"},
{"name": "农业主题", "symbol": "000122.XSHG"},
{"name": "180动态", "symbol": "000123.XSHG"},
{"name": "180稳定", "symbol": "000125.XSHG"},
{"name": "消费50", "symbol": "000126.XSHG"},
{"name": "380基本", "symbol": "000128.XSHG"},
{"name": "180波动", "symbol": "000129.XSHG"},
{"name": "380波动", "symbol": "000130.XSHG"},
{"name": "上证高新", "symbol": "000131.XSHG"},
{"name": "上证100", "symbol": "000132.XSHG"},
{"name": "上证150", "symbol": "000133.XSHG"},
{"name": "上证银行", "symbol": "000134.XSHG"},
{"name": "180高贝", "symbol": "000135.XSHG"},
{"name": "180低贝", "symbol": "000136.XSHG"},
{"name": "380高贝", "symbol": "000137.XSHG"},
{"name": "380低贝", "symbol": "000138.XSHG"},
{"name": "上证转债", "symbol": "000139.XSHG"},
{"name": "380动态", "symbol": "000141.XSHG"},
{"name": "380稳定", "symbol": "000142.XSHG"},
{"name": "优势资源", "symbol": "000145.XSHG"},
{"name": "优势制造", "symbol": "000146.XSHG"},
{"name": "优势消费", "symbol": "000147.XSHG"},
{"name": "消费领先", "symbol": "000148.XSHG"},
{"name": "180红利", "symbol": "000149.XSHG"},
{"name": "380红利", "symbol": "000150.XSHG"},
{"name": "上国红利", "symbol": "000151.XSHG"},
{"name": "上央红利", "symbol": "000152.XSHG"},
{"name": "上民红利", "symbol": "000153.XSHG"},
{"name": "市值百强", "symbol": "000155.XSHG"},
{"name": "上证环保", "symbol": "000158.XSHG"},
{"name": "上证沪股通指数", "symbol": "000159.XSHG"},
{"name": "上证一带一路主题指数", "symbol": "000160.XSHG"},
{"name": "上证中国制造2025主题指数", "symbol": "000161.XSHG"},
{"name": "上证互联网+主题指数", "symbol": "000162.XSHG"},
{"name": "沪深300", "symbol": "000300.XSHG"},
{"name": "资源80", "symbol": "000801.XSHG"},
{"name": "500沪市", "symbol": "000802.XSHG"},
{"name": "300波动", "symbol": "000803.XSHG"},
{"name": "500波动", "symbol": "000804.XSHG"},
{"name": "A股资源", "symbol": "000805.XSHG"},
{"name": "消费服务", "symbol": "000806.XSHG"},
{"name": "食品饮料", "symbol": "000807.XSHG"},
{"name": "医药生物", "symbol": "000808.XSHG"},
{"name": "细分农业", "symbol": "000809.XSHG"},
{"name": "细分能源", "symbol": "000810.XSHG"},
{"name": "细分有色", "symbol": "000811.XSHG"},
{"name": "细分机械", "symbol": "000812.XSHG"},
{"name": "细分化工", "symbol": "000813.XSHG"},
{"name": "细分医药", "symbol": "000814.XSHG"},
{"name": "细分食品", "symbol": "000815.XSHG"},
{"name": "细分地产", "symbol": "000816.XSHG"},
{"name": "兴证海峡", "symbol": "000817.XSHG"},
{"name": "细分金融", "symbol": "000818.XSHG"},
{"name": "有色金属", "symbol": "000819.XSHG"},
{"name": "煤炭指数", "symbol": "000820.XSHG"},
{"name": "300红利", "symbol": "000821.XSHG"},
{"name": "500红利", "symbol": "000822.XSHG"},
{"name": "中证800有色金属指数", "symbol": "000823.XSHG"},
{"name": "国企红利", "symbol": "000824.XSHG"},
{"name": "央企红利", "symbol": "000825.XSHG"},
{"name": "民企红利", "symbol": "000826.XSHG"},
{"name": "中证环保", "symbol": "000827.XSHG"},
{"name": "300高贝", "symbol": "000828.XSHG"},
{"name": "300低贝", "symbol": "000829.XSHG"},
{"name": "500高贝", "symbol": "000830.XSHG"},
{"name": "500低贝", "symbol": "000831.XSHG"},
{"name": "中证转债", "symbol": "000832.XSHG"},
{"name": "中高企债", "symbol": "000833.XSHG"},
{"name": "创业价值", "symbol": "000838.XSHG"},
{"name": "浙企综指", "symbol": "000839.XSHG"},
{"name": "浙江民企", "symbol": "000840.XSHG"},
{"name": "800医药", "symbol": "000841.XSHG"},
{"name": "800等权", "symbol": "000842.XSHG"},
{"name": "300动态", "symbol": "000843.XSHG"},
{"name": "300稳定", "symbol": "000844.XSHG"},
{"name": "ESG100", "symbol": "000846.XSHG"},
{"name": "中证腾安价值100指数", "symbol": "000847.XSHG"},
{"name": "沪深300非银行金融指数", "symbol": "000849.XSHG"},
{"name": "沪深300有色金属指数", "symbol": "000850.XSHG"},
{"name": "中证百度百发策略100指数", "symbol": "000851.XSHG"},
{"name": "中证1000指数", "symbol": "000852.XSHG"},
{"name": "中证申万一带一路主题投资指数", "symbol": "000853.XSHG"},
{"name": "央视财经500指数", "symbol": "000855.XSHG"},
{"name": "小康指数", "symbol": "000901.XSHG"},
{"name": "中证流通", "symbol": "000902.XSHG"},
{"name": "中证100", "symbol": "000903.XSHG"},
{"name": "中证200", "symbol": "000904.XSHG"},
{"name": "中证500", "symbol": "000905.XSHG"},
{"name": "中证800", "symbol": "000906.XSHG"},
{"name": "中证700", "symbol": "000907.XSHG"},
{"name": "300能源", "symbol": "000908.XSHG"},
{"name": "300材料", "symbol": "000909.XSHG"},
{"name": "300工业", "symbol": "000910.XSHG"},
{"name": "300可选", "symbol": "000911.XSHG"},
{"name": "300消费", "symbol": "000912.XSHG"},
{"name": "300医药", "symbol": "000913.XSHG"},
{"name": "300金融", "symbol": "000914.XSHG"},
{"name": "300信息", "symbol": "000915.XSHG"},
{"name": "300电信", "symbol": "000916.XSHG"},
{"name": "300公用", "symbol": "000917.XSHG"},
{"name": "300成长", "symbol": "000918.XSHG"},
{"name": "300价值", "symbol": "000919.XSHG"},
{"name": "300R成长", "symbol": "000920.XSHG"},
{"name": "300R价值", "symbol": "000921.XSHG"},
{"name": "中证红利", "symbol": "000922.XSHG"},
{"name": "公司债", "symbol": "000923.XSHG"},
{"name": "基本面50", "symbol": "000925.XSHG"},
{"name": "中证央企", "symbol": "000926.XSHG"},
{"name": "央企100", "symbol": "000927.XSHG"},
{"name": "中证能源", "symbol": "000928.XSHG"},
{"name": "中证材料", "symbol": "000929.XSHG"},
{"name": "中证工业", "symbol": "000930.XSHG"},
{"name": "中证可选", "symbol": "000931.XSHG"},
{"name": "中证消费", "symbol": "000932.XSHG"},
{"name": "中证医药", "symbol": "000933.XSHG"},
{"name": "中证金融", "symbol": "000934.XSHG"},
{"name": "中证信息", "symbol": "000935.XSHG"},
{"name": "中证电信", "symbol": "000936.XSHG"},
{"name": "中证公用", "symbol": "000937.XSHG"},
{"name": "中证民企", "symbol": "000938.XSHG"},
{"name": "民企200", "symbol": "000939.XSHG"},
{"name": "财富大盘", "symbol": "000940.XSHG"},
{"name": "新能源", "symbol": "000941.XSHG"},
{"name": "内地消费", "symbol": "000942.XSHG"},
{"name": "内地基建", "symbol": "000943.XSHG"},
{"name": "内地资源", "symbol": "000944.XSHG"},
{"name": "内地运输", "symbol": "000945.XSHG"},
{"name": "内地金融", "symbol": "000946.XSHG"},
{"name": "内地银行", "symbol": "000947.XSHG"},
{"name": "内地地产", "symbol": "000948.XSHG"},
{"name": "内地农业", "symbol": "000949.XSHG"},
{"name": "300基建", "symbol": "000950.XSHG"},
{"name": "300银行", "symbol": "000951.XSHG"},
{"name": "300地产", "symbol": "000952.XSHG"},
{"name": "中证地企", "symbol": "000953.XSHG"},
{"name": "地企100", "symbol": "000954.XSHG"},
{"name": "中证国企", "symbol": "000955.XSHG"},
{"name": "国企200", "symbol": "000956.XSHG"},
{"name": "300运输", "symbol": "000957.XSHG"},
{"name": "创业成长", "symbol": "000958.XSHG"},
{"name": "银河99", "symbol": "000959.XSHG"},
{"name": "中证龙头", "symbol": "000960.XSHG"},
{"name": "中证上游", "symbol": "000961.XSHG"},
{"name": "中证中游", "symbol": "000962.XSHG"},
{"name": "中证下游", "symbol": "000963.XSHG"},
{"name": "中证新兴", "symbol": "000964.XSHG"},
{"name": "基本200", "symbol": "000965.XSHG"},
{"name": "基本400", "symbol": "000966.XSHG"},
{"name": "基本600", "symbol": "000967.XSHG"},
{"name": "300周期", "symbol": "000968.XSHG"},
{"name": "300非周", "symbol": "000969.XSHG"},
{"name": "ESG40", "symbol": "000970.XSHG"},
{"name": "等权90", "symbol": "000971.XSHG"},
{"name": "300沪市", "symbol": "000972.XSHG"},
{"name": "技术领先", "symbol": "000973.XSHG"},
{"name": "中证800金融指数", "symbol": "000974.XSHG"},
{"name": "钱江30", "symbol": "000975.XSHG"},
{"name": "新华金牛", "symbol": "000976.XSHG"},
{"name": "内地低碳", "symbol": "000977.XSHG"},
{"name": "医药100", "symbol": "000978.XSHG"},
{"name": "大宗商品", "symbol": "000979.XSHG"},
{"name": "中证超大", "symbol": "000980.XSHG"},
{"name": "300分层", "symbol": "000981.XSHG"},
{"name": "500等权", "symbol": "000982.XSHG"},
{"name": "智能资产", "symbol": "000983.XSHG"},
{"name": "300等权", "symbol": "000984.XSHG"},
{"name": "中证全指", "symbol": "000985.XSHG"},
{"name": "全指能源", "symbol": "000986.XSHG"},
{"name": "全指材料", "symbol": "000987.XSHG"},
{"name": "全指工业", "symbol": "000988.XSHG"},
{"name": "全指可选", "symbol": "000989.XSHG"},
{"name": "全指消费", "symbol": "000990.XSHG"},
{"name": "全指医药", "symbol": "000991.XSHG"},
{"name": "全指金融", "symbol": "000992.XSHG"},
{"name": "全指信息", "symbol": "000993.XSHG"},
{"name": "全指电信", "symbol": "000994.XSHG"},
{"name": "全指公用", "symbol": "000995.XSHG"},
{"name": "领先行业", "symbol": "000996.XSHG"},
{"name": "大消费", "symbol": "000997.XSHG"},
{"name": "中证TMT", "symbol": "000998.XSHG"},
{"name": "证两岸三地500指数", "symbol": "000999.XSHG"},
{"name": "深证成指", "symbol": "399001.XSHE"},
{"name": "深成指R", "symbol": "399002.XSHE"},
{"name": "成份B指", "symbol": "399003.XSHE"},
{"name": "深证100R", "symbol": "399004.XSHE"},
{"name": "中小板指", "symbol": "399005.XSHE"},
{"name": "创业板指", "symbol": "399006.XSHE"},
{"name": "深证300", "symbol": "399007.XSHE"},
{"name": "中小300", "symbol": "399008.XSHE"},
{"name": "深证200", "symbol": "399009.XSHE"},
{"name": "深证700", "symbol": "399010.XSHE"},
{"name": "深证1000", "symbol": "399011.XSHE"},
{"name": "创业300", "symbol": "399012.XSHE"},
{"name": "深市精选", "symbol": "399013.XSHE"},
{"name": "深证中小创新指数", "symbol": "399015.XSHE"},
{"name": "新指数", "symbol": "399100.XSHE"},
{"name": "中小板综", "symbol": "399101.XSHE"},
{"name": "创业板综合指数", "symbol": "399102.XSHE"},
{"name": "乐富指数", "symbol": "399103.XSHE"},
{"name": "深证综指", "symbol": "399106.XSHE"},
{"name": "深证A指", "symbol": "399107.XSHE"},
{"name": "深证B指", "symbol": "399108.XSHE"},
{"name": "农林指数", "symbol": "399231.XSHE"},
{"name": "采矿指数", "symbol": "399232.XSHE"},
{"name": "制造指数", "symbol": "399233.XSHE"},
{"name": "水电指数", "symbol": "399234.XSHE"},
{"name": "建筑指数", "symbol": "399235.XSHE"},
{"name": "批零指数", "symbol": "399236.XSHE"},
{"name": "运输指数", "symbol": "399237.XSHE"},
{"name": "餐饮指数", "symbol": "399238.XSHE"},
{"name": "IT指数", "symbol": "399239.XSHE"},
{"name": "金融指数", "symbol": "399240.XSHE"},
{"name": "地产指数", "symbol": "399241.XSHE"},
{"name": "商务指数", "symbol": "399242.XSHE"},
{"name": "科研指数", "symbol": "399243.XSHE"},
{"name": "公共指数", "symbol": "399244.XSHE"},
{"name": "文化指数", "symbol": "399248.XSHE"},
{"name": "综企指数", "symbol": "399249.XSHE"},
{"name": "深证中高等级信用债指数", "symbol": "399298.XSHE"},
{"name": "深证中低等级信用债指数", "symbol": "399299.XSHE"},
{"name": "沪深300", "symbol": "399300.XSHE"},
{"name": "深信用债", "symbol": "399301.XSHE"},
{"name": "深公司债", "symbol": "399302.XSHE"},
{"name": "国证2000", "symbol": "399303.XSHE"},
{"name": "基金指数", "symbol": "399305.XSHE"},
{"name": "深证ETF", "symbol": "399306.XSHE"},
{"name": "深证转债", "symbol": "399307.XSHE"},
{"name": "国证50", "symbol": "399310.XSHE"},
{"name": "国证1000", "symbol": "399311.XSHE"},
{"name": "国证300", "symbol": "399312.XSHE"},
{"name": "巨潮100", "symbol": "399313.XSHE"},
{"name": "巨潮大盘", "symbol": "399314.XSHE"},
{"name": "巨潮中盘", "symbol": "399315.XSHE"},
{"name": "巨潮小盘", "symbol": "399316.XSHE"},
{"name": "国证A指", "symbol": "399317.XSHE"},
{"name": "巨潮B股指数", "symbol": "399318.XSHE"},
{"name": "资源优势", "symbol": "399319.XSHE"},
{"name": "国证服务", "symbol": "399320.XSHE"},
{"name": "国证红利", "symbol": "399321.XSHE"},
{"name": "国证治理", "symbol": "399322.XSHE"},
{"name": "深证红利", "symbol": "399324.XSHE"},
{"name": "成长40", "symbol": "399326.XSHE"},
{"name": "深证治理", "symbol": "399328.XSHE"},
{"name": "深证100", "symbol": "399330.XSHE"},
{"name": "深证创新", "symbol": "399332.XSHE"},
{"name": "中小板R", "symbol": "399333.XSHE"},
{"name": "深证央企", "symbol": "399335.XSHE"},
{"name": "深证民营", "symbol": "399337.XSHE"},
{"name": "深证科技", "symbol": "399339.XSHE"},
{"name": "深证责任", "symbol": "399341.XSHE"},
{"name": "深证300R", "symbol": "399344.XSHE"},
{"name": "深证成长", "symbol": "399346.XSHE"},
{"name": "深证价值", "symbol": "399348.XSHE"},
{"name": "皖江30", "symbol": "399350.XSHE"},
{"name": "深报指数", "symbol": "399351.XSHE"},
{"name": "深报综指", "symbol": "399352.XSHE"},
{"name": "国证物流", "symbol": "399353.XSHE"},
{"name": "长三角", "symbol": "399355.XSHE"},
{"name": "珠三角", "symbol": "399356.XSHE"},
{"name": "环渤海", "symbol": "399357.XSHE"},
{"name": "泰达指数", "symbol": "399358.XSHE"},
{"name": "国证基建", "symbol": "399359.XSHE"},
{"name": "国证装备", "symbol": "399360.XSHE"},
{"name": "国证商业", "symbol": "399361.XSHE"},
{"name": "国证民营", "symbol": "399362.XSHE"},
{"name": "计算机指", "symbol": "399363.XSHE"},
{"name": "中金消费", "symbol": "399364.XSHE"},
{"name": "国证农业", "symbol": "399365.XSHE"},
{"name": "国证大宗", "symbol": "399366.XSHE"},
{"name": "巨潮地产", "symbol": "399367.XSHE"},
{"name": "国证军工", "symbol": "399368.XSHE"},
{"name": "CBN-兴全", "symbol": "399369.XSHE"},
{"name": "国证成长", "symbol": "399370.XSHE"},
{"name": "国证价值", "symbol": "399371.XSHE"},
{"name": "大盘成长", "symbol": "399372.XSHE"},
{"name": "大盘价值", "symbol": "399373.XSHE"},
{"name": "中盘成长", "symbol": "399374.XSHE"},
{"name": "中盘价值", "symbol": "399375.XSHE"},
{"name": "小盘成长", "symbol": "399376.XSHE"},
{"name": "小盘价值", "symbol": "399377.XSHE"},
{"name": "南方低碳", "symbol": "399378.XSHE"},
{"name": "国证基金", "symbol": "399379.XSHE"},
{"name": "国证ETF", "symbol": "399380.XSHE"},
{"name": "1000能源", "symbol": "399381.XSHE"},
{"name": "1000材料", "symbol": "399382.XSHE"},
{"name": "1000工业", "symbol": "399383.XSHE"},
{"name": "1000可选", "symbol": "399384.XSHE"},
{"name": "1000消费", "symbol": "399385.XSHE"},
{"name": "1000医药", "symbol": "399386.XSHE"},
{"name": "1000金融", "symbol": "399387.XSHE"},
{"name": "1000信息", "symbol": "399388.XSHE"},
{"name": "国证通信", "symbol": "399389.XSHE"},
{"name": "1000公用", "symbol": "399390.XSHE"},
{"name": "投资时钟", "symbol": "399391.XSHE"},
{"name": "国证新兴", "symbol": "399392.XSHE"},
{"name": "国证地产", "symbol": "399393.XSHE"},
{"name": "国证医药", "symbol": "399394.XSHE"},
{"name": "国证有色", "symbol": "399395.XSHE"},
{"name": "国证食品", "symbol": "399396.XSHE"},
{"name": "OCT文化", "symbol": "399397.XSHE"},
{"name": "绩效指数", "symbol": "399398.XSHE"},
{"name": "中经GDP", "symbol": "399399.XSHE"},
{"name": "大中盘", "symbol": "399400.XSHE"},
{"name": "中小盘", "symbol": "399401.XSHE"},
{"name": "周期100", "symbol": "399402.XSHE"},
{"name": "防御100", "symbol": "399403.XSHE"},
{"name": "大盘低波", "symbol": "399404.XSHE"},
{"name": "大盘高贝", "symbol": "399405.XSHE"},
{"name": "中盘低波", "symbol": "399406.XSHE"},
{"name": "中盘高贝", "symbol": "399407.XSHE"},
{"name": "小盘低波", "symbol": "399408.XSHE"},
{"name": "小盘高贝", "symbol": "399409.XSHE"},
{"name": "苏州率先", "symbol": "399410.XSHE"},
{"name": "红利100", "symbol": "399411.XSHE"},
{"name": "国证新能", "symbol": "399412.XSHE"},
{"name": "国证转债", "symbol": "399413.XSHE"},
{"name": "I100", "symbol": "399415.XSHE"},
{"name": "I300", "symbol": "399416.XSHE"},
{"name": "国证新能源汽车指数", "symbol": "399417.XSHE"},
{"name": "国证国家安全指数", "symbol": "399418.XSHE"},
{"name": "国证高铁指数", "symbol": "399419.XSHE"},
{"name": "国证保险证券指数", "symbol": "399420.XSHE"},
{"name": "中关村50指数", "symbol": "399423.XSHE"},
{"name": "国证德高行专利领先指数", "symbol": "399427.XSHE"},
{"name": "国证定向增发指数", "symbol": "399428.XSHE"},
{"name": "新丝路指数", "symbol": "399429.XSHE"},
{"name": "国证银行行业指数", "symbol": "399431.XSHE"},
{"name": "国证汽车与汽车零配件行业指数", "symbol": "399432.XSHE"},
{"name": "国证交通运输行业指数", "symbol": "399433.XSHE"},
{"name": "国证传媒行业指数", "symbol": "399434.XSHE"},
{"name": "国证农牧渔产品行业指数", "symbol": "399435.XSHE"},
{"name": "国证煤炭行业指数", "symbol": "399436.XSHE"},
{"name": "国证证券行业指数", "symbol": "399437.XSHE"},
{"name": "国证电力公用事业行业指数", "symbol": "399438.XSHE"},
{"name": "国证石油天然气行业指数", "symbol": "399439.XSHE"},
{"name": "国证黑色金属行业指数", "symbol": "399440.XSHE"},
{"name": "国证生物医药指数", "symbol": "399441.XSHE"},
{"name": "企债指数", "symbol": "399481.XSHE"},
{"name": "央视50", "symbol": "399550.XSHE"},
{"name": "央视创新", "symbol": "399551.XSHE"},
{"name": "央视成长", "symbol": "399552.XSHE"},
{"name": "央视回报", "symbol": "399553.XSHE"},
{"name": "央视治理", "symbol": "399554.XSHE"},
{"name": "央视责任", "symbol": "399555.XSHE"},
{"name": "央视生态", "symbol": "399556.XSHE"},
{"name": "央视文化", "symbol": "399557.XSHE"},
{"name": "中小成长", "symbol": "399602.XSHE"},
{"name": "中小价值", "symbol": "399604.XSHE"},
{"name": "创业板R", "symbol": "399606.XSHE"},
{"name": "科技100", "symbol": "399608.XSHE"},
{"name": "TMT50", "symbol": "399610.XSHE"},
{"name": "中创100R", "symbol": "399611.XSHE"},
{"name": "中创100", "symbol": "399612.XSHE"},
{"name": "深证能源", "symbol": "399613.XSHE"},
{"name": "深证材料", "symbol": "399614.XSHE"},
{"name": "深证工业", "symbol": "399615.XSHE"},
{"name": "深证可选", "symbol": "399616.XSHE"},
{"name": "深证消费", "symbol": "399617.XSHE"},
{"name": "深证医药", "symbol": "399618.XSHE"},
{"name": "深证金融", "symbol": "399619.XSHE"},
{"name": "深证信息", "symbol": "399620.XSHE"},
{"name": "深证电信", "symbol": "399621.XSHE"},
{"name": "深证公用", "symbol": "399622.XSHE"},
{"name": "中小基础", "symbol": "399623.XSHE"},
{"name": "中创400", "symbol": "399624.XSHE"},
{"name": "中创500", "symbol": "399625.XSHE"},
{"name": "中创成长", "symbol": "399626.XSHE"},
{"name": "中创价值", "symbol": "399627.XSHE"},
{"name": "700成长", "symbol": "399628.XSHE"},
{"name": "700价值", "symbol": "399629.XSHE"},
{"name": "1000成长", "symbol": "399630.XSHE"},
{"name": "1000价值", "symbol": "399631.XSHE"},
{"name": "深100EW", "symbol": "399632.XSHE"},
{"name": "深300EW", "symbol": "399633.XSHE"},
{"name": "中小板EW", "symbol": "399634.XSHE"},
{"name": "创业板EW", "symbol": "399635.XSHE"},
{"name": "深证装备", "symbol": "399636.XSHE"},
{"name": "深证地产", "symbol": "399637.XSHE"},
{"name": "深证环保", "symbol": "399638.XSHE"},
{"name": "深证大宗", "symbol": "399639.XSHE"},
{"name": "创业基础", "symbol": "399640.XSHE"},
{"name": "深证新兴", "symbol": "399641.XSHE"},
{"name": "中小新兴", "symbol": "399642.XSHE"},
{"name": "创业新兴", "symbol": "399643.XSHE"},
{"name": "深证时钟", "symbol": "399644.XSHE"},
{"name": "100低波", "symbol": "399645.XSHE"},
{"name": "深消费50", "symbol": "399646.XSHE"},
{"name": "深医药50", "symbol": "399647.XSHE"},
{"name": "深证GDP", "symbol": "399648.XSHE"},
{"name": "中小红利", "symbol": "399649.XSHE"},
{"name": "中小治理", "symbol": "399650.XSHE"},
{"name": "中小责任", "symbol": "399651.XSHE"},
{"name": "中创高新", "symbol": "399652.XSHE"},
{"name": "深证龙头", "symbol": "399653.XSHE"},
{"name": "深证文化", "symbol": "399654.XSHE"},
{"name": "深证绩效", "symbol": "399655.XSHE"},
{"name": "100绩效", "symbol": "399656.XSHE"},
{"name": "300绩效", "symbol": "399657.XSHE"},
{"name": "中小绩效", "symbol": "399658.XSHE"},
{"name": "深成指EW", "symbol": "399659.XSHE"},
{"name": "中创EW", "symbol": "399660.XSHE"},
{"name": "深证低波", "symbol": "399661.XSHE"},
{"name": "深证高贝", "symbol": "399662.XSHE"},
{"name": "中小低波", "symbol": "399663.XSHE"},
{"name": "中小高贝", "symbol": "399664.XSHE"},
{"name": "中创低波", "symbol": "399665.XSHE"},
{"name": "中创高贝", "symbol": "399666.XSHE"},
{"name": "创业板G", "symbol": "399667.XSHE"},
{"name": "创业板V", "symbol": "399668.XSHE"},
{"name": "深证农业", "symbol": "399669.XSHE"},
{"name": "深周期50", "symbol": "399670.XSHE"},
{"name": "深防御50", "symbol": "399671.XSHE"},
{"name": "深红利50", "symbol": "399672.XSHE"},
{"name": "创业板50", "symbol": "399673.XSHE"},
{"name": "深A医药卫生指数", "symbol": "399674.XSHE"},
{"name": "深A软件与互联网指数", "symbol": "399675.XSHE"},
{"name": "深A医药卫生等权指数", "symbol": "399676.XSHE"},
{"name": "深A软件与互联网等权指数", "symbol": "399677.XSHE"},
{"name": "深证次新股指数", "symbol": "399678.XSHE"},
{"name": "深证200指数", "symbol": "399679.XSHE"},
{"name": "深成能源行业指数", "symbol": "399680.XSHE"},
{"name": "深成原材料行业指数", "symbol": "399681.XSHE"},
{"name": "深成工业行业指数", "symbol": "399682.XSHE"},
{"name": "深成可选消费行业指数", "symbol": "399683.XSHE"},
{"name": "深成主要消费行业指数", "symbol": "399684.XSHE"},
{"name": "深成医药卫生行业指数", "symbol": "399685.XSHE"},
{"name": "深成金融地产行业指数", "symbol": "399686.XSHE"},
{"name": "深成信息技术行业指数", "symbol": "399687.XSHE"},
{"name": "深成电信业务行业指数", "symbol": "399688.XSHE"},
{"name": "深成公用事业行业指数", "symbol": "399689.XSHE"},
{"name": "深证F60", "symbol": "399701.XSHE"},
{"name": "深证F120", "symbol": "399702.XSHE"},
{"name": "深证F200", "symbol": "399703.XSHE"},
{"name": "深证上游", "symbol": "399704.XSHE"},
{"name": "深证中游", "symbol": "399705.XSHE"},
{"name": "深证下游", "symbol": "399706.XSHE"},
{"name": "中证申万证券行业指数", "symbol": "399707.XSHE"},
{"name": "500深市", "symbol": "399802.XSHE"},
{"name": "中证工业4.0指数", "symbol": "399803.XSHE"},
{"name": "中证体育产业指数", "symbol": "399804.XSHE"},
{"name": "中证互联网金融指数", "symbol": "399805.XSHE"},
{"name": "中证环境治理指数", "symbol": "399806.XSHE"},
{"name": "中证高铁产业指数", "symbol": "399807.XSHE"},
{"name": "中证新能源指数", "symbol": "399808.XSHE"},
{"name": "中证方正富邦保险主题指数", "symbol": "399809.XSHE"},
{"name": "中证申万传媒行业投资指数", "symbol": "399810.XSHE"},
{"name": "中证申万电子行业投资指数", "symbol": "399811.XSHE"},
{"name": "中证养老产业指数", "symbol": "399812.XSHE"},
{"name": "中证国防安全指数", "symbol": "399813.XSHE"},
{"name": "中证大农业指数", "symbol": "399814.XSHE"},
{"name": "中证阿拉善生态主题100指数", "symbol": "399817.XSHE"},
{"name": "中证南方小康产业指数", "symbol": "399901.XSHE"},
{"name": "中证100指数", "symbol": "399903.XSHE"},
{"name": "中证中盘200指数", "symbol": "399904.XSHE"},
{"name": "中证500", "symbol": "399905.XSHE"},
{"name": "中证中小盘700指数", "symbol": "399907.XSHE"},
{"name": "沪深300能源指数", "symbol": "399908.XSHE"},
{"name": "沪深300原材料指数", "symbol": "399909.XSHE"},
{"name": "沪深300工业指数", "symbol": "399910.XSHE"},
{"name": "沪深300可选消费指数", "symbol": "399911.XSHE"},
{"name": "沪深300主要消费指数", "symbol": "399912.XSHE"},
{"name": "沪深300医药卫生指数", "symbol": "399913.XSHE"},
{"name": "沪深300金融地产指数", "symbol": "399914.XSHE"},
{"name": "沪深300信息技术指数", "symbol": "399915.XSHE"},
{"name": "沪深300电信业务指数", "symbol": "399916.XSHE"},
{"name": "沪深300公用事业指数", "symbol": "399917.XSHE"},
{"name": "沪深300成长指数", "symbol": "399918.XSHE"},
{"name": "沪深300价值指数", "symbol": "399919.XSHE"},
{"name": "沪深300相对成长指数", "symbol": "399920.XSHE"},
{"name": "中证红利指数", "symbol": "399922.XSHE"},
{"name": "中证锐联基本面50指数", "symbol": "399925.XSHE"},
{"name": "中证中央企业综合指数", "symbol": "399926.XSHE"},
{"name": "中证中央企业100指数", "symbol": "399927.XSHE"},
{"name": "中证能源指数", "symbol": "399928.XSHE"},
{"name": "中证原材料指数", "symbol": "399929.XSHE"},
{"name": "中证工业指数", "symbol": "399930.XSHE"},
{"name": "中证可选消费指数", "symbol": "399931.XSHE"},
{"name": "中证主要消费指数", "symbol": "399932.XSHE"},
{"name": "中证医药卫生指数", "symbol": "399933.XSHE"},
{"name": "中证金融地产指数", "symbol": "399934.XSHE"},
{"name": "中证信息技术指数", "symbol": "399935.XSHE"},
{"name": "中证电信业务指数", "symbol": "399936.XSHE"},
{"name": "中证公用事业指数", "symbol": "399937.XSHE"},
{"name": "中证民营企业综合指数", "symbol": "399938.XSHE"},
{"name": "中证民营企业200指数", "symbol": "399939.XSHE"},
{"name": "中证内地新能源主题指数", "symbol": "399941.XSHE"},
{"name": "中证内地消费主题指数", "symbol": "399942.XSHE"},
{"name": "中证内地基建主题指数", "symbol": "399943.XSHE"},
{"name": "中证内地资源主题指数", "symbol": "399944.XSHE"},
{"name": "中证内地运输主题指数", "symbol": "399945.XSHE"},
{"name": "中证内地金融主题指数", "symbol": "399946.XSHE"},
{"name": "中证内地银行主题指数", "symbol": "399947.XSHE"},
{"name": "中证内地地产主题指数", "symbol": "399948.XSHE"},
{"name": "中证内地农业主题指数", "symbol": "399949.XSHE"},
{"name": "沪深300基建主题指数", "symbol": "399950.XSHE"},
{"name": "沪深300银行指数", "symbol": "399951.XSHE"},
{"name": "沪深300地产指数", "symbol": "399952.XSHE"},
{"name": "中证地方国有企业综合指数", "symbol": "399953.XSHE"},
{"name": "中证地方国有企业100指数", "symbol": "399954.XSHE"},
{"name": "中证国有企业200指数", "symbol": "399956.XSHE"},
{"name": "沪深300运输指数", "symbol": "399957.XSHE"},
{"name": "中证创业成长指数", "symbol": "399958.XSHE"},
{"name": "军工指数", "symbol": "399959.XSHE"},
{"name": "中证龙头企业指数", "symbol": "399960.XSHE"},
{"name": "中证上游资源产业指数", "symbol": "399961.XSHE"},
{"name": "中证中游制造产业指数", "symbol": "399962.XSHE"},
{"name": "中证下游消费与服务产业指数", "symbol": "399963.XSHE"},
{"name": "中证新兴产业指数", "symbol": "399964.XSHE"},
{"name": "800地产", "symbol": "399965.XSHE"},
{"name": "800非银", "symbol": "399966.XSHE"},
{"name": "中证军工", "symbol": "399967.XSHE"},
{"name": "沪深300周期行业指数", "symbol": "399968.XSHE"},
{"name": "沪深300非周期行业指数", "symbol": "399969.XSHE"},
{"name": "中证移动互联网指数", "symbol": "399970.XSHE"},
{"name": "中证传媒指数", "symbol": "399971.XSHE"},
{"name": "300深市", "symbol": "399972.XSHE"},
{"name": "中证国防指数", "symbol": "399973.XSHE"},
{"name": "中证国有企业改革指数", "symbol": "399974.XSHE"},
{"name": "中证全指证券公司指数(四级行业)", "symbol": "399975.XSHE"},
{"name": "中证新能源汽车指数", "symbol": "399976.XSHE"},
{"name": "中证内地低碳经济主题指数", "symbol": "399977.XSHE"},
{"name": "中证医药100指数", "symbol": "399978.XSHE"},
{"name": "中证大宗商品股票指数", "symbol": "399979.XSHE"},
{"name": "中证超级大盘指数", "symbol": "399980.XSHE"},
{"name": "中证500等权重指数", "symbol": "399982.XSHE"},
{"name": "沪深300地产等权重指数", "symbol": "399983.XSHE"},
{"name": "中证银行指数", "symbol": "399986.XSHE"},
{"name": "中证酒指数", "symbol": "399987.XSHE"},
{"name": "中证医疗指数", "symbol": "399989.XSHE"},
{"name": "中证煤炭等权指数", "symbol": "399990.XSHE"},
{"name": "中证一带一路主题指数", "symbol": "399991.XSHE"},
{"name": "中证万得并购重组指数", "symbol": "399992.XSHE"},
{"name": "中证万得生物科技指数", "symbol": "399993.XSHE"},
{"name": "中证信息安全主题指数", "symbol": "399994.XSHE"},
{"name": "中证基建工程指数", "symbol": "399995.XSHE"},
{"name": "中证智能家居指数", "symbol": "399996.XSHE"},
{"name": "中证白酒指数", "symbol": "399997.XSHE"},
{"name": "中证煤炭指数", "symbol": "399998.XSHE"}
]
"""
def get_list(style='yahoo'):
df = pd.read_json(StringIO(JSON_DATA))
if style == 'yahoo':
df.symbol = df.symbol.apply(lambda x: str(x).replace('XSHG', 'SS').replace('XSHE', 'SZ'))
return df
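# Usage sketch (illustrative only; entries taken from the JSON list above):
#
#     >>> get_list('raw').query("name == '中证500'").symbol.iloc[0]   # any style other than 'yahoo' keeps the exchange suffixes
#     '000905.XSHG'
#     >>> get_list().query("name == '创业板指'").symbol.iloc[0]        # default 'yahoo' style
#     '399006.SZ'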
@click.command()
@click.argument("outputdir")
def gen_data(outputdir):
"""
    Generate benchmark data files for every ticker symbol into the output dir.
"""
if not os.path.isdir(outputdir):
        raise Exception("Please provide a valid output dir path")
df = get_list('yahoo')
for s in df.symbol:
outputfile = os.path.join(outputdir, '%s_benchmark.csv' % str(s).upper())
"""
zhikuang-squant is a private command of RainX
"""
click.echo("generate file or %s " % s)
cmd = 'zhikuang-squant gen_zipline_benchmark_file -s %s -o %s' % (s, outputfile)
# click.echo(cmd)
os.system(cmd)
click.echo("done! in path : %s" % outputfile)
if __name__ == "__main__":
    gen_data()

# === end of zipline_cn_databundle/index_list/__init__.py ===
import pandas as pd
import os
import numpy as np
import struct
"""
读取通达信数据
"""
class TdxFileNotFoundException(Exception):
pass
class TdxReader:
def __init__(self, vipdoc_path):
self.vipdoc_path = vipdoc_path
def get_kline_by_code(self, code, exchange):
fname = os.path.join(self.vipdoc_path, exchange)
fname = os.path.join(fname, 'lday')
fname = os.path.join(fname, '%s%s.day' % (exchange, code))
return self.parse_data_by_file(fname)
def parse_data_by_file(self, fname):
if not os.path.isfile(fname):
            raise TdxFileNotFoundException('no tdx kline data, please check path %s' % fname)
with open(fname, 'rb') as f:
content = f.read()
return self.unpack_records('<iiiiifii', content)
def unpack_records(self, format, data):
record_struct = struct.Struct(format)
return (record_struct.unpack_from(data, offset)
for offset in range(0, len(data), record_struct.size))
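    # Record layout assumed by the '<iiiiifii' format above: each 32-byte row of a
    # TDX .day file holds date (YYYYMMDD int), open/high/low/close (ints, price*100),
    # turnover amount (float), volume (int) and one reserved int that _df_convert
    # below ignores.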
def get_df(self, code, exchange):
data = [self._df_convert(row) for row in self.get_kline_by_code(code, exchange)]
df = pd.DataFrame(data=data, columns=('date', 'open', 'high', 'low', 'close', 'amount', 'volume'))
df.index = pd.to_datetime(df.date)
return df[['open', 'high', 'low', 'close', 'volume']]
def _df_convert(self, row):
t_date = str(row[0])
datestr = t_date[:4] + "-" + t_date[4:6] + "-" + t_date[6:]
new_row = (
datestr,
            row[1] * 0.01,  # prices are stored as price*100; *0.01 recovers the original (zipline later applies its own 1000x scaling)
row[2] * 0.01,
row[3] * 0.01,
row[4] * 0.01,
row[5],
row[6]
)
return new_row
if __name__ == '__main__':
tdx_reader = TdxReader('/Volumes/more/data/vipdoc/')
try:
#for row in tdx_reader.parse_data_by_file('/Volumes/more/data/vipdoc/sh/lday/sh600000.day'):
# print(row)
for row in tdx_reader.get_kline_by_code('600000', 'sh'):
print(row)
except TdxFileNotFoundException as e:
pass
    print(tdx_reader.get_df('600000', 'sh'))

# === end of zipline_cn_databundle/tdx/reader.py ===
import errno
import os
import click
import logbook
import pandas as pd
import zipline
from zipline.data import bundles as bundles_module
from zipline.utils.calendar_utils import get_calendar
from zipline.utils.compat import wraps
from zipline.utils.cli import Date, Timestamp
from zipline.utils.run_algo import _run, BenchmarkSpec, load_extensions
from zipline.extensions import create_args
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
"-e",
"--extension",
multiple=True,
help="File or module path to a zipline extension to load.",
)
@click.option(
"--strict-extensions/--non-strict-extensions",
is_flag=True,
help="If --strict-extensions is passed then zipline will not "
"run if it cannot load all of the specified extensions. "
"If this is not passed or --non-strict-extensions is passed "
"then the failure will be logged but execution will continue.",
)
@click.option(
"--default-extension/--no-default-extension",
is_flag=True,
default=True,
help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.",
)
@click.option(
"-x",
multiple=True,
help="Any custom command line arguments to define, in key=value form.",
)
@click.pass_context
def main(ctx, extension, strict_extensions, default_extension, x):
"""Top level zipline entry point."""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d
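# Illustrative note (not part of the original module): outside IPython the click
# option is never attached to the command; the wrapped function simply receives
# None for that argument.
#
#     >>> @ipython_only(click.option("--local-namespace/--no-local-namespace",
#     ...                            is_flag=True, default=None))
#     ... def cmd(local_namespace):
#     ...     assert local_namespace is None    # always true when __IPYTHON__ is False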
DEFAULT_BUNDLE = "quandl"
@main.command()
@click.option(
"-f",
"--algofile",
default=None,
type=click.File("r"),
help="The file that contains the algorithm to run.",
)
@click.option(
"-t",
"--algotext",
help="The algorithm script to run.",
)
@click.option(
"-D",
"--define",
multiple=True,
help="Define a name to be bound in the namespace before executing"
" the algotext. For example '-Dname=value'. The value may be any "
"python expression. These are evaluated in order so they may refer "
"to previously defined names.",
)
@click.option(
"--data-frequency",
type=click.Choice({"daily", "minute"}),
default="daily",
show_default=True,
help="The data frequency of the simulation.",
)
@click.option(
"--capital-base",
type=float,
default=10e6,
show_default=True,
help="The starting capital for the simulation.",
)
@click.option(
"-b",
"--bundle",
default=DEFAULT_BUNDLE,
metavar="BUNDLE-NAME",
show_default=True,
help="The data bundle to use for the simulation.",
)
@click.option(
"--bundle-timestamp",
type=Timestamp(),
default=pd.Timestamp.utcnow(),
show_default=False,
help="The date to lookup data on or before.\n" "[default: <current-time>]",
)
@click.option(
"-bf",
"--benchmark-file",
default=None,
type=click.Path(exists=True, dir_okay=False, readable=True, path_type=str),
help="The csv file that contains the benchmark returns",
)
@click.option(
"--benchmark-symbol",
default=None,
type=click.STRING,
help="The symbol of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
"--benchmark-sid",
default=None,
type=int,
help="The sid of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
"--no-benchmark",
is_flag=True,
default=False,
help="If passed, use a benchmark of zero returns.",
)
@click.option(
"-s",
"--start",
type=Date(tz="utc", as_timestamp=True),
help="The start date of the simulation.",
)
@click.option(
"-e",
"--end",
type=Date(tz="utc", as_timestamp=True),
help="The end date of the simulation.",
)
@click.option(
"-o",
"--output",
default="-",
metavar="FILENAME",
show_default=True,
help="The location to write the perf data. If this is '-' the perf will"
" be written to stdout.",
)
@click.option(
"--trading-calendar",
metavar="TRADING-CALENDAR",
default="XNYS",
help="The calendar you want to use e.g. XLON. XNYS is the default.",
)
@click.option(
"--print-algo/--no-print-algo",
is_flag=True,
default=False,
help="Print the algorithm to stdout.",
)
@click.option(
"--metrics-set",
default="default",
help="The metrics set to use. New metrics sets may be registered in your"
" extension.py.",
)
@click.option(
"--blotter",
default="default",
help="The blotter to use.",
show_default=True,
)
@ipython_only(
click.option(
"--local-namespace/--no-local-namespace",
is_flag=True,
default=None,
help="Should the algorithm methods be " "resolved in the local namespace.",
)
)
@click.pass_context
def run(
ctx,
algofile,
algotext,
define,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
benchmark_file,
benchmark_symbol,
benchmark_sid,
no_benchmark,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
blotter,
):
"""Run a backtest for the given algorithm."""
# check that the start and end dates are passed correctly
if start is None and end is None:
# check both at the same time to avoid the case where a user
# does not pass either of these and then passes the first only
# to be told they need to pass the second argument also
ctx.fail(
"must specify dates with '-s' / '--start' and '-e' / '--end'",
)
if start is None:
ctx.fail("must specify a start date with '-s' / '--start'")
if end is None:
ctx.fail("must specify an end date with '-e' / '--end'")
if (algotext is not None) == (algofile is not None):
ctx.fail(
"must specify exactly one of '-f' / "
"'--algofile' or"
" '-t' / '--algotext'",
)
trading_calendar = get_calendar(trading_calendar)
benchmark_spec = BenchmarkSpec.from_cli_params(
no_benchmark=no_benchmark,
benchmark_sid=benchmark_sid,
benchmark_symbol=benchmark_symbol,
benchmark_file=benchmark_file,
)
return _run(
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
algofile=algofile,
algotext=algotext,
defines=define,
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=output,
trading_calendar=trading_calendar,
print_algo=print_algo,
metrics_set=metrics_set,
local_namespace=local_namespace,
environ=os.environ,
blotter=blotter,
benchmark_spec=benchmark_spec,
custom_loader=None,
)
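# Example invocation of the command defined above (illustrative; file names and
# dates are placeholders):
#
#     zipline run -f my_algo.py -b quandl -s 2016-1-1 -e 2018-1-1 -o perf.pickle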
def zipline_magic(line, cell=None):
"""The zipline IPython cell magic."""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
"--algotext",
cell,
"--output",
os.devnull, # don't write the results by default
]
+ (
[
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
"--algotext",
"",
"--local-namespace",
]
if cell is None
else []
)
+ line.split(),
"%s%%zipline" % ((cell or "") and "%"),
            # don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError("main returned non-zero status code: %d" % e.code)
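# Typical use of the magic above from IPython/Jupyter (illustrative sketch; the
# extension is usually loaded first, e.g. with `%load_ext zipline`):
#
#     %%zipline --start 2016-1-1 --end 2018-1-1 -b quandl
#     from zipline.api import order, symbol
#
#     def initialize(context):
#         pass
#
#     def handle_data(context, data):
#         order(symbol('AAPL'), 10)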
@main.command()
@click.option(
"-b",
"--bundle",
default=DEFAULT_BUNDLE,
metavar="BUNDLE-NAME",
show_default=True,
help="The data bundle to ingest.",
)
@click.option(
"--assets-version",
type=int,
multiple=True,
help="Version of the assets db to which to downgrade.",
)
@click.option(
"--show-progress/--no-show-progress",
default=True,
help="Print progress information to the terminal.",
)
def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle."""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
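# Example (illustrative): ingest the default Quandl bundle, assuming the
# QUANDL_API_KEY environment variable is set.
#
#     zipline ingest -b quandl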
@main.command()
@click.option(
"-b",
"--bundle",
default=DEFAULT_BUNDLE,
metavar="BUNDLE-NAME",
show_default=True,
help="The data bundle to clean.",
)
@click.option(
"-e",
"--before",
type=Timestamp(),
help="Clear all data before TIMESTAMP."
" This may not be passed with -k / --keep-last",
)
@click.option(
"-a",
"--after",
type=Timestamp(),
help="Clear all data after TIMESTAMP"
" This may not be passed with -k / --keep-last",
)
@click.option(
"-k",
"--keep-last",
type=int,
metavar="N",
help="Clear all but the last N downloads."
" This may not be passed with -e / --before or -a / --after",
)
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command."""
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
@main.command()
def bundles():
"""List all of the available data bundles."""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith("."):
# hide the test data
continue
try:
ingestions = list(map(str, bundles_module.ingestions_for_bundle(bundle)))
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp))
if __name__ == "__main__":
    main()

# === end of zipline/__main__.py ===
import re
from toolz import curry
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split(".")
update_namespace(root, path, extension_args[name])
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r"^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$", arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value
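# Sketch of the accepted grammar (illustrative):
#
#     >>> d = {}
#     >>> parse_extension_arg("first.second.a=blah", d)
#     >>> parse_extension_arg("first.second.b=123", d)
#     >>> d
#     {'first.second.a': 'blah', 'first.second.b': '123'}
#     >>> parse_extension_arg("no_equals_sign", d)   # raises ValueError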
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), str):
raise ValueError(
"Conflicting assignments at namespace" " level '%s'" % path[0]
)
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name)
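# Illustrative sketch of the attribute chain built above:
#
#     >>> root = Namespace()
#     >>> update_namespace(root, ["first", "second", "a"], "blah")
#     >>> root.first.second.a
#     'blah'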
class Namespace(object):
"""
A placeholder object representing a namespace level
"""
class Registry(object):
"""
Responsible for managing all instances of custom subclasses of a
given abstract base class - only one instance needs to be created
per abstract base class, and should be created through the
create_registry function/decorator. All management methods
for a given base class can be called through the global wrapper functions
rather than through the object instance itself.
Parameters
----------
interface : type
The abstract base class to manage.
"""
def __init__(self, interface):
self.interface = interface
self._factories = {}
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r"
% (self.interface.__name__, name, sorted(self._factories)),
)
def is_registered(self, name):
"""Check whether we have a factory registered under ``name``."""
return name in self._factories
@curry
def register(self, name, factory):
if self.is_registered(name):
raise ValueError(
"%s factory with name %r is already registered"
% (self.interface.__name__, name)
)
self._factories[name] = factory
return factory
def unregister(self, name):
try:
del self._factories[name]
except KeyError:
raise ValueError(
"%s factory %r was not already registered"
% (self.interface.__name__, name)
)
def clear(self):
self._factories.clear()
# Public wrapper methods for Registry:
def get_registry(interface):
"""
Getter method for retrieving the registry
instance for a given extendable type
Parameters
----------
interface : type
extendable type (base class)
Returns
-------
manager : Registry
The corresponding registry
"""
try:
return custom_types[interface]
except KeyError:
raise ValueError("class specified is not an extendable type")
def load(interface, name):
"""
Retrieves a custom class whose name is given.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be retrieved.
Returns
-------
obj : object
An instance of the desired class.
"""
return get_registry(interface).load(name)
@curry
def register(interface, name, custom_class):
"""
Registers a class for retrieval by the load method
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the subclass
custom_class : type
The class to register, which must be a subclass of the
abstract base class in self.dtype
"""
return get_registry(interface).register(name, custom_class)
def unregister(interface, name):
"""
If a class is registered with the given name,
it is unregistered.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be unregistered.
"""
get_registry(interface).unregister(name)
def clear(interface):
"""
Unregisters all current registered classes
Parameters
----------
interface : type
The base class for which to perform this operation
"""
get_registry(interface).clear()
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError(
"there is already a Registry instance " "for the specified type"
)
custom_types[interface] = Registry(interface)
return interface
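# End-to-end sketch of the registry machinery (illustrative only):
#
#     >>> @create_registry            # also exposed as `extensible` just below
#     ... class Blotter(object):
#     ...     pass
#     >>> @register(Blotter, "default")
#     ... def make_default_blotter():
#     ...     return Blotter()
#     >>> isinstance(load(Blotter, "default"), Blotter)
#     True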
extensible = create_registry
# A global dictionary for storing instances of Registry:
custom_types = {}

# === end of zipline/extensions.py ===
from textwrap import dedent
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
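# How the template machinery above plays out for the subclasses below
# (illustrative sketch):
#
#     >>> str(NoTradeDataAvailableTooEarly(sid=1, dt='2014-01-02',
#     ...                                  start_dt='2014-02-03'))
#     '1 does not exist on 2014-01-02. It started trading on 2014-02-03.'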
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
with a slipage object that isn't a VolumeShareSlippage or
FixedSlipapge
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class IncompatibleSlippageModel(ZiplineError):
"""
Raised if a user tries to set a futures slippage model for equities or vice
versa.
"""
msg = """
You attempted to set an incompatible slippage model for {asset_type}. \
The slippage model '{given_model}' only supports {supported_asset_types}.
""".strip()
class SetSlippagePostInit(ZiplineError):
# Raised if a users script calls set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
# Raised if a users script calls set_cancel_policy
# after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class IncompatibleCommissionModel(ZiplineError):
"""
Raised if a user tries to set a futures commission model for equities or
vice versa.
"""
msg = """
You attempted to set an incompatible commission model for {asset_type}. \
The commission model '{given_model}' only supports {supported_asset_types}.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
Raised if a users script calls set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class ZeroCapitalError(ZiplineError):
"""
Raised if initial capital is set at or below zero
"""
msg = "initial capital base must be greater than zero"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
as_of_date' argument to specify when the date symbol-lookup
should be valid.
Possible options: {options}
""".strip()
class MultipleSymbolsFoundForFuzzySymbol(MultipleSymbolsFound):
"""
Raised when a fuzzy symbol lookup is not resolvable without additional
information.
"""
msg = dedent(
"""\
Multiple symbols were found fuzzy matching the name '{symbol}'. Use
the as_of_date and/or country_code arguments to specify the date
and country for the symbol-lookup.
Possible options: {options}
"""
)
class SameSymbolUsedAcrossCountries(MultipleSymbolsFound):
"""
Raised when a symbol() call contains a symbol that is used in more than
one country and is thus not resolvable without a country_code.
"""
msg = dedent(
"""\
The symbol '{symbol}' is used in more than one country. Use the
country_code argument to specify the country.
Possible options by country: {options}
"""
)
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class ValueNotFoundForField(ZiplineError):
"""
    Raised when a lookup_by_supplementary_mapping() call contains a
    value that does not exist for the specified mapping type.
"""
msg = """
Value '{value}' was not found for field '{field}'.
""".strip()
class MultipleValuesFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value that changed over time for the specified field and is
thus not resolvable without additional information provided via
as_of_date.
"""
msg = """
Multiple occurrences of the value '{value}' found for field '{field}'.
Use the 'as_of_date' or 'country_code' argument to specify when or where the
lookup should be valid.
Possible options: {options}
""".strip()
class NoValueForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a sid that
does not have a value for the specified mapping type.
"""
msg = """
No '{field}' value found for sid '{sid}'.
""".strip()
class MultipleValuesFoundForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a value that
changed over time for the specified field and is thus not resolvable
without additional information provided via as_of_date.
"""
msg = """
Multiple '{field}' values found for sid '{sid}'. Use the as_of_date' argument
to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs["sids"]
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = ("Expected a window_length greater than 0, got {window_length}.").strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = "Can't compute windowed expression {parent} with " "windowed input {child}."
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class NonPipelineInputs(ZiplineError):
"""
Raised when a non-pipeline object is passed as input to a ComputableTerm
"""
def __init__(self, term, inputs):
self.term = term
self.inputs = inputs
def __str__(self):
return (
"Unexpected input types in {}. "
"Inputs to Pipeline expressions must be Filters, Factors, "
"Classifiers, or BoundColumns.\n"
"Got the following type(s) instead: {}".format(
type(self.term).__name__,
sorted(set(map(type, self.inputs)), key=lambda t: t.__name__),
)
)
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = "{termname} requires at least one output when passed an outputs " "argument."
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = "{termname} requires a window_length, but no window_length was passed."
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = "{termname} requires a dtype, but no dtype was passed."
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and {upper_bound}, and min "
"must be less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = "Unknown ranking method: '{method}'. " "`method` must be one of {choices}"
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize(). "
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class DuplicatePipelineName(ZiplineError):
"""
Raised when a user tries to attach a pipeline with a name that already
exists for another attached pipeline.
"""
msg = (
"Attempted to attach pipeline named {name!r}, but the name already "
"exists for another pipeline. Please use a different name for this "
"pipeline."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
def __init__(self, hint="", **kwargs):
if hint:
hint = " " + hint
kwargs["hint"] = hint
super(UnsupportedDataType, self).__init__(**kwargs)
msg = "{typename} instances with dtype {dtype} are not supported.{hint}"
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = "{msg}"
@classmethod
def from_lookback_window(
cls, initial_message, first_date, lookback_start, lookback_length
):
return cls(
msg=dedent(
"""
{initial_message}
lookback window started at {lookback_start}
earliest known date was {first_date}
{lookback_length} extra rows of data were required
"""
).format(
initial_message=initial_message,
first_date=first_date,
lookback_start=lookback_start,
lookback_length=lookback_length,
)
)
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = (
"The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object."
)
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
class InvalidCalendarName(ZiplineError):
"""
Raised when a calendar with an invalid name is requested.
"""
msg = "The requested TradingCalendar, {calendar_name}, does not exist."
class CalendarNameCollision(ZiplineError):
"""
Raised when the static calendar registry already has a calendar with a
given name.
"""
msg = "A calendar with the name {calendar_name} is already registered."
class CyclicCalendarAlias(ZiplineError):
"""
Raised when calendar aliases form a cycle.
"""
msg = "Cycle in calendar aliases: [{cycle}]"
class ScheduleFunctionWithoutCalendar(ZiplineError):
"""
Raised when schedule_function is called but there is not a calendar to be
used in the construction of an event rule.
"""
# TODO update message when new TradingSchedules are built
msg = (
"To use schedule_function, the TradingAlgorithm must be running on an "
"ExchangeTradingSchedule, rather than {schedule}."
)
class ScheduleFunctionInvalidCalendar(ZiplineError):
"""
Raised when schedule_function is called with an invalid calendar argument.
"""
msg = (
"Invalid calendar '{given_calendar}' passed to schedule_function. "
"Allowed options are {allowed_calendars}."
)
class UnsupportedPipelineOutput(ZiplineError):
"""
Raised when a 1D term is added as a column to a pipeline.
"""
msg = (
"Cannot add column {column_name!r} with term {term}. Adding slices or "
"single-column-output terms as pipeline columns is not currently "
"supported."
)
class NonSliceableTerm(ZiplineError):
"""
Raised when attempting to index into a non-sliceable term, e.g. instances
of `zipline.pipeline.term.LoadableTerm`.
"""
msg = "Taking slices of {term} is not currently supported."
class IncompatibleTerms(ZiplineError):
"""
Raised when trying to compute correlations/regressions between two 2D
factors with different masks.
"""
msg = (
"{term_1} and {term_2} must have the same mask in order to compute "
"correlations and regressions asset-wise."
    )
# --- end of zipline/errors.py (zipline-crypto wheel) ---
import pandas as pd
from .assets import Asset
from enum import IntEnum
from ._protocol import BarData, InnerPosition # noqa
class MutableView(object):
"""A mutable view over an "immutable" object.
Parameters
----------
ob : any
The object to take a view over.
"""
# add slots so we don't accidentally add attributes to the view instead of
# ``ob``
__slots__ = ("_mutable_view_ob",)
def __init__(self, ob):
object.__setattr__(self, "_mutable_view_ob", ob)
def __getattr__(self, attr):
return getattr(self._mutable_view_ob, attr)
def __setattr__(self, attr, value):
vars(self._mutable_view_ob)[attr] = value
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self._mutable_view_ob)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = IntEnum(
"DATASOURCE_TYPE",
[
"AS_TRADED_EQUITY",
"MERGER",
"SPLIT",
"DIVIDEND",
"TRADE",
"TRANSACTION",
"ORDER",
"EMPTY",
"DONE",
"CUSTOM",
"BENCHMARK",
"COMMISSION",
"CLOSE_POSITION",
],
start=0,
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
"declared_date",
"ex_date",
"gross_amount",
"net_amount",
"pay_date",
"payment_sid",
"ratio",
"sid",
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
"id",
"payment_sid",
"cash_amount",
"share_count",
]
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__.update(initial_values)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, "__dict__") and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
"""Object providing read-only access to current portfolio state.
Parameters
----------
start_date : pd.Timestamp
The start date for the period being recorded.
capital_base : float
The starting value for the portfolio. This will be used as the starting
cash, current cash, and portfolio value.
Attributes
----------
positions : zipline.protocol.Positions
Dict-like object containing information about currently-held positions.
cash : float
Amount of cash currently held in portfolio.
portfolio_value : float
Current liquidation value of the portfolio's holdings.
This is equal to ``cash + sum(shares * price)``
starting_cash : float
Amount of cash in the portfolio at the start of the backtest.
"""
def __init__(self, start_date=None, capital_base=0.0):
self_ = MutableView(self)
self_.cash_flow = 0.0
self_.starting_cash = capital_base
self_.portfolio_value = capital_base
self_.pnl = 0.0
self_.returns = 0.0
self_.cash = capital_base
self_.positions = Positions()
self_.start_date = start_date
self_.positions_value = 0.0
self_.positions_exposure = 0.0
@property
def capital_used(self):
return self.cash_flow
def __setattr__(self, attr, value):
raise AttributeError("cannot mutate Portfolio objects")
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
@property
def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
        Each equity's value is its price times the number of shares held. Each
        futures contract's value is its unit price times the number of contracts
        held times the multiplier.
"""
position_values = pd.Series(
{
asset: (
position.last_sale_price * position.amount * asset.price_multiplier
)
for asset, position in self.positions.items()
},
dtype=float,
)
return position_values / self.portfolio_value
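# Illustrative sketch (not part of zipline): Portfolio objects are read-only
# from the algorithm's point of view; internal bookkeeping mutates them through
# a MutableView, as in __init__ above.
def _example_portfolio_is_read_only():
    portfolio = Portfolio(capital_base=100000.0)
    assert portfolio.cash == 100000.0
    assert portfolio.portfolio_value == 100000.0
    try:
        portfolio.cash = 0.0  # direct mutation is rejected
    except AttributeError:
        pass
    MutableView(portfolio).cash = 99000.0  # internal updates use MutableView
    assert portfolio.cash == 99000.0
    return portfolio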
class Account(object):
"""
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
"""
def __init__(self):
self_ = MutableView(self)
self_.settled_cash = 0.0
self_.accrued_interest = 0.0
self_.buying_power = float("inf")
self_.equity_with_loan = 0.0
self_.total_positions_value = 0.0
self_.total_positions_exposure = 0.0
self_.regt_equity = 0.0
self_.regt_margin = float("inf")
self_.initial_margin_requirement = 0.0
self_.maintenance_margin_requirement = 0.0
self_.available_funds = 0.0
self_.excess_liquidity = 0.0
self_.cushion = 0.0
self_.day_trades_remaining = float("inf")
self_.leverage = 0.0
self_.net_leverage = 0.0
self_.net_liquidation = 0.0
def __setattr__(self, attr, value):
raise AttributeError("cannot mutate Account objects")
def __repr__(self):
return "Account({0})".format(self.__dict__)
class Position(object):
"""
A position held by an algorithm.
Attributes
----------
asset : zipline.assets.Asset
The held asset.
amount : int
Number of shares held. Short positions are represented with negative
values.
cost_basis : float
Average price at which currently-held shares were acquired.
last_sale_price : float
Most recent price for the position.
last_sale_date : pd.Timestamp
Datetime at which ``last_sale_price`` was last updated.
"""
__slots__ = ("_underlying_position",)
def __init__(self, underlying_position):
object.__setattr__(self, "_underlying_position", underlying_position)
def __getattr__(self, attr):
return getattr(self._underlying_position, attr)
def __setattr__(self, attr, value):
raise AttributeError("cannot mutate Position objects")
@property
def sid(self):
# for backwards compatibility
return self.asset
def __repr__(self):
return "Position(%r)" % {
k: getattr(self, k)
for k in (
"asset",
"amount",
"cost_basis",
"last_sale_price",
"last_sale_date",
)
}
class Positions(dict):
"""A dict-like object containing the algorithm's current positions."""
def __missing__(self, key):
if isinstance(key, Asset):
return Position(InnerPosition(key))
raise ValueError(
"Position lookup expected a value of type Asset"
f" but got {type(key).__name__} instead"
        )
# --- end of zipline/protocol.py (zipline-crypto wheel) ---
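# Illustrative sketch (not part of zipline): Positions only accepts Asset keys.
# Looking up an Asset that is not currently held goes through __missing__ above
# and returns an empty Position; any other key type raises ValueError.
def _example_positions_lookup(asset):
    positions = Positions()
    empty = positions[asset]  # assumes ``asset`` is a zipline Asset instance
    try:
        positions["AAPL"]  # plain strings are rejected
    except ValueError:
        pass
    return empty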
from packaging.version import Version
import os
import numpy as np
# This is *not* a place to dump arbitrary classes/modules for convenience,
# it is a place to expose the public interfaces.
from zipline.utils.calendar_utils import get_calendar
from . import data
from . import finance
from . import gens
from . import utils
from .utils.numpy_utils import numpy_version
from .utils.pandas_utils import new_pandas
from .utils.run_algo import run_algorithm
# These need to happen after the other imports.
from .algorithm import TradingAlgorithm
from . import api
from zipline import extensions as ext
from zipline.finance.blotter import Blotter
# PERF: Fire a warning if calendars were instantiated during zipline import.
# Having calendars doesn't break anything per se, but it makes zipline imports
# noticeably slower, which is especially apparent in the Zipline CLI.
from zipline.utils.calendar_utils import global_calendar_dispatcher
if global_calendar_dispatcher._calendars:
import warnings
warnings.warn(
"Found TradingCalendar instances after zipline import.\n"
"Zipline startup will be much slower until this is fixed!",
)
del warnings
del global_calendar_dispatcher
from ._version import get_versions # noqa 402
__version__ = get_versions()["version"]
del get_versions
extension_args = ext.Namespace()
def load_ipython_extension(ipython):
from .__main__ import zipline_magic
ipython.register_magic_function(zipline_magic, "line_cell", "zipline")
if os.name == "nt":
    # we need to be able to write to our temp directory on windows so we
# create a subdir in %TMP% that has write access and use that as %TMP%
def _():
import atexit
import tempfile
tempfile.tempdir = tempdir = tempfile.mkdtemp()
@atexit.register
def cleanup_tempdir():
import shutil
shutil.rmtree(tempdir)
_()
del _
__all__ = [
"Blotter",
"TradingAlgorithm",
"api",
"data",
"finance",
"get_calendar",
"gens",
"run_algorithm",
"utils",
"extension_args",
]
def setup(
self,
np=np,
numpy_version=numpy_version,
Version=Version,
new_pandas=new_pandas,
):
"""Lives in zipline.__init__ for doctests."""
if numpy_version >= Version("1.14"):
self.old_opts = np.get_printoptions()
np.set_printoptions(legacy="1.13")
else:
self.old_opts = None
if new_pandas:
self.old_err = np.geterr()
# old pandas has numpy compat that sets this
np.seterr(all="ignore")
else:
self.old_err = None
def teardown(self, np=np):
"""Lives in zipline.__init__ for doctests."""
if self.old_err is not None:
np.seterr(**self.old_err)
if self.old_opts is not None:
np.set_printoptions(**self.old_opts)
del os
del np
del numpy_version
del Version
del new_pandas
# --- end of zipline/__init__.py (zipline-crypto wheel) ---
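# Illustrative sketch (not part of zipline): a minimal backtest driven through
# ``run_algorithm``. It assumes a data bundle has already been ingested; the
# bundle name, dates, and ticker below are placeholders, and timezone handling
# of the start/end timestamps varies between zipline versions.
def _example_run_algorithm():
    import pandas as pd
    from zipline.api import order_target_percent, symbol
    def initialize(context):
        context.asset = symbol("AAPL")
    def handle_data(context, data):
        order_target_percent(context.asset, 0.5)
    return run_algorithm(
        start=pd.Timestamp("2016-01-04", tz="utc"),
        end=pd.Timestamp("2016-12-30", tz="utc"),
        initialize=initialize,
        handle_data=handle_data,
        capital_base=100000,
        data_frequency="daily",
        bundle="quandl",  # placeholder bundle name
    )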
from collections.abc import Iterable
from collections import namedtuple
from copy import copy
import warnings
from datetime import tzinfo, time
import logbook
import pytz
import pandas as pd
import numpy as np
from itertools import chain, repeat
from zipline.utils.calendar_utils import get_calendar, days_at_time
from zipline._protocol import handle_non_market_minutes
from zipline.errors import (
AttachPipelineAfterInitialize,
CannotOrderDelistedAsset,
DuplicatePipelineName,
IncompatibleCommissionModel,
IncompatibleSlippageModel,
NoSuchPipeline,
OrderDuringInitialize,
OrderInBeforeTradingStart,
PipelineOutputDuringInitialize,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetBenchmarkOutsideInitialize,
SetCancelPolicyPostInit,
SetCommissionPostInit,
SetSlippagePostInit,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
UnsupportedOrderParameters,
ZeroCapitalError,
)
from zipline.finance.blotter import SimulationBlotter
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
MinLeverage,
RestrictedListOrder,
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.asset_restrictions import Restrictions
from zipline.finance.cancel_policy import NeverCancel, CancelPolicy
from zipline.finance.asset_restrictions import (
NoRestrictions,
StaticRestrictions,
SecurityListRestrictions,
)
from zipline.assets import Asset, Equity, Future
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.finance.metrics import MetricsTracker, load as load_metrics_set
from zipline.pipeline import Pipeline
import zipline.pipeline.domain as domain
from zipline.pipeline.engine import (
ExplodingPipelineEngine,
SimplePipelineEngine,
)
from zipline.utils.api_support import (
api_method,
require_initialized,
require_not_initialized,
ZiplineAPI,
disallowed_in_before_trading_start,
)
from zipline.utils.compat import ExitStack
from zipline.utils.date_utils import make_utc_aware
from zipline.utils.input_validation import (
coerce_string,
ensure_upper_case,
error_keywords,
expect_dtypes,
expect_types,
optional,
optionally,
)
from zipline.utils.numpy_utils import int64_dtype
from zipline.utils.pandas_utils import normalize_date
from zipline.utils.cache import ExpiringCache
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
date_rules,
time_rules,
calendars,
AfterOpen,
BeforeClose,
)
from zipline.utils.math_utils import (
tolerant_equals,
round_if_near_integer,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.security_list import SecurityList
import zipline.protocol
from zipline.sources.requests_csv import PandasRequestsCSV
from zipline.gens.sim_engine import MinuteSimulationClock
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.zipline_warnings import ZiplineDeprecationWarning
log = logbook.Logger("ZiplineLog")
# For creating and storing pipeline instances
AttachedPipeline = namedtuple("AttachedPipeline", "pipe chunks eager")
class NoBenchmark(ValueError):
def __init__(self):
super(NoBenchmark, self).__init__(
"Must specify either benchmark_sid or benchmark_returns.",
)
class TradingAlgorithm(object):
"""A class that represents a trading strategy and parameters to execute
the strategy.
Parameters
----------
*args, **kwargs
Forwarded to ``initialize`` unless listed below.
initialize : callable[context -> None], optional
Function that is called at the start of the simulation to
setup the initial context.
handle_data : callable[(context, data) -> None], optional
Function called on every bar. This is where most logic should be
implemented.
before_trading_start : callable[(context, data) -> None], optional
Function that is called before any bars have been processed each
day.
analyze : callable[(context, DataFrame) -> None], optional
Function that is called at the end of the backtest. This is passed
the context and the performance results for the backtest.
script : str, optional
Algoscript that contains the definitions for the four algorithm
lifecycle functions and any supporting code.
namespace : dict, optional
The namespace to execute the algoscript in. By default this is an
empty namespace that will include only python built ins.
algo_filename : str, optional
The filename for the algoscript. This will be used in exception
tracebacks. default: '<string>'.
data_frequency : {'daily', 'minute'}, optional
The duration of the bars.
equities_metadata : dict or DataFrame or file-like object, optional
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a ``read`` method is provided, ``read`` must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
futures_metadata : dict or DataFrame or file-like object, optional
The same layout as ``equities_metadata`` except that it is used
for futures information.
identifiers : list, optional
Any asset identifiers that are not provided in the
equities_metadata, but will be traded by this TradingAlgorithm.
get_pipeline_loader : callable[BoundColumn -> PipelineLoader], optional
The function that maps pipeline columns to their loaders.
create_event_context : callable[BarData -> context manager], optional
        A function used to create a context manager that wraps the
execution of all events that are scheduled for a bar.
This function will be passed the data for the bar and should
return the actual context manager that will be entered.
history_container_class : type, optional
The type of history container to use. default: HistoryContainer
platform : str, optional
The platform the simulation is running on. This can be queried for
in the simulation with ``get_environment``. This allows algorithms
to conditionally execute code based on platform it is running on.
default: 'zipline'
adjustment_reader : AdjustmentReader
The interface to the adjustments.
"""
def __init__(
self,
sim_params,
data_portal=None,
asset_finder=None,
# Algorithm API
namespace=None,
script=None,
algo_filename=None,
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
#
trading_calendar=None,
metrics_set=None,
blotter=None,
blotter_class=None,
cancel_policy=None,
benchmark_sid=None,
benchmark_returns=None,
platform="zipline",
capital_changes=None,
get_pipeline_loader=None,
create_event_context=None,
**initialize_kwargs,
):
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = namespace or {}
self._platform = platform
self.logger = None
# XXX: This is kind of a mess.
# We support passing a data_portal in `run`, but we need an asset
# finder earlier than that to look up assets for things like
# set_benchmark.
self.data_portal = data_portal
if self.data_portal is None:
if asset_finder is None:
raise ValueError(
"Must pass either data_portal or asset_finder "
"to TradingAlgorithm()"
)
self.asset_finder = asset_finder
else:
# Raise an error if we were passed two different asset finders.
# There's no world where that's a good idea.
if (
asset_finder is not None
and asset_finder is not data_portal.asset_finder
):
raise ValueError("Inconsistent asset_finders in TradingAlgorithm()")
self.asset_finder = data_portal.asset_finder
self.benchmark_returns = benchmark_returns
# XXX: This is also a mess. We should remove all of this and only allow
# one way to pass a calendar.
#
# We have a required sim_params argument as well as an optional
# trading_calendar argument, but sim_params has a trading_calendar
# attribute. If the user passed trading_calendar explicitly, make sure
# it matches their sim_params. Otherwise, just use what's in their
# sim_params.
self.sim_params = sim_params
if trading_calendar is None:
self.trading_calendar = sim_params.trading_calendar
elif trading_calendar.name == sim_params.trading_calendar.name:
self.trading_calendar = sim_params.trading_calendar
else:
raise ValueError(
"Conflicting calendars: trading_calendar={}, but "
"sim_params.trading_calendar={}".format(
trading_calendar.name,
self.sim_params.trading_calendar.name,
)
)
self.metrics_tracker = None
self._last_sync_time = pd.NaT
self._metrics_set = metrics_set
if self._metrics_set is None:
self._metrics_set = load_metrics_set("default")
# Initialize Pipeline API data.
self.init_engine(get_pipeline_loader)
self._pipelines = {}
# Create an already-expired cache so that we compute the first time
# data is requested.
self._pipeline_cache = ExpiringCache()
if blotter is not None:
self.blotter = blotter
else:
cancel_policy = cancel_policy or NeverCancel()
blotter_class = blotter_class or SimulationBlotter
self.blotter = blotter_class(cancel_policy=cancel_policy)
# The symbol lookup date specifies the date to use when resolving
# symbols to sids, and can be set using set_symbol_lookup_date()
self._symbol_lookup_date = None
# If string is passed in, execute and get reference to
# functions.
self.algoscript = script
self._initialize = None
self._before_trading_start = None
self._analyze = None
self._in_before_trading_start = False
self.event_manager = EventManager(create_event_context)
self._handle_data = None
def noop(*args, **kwargs):
pass
if self.algoscript is not None:
unexpected_api_methods = set()
if initialize is not None:
unexpected_api_methods.add("initialize")
if handle_data is not None:
unexpected_api_methods.add("handle_data")
if before_trading_start is not None:
unexpected_api_methods.add("before_trading_start")
if analyze is not None:
unexpected_api_methods.add("analyze")
if unexpected_api_methods:
raise ValueError(
"TradingAlgorithm received a script and the following API"
" methods as functions:\n{funcs}".format(
funcs=unexpected_api_methods,
)
)
if algo_filename is None:
algo_filename = "<string>"
code = compile(self.algoscript, algo_filename, "exec")
exec(code, self.namespace)
self._initialize = self.namespace.get("initialize", noop)
self._handle_data = self.namespace.get("handle_data", noop)
self._before_trading_start = self.namespace.get(
"before_trading_start",
)
# Optional analyze function, gets called after run
self._analyze = self.namespace.get("analyze")
else:
self._initialize = initialize or (lambda self: None)
self._handle_data = handle_data
self._before_trading_start = before_trading_start
self._analyze = analyze
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
if self.sim_params.capital_base <= 0:
raise ZeroCapitalError()
# Prepare the algo for initialization
self.initialized = False
self.initialize_kwargs = initialize_kwargs or {}
self.benchmark_sid = benchmark_sid
# A dictionary of capital changes, keyed by timestamp, indicating the
# target/delta of the capital changes, along with values
self.capital_changes = capital_changes or {}
# A dictionary of the actual capital change deltas, keyed by timestamp
self.capital_change_deltas = {}
self.restrictions = NoRestrictions()
def init_engine(self, get_loader):
"""
Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine
"""
if get_loader is not None:
self.engine = SimplePipelineEngine(
get_loader,
self.asset_finder,
self.default_pipeline_domain(self.trading_calendar),
)
else:
self.engine = ExplodingPipelineEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs)
def before_trading_start(self, data):
self.compute_eager_pipelines()
if self._before_trading_start is None:
return
self._in_before_trading_start = True
with handle_non_market_minutes(
data
) if self.data_frequency == "minute" else ExitStack():
self._before_trading_start(self, data)
self._in_before_trading_start = False
def handle_data(self, data):
if self._handle_data:
self._handle_data(self, data)
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage_models={slippage_models},
commission_models={commission_models},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(
class_name=self.__class__.__name__,
capital_base=self.sim_params.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage_models=repr(self.blotter.slippage_models),
commission_models=repr(self.blotter.commission_models),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars),
)
def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.loc[self.sim_params.sessions]
market_closes = trading_o_and_c["market_close"]
minutely_emission = False
if self.sim_params.data_frequency == "minute":
market_opens = trading_o_and_c["market_open"]
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = self.trading_calendar.execution_time_from_open(
market_opens
)
execution_closes = self.trading_calendar.execution_time_from_close(
market_closes
)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = self.trading_calendar.execution_time_from_close(
market_closes
)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions, time(8, 45), "US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
)
def _create_benchmark_source(self):
if self.benchmark_sid is not None:
benchmark_asset = self.asset_finder.retrieve_asset(self.benchmark_sid)
benchmark_returns = None
else:
benchmark_asset = None
benchmark_returns = self.benchmark_returns
return BenchmarkSource(
benchmark_asset=benchmark_asset,
benchmark_returns=benchmark_returns,
trading_calendar=self.trading_calendar,
sessions=self.sim_params.sessions,
data_portal=self.data_portal,
emission_rate=self.sim_params.emission_rate,
)
def _create_metrics_tracker(self):
return MetricsTracker(
trading_calendar=self.trading_calendar,
first_session=self.sim_params.start_session,
last_session=self.sim_params.end_session,
capital_base=self.sim_params.capital_base,
emission_rate=self.sim_params.emission_rate,
data_frequency=self.sim_params.data_frequency,
asset_finder=self.asset_finder,
metrics=self._metrics_set,
)
def _create_generator(self, sim_params):
if sim_params is not None:
self.sim_params = sim_params
self.metrics_tracker = metrics_tracker = self._create_metrics_tracker()
# Set the dt initially to the period start by forcing it to change.
self.on_dt_changed(self.sim_params.start_session)
if not self.initialized:
self.initialize(**self.initialize_kwargs)
self.initialized = True
benchmark_source = self._create_benchmark_source()
self.trading_client = AlgorithmSimulator(
self,
sim_params,
self.data_portal,
self._create_clock(),
benchmark_source,
self.restrictions,
)
metrics_tracker.handle_start_of_simulation(benchmark_source)
return self.trading_client.transform()
def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
def run(self, data_portal=None):
"""Run the algorithm."""
# HACK: I don't think we really want to support passing a data portal
# this late in the long term, but this is needed for now for backwards
# compat downstream.
if data_portal is not None:
self.data_portal = data_portal
self.asset_finder = data_portal.asset_finder
elif self.data_portal is None:
raise RuntimeError(
"No data portal in TradingAlgorithm.run().\n"
"Either pass a DataPortal to TradingAlgorithm() or to run()."
)
else:
assert (
self.asset_finder is not None
), "Have data portal without asset_finder."
# Create zipline and loop through simulated_trading.
# Each iteration returns a perf dictionary
try:
perfs = []
for perf in self.get_generator():
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
finally:
self.data_portal = None
self.metrics_tracker = None
return daily_stats
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if "daily_perf" in perf:
perf["daily_perf"].update(perf["daily_perf"].pop("recorded_vars"))
perf["daily_perf"].update(perf["cumulative_risk_metrics"])
daily_perfs.append(perf["daily_perf"])
else:
self.risk_report = perf
daily_dts = pd.DatetimeIndex([p["period_close"] for p in daily_perfs])
daily_dts = make_utc_aware(daily_dts)
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
def calculate_capital_changes(
self, dt, emission_rate, is_interday, portfolio_value_adjustment=0.0
):
"""
        If there is a capital change for a given dt, this means the change
occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes.
"""
try:
capital_change = self.capital_changes[dt]
except KeyError:
return
self._sync_last_sale_prices()
if capital_change["type"] == "target":
target = capital_change["value"]
capital_change_amount = target - (
self.portfolio.portfolio_value - portfolio_value_adjustment
)
log.info(
"Processing capital change to target %s at %s. Capital "
"change delta is %s" % (target, dt, capital_change_amount)
)
elif capital_change["type"] == "delta":
target = None
capital_change_amount = capital_change["value"]
log.info(
"Processing capital change of delta %s at %s"
% (capital_change_amount, dt)
)
else:
log.error(
"Capital change %s does not indicate a valid type "
"('target' or 'delta')" % capital_change
)
return
self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)
yield {
"capital_change": {
"date": dt,
"type": "cash",
"target": target,
"delta": capital_change_amount,
}
}
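    # Usage sketch (illustrative): the ``capital_changes`` mapping passed to the
    # constructor is keyed by timestamp, and each entry carries a ``type``
    # ('target' or 'delta') and a ``value``, matching how this method reads it:
    #
    #     capital_changes = {
    #         pd.Timestamp("2016-06-01", tz="utc"): {"type": "delta", "value": 50000.0},
    #         pd.Timestamp("2016-09-01", tz="utc"): {"type": "target", "value": 2e6},
    #     }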
@api_method
def get_environment(self, field="platform"):
"""Query the execution environment.
Parameters
----------
        field : {'platform', 'arena', 'data_frequency',
                 'start', 'end', 'capital_base', '*'}
The field to query. The options have the following meanings:
arena : str
The arena from the simulation parameters. This will normally
                be ``'backtest'`` but some systems may use this to distinguish
live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option.
"""
env = {
"arena": self.sim_params.arena,
"data_frequency": self.sim_params.data_frequency,
"start": self.sim_params.first_open,
"end": self.sim_params.last_close,
"capital_base": self.sim_params.capital_base,
"platform": self._platform,
}
if field == "*":
return env
else:
try:
return env[field]
except KeyError:
raise ValueError(
"%r is not a valid field for get_environment" % field,
)
@api_method
def fetch_csv(
self,
url,
pre_func=None,
post_func=None,
date_column="date",
date_format=None,
timezone=pytz.utc.zone,
symbol=None,
mask=True,
symbol_column=None,
special_params_checker=None,
country_code=None,
**kwargs,
):
"""Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow preprocessing the raw data returned from
            fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
country_code : str, optional
Country code to use to disambiguate symbol lookups.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified.
"""
if country_code is None:
country_code = self.default_fetch_csv_country_code(
self.trading_calendar,
)
# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
url,
pre_func,
post_func,
self.asset_finder,
self.trading_calendar.day,
self.sim_params.start_session,
self.sim_params.end_session,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency=self.data_frequency,
country_code=country_code,
special_params_checker=special_params_checker,
**kwargs,
)
# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df, self.sim_params)
return csv_data_source
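    # Usage sketch (illustrative, called from ``initialize``): load an external
    # CSV keyed by date so its columns become queryable from the ``data``
    # object. The URL and column names below are placeholders.
    #
    #     fetch_csv(
    #         "https://example.com/vix.csv",
    #         date_column="Date",
    #         date_format="%Y-%m-%d",
    #         symbol="VIX",
    #     )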
def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(
self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None,
):
"""
Schedule a function to be called repeatedly in the future.
Parameters
----------
func : callable
The function to execute when the rule is triggered. ``func`` should
have the same signature as ``handle_data``.
date_rule : zipline.utils.events.EventRule, optional
Rule for the dates on which to execute ``func``. If not
passed, the function will run every trading day.
time_rule : zipline.utils.events.EventRule, optional
Rule for the time at which to execute ``func``. If not passed, the
function will execute at the end of the first market minute of the
day.
half_days : bool, optional
Should this rule fire on half days? Default is True.
calendar : Sentinel, optional
Calendar used to compute rules that depend on the trading calendar.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules`
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn(
"Got a time rule for the second positional argument "
"date_rule. You should use keyword argument "
"time_rule= when calling schedule_function without "
"specifying a date_rule",
stacklevel=3,
)
date_rule = date_rule or date_rules.every_day()
time_rule = (
(time_rule or time_rules.every_minute())
if self.sim_params.data_frequency == "minute"
else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute()
)
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar("XNYS")
elif calendar is calendars.US_FUTURES:
cal = get_calendar("us_futures")
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=("[calendars.US_EQUITIES, calendars.US_FUTURES]"),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
)
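    # Usage sketch (illustrative, called from ``initialize``): run a
    # hypothetical ``rebalance(context, data)`` callback 30 minutes after the
    # open on the first trading day of each week.
    #
    #     schedule_function(
    #         rebalance,
    #         date_rule=date_rules.week_start(),
    #         time_rule=time_rules.market_open(minutes=30),
    #     )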
@api_method
def record(self, *args, **kwargs):
"""Track and record values each day.
Parameters
----------
**kwargs
The names and values to record.
Notes
-----
These values will appear in the performance packets and the performance
dataframe passed to ``analyze`` and returned from
:func:`~zipline.run_algorithm`.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, kwargs.items()):
self._recorded_vars[name] = value
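    # Usage sketch (illustrative, called from ``handle_data``): recorded values
    # appear as columns in the daily performance DataFrame. Both keyword and
    # positional name/value pairs are accepted.
    #
    #     record(leverage=context.account.leverage)
    #     record("price", data.current(context.asset, "price"))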
@api_method
def set_benchmark(self, benchmark):
"""Set the benchmark asset.
Parameters
----------
benchmark : zipline.assets.Asset
The asset to set as the new benchmark.
Notes
-----
        Any dividends paid out for that new benchmark asset will be
automatically reinvested.
"""
if self.initialized:
raise SetBenchmarkOutsideInitialize()
self.benchmark_sid = benchmark
@api_method
@preprocess(root_symbol_str=ensure_upper_case)
def continuous_future(
self, root_symbol_str, offset=0, roll="volume", adjustment="mul"
):
"""Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
        roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : zipline.assets.ContinuousFuture
The continuous future specifier.
"""
return self.asset_finder.create_continuous_future(
root_symbol_str,
offset,
roll,
adjustment,
)
@api_method
@preprocess(
symbol_str=ensure_upper_case,
country_code=optionally(ensure_upper_case),
)
def symbol(self, symbol_str, country_code=None):
"""Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : zipline.assets.Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbols was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = (
self._symbol_lookup_date
if self._symbol_lookup_date is not None
else self.sim_params.end_session
)
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
country_code=country_code,
)
@api_method
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[zipline.assets.Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args]
@api_method
def sid(self, sid):
"""Lookup an Asset by its unique asset identifier.
Parameters
----------
sid : int
The unique integer that identifies an asset.
Returns
-------
asset : zipline.assets.Asset
The asset with the given ``sid``.
Raises
------
SidsNotFound
When a requested ``sid`` does not map to any asset.
"""
return self.asset_finder.retrieve_asset(sid)
@api_method
@preprocess(symbol=ensure_upper_case)
def future_symbol(self, symbol):
"""Lookup a futures contract with a given symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : zipline.assets.Future
The future that trades with the name ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
return self.asset_finder.lookup_future_symbol(symbol)
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
# Make sure the asset exists, and that there is a last price for it.
# FIXME: we should use BarData's can_trade logic here, but I haven't
# yet found a good way to do that.
normalized_date = normalize_date(self.datetime)
if normalized_date < asset.start_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it started trading on"
" {1}.".format(asset.symbol, asset.start_date)
)
elif normalized_date > asset.end_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it stopped trading on"
" {1}.".format(asset.symbol, asset.end_date)
)
else:
last_price = self.trading_client.current_data.current(asset, "price")
if np.isnan(last_price):
raise CannotOrderDelistedAsset(
msg="Cannot order {0} on {1} as there is no last "
"price for the security.".format(asset.symbol, self.datetime)
)
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(psid=asset)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
value_multiplier = asset.price_multiplier
return value / (last_price * value_multiplier)
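    # Worked example (illustrative): ordering $10,000 of an equity trading at
    # $50 with price_multiplier == 1 yields 10000 / (50 * 1) = 200 shares. For a
    # futures contract at $50 with a 1000x multiplier, the same value yields
    # 10000 / (50 * 1000) = 0.2 contracts, which ``round_order`` below truncates
    # to zero.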
def _can_order_asset(self, asset):
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
if asset.auto_close_date:
day = normalize_date(self.get_datetime())
if day > min(asset.end_date, asset.auto_close_date):
# If we are after the asset's end date or auto close date, warn
# the user that they can't place an order for this asset, and
# return None.
log.warn(
"Cannot place order for {0}, as it has de-listed. "
"Any existing positions for this asset will be "
"liquidated on "
"{1}.".format(asset.symbol, asset.auto_close_date)
)
return False
return True
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order(self, asset, amount, limit_price=None, stop_price=None, style=None):
"""Place an order for a fixed number of shares.
Parameters
----------
asset : Asset
The asset to be ordered.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle, optional
The execution style for the order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
The ``limit_price`` and ``stop_price`` arguments provide shorthands for
passing common execution styles. Passing ``limit_price=N`` is
equivalent to ``style=LimitOrder(N)``. Similarly, passing
``stop_price=M`` is equivalent to ``style=StopOrder(M)``, and passing
``limit_price=N`` and ``stop_price=M`` is equivalent to
``style=StopLimitOrder(N, M)``. It is an error to pass both a ``style``
and ``limit_price`` or ``stop_price``.
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order_value`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount, style = self._calculate_order(
asset, amount, limit_price, stop_price, style
)
return self.blotter.order(asset, amount, style)
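    # Usage sketch (illustrative): the ``limit_price``/``stop_price`` shorthands
    # map onto ExecutionStyle objects as described in the Notes above.
    #
    #     order(asset, 100, limit_price=10.5)
    #     # is equivalent to
    #     order(asset, 100, style=LimitOrder(10.5))
    #     # and a negative amount sells or shorts:
    #     order(asset, -100, stop_price=9.0)  # same as style=StopOrder(9.0)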
def _calculate_order(
self, asset, amount, limit_price=None, stop_price=None, style=None
):
amount = self.round_order(amount)
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(asset, amount, limit_price, stop_price, style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(
asset, limit_price, stop_price, style
)
return amount, style
@staticmethod
def round_order(amount):
"""
Convert number of shares to an integer.
By default, truncates to the integer share count that's either within
.0001 of amount or closer to zero.
E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
"""
return int(round_if_near_integer(amount))
def validate_order_params(self, asset, amount, limit_price, stop_price, style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(
asset,
amount,
self.portfolio,
self.get_datetime(),
self.trading_client.current_data,
)
@staticmethod
def __convert_order_params_for_blotter(asset, limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
return LimitOrder(limit_price, asset=asset)
if stop_price:
return StopOrder(stop_price, asset=asset)
else:
return MarketOrder()
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_value(self, asset, value, limit_price=None, stop_price=None, style=None):
"""
Place an order for a fixed amount of money.
Equivalent to ``order(asset, value / data.current(asset, 'price'))``.
Parameters
----------
asset : Asset
The asset to be ordered.
value : float
Amount of value of ``asset`` to be transacted. The number of shares
bought or sold will be equal to ``value / current_price``.
limit_price : float, optional
Limit price for the order.
stop_price : float, optional
Stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_value_amount(asset, value)
return self.order(
asset,
amount,
limit_price=limit_price,
stop_price=stop_price,
style=style,
)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt
@property
def portfolio(self):
self._sync_last_sale_prices()
return self.metrics_tracker.portfolio
@property
def account(self):
self._sync_last_sale_prices()
return self.metrics_tracker.account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
self.datetime = dt
self.blotter.set_date(dt)
@api_method
@preprocess(tz=coerce_string(pytz.timezone))
@expect_types(tz=optional(tzinfo))
def get_datetime(self, tz=None):
"""
Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
dt = dt.astimezone(tz)
return dt
@api_method
def set_slippage(self, us_equities=None, us_futures=None):
"""
Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
Notes
-----
This function can only be called during
:func:`~zipline.api.initialize`.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type="equities",
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type="futures",
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures
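    # Usage sketch (illustrative, called from ``initialize``), using a slippage
    # model from ``zipline.finance.slippage``:
    #
    #     from zipline.finance import slippage
    #     set_slippage(
    #         us_equities=slippage.VolumeShareSlippage(
    #             volume_limit=0.025, price_impact=0.1
    #         ),
    #     )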
@api_method
def set_commission(self, us_equities=None, us_futures=None):
"""Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
Notes
-----
This function can only be called during
:func:`~zipline.api.initialize`.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar`
"""
if self.initialized:
raise SetCommissionPostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type="equities",
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.commission_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type="futures",
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.commission_models[Future] = us_futures
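    # Usage sketch (illustrative, called from ``initialize``), using a
    # commission model from ``zipline.finance.commission``:
    #
    #     from zipline.finance import commission
    #     set_commission(
    #         us_equities=commission.PerShare(cost=0.001, min_trade_cost=1.0),
    #     )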
@api_method
def set_cancel_policy(self, cancel_policy):
"""Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel`
"""
if not isinstance(cancel_policy, CancelPolicy):
raise UnsupportedCancelPolicy()
if self.initialized:
raise SetCancelPolicyPostInit()
self.blotter.cancel_policy = cancel_policy
@api_method
def set_symbol_lookup_date(self, dt):
"""Set the date for which symbols will be resolved to their assets
(symbols may map to different firms or underlying assets at
different times)
Parameters
----------
dt : datetime
The new symbol lookup date.
"""
try:
self._symbol_lookup_date = pd.Timestamp(dt).tz_localize("UTC")
except TypeError:
self._symbol_lookup_date = pd.Timestamp(dt).tz_convert("UTC")
except ValueError:
raise UnsupportedDatetimeFormat(input=dt, method="set_symbol_lookup_date")
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ("daily", "minute")
self.sim_params.data_frequency = value
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_percent(
self, asset, percent, limit_price=None, stop_price=None, style=None
):
"""Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_percent_amount(asset, percent)
return self.order(
asset,
amount,
limit_price=limit_price,
stop_price=stop_price,
style=style,
)
def _calculate_order_percent_amount(self, asset, percent):
value = self.portfolio.portfolio_value * percent
return self._calculate_order_value_amount(asset, value)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target(
self, asset, target, limit_price=None, stop_price=None, style=None
):
"""Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_amount(asset, target)
return self.order(
asset,
amount,
limit_price=limit_price,
stop_price=stop_price,
style=style,
)
def _calculate_order_target_amount(self, asset, target):
if asset in self.portfolio.positions:
current_position = self.portfolio.positions[asset].amount
target -= current_position
return target
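    # Worked example (illustrative): with an existing position of 300 shares,
    # ``order_target(asset, 500)`` resolves here to an order for 500 - 300 = 200
    # shares, while a target of 100 resolves to an order for -200 shares.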
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_value(
self, asset, target, limit_price=None, stop_price=None, style=None
):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(
asset,
amount,
limit_price=limit_price,
stop_price=stop_price,
style=style,
)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_percent(
self, asset, target, limit_price=None, stop_price=None, style=None
):
"""Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_percent`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_percent(sid(0), 0.10)
order_target_percent(sid(0), 0.10)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value`
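For illustration, a minimal sketch (``context.asset`` is a hypothetical
handle looked up in ``initialize``; 0.25 is an arbitrary allocation):
.. code-block:: python
    def rebalance(context, data):
        # Allocate 25% of current portfolio value to the asset.
        order_target_percent(context.asset, 0.25)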
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(
asset,
amount,
limit_price=limit_price,
stop_price=stop_price,
style=style,
)
def _calculate_order_target_percent_amount(self, asset, target):
target_amount = self._calculate_order_percent_amount(asset, target)
return self._calculate_order_target_amount(asset, target_amount)
@api_method
@expect_types(share_counts=pd.Series)
@expect_dtypes(share_counts=int64_dtype)
def batch_market_order(self, share_counts):
"""Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders.
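A minimal sketch, assuming ``asset_a`` and ``asset_b`` are ``Asset``
objects obtained elsewhere (e.g. via ``symbol``):
.. code-block:: python
    import pandas as pd
    # Buy 100 shares of asset_a and sell 50 shares of asset_b in one batch.
    share_counts = pd.Series({asset_a: 100, asset_b: -50}, dtype="int64")
    order_ids = batch_market_order(share_counts)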
"""
style = MarketOrder()
order_args = [
(asset, amount, style) for (asset, amount) in share_counts.items() if amount
]
return self.blotter.batch_order(order_args)
@error_keywords(
sid="Keyword argument `sid` is no longer supported for "
"get_open_orders. Use `asset` instead."
)
@api_method
def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[Asset, list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
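For illustration (``context.asset`` is a hypothetical handle):
.. code-block:: python
    # All open orders, keyed by asset.
    for asset, orders in get_open_orders().items():
        for order in orders:
            print(asset, order.amount)
    # Open orders for a single asset only.
    asset_orders = get_open_orders(context.asset)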
"""
if asset is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in self.blotter.open_orders.items()
if orders
}
if asset in self.blotter.open_orders:
orders = self.blotter.open_orders[asset]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
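A minimal sketch showing both accepted forms (``context.asset`` is a
hypothetical handle):
.. code-block:: python
    order_id = order(context.asset, 10)
    # Cancel by id, or equivalently by the Order object itself:
    cancel_order(order_id)
    # cancel_order(get_order(order_id))    # Order-object form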
"""
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(
self.portfolio,
self.account,
self.get_datetime(),
self.trading_client.current_data,
)
@api_method
def set_max_leverage(self, max_leverage):
"""Set a limit on the maximum leverage of the algorithm.
Parameters
----------
max_leverage : float
The maximum leverage for the algorithm. If not provided there will
be no maximum.
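For example, to cap leverage at a hypothetical 3x:
.. code-block:: python
    def initialize(context):
        set_max_leverage(3.0)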
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
@api_method
def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(
self, asset=None, max_shares=None, max_notional=None, on_error="fail"
):
"""Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares to hold for an asset.
max_notional : float, optional
The maximum value to hold for an asset.
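A minimal sketch (the ticker and limits are hypothetical):
.. code-block:: python
    def initialize(context):
        context.asset = symbol('AAPL')
        # At most 1,000 shares or $100,000 of exposure in this asset.
        set_max_position_size(context.asset,
                              max_shares=1000,
                              max_notional=100000.0)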
"""
control = MaxPositionSize(
asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error,
)
self.register_trading_control(control)
@api_method
def set_max_order_size(
self, asset=None, max_shares=None, max_notional=None, on_error="fail"
):
"""Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares that can be ordered at one time.
max_notional : float, optional
The maximum value that can be ordered at one time.
"""
control = MaxOrderSize(
asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error,
)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count, on_error="fail"):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list, on_error="fail"):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn(
"`set_do_not_order_list(security_lists.leveraged_etf_list)` "
"is deprecated. Use `set_asset_restrictions("
"security_lists.restrict_leveraged_etfs)` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2,
)
restrictions = SecurityListRestrictions(restricted_list)
else:
warnings.warn(
"`set_do_not_order_list(container_of_assets)` is deprecated. "
"Create a zipline.finance.asset_restrictions."
"StaticRestrictions object with a container of assets and use "
"`set_asset_restrictions(StaticRestrictions("
"container_of_assets))` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2,
)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error)
@api_method
@expect_types(
restrictions=Restrictions,
on_error=str,
)
def set_asset_restrictions(self, restrictions, on_error="fail"):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restrictions : Restrictions
An object providing information about restricted assets.
See Also
--------
zipline.finance.asset_restrictions.Restrictions
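A minimal sketch using ``StaticRestrictions`` (the tickers are
hypothetical):
.. code-block:: python
    from zipline.finance.asset_restrictions import StaticRestrictions
    def initialize(context):
        do_not_order = [symbol('ABC'), symbol('XYZ')]
        set_asset_restrictions(StaticRestrictions(do_not_order))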
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error="fail"):
"""Set a rule specifying that this algorithm cannot take short
positions.
"""
self.register_trading_control(LongOnly(on_error))
##############
# Pipeline API
##############
@api_method
@require_not_initialized(AttachPipelineAfterInitialize())
@expect_types(
pipeline=Pipeline,
name=str,
chunks=(int, Iterable, type(None)),
)
def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
"""Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it longer to get the first results but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
Default is a short initial chunk followed by half-year chunks.
eager : bool, optional
Whether or not to compute this pipeline prior to
before_trading_start. Default is True.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
See Also
--------
:func:`zipline.api.pipeline_output`
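A minimal sketch of the attach/output pattern (``make_pipeline`` is a
hypothetical helper returning a :class:`~zipline.pipeline.Pipeline`):
.. code-block:: python
    def initialize(context):
        attach_pipeline(make_pipeline(), 'my_pipeline')
    def before_trading_start(context, data):
        context.pipeline_data = pipeline_output('my_pipeline')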
"""
if chunks is None:
# Make the first chunk smaller to get more immediate results:
# (one week, then every half year)
chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
chunks = repeat(chunks)
if name in self._pipelines:
raise DuplicatePipelineName(name=name)
self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)
# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline
@api_method
@require_initialized(PipelineOutputDuringInitialize())
def pipeline_output(self, name):
"""
Get the results of the pipeline attached with the name ``name``.
Parameters
----------
name : str
Name of the pipeline from which to fetch results.
Returns
-------
results : pd.DataFrame
DataFrame containing the results of the requested pipeline for
the current simulation date.
Raises
------
NoSuchPipeline
Raised when no pipeline with the name `name` has been registered.
See Also
--------
:func:`zipline.api.attach_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
try:
pipe, chunks, _ = self._pipelines[name]
except KeyError:
raise NoSuchPipeline(
name=name,
valid=list(self._pipelines.keys()),
)
return self._pipeline_output(pipe, chunks, name)
def _pipeline_output(self, pipeline, chunks, name):
"""
Internal implementation of `pipeline_output`.
"""
today = normalize_date(self.get_datetime())
try:
data = self._pipeline_cache.get(name, today)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline,
today,
next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[today]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns)
def run_pipeline(self, pipeline, start_session, chunksize):
"""
Compute `pipeline`, providing values for at least `start_session`.
Produces a DataFrame containing data for sessions between `start_session`
and `end_session`, where `end_session` is defined by:
`end_session = min(start_session + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline
"""
sessions = self.trading_calendar.all_sessions
# Load data starting from the previous trading day...
start_date_loc = sessions.get_loc(start_session)
# ...continuing until either the day before the simulation end, or
# until chunksize days of data have been loaded.
sim_end_session = self.sim_params.end_session
end_loc = min(start_date_loc + chunksize, sessions.get_loc(sim_end_session))
end_session = sessions[end_loc]
return (
self.engine.run_pipeline(pipeline, start_session, end_session),
end_session,
)
@staticmethod
def default_pipeline_domain(calendar):
"""
Get a default pipeline domain for algorithms running on ``calendar``.
This will be used to infer a domain for pipelines that only use generic
datasets when running in the context of a TradingAlgorithm.
"""
return _DEFAULT_DOMAINS.get(calendar.name, domain.GENERIC)
@staticmethod
def default_fetch_csv_country_code(calendar):
"""
Get a default country_code to use for fetch_csv symbol lookups.
This will be used to disambiguate symbol lookups for fetch_csv calls if
our asset db contains entries with the same ticker spread across
multiple countries.
"""
return _DEFAULT_FETCH_CSV_COUNTRY_CODES.get(calendar.name)
##################
# End Pipeline API
##################
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [fn for fn in vars(cls).values() if getattr(fn, "is_api_method", False)]
# Map from calendar name to default domain for that calendar.
_DEFAULT_DOMAINS = {d.calendar_name: d for d in domain.BUILT_IN_DOMAINS}
# Map from calendar name to default country code for that calendar.
_DEFAULT_FETCH_CSV_COUNTRY_CODES = {
d.calendar_name: d.country_code for d in domain.BUILT_IN_DOMAINS
}
# Include us_futures, which doesn't have a pipeline domain.
_DEFAULT_FETCH_CSV_COUNTRY_CODES["us_futures"] = "US" | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/algorithm.py | algorithm.py |
from copy import copy
from logbook import Logger, Processor
from zipline.finance.order import ORDER_STATUS
from zipline.protocol import BarData
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.compat import ExitStack
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR,
)
log = Logger("Trade Simulation")
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {"minute": "minute_perf", "daily": "daily_perf"}
def __init__(
self,
algo,
sim_params,
data_portal,
clock,
benchmark_source,
restrictions,
):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
self.data_portal = data_portal
self.restrictions = restrictions
# ==============
# Algo Setup
# ==============
self.algo = algo
# ==============
# Snapshot Setup
# ==============
# This object is the way that user algorithms interact with OHLCV data,
# fetcher data, and some API methods like `data.can_trade`.
self.current_data = self._create_bar_data()
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.clock = clock
self.benchmark_source = benchmark_source
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if "algo_dt" not in record.extra:
record.extra["algo_dt"] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def get_simulation_dt(self):
return self.simulation_dt
def _create_bar_data(self):
return BarData(
data_portal=self.data_portal,
simulation_dt_func=self.get_simulation_dt,
data_frequency=self.sim_params.data_frequency,
trading_calendar=self.algo.trading_calendar,
restrictions=self.restrictions,
)
# TODO: simplify
# flake8: noqa: C901
def transform(self):
"""
Main generator work loop.
"""
algo = self.algo
metrics_tracker = algo.metrics_tracker
emission_rate = metrics_tracker.emission_rate
def every_bar(
dt_to_use,
current_data=self.current_data,
handle_data=algo.event_manager.handle_data,
):
for capital_change in calculate_minute_capital_changes(dt_to_use):
yield capital_change
self.simulation_dt = dt_to_use
# called every tick (minute or day).
algo.on_dt_changed(dt_to_use)
blotter = algo.blotter
# handle any transactions and commissions coming out of new orders
# placed in the last bar
(
new_transactions,
new_commissions,
closed_orders,
) = blotter.get_transactions(current_data)
blotter.prune_orders(closed_orders)
for transaction in new_transactions:
metrics_tracker.process_transaction(transaction)
# since this order was modified, record it
order = blotter.orders[transaction.order_id]
metrics_tracker.process_order(order)
for commission in new_commissions:
metrics_tracker.process_commission(commission)
handle_data(algo, current_data, dt_to_use)
# grab any new orders from the blotter, then clear the list.
# this includes cancelled orders.
new_orders = blotter.new_orders
blotter.new_orders = []
# if we have any new orders, record them so that we know
# in what perf period they were placed.
for new_order in new_orders:
metrics_tracker.process_order(new_order)
def once_a_day(
midnight_dt,
current_data=self.current_data,
data_portal=self.data_portal,
):
# process any capital changes that came overnight
for capital_change in algo.calculate_capital_changes(
midnight_dt, emission_rate=emission_rate, is_interday=True
):
yield capital_change
# set all the timestamps
self.simulation_dt = midnight_dt
algo.on_dt_changed(midnight_dt)
metrics_tracker.handle_market_open(
midnight_dt,
algo.data_portal,
)
# handle any splits that impact any positions or any open orders.
assets_we_care_about = (
metrics_tracker.positions.keys() | algo.blotter.open_orders.keys()
)
if assets_we_care_about:
splits = data_portal.get_splits(assets_we_care_about, midnight_dt)
if splits:
algo.blotter.process_splits(splits)
metrics_tracker.handle_splits(splits)
def on_exit():
# Remove references to algo, data portal, et al to break cycles
# and ensure deterministic cleanup of these objects when the
# simulation finishes.
self.algo = None
self.benchmark_source = self.current_data = self.data_portal = None
with ExitStack() as stack:
stack.callback(on_exit)
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
if algo.data_frequency == "minute":
def execute_order_cancellation_policy():
algo.blotter.execute_cancel_policy(SESSION_END)
def calculate_minute_capital_changes(dt):
# process any capital changes that came between the last
# and current minutes
return algo.calculate_capital_changes(
dt, emission_rate=emission_rate, is_interday=False
)
elif algo.data_frequency == "daily":
def execute_order_cancellation_policy():
algo.blotter.execute_daily_cancel_policy(SESSION_END)
def calculate_minute_capital_changes(dt):
return []
else:
def execute_order_cancellation_policy():
pass
def calculate_minute_capital_changes(dt):
return []
for dt, action in self.clock:
if action == BAR:
for capital_change_packet in every_bar(dt):
yield capital_change_packet
elif action == SESSION_START:
for capital_change_packet in once_a_day(dt):
yield capital_change_packet
elif action == SESSION_END:
# End of the session.
positions = metrics_tracker.positions
position_assets = algo.asset_finder.retrieve_all(positions)
self._cleanup_expired_assets(dt, position_assets)
execute_order_cancellation_policy()
algo.validate_account_controls()
yield self._get_daily_message(dt, algo, metrics_tracker)
elif action == BEFORE_TRADING_START_BAR:
self.simulation_dt = dt
algo.on_dt_changed(dt)
algo.before_trading_start(self.current_data)
elif action == MINUTE_END:
minute_msg = self._get_minute_message(
dt,
algo,
metrics_tracker,
)
yield minute_msg
risk_message = metrics_tracker.handle_simulation_end(
self.data_portal,
)
yield risk_message
def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders for assets that are at or past their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = [
asset for asset in position_assets if past_auto_close_date(asset)
]
metrics_tracker = algo.metrics_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
metrics_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their auto close
# date. These orders get processed immediately because otherwise they
# would not be processed until the first bar of the next day.
blotter = algo.blotter
assets_to_cancel = [
asset for asset in blotter.open_orders if past_auto_close_date(asset)
]
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
# Make a copy here so that we are not modifying the list that is being
# iterated over.
for order in copy(blotter.new_orders):
if order.status == ORDER_STATUS.CANCELLED:
metrics_tracker.process_order(order)
blotter.new_orders.remove(order)
def _get_daily_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
perf_message = metrics_tracker.handle_market_close(
dt,
self.data_portal,
)
perf_message["daily_perf"]["recorded_vars"] = algo.recorded_vars
return perf_message
def _get_minute_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
rvars = algo.recorded_vars
minute_message = metrics_tracker.handle_minute_close(
dt,
self.data_portal,
)
minute_message["minute_perf"]["recorded_vars"] = rvars
return minute_message | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/gens/tradesimulation.py | tradesimulation.py |
from abc import abstractmethod
from numpy import (
array,
full,
recarray,
searchsorted,
vstack,
where,
)
from pandas import NaT as pd_NaT
from zipline.errors import (
WindowLengthNotPositive,
UnsupportedDataType,
NonExistentAssetInTimeFrame,
NoFurtherDataError,
)
from zipline.lib.labelarray import LabelArray, labelarray_where
from zipline.utils.context_tricks import nop_context
from zipline.utils.input_validation import expect_dtypes, expect_types
from zipline.utils.numpy_utils import bool_dtype
from zipline.utils.pandas_utils import nearest_unequal_elements
from .downsample_helpers import (
select_sampling_indices,
expect_downsample_frequency,
)
from .sentinels import NotSpecified
from .term import Term
class PositiveWindowLengthMixin(Term):
"""
Validation mixin enforcing that a Term gets a positive WindowLength
"""
def _validate(self):
super(PositiveWindowLengthMixin, self)._validate()
if not self.windowed:
raise WindowLengthNotPositive(window_length=self.window_length)
class SingleInputMixin(Term):
"""
Validation mixin enforcing that a Term gets a length-1 inputs list.
"""
def _validate(self):
super(SingleInputMixin, self)._validate()
num_inputs = len(self.inputs)
if num_inputs != 1:
raise ValueError(
"{typename} expects only one input, "
"but received {num_inputs} instead.".format(
typename=type(self).__name__, num_inputs=num_inputs
)
)
class StandardOutputs(Term):
"""
Validation mixin enforcing that a Term cannot produce non-standard outputs.
"""
def _validate(self):
super(StandardOutputs, self)._validate()
if self.outputs is not NotSpecified:
raise ValueError(
"{typename} does not support custom outputs,"
" but received custom outputs={outputs}.".format(
typename=type(self).__name__,
outputs=self.outputs,
)
)
class RestrictedDTypeMixin(Term):
"""
Validation mixin enforcing that a term has a specific dtype.
"""
ALLOWED_DTYPES = NotSpecified
def _validate(self):
super(RestrictedDTypeMixin, self)._validate()
assert self.ALLOWED_DTYPES is not NotSpecified, (
"ALLOWED_DTYPES not supplied on subclass "
"of RestrictedDTypeMixin: %s." % type(self).__name__
)
if self.dtype not in self.ALLOWED_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
)
class CustomTermMixin(Term):
"""
Mixin for user-defined rolling-window Terms.
Implements `_compute` in terms of a user-defined `compute` function, which
is mapped over the input windows.
Used by CustomFactor, CustomFilter, CustomClassifier, etc.
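A sketch of the kind of subclass this mixin supports, in the spirit of
the ``Latest`` example later in this module (``MyData.col`` is a
hypothetical BoundColumn):
.. code-block:: python
    class MeanValue(CustomFactor):
        inputs = [MyData.col]
        window_length = 5
        def compute(self, today, assets, out, data):
            # `data` has shape (window_length, num_assets); write one
            # value per asset into `out`.
            out[:] = data.mean(axis=0)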
"""
ctx = nop_context
def __new__(
cls,
inputs=NotSpecified,
outputs=NotSpecified,
window_length=NotSpecified,
mask=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
ndim=NotSpecified,
**kwargs,
):
unexpected_keys = set(kwargs) - set(cls.params)
if unexpected_keys:
raise TypeError(
"{termname} received unexpected keyword "
"arguments {unexpected}".format(
termname=cls.__name__,
unexpected={k: kwargs[k] for k in unexpected_keys},
)
)
return super(CustomTermMixin, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
dtype=dtype,
missing_value=missing_value,
ndim=ndim,
**kwargs,
)
def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(name=type(self).__name__)
)
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out
def _format_inputs(self, windows, column_mask):
inputs = []
for input_ in windows:
window = next(input_)
if window.shape[1] == 1:
# Do not mask single-column inputs.
inputs.append(window)
else:
inputs.append(window[:, column_mask])
return inputs
def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return type(self).__name__ + ":\\l window_length: %d\\l" % self.window_length
class LatestMixin(SingleInputMixin):
"""
Common behavior for :attr:`zipline.pipeline.data.BoundColumn.latest`.
Given a :class:`~zipline.pipeline.data.DataSet` named ``MyData`` with a
column ``col`` of numeric dtype, the following expression:
.. code-block:: python
factor = MyData.col.latest
is equivalent to:
.. code-block:: python
class Latest(CustomFactor):
inputs = [MyData.col]
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
factor = Latest()
The behavior is the same for columns of boolean or string dtype, except the
resulting expression will be a :class:`~zipline.pipeline.CustomFilter` for
boolean columns, and the resulting object will be a
:class:`~zipline.pipeline.CustomClassifier` for string or integer columns.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
def _validate(self):
super(LatestMixin, self)._validate()
if self.inputs[0].dtype != self.dtype:
raise TypeError(
"{name} expected an input of dtype {expected}, "
"but got {actual} instead.".format(
name=type(self).__name__,
expected=self.dtype,
actual=self.inputs[0].dtype,
)
)
def graph_repr(self):
return "Latest"
class UniversalMixin(Term):
"""
Base class for "universal" mixins.
Universal mixins are used to implement expressions that need separate
subclasses for each of the ComputableTerm subclasses (Factor, Filter, and
Classifier). Such expressions are usually return types of methods of
ComputableTerm, such as `downsample()`, `alias()`, or `fillna()`.
A type may only inherit from one UniversalMixin.
"""
# Memo dict mapping pairs of (mixin_type, principal_type) to subtypes.
_UNIVERSAL_MIXIN_SUBTYPES = {}
@staticmethod
@abstractmethod
def _universal_mixin_type():
raise NotImplementedError("_universal_mixin_type")
@staticmethod
@abstractmethod
def _universal_mixin_specialization_name(principal_type):
raise NotImplementedError("_universal_mixin_specialization_name")
@classmethod
def universal_mixin_specialization(cls, principal_type):
"""
Create a new subtype of `principal_type` that adds this mixin to
``principal_type``. ``principal_type`` will be one of Factor, Filter,
or Classifier.
"""
mixin = cls._universal_mixin_type()
memo_key = (mixin, principal_type)
try:
return cls._UNIVERSAL_MIXIN_SUBTYPES[memo_key]
except KeyError:
new_type = type(
mixin._universal_mixin_specialization_name(principal_type),
(mixin, principal_type),
{"__module__": principal_type.__module__},
)
cls._UNIVERSAL_MIXIN_SUBTYPES[memo_key] = new_type
return new_type
class AliasedMixin(SingleInputMixin, UniversalMixin):
"""
Mixin for aliased terms.
"""
def __new__(cls, term, name):
return super(AliasedMixin, cls).__new__(
cls,
inputs=(term,),
outputs=term.outputs,
window_length=0,
name=name,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
window_safe=term.window_safe,
)
def _init(self, name, *args, **kwargs):
self.name = name
return super(AliasedMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, name, *args, **kwargs):
return (
super(AliasedMixin, cls)._static_identity(*args, **kwargs),
name,
)
def _compute(self, inputs, dates, assets, mask):
return inputs[0]
def __repr__(self):
return "{type}({inner}, name={name!r})".format(
type=type(self).__name__,
inner=self.inputs[0].recursive_repr(),
name=self.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return self.name
@staticmethod
def _universal_mixin_type():
return AliasedMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return "Aliased" + principal_type.__name__
class DownsampledMixin(StandardOutputs, UniversalMixin):
"""Universal mixin for downsampled terms.
A downsampled term is a wrapper around the "real" term that performs actual
computation. The downsampler is responsible for calling the real term's
`compute` method at selected intervals and forward-filling the computed
values.
Downsampling is not currently supported for terms with multiple outputs.
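Downsampled terms are normally created via ``Term.downsample`` rather
than by instantiating this mixin directly. A sketch, assuming
``SomeFactor`` is an existing factor and ``'month_start'`` is one of
the supported frequencies:
.. code-block:: python
    monthly_factor = SomeFactor().downsample('month_start')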
"""
# There's no reason to take a window of a downsampled term. The whole
# point is that you're re-using the same result multiple times.
window_safe = False
@expect_types(term=Term)
@expect_downsample_frequency
def __new__(cls, term, frequency):
return super(DownsampledMixin, cls).__new__(
cls,
inputs=term.inputs,
outputs=term.outputs,
window_length=term.window_length,
mask=term.mask,
frequency=frequency,
wrapped_term=term,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
)
def _init(self, frequency, wrapped_term, *args, **kwargs):
self._frequency = frequency
self._wrapped_term = wrapped_term
return super(DownsampledMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):
return (
super(DownsampledMixin, cls)._static_identity(*args, **kwargs),
frequency,
wrapped_term,
)
def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[: current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
# If we have choices, the last choice is the first date of the
# period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, "Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos)
def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], (
"Misaligned sampling dates in %s." % type(self).__name__
)
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i : i + 1],
assets,
mask[i : i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
# No more samples to take. Set next_sample to NaT, which
# compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results)
@staticmethod
def _universal_mixin_type():
return DownsampledMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return "Downsampled" + principal_type.__name__
class SliceMixin(UniversalMixin):
"""Universal mixin for taking columnar slices of terms.
Parameters
----------
term : zipline.pipeline.Term
The term from which to extract a column of data.
asset : zipline.assets.Asset
The asset corresponding to the column of `term` to be extracted.
Notes
-----
Users should rarely construct instances of `Slice` directly. Instead, they
should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
"""
def __new__(cls, term, asset):
return super(SliceMixin, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
return "{parent_term}[{asset}]".format(
parent_term=self.inputs[0].recursive_repr(),
asset=self._asset,
)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SliceMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SliceMixin, cls)._static_identity(*args, **kwargs),
asset,
)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset,
start_date=dates[0],
end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def asset(self):
"""Get the asset whose data is selected by this slice."""
return self._asset
@staticmethod
def _universal_mixin_type():
return SliceMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return principal_type.__name__ + "Slice"
class IfElseMixin(UniversalMixin):
"""Universal mixin for types returned by Filter.if_else."""
window_length = 0
@expect_dtypes(condition=bool_dtype)
def __new__(cls, condition, if_true, if_false):
return super(IfElseMixin, cls).__new__(
cls,
inputs=[condition, if_true, if_false],
dtype=if_true.dtype,
ndim=if_true.ndim,
missing_value=if_true.missing_value,
window_safe=all(
(
condition.window_safe,
if_true.window_safe,
if_false.window_safe,
)
),
outputs=if_true.outputs,
)
def _compute(self, inputs, assets, dates, mask):
if self.dtype == object:
return labelarray_where(inputs[0], inputs[1], inputs[2])
return where(inputs[0], inputs[1], inputs[2])
@staticmethod
def _universal_mixin_type():
return IfElseMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return "IfElse" + principal_type.__name__
class ConstantMixin(StandardOutputs, UniversalMixin):
"""Universal mixin for terms that produce a known constant value."""
window_length = 0
inputs = ()
params = ("const",)
def _compute(self, inputs, assets, dates, mask):
constant = self.params["const"]
out = full(mask.shape, constant, dtype=self.dtype)
if self.dtype == object:
return LabelArray(
out,
categories=[constant],
missing_value=self.missing_value,
)
return out
@staticmethod
def _universal_mixin_type():
return ConstantMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return "Constant" + principal_type.__name__ | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/mixins.py | mixins.py |
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
pass
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError("`delimiters` must be of length 2. Got %r" % delimiters)
return "".join([delimiters[0], content, delimiters[1]])
quote = partial(delimit, '""')
bracket = partial(delimit, "[]")
def begin_graph(f, name, **attrs):
writeln(f, "strict digraph %s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
attrs.setdefault("label", quote(name))
writeln(f, "subgraph cluster_%s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
writeln(f, "}")
@contextmanager
def graph(f, name, **attrs):
begin_graph(f, name, **attrs)
yield
end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
begin_cluster(f, name, **attrs)
yield
end_graph(f)
def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in g.in_degree().items() if d == 0)
def filter_nodes(include_asset_exists, nodes):
if include_asset_exists:
return nodes
return filter(lambda n: n is not AssetExists(), nodes)
def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {"rankdir": "TB", "splines": "ortho"}
cluster_attrs = {"style": "filled", "color": "lightgoldenrod1"}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, "Output", labelloc="b", **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, "Input", **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists, topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ["dot", "-T", format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode("utf-8")
)
out.write(proc_stdout)
def display_graph(g, format="svg", include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == "svg":
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue())
def writeln(f, s):
f.write((s + "\n").encode("utf-8"))
def fmt(obj):
if isinstance(obj, Term):
r = obj.graph_repr()
else:
r = obj
return '"%s"' % r
def add_term_node(f, term):
declare_node(f, id(term), attrs_for_node(term))
def declare_node(f, name, attributes):
writeln(f, "{0} {1};".format(name, format_attrs(attributes)))
def add_edge(f, source, dest):
writeln(f, "{0} -> {1};".format(source, dest))
def attrs_for_node(term, **overrides):
attrs = {
"shape": "box",
"colorscheme": "pastel19",
"style": "filled",
"label": fmt(term),
}
if isinstance(term, BoundColumn):
attrs["fillcolor"] = "1"
if isinstance(term, Factor):
attrs["fillcolor"] = "2"
elif isinstance(term, Filter):
attrs["fillcolor"] = "3"
elif isinstance(term, Classifier):
attrs["fillcolor"] = "4"
attrs.update(**overrides or {})
return attrs
def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ""
entries = ["=".join((key, value)) for key, value in attrs.items()]
return "[" + ", ".join(entries) + "]" | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/visualize.py | visualize.py |
import re
from itertools import chain
from numbers import Number
import numexpr
from numexpr.necompiler import getExprNames
from numpy import (
full,
inf,
)
from zipline.pipeline.term import Term, ComputableTerm
from zipline.utils.numpy_utils import bool_dtype
_VARIABLE_NAME_RE = re.compile("^(x_)([0-9]+)$")
# Map from op symbol to equivalent Python magic method name.
ops_to_methods = {
"+": "__add__",
"-": "__sub__",
"*": "__mul__",
"/": "__div__",
"%": "__mod__",
"**": "__pow__",
"&": "__and__",
"|": "__or__",
"^": "__xor__",
"<": "__lt__",
"<=": "__le__",
"==": "__eq__",
"!=": "__ne__",
">=": "__ge__",
">": "__gt__",
}
# Map from method name to op symbol.
methods_to_ops = {v: k for k, v in ops_to_methods.items()}
# Map from op symbol to equivalent Python magic method name after flipping
# arguments.
ops_to_commuted_methods = {
"+": "__radd__",
"-": "__rsub__",
"*": "__rmul__",
"/": "__rdiv__",
"%": "__rmod__",
"**": "__rpow__",
"&": "__rand__",
"|": "__ror__",
"^": "__rxor__",
"<": "__gt__",
"<=": "__ge__",
"==": "__eq__",
"!=": "__ne__",
">=": "__le__",
">": "__lt__",
}
unary_ops_to_methods = {
"-": "__neg__",
"~": "__invert__",
}
UNARY_OPS = {"-"}
MATH_BINOPS = {"+", "-", "*", "/", "**", "%"}
FILTER_BINOPS = {"&", "|"} # NumExpr doesn't support xor.
COMPARISONS = {"<", "<=", "!=", ">=", ">", "=="}
NUMEXPR_MATH_FUNCS = {
"sin",
"cos",
"tan",
"arcsin",
"arccos",
"arctan",
"sinh",
"cosh",
"tanh",
"arcsinh",
"arccosh",
"arctanh",
"log",
"log10",
"log1p",
"exp",
"expm1",
"sqrt",
"abs",
}
NPY_MAXARGS = 32
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
class BadBinaryOperator(TypeError):
"""
Raised when a bad binary operation is encountered.
Parameters
----------
op : str
The attempted operation
left : zipline.computable.Term
The left hand side of the operation.
right : zipline.computable.Term
The right hand side of the operation.
"""
def __init__(self, op, left, right):
super(BadBinaryOperator, self).__init__(
"Can't compute {left} {op} {right}".format(
op=op,
left=type(left).__name__,
right=type(right).__name__,
)
)
def method_name_for_op(op, commute=False):
"""
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__'
"""
if commute:
return ops_to_commuted_methods[op]
return ops_to_methods[op]
def unary_op_name(op):
return unary_ops_to_methods[op]
def is_comparison(op):
return op in COMPARISONS
class NumericalExpression(ComputableTerm):
"""
Term binding to a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
dtype : np.dtype
The dtype for the expression.
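Instances are normally created indirectly, by combining terms with
arithmetic or comparison operators (``SomeFactor`` and
``SomeOtherFactor`` are hypothetical factors):
.. code-block:: python
    f = SomeFactor()
    g = SomeOtherFactor()
    expr = (f + g) / 2.0      # a NumericalExpression over inputs (f, g)
    signal = expr > 0.0       # comparisons also build NumericalExpressions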
"""
window_length = 0
def __new__(cls, expr, binds, dtype):
# We always allow filters to be used in windowed computations.
# Otherwise, an expression is window_safe if all its constituents are
# window_safe.
window_safe = (dtype == bool_dtype) or all(t.window_safe for t in binds)
return super(NumericalExpression, cls).__new__(
cls,
inputs=binds,
expr=expr,
dtype=dtype,
window_safe=window_safe,
)
def _init(self, expr, *args, **kwargs):
self._expr = expr
return super(NumericalExpression, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, expr, *args, **kwargs):
return (
super(NumericalExpression, cls)._static_identity(*args, **kwargs),
expr,
)
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == "inf":
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s"
% (
expected_indices,
expr_indices,
)
)
super(NumericalExpression, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={"x_%d" % idx: array for idx, array in enumerate(arrays)},
global_dict={"inf": inf},
out=out,
)
return out
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, self, other)
# If the merged inputs would be too many for numexpr, then don't merge
# them:
if len(new_inputs) >= NPY_MAXARGS:
self_expr = "x_0"
other_expr = "x_1"
new_inputs = self, other
return self_expr, other_expr, new_inputs
@property
def bindings(self):
return {"x_%d" % i: input_ for i, input_ in enumerate(self.inputs)}
def __repr__(self):
return "{typename}(expr='{expr}', bindings={bindings})".format(
typename=type(self).__name__,
expr=self._expr,
bindings=self.bindings,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(
r"[-+]?\d*\.\d+", lambda x: format(float(x.group(0)), ".2E"), self._expr
)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
) | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/expression.py | expression.py |
from zipline.errors import UnsupportedPipelineOutput
from zipline.utils.input_validation import (
expect_element,
expect_types,
optional,
)
from .domain import Domain, GENERIC, infer_domain
from .graph import ExecutionPlan, TermGraph, SCREEN_NAME
from .filters import Filter
from .term import AssetExists, ComputableTerm, Term
class Pipeline(object):
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
:class:`~zipline.pipeline.Term` instances, and 'screen', a
:class:`~zipline.pipeline.Filter` representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
``attach_pipeline`` in their ``initialize`` function to register that the
pipeline should be computed each trading day. The most recent outputs of an
attached pipeline can be retrieved by calling ``pipeline_output`` from
``handle_data``, ``before_trading_start``, or a scheduled function.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.Filter, optional
Initial screen.
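A minimal construction sketch, assuming the built-in
``USEquityPricing`` dataset and ``SimpleMovingAverage`` factor are
available at their usual import locations:
.. code-block:: python
    from zipline.pipeline import Pipeline
    from zipline.pipeline.data import USEquityPricing
    from zipline.pipeline.factors import SimpleMovingAverage
    sma = SimpleMovingAverage(
        inputs=[USEquityPricing.close],
        window_length=10,
    )
    pipe = Pipeline(
        columns={'sma_10': sma},
        screen=sma > 5.0,
    )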
"""
__slots__ = ("_columns", "_screen", "_domain", "__weakref__")
@expect_types(columns=optional(dict), screen=optional(Filter), domain=Domain)
def __init__(self, columns=None, screen=None, domain=GENERIC):
if columns is None:
columns = {}
validate_column = self.validate_column
for column_name, term in columns.items():
validate_column(column_name, term)
if not isinstance(term, ComputableTerm):
raise TypeError(
"Column {column_name!r} contains an invalid pipeline term "
"({term}). Did you mean to append '.latest'?".format(
column_name=column_name,
term=term,
)
)
self._columns = columns
self._screen = screen
self._domain = domain
@property
def columns(self):
"""The output columns of this pipeline.
Returns
-------
columns : dict[str, zipline.pipeline.ComputableTerm]
Map from column name to expression computing that column's output.
"""
return self._columns
@property
def screen(self):
"""
The screen of this pipeline.
Returns
-------
screen : zipline.pipeline.Filter or None
Term defining the screen for this pipeline. If ``screen`` is a
filter, rows that do not pass the filter (i.e., rows for which the
filter computed ``False``) will be dropped from the output of this
pipeline before returning results.
Notes
-----
Setting a screen on a Pipeline does not change the values produced for
any rows: it only affects whether a given row is returned. Computing a
pipeline with a screen is logically equivalent to computing the
pipeline without the screen and then, as a post-processing-step,
filtering out any rows for which the screen computed ``False``.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""Add a column.
The results of computing ``term`` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
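# Usage sketch (hedged; ``pipe`` is built here, column names are arbitrary):
# >>> from zipline.pipeline.data import EquityPricing
# >>> pipe = Pipeline()
# >>> pipe.add(EquityPricing.close.latest, 'close')
# Passing ``EquityPricing.close`` itself (a LoadableTerm, not a ComputableTerm)
# would raise the TypeError above, hence the ``.latest`` hint in the message.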
@expect_types(name=str)
def remove(self, name):
"""Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter, overwrite=(bool, int))
def set_screen(self, screen, overwrite=False):
"""Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
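# Usage sketch (hedged; ``liquid``, ``tradeable``, and ``new_filter`` are assumed
# Filter terms, ``pipe`` an existing Pipeline):
# >>> pipe.set_screen(liquid & tradeable)           # combine filters up front
# >>> pipe.set_screen(new_filter, overwrite=True)   # or replace an existing screen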
def to_execution_plan(self, domain, default_screen, start_date, end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
)
def to_simple_graph(self, default_screen):
"""
Compile into a simple TermGraph with no extra row metadata.
Parameters
----------
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
Returns
-------
graph : zipline.pipeline.graph.TermGraph
Graph encoding term dependencies.
"""
return TermGraph(self._prepare_graph_terms(default_screen))
def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns
@expect_element(format=("svg", "png", "jpeg"))
def show_graph(self, format="svg"):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == "svg":
return g.svg
elif format == "png":
return g.png
elif format == "jpeg":
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
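# Rendering sketch (hedged): ``show_graph`` delegates to the TermGraph's
# svg/png/jpeg properties, which assume a working graphviz installation.
# >>> pipe.show_graph(format='png')   # returns an object suitable for display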
@staticmethod
@expect_types(term=Term, column_name=str)
def validate_column(column_name, term):
if term.ndim == 1:
raise UnsupportedPipelineOutput(column_name=column_name, term=term)
@property
def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(self._columns.values())
screen = self.screen
if screen is not None:
terms.append(screen)
return terms
@expect_types(default=Domain)
def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.domain.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.domain.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
Raised if the registered terms mix more than one concrete domain.
ValueError
Raised if the domain inferred from the terms conflicts with ``self._domain``.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/pipeline.py | pipeline.py |
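# Resolution sketch (hedged): with only generic columns and no explicit domain,
# ``pipe.domain(default=US_EQUITIES)`` returns the default; if any column is
# specialized to a concrete domain (e.g. US_EQUITIES), that domain wins; mixing
# two different concrete domains raises AmbiguousDomain inside infer_domain, and
# an inferred concrete domain that disagrees with an explicit one raises ValueError.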
import datetime
from textwrap import dedent
from interface import default, implements, Interface
import numpy as np
import pandas as pd
import pytz
from zipline.utils.calendar_utils import get_calendar
from zipline.country import CountryCode
from zipline.utils.formatting import bulleted_list
from zipline.utils.input_validation import expect_types, optional
from zipline.utils.memoize import lazyval
from zipline.utils.pandas_utils import days_at_time
class IDomain(Interface):
"""Domain interface."""
def all_sessions(self):
"""
Get all trading sessions for the calendar of this domain.
This determines the row labels of Pipeline outputs for pipelines run on
this domain.
Returns
-------
sessions : pd.DatetimeIndex
An array of all session labels for this domain.
"""
@property
def country_code(self):
"""The country code for this domain.
Returns
-------
code : str
The two-character ISO 3166 country code for this domain.
"""
def data_query_cutoff_for_sessions(self, sessions):
"""Compute the data query cutoff time for the given sessions.
Parameters
----------
sessions : pd.DatetimeIndex
The sessions for which to compute data query cutoff times. All
values in this index are midnight UTC timestamps.
Returns
-------
data_query_cutoff : pd.DatetimeIndex
Timestamp of the last minute for which data should be considered
"available" on each session.
"""
@default
def roll_forward(self, dt):
"""
Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
The date to align to this domain's calendar.
Returns
-------
pd.Timestamp
The first session label of this domain's calendar on or after ``dt``.
"""
try:
dt = pd.Timestamp(dt).tz_convert("UTC")
except TypeError:
dt = pd.Timestamp(dt).tz_localize("UTC")
trading_days = self.all_sessions()
try:
return trading_days[trading_days.searchsorted(dt)]
except IndexError:
raise ValueError(
"Date {} was past the last session for domain {}. "
"The last session for this domain is {}.".format(
dt.date(), self, trading_days[-1].date()
)
)
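# Example sketch (hedged): rolling a non-session date forward on the US equity
# calendar; the exact result depends on the installed calendar data.
# >>> US_EQUITIES.roll_forward("2017-01-01")   # a Sunday, followed by a holiday
# Timestamp('2017-01-03 00:00:00+0000', tz='UTC')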
Domain = implements(IDomain)
Domain.__doc__ = """
A domain represents a set of labels for the arrays computed by a Pipeline.
A domain defines two things:
1. A calendar defining the dates to which the pipeline's inputs and outputs
should be aligned. The calendar is represented concretely by a pandas
DatetimeIndex.
2. The set of assets that the pipeline should compute over. Right now, the only
supported way of representing this set is with a two-character country code
describing the country of assets over which the pipeline should compute. In
the future, we expect to expand this functionality to include more general
concepts.
"""
Domain.__name__ = "Domain"
Domain.__qualname__ = "zipline.pipeline.domain.Domain"
class GenericDomain(Domain):
"""Special singleton class used to represent generic DataSets and Columns."""
def all_sessions(self):
raise NotImplementedError("Can't get sessions for generic domain.")
@property
def country_code(self):
raise NotImplementedError("Can't get country code for generic domain.")
def data_query_cutoff_for_sessions(self, sessions):
raise NotImplementedError(
"Can't compute data query cutoff times for generic domain.",
)
def __repr__(self):
return "GENERIC"
GENERIC = GenericDomain()
class EquityCalendarDomain(Domain):
"""
An equity domain whose sessions are defined by a named TradingCalendar.
Parameters
----------
country_code : str
ISO-3166 two-letter country code of the domain
calendar_name : str
Name of the calendar, to be looked up by ``zipline.utils.calendar_utils.get_calendar``.
data_query_offset : np.timedelta64
The offset from market open when data should no longer be considered
available for a session. For example, a ``data_query_offset`` of
``-np.timedelta64(45, 'm')`` means that the data must have
been available at least 45 minutes prior to market open for it to
appear in the pipeline input for the given session.
"""
@expect_types(
country_code=str,
calendar_name=str,
__funcname="EquityCalendarDomain",
)
def __init__(
self, country_code, calendar_name, data_query_offset=-np.timedelta64(45, "m")
):
self._country_code = country_code
self.calendar_name = calendar_name
self._data_query_offset = (
# subtract an extra minute because the calendar's open label is one
# minute _after_ the true market open; this keeps the offset relative
# to the true open.
data_query_offset
- np.timedelta64(1, "m")
)
if data_query_offset >= datetime.timedelta(0):
raise ValueError(
"data must be ready before market open (offset must be < 0)",
)
@property
def country_code(self):
return self._country_code
@lazyval
def calendar(self):
return get_calendar(self.calendar_name)
def all_sessions(self):
return self.calendar.all_sessions
def data_query_cutoff_for_sessions(self, sessions):
opens = self.calendar.opens.reindex(sessions).values
missing_mask = pd.isnull(opens)
if missing_mask.any():
missing_days = sessions[missing_mask]
raise ValueError(
"cannot resolve data query time for sessions that are not on"
" the %s calendar:\n%s"
% (
self.calendar.name,
missing_days,
),
)
return pd.DatetimeIndex(opens + self._data_query_offset, tz="UTC")
def __repr__(self):
return "EquityCalendarDomain({!r}, {!r})".format(
self.country_code,
self.calendar_name,
)
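# Sketch (hedged): the module-level domains below are pre-built instances, e.g.
# ``US_EQUITIES`` pairs country code "US" with the "XNYS" calendar. With the
# default ``data_query_offset`` of -45 minutes, data_query_cutoff_for_sessions
# yields cutoffs 45 minutes before each session's true open (the extra minute
# subtracted above undoes the calendar's one-minute-late open label).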
AR_EQUITIES = EquityCalendarDomain(CountryCode.ARGENTINA, "XBUE")
AT_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRIA, "XWBO")
AU_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRALIA, "XASX")
BE_EQUITIES = EquityCalendarDomain(CountryCode.BELGIUM, "XBRU")
BR_EQUITIES = EquityCalendarDomain(CountryCode.BRAZIL, "BVMF")
CA_EQUITIES = EquityCalendarDomain(CountryCode.CANADA, "XTSE")
CH_EQUITIES = EquityCalendarDomain(CountryCode.SWITZERLAND, "XSWX")
CL_EQUITIES = EquityCalendarDomain(CountryCode.CHILE, "XSGO")
CN_EQUITIES = EquityCalendarDomain(CountryCode.CHINA, "XSHG")
CO_EQUITIES = EquityCalendarDomain(CountryCode.COLOMBIA, "XBOG")
CZ_EQUITIES = EquityCalendarDomain(CountryCode.CZECH_REPUBLIC, "XPRA")
DE_EQUITIES = EquityCalendarDomain(CountryCode.GERMANY, "XFRA")
DK_EQUITIES = EquityCalendarDomain(CountryCode.DENMARK, "XCSE")
ES_EQUITIES = EquityCalendarDomain(CountryCode.SPAIN, "XMAD")
FI_EQUITIES = EquityCalendarDomain(CountryCode.FINLAND, "XHEL")
FR_EQUITIES = EquityCalendarDomain(CountryCode.FRANCE, "XPAR")
GB_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_KINGDOM, "XLON")
GR_EQUITIES = EquityCalendarDomain(CountryCode.GREECE, "ASEX")
HK_EQUITIES = EquityCalendarDomain(CountryCode.HONG_KONG, "XHKG")
HU_EQUITIES = EquityCalendarDomain(CountryCode.HUNGARY, "XBUD")
ID_EQUITIES = EquityCalendarDomain(CountryCode.INDONESIA, "XIDX")
IE_EQUITIES = EquityCalendarDomain(CountryCode.IRELAND, "XDUB")
IN_EQUITIES = EquityCalendarDomain(CountryCode.INDIA, "XBOM")
IT_EQUITIES = EquityCalendarDomain(CountryCode.ITALY, "XMIL")
JP_EQUITIES = EquityCalendarDomain(CountryCode.JAPAN, "XTKS")
KR_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_KOREA, "XKRX")
MX_EQUITIES = EquityCalendarDomain(CountryCode.MEXICO, "XMEX")
MY_EQUITIES = EquityCalendarDomain(CountryCode.MALAYSIA, "XKLS")
NL_EQUITIES = EquityCalendarDomain(CountryCode.NETHERLANDS, "XAMS")
NO_EQUITIES = EquityCalendarDomain(CountryCode.NORWAY, "XOSL")
NZ_EQUITIES = EquityCalendarDomain(CountryCode.NEW_ZEALAND, "XNZE")
PE_EQUITIES = EquityCalendarDomain(CountryCode.PERU, "XLIM")
PH_EQUITIES = EquityCalendarDomain(CountryCode.PHILIPPINES, "XPHS")
PK_EQUITIES = EquityCalendarDomain(CountryCode.PAKISTAN, "XKAR")
PL_EQUITIES = EquityCalendarDomain(CountryCode.POLAND, "XWAR")
PT_EQUITIES = EquityCalendarDomain(CountryCode.PORTUGAL, "XLIS")
RU_EQUITIES = EquityCalendarDomain(CountryCode.RUSSIA, "XMOS")
SE_EQUITIES = EquityCalendarDomain(CountryCode.SWEDEN, "XSTO")
SG_EQUITIES = EquityCalendarDomain(CountryCode.SINGAPORE, "XSES")
TH_EQUITIES = EquityCalendarDomain(CountryCode.THAILAND, "XBKK")
TR_EQUITIES = EquityCalendarDomain(CountryCode.TURKEY, "XIST")
TW_EQUITIES = EquityCalendarDomain(CountryCode.TAIWAN, "XTAI")
US_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_STATES, "XNYS")
ZA_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_AFRICA, "XJSE")
BUILT_IN_DOMAINS = [
AR_EQUITIES,
AT_EQUITIES,
AU_EQUITIES,
BE_EQUITIES,
BR_EQUITIES,
CA_EQUITIES,
CH_EQUITIES,
CL_EQUITIES,
CN_EQUITIES,
CO_EQUITIES,
CZ_EQUITIES,
DE_EQUITIES,
DK_EQUITIES,
ES_EQUITIES,
FI_EQUITIES,
FR_EQUITIES,
GB_EQUITIES,
GR_EQUITIES,
HK_EQUITIES,
HU_EQUITIES,
ID_EQUITIES,
IE_EQUITIES,
IN_EQUITIES,
IT_EQUITIES,
JP_EQUITIES,
KR_EQUITIES,
MX_EQUITIES,
MY_EQUITIES,
NL_EQUITIES,
NO_EQUITIES,
NZ_EQUITIES,
PE_EQUITIES,
PH_EQUITIES,
PK_EQUITIES,
PL_EQUITIES,
PT_EQUITIES,
RU_EQUITIES,
SE_EQUITIES,
SG_EQUITIES,
TH_EQUITIES,
TR_EQUITIES,
TW_EQUITIES,
US_EQUITIES,
ZA_EQUITIES,
]
def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr))
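# Behavior sketch (hedged; ``f_us``, ``f_gb``, and ``f_gen`` are assumed terms
# specialized to US_EQUITIES, GB_EQUITIES, and GENERIC respectively):
# >>> infer_domain([])              # -> GENERIC
# >>> infer_domain([f_gen, f_us])   # -> US_EQUITIES (GENERIC is ignored)
# >>> infer_domain([f_us, f_gb])    # raises AmbiguousDomain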
# This would be better if we provided more context for which domains came from
# which terms.
class AmbiguousDomain(Exception):
"""
Raised when we attempt to infer a domain from a collection of mixed terms.
"""
_TEMPLATE = dedent(
"""\
Found terms with conflicting domains:
{domains}"""
)
def __init__(self, domains):
self.domains = domains
def __str__(self):
return self._TEMPLATE.format(
domains=bulleted_list(self.domains, indent=2),
)
class EquitySessionDomain(Domain):
"""A domain built directly from an index of sessions.
Mostly useful for testing.
Parameters
----------
sessions : pd.DatetimeIndex
Sessions to use as output labels for pipelines run on this domain.
country_code : str
ISO 3166 country code of equities to be used with this domain.
data_query_time : datetime.time, optional
The time of day when data should no longer be considered available for
a session.
data_query_date_offset : int, optional
The number of days to add to the session label before applying the
``data_query_time``. This can be used to express that the cutoff time
for a session falls on a different calendar day from the session label.
"""
@expect_types(
sessions=pd.DatetimeIndex,
country_code=str,
data_query_time=optional(datetime.time),
data_query_date_offset=int,
__funcname="EquitySessionDomain",
)
def __init__(
self, sessions, country_code, data_query_time=None, data_query_date_offset=0
):
self._country_code = country_code
self._sessions = sessions
if data_query_time is None:
data_query_time = datetime.time(0, 0, tzinfo=pytz.timezone("UTC"))
if data_query_time.tzinfo is None:
raise ValueError("data_query_time cannot be tz-naive")
self._data_query_time = data_query_time
self._data_query_date_offset = data_query_date_offset
@property
def country_code(self):
return self._country_code
def all_sessions(self):
return self._sessions
def data_query_cutoff_for_sessions(self, sessions):
return days_at_time(
sessions,
self._data_query_time,
self._data_query_time.tzinfo,
self._data_query_date_offset,
) | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/domain.py | domain.py |
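# Test-fixture sketch (hedged): a tiny domain over five explicit sessions.
# >>> import datetime, pandas as pd, pytz
# >>> sessions = pd.date_range('2020-01-06', periods=5, tz='UTC')
# >>> dom = EquitySessionDomain(sessions, 'US',
# ...     data_query_time=datetime.time(8, 45, tzinfo=pytz.UTC))
# >>> dom.data_query_cutoff_for_sessions(sessions)   # 08:45 UTC on each session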
from abc import ABCMeta, abstractmethod
from functools import partial
from numpy import array, arange
from pandas import DataFrame, MultiIndex
from toolz import groupby
from zipline.errors import NoFurtherDataError
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.utils.date_utils import compute_date_range_chunks
from zipline.utils.input_validation import expect_types
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import categorical_df_concat
from zipline.utils.pandas_utils import explode
from zipline.utils.string_formatting import bulleted_list
from .domain import Domain, GENERIC
from .graph import maybe_specialize
from .hooks import DelegatingHooks
from .term import AssetExists, InputDates, LoadableTerm
class PipelineEngine(metaclass=ABCMeta):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(
self, pipeline, start_date, end_date, chunksize, hooks=None
):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in
date chunks of size ``chunksize``.
Chunked execution reduces memory consumption, and may reduce
computation time depending on the contents of your pipeline.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline " "resources were registered."
)
def run_chunked_pipeline(
self, pipeline, start_date, end_date, chunksize, hooks=None
):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(
initial_workspace, root_mask_term, execution_plan, dates, assets
):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run, including
the extra dates needed for lookback windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
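# Override sketch (hedged pseudo-code; ``my_cache`` and its lookup API are
# assumptions): a custom hook with the same signature can pre-seed arrays for
# expensive terms and then delegate to the default implementation, e.g.
# def populate_from_cache(initial_workspace, root_mask_term, execution_plan,
#                         dates, assets):
#     workspace = dict(initial_workspace)
#     workspace.update(my_cache.lookup(execution_plan, dates, assets))
#     return default_populate_initial_workspace(
#         workspace, root_mask_term, execution_plan, dates, assets)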
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
default_hooks : list, optional
List of hooks that should be used to instrument all pipelines executed
by this engine.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
"""
__slots__ = (
"_get_loader",
"_finder",
"_root_mask_term",
"_root_mask_dates_term",
"_populate_initial_workspace",
)
@expect_types(
default_domain=Domain,
__funcname="SimplePipelineEngine",
)
def __init__(
self,
get_loader,
asset_finder,
default_domain=GENERIC,
populate_initial_workspace=None,
default_hooks=None,
):
self._get_loader = get_loader
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
self._default_domain = default_domain
if default_hooks is None:
self._default_hooks = []
else:
self._default_hooks = list(default_hooks)
def run_chunked_pipeline(
self, pipeline, start_date, end_date, chunksize, hooks=None
):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in
date chunks of size ``chunksize``.
Chunked execution reduces memory consumption, and may reduce
computation time depending on the contents of your pipeline.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
domain = self.resolve_domain(pipeline)
ranges = compute_date_range_chunks(
domain.all_sessions(),
start_date,
end_date,
chunksize,
)
hooks = self._resolve_hooks(hooks)
run_pipeline = partial(self._run_pipeline_impl, pipeline, hooks=hooks)
with hooks.running_pipeline(pipeline, start_date, end_date):
chunks = [run_pipeline(s, e) for s, e in ranges]
if len(chunks) == 1:
# OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`
# if we don't have to.
return chunks[0]
# Filter out empty chunks. Empty dataframes lose dtype information,
# which makes concatenation fail.
nonempty_chunks = [c for c in chunks if len(c)]
return categorical_df_concat(nonempty_chunks, inplace=True)
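# Usage sketch (hedged; ``engine``, ``pipe``, and the date bounds are assumed):
# >>> engine.run_chunked_pipeline(pipe, start_date, end_date, chunksize=126)
# 126 sessions is roughly half a US trading year per chunk; smaller chunks trade
# peak memory for a little extra per-chunk overhead.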
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
hooks = self._resolve_hooks(hooks)
with hooks.running_pipeline(pipeline, start_date, end_date):
return self._run_pipeline_impl(
pipeline,
start_date,
end_date,
hooks,
)
def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks):
"""Shared core for ``run_pipeline`` and ``run_chunked_pipeline``."""
# See notes at the top of this module for a description of the
# algorithm implemented here.
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
domain = self.resolve_domain(pipeline)
plan = pipeline.to_execution_plan(
domain,
self._root_mask_term,
start_date,
end_date,
)
extra_rows = plan.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(
domain,
start_date,
end_date,
extra_rows,
)
dates, sids, root_mask_values = explode(root_mask)
workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values),
},
self._root_mask_term,
plan,
dates,
sids,
)
refcounts = plan.initial_refcounts(workspace)
execution_order = plan.execution_order(workspace, refcounts)
with hooks.computing_chunk(execution_order, start_date, end_date):
results = self.compute_chunk(
graph=plan,
dates=dates,
sids=sids,
workspace=workspace,
refcounts=refcounts,
execution_order=execution_order,
hooks=hooks,
)
return self._to_narrow(
plan.outputs,
results,
results.pop(plan.screen_name),
dates[extra_rows:],
sids,
)
def _compute_root_mask(self, domain, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain for which we're computing a pipeline.
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
sessions = domain.all_sessions()
if start_date not in sessions:
raise ValueError(
"Pipeline start date ({}) is not a trading session for "
"domain {}.".format(start_date, domain)
)
elif end_date not in sessions:
raise ValueError(
"Pipeline end date {} is not a trading session for "
"domain {}.".format(end_date, domain)
)
start_idx, end_idx = sessions.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=sessions[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# NOTE: This logic should probably be delegated to the domain once we
# start adding more complex domains.
#
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
finder = self._finder
lifetimes = finder.lifetimes(
sessions[start_idx - extra_rows : end_idx],
include_start_date=False,
country_codes=(domain.country_code,),
)
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %s" % list(duplicated))
# Filter out columns that didn't exist from the farthest look back
# window through the end of the requested dates.
existed = lifetimes.any()
ret = lifetimes.loc[:, existed]
num_assets = ret.shape[1]
if num_assets == 0:
raise ValueError(
"Failed to find any assets with country_code {!r} that traded "
"between {} and {}.\n"
"This probably means that your asset db is old or that it has "
"incorrect country/exchange metadata.".format(
domain.country_code,
start_date,
end_date,
)
)
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph, domain, refcounts):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
# We need to specialize here because we don't change ComputableTerm
# after resolving domains, so they can still contain generic terms as
# inputs.
specialized = [maybe_specialize(t, domain) for t in term.inputs]
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in specialized:
adjusted_array = ensure_adjusted_array(
workspace[input_],
input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
# If the refcount for the input is > 1, we will need
# to traverse this array again so we must copy.
# If the refcount for the input == 0, this is the last
# traversal that will happen so we can invalidate
# the AdjustedArray and mutate the data in place.
copy=refcounts[input_] > 1,
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in specialized:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
input_data = input_data[offset:]
if refcounts[input_] > 1:
input_data = input_data.copy()
out.append(input_data)
return out
def compute_chunk(
self, graph, dates, sids, workspace, refcounts, execution_order, hooks
):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
This is where we do the actual work of running a pipeline.
Parameters
----------
graph : zipline.pipeline.graph.ExecutionPlan
Dependency graph of the terms to be executed.
dates : pd.DatetimeIndex
Row labels for our root mask.
sids : pd.Int64Index
Column labels for our root mask.
workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
refcounts : dict[Term, int]
Dictionary mapping terms to number of dependent terms. When a
term's refcount hits 0, it can be safely discarded from
``workspace``. See TermGraph.decref_dependencies for more info.
execution_order : list[Term]
Order in which to execute terms.
hooks : implements(PipelineHooks)
Hooks to instrument pipeline execution.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(graph, dates, sids, workspace)
get_loader = self._get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = workspace.copy()
domain = graph.domain
# Many loaders can fetch data more efficiently if we ask them to
# retrieve all their inputs at once. For example, a loader backed by a
# SQL database can fetch multiple columns from the database in a single
# query.
#
# To enable these loaders to fetch their data efficiently, we group
# together requests for LoadableTerms if they are provided by the same
# loader and they require the same number of extra rows.
#
# The extra rows condition is a simplification: we don't currently have
# a mechanism for asking a loader to fetch different windows of data
# for different terms, so we only batch requests together when they're
# going to produce data for the same set of dates.
def loader_group_key(term):
loader = get_loader(term)
extra_rows = graph.extra_rows[term]
return loader, extra_rows
# Only produce loader groups for the terms we expect to load. This
# ensures that we can run pipelines for graphs where we don't have a
# loader registered for an atomic term if all the dependencies of that
# term were supplied in the initial workspace.
will_be_loaded = graph.loadable_terms - workspace.keys()
loader_groups = groupby(
loader_group_key,
(t for t in execution_order if t in will_be_loaded),
)
for term in execution_order:
# `term` may have been supplied in `initial_workspace`, or we may
# have loaded `term` as part of a batch with another term coming
# from the same loader (see note on loader_group_key above). In
# either case, we already have the term computed, so don't
# recompute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
loader = get_loader(term)
to_load = sorted(
loader_groups[loader_group_key(term)], key=lambda t: t.dataset
)
self._ensure_can_load(loader, to_load)
with hooks.loading_terms(to_load):
loaded = loader.load_adjusted_array(
domain,
to_load,
mask_dates,
sids,
mask,
)
assert set(loaded) == set(to_load), (
"loader did not return an AdjustedArray for each column\n"
"expected: %r\n"
"got: %r"
% (
sorted(to_load, key=repr),
sorted(loaded, key=repr),
)
)
workspace.update(loaded)
else:
with hooks.computing_term(term):
workspace[term] = term._compute(
self._inputs_for_term(
term,
workspace,
graph,
domain,
refcounts,
),
mask_dates,
sids,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms
# whose refcounts hit 0.
for garbage in graph.decref_dependencies(term, refcounts):
del workspace[garbage]
# At this point, all the output terms are in the workspace.
out = {}
graph_extra_rows = graph.extra_rows
for name, term in graph.outputs.items():
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term] :]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`.
assets : ndarray[int64, ndim=1]
Column index for arrays `data` and `mask`.
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={name: array([], dtype=arr.dtype) for name, arr in data.items()},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# Using this to convert np.records to tuples
final_columns[name] = terms[name].postprocess(data[name][mask])
resolved_assets = array(self._finder.retrieve_all(assets))
index = _pipeline_output_index(dates, resolved_assets, mask)
return DataFrame(data=final_columns, index=index)
def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(sids)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
for term in initial_workspace:
if self._is_special_root_term(term):
continue
if term.domain is GENERIC:
# XXX: We really shouldn't allow **any** generic terms to be
# populated in the initial workspace. A generic term, by
# definition, can't correspond to concrete data until it's
# paired with a domain, and populate_initial_workspace isn't
# given the domain of execution, so it can't possibly know what
# data to use when populating a generic term.
#
# In our current implementation, however, we don't have a good
# way to represent specializations of ComputableTerms that take
# only generic inputs, so there's no good way for the initial
# workspace to provide data for such terms except by populating
# the generic ComputableTerm.
#
# The right fix for the above is to implement "full
# specialization", i.e., implementing ``specialize`` uniformly
# across all terms, not just LoadableTerms. Having full
# specialization will also remove the need for all of the
# remaining ``maybe_specialize`` calls floating around in this
# file.
#
# In the meantime, disallowing ComputableTerms in the initial
# workspace would break almost every test in
# `test_filter`/`test_factor`/`test_classifier`, and fixing
# them would require updating all those tests to compute with
# more specialized terms. Once we have full specialization, we
# can fix all the tests without a large volume of edits by
# simply specializing their workspaces, so for now I'm leaving
# this in place as a somewhat sharp edge.
if isinstance(term, LoadableTerm):
raise ValueError(
"Loadable workspace terms must be specialized to a "
"domain, but got generic term {}".format(term)
)
elif term.domain != graph.domain:
raise ValueError(
"Initial workspace term {} has domain {}. "
"Does not match pipeline domain {}".format(
term,
term.domain,
graph.domain,
)
)
def resolve_domain(self, pipeline):
"""Resolve a concrete domain for ``pipeline``."""
domain = pipeline.domain(default=self._default_domain)
if domain is GENERIC:
raise ValueError(
"Unable to determine domain for Pipeline.\n"
"Pass domain=<desired domain> to your Pipeline to set a "
"domain."
)
return domain
def _is_special_root_term(self, term):
return term is self._root_mask_term or term is self._root_mask_dates_term
def _resolve_hooks(self, hooks):
if hooks is None:
hooks = []
return DelegatingHooks(self._default_hooks + hooks)
def _ensure_can_load(self, loader, terms):
"""Ensure that ``loader`` can load ``terms``."""
if not loader.currency_aware:
bad = [t for t in terms if t.currency_conversion is not None]
if bad:
raise ValueError(
"Requested currency conversion is not supported for the "
"following terms:\n{}".format(bulleted_list(bad))
)
def _pipeline_output_index(dates, assets, mask):
"""
Create a MultiIndex for a pipeline output.
Parameters
----------
dates : pd.DatetimeIndex
Row labels for ``mask``.
assets : pd.Index
Column labels for ``mask``.
mask : np.ndarray[bool]
Mask array indicating date/asset pairs that should be included in
output index.
Returns
-------
index : pd.MultiIndex
MultiIndex containing (date, asset) pairs corresponding to ``True``
values in ``mask``.
"""
date_labels = repeat_last_axis(arange(len(dates)), len(assets))[mask]
asset_labels = repeat_first_axis(arange(len(assets)), len(dates))[mask]
return MultiIndex(
[dates, assets],
[date_labels, asset_labels],
# TODO: We should probably add names for these.
names=[None, None],
verify_integrity=False,
) | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/engine.py | engine.py |
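# Worked example (hedged): with two dates, two assets, and
# mask = [[True, False], [True, True]], the repeated-then-masked label arrays are
# date_labels = [0, 1, 1] and asset_labels = [0, 0, 1], giving MultiIndex entries
# (dates[0], assets[0]), (dates[1], assets[0]), (dates[1], assets[1]).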
from abc import ABCMeta, abstractproperty, abstractmethod
from bisect import insort
from collections.abc import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
record,
dtype as dtype_class,
ndarray,
)
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import classlazyval, lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
float64_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .domain import Domain, GENERIC, infer_domain
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(object, metaclass=ABCMeta):
"""
Base class for objects that can appear in the compute graph of a
:class:`zipline.pipeline.Pipeline`.
Notes
-----
Most Pipeline API users only interact with :class:`Term` via subclasses:
- :class:`~zipline.pipeline.data.BoundColumn`
- :class:`~zipline.pipeline.Factor`
- :class:`~zipline.pipeline.Filter`
- :class:`~zipline.pipeline.Classifier`
Instances of :class:`Term` are **memoized**. If you call a Term's
constructor with the same arguments twice, the same object will be returned
from both calls:
**Example:**
>>> from zipline.pipeline.data import EquityPricing
>>> from zipline.pipeline.factors import SimpleMovingAverage
>>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> x is y
True
.. warning::
Memoization of terms means that it's generally unsafe to modify
attributes of a term after construction.
"""
# These are NotSpecified because a subclass is required to provide them.
dtype = NotSpecified
missing_value = NotSpecified
# Subclasses aren't required to provide `params`. The default behavior is
# no params.
params = ()
# All terms are generic by default.
domain = GENERIC
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(
cls,
domain=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
window_safe=NotSpecified,
ndim=NotSpecified,
# params is explicitly not allowed to be passed to an instance.
*args,
**kwargs,
):
"""
Memoized constructor for Terms.
Caching previously-constructed Terms is useful because it allows us to
only compute equivalent sub-expressions once when traversing a Pipeline
dependency graph.
Caching previously-constructed Terms is **sane** because terms and
their inputs are both conceptually immutable.
"""
# Subclasses can override these class-level attributes to provide
# different default values for instances.
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args,
**kwargs,
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = (
super(Term, cls)
.__new__(cls)
._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args,
**kwargs,
)
)
return new_instance
@classmethod
def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
params : list[(str, object)]
A list of string, value pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__, name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
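# Params sketch (hedged; ``MyFactor`` is a hypothetical subclass): a subclass
# declaring ``params = ('exponent',)`` must be constructed with that keyword,
# e.g. ``MyFactor(exponent=2)``. _pop_params pops the value from kwargs, requires
# it to be hashable, and it is folded into the memoization key in _static_identity.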
def __init__(self, *args, **kwargs):
"""
Noop constructor to play nicely with our caching __new__. Subclasses
should implement _init instead of this method.
When a class' __new__ returns an instance of that class, Python will
automatically call __init__ on the object, even if a new object wasn't
actually constructed. Because we memoize instances, we often return an
object that was already initialized from __new__, in which case we
don't want to call __init__ again.
Subclasses that need to initialize new instances should override _init,
which is guaranteed to be called only once.
"""
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
from .mixins import SliceMixin
slice_type = type(self)._with_mixin(SliceMixin)
return slice_type(self, key)
@classmethod
def _static_identity(cls, domain, dtype, missing_value, window_safe, ndim, params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
"""
Parameters
----------
domain : zipline.pipeline.domain.Domain
The domain of this term.
dtype : np.dtype
Dtype of this term's output.
missing_value : object
Missing value for this term.
ndim : 1 or 2
The dimensionality of this term.
params : tuple[(str, hashable)]
Tuple of key/value pairs of additional parameters.
"""
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
"""
Assert that this term is well-formed. This should be called exactly
once, at the end of Term._init().
"""
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows):
"""
Calculate the number of extra rows needed to compute ``self``.
Must return at least ``min_extra_rows``, and the default implementation
is to just return ``min_extra_rows``. This is overridden by
downsampled terms to ensure that the first date computed is a
recomputation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. Must be at least
``min_extra_rows``.
"""
return min_extra_rows
@abstractproperty
def inputs(self):
"""
A tuple of other Terms needed as inputs for ``self``.
"""
raise NotImplementedError("inputs")
@abstractproperty
def windowed(self):
"""
Boolean indicating whether this term is a trailing-window computation.
"""
raise NotImplementedError("windowed")
@abstractproperty
def mask(self):
"""
A :class:`~zipline.pipeline.Filter` representing asset/date pairs to
while computing this Term. True means include; False means exclude.
"""
raise NotImplementedError("mask")
@abstractproperty
def dependencies(self):
"""
A dictionary mapping terms that must be computed before `self` to the
number of extra rows needed for those terms.
"""
raise NotImplementedError("dependencies")
def graph_repr(self):
"""A short repr to use when rendering GraphViz graphs."""
# Default graph_repr is just the name of the type.
return type(self).__name__
def recursive_repr(self):
"""A short repr to use when recursively rendering terms with inputs."""
# Default recursive_repr is just the name of the type.
return type(self).__name__
class AssetExists(Term):
"""
Pseudo-filter describing whether or not an asset existed on a given day.
This is the default mask for all terms that haven't been passed a mask
explicitly.
This is morally a Filter, in the sense that it produces a boolean value for
every asset on every date. We don't subclass Filter, however, because
`AssetExists` is computed directly by the PipelineEngine.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
See Also
--------
zipline.assets.AssetFinder.lifetimes
"""
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
"""
1-Dimensional term providing date labels for other term inputs.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
"""
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
"""
A Term that should be loaded from an external resource by a PipelineLoader.
This is the base class for :class:`zipline.pipeline.data.BoundColumn`.
"""
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
"""
A Term that should be computed from a tuple of inputs.
This is the base class for :class:`zipline.pipeline.Factor`,
:class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
"""
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
domain = NotSpecified
def __new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
domain=domain,
*args,
**kwargs,
):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
# Make sure all our inputs are valid pipeline objects before trying
# to infer a domain.
non_terms = [t for t in inputs if not isinstance(t, Term)]
if non_terms:
raise NonPipelineInputs(cls.__name__, non_terms)
if domain is NotSpecified:
domain = infer_domain(inputs)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
domain=domain,
*args,
**kwargs,
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, inputs, outputs, window_length, mask, *args, **kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
# Check inputs.
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if not isinstance(self.domain, Domain):
raise TypeError(
"Expected {}.domain to be an instance of Domain, "
"but got {}.".format(type(self).__name__, type(self.domain))
)
# Check outputs.
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm) if not attr.startswith("_")
]
# The name 'compute' is an added special case that is disallowed.
# Use insort to add it to the list in alphabetical order.
insort(disallowed_names, "compute")
for output in self.outputs:
if output.startswith("_") or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
# This isn't user error, this is a bug in our code.
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length > 1:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
"""
Subclasses should implement this to perform actual computation.
This is named ``_compute`` rather than just ``compute`` because
``compute`` is reserved for user-supplied functions in
CustomFilter/CustomFactor/CustomClassifier.
"""
raise NotImplementedError("_compute")
# NOTE: This is a method rather than a property because ABCMeta tries to
# access all abstract attributes of its child classes to see if
# they've been implemented. These accesses happen during subclass
# creation, before the new subclass has been bound to a name in its
# defining scope. Filter, Factor, and Classifier each implement this
# method to return themselves, but if the method is invoked before
# class definition is finished (which happens if this is a property),
# they fail with a NameError.
@classmethod
@abstractmethod
def _principal_computable_term_type(cls):
"""
Return the "principal" type for a ComputableTerm.
This returns either Filter, Factor, or Classifier, depending on the
type of ``cls``. It is used to implement behaviors like ``downsample``
and ``if_then_else`` that are implemented on all ComputableTerms, but
that need to produce different output types depending on the type of
the receiver.
"""
raise NotImplementedError("_principal_computable_term_type")
@lazyval
def windowed(self):
"""
Whether or not this term represents a trailing window computation.
If term.windowed is truthy, its compute_from_windows method will be
called with instances of AdjustedArray as inputs.
If term.windowed is falsey, its compute_from_baseline will be called
with instances of np.ndarray as inputs.
"""
return self.window_length is not NotSpecified and self.window_length > 0
@lazyval
def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
@expect_types(data=ndarray)
def postprocess(self, data):
"""
        Called with a result of ``self``, unravelled (i.e. 1-dimensional)
after any user-defined screens have been applied.
This is mostly useful for transforming the dtype of an output, e.g., to
convert a LabelArray into a pandas Categorical.
The default implementation is to just return data unchanged.
"""
# starting with pandas 1.4, record arrays are no longer supported as DataFrame columns
        if len(data) and isinstance(data[0], record):
return [tuple(r) for r in data]
return data
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array like value that the engine can consume.
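        Notes
        -----
        As an illustrative sketch (hypothetical toy data, not part of the
        engine contract), the reshaping performed here is equivalent to::
            import pandas as pd
            result = pd.Series(
                [1.0, 2.0],
                index=pd.MultiIndex.from_product(
                    [[pd.Timestamp('2017-03-13')], ['AAPL', 'MSFT']]
                ),
            )
            result.unstack().reindex(columns=['AAPL', 'MSFT']).values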
"""
return (
result.unstack()
.fillna(self.missing_value)
.reindex(columns=assets, fill_value=self.missing_value)
.values
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
"""
Make a term that computes from ``self`` at lower-than-daily frequency.
Parameters
----------
{frequency}
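        Examples
        --------
        An illustrative sketch, assuming ``some_factor`` is a factor defined
        elsewhere and using one of the documented frequency strings::
            monthly_factor = some_factor.downsample('month_start')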
"""
from .mixins import DownsampledMixin
downsampled_type = type(self)._with_mixin(DownsampledMixin)
return downsampled_type(term=self, frequency=frequency)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
"""
Make a term from ``self`` that names the expression.
Parameters
----------
{name}
Returns
-------
aliased : Aliased
``self`` with a name.
Notes
-----
This is useful for giving a name to a numerical or boolean expression.
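        Examples
        --------
        An illustrative sketch, assuming ``high_factor`` and ``low_factor``
        are factors defined elsewhere::
            daily_range = (high_factor - low_factor).alias('daily_range')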
"""
from .mixins import AliasedMixin
aliased_type = type(self)._with_mixin(AliasedMixin)
return aliased_type(term=self, name=name)
def isnull(self):
"""
A Filter producing True for values where this Factor has missing data.
Equivalent to self.isnan() when ``self.dtype`` is float64.
Otherwise equivalent to ``self.eq(self.missing_value)``.
Returns
-------
filter : zipline.pipeline.Filter
"""
if self.dtype == bool_dtype:
raise TypeError("isnull() is not supported for Filters")
from .filters import NullFilter
if self.dtype == float64_dtype:
# Using isnan is more efficient when possible because we can fold
# the isnan computation with other NumExpr expressions.
return self.isnan()
else:
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this Factor has complete data.
        Equivalent to ``~self.isnan()`` when ``self.dtype`` is float64.
Otherwise equivalent to ``(self != self.missing_value)``.
Returns
-------
filter : zipline.pipeline.Filter
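        Examples
        --------
        An illustrative sketch, assuming ``some_factor`` is a factor defined
        elsewhere; the resulting filter is commonly used as a pipeline
        screen::
            from zipline.pipeline import Pipeline
            pipe = Pipeline(
                columns=dict(factor=some_factor),
                screen=some_factor.notnull(),
            )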
"""
if self.dtype == bool_dtype:
raise TypeError("notnull() is not supported for Filters")
from .filters import NotNullFilter
return NotNullFilter(self)
def fillna(self, fill_value):
"""
Create a new term that fills missing values of this term's output with
``fill_value``.
Parameters
----------
fill_value : zipline.pipeline.ComputableTerm, or object.
Object to use as replacement for missing values.
If a ComputableTerm (e.g. a Factor) is passed, that term's results
will be used as fill values.
If a scalar (e.g. a number) is passed, the scalar will be used as a
fill value.
Examples
--------
**Filling with a Scalar:**
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 NaN 3.0 4.0
2017-03-14 1.5 2.5 NaN NaN
Then ``f.fillna(0)`` produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 0.0 3.0 4.0
2017-03-14 1.5 2.5 0.0 0.0
**Filling with a Term:**
Let ``f`` be as above, and let ``g`` be another Factor which would
produce the following output::
AAPL MSFT MCD BK
2017-03-13 10.0 20.0 30.0 40.0
2017-03-14 15.0 25.0 35.0 45.0
Then, ``f.fillna(g)`` produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 20.0 3.0 4.0
2017-03-14 1.5 2.5 35.0 45.0
Returns
-------
filled : zipline.pipeline.ComputableTerm
A term computing the same results as ``self``, but with missing
values filled in using values from ``fill_value``.
"""
if self.dtype == bool_dtype:
raise TypeError("fillna() is not supported for Filters")
if isinstance(fill_value, LoadableTerm):
            raise TypeError(
                "Can't use expression {} as a fill value. Did you mean to "
                "append '.latest'?".format(fill_value)
            )
elif isinstance(fill_value, ComputableTerm):
if_false = fill_value
else:
# Assume we got a scalar value. Make sure it's compatible with our
# dtype.
try:
fill_value = _coerce_to_dtype(fill_value, self.dtype)
except TypeError as e:
raise TypeError(
"Fill value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=type(self).__name__,
value=fill_value,
dtype=self.dtype,
error=e,
)
)
if_false = self._constant_type(
const=fill_value,
dtype=self.dtype,
missing_value=self.missing_value,
)
return self.notnull().if_else(if_true=self, if_false=if_false)
@classlazyval
def _constant_type(cls):
from .mixins import ConstantMixin
return cls._with_mixin(ConstantMixin)
@classlazyval
def _if_else_type(cls):
from .mixins import IfElseMixin
return cls._with_mixin(IfElseMixin)
def __repr__(self):
return ("{type}([{inputs}], {window_length})").format(
type=type(self).__name__,
inputs=", ".join(i.recursive_repr() for i in self.inputs),
window_length=self.window_length,
)
def recursive_repr(self):
return type(self).__name__ + "(...)"
@classmethod
def _with_mixin(cls, mixin_type):
return mixin_type.universal_mixin_specialization(
cls._principal_computable_term_type(),
)
def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
_coerce_to_dtype(missing_value, dtype)
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
    Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"String-dtype classifiers can only produce {types}.".format(
types=" or ".join([t.__name__ for t in label_types])
)
)
def _coerce_to_dtype(value, dtype):
if dtype == categorical_dtype:
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(value)
return value
else:
# For any other type, cast using the same rules as numpy's astype
# function with casting='same_kind'.
#
# 'same_kind' allows casting between things like float32 and float64,
# but not between str and int. Note that the name is somewhat
# misleading, since it does allow conversion between different dtype
# kinds in some cases. In particular, conversion from int to float is
# allowed.
return array([value]).astype(dtype=dtype, casting="same_kind")[0] | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/term.py | term.py |
from interface import implements
from zipline.utils.compat import ExitStack, contextmanager, wraps
from .iface import PipelineHooks, PIPELINE_HOOKS_CONTEXT_MANAGERS
from .no import NoHooks
def delegating_hooks_method(method_name):
"""Factory function for making DelegatingHooks methods."""
if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS:
# Generate a contextmanager that enters the context of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
@contextmanager
def ctx(self, *args, **kwargs):
with ExitStack() as stack:
for hook in self._hooks:
sub_ctx = getattr(hook, method_name)(*args, **kwargs)
stack.enter_context(sub_ctx)
yield stack
return ctx
else:
# Generate a method that calls methods of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
def method(self, *args, **kwargs):
for hook in self._hooks:
sub_method = getattr(hook, method_name)
sub_method(*args, **kwargs)
return method
class DelegatingHooks(implements(PipelineHooks)):
"""A PipelineHooks that delegates to one or more other hooks.
Parameters
----------
hooks : list[implements(PipelineHooks)]
Sequence of hooks to delegate to.
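    Examples
    --------
    An illustrative sketch combining the progress hooks and no-op hooks
    defined in this package::
        from zipline.pipeline.hooks.no import NoHooks
        from zipline.pipeline.hooks.progress import (
            ProgressHooks,
            TestingProgressPublisher,
        )
        hooks = DelegatingHooks([
            ProgressHooks.with_static_publisher(TestingProgressPublisher()),
            NoHooks(),
        ])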
"""
def __new__(cls, hooks):
if len(hooks) == 0:
# OPTIMIZATION: Short-circuit to a NoHooks if we don't have any
# sub-hooks.
return NoHooks()
elif len(hooks) == 1:
# OPTIMIZATION: Unwrap delegation layer if we only have one
# sub-hook.
return hooks[0]
else:
self = super(DelegatingHooks, cls).__new__(cls)
self._hooks = hooks
return self
# Implement all interface methods by delegating to corresponding methods on
# input hooks.
locals().update(
{
name: delegating_hooks_method(name)
# TODO: Expose this publicly on interface.
for name in PipelineHooks._signatures
}
)
del delegating_hooks_method | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/hooks/delegate.py | delegate.py |
from collections import namedtuple
import time
from interface import implements
from zipline.utils.compat import contextmanager, escape_html
from zipline.utils.string_formatting import bulleted_list
from .iface import PipelineHooks
class ProgressHooks(implements(PipelineHooks)):
"""
Hooks implementation for displaying progress.
Parameters
----------
publisher_factory : callable
Function producing a new object with a ``publish()`` method that takes
a ``ProgressModel`` and publishes progress to a consumer.
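    Examples
    --------
    An illustrative sketch using the static-publisher constructor defined
    below; the widget-based constructor additionally requires ipywidgets and
    IPython::
        publisher = TestingProgressPublisher()
        hooks = ProgressHooks.with_static_publisher(publisher)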
"""
def __init__(self, publisher_factory):
self._publisher_factory = publisher_factory
self._reset_transient_state()
def _reset_transient_state(self):
self._start_date = None
self._end_date = None
self._model = None
self._publisher = None
@classmethod
def with_widget_publisher(cls):
"""
Construct a ProgressHooks that publishes to Jupyter via
``IPython.display``.
"""
return cls(publisher_factory=IPythonWidgetProgressPublisher)
@classmethod
def with_static_publisher(cls, publisher):
"""Construct a ProgressHooks that uses an already-constructed publisher."""
return cls(publisher_factory=lambda: publisher)
def _publish(self):
self._publisher.publish(self._model)
@contextmanager
def running_pipeline(self, pipeline, start_date, end_date):
self._start_date = start_date
self._end_date = end_date
try:
yield
except Exception:
if self._model is None:
# This will only happen if an error happens in the Pipeline
                # Engine between entering `running_pipeline` and the first
# `computing_chunk` call. If that happens, just propagate the
# exception.
raise
self._model.finish(success=False)
self._publish()
raise
else:
self._model.finish(success=True)
self._publish()
finally:
self._reset_transient_state()
@contextmanager
def computing_chunk(self, terms, start_date, end_date):
# Set up model on first compute_chunk call.
if self._model is None:
self._publisher = self._publisher_factory()
self._model = ProgressModel(
start_date=self._start_date,
end_date=self._end_date,
)
try:
self._model.start_chunk(terms, start_date, end_date)
self._publish()
yield
finally:
self._model.finish_chunk(terms, start_date, end_date)
self._publish()
@contextmanager
def loading_terms(self, terms):
try:
self._model.start_load_terms(terms)
self._publish()
yield
finally:
self._model.finish_load_terms(terms)
self._publish()
@contextmanager
def computing_term(self, term):
try:
self._model.start_compute_term(term)
self._publish()
yield
finally:
self._model.finish_compute_term(term)
self._publish()
class ProgressModel(object):
"""
Model object for tracking progress of a Pipeline execution.
Parameters
----------
start_date : pd.Timestamp
Start date of the range over which ``plan`` will be computed.
end_date : pd.Timestamp
End date of the range over which ``plan`` will be computed.
Methods
-------
    start_chunk(terms, start_date, end_date)
    finish_chunk(terms, start_date, end_date)
start_load_terms(terms)
finish_load_terms(terms)
start_compute_term(term)
finish_compute_term(term)
finish(success)
Attributes
----------
state : {'init', 'loading', 'computing', 'error', 'success'}
Current state of the execution.
percent_complete : float
Percent of execution that has been completed, on a scale from 0 to 100.
execution_time : float
Number of seconds that the execution required. Only available if state
is 'error' or 'success'.
execution_bounds : (pd.Timestamp, pd.Timestamp)
Pair of (start_date, end_date) for the entire execution.
current_chunk_bounds : (pd.Timestamp, pd.Timestamp)
Pair of (start_date, end_date) for the currently executing chunk.
current_work : [zipline.pipeline.Term]
List of terms currently being loaded or computed.
"""
def __init__(self, start_date, end_date):
self._start_date = start_date
self._end_date = end_date
# +1 to be inclusive of end_date.
self._total_days = (end_date - start_date).days + 1
self._progress = 0.0
self._days_completed = 0
self._state = "init"
# Number of days in current chunk.
self._current_chunk_size = None
# (start_date, end_date) of current chunk.
self._current_chunk_bounds = None
# How much should we increment progress by after completing a term?
self._completed_term_increment = None
# How much should we increment progress by after completing a chunk?
# This is zero unless we compute a pipeline with no terms, in which
# case it will be the full chunk percentage.
self._completed_chunk_increment = None
# Terms currently being computed.
self._current_work = None
# Tracking state for total elapsed time.
self._start_time = time.time()
self._end_time = None
# These properties form the interface for Publishers.
@property
def state(self):
return self._state
@property
def percent_complete(self):
return round(self._progress * 100.0, 3)
@property
def execution_time(self):
if self._end_time is None:
raise ValueError("Can't get execution_time until execution is complete.")
return self._end_time - self._start_time
@property
def execution_bounds(self):
return (self._start_date, self._end_date)
@property
def current_chunk_bounds(self):
return self._current_chunk_bounds
@property
def current_work(self):
return self._current_work
# These methods form the interface for ProgressHooks.
def start_chunk(self, terms, start_date, end_date):
days_since_start = (end_date - self._start_date).days + 1
self._current_chunk_size = days_since_start - self._days_completed
self._current_chunk_bounds = (start_date, end_date)
# What percent of our overall progress will happen in this chunk?
chunk_percent = float(self._current_chunk_size) / self._total_days
# How much of that is associated with each completed term?
nterms = len(terms)
if nterms:
self._completed_term_increment = chunk_percent / len(terms)
self._completed_chunk_increment = 0.0
else:
# Special case. If we don't have any terms, increment the entire
# chunk's worth of progress when we finish the chunk.
self._completed_term_increment = 0.0
self._completed_chunk_increment = chunk_percent
def finish_chunk(self, terms, start_date, end_date):
self._days_completed += self._current_chunk_size
self._progress += self._completed_chunk_increment
def start_load_terms(self, terms):
self._state = "loading"
self._current_work = terms
def finish_load_terms(self, terms):
self._finish_terms(nterms=len(terms))
def start_compute_term(self, term):
self._state = "computing"
self._current_work = [term]
def finish_compute_term(self, term):
self._finish_terms(nterms=1)
def finish(self, success):
self._end_time = time.time()
if success:
self._state = "success"
else:
self._state = "error"
def _finish_terms(self, nterms):
self._progress += nterms * self._completed_term_increment
try:
import ipywidgets
HAVE_WIDGETS = True
    # This VBox subclass exists to work around a strange display issue
# where the repr of the progress bar sometimes gets re-displayed upon
# re-opening the notebook, even after the bar has closed. The repr of VBox
# is somewhat noisy, so we replace it here with a version that just returns
# an empty string.
class ProgressBarContainer(ipywidgets.VBox):
def __repr__(self):
return ""
except ImportError:
HAVE_WIDGETS = False
try:
from IPython.display import display, HTML as IPython_HTML
HAVE_IPYTHON = True
except ImportError:
HAVE_IPYTHON = False
# XXX: This class is currently untested, because we don't require ipywidgets as
# a test dependency. Be careful if you make changes to this.
class IPythonWidgetProgressPublisher(object):
"""A progress publisher that publishes to an IPython/Jupyter widget."""
def __init__(self):
missing = []
if not HAVE_WIDGETS:
missing.append("ipywidgets")
elif not HAVE_IPYTHON:
missing.append("IPython")
if missing:
raise ValueError(
"IPythonWidgetProgressPublisher needs ipywidgets and IPython:"
"\nMissing:\n{}".format(bulleted_list(missing))
)
# Heading for progress display.
self._heading = ipywidgets.HTML()
# Percent Complete Indicator to the left of the bar.
indicator_width = "120px"
self._percent_indicator = ipywidgets.HTML(
layout={"width": indicator_width},
)
# The progress bar itself.
self._bar = ipywidgets.FloatProgress(
value=0.0,
min=0.0,
max=100.0,
bar_style="info",
# Leave enough space for the percent indicator.
layout={"width": "calc(100% - {})".format(indicator_width)},
)
bar_and_percent = ipywidgets.HBox([self._percent_indicator, self._bar])
        # Collapsible details tab underneath the progress bar.
self._details_body = ipywidgets.HTML()
self._details_tab = ipywidgets.Accordion(
children=[self._details_body],
selected_index=None, # Start in collapsed state.
layout={
# Override default border settings to make details tab less
# heavy.
"border": "1px",
},
)
# There's no public interface for setting title in the constructor :/.
self._details_tab.set_title(0, "Details")
# Container for the combined widget.
self._layout = ProgressBarContainer(
[
self._heading,
bar_and_percent,
self._details_tab,
],
# Overall layout consumes 75% of the page.
layout={"width": "75%"},
)
self._displayed = False
def publish(self, model):
if model.state == "init":
self._heading.value = "<b>Analyzing Pipeline...</b>"
self._set_progress(0.0)
self._ensure_displayed()
elif model.state in ("loading", "computing"):
term_list = self._render_term_list(model.current_work)
if model.state == "loading":
details_heading = "<b>Loading Inputs:</b>"
else:
details_heading = "<b>Computing Expression:</b>"
self._details_body.value = details_heading + term_list
chunk_start, chunk_end = model.current_chunk_bounds
self._heading.value = (
"<b>Running Pipeline</b>: Chunk Start={}, Chunk End={}".format(
chunk_start.date(), chunk_end.date()
)
)
self._set_progress(model.percent_complete)
self._ensure_displayed()
elif model.state == "success":
# Replace widget layout with html that can be persisted.
self._stop_displaying()
display(
IPython_HTML(
"<b>Pipeline Execution Time:</b> {}".format(
self._format_execution_time(model.execution_time)
)
),
)
elif model.state == "error":
self._bar.bar_style = "danger"
self._stop_displaying()
else:
self._layout.close()
raise ValueError("Unknown display state: {!r}".format(model.state))
def _ensure_displayed(self):
if not self._displayed:
display(self._layout)
self._displayed = True
def _stop_displaying(self):
self._layout.close()
@staticmethod
def _render_term_list(terms):
list_elements = "".join(
["<li><pre>{}</pre></li>".format(repr_htmlsafe(t)) for t in terms]
)
return "<ul>{}</ul>".format(list_elements)
def _set_progress(self, percent_complete):
self._bar.value = percent_complete
self._percent_indicator.value = "<b>{:.2f}% Complete</b>".format(
percent_complete
)
@staticmethod
def _format_execution_time(total_seconds):
"""Helper method for displaying total execution time of a Pipeline.
Parameters
----------
total_seconds : float
Number of seconds elapsed.
Returns
-------
formatted : str
User-facing text representation of elapsed time.
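        Examples
        --------
        For illustration::
            _format_execution_time(3905)
            # -> '1 Hour, 5 Minutes, 5.00 Seconds'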
"""
def maybe_s(n):
if n == 1:
return ""
return "s"
minutes, seconds = divmod(total_seconds, 60)
minutes = int(minutes)
if minutes >= 60:
hours, minutes = divmod(minutes, 60)
t = "{hours} Hour{hs}, {minutes} Minute{ms}, {seconds:.2f} Seconds"
return t.format(
hours=hours,
hs=maybe_s(hours),
minutes=minutes,
ms=maybe_s(minutes),
seconds=seconds,
)
elif minutes >= 1:
t = "{minutes} Minute{ms}, {seconds:.2f} Seconds"
return t.format(
minutes=minutes,
ms=maybe_s(minutes),
seconds=seconds,
)
else:
return "{seconds:.2f} Seconds".format(seconds=seconds)
class TestingProgressPublisher(object):
"""A progress publisher that records a trace of model states for testing."""
TraceState = namedtuple(
"TraceState",
[
"state",
"percent_complete",
"execution_bounds",
"current_chunk_bounds",
"current_work",
],
)
def __init__(self):
self.trace = []
def publish(self, model):
self.trace.append(
self.TraceState(
state=model.state,
percent_complete=model.percent_complete,
execution_bounds=model.execution_bounds,
current_chunk_bounds=model.current_chunk_bounds,
current_work=model.current_work,
),
)
def repr_htmlsafe(t):
"""Repr a value and html-escape the result.
If an error is thrown by the repr, show a placeholder.
"""
try:
r = repr(t)
except Exception:
r = "(Error Displaying {})".format(type(t).__name__)
return escape_html(str(r), quote=True) | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/hooks/progress.py | progress.py |
from zipline.utils.compat import contextmanager as _contextmanager
from interface import Interface
# Keep track of which methods of PipelineHooks are contextmanagers. Used by
# DelegatingHooks to properly delegate to sub-hooks.
PIPELINE_HOOKS_CONTEXT_MANAGERS = set()
def contextmanager(f):
"""
Wrapper for contextlib.contextmanager that tracks which methods of
PipelineHooks are contextmanagers in CONTEXT_MANAGER_METHODS.
"""
PIPELINE_HOOKS_CONTEXT_MANAGERS.add(f.__name__)
return _contextmanager(f)
class PipelineHooks(Interface):
"""
Interface for instrumenting SimplePipelineEngine executions.
Methods with names like 'on_event()' should be normal methods. They will be
called by the engine after the corresponding event.
Methods with names like 'doing_thing()' should be context managers. They
will be entered by the engine around the corresponding event.
Methods
-------
    running_pipeline(self, pipeline, start_date, end_date)
computing_chunk(self, terms, start_date, end_date)
loading_terms(self, terms)
    computing_term(self, term)
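    Examples
    --------
    A minimal sketch of a conforming implementation (hypothetical; the no-op
    and progress hooks shipped alongside this module follow the same
    pattern)::
        from interface import implements
        from zipline.utils.compat import contextmanager
        class PrintingHooks(implements(PipelineHooks)):
            @contextmanager
            def running_pipeline(self, pipeline, start_date, end_date):
                print("running pipeline:", start_date, "->", end_date)
                yield
            @contextmanager
            def computing_chunk(self, terms, start_date, end_date):
                yield
            @contextmanager
            def loading_terms(self, terms):
                yield
            @contextmanager
            def computing_term(self, term):
                yield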
"""
@contextmanager
def running_pipeline(self, pipeline, start_date, end_date):
"""
Contextmanager entered during execution of run_pipeline or
run_chunked_pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline being executed.
start_date : pd.Timestamp
First date of the execution.
end_date : pd.Timestamp
Last date of the execution.
"""
@contextmanager
def computing_chunk(self, terms, start_date, end_date):
"""
Contextmanager entered during execution of compute_chunk.
Parameters
----------
terms : list[zipline.pipeline.Term]
List of terms, in execution order, that will be computed. This
value may change between chunks if ``populate_initial_workspace``
prepopulates different terms at different times.
start_date : pd.Timestamp
First date of the chunk.
end_date : pd.Timestamp
Last date of the chunk.
"""
@contextmanager
def loading_terms(self, terms):
"""Contextmanager entered when loading a batch of LoadableTerms.
Parameters
----------
terms : list[zipline.pipeline.LoadableTerm]
Terms being loaded.
"""
@contextmanager
def computing_term(self, term):
"""Contextmanager entered when computing a ComputableTerm.
Parameters
----------
        term : zipline.pipeline.ComputableTerm
            Term being computed.
""" | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/hooks/iface.py | iface.py |
from numpy import (
abs,
average,
clip,
diff,
dstack,
inf,
)
from numexpr import evaluate
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nanmin,
)
from zipline.utils.numpy_utils import rolling_window
from .basic import exponential_weights
from .basic import ( # noqa reexport
# These are re-exported here for backwards compatibility with the old
# definition site.
LinearWeightedMovingAverage,
MaxDrawdown,
SimpleMovingAverage,
VWAP,
WeightedAverageValue,
)
class RSI(SingleInputMixin, CustomFactor):
"""
Relative Strength Index
**Default Inputs**: :data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length**: 15
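    Examples
    --------
    An illustrative sketch; as with any ``CustomFactor``, the window length
    can also be overridden at construction time::
        from zipline.pipeline import Pipeline
        from zipline.pipeline.factors import RSI
        pipe = Pipeline(columns=dict(rsi=RSI(), rsi_fast=RSI(window_length=10)))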
"""
window_length = 15
inputs = (EquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={"ups": ups, "downs": downs},
global_dict={},
out=out,
)
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
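    Examples
    --------
    An illustrative sketch computing 20-day bands at two standard deviations;
    each band is exposed as a separate output attribute::
        from zipline.pipeline import Pipeline
        from zipline.pipeline.factors import BollingerBands
        bbands = BollingerBands(window_length=20, k=2)
        pipe = Pipeline(columns=dict(
            lower=bbands.lower,
            middle=bbands.middle,
            upper=bbands.upper,
        ))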
"""
params = ("k",)
inputs = (EquityPricing.close,)
outputs = "lower", "middle", "upper"
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator
    **Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
""" # noqa
inputs = (EquityPricing.low, EquityPricing.high)
outputs = ("down", "up")
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
"(100 * high_date_index) / (wl - 1)",
local_dict={
"high_date_index": high_date_index,
"wl": wl,
},
out=out.up,
)
evaluate(
"(100 * low_date_index) / (wl - 1)",
local_dict={
"low_date_index": low_date_index,
"wl": wl,
},
out=out.down,
)
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
"((tc - ll) / (hh - ll)) * 100",
local_dict={
"tc": today_closes,
"ll": lowest_lows,
"hh": highest_highs,
},
global_dict={},
out=out,
)
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
        The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
        The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
""" # noqa
params = {
"tenkan_sen_length": 9,
"kijun_sen_length": 26,
"chikou_span_length": 26,
}
inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close)
outputs = (
"tenkan_sen",
"kijun_sen",
"senkou_span_a",
"senkou_span_b",
"chikou_span",
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
"%s must be <= the window_length: %s > %s"
% (
k,
v,
self.window_length,
),
)
def compute(
self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length,
):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) + low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) + low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, equals window length
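    Examples
    --------
    An illustrative sketch; this factor declares no default inputs or window
    length, so both must be supplied explicitly::
        from zipline.pipeline import Pipeline
        from zipline.pipeline.data import EquityPricing
        roc_10d = RateOfChangePercentage(
            inputs=[EquityPricing.close],
            window_length=10,
        )
        pipe = Pipeline(columns=dict(roc=roc_10d))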
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate(
"((tc - pc) / pc) * 100",
local_dict={"tc": today_close, "pc": prev_close},
global_dict={},
out=out,
)
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
EquityPricing.high,
EquityPricing.low,
EquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack(
(
high_to_low,
high_to_prev_close,
low_to_prev_close,
)
),
2,
)
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
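    Examples
    --------
    An illustrative sketch using the conventional 12/26/9 parameters (the
    class is also available under the shorter ``MACDSignal`` alias defined at
    the bottom of this module)::
        from zipline.pipeline import Pipeline
        macd = MovingAverageConvergenceDivergenceSignal(
            fast_period=12,
            slow_period=26,
            signal_period=9,
        )
        pipe = Pipeline(columns=dict(macd_signal=macd))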
"""
inputs = (EquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ("fast_period", "slow_period", "signal_period")
@expect_bounded(
__funcname="MACDSignal",
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls, fast_period=12, slow_period=26, signal_period=9, *args, **kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args,
**kwargs,
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(data, axis=1, weights=exponential_weights(length, decay_rate))
def compute(
self, today, assets, out, close, fast_period, slow_period, signal_period
):
slow_EWMA = self._ewma(rolling_window(close, slow_period), slow_period)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:], fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
# Convenience aliases.
MACDSignal = MovingAverageConvergenceDivergenceSignal | zipline-crypto | /zipline_crypto-0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/zipline/pipeline/factors/technical.py | technical.py |
from numexpr import evaluate
import numpy as np
from numpy import broadcast_arrays
from scipy.stats import (
linregress,
spearmanr,
)
from zipline.assets import Asset
from zipline.errors import IncompatibleTerms
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.filters import SingleAsset
from zipline.pipeline.mixins import StandardOutputs
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists
from zipline.utils.input_validation import (
expect_bounded,
expect_dtypes,
expect_types,
)
from zipline.utils.math_utils import nanmean
from zipline.utils.numpy_utils import (
float64_dtype,
int64_dtype,
)
from .basic import Returns
ALLOWED_DTYPES = (float64_dtype, int64_dtype)
class _RollingCorrelation(CustomFactor):
@expect_dtypes(base_factor=ALLOWED_DTYPES, target=ALLOWED_DTYPES)
@expect_bounded(correlation_length=(2, None))
def __new__(cls, base_factor, target, correlation_length, mask=NotSpecified):
if target.ndim == 2 and base_factor.mask is not target.mask:
raise IncompatibleTerms(term_1=base_factor, term_2=target)
return super(_RollingCorrelation, cls).__new__(
cls,
inputs=[base_factor, target],
window_length=correlation_length,
mask=mask,
)
class RollingPearson(_RollingCorrelation):
"""
A Factor that computes pearson correlation coefficients between the columns
of a given Factor and either the columns of another Factor/BoundColumn or a
slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.pearsonr`
:meth:`Factor.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
Notes
-----
Most users should call Factor.pearsonr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
vectorized_pearson_r(
base_data,
target_data,
allowed_missing=0,
out=out,
)
class RollingSpearman(_RollingCorrelation):
"""
A Factor that computes spearman rank correlation coefficients between the
columns of a given Factor and either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
Notes
-----
Most users should call Factor.spearmanr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = spearmanr(base_data[:, i], target_data[:, i])[0]
class RollingLinearRegression(CustomFactor):
"""
A Factor that performs an ordinary least-squares regression predicting the
columns of a given Factor from either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
dependent : zipline.pipeline.Factor
The factor whose columns are the predicted/dependent variable of each
regression with `independent`.
independent : zipline.pipeline.slice.Slice or zipline.pipeline.Factor
The factor/slice whose columns are the predictor/independent variable
of each regression with `dependent`. If `independent` is a Factor,
regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `dependent` should be
regressed against `independent` each day.
See Also
--------
:func:`scipy.stats.linregress`
:meth:`Factor.linear_regression`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
Notes
-----
Most users should call Factor.linear_regression rather than directly
construct an instance of this class.
"""
outputs = ["alpha", "beta", "r_value", "p_value", "stderr"]
@expect_dtypes(dependent=ALLOWED_DTYPES, independent=ALLOWED_DTYPES)
@expect_bounded(regression_length=(2, None))
def __new__(cls, dependent, independent, regression_length, mask=NotSpecified):
if independent.ndim == 2 and dependent.mask is not independent.mask:
raise IncompatibleTerms(term_1=dependent, term_2=independent)
return super(RollingLinearRegression, cls).__new__(
cls,
inputs=[dependent, independent],
window_length=regression_length,
mask=mask,
)
def compute(self, today, assets, out, dependent, independent):
alpha = out.alpha
beta = out.beta
r_value = out.r_value
p_value = out.p_value
stderr = out.stderr
def regress(y, x):
regr_results = linregress(y=y, x=x)
# `linregress` returns its results in the following order:
# slope, intercept, r-value, p-value, stderr
alpha[i] = regr_results[1]
beta[i] = regr_results[0]
r_value[i] = regr_results[2]
p_value[i] = regr_results[3]
stderr[i] = regr_results[4]
# If `independent` is a Slice or single column of data, broadcast it
# out to the same shape as `dependent`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
independent = broadcast_arrays(independent, dependent)[0]
for i in range(len(out)):
regress(y=dependent[:, i], x=independent[:, i])
class RollingPearsonOfReturns(RollingPearson):
"""
Calculates the Pearson product-moment correlation coefficient of the
returns of the given asset with the returns of all other assets.
Pearson correlation is what most people mean when they say "correlation
coefficient" or "R-value".
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
    correlation_length : int >= 2
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in SPY's rolling returns correlation with each
stock from 2017-03-17 to 2017-03-22, using a 5-day look back window (that
is, we calculate each correlation coefficient over 5 days of data). We can
achieve this by doing::
rolling_correlations = RollingPearsonOfReturns(
target=sid(8554),
returns_length=10,
correlation_length=5,
)
The result of computing ``rolling_correlations`` from 2017-03-17 to
2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .15 -.96
2017-03-20 1 .10 -.96
2017-03-21 1 -.16 -.94
2017-03-22 1 -.16 -.85
Note that the column for SPY is all 1's, as the correlation of any data
series with itself is always 1. To understand how each of the other values
were calculated, take for example the .15 in MSFT's column. This is the
correlation coefficient between SPY's returns looking back from 2017-03-17
(-.03, -.02, -.01, 0, .01) and MSFT's returns (.03, -.03, .02, -.02, .04).
See Also
--------
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls, target, returns_length, correlation_length, mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingPearsonOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingSpearmanOfReturns(RollingSpearman):
"""
Calculates the Spearman rank correlation coefficient of the returns of the
given asset with the returns of all other assets.
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
    correlation_length : int >= 2
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls, target, returns_length, correlation_length, mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingSpearmanOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingLinearRegressionOfReturns(RollingLinearRegression):
"""
Perform an ordinary least-squares regression predicting the returns of all
other assets on the given asset.
Parameters
----------
target : zipline.assets.Asset
The asset to regress against all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
    regression_length : int >= 2
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed against the target
asset each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which regressions are computed.
This factor is designed to return five outputs:
- alpha, a factor that computes the intercepts of each regression.
- beta, a factor that computes the slopes of each regression.
- r_value, a factor that computes the correlation coefficient of each
regression.
- p_value, a factor that computes, for each regression, the two-sided
p-value for a hypothesis test whose null hypothesis is that the slope is
zero.
- stderr, a factor that computes the standard error of the estimate of each
regression.
For more help on factors with multiple outputs, see
:class:`zipline.pipeline.CustomFactor`.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in predicting each stock's returns from SPY's
over rolling 5-day look back windows. We can compute rolling regression
coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing::
        regression_factor = RollingLinearRegressionOfReturns(
target=sid(8554),
returns_length=10,
regression_length=5,
)
alpha = regression_factor.alpha
beta = regression_factor.beta
The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 0 .011 .003
2017-03-20 0 -.004 .004
2017-03-21 0 .007 .006
2017-03-22 0 .002 .008
And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .3 -1.1
2017-03-20 1 .2 -1
2017-03-21 1 -.3 -1
2017-03-22 1 -.3 -.9
Note that SPY's column for alpha is all 0's and for beta is all 1's, as the
regression line of SPY with itself is simply the function y = x.
To understand how each of the other values were calculated, take for
example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3,
respectively). These values are the result of running a linear regression
predicting MSFT's returns from SPY's returns, using values starting at
2017-03-17 and looking back 5 days. That is, the regression was run with
x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it
produced a slope of .3 and an intercept of .011.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
"""
window_safe = True
def __new__(cls, target, returns_length, regression_length, mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingLinearRegressionOfReturns, cls).__new__(
cls,
dependent=returns,
independent=returns[target],
regression_length=regression_length,
mask=mask,
)
class SimpleBeta(CustomFactor, StandardOutputs):
"""
    Factor producing the slope of a regression line between each asset's daily
    returns and the daily returns of a single "target" asset.
Parameters
----------
target : zipline.Asset
Asset against which other assets should be regressed.
regression_length : int
Number of days of daily returns to use for the regression.
allowed_missing_percentage : float, optional
Percentage of returns observations (between 0 and 1) that are allowed
to be missing when calculating betas. Assets with more than this
percentage of returns observations missing will produce values of
NaN. Default behavior is that 25% of inputs can be missing.
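    Examples
    --------
    An illustrative sketch, following the ``sid(8554)`` convention used in
    the examples elsewhere in this module to look up the target Asset::
        beta_to_spy = SimpleBeta(
            target=sid(8554),
            regression_length=252,
        )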
"""
window_safe = True
dtype = float64_dtype
params = ("allowed_missing_count",)
@expect_types(
target=Asset,
regression_length=int,
allowed_missing_percentage=(int, float),
__funcname="SimpleBeta",
)
@expect_bounded(
regression_length=(3, None),
allowed_missing_percentage=(0.0, 1.0),
__funcname="SimpleBeta",
)
def __new__(cls, target, regression_length, allowed_missing_percentage=0.25):
daily_returns = Returns(
window_length=2,
mask=(AssetExists() | SingleAsset(asset=target)),
)
allowed_missing_count = int(allowed_missing_percentage * regression_length)
return super(SimpleBeta, cls).__new__(
cls,
inputs=[daily_returns, daily_returns[target]],
window_length=regression_length,
allowed_missing_count=allowed_missing_count,
)
def compute(
self, today, assets, out, all_returns, target_returns, allowed_missing_count
):
vectorized_beta(
dependents=all_returns,
independent=target_returns,
allowed_missing=allowed_missing_count,
out=out,
)
def graph_repr(self):
return "{}({!r}, {}, {})".format(
type(self).__name__,
str(self.target.symbol), # coerce from unicode to str in py2.
self.window_length,
self.params["allowed_missing_count"],
)
@property
def target(self):
"""Get the target of the beta calculation."""
return self.inputs[1].asset
def __repr__(self):
return "{}({}, length={}, allowed_missing={})".format(
type(self).__name__,
self.target,
self.window_length,
self.params["allowed_missing_count"],
)
def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
        more than this many missing observations in either ``dependents`` or
        ``independent`` will output NaN as the regression coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
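    Examples
    --------
    An illustrative sketch with synthetic data; because the dependent columns
    are exact multiples of ``independent`` and nothing is missing, the
    recovered slopes are (up to floating point) the multipliers used to build
    them::
        import numpy as np
        independent = np.random.standard_normal((252, 1))
        dependents = np.hstack([2.0 * independent, -0.5 * independent])
        vectorized_beta(dependents, independent, allowed_missing=0)
        # -> approximately array([ 2. , -0.5])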
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
# Write nans back to locations where we have more than the allowed number of
# missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out
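# Illustrative sketch (not part of the original zipline source): a tiny,
# self-contained check of ``vectorized_beta`` on data with no missing
# observations. With no NaNs, each result should match the slope recovered by
# ``np.polyfit`` for the corresponding column. The ``_example_`` helper name
# is added here purely for illustration.
def _example_vectorized_beta():
    rng = np.random.RandomState(0)
    independent = rng.randn(250, 1)                          # shape (N, 1)
    true_betas = np.array([0.5, 1.0, 1.5])
    dependents = independent * true_betas + 0.01 * rng.randn(250, 3)
    estimated = vectorized_beta(dependents, independent, allowed_missing=0)
    reference = np.array(
        [np.polyfit(independent[:, 0], dependents[:, i], 1)[0] for i in range(3)]
    )
    assert np.allclose(estimated, reference)
    return estimated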
def vectorized_pearson_r(dependents, independents, allowed_missing, out=None):
"""
Compute Pearson's r between columns of ``dependents`` and ``independents``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be correlated against ``independents``.
independents : np.array[N, M] or np.array[N, 1]
Independent variable(s) of the regression. If a single column is
passed, it is broadcast to the shape of ``dependents``.
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in either ``dependents`` or
``independents`` will output NaN as the correlation coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
correlations : np.array[M]
Pearson correlation coefficients for each column of ``dependents``.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearson`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
"""
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
if allowed_missing > 0:
# If we're handling nans robustly, we need to mask both arrays to
# locations where either was nan.
either_nan = isnan(dependents) | isnan(independents)
independents = np.where(either_nan, nan, independents)
dependents = np.where(either_nan, nan, dependents)
mean = nanmean
else:
# Otherwise, we can just use mean, which will give us a nan for any
# column where there's ever a nan.
mean = np.mean
# Pearson R is Cov(X, Y) / StdDev(X) * StdDev(Y)
# c.f. https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
ind_residual = independents - mean(independents, axis=0)
dep_residual = dependents - mean(dependents, axis=0)
ind_variance = mean(ind_residual ** 2, axis=0)
dep_variance = mean(dep_residual ** 2, axis=0)
covariances = mean(ind_residual * dep_residual, axis=0)
evaluate(
"where(mask, nan, cov / sqrt(ind_variance * dep_variance))",
local_dict={
"cov": covariances,
"mask": isnan(independents).sum(axis=0) > allowed_missing,
"nan": np.nan,
"ind_variance": ind_variance,
"dep_variance": dep_variance,
},
global_dict={},
out=out,
)
return out
import numpy as np
from operator import attrgetter
from numbers import Number
from math import ceil
from textwrap import dedent
from numpy import empty_like, inf, isnan, nan, where
from scipy.stats import rankdata
from zipline.utils.compat import wraps
from zipline.errors import (
BadPercentileBounds,
UnknownRankMethod,
UnsupportedDataType,
)
from zipline.lib.normalize import naive_grouped_rowwise_apply
from zipline.lib.rank import masked_rankdata_2d, rankdata_1d_descending
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.classifiers import Classifier, Everything, Quantiles
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
MaximumFilter,
)
from zipline.pipeline.mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.sentinels import NotSpecified, NotSpecifiedType
from zipline.pipeline.term import AssetExists, ComputableTerm, Term
from zipline.utils.functional import with_doc, with_name
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanmax,
nanmean,
nanmedian,
nanmin,
nanstd,
nansum,
)
from zipline.utils.numpy_utils import (
as_column,
bool_dtype,
coerce_to_dtype,
float64_dtype,
is_missing,
)
from zipline.utils.sharedoc import templated_docstring
_RANK_METHODS = frozenset(["average", "min", "max", "dense", "ordinal"])
def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
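# Illustrative sketch (not part of the original source): the dtype rules above,
# exercised directly. The ``_example_`` helper is added for illustration only.
def _example_binop_return_dtype():
    # Comparisons between equal dtypes produce bools; arithmetic on float64
    # stays float64; arithmetic on anything else is rejected.
    assert binop_return_dtype("<", float64_dtype, float64_dtype) == bool_dtype
    assert binop_return_dtype("+", float64_dtype, float64_dtype) == float64_dtype
    try:
        binop_return_dtype("+", bool_dtype, float64_dtype)
    except TypeError:
        pass  # expected: arithmetic requires float64 on both sides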
BINOP_DOCSTRING_TEMPLATE = """
Construct a :class:`~zipline.pipeline.{rtype}` computing ``self {op} other``.
Parameters
----------
other : zipline.pipeline.Factor, float
Right-hand side of the expression.
Returns
-------
{ret}
"""
BINOP_RETURN_FILTER = """\
filter : zipline.pipeline.Filter
Filter computing ``self {op} other`` with the outputs of ``self`` and
``other``.
"""
BINOP_RETURN_FACTOR = """\
factor : zipline.pipeline.Factor
Factor computing ``self {op} other`` with outputs of ``self`` and
``other``.
"""
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
is_compare = is_comparison(op)
if is_compare:
ret_doc = BINOP_RETURN_FILTER.format(op=op)
rtype = "Filter"
else:
ret_doc = BINOP_RETURN_FACTOR.format(op=op)
rtype = "Factor"
docstring = BINOP_DOCSTRING_TEMPLATE.format(
op=op,
ret=ret_doc,
rtype=rtype,
)
@with_doc(docstring)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = NumExprFilter if is_compare else NumExprFactor
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op,
other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
raise BadBinaryOperator(op, self, other)
return binary_operator
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(op, other)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {"-"}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
docstring = dedent(
"""\
Construct a Factor that computes ``{}()`` on each output of ``self``.
Returns
-------
factor : zipline.pipeline.Factor
""".format(
func
)
)
@with_doc(docstring)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
# Decorators for Factor methods.
if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}.\n"
"{method_name}() is only defined for dtype {expected_dtype}.\n"
"To filter missing data, use isnull() or notnull()."
),
)
float64_only = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() is only defined on Factors of dtype {expected_dtype},"
" but it was called on a Factor of dtype {received_dtype}."
),
)
CORRELATION_METHOD_NOTE = dedent(
"""\
This method can only be called on expressions which are deemed safe for use
as inputs to windowed :class:`~zipline.pipeline.Factor` objects. Examples
of such expressions include
:class:`~zipline.pipeline.data.BoundColumn`,
:class:`~zipline.pipeline.factors.Returns`, and any factors created from
:meth:`~zipline.pipeline.Factor.rank` or
:meth:`~zipline.pipeline.Factor.zscore`.
"""
)
class summary_funcs(object):
"""Namespace of functions meant to be used with DailySummary."""
@staticmethod
def mean(a, missing_value):
return nanmean(a, axis=1)
@staticmethod
def stddev(a, missing_value):
return nanstd(a, axis=1)
@staticmethod
def max(a, missing_value):
return nanmax(a, axis=1)
@staticmethod
def min(a, missing_value):
return nanmin(a, axis=1)
@staticmethod
def median(a, missing_value):
return nanmedian(a, axis=1)
@staticmethod
def sum(a, missing_value):
return nansum(a, axis=1)
@staticmethod
def notnull_count(a, missing_value):
return (~is_missing(a, missing_value)).sum(axis=1)
names = {k for k in locals() if not k.startswith("_")}
def summary_method(name):
func = getattr(summary_funcs, name)
@expect_types(mask=(Filter, NotSpecifiedType))
@float64_only
def f(self, mask=NotSpecified):
"""Create a 1-dimensional factor computing the {} of self, each day.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing results.
If supplied, we ignore asset/date pairs where ``mask`` produces
``False``.
Returns
-------
result : zipline.pipeline.Factor
"""
return DailySummary(
func,
self,
mask=mask,
dtype=self.dtype,
)
f.__name__ = func.__name__
f.__doc__ = f.__doc__.format(f.__name__)
return f
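# Illustrative sketch (not part of the original source): once the summary
# methods generated above are attached to ``Factor`` (defined below), each one
# returns a 1-dimensional DailySummary term. The import is local, so nothing
# runs unless the helper is actually called.
def _example_summary_methods():
    from zipline.pipeline.factors import Returns
    daily_returns = Returns(window_length=2)
    market_mean = daily_returns.mean()    # cross-sectional mean, one value per day
    dispersion = daily_returns.stddev()   # cross-sectional standard deviation
    return market_mean, dispersion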
class Factor(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline API expression producing a numerical or date-valued output.
Factors are the most commonly-used Pipeline term, representing the result
of any computation producing a numerical result.
Factors can be combined, both with other Factors and with scalar values,
via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).
This makes it easy to write complex expressions that combine multiple
Factors. For example, constructing a Factor that computes the average of
two other Factors is simply::
>>> f1 = SomeFactor(...) # doctest: +SKIP
>>> f2 = SomeOtherFactor(...) # doctest: +SKIP
>>> average = (f1 + f2) / 2.0 # doctest: +SKIP
Factors can also be converted into :class:`zipline.pipeline.Filter` objects
via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).
There are many natural operators defined on Factors besides the basic
numerical operators. These include methods for identifying missing or
extreme-valued outputs (:meth:`isnull`, :meth:`notnull`, :meth:`isnan`,
:meth:`notnan`), methods for normalizing outputs (:meth:`rank`,
:meth:`demean`, :meth:`zscore`), and methods for constructing Filters based
on rank-order properties of results (:meth:`top`, :meth:`bottom`,
:meth:`percentile_between`).
"""
ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {"=="})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update({unary_op_name(op): unary_operator(op) for op in UNARY_OPS})
clsdict.update(
{funcname: function_application(funcname) for funcname in NUMEXPR_MATH_FUNCS}
)
__truediv__ = clsdict["__div__"]
__rtruediv__ = clsdict["__rdiv__"]
# Add summary functions.
clsdict.update(
{name: summary_method(name) for name in summary_funcs.names},
)
del clsdict # don't pollute the class namespace with this.
eq = binary_operator("==")
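# Illustrative sketch (not part of the original source): because the operators
# above are installed on Factor, ordinary Python expressions on factors build
# lazy NumExprFactor/NumExprFilter terms rather than computing anything
# eagerly. The names below assume the built-in EquityPricing dataset.
#
#     from zipline.pipeline.data import EquityPricing
#
#     close = EquityPricing.close.latest
#     volume = EquityPricing.volume.latest
#     notional = close * volume   # NumExprFactor with expression "x_0 * x_1"
#     is_cheap = close < 5.0      # comparisons produce a NumExprFilter
#     unit = close / close        # same-term special case: "x_0 / x_0"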
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
)
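# Illustrative sketch (not part of the original source): combining ``mask``
# and ``groupby`` in practice, using another factor's quartiles as the
# grouping classifier. Returns and AverageDollarVolume are the built-in
# factors from zipline.pipeline.factors.
#
#     from zipline.pipeline.factors import AverageDollarVolume, Returns
#
#     returns = Returns(window_length=10)
#     size_buckets = AverageDollarVolume(window_length=30).quartiles()
#     demeaned = returns.demean(
#         mask=returns.percentile_between(1, 99),
#         groupby=size_buckets,
#     )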
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of ``self``.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
)
def rank(
self, method="ordinal", ascending=True, mask=NotSpecified, groupby=NotSpecified
):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.Factor
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
)
@expect_types(
target=Term,
correlation_length=int,
mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between ``target`` and the columns of ``self``.
Parameters
----------
target : zipline.pipeline.Term
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.Factor
A new Factor that will compute correlations between ``target`` and
the columns of ``self``.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term,
correlation_length=int,
mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between ``target`` and the columns of ``self``.
Parameters
----------
target : zipline.pipeline.Term
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.Factor
A new Factor that will compute correlations between ``target`` and
the columns of ``self``.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term,
regression_length=int,
mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
Parameters
----------
target : zipline.pipeline.Term
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.Factor
A new Factor that will compute linear regressions of each column
of `self` against `target`.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
)
@expect_types(
min_percentile=(int, float),
max_percentile=(int, float),
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def winsorize(
self, min_percentile, max_percentile, mask=NotSpecified, groupby=NotSpecified
):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately
to each group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with the columns defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
)
@expect_types(bins=int, mask=(Filter, NotSpecifiedType))
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quartiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quartiles over the output of ``self``.
Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, or 3, corresponding to the first, second, third, or fourth
quartile over each row. NaN data points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quartiles.
Returns
-------
quartiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 3.
"""
return self.quantiles(bins=4, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quintiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quintile labels on ``self``.
Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, 3, or 4, corresponding to quintiles over each row. NaN data
points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quintiles.
Returns
-------
quintiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 4.
"""
return self.quantiles(bins=5, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def deciles(self, mask=NotSpecified):
"""
Construct a Classifier computing decile labels on ``self``.
Every non-NaN data point in the output is labelled with a value from 0 to
9, corresponding to deciles over each row. NaN data points are labelled
with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing deciles.
Returns
-------
deciles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 9.
"""
return self.quantiles(bins=10, mask=mask)
def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values **for each group** defined by ``groupby``.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
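# Illustrative sketch (not part of the original source): ``top`` and
# ``bottom`` compose with masks and group-wise ranking. ``sector`` stands in
# for any Classifier and ``my_factor`` for any float64 Factor; both are
# hypothetical placeholders here.
#
#     liquidity = AverageDollarVolume(window_length=30)
#     top_500_overall = liquidity.top(500)
#     top_20_per_sector = liquidity.top(20, groupby=sector)
#     worst_50_liquid = my_factor.bottom(50, mask=top_500_overall)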
def _maximum(self, mask=NotSpecified, groupby=NotSpecified):
return MaximumFilter(self, groupby=groupby, mask=mask)
def percentile_between(self, min_percentile, max_percentile, mask=NotSpecified):
"""
Construct a Filter matching values of self that fall within the range
defined by ``min_percentile`` and ``max_percentile``.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.Filter
A new filter that will compute the specified percentile-range mask.
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
@if_not_float64_tell_caller_to_use_isnull
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.Filter
"""
return self != self
@if_not_float64_tell_caller_to_use_isnull
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.Filter
"""
return ~self.isnan()
@if_not_float64_tell_caller_to_use_isnull
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
def clip(self, min_bound, max_bound, mask=NotSpecified):
"""
Clip (limit) the values in a factor.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Parameters
----------
min_bound : float
The minimum value to use.
max_bound : float
The maximum value to use.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when clipping.
Notes
-----
To only clip values on one side, ``-np.inf`` and ``np.inf`` may be
passed. For example, to only clip the maximum value but not clip a
minimum value:
.. code-block:: python
factor.clip(min_bound=-np.inf, max_bound=user_provided_max)
See Also
--------
numpy.clip
"""
from .basic import Clip
return Clip(
inputs=[self],
min_bound=min_bound,
max_bound=max_bound,
)
@classmethod
def _principal_computable_term_type(cls):
return Factor
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
class GroupedRowTransform(Factor):
"""
A Factor that transforms an input factor by applying a row-wise
shape-preserving transformation on classifier-defined groups of that
Factor.
This is most often useful for normalization operators like ``zscore`` or
``demean`` or for performing ranking using ``rank``.
Parameters
----------
transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]
Function to apply over each row group.
factor : zipline.pipeline.Factor
The factor providing baseline data to transform.
mask : zipline.pipeline.Filter
Mask of entries to ignore when calculating transforms.
groupby : zipline.pipeline.Classifier
Classifier partitioning ``factor`` into groups to use when calculating
means.
transform_args : tuple[hashable]
Additional positional arguments to forward to ``transform``.
Notes
-----
Users should rarely construct instances of this factor directly. Instead,
they should construct instances via factor normalization methods like
``zscore`` and ``demean`` or using ``rank`` with ``groupby``.
See Also
--------
zipline.pipeline.Factor.zscore
zipline.pipeline.Factor.demean
zipline.pipeline.Factor.rank
"""
window_length = 0
def __new__(
cls,
transform,
transform_args,
factor,
groupby,
dtype,
missing_value,
mask,
**kwargs,
):
if mask is NotSpecified:
mask = factor.mask
else:
mask = mask & factor.mask
if groupby is NotSpecified:
groupby = Everything(mask=mask)
return super(GroupedRowTransform, cls).__new__(
GroupedRowTransform,
transform=transform,
transform_args=transform_args,
inputs=(factor, groupby),
missing_value=missing_value,
mask=mask,
dtype=dtype,
**kwargs,
)
def _init(self, transform, transform_args, *args, **kwargs):
self._transform = transform
self._transform_args = transform_args
return super(GroupedRowTransform, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, transform, transform_args, *args, **kwargs):
return (
super(GroupedRowTransform, cls)._static_identity(*args, **kwargs),
transform,
transform_args,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
# Make a copy with the null code written to masked locations.
group_labels = where(mask, group_labels, null_label)
return where(
group_labels != null_label,
naive_grouped_rowwise_apply(
data=data,
group_labels=group_labels,
func=self._transform,
func_args=self._transform_args,
out=empty_like(data, dtype=self.dtype),
),
self.missing_value,
)
@property
def transform_name(self):
return self._transform.__name__
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + "(%r)" % self.transform_name
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`Factor.rank`
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
window_safe = True
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls)._static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
if self.mask is AssetExists():
# Don't include mask in repr if it's the default.
mask_info = ""
else:
mask_info = ", mask={}".format(self.mask.recursive_repr())
return "{type}({input_}, method='{method}'{mask_info})".format(
type=type(self).__name__,
input_=self.inputs[0].recursive_repr(),
method=self._method,
mask_info=mask_info,
)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Rank:\\l method: {!r}\\l mask: {}\\l".format(
self._method,
type(self.mask).__name__,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is not passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
outputs : iterable[str], optional
An iterable of strings which represent the names of each output this
factor should compute and return. If this argument is not passed to the
CustomFactor constructor, we look for a class-level attribute named
`outputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
mask : zipline.pipeline.Filter, optional
A Filter describing the assets on which we should compute each day.
Each call to ``CustomFactor.compute`` will only receive assets for
which ``mask`` produced True on the day for which compute is being
called.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`. If multiple outputs are
specified, `compute` should write its desired return values into
`out.<output_name>` for each output name in `self.outputs`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
A CustomFactor with multiple outputs:
.. code-block:: python
class MultipleOutputs(CustomFactor):
inputs = [USEquityPricing.close]
outputs = ['alpha', 'beta']
window_length = N
def compute(self, today, assets, out, close):
computed_alpha, computed_beta = some_function(close)
out.alpha[:] = computed_alpha
out.beta[:] = computed_beta
# Each output is returned as its own Factor upon instantiation.
alpha, beta = MultipleOutputs()
# Equivalently, we can create a single factor instance and access each
# output as an attribute of that instance.
multiple_outputs = MultipleOutputs()
alpha = multiple_outputs.alpha
beta = multiple_outputs.beta
Note: If a CustomFactor has multiple outputs, all outputs must have the
same dtype. For instance, in the example above, if alpha is a float then
beta must also be a float.
'''
dtype = float64_dtype
def _validate(self):
try:
super(CustomFactor, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomClassifier?",
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomFilter?",
)
raise
def __getattribute__(self, name):
outputs = object.__getattribute__(self, "outputs")
if outputs is NotSpecified:
return super(CustomFactor, self).__getattribute__(name)
elif name in outputs:
return RecarrayField(factor=self, attribute=name)
else:
try:
return super(CustomFactor, self).__getattribute__(name)
except AttributeError:
raise AttributeError(
"Instance of {factor} has no output named {attr!r}. "
"Possible choices are: {choices}.".format(
factor=type(self).__name__,
attr=name,
choices=self.outputs,
)
)
def __iter__(self):
if self.outputs is NotSpecified:
raise ValueError(
"{factor} does not have multiple outputs.".format(
factor=type(self).__name__,
)
)
return (RecarrayField(self, attr) for attr in self.outputs)
class RecarrayField(SingleInputMixin, Factor):
"""
A single field from a multi-output factor.
"""
def __new__(cls, factor, attribute):
return super(RecarrayField, cls).__new__(
cls,
attribute=attribute,
inputs=[factor],
window_length=0,
mask=factor.mask,
dtype=factor.dtype,
missing_value=factor.missing_value,
window_safe=factor.window_safe,
)
def _init(self, attribute, *args, **kwargs):
self._attribute = attribute
return super(RecarrayField, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, attribute, *args, **kwargs):
return (
super(RecarrayField, cls)._static_identity(*args, **kwargs),
attribute,
)
def _compute(self, windows, dates, assets, mask):
return windows[0][self._attribute]
def graph_repr(self):
return "{}.{}".format(self.inputs[0].recursive_repr(), self._attribute)
class Latest(LatestMixin, CustomFactor):
"""
Factor producing the most recently-known value of `inputs[0]` on each day.
The `.latest` attribute of DataSet columns returns an instance of this
Factor.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
class DailySummary(SingleInputMixin, Factor):
"""1D Factor that computes a summary statistic across all assets."""
ndim = 1
window_length = 0
params = ("func",)
def __new__(cls, func, input_, mask, dtype):
# TODO: We should be able to support datetime64 as well, but that
# requires extra care for handling NaT.
if dtype != float64_dtype:
raise AssertionError(
"DailySummary only supports float64 dtype, got {}".format(dtype),
)
return super(DailySummary, cls).__new__(
cls,
inputs=[input_],
dtype=dtype,
missing_value=nan,
window_safe=input_.window_safe,
func=func,
mask=mask,
)
def _compute(self, arrays, dates, assets, mask):
func = self.params["func"]
data = arrays[0]
data[~mask] = nan
if not isnan(self.inputs[0].missing_value):
data[data == self.inputs[0].missing_value] = nan
return as_column(func(data, self.inputs[0].missing_value))
def __repr__(self):
return "{}.{}()".format(
self.inputs[0].recursive_repr(),
self.params["func"].__name__,
)
graph_repr = recursive_repr = __repr__
# Functions to be passed to GroupedRowTransform. These aren't defined inline
# because the transformation function is part of the instance hash key.
def demean(row):
return row - nanmean(row)
def zscore(row):
with np.errstate(divide="ignore", invalid="ignore"):
return (row - nanmean(row)) / nanstd(row)
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a
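# Illustrative sketch (not part of the original source): what the row-wise
# ``winsorize`` helper above does to a small, already-sorted vector. The
# bottom 25% is raised to the value at the lower cutoff and the top 25% is
# lowered to the value just below the upper cutoff.
def _example_winsorize():
    row = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
    result = winsorize(row, min_percentile=0.25, max_percentile=0.75)
    assert np.allclose(result, [3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0])
    return result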
from numbers import Number
from numpy import (
arange,
average,
clip,
copyto,
exp,
fmax,
full,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
unique,
errstate as np_errstate,
)
from zipline.pipeline.data import EquityPricing
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanargmax,
nanmax,
nanmean,
nanstd,
nansum,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
)
from .factor import CustomFactor
from ..mixins import SingleInputMixin
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [EquityPricing.close]
"""
inputs = [EquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
class PercentChange(SingleInputMixin, CustomFactor):
"""
Calculates the percent change over the given window_length.
**Default Inputs:** None
**Default Window Length:** None
Notes
-----
Percent change is calculated as ``(new - old) / abs(old)``.
"""
window_safe = True
def _validate(self):
super(PercentChange, self)._validate()
if self.window_length < 2:
raise ValueError(
"'PercentChange' expected a window length "
"of at least 2, but was given {window_length}. "
"For daily percent change, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, values):
with np_errstate(divide="ignore", invalid="ignore"):
out[:] = (values[-1] - values[0]) / abs(values[0])
class DailyReturns(Returns):
"""
Calculates daily percent change in close price.
**Default Inputs**: [EquityPricing.close]
"""
inputs = [EquityPricing.close]
window_safe = True
window_length = 2
class SimpleMovingAverage(SingleInputMixin, CustomFactor):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
# nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [EquityPricing.close, EquityPricing.volume]
**Default Window Length:** None
"""
inputs = (EquityPricing.close, EquityPricing.volume)
class MaxDrawdown(SingleInputMixin, CustomFactor):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[: end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [EquityPricing.close, EquityPricing.volume]
**Default Window Length:** None
"""
inputs = [EquityPricing.close, EquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nansum(close * volume, axis=0) / len(close)
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
        [decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
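# Worked example for exponential_weights (illustrative):
# exponential_weights(3, 0.5) evaluates full(3, 0.5) ** arange(4, 1, -1),
# i.e. 0.5 ** [4, 3, 2] == [0.0625, 0.125, 0.25], so the most recent row
# receives the largest weight. Only relative sizes matter when these weights
# are passed to ``np.average``, so the overall scale of the weight vector does
# not affect the factors below.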
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ("decay_rate",)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError("`span` must be a positive number. %s was passed." % span)
decay_rate = 1.0 - (2.0 / (1.0 + span))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(0.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs
)
@classmethod
def from_center_of_mass(cls, inputs, window_length, center_of_mass, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
        Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
            # decay_rate=(1 - (1 / 16.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs,
)
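# Worked conversions for the constructors above (illustrative): for a
# parameter value of 15, they produce the following decay rates:
#
#     from_span(span=15)            -> 1 - 2 / (1 + 15)   = 0.875
#     from_halflife(halflife=15)    -> exp(log(0.5) / 15) ~= 0.9548
#     from_center_of_mass(com=15)   -> 1 - 1 / (1 + 15)   = 0.9375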
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:meth:`pandas.DataFrame.ewm`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
    :meth:`pandas.DataFrame.ewm`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = np_sum(weights) ** 2
bias_correction = squared_weight_sum / (squared_weight_sum - np_sum(weights**2))
out[:] = sqrt(variance * bias_correction)
class LinearWeightedMovingAverage(SingleInputMixin, CustomFactor):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
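# Worked example for LinearWeightedMovingAverage.compute above (illustrative):
# with window_length=3 the weights are [1, 2, 3] (most recent row weighted
# most heavily) and the normalizer is 3 * 4 / 2 = 6, so the output is
# (1 * x[0] + 2 * x[1] + 3 * x[2]) / 6 for each asset column x.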
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** [Returns(window_length=2)]
Parameters
----------
annualization_factor : float, optional
        The number of time units per year. Default is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {"annualization_factor": 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor**0.5)
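# Illustrative usage sketch (not part of this module): the defaults compute
# trailing one-year volatility of daily returns; a shorter horizon can be
# requested explicitly, e.g.::
#
#     from zipline.pipeline.factors import AnnualizedVolatility
#
#     vol_63 = AnnualizedVolatility(window_length=63)  # roughly one quarter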
class PeerCount(SingleInputMixin, CustomFactor):
"""
Peer Count of distinct categories in a given classifier. This factor
is returned by the classifier instance method peer_count()
**Default Inputs:** None
**Default Window Length:** 1
"""
window_length = 1
def _validate(self):
super(PeerCount, self)._validate()
if self.window_length != 1:
raise ValueError(
"'PeerCount' expected a window length of 1, but was given"
"{window_length}.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, classifier_values):
# Convert classifier array to group label int array
group_labels, null_label = self.inputs[0]._to_integral(classifier_values[0])
_, inverse, counts = unique( # Get counts, idx of unique groups
group_labels,
return_counts=True,
return_inverse=True,
)
copyto(out, counts[inverse], where=(group_labels != null_label))
# Convenience aliases
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
class Clip(CustomFactor):
"""
Clip (limit) the values in a factor.
Given an interval, values outside the interval are clipped to the interval
edges. For example, if an interval of ``[0, 1]`` is specified, values
smaller than 0 become 0, and values larger than 1 become 1.
**Default Window Length:** 1
Parameters
----------
min_bound : float
The minimum value to use.
max_bound : float
The maximum value to use.
Notes
-----
    To only clip values on one side, ``-np.inf`` and ``np.inf`` may be passed.
For example, to only clip the maximum value but not clip a minimum value:
.. code-block:: python
Clip(inputs=[factor], min_bound=-np.inf, max_bound=user_provided_max)
See Also
--------
numpy.clip
"""
window_length = 1
params = ("min_bound", "max_bound")
def compute(self, today, assets, out, values, min_bound, max_bound):
        clip(values[-1], min_bound, max_bound, out=out)

# --- zipline/pipeline/factors/basic.py (package: zipline-crypto) ---
from numpy import newaxis
from zipline.utils.numpy_utils import (
NaTD,
busday_count_mask_NaT,
datetime64D_dtype,
float64_dtype,
)
from .factor import Factor
class BusinessDaysSincePreviousEvent(Factor):
"""
Abstract class for business days since a previous event.
Returns the number of **business days** (not trading days!) since
the most recent event date for each asset.
This doesn't use trading days for symmetry with
BusinessDaysUntilNextEarnings.
Assets which announced or will announce the event today will produce a
value of 0.0. Assets that announced the event on the previous business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
Example
-------
``BusinessDaysSincePreviousEvent`` can be used to create an event-driven
factor. For instance, you may want to only trade assets that have
a data point with an asof_date in the last 5 business days. To do this,
you can create a ``BusinessDaysSincePreviousEvent`` factor, supplying
the relevant asof_date column from your dataset as input, like this::
# Factor computing number of days since most recent asof_date
# per asset.
days_since_event = BusinessDaysSincePreviousEvent(
inputs=[MyDataset.asof_date]
)
# Filter returning True for each asset whose most recent asof_date
# was in the last 5 business days.
recency_filter = (days_since_event <= 5)
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(announce_dates, reference_dates)
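# Worked example for BusinessDaysSincePreviousEvent (illustrative): if an
# asset's most recent event date is Friday 2014-01-03 and the pipeline date is
# Tuesday 2014-01-07, the event was two business days ago, so the factor
# outputs 2.0 for that asset/date pair; assets whose event date is NaT output
# NaN.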
class BusinessDaysUntilNextEvent(Factor):
"""
Abstract class for business days since a next event.
Returns the number of **business days** (not trading days!) until
the next known event date for each asset.
This doesn't use trading days because the trading calendar includes
information that may not have been available to the algorithm at the time
when `compute` is called.
    For example, the NYSE closings on September 11th, 2001 would not have been
    known to the algorithm on September 10th.
Assets that announced or will announce the event today will produce a value
of 0.0. Assets that will announce the event on the next upcoming business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
        return busday_count_mask_NaT(reference_dates, announce_dates)

# --- zipline/pipeline/factors/events.py (package: zipline-crypto) ---
from functools import partial
from numbers import Number
import operator
import re
from numpy import where, isnan, nan, zeros
import pandas as pd
from zipline.errors import UnsupportedDataType
from zipline.lib.labelarray import LabelArray
from zipline.lib.quantiles import quantiles
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import ComputableTerm
from zipline.utils.compat import unicode
from zipline.utils.input_validation import expect_types, expect_dtypes
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
vectorized_is_element,
)
from ..filters import ArrayPredicate, NumExprFilter
from ..mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
string_classifiers_only = restrict_to_dtype(
dtype=categorical_dtype,
message_template=(
"{method_name}() is only defined on Classifiers producing strings"
" but it was called on a Classifier of dtype {received_dtype}."
),
)
class Classifier(RestrictedDTypeMixin, ComputableTerm):
"""
A Pipeline expression computing a categorical output.
Classifiers are most commonly useful for describing grouping keys for
complex transformations on Factor outputs. For example, Factor.demean() and
Factor.zscore() can be passed a Classifier in their ``groupby`` argument,
indicating that means/standard deviations should be computed on assets for
which the classifier produced the same label.
"""
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = CLASSIFIER_DTYPES
categories = NotSpecified
# We explicitly don't support classifier to classifier comparisons, since
# the stored values likely don't mean the same thing. This may be relaxed
# in the future, but for now we're starting conservatively.
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other``.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value!r}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
else:
return ArrayPredicate(
term=self,
op=operator.eq,
opargs=(other,),
)
def __ne__(self, other):
"""
        Construct a Filter returning True for asset/date pairs where the output
        of ``self`` does not match ``other``.
"""
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"((x_0 != {other}) & (x_0 != {missing}))".format(
other=int(other),
missing=self.missing_value,
),
binds=(self,),
)
else:
# Numexpr doesn't know how to use LabelArrays.
return ArrayPredicate(term=self, op=operator.ne, opargs=(other,))
def bad_compare(opname, other):
raise TypeError("cannot compare classifiers with %s" % opname)
__gt__ = partial(bad_compare, ">")
__ge__ = partial(bad_compare, ">=")
__le__ = partial(bad_compare, "<=")
__lt__ = partial(bad_compare, "<")
del bad_compare
@string_classifiers_only
@expect_types(prefix=(bytes, unicode))
def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
)
@string_classifiers_only
@expect_types(suffix=(bytes, unicode))
def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
            produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
)
@string_classifiers_only
@expect_types(substring=(bytes, unicode))
def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
)
@string_classifiers_only
@expect_types(pattern=(bytes, unicode, type(re.compile(""))))
def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
)
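    # Illustrative usage sketch (hypothetical dataset column): the string
    # matching methods above all return Filters. Given a string classifier
    # such as ``exchange = SomeDataSet.exchange_name.latest``::
    #
    #     on_nyse_family = exchange.startswith("NYSE")
    #     arca_listed = exchange.matches(r"NYSE\s+ARCA")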
# TODO: Support relabeling for integer dtypes.
@string_classifiers_only
def relabel(self, relabeler):
"""
Convert ``self`` into a new classifier by mapping a function over each
element produced by ``self``.
Parameters
----------
relabeler : function[str -> str or None]
A function to apply to each unique value produced by ``self``.
Returns
-------
relabeled : Classifier
A classifier produced by applying ``relabeler`` to each unique
value produced by ``self``.
"""
return Relabel(term=self, relabeler=relabeler)
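    # Illustrative usage sketch (hypothetical relabeler): ``relabel`` maps a
    # function over each unique string label produced by a classifier ``c``::
    #
    #     def coarse_exchange(name):
    #         return "NYSE" if name.startswith("NYSE") else "OTHER"
    #
    #     coarse = c.relabel(coarse_exchange)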
def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
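    # Illustrative usage sketch (hypothetical column): ``element_of`` accepts a
    # collection of labels or codes matching the classifier's dtype. For an
    # int64 classifier ``sector_code``::
    #
    #     in_target_sectors = sector_code.element_of([101, 102, 205])
    #
    # The result is a Filter that is True where the code is in the collection.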
def postprocess(self, data):
if self.dtype == int64_dtype:
return data
if not isinstance(data, LabelArray):
raise AssertionError("Expected a LabelArray, got %s." % type(data))
return data.as_categorical()
def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(
result.values, pd.Categorical
), "Expected a Categorical, got %r." % type(result.values)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
)
@classmethod
def _principal_computable_term_type(cls):
return Classifier
def _to_integral(self, output_array):
"""
Convert an array produced by this classifier into an array of integer
labels and a missing value label.
"""
if self.dtype == int64_dtype:
group_labels = output_array
null_label = self.missing_value
elif self.dtype == categorical_dtype:
# Coerce LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = output_array.as_int_array()
null_label = output_array.missing_value_code
else:
raise AssertionError("Unexpected Classifier dtype: %s." % self.dtype)
return group_labels, null_label
def peer_count(self, mask=NotSpecified):
"""
Construct a factor that gives the number of occurrences of
each distinct category in a classifier.
Parameters
----------
mask : zipline.pipeline.Filter, optional
If passed, only count assets passing the filter. Default behavior
is to count all assets.
Examples
--------
Let ``c`` be a Classifier which would produce the following output::
AAPL MSFT MCD BK AMZN FB
2015-05-05 'a' 'a' None 'b' 'a' None
2015-05-06 'b' 'a' 'c' 'b' 'b' 'b'
2015-05-07 None 'a' 'aa' 'aa' 'aa' None
2015-05-08 'c' 'c' 'c' 'c' 'c' 'c'
Then ``c.peer_count()`` will count, for each row, the total number
of assets in each classifier category produced by ``c``. Missing
data will be evaluated to NaN.
::
AAPL MSFT MCD BK AMZN FB
2015-05-05 3.0 3.0 NaN 1.0 3.0 NaN
2015-05-06 4.0 1.0 1.0 4.0 4.0 4.0
2015-05-07 NaN 1.0 3.0 3.0 3.0 NaN
2015-05-08 6.0 6.0 6.0 6.0 6.0 6.0
Returns
-------
factor : CustomFactor
A CustomFactor that counts, for each asset, the total number
of assets with the same classifier category label.
"""
# Lazy import due to cyclic dependencies in factor.py, classifier.py
from ..factors import PeerCount
return PeerCount(inputs=[self], mask=mask)
class Everything(Classifier):
"""
A trivial classifier that classifies everything the same.
"""
dtype = int64_dtype
window_length = 0
inputs = ()
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
return where(
mask,
zeros(shape=mask.shape, dtype=int64_dtype),
self.missing_value,
)
class Quantiles(SingleInputMixin, Classifier):
"""
A classifier computing quantiles over an input.
"""
params = ("bins",)
dtype = int64_dtype
window_length = 0
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
bins = self.params["bins"]
to_bin = where(mask, data, nan)
result = quantiles(to_bin, bins)
# Write self.missing_value into nan locations, whether they were
# generated by our input mask or not.
result[isnan(result)] = self.missing_value
return result.astype(int64_dtype)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + "(%d)" % self.params["bins"]
class Relabel(SingleInputMixin, Classifier):
"""
A classifier applying a relabeling function on the result of another
classifier.
Parameters
----------
    term : zipline.pipeline.Classifier
        Term producing the input to be relabeled.
    relabeler : function(LabelArray) -> LabelArray
        Function to apply to the result of ``term``.
"""
window_length = 0
params = ("relabeler",)
# TODO: Support relabeling for integer dtypes.
@expect_dtypes(term=categorical_dtype)
@expect_types(term=Classifier)
def __new__(cls, term, relabeler):
return super(Relabel, cls).__new__(
cls,
inputs=(term,),
dtype=term.dtype,
mask=term.mask,
relabeler=relabeler,
)
def _compute(self, arrays, dates, assets, mask):
relabeler = self.params["relabeler"]
data = arrays[0]
if isinstance(data, LabelArray):
result = data.map(relabeler)
result[~mask] = data.missing_value
else:
raise NotImplementedError(
"Relabeling is not currently supported for " "int-dtype classifiers."
)
return result
class CustomClassifier(
PositiveWindowLengthMixin, StandardOutputs, CustomTermMixin, Classifier
):
"""
Base class for user-defined Classifiers.
    Does not support multiple outputs.
See Also
--------
zipline.pipeline.CustomFactor
zipline.pipeline.CustomFilter
"""
def _validate(self):
try:
super(CustomClassifier, self)._validate()
except UnsupportedDataType:
if self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomFactor?",
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomFilter?",
)
raise
def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape)
class Latest(LatestMixin, CustomClassifier):
"""
A classifier producing the latest value of an input.
See Also
--------
zipline.pipeline.data.dataset.BoundColumn.latest
"""
pass
class InvalidClassifierComparison(TypeError):
def __init__(self, classifier, compval):
super(InvalidClassifierComparison, self).__init__(
"Can't compare classifier of dtype"
" {dtype} to value {value} of type {type}.".format(
dtype=classifier.dtype,
value=compval,
type=type(compval).__name__,
)
        )

# --- zipline/pipeline/classifiers/classifier.py (package: zipline-crypto) ---
import abc
from collections import namedtuple, OrderedDict
from itertools import repeat
from textwrap import dedent
from weakref import WeakKeyDictionary
from toolz import first
from zipline.currency import Currency
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.pipeline.classifiers import Classifier, Latest as LatestClassifier
from zipline.pipeline.domain import Domain, GENERIC
from zipline.pipeline.factors import Factor, Latest as LatestFactor
from zipline.pipeline.filters import Filter, Latest as LatestFilter
from zipline.pipeline.sentinels import NotSpecified, sentinel
from zipline.pipeline.term import (
AssetExists,
LoadableTerm,
validate_dtype,
)
from zipline.utils.formatting import s, plural
from zipline.utils.input_validation import (
coerce_types,
ensure_dtype,
expect_types,
)
from zipline.utils.numpy_utils import float64_dtype, NoDefaultMissingValue
from zipline.utils.preprocess import preprocess
from zipline.utils.string_formatting import bulleted_list
IsSpecialization = sentinel("IsSpecialization")
class Column(object):
"""
An abstract column of data, not yet associated with a dataset.
"""
@preprocess(dtype=ensure_dtype)
def __init__(
self,
dtype,
missing_value=NotSpecified,
doc=None,
metadata=None,
currency_aware=False,
):
if currency_aware and dtype != float64_dtype:
raise ValueError(
"Columns cannot be constructed with currency_aware={}, "
"dtype={}. Currency aware columns must have a float64 dtype.".format(
currency_aware, dtype
)
)
self.dtype = dtype
self.missing_value = missing_value
self.doc = doc
self.metadata = metadata.copy() if metadata is not None else {}
self.currency_aware = currency_aware
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
currency_aware=self.currency_aware,
)
class _BoundColumnDescr(object):
"""
Intermediate class that sits on `DataSet` objects and returns memoized
`BoundColumn` objects when requested.
This exists so that subclasses of DataSets don't share columns with their
parent classes.
"""
def __init__(
self, dtype, missing_value, name, doc, metadata, currency_aware
):
# Validating and calculating default missing values here guarantees
        # that we fail quickly if the user passes an unsupported dtype or fails
# to provide a missing value for a dtype that requires one
# (e.g. int64), but still enables us to provide an error message that
# points to the name of the failing column.
try:
self.dtype, self.missing_value = validate_dtype(
termname="Column(name={name!r})".format(name=name),
dtype=dtype,
missing_value=missing_value,
)
except NoDefaultMissingValue:
# Re-raise with a more specific message.
raise NoDefaultMissingValue(
"Failed to create Column with name {name!r} and"
" dtype {dtype} because no missing_value was provided\n\n"
"Columns with dtype {dtype} require a missing_value.\n"
"Please pass missing_value to Column() or use a different"
" dtype.".format(dtype=dtype, name=name)
)
self.name = name
self.doc = doc
self.metadata = metadata
self.currency_aware = currency_aware
def __get__(self, instance, owner):
"""
Produce a concrete BoundColumn object when accessed.
We don't bind to datasets at class creation time so that subclasses of
DataSets produce different BoundColumns.
"""
return BoundColumn(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=owner,
name=self.name,
doc=self.doc,
metadata=self.metadata,
currency_conversion=None,
currency_aware=self.currency_aware,
)
class BoundColumn(LoadableTerm):
"""
A column of data that's been concretely bound to a particular dataset.
Attributes
----------
dtype : numpy.dtype
The dtype of data produced when this column is loaded.
latest : zipline.pipeline.LoadableTerm
A :class:`~zipline.pipeline.Filter`, :class:`~zipline.pipeline.Factor`,
or :class:`~zipline.pipeline.Classifier` computing the most recently
known value of this column on each date.
See :class:`zipline.pipeline.mixins.LatestMixin` for more details.
dataset : zipline.pipeline.data.DataSet
The dataset to which this column is bound.
name : str
The name of this column.
metadata : dict
Extra metadata associated with this column.
currency_aware : bool
Whether or not this column produces currency-denominated data.
Notes
-----
Instances of this class are dynamically created upon access to attributes
of :class:`~zipline.pipeline.data.DataSet`. For example,
:attr:`~zipline.pipeline.data.EquityPricing.close` is an instance of this
class. Pipeline API users should never construct instances of this
directly.
"""
mask = AssetExists()
window_safe = True
def __new__(
cls,
dtype,
missing_value,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware,
):
if currency_aware and dtype != float64_dtype:
raise AssertionError(
"The {} column on dataset {} cannot be constructed with "
"currency_aware={}, dtype={}. Currency aware columns must "
"have a float64 dtype.".format(
name,
dataset,
currency_aware,
dtype,
)
)
return super(BoundColumn, cls).__new__(
cls,
domain=dataset.domain,
dtype=dtype,
missing_value=missing_value,
dataset=dataset,
name=name,
ndim=dataset.ndim,
doc=doc,
metadata=metadata,
currency_conversion=currency_conversion,
currency_aware=currency_aware,
)
def _init(
self,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware,
*args,
**kwargs,
):
self._dataset = dataset
self._name = name
self.__doc__ = doc
self._metadata = metadata
self._currency_conversion = currency_conversion
self._currency_aware = currency_aware
return super(BoundColumn, self)._init(*args, **kwargs)
@classmethod
def _static_identity(
cls,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware,
*args,
**kwargs,
):
return (
super(BoundColumn, cls)._static_identity(*args, **kwargs),
dataset,
name,
doc,
frozenset(sorted(metadata.items(), key=first)),
currency_conversion,
currency_aware,
)
def __lt__(self, other):
msg = "Can't compare '{}' with '{}'. (Did you mean to use '.latest'?)"
raise TypeError(msg.format(self.qualname, other.__class__.__name__))
__gt__ = __le__ = __ge__ = __lt__
def _replace(self, **kwargs):
kw = dict(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset,
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
currency_conversion=self._currency_conversion,
currency_aware=self._currency_aware,
)
kw.update(kwargs)
return type(self)(**kw)
def specialize(self, domain):
"""Specialize ``self`` to a concrete domain."""
if domain == self.domain:
return self
return self._replace(dataset=self._dataset.specialize(domain))
def unspecialize(self):
"""
Unspecialize a column to its generic form.
This is equivalent to ``column.specialize(GENERIC)``.
"""
return self.specialize(GENERIC)
@coerce_types(currency=(str, Currency))
def fx(self, currency):
"""
Construct a currency-converted version of this column.
Parameters
----------
currency : str or zipline.currency.Currency
Currency into which to convert this column's data.
Returns
-------
column : BoundColumn
Column producing the same data as ``self``, but currency-converted
into ``currency``.
"""
conversion = self._currency_conversion
if not self._currency_aware:
raise TypeError(
"The .fx() method cannot be called on {} because it does not "
"produce currency-denominated data.".format(self.qualname)
)
elif conversion is not None and conversion.currency == currency:
return self
return self._replace(
currency_conversion=CurrencyConversion(
currency=currency,
field=DEFAULT_FX_RATE,
)
)
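    # Illustrative usage sketch (not part of this class): currency-aware
    # columns such as EquityPricing.close can be converted before use, e.g.::
    #
    #     close_usd = EquityPricing.close.fx("USD").latest
    #
    # Calling ``.fx()`` on a column that is not currency-aware raises TypeError.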
@property
def currency_conversion(self):
"""Specification for currency conversions applied for this term."""
return self._currency_conversion
@property
def currency_aware(self):
"""
Whether or not this column produces currency-denominated data.
"""
return self._currency_aware
@property
def dataset(self):
"""
The dataset to which this column is bound.
"""
return self._dataset
@property
def name(self):
"""
The name of this column.
"""
return self._name
@property
def metadata(self):
"""
A copy of the metadata for this column.
"""
return self._metadata.copy()
@property
def qualname(self):
"""The fully-qualified name of this column."""
out = ".".join([self.dataset.qualname, self.name])
conversion = self._currency_conversion
if conversion is not None:
out += ".fx({!r})".format(conversion.currency.code)
return out
@property
def latest(self):
dtype = self.dtype
if dtype in Filter.ALLOWED_DTYPES:
Latest = LatestFilter
elif dtype in Classifier.ALLOWED_DTYPES:
Latest = LatestClassifier
else:
assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." % dtype
Latest = LatestFactor
return Latest(
inputs=(self,),
dtype=dtype,
missing_value=self.missing_value,
ndim=self.ndim,
)
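    # Illustrative usage sketch (not part of this class): ``.latest`` is the
    # most common way to turn a raw column into a pipeline term, e.g.::
    #
    #     latest_close = EquityPricing.close.latest  # a Factor (float64 column)
    #
    # The term's type (Filter, Classifier, or Factor) follows the dtype
    # dispatch above.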
def __repr__(self):
return "{qualname}::{dtype}".format(
qualname=self.qualname,
dtype=self.dtype.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "BoundColumn:\\l Dataset: {}\\l Column: {}\\l".format(
self.dataset.__name__, self.name
)
def recursive_repr(self):
"""Short repr used to render in recursive contexts."""
return self.qualname
class DataSetMeta(type):
"""
Metaclass for DataSets
Supplies name and dataset information to Column attributes, and manages
families of specialized dataset.
"""
def __new__(mcls, name, bases, dict_):
if len(bases) != 1:
# Disallowing multiple inheritance makes it easier for us to
# determine whether a given dataset is the root for its family of
# specializations.
raise TypeError("Multiple dataset inheritance is not supported.")
# This marker is set in the class dictionary by `specialize` below.
is_specialization = dict_.pop(IsSpecialization, False)
newtype = super(DataSetMeta, mcls).__new__(mcls, name, bases, dict_)
if not isinstance(newtype.domain, Domain):
raise TypeError(
"Expected a Domain for {}.domain, but got {} instead.".format(
newtype.__name__,
type(newtype.domain),
)
)
# Collect all of the column names that we inherit from our parents.
column_names = set().union(
*(getattr(base, "_column_names", ()) for base in bases)
)
# Collect any new columns from this dataset.
for maybe_colname, maybe_column in dict_.items():
if isinstance(maybe_column, Column):
# add column names defined on our class
bound_column_descr = maybe_column.bind(maybe_colname)
setattr(newtype, maybe_colname, bound_column_descr)
column_names.add(maybe_colname)
newtype._column_names = frozenset(column_names)
if not is_specialization:
# This is the new root of a family of specializations. Store the
# memoized dictionary for family on this type.
newtype._domain_specializations = WeakKeyDictionary(
{
newtype.domain: newtype,
}
)
return newtype
@expect_types(domain=Domain)
def specialize(self, domain):
"""
Specialize a generic DataSet to a concrete domain.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain to which we should generate a specialization.
Returns
-------
specialized : type
A new :class:`~zipline.pipeline.data.DataSet` subclass with the
same columns as ``self``, but specialized to ``domain``.
"""
# We're already the specialization to this domain, so just return self.
if domain == self.domain:
return self
try:
return self._domain_specializations[domain]
except KeyError:
if not self._can_create_new_specialization(domain):
# This either means we're already a specialization and trying
# to create a new specialization, or we're the generic version
# of a root-specialized dataset, which we don't want to create
# new specializations of.
raise ValueError(
"Can't specialize {dataset} from {current} to new domain {new}.".format(
dataset=self.__name__,
current=self.domain,
new=domain,
)
)
new_type = self._create_specialization(domain)
self._domain_specializations[domain] = new_type
return new_type
def unspecialize(self):
"""
Unspecialize a dataset to its generic form.
This is equivalent to ``dataset.specialize(GENERIC)``.
"""
return self.specialize(GENERIC)
def _can_create_new_specialization(self, domain):
# Always allow specializing to a generic domain.
if domain is GENERIC:
return True
elif "_domain_specializations" in vars(self):
# This branch is True if we're the root of a family.
# Allow specialization if we're generic.
return self.domain is GENERIC
else:
# If we're not the root of a family, we can't create any new
# specializations.
return False
def _create_specialization(self, domain):
# These are all assertions because we should have handled these cases
# already in specialize().
assert isinstance(domain, Domain)
assert (
domain not in self._domain_specializations
), "Domain specializations should be memoized!"
if domain is not GENERIC:
assert (
self.domain is GENERIC
), "Can't specialize dataset with domain {} to domain {}.".format(
self.domain,
domain,
)
# Create a new subclass of ``self`` with the given domain.
# Mark that it's a specialization so that we know not to create a new
# family for it.
name = self.__name__
bases = (self,)
dict_ = {"domain": domain, IsSpecialization: True}
out = type(name, bases, dict_)
out.__module__ = self.__module__
return out
@property
def columns(self):
return frozenset(
getattr(self, colname) for colname in self._column_names
)
@property
def qualname(self):
if self.domain is GENERIC:
specialization_key = ""
else:
specialization_key = "<" + self.domain.country_code + ">"
return self.__name__ + specialization_key
# NOTE: We used to use `functools.total_ordering` to account for all of the
# other rich comparison methods, but it has issues in python 3 and
# this method is only used for test purposes, so for now we will just
# keep this in isolation. If we ever need any of the other comparison
# methods we will have to implement them individually.
def __lt__(self, other):
return id(self) < id(other)
def __repr__(self):
return "<DataSet: %r, domain=%s>" % (self.__name__, self.domain)
class DataSet(object, metaclass=DataSetMeta):
"""
Base class for Pipeline datasets.
A :class:`DataSet` is defined by two parts:
1. A collection of :class:`~zipline.pipeline.data.Column` objects that
describe the queryable attributes of the dataset.
2. A :class:`~zipline.pipeline.domain.Domain` describing the assets and
calendar of the data represented by the :class:`DataSet`.
To create a new Pipeline dataset, define a subclass of :class:`DataSet` and
set one or more :class:`Column` objects as class-level attributes. Each
column requires a ``np.dtype`` that describes the type of data that should
be produced by a loader for the dataset. Integer columns must also provide
a "missing value" to be used when no value is available for a given
asset/date combination.
By default, the domain of a dataset is the special singleton value,
:data:`~zipline.pipeline.domain.GENERIC`, which means that they can be used
in a Pipeline running on **any** domain.
    In some cases, it may be preferable to restrict a dataset to support only
    a single domain. For example, a DataSet may describe data from a
vendor that only covers the US. To restrict a dataset to a specific domain,
define a `domain` attribute at class scope.
You can also define a domain-specific version of a generic DataSet by
calling its ``specialize`` method with the domain of interest.
Examples
--------
The built-in EquityPricing dataset is defined as follows::
class EquityPricing(DataSet):
open = Column(float)
high = Column(float)
low = Column(float)
close = Column(float)
volume = Column(float)
The built-in USEquityPricing dataset is a specialization of
EquityPricing. It is defined as::
from zipline.pipeline.domain import US_EQUITIES
USEquityPricing = EquityPricing.specialize(US_EQUITIES)
Columns can have types other than float. A dataset containing assorted
company metadata might be defined like this::
class CompanyMetadata(DataSet):
# Use float for semantically-numeric data, even if it's always
# integral valued (see Notes section below). The default missing
# value for floats is NaN.
shares_outstanding = Column(float)
# Use object for string columns. The default missing value for
# object-dtype columns is None.
ticker = Column(object)
# Use integers for integer-valued categorical data like sector or
# industry codes. Integer-dtype columns require an explicit missing
# value.
sector_code = Column(int, missing_value=-1)
# Use bool for boolean-valued flags. Note that the default missing
# value for bool-dtype columns is False.
is_primary_share = Column(bool)
Notes
-----
Because numpy has no native support for integers with missing values, users
are strongly encouraged to use floats for any data that's semantically
numeric. Doing so enables the use of `NaN` as a natural missing value,
which has useful propagation semantics.
"""
domain = GENERIC
ndim = 2
@classmethod
def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls)
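    # Illustrative usage sketch (not part of this class): columns can be looked
    # up dynamically by name, e.g.::
    #
    #     column = EquityPricing.get_column("close")
    #
    # An unknown name raises AttributeError listing the available columns.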
# This attribute is set by DataSetMeta to mark that a class is the root of a
# family of datasets with different domains. We don't want that behavior for the
# base DataSet class, and we also don't want to accidentally use a shared
# version of this attribute if we fail to set this in a subclass somewhere.
del DataSet._domain_specializations
class DataSetFamilyLookupError(AttributeError):
"""Exception thrown when a column is accessed on a DataSetFamily
instead of on the result of a slice.
Parameters
----------
family_name : str
The name of the DataSetFamily on which the access occurred.
column_name : str
The name of the column accessed.
"""
def __init__(self, family_name, column_name):
self.family_name = family_name
self.column_name = column_name
def __str__(self):
# NOTE: when ``aggregate`` is added, remember to update this message
return dedent(
"""\
Attempted to access column {c} from DataSetFamily {d}:
To work with dataset families, you must first select a
slice using the ``slice`` method:
{d}.slice(...).{c}
""".format(
c=self.column_name, d=self.family_name
)
)
class _DataSetFamilyColumn(object):
"""Descriptor used to raise a helpful error when a column is accessed on a
DataSetFamily instead of on the result of a slice.
Parameters
----------
    column_name : str
The name of the column.
"""
def __init__(self, column_name):
self.column_name = column_name
def __get__(self, instance, owner):
raise DataSetFamilyLookupError(
owner.__name__,
self.column_name,
)
class DataSetFamilyMeta(abc.ABCMeta):
def __new__(cls, name, bases, dict_):
columns = {}
for k, v in dict_.items():
if isinstance(v, Column):
# capture all the columns off the DataSetFamily class
# and replace them with a descriptor that will raise a helpful
# error message. The columns will get added to the BaseSlice
# for this type.
columns[k] = v
dict_[k] = _DataSetFamilyColumn(k)
is_abstract = dict_.pop("_abstract", False)
self = super(DataSetFamilyMeta, cls).__new__(
cls,
name,
bases,
dict_,
)
if not is_abstract:
self.extra_dims = extra_dims = OrderedDict(
[
(k, frozenset(v))
for k, v in OrderedDict(self.extra_dims).items()
]
)
if not extra_dims:
raise ValueError(
"DataSetFamily must be defined with non-empty"
" extra_dims, or with `_abstract = True`",
)
class BaseSlice(self._SliceType):
dataset_family = self
ndim = self.slice_ndim
domain = self.domain
locals().update(columns)
BaseSlice.__name__ = "%sBaseSlice" % self.__name__
self._SliceType = BaseSlice
# each type gets a unique cache
self._slice_cache = {}
return self
def __repr__(self):
return "<DataSetFamily: %r, extra_dims=%r>" % (
self.__name__,
list(self.extra_dims),
)
class DataSetFamilySlice(DataSet):
"""Marker type for slices of a
:class:`zipline.pipeline.data.dataset.DataSetFamily` objects
"""
# XXX: This docstring was mostly written when the abstraction here was
# "MultiDimensionalDataSet". It probably needs some rewriting.
class DataSetFamily(metaclass=DataSetFamilyMeta):
"""
Base class for Pipeline dataset families.
Dataset families are used to represent data where the unique identifier for
a row requires more than just asset and date coordinates. A
:class:`DataSetFamily` can also be thought of as a collection of
:class:`~zipline.pipeline.data.DataSet` objects, each of which has the same
columns, domain, and ndim.
:class:`DataSetFamily` objects are defined with one or more
:class:`~zipline.pipeline.data.Column` objects, plus one additional field:
``extra_dims``.
The ``extra_dims`` field defines coordinates other than asset and date that
must be fixed to produce a logical timeseries. The column objects determine
columns that will be shared by slices of the family.
``extra_dims`` are represented as an ordered dictionary where the keys are
the dimension name, and the values are a set of unique values along that
dimension.
To work with a :class:`DataSetFamily` in a pipeline expression, one must
choose a specific value for each of the extra dimensions using the
:meth:`~zipline.pipeline.data.DataSetFamily.slice` method.
For example, given a :class:`DataSetFamily`:
.. code-block:: python
class SomeDataSet(DataSetFamily):
extra_dims = [
('dimension_0', {'a', 'b', 'c'}),
('dimension_1', {'d', 'e', 'f'}),
]
column_0 = Column(float)
column_1 = Column(bool)
This dataset might represent a table with the following columns:
::
sid :: int64
asof_date :: datetime64[ns]
timestamp :: datetime64[ns]
dimension_0 :: str
dimension_1 :: str
column_0 :: float64
column_1 :: bool
Here we see the implicit ``sid``, ``asof_date`` and ``timestamp`` columns
as well as the extra dimensions columns.
This :class:`DataSetFamily` can be converted to a regular :class:`DataSet`
with:
.. code-block:: python
DataSetSlice = SomeDataSet.slice(dimension_0='a', dimension_1='e')
This sliced dataset represents the rows from the higher dimensional dataset
where ``(dimension_0 == 'a') & (dimension_1 == 'e')``.
"""
_abstract = True # Removed by metaclass
domain = GENERIC
slice_ndim = 2
_SliceType = DataSetFamilySlice
@type.__call__
class extra_dims(object):
"""OrderedDict[str, frozenset] of dimension name -> unique values
May be defined on subclasses as an iterable of pairs: the
metaclass converts this attribute to an OrderedDict.
"""
__isabstractmethod__ = True
def __get__(self, instance, owner):
return []
@classmethod
def _canonical_key(cls, args, kwargs):
extra_dims = cls.extra_dims
dimensions_set = set(extra_dims)
if not set(kwargs) <= dimensions_set:
extra = sorted(set(kwargs) - dimensions_set)
raise TypeError(
"%s does not have the following %s: %s\n"
"Valid dimensions are: %s"
% (
cls.__name__,
s("dimension", extra),
", ".join(extra),
", ".join(extra_dims),
),
)
if len(args) > len(extra_dims):
raise TypeError(
"%s has %d extra %s but %d %s given"
% (
cls.__name__,
len(extra_dims),
s("dimension", extra_dims),
len(args),
plural("was", "were", args),
),
)
missing = object()
coords = OrderedDict(zip(extra_dims, repeat(missing)))
to_add = dict(zip(extra_dims, args))
coords.update(to_add)
added = set(to_add)
for key, value in kwargs.items():
if key in added:
raise TypeError(
"%s got multiple values for dimension %r"
% (
cls.__name__,
                        key,
),
)
coords[key] = value
added.add(key)
missing = {k for k, v in coords.items() if v is missing}
if missing:
missing = sorted(missing)
raise TypeError(
"no coordinate provided to %s for the following %s: %s"
% (
cls.__name__,
s("dimension", missing),
", ".join(missing),
),
)
# validate that all of the provided values exist along their given
# dimensions
for key, value in coords.items():
if value not in cls.extra_dims[key]:
raise ValueError(
"%r is not a value along the %s dimension of %s"
% (
value,
key,
cls.__name__,
),
)
return coords, tuple(coords.items())
@classmethod
def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates."""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = "%s.slice(%s)" % (
cls.__name__,
", ".join("%s=%r" % item for item in coords.items()),
)
return Slice
@classmethod
def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice
CurrencyConversion = namedtuple(
"CurrencyConversion",
["currency", "field"],
)

# --- zipline/pipeline/data/dataset.py (package: zipline-crypto) ---
from itertools import chain
from operator import attrgetter
from numpy import (
any as np_any,
float64,
nan,
nanpercentile,
uint8,
)
from zipline.errors import (
BadPercentileBounds,
NonExistentAssetInTimeFrame,
UnsupportedDataType,
)
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import is_missing, grouped_masked_is_maximal
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
FILTER_BINOPS,
method_name_for_op,
NumericalExpression,
)
from zipline.pipeline.mixins import (
CustomTermMixin,
IfElseMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.input_validation import expect_types
from zipline.utils.numpy_utils import (
same,
bool_dtype,
int64_dtype,
repeat_first_axis,
)
from ..sentinels import NotSpecified
def concat_tuples(*tuples):
"""
Concatenate a sequence of tuples into one tuple.
"""
return tuple(chain(*tuples))
def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op,
other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {"~"}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator
class Filter(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline expression computing a boolean output.
Filters are most commonly useful for describing sets of assets to include
or exclude for some particular purpose. Many Pipeline API functions accept
a ``mask`` argument, which can be supplied a Filter indicating that only
values passing the Filter should be considered when performing the
requested computation. For example, :meth:`zipline.pipeline.Factor.top`
accepts a mask indicating that ranks should be computed only on assets that
passed the specified Filter.
The most common way to construct a Filter is via one of the comparison
operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of
:class:`~zipline.pipeline.Factor`. For example, a natural way to construct
a Filter for stocks with a 10-day VWAP less than $20.0 is to first
construct a Factor computing 10-day VWAP and compare it to the scalar value
20.0::
>>> from zipline.pipeline.factors import VWAP
>>> vwap_10 = VWAP(window_length=10)
>>> vwaps_under_20 = (vwap_10 <= 20)
Filters can also be constructed via comparisons between two Factors. For
example, to construct a Filter producing True for asset/date pairs where
    the asset's 10-day VWAP was greater than its 30-day VWAP::
>>> short_vwap = VWAP(window_length=10)
>>> long_vwap = VWAP(window_length=30)
>>> higher_short_vwap = (short_vwap > long_vwap)
Filters can be combined via the ``&`` (and) and ``|`` (or) operators.
``&``-ing together two filters produces a new Filter that produces True if
**both** of the inputs produced True.
``|``-ing together two filters produces a new Filter that produces True if
**either** of its inputs produced True.
The ``~`` operator can be used to invert a Filter, swapping all True values
with Falses and vice-versa.
    Filters may be set as the ``screen`` attribute of a Pipeline, indicating
    that asset/date pairs for which the filter produces False should be excluded
from the Pipeline's output. This is useful both for reducing noise in the
output of a Pipeline and for reducing memory consumption of Pipeline
results.
"""
# Filters are window-safe by default, since a yes/no decision means the
# same thing from all temporal perspectives.
window_safe = True
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = FILTER_DTYPES
dtype = bool_dtype
clsdict = locals()
clsdict.update(
{method_name_for_op(op): binary_operator(op) for op in FILTER_BINOPS}
)
clsdict.update(
{
method_name_for_op(op, commute=True): binary_operator(op)
for op in FILTER_BINOPS
}
)
__invert__ = unary_operator("~")
def _validate(self):
# Run superclass validation first so that we handle `dtype not passed`
# before this.
retval = super(Filter, self)._validate()
if self.dtype != bool_dtype:
raise UnsupportedDataType(typename=type(self).__name__, dtype=self.dtype)
return retval
@classmethod
def _principal_computable_term_type(cls):
return Filter
@expect_types(if_true=ComputableTerm, if_false=ComputableTerm)
def if_else(self, if_true, if_false):
"""
Create a term that selects values from one of two choices.
Parameters
----------
if_true : zipline.pipeline.term.ComputableTerm
Expression whose values should be used at locations where this
filter outputs True.
if_false : zipline.pipeline.term.ComputableTerm
Expression whose values should be used at locations where this
filter outputs False.
Returns
-------
merged : zipline.pipeline.term.ComputableTerm
A term that computes by taking values from either ``if_true`` or
``if_false``, depending on the values produced by ``self``.
            The returned term draws from ``if_true`` at locations where ``self``
produces True, and it draws from ``if_false`` at locations where
``self`` produces False.
Example
-------
Let ``f`` be a Factor that produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 5.0 6.0 7.0 8.0
Let ``g`` be another Factor that produces the following output::
AAPL MSFT MCD BK
2017-03-13 10.0 20.0 30.0 40.0
2017-03-14 50.0 60.0 70.0 80.0
Finally, let ``condition`` be a Filter that produces the following
output::
AAPL MSFT MCD BK
2017-03-13 True False True False
2017-03-14 True True False False
Then, the expression ``condition.if_else(f, g)`` produces the following
output::
AAPL MSFT MCD BK
2017-03-13 1.0 20.0 3.0 40.0
2017-03-14 5.0 6.0 70.0 80.0
See Also
--------
numpy.where
Factor.fillna
"""
true_type = if_true._principal_computable_term_type()
false_type = if_false._principal_computable_term_type()
if true_type is not false_type:
raise TypeError(
"Mismatched types in if_else(): if_true={}, but if_false={}".format(
true_type.__name__, false_type.__name__
)
)
if if_true.dtype != if_false.dtype:
raise TypeError(
"Mismatched dtypes in if_else(): "
"if_true.dtype = {}, if_false.dtype = {}".format(
if_true.dtype, if_false.dtype
)
)
if if_true.outputs != if_false.outputs:
raise ValueError(
"Mismatched outputs in if_else(): "
"if_true.outputs = {}, if_false.outputs = {}".format(
if_true.outputs, if_false.outputs
),
)
if not same(if_true.missing_value, if_false.missing_value):
raise ValueError(
"Mismatched missing values in if_else(): "
"if_true.missing_value = {!r}, if_false.missing_value = {!r}".format(
if_true.missing_value, if_false.missing_value
)
)
return_type = type(if_true)._with_mixin(IfElseMixin)
return return_type(
condition=self,
if_true=if_true,
if_false=if_false,
)
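# Illustrative sketch (comment only, not executed): choosing between two
# hypothetical Factors based on a Filter built from a comparison.
#
#   >>> cheap = vwap_10 < 20.0                         # a Filter
#   >>> blended = cheap.if_else(small_cap_score, large_cap_score)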
class NumExprFilter(NumericalExpression, Filter):
"""
A Filter computed from a numexpr expression.
"""
@classmethod
def create(cls, expr, binds):
"""
        Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype)
def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return (
super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
)
& mask
)
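# Illustrative sketch (comment only, not executed): ``create`` is the helper
# that the generated operators use to build boolean numexpr terms, e.g.
#
#   >>> NumExprFilter.create("x_0 & ~x_1", (include_filter, exclude_filter))
#
# where both inputs are hypothetical bool-dtype terms.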
class NullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return data.is_missing()
return is_missing(arrays[0], self.inputs[0].missing_value)
class NotNullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are **not** missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NotNullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return ~data.is_missing()
return ~is_missing(arrays[0], self.inputs[0].missing_value)
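# Illustrative sketch (comment only, not executed): these filters are normally
# reached through ``Term.isnull()`` / ``Term.notnull()``, but direct
# construction with a hypothetical term looks like:
#
#   >>> have_data = NotNullFilter(some_factor)
#   >>> missing = NullFilter(some_factor)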
class PercentileFilter(SingleInputMixin, Filter):
"""
A Filter representing assets falling between percentile bounds of a Factor.
Parameters
----------
factor : zipline.pipeline.factor.Factor
The factor over which to compute percentile bounds.
    min_percentile : float [0.0, 100.0]
        The minimum percentile rank of an asset that will pass the filter.
    max_percentile : float [0.0, 100.0]
        The maximum percentile rank of an asset that will pass the filter.
"""
window_length = 0
def __new__(cls, factor, min_percentile, max_percentile, mask):
return super(PercentileFilter, cls).__new__(
cls,
inputs=(factor,),
mask=mask,
min_percentile=min_percentile,
max_percentile=max_percentile,
)
def _init(self, min_percentile, max_percentile, *args, **kwargs):
self._min_percentile = min_percentile
self._max_percentile = max_percentile
return super(PercentileFilter, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs):
return (
super(PercentileFilter, cls)._static_identity(*args, **kwargs),
min_percentile,
max_percentile,
)
def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0,
)
return super(PercentileFilter, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "{}:\\l min: {}, max: {}\\l".format(
type(self).__name__,
self._min_percentile,
self._max_percentile,
)
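# Illustrative sketch (comment only, not executed): PercentileFilter is
# typically constructed via ``Factor.percentile_between``, with bounds
# expressed in [0.0, 100.0]; ``some_factor`` is hypothetical.
#
#   >>> middle_half = some_factor.percentile_between(25.0, 75.0)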
class CustomFilter(PositiveWindowLengthMixin, CustomTermMixin, Filter):
"""
Base class for user-defined Filters.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to ``self.compute``. If this
        argument is not passed to the CustomFilter constructor, we look for a
class-level attribute named ``inputs``.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFilter constructor, we look for a class-level attribute
named `window_length`.
Notes
-----
Users implementing their own Filters should subclass CustomFilter and
implement a method named ``compute`` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFilter constructor.
The specific types of the values passed to ``compute`` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
            Column labels for `out` and `inputs`.
out : np.array[bool, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
See the documentation for
:class:`~zipline.pipeline.CustomFactor` for more details on
implementing a custom ``compute`` method.
See Also
--------
zipline.pipeline.CustomFactor
"""
def _validate(self):
try:
super(CustomFilter, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomClassifier?",
)
elif self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint="Did you mean to create a CustomFactor?",
)
raise
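# Minimal illustrative sketch (comment only, not executed) of a user-defined
# filter: True where the latest close exceeds the mean close over the window.
# ``EquityPricing.close`` is assumed to be an available BoundColumn; any
# OHLCV-style column would do.
#
#   >>> from zipline.pipeline.data import EquityPricing
#   >>>
#   >>> class AboveWindowMean(CustomFilter):
#   ...     inputs = [EquityPricing.close]
#   ...     window_length = 20
#   ...
#   ...     def compute(self, today, assets, out, close):
#   ...         out[:] = close[-1] > close.mean(axis=0)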
class ArrayPredicate(SingleInputMixin, Filter):
"""
A filter applying a function from (ndarray, *args) -> ndarray[bool].
Parameters
----------
term : zipline.pipeline.Term
Term producing the array over which the predicate will be computed.
op : function(ndarray, *args) -> ndarray[bool]
Function to apply to the result of `term`.
opargs : tuple[hashable]
        Additional arguments to apply to ``op``.
"""
params = ("op", "opargs")
window_length = 0
@expect_types(term=Term, opargs=tuple)
def __new__(cls, term, op, opargs):
hash(opargs) # fail fast if opargs isn't hashable.
return super(ArrayPredicate, cls).__new__(
ArrayPredicate,
op=op,
opargs=opargs,
inputs=(term,),
mask=term.mask,
)
def _compute(self, arrays, dates, assets, mask):
params = self.params
data = arrays[0]
return params["op"](data, *params["opargs"]) & mask
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "{}:\\l op: {}.{}()".format(
type(self).__name__,
self.params["op"].__module__,
self.params["op"].__name__,
)
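# Illustrative sketch (comment only, not executed): applying a numpy predicate
# to a hypothetical float-dtype term.
#
#   >>> from numpy import isnan
#   >>> nan_locs = ArrayPredicate(term=some_factor, op=isnan, opargs=())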
class Latest(LatestMixin, CustomFilter):
"""
Filter producing the most recently-known value of `inputs[0]` on each day.
"""
pass
class SingleAsset(Filter):
"""
A Filter that computes to True only for the given asset.
"""
inputs = []
window_length = 1
def __new__(cls, asset):
return super(SingleAsset, cls).__new__(cls, asset=asset)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SingleAsset, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SingleAsset, cls)._static_identity(*args, **kwargs),
asset,
)
def _compute(self, arrays, dates, assets, mask):
is_my_asset = assets == self._asset.sid
out = repeat_first_axis(is_my_asset, len(mask))
# Raise an exception if `self._asset` does not exist for the entirety
# of the timeframe over which we are computing.
if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)):
raise NonExistentAssetInTimeFrame(
asset=self._asset,
start_date=dates[0],
end_date=dates[-1],
)
return out
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "SingleAsset:\\l asset: {!r}\\l".format(self._asset)
class StaticSids(Filter):
"""
A Filter that computes True for a specific set of predetermined sids.
``StaticSids`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of sids that are known ahead of
time.
Parameters
----------
sids : iterable[int]
An iterable of sids for which to filter.
"""
inputs = ()
window_length = 0
params = ("sids",)
def __new__(cls, sids):
sids = frozenset(sids)
return super(StaticSids, cls).__new__(cls, sids=sids)
def _compute(self, arrays, dates, sids, mask):
my_columns = sids.isin(self.params["sids"])
return repeat_first_axis(my_columns, len(mask)) & mask
class StaticAssets(StaticSids):
"""
A Filter that computes True for a specific set of predetermined assets.
``StaticAssets`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of assets that are known ahead of
time.
Parameters
----------
assets : iterable[Asset]
An iterable of assets for which to filter.
"""
def __new__(cls, assets):
sids = frozenset(asset.sid for asset in assets)
return super(StaticAssets, cls).__new__(cls, sids)
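# Illustrative sketch (comment only, not executed): pinning a pipeline to a
# fixed universe; the sids and ``my_assets`` below are hypothetical.
#
#   >>> screen = StaticSids([24, 8554])
#   >>> screen = StaticAssets(my_assets)      # same idea with Asset objects
#   >>> pipe = Pipeline(columns={...}, screen=screen)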
class AllPresent(CustomFilter, SingleInputMixin, StandardOutputs):
"""Pipeline filter indicating input term has data for a given window."""
def _validate(self):
if isinstance(self.inputs[0], Filter):
raise TypeError("Input to filter `AllPresent` cannot be a Filter.")
return super(AllPresent, self)._validate()
def compute(self, today, assets, out, value):
if isinstance(value, LabelArray):
out[:] = ~np_any(value.is_missing(), axis=0)
else:
out[:] = ~np_any(
is_missing(value, self.inputs[0].missing_value),
axis=0,
)
class MaximumFilter(Filter, StandardOutputs):
"""Pipeline filter that selects the top asset, possibly grouped and masked."""
window_length = 0
def __new__(cls, factor, groupby, mask):
if groupby is NotSpecified:
from zipline.pipeline.classifiers import Everything
groupby = Everything()
return super(MaximumFilter, cls).__new__(
cls,
inputs=(factor, groupby),
mask=mask,
)
def _compute(self, arrays, dates, assets, mask):
        # XXX: We're doing a lot of unnecessary work here if `groupby` isn't
# specified.
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
effective_mask = (
mask
& (group_labels != null_label)
& ~is_missing(data, self.inputs[0].missing_value)
).view(uint8)
return grouped_masked_is_maximal(
# Unconditionally view the data as int64.
# This is safe because casting from float64 to int64 is an
# order-preserving operation.
data.view(int64_dtype),
# PERF: Consider supporting different sizes of group labels.
group_labels.astype(int64_dtype),
effective_mask,
)
def __repr__(self):
return "Maximum({}, groupby={}, mask={})".format(
self.inputs[0].recursive_repr(),
self.inputs[1].recursive_repr(),
self.mask.recursive_repr(),
)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Maximum:\\l groupby: {}\\l mask: {}\\l".format(
self.inputs[1].recursive_repr(),
self.mask.recursive_repr(),
        )
from collections import defaultdict
from interface import implements
from numpy import iinfo, uint32, multiply
from zipline.data.fx import ExplodingFXRateReader
from zipline.lib.adjusted_array import AdjustedArray
from zipline.utils.numpy_utils import repeat_first_axis
from .base import PipelineLoader
from .utils import shift_dates
from ..data.equity_pricing import EquityPricing
UINT32_MAX = iinfo(uint32).max
class EquityPricingLoader(implements(PipelineLoader)):
"""A PipelineLoader for loading daily OHLCV data.
Parameters
----------
raw_price_reader : zipline.data.session_bars.SessionBarReader
Reader providing raw prices.
adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader
Reader providing price/volume adjustments.
fx_reader : zipline.data.fx.FXRateReader
Reader providing currency conversions.
"""
def __init__(self, raw_price_reader, adjustments_reader, fx_reader):
self.raw_price_reader = raw_price_reader
self.adjustments_reader = adjustments_reader
self.fx_reader = fx_reader
@classmethod
def without_fx(cls, raw_price_reader, adjustments_reader):
"""
Construct an EquityPricingLoader without support for fx rates.
The returned loader will raise an error if requested to load
currency-converted columns.
Parameters
----------
raw_price_reader : zipline.data.session_bars.SessionBarReader
Reader providing raw prices.
adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader
Reader providing price/volume adjustments.
Returns
-------
loader : EquityPricingLoader
A loader that can only provide currency-naive data.
"""
return cls(
raw_price_reader=raw_price_reader,
adjustments_reader=adjustments_reader,
fx_reader=ExplodingFXRateReader(),
)
def load_adjusted_array(self, domain, columns, dates, sids, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the **start** of each date. We assume that the latest
# data known on day N is the data from day (N - 1), so we shift all
# query dates back by a trading session.
sessions = domain.all_sessions()
shifted_dates = shift_dates(sessions, dates[0], dates[-1], shift=1)
ohlcv_cols, currency_cols = self._split_column_types(columns)
del columns # From here on we should use ohlcv_cols or currency_cols.
ohlcv_colnames = [c.name for c in ohlcv_cols]
raw_ohlcv_arrays = self.raw_price_reader.load_raw_arrays(
ohlcv_colnames,
shifted_dates[0],
shifted_dates[-1],
sids,
)
# Currency convert raw_arrays in place if necessary. We use shifted
# dates to load currency conversion rates to make them line up with
# dates used to fetch prices.
self._inplace_currency_convert(
ohlcv_cols,
raw_ohlcv_arrays,
shifted_dates,
sids,
)
adjustments = self.adjustments_reader.load_pricing_adjustments(
ohlcv_colnames,
dates,
sids,
)
out = {}
for c, c_raw, c_adjs in zip(ohlcv_cols, raw_ohlcv_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
c_adjs,
c.missing_value,
)
for c in currency_cols:
codes_1d = self.raw_price_reader.currency_codes(sids)
codes = repeat_first_axis(codes_1d, len(dates))
out[c] = AdjustedArray(
codes,
adjustments={},
missing_value=None,
)
return out
@property
def currency_aware(self):
# Tell the pipeline engine that this loader supports currency
# conversion if we have a non-dummy fx rates reader.
return not isinstance(self.fx_reader, ExplodingFXRateReader)
def _inplace_currency_convert(self, columns, arrays, dates, sids):
"""
Currency convert raw data loaded for ``column``.
Parameters
----------
columns : list[zipline.pipeline.data.BoundColumn]
List of columns whose raw data has been loaded.
arrays : list[np.array]
List of arrays, parallel to ``columns`` containing data for the
column.
dates : pd.DatetimeIndex
Labels for rows of ``arrays``. These are the dates that should
be used to fetch fx rates for conversion.
sids : np.array[int64]
Labels for columns of ``arrays``.
Returns
-------
None
Side Effects
------------
Modifies ``arrays`` in place by applying currency conversions.
"""
# Group columns by currency conversion spec.
by_spec = defaultdict(list)
for column, array in zip(columns, arrays):
by_spec[column.currency_conversion].append(array)
# Nothing to do for terms with no currency conversion.
by_spec.pop(None, None)
if not by_spec:
return
fx_reader = self.fx_reader
base_currencies = self.raw_price_reader.currency_codes(sids)
# Columns with the same conversion spec will use the same multipliers.
for spec, arrays in by_spec.items():
rates = fx_reader.get_rates(
rate=spec.field,
quote=spec.currency.code,
bases=base_currencies,
dts=dates,
)
for arr in arrays:
multiply(arr, rates, out=arr)
def _split_column_types(self, columns):
"""Split out currency columns from OHLCV columns.
Parameters
----------
columns : list[zipline.pipeline.data.BoundColumn]
Columns to be loaded by ``load_adjusted_array``.
Returns
-------
ohlcv_columns : list[zipline.pipeline.data.BoundColumn]
Price and volume columns from ``columns``.
currency_columns : list[zipline.pipeline.data.BoundColumn]
Currency code column from ``columns``, if present.
"""
currency_name = EquityPricing.currency.name
ohlcv = []
currency = []
for c in columns:
if c.name == currency_name:
currency.append(c)
else:
ohlcv.append(c)
return ohlcv, currency
# Backwards compat alias.
USEquityPricingLoader = EquityPricingLoader
from interface import implements
from numpy import (
arange,
array,
eye,
float64,
full,
iinfo,
nan,
uint32,
)
from numpy.random import RandomState
from pandas import DataFrame, Timestamp
from sqlite3 import connect as sqlite3_connect
from .base import PipelineLoader
from .frame import DataFrameLoader
from zipline.data.adjustments import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from zipline.data.bcolz_daily_bars import US_EQUITY_PRICING_BCOLZ_COLUMNS
from zipline.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
object_dtype,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class PrecomputedLoader(implements(PipelineLoader)):
"""
Synthetic PipelineLoader that uses a pre-computed array for each column.
Parameters
----------
    constants : dict
Map from column to values to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame whose indices are ``dates`` and ``sids``
dates : iterable[datetime-like]
Row labels for input data. Can be anything that pd.DataFrame will
coerce to a DatetimeIndex.
sids : iterable[int-like]
Column labels for input data. Can be anything that pd.DataFrame will
coerce to an Int64Index.
Notes
-----
Adjustments are unsupported by this loader.
"""
def __init__(self, constants, dates, sids):
loaders = {}
for column, const in constants.items():
frame = DataFrame(
const,
index=dates,
columns=sids,
dtype=column.dtype,
)
loaders[column] = DataFrameLoader(
column=column,
baseline=frame,
adjustments=None,
)
self._loaders = loaders
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders.get(col)
if loader is None:
loader = self._loaders[col.unspecialize()]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(loader.load_adjusted_array(domain, [col], dates, sids, mask))
return out
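# Illustrative sketch (comment only, not executed): serving a constant value
# for a hypothetical BoundColumn ``MyData.value``.
#
#   >>> loader = PrecomputedLoader(
#   ...     constants={MyData.value: 1.5},
#   ...     dates=sessions,          # any DatetimeIndex of trading days
#   ...     sids=[1, 2, 3],
#   ... )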
class EyeLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s
elsewhere.
Parameters
----------
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, columns, dates, sids):
shape = (len(dates), len(sids))
super(EyeLoader, self).__init__(
            {column: eye(*shape, dtype=column.dtype) for column in columns},
dates,
sids,
)
class SeededRandomLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays randomly-generated with a given seed.
Parameters
----------
seed : int
Seed for numpy.random.RandomState.
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, seed, columns, dates, sids):
self._seed = seed
super(SeededRandomLoader, self).__init__(
{c: self.values(c.dtype, dates, sids) for c in columns},
dates,
sids,
)
def values(self, dtype, dates, sids):
"""
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
"""
shape = (len(dates), len(sids))
return {
datetime64ns_dtype: self._datetime_values,
float64_dtype: self._float_values,
int64_dtype: self._int_values,
bool_dtype: self._bool_values,
object_dtype: self._object_values,
}[dtype](shape)
@property
def state(self):
"""
Make a new RandomState from our seed.
This ensures that every call to _*_values produces the same output
every time for a given SeededRandomLoader instance.
"""
return RandomState(self._seed)
def _float_values(self, shape):
"""
        Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape)
def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return self.state.randint(low=0, high=100, size=shape).astype(
"int64"
) # default is system int
def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp("2014", tz="UTC").asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype("timedelta64[D]")
return start + offsets
def _bool_values(self, shape):
"""
Return uniformly-distributed True/False values.
"""
return self.state.randn(*shape) < 0
def _object_values(self, shape):
res = self._int_values(shape).astype(str).astype(object)
return res
OHLCV = ("open", "high", "low", "close", "volume")
OHLC = ("open", "high", "low", "close")
PSEUDO_EPOCH = Timestamp("2000-01-01", tz="UTC")
def asset_start(asset_info, asset):
ret = asset_info.loc[asset]["start_date"]
if ret.tz is None:
ret = ret.tz_localize("UTC")
assert ret.tzname() == "UTC", "Unexpected non-UTC timestamp"
return ret
def asset_end(asset_info, asset):
ret = asset_info.loc[asset]["end_date"]
if ret.tz is None:
ret = ret.tz_localize("UTC")
assert ret.tzname() == "UTC", "Unexpected non-UTC timestamp"
return ret
def make_bar_data(asset_info, calendar, holes=None):
"""
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
                              + (1,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
    while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : pd.DatetimeIndex
The trading calendar to use.
holes : dict[int -> tuple[pd.Timestamps]], optional
A dict mapping asset ids to the tuple of dates that should have
no data for that asset in the output. Default is no holes.
Yields
------
p : (int, pd.DataFrame)
        A sid, data pair to be passed to BcolzDailyBarWriter.write
"""
assert (
# Using .value here to avoid having to care about UTC-aware dates.
PSEUDO_EPOCH.value
< calendar.normalize().min().value
<= asset_info["start_date"].min().value
), "calendar.min(): %s\nasset_info['start_date'].min(): %s" % (
calendar.min(),
asset_info["start_date"].min(),
)
assert (asset_info["start_date"] < asset_info["end_date"]).all()
def _raw_data_for_asset(asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
datetimes = calendar[
calendar.slice_indexer(
asset_start(asset_info, asset_id),
asset_end(asset_info, asset_id),
)
]
data = full(
(len(datetimes), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * 100 * 1000,
dtype=uint32,
)
        # Add 1,000 * column-index to OHLCV columns
data[:, :5] += arange(5, dtype=uint32) * 1000
        # Add days since Jan 1 2000 for OHLCV columns.
data[:, :5] += array((datetimes - PSEUDO_EPOCH).days)[:, None].astype(uint32)
frame = DataFrame(
data,
index=datetimes,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
if holes is not None and asset_id in holes:
for dt in holes[asset_id]:
frame.loc[dt, OHLC] = nan
frame.loc[dt, ["volume"]] = 0
frame["day"] = nanos_to_seconds(datetimes.asi8)
frame["id"] = asset_id
return frame
for asset in asset_info.index:
yield asset, _raw_data_for_asset(asset)
def expected_bar_value(asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
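# Worked example of the encoding above (pure arithmetic, not executed): for
# asset_id=1, colname='close' (OHLCV index 3), and a date 5,000 days after
# PSEUDO_EPOCH, the expected raw value is
#
#   1 * 100000 + 3 * 1000 + 5000 == 108000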
def expected_bar_value_with_holes(asset_id, date, colname, holes, missing_value):
# Explicit holes are filled with the missing value.
if asset_id in holes and date in holes[asset_id]:
return missing_value
return expected_bar_value(asset_id, date, colname)
def expected_bar_values_2d(dates, assets, asset_info, colname, holes=None):
"""
Return an 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == "volume":
dtype = uint32
missing = 0
else:
dtype = float64
missing = float("nan")
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
if holes is not None:
expected = expected_bar_value_with_holes(
asset,
date,
colname,
holes,
missing,
)
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected
return data
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(":memory:")
writer = SQLiteAdjustmentWriter(conn, None, None)
empty = DataFrame(
{
"sid": array([], dtype=uint32),
"effective_date": array([], dtype=uint32),
"ratio": array([], dtype=float),
}
)
empty_dividends = DataFrame(
{
"sid": array([], dtype=uint32),
"amount": array([], dtype=float64),
"record_date": array([], dtype="datetime64[ns]"),
"ex_date": array([], dtype="datetime64[ns]"),
"declared_date": array([], dtype="datetime64[ns]"),
"pay_date": array([], dtype="datetime64[ns]"),
}
)
writer.write(splits=empty, mergers=empty, dividends=empty_dividends)
        super(NullAdjustmentReader, self).__init__(conn)
from functools import partial
from interface import implements
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import as_column
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index(
[
"sid",
"value",
"kind",
"start_date",
"end_date",
"apply_date",
]
)
class DataFrameLoader(implements(PipelineLoader)):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort_values(["apply_date", "sid"], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype="bool")
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= self.adjustment_end_dates >= min_date
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[dates_filter & sids_filter].set_index(
"apply_date"
)
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method="bfill")
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
            # Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError("Can't load multiple columns with DataFrameLoader")
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = date_indexer != -1
good_assets = assets_indexer != -1
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
}
def _validate_input_column(self, column):
"""Make sure a passed column is our column."""
if column != self.column and column.unspecialize() != self.column:
            raise ValueError("Can't load unknown column %s" % column)
import numpy as np
import pandas as pd
from zipline.errors import NoFurtherDataError
from zipline.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME
from zipline.utils.date_utils import make_utc_aware
from zipline.utils.numpy_utils import categorical_dtype
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
def validate_event_metadata(event_dates, event_timestamps, event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert (
len(event_sids) == len(event_dates) == len(event_timestamps)
), "mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(
all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids
):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input events occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(
# pd.to_datetime(event_dates, utc=True), side="right")
make_utc_aware(pd.DatetimeIndex(event_dates)),
side="right",
)
ts_ixs = data_query_cutoff.searchsorted(
# pd.to_datetime(event_timestamps, utc=True), side="right"
make_utc_aware(pd.DatetimeIndex(event_timestamps)),
side="right",
)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
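# Illustrative sketch (comment only, not executed): the returned indexer is
# applied to the per-event value arrays and then masked where it is -1.
# All names below are hypothetical.
#
#   >>> ixs = next_event_indexer(all_dates, cutoffs, all_sids,
#   ...                          event_dates, event_timestamps, event_sids)
#   >>> next_values = event_values[ixs]        # shape: (n_dates, n_sids)
#   >>> next_values[ixs == -1] = missing_value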
def previous_event_indexer(
data_query_cutoff_times, all_sids, event_dates, event_timestamps, event_sids
):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
    data_query_cutoff_times : pd.DatetimeIndex
        The boundaries for the given trading sessions. These provide the row
        labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input events occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
        An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(
# pd.to_datetime(eff_dts, utc=True), side="right"
make_utc_aware(pd.DatetimeIndex(eff_dts)),
side="right",
)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix : last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
def last_in_date_group(
df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None,
):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
# get positions in `data_query_cutoff_times` just before `TS_FIELD_NAME` in `df`
idx_before_ts = data_query_cutoff_times.searchsorted(
make_utc_aware(pd.DatetimeIndex(df[TS_FIELD_NAME]))
)
idx = [data_query_cutoff_times[idx_before_ts]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
to_unstack = idx[-1 : -len(idx) : -1]
last_in_group = (
df.drop(TS_FIELD_NAME, axis=1)
.groupby(idx, sort=False)
.last()
.unstack(level=to_unstack)
)
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
# for _ in range(len(idx) - 1):
# last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
columns = pd.MultiIndex.from_product(
tuple(cols.levels[0 : len(extra_groupers) + 1]) + (assets,),
names=cols.names,
)
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times, columns=columns
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
            df[column_name] = df[column_name].where(
pd.notnull(df[column_name]), column.missing_value
)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = (
df[column_name].fillna(column.missing_value).astype(column.dtype)
)
def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by ``shift`` days.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
Returns
-------
shifted : pd.DatetimeIndex
The range [start_date, end_date] from ``dates``, shifted backwards by
``shift`` days.
Raises
------
ValueError
If ``start_date`` or ``end_date`` is not in ``dates``.
NoFurtherDataError
If shifting ``start_date`` back by ``shift`` days would push it off the
end of ``dates``.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
    return dates[start - shift : end - shift + 1] # +1 to be inclusive
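# Worked example (not executed; assumes ``sessions`` is a weekday-only
# DatetimeIndex containing these dates):
#
#   >>> shift_dates(sessions, pd.Timestamp("2014-01-06"),
#   ...             pd.Timestamp("2014-01-08"), shift=1)
#   DatetimeIndex(['2014-01-03', '2014-01-06', '2014-01-07'], ...)
#
# i.e. the same number of sessions, each moved one session earlier.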
from abc import abstractmethod, abstractproperty
from interface import implements
import numpy as np
import pandas as pd
from toolz import groupby
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import (
Datetime641DArrayOverwrite,
Datetime64Overwrite,
Float641DArrayOverwrite,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.date_utils import make_utc_aware
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.pipeline.loaders.utils import (
ffill_across_cols,
last_in_date_group,
)
INVALID_NUM_QTRS_MESSAGE = (
"Passed invalid number of quarters %s; " "must pass a number of quarters >= 0"
)
NEXT_FISCAL_QUARTER = "next_fiscal_quarter"
NEXT_FISCAL_YEAR = "next_fiscal_year"
NORMALIZED_QUARTERS = "normalized_quarters"
PREVIOUS_FISCAL_QUARTER = "previous_fiscal_quarter"
PREVIOUS_FISCAL_YEAR = "previous_fiscal_year"
SHIFTED_NORMALIZED_QTRS = "shifted_normalized_quarters"
SIMULATION_DATES = "dates"
def normalize_quarters(years, quarters):
return years * 4 + quarters - 1
def split_normalized_quarters(normalized_quarters):
years = normalized_quarters // 4
quarters = normalized_quarters % 4
return years, quarters + 1
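# Worked example of the quarter arithmetic above (not executed):
#
#   >>> normalize_quarters(2015, 3)       # 2015 * 4 + 3 - 1
#   8062
#   >>> split_normalized_quarters(8062)
#   (2015, 3)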
# These metadata columns are used to align event indexers.
metadata_columns = frozenset(
{
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
}
)
def required_estimates_fields(columns):
"""
Compute the set of resource columns required to serve
`columns`.
"""
# We also expect any of the field names that our loadable columns
# are mapped to.
return metadata_columns.union(columns.values())
def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
def add_new_adjustments(adjustments_dict, adjustments, column_name, ts):
try:
adjustments_dict[column_name][ts].extend(adjustments)
except KeyError:
adjustments_dict[column_name][ts] = adjustments
class EarningsEstimatesLoader(implements(PipelineLoader)):
"""
An abstract pipeline loader for estimates data that can load data a
variable number of quarters forwards/backwards from calendar dates
depending on the `num_announcements` attribute of the columns' dataset.
If split adjustments are to be applied, a loader, split-adjusted columns,
and the split-adjusted asof-date must be supplied.
Parameters
----------
estimates : pd.DataFrame
The raw estimates data.
``estimates`` must contain at least 5 columns:
sid : int64
The asset id associated with each estimate.
event_date : datetime64[ns]
The date on which the event that the estimate is for will/has
                occurred.
timestamp : datetime64[ns]
The datetime where we learned about the estimate.
fiscal_quarter : int64
The quarter during which the event has/will occur.
fiscal_year : int64
The year during which the event has/will occur.
name_map : dict[str -> str]
A map of names of BoundColumns that this loader will load to the
names of the corresponding columns in `events`.
"""
def __init__(self, estimates, name_map):
validate_column_specs(estimates, name_map)
self.estimates = estimates[
estimates[EVENT_DATE_FIELD_NAME].notnull()
& estimates[FISCAL_QUARTER_FIELD_NAME].notnull()
& estimates[FISCAL_YEAR_FIELD_NAME].notnull()
]
self.estimates[NORMALIZED_QUARTERS] = normalize_quarters(
self.estimates[FISCAL_YEAR_FIELD_NAME],
self.estimates[FISCAL_QUARTER_FIELD_NAME],
)
self.array_overwrites_dict = {
datetime64ns_dtype: Datetime641DArrayOverwrite,
float64_dtype: Float641DArrayOverwrite,
}
self.scalar_overwrites_dict = {
datetime64ns_dtype: Datetime64Overwrite,
float64_dtype: Float64Overwrite,
}
self.name_map = name_map
@abstractmethod
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
raise NotImplementedError("get_zeroth_quarter_idx")
@abstractmethod
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
raise NotImplementedError("get_shifted_qtrs")
@abstractmethod
def create_overwrite_for_estimate(
self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments,
split_adjusted_asof_idx,
):
raise NotImplementedError("create_overwrite_for_estimate")
@abstractproperty
def searchsorted_side(self):
        raise NotImplementedError("searchsorted_side")
def get_requested_quarter_data(
self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates,
):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
            The number of announcements out the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.reindex(index=requested_qtr_idx)
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(
requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME],
) = split_normalized_quarters(requested_qtr_data[SHIFTED_NORMALIZED_QTRS])
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments for and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
pd.to_datetime(self._split_adjusted_asof, utc=True)
# make_utc_aware(pd.DatetimeIndex(self._split_adjusted_asof))
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
if self._split_adjusted_asof.tzinfo is not None:
if self._split_adjusted_asof < dates[0]:
split_adjusted_asof_idx = -1
else:
if self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx
def collect_overwrites_for_sid(
self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid,
):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
# pd.to_datetime(group[EVENT_DATE_FIELD_NAME], utc=True),
make_utc_aware(pd.DatetimeIndex(group[EVENT_DATE_FIELD_NAME])),
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(NORMALIZED_QUARTERS).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
                # Only add adjustments if the next quarter starts somewhere
                # in our date index for this sid. Our 'next' quarter can
                # never start at index 0; a starting index of 0 means that
                # the next quarter's event date was NaT.
                # Find the quarter being requested in the quarter we're
                # crossing into.
                requested_quarter = requested_qtr_data[
                    SHIFTED_NORMALIZED_QTRS,
                    sid,
                ].iloc[idx]
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns,
)
def get_adjustments_for_sid(
self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs,
):
"""
Parameters
----------
group : pd.DataFrame
The data for the given sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_to_idx : dict[int -> int]
            A dictionary mapping each sid to its index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
        col_to_all_adjustments : dict[str -> dict[int -> list of Adjustment]]
            A dictionary mapping each column name to a dictionary of the
            integer index of each timestamp into the date index, mapped to
            the adjustments that should be applied at that index. This
            dictionary is for adjustments for ALL sids. It is modified as
            adjustments are collected.
kwargs :
Additional arguments used in collecting adjustments; unused here.
"""
# Collect all adjustments for a given sid.
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid,
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def merge_into_adjustments_for_all_sids(
self, all_adjustments_for_sid, col_to_all_adjustments
):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
        all_adjustments_for_sid : dict[str -> dict[int -> list of Adjustment]]
            All adjustments for a particular sid.
        col_to_all_adjustments : dict[str -> dict[int -> list of Adjustment]]
            All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts)
def get_adjustments(
self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs,
):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
            Additional keyword arguments to forward to
            `get_adjustments_for_sid` for use in computing adjustments for
            each sid.
Returns
-------
        col_to_all_adjustments : dict[str -> dict[int -> list of Adjustment]]
            A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
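        # Map each sid to its position in `assets` so adjustments can address
        # columns of the output arrays by integer index.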
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs,
)
return col_to_all_adjustments
def create_overwrites_for_quarter(
self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns,
):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(
col_to_overwrites, adjs, column_name, next_qtr_start_idx
)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(col, next_qtr_start_idx, sid_idx)]
add_new_adjustments(
col_to_overwrites, adjs, column_name, next_qtr_start_idx
)
def overwrite_with_null(self, column, next_qtr_start_idx, sid_idx):
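        # Blank out rows [0, next_qtr_start_idx - 1] for this sid with the
        # column's missing value.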
return self.scalar_overwrites_dict[column.dtype](
0, next_qtr_start_idx - 1, sid_idx, sid_idx, column.missing_value
)
def load_adjusted_array(self, domain, columns, dates, sids, mask):
# Separate out getting the columns' datasets and the datasets'
# num_announcements attributes to ensure that we're catching the right
# AttributeError.
col_to_datasets = {col: col.dataset for col in columns}
try:
groups = groupby(
lambda col: col_to_datasets[col].num_announcements, col_to_datasets
)
except AttributeError:
raise AttributeError(
"Datasets loaded via the "
"EarningsEstimatesLoader must define a "
"`num_announcements` attribute that defines "
"how many quarters out the loader should load"
" the data relative to `dates`."
)
if any(num_qtr < 0 for num_qtr in groups):
raise ValueError(
INVALID_NUM_QTRS_MESSAGE
% ",".join(str(qtr) for qtr in groups if qtr < 0)
)
out = {}
# To optimize performance, only work below on assets that are
# actually in the raw data.
data_query_cutoff_times = domain.data_query_cutoff_for_sessions(dates)
assets_with_data = set(sids) & set(self.estimates[SID_FIELD_NAME])
last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr(
assets_with_data,
columns,
dates,
data_query_cutoff_times,
)
# Determine which quarter is immediately next/previous for each
# date.
zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr)
zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx]
for num_announcements, columns in groups.items():
requested_qtr_data = self.get_requested_quarter_data(
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates,
)
# Calculate all adjustments for the given quarter and accumulate
# them for each column.
col_to_adjustments = self.get_adjustments(
zero_qtr_data, requested_qtr_data, last_per_qtr, dates, sids, columns
)
# Lookup the asset indexer once, this is so we can reindex
# the assets returned into the assets requested for each column.
# This depends on the fact that our column pd.MultiIndex has the same
# sids for each field. This allows us to do the lookup once on
# level 1 instead of doing the lookup each time per value in
# level 0.
# asset_indexer = sids.get_indexer_for(
# requested_qtr_data.columns.levels[1],
# )
for col in columns:
column_name = self.name_map[col.name]
# allocate the empty output with the correct missing value
# shape = len(dates), len(sids)
# output_array = np.full(shape=shape,
# fill_value=col.missing_value,
# dtype=col.dtype)
# overwrite the missing value with values from the computed data
try:
output_array = (
requested_qtr_data[column_name]
.reindex(sids, axis=1)
.to_numpy()
.astype(col.dtype)
)
except Exception:
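                    # Direct casting can fail when the raw values contain
                    # missing entries that the column dtype cannot represent;
                    # in that case let pandas substitute the column's missing
                    # value before casting.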
output_array = (
requested_qtr_data[column_name]
.reindex(sids, axis=1)
.to_numpy(na_value=col.missing_value)
.astype(col.dtype)
)
# except ValueError:
# np.copyto(output_array[:, asset_indexer],
# requested_qtr_data[column_name].to_numpy(na_value=output_array.dtype),
# casting='unsafe')
out[col] = AdjustedArray(
output_array,
# There may not be any adjustments at all (e.g. if
# len(date) == 1), so provide a default.
dict(col_to_adjustments.get(column_name, {})),
col.missing_value,
)
return out
def get_last_data_per_qtr(
self, assets_with_data, columns, dates, data_query_cutoff_times
):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
        columns : iterable of BoundColumn
            The columns that need to be loaded from the raw data.
        dates : pd.DatetimeIndex
            The calendar dates for which estimates data is requested.
        data_query_cutoff_times : pd.DatetimeIndex
            The data query cutoff time for each session in `dates`.
Returns
-------
        last_per_qtr : pd.DataFrame
            A DataFrame with columns that are a MultiIndex of [
            self.estimates.columns, normalized_quarters, sid].
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame indexed by [dates, sid, normalized_quarters] that has
            the latest information for each row of the index, sorted by event
            date.
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of
# [self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
data_query_cutoff_times,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
last_per_qtr.index = dates
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME], utc=True
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(EVENT_DATE_FIELD_NAME)
return last_per_qtr, stacked_last_per_qtr
class NextEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = "right"
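    # With side='right', an event date that falls exactly on a calendar date
    # produces a crossover index *after* that date, so the event day itself
    # still belongs to the quarter being reported as 'next'.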
def create_overwrite_for_estimate(
self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None,
):
return [
self.array_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
last_per_qtr[
column_name,
requested_quarter,
sid,
].values[:next_qtr_start_idx],
)
]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
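        # num_announcements == 1 is the upcoming quarter itself (shift of 0);
        # larger values look further into the future.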
return zero_qtrs + (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the next quarter by picking out the upcoming release for
each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
next_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next event.
"""
next_releases_per_date = (
stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
>= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
]
.groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
)
.nth(0)
)
return next_releases_per_date.index
class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = "left"
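    # With side='left', an event date that falls exactly on a calendar date
    # produces a crossover index *on* that date, so the event day itself
    # already counts toward the most recent ('previous') quarter.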
def create_overwrite_for_estimate(
self,
column,
column_name,
dates,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None,
split_dict=None,
):
return [
self.overwrite_with_null(
column,
next_qtr_start_idx,
sid_idx,
)
]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
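        # num_announcements == 1 is the most recently reported quarter
        # (shift of 0); larger values look further into the past.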
return zero_qtrs - (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
        Filters for releases that are on or before each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = (
stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
<= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
]
.groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
)
.nth(-1)
)
return previous_releases_per_date.index
def validate_split_adjusted_column_specs(name_map, columns):
to_be_split = set(columns)
available = set(name_map.keys())
extra = to_be_split - available
if extra:
raise ValueError(
"EarningsEstimatesLoader got the following extra columns to be "
"split-adjusted: {extra}.\n"
"Got Columns: {to_be_split}\n"
"Available Columns: {available}".format(
extra=sorted(extra),
to_be_split=sorted(to_be_split),
available=sorted(available),
)
)
class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader):
"""
Estimates loader that loads data that needs to be split-adjusted.
Parameters
----------
split_adjustments_loader : SQLiteAdjustmentReader
The loader to use for reading split adjustments.
split_adjusted_column_names : iterable of str
The column names that should be split-adjusted.
split_adjusted_asof : pd.Timestamp
The date that separates data into 2 halves: the first half is the set
of dates up to and including the split_adjusted_asof date. All
adjustments occurring during this first half are applied to all
dates in this first half. The second half is the set of dates after
the split_adjusted_asof date. All adjustments occurring during this
second half are applied sequentially as they appear in the timeline.
"""
def __init__(
self,
estimates,
name_map,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof,
):
validate_split_adjusted_column_specs(name_map, split_adjusted_column_names)
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
self._split_adjustment_dict = {}
super(SplitAdjustedEstimatesLoader, self).__init__(estimates, name_map)
@abstractmethod
def collect_split_adjustments(
self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
):
raise NotImplementedError("collect_split_adjustments")
def get_adjustments_for_sid(
self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None,
):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid,
)
(
pre_adjustments,
post_adjustments,
) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[self.estimates[SID_FIELD_NAME] == sid]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group,
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def get_adjustments(
self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs,
):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
# Add all splits to the adjustment dict for this sid.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(dates)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx,
)
def determine_end_idx_for_adjustment(
self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates
):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
        # Find the earliest knowledge date for this quarter that falls on or
        # after the date of this adjustment.
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter)
& (pd.to_datetime(sid_estimates[TS_FIELD_NAME], utc=True) >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
pd.to_datetime(newest_kd_for_qtr, utc=True)
# make_utc_aware(pd.DatetimeIndex(newest_kd_for_qtr))
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns,
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for adjustments
            that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [
Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment,
)
for future_adjustment in adjustment_values
]
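                # For example, assuming zipline's convention that a 2-for-1
                # split is stored with a ratio of 0.5: the multiplications
                # above un-apply it over [0, split_adjusted_asof_date_idx]
                # using 1 / 0.5 == 2, and the loop below re-applies the 0.5
                # ratio, keyed at the index on which the split became known.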
for adjustment, date_index in zip(adjustment_values, date_indexes):
adj = Float64Multiply(
0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment
)
add_new_adjustments(
col_to_split_adjustments, [adj], column_name, date_index
)
return col_to_split_adjustments
def collect_post_asof_split_adjustments(
self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns,
):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[SHIFTED_NORMALIZED_QTRS][
sid
].reset_index()
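            # After `reset_index`, the frame has a positional integer index
            # and a column named after the sid holding the requested (shifted)
            # quarter for each simulation date; NaNs mark dates with no
            # requested quarter.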
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into range by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1,
)
requested_quarters_per_range = [
requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs
]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(*post_adjustments):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates,
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment,
)
add_new_adjustments(
col_to_split_adjustments, [adj], column_name, start_idx
)
return col_to_split_adjustments
def retrieve_split_adjustment_data_for_sid(
self, dates, sid, split_adjusted_asof_idx
):
"""
dates : pd.DatetimeIndex
The calendar dates.
sid : int
The sid for which we want to retrieve adjustments.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
Returns
-------
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
"""
adjustments = self._split_adjustments.get_adjustments_for_sid("splits", sid)
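        # Each adjustment is a (timestamp, value) pair: adj[0] is the
        # adjustment's timestamp and adj[1] its split ratio (see usage below).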
        # Sort adjustments chronologically by timestamp.
        adjustments = sorted(adjustments, key=lambda adj: adj[0])
# Get rid of any adjustments that happen outside of our date index.
adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments))
adjustment_values = np.array([adj[1] for adj in adjustments])
timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
# We need the first date on which we would have known about each
# adjustment.
date_indexes = dates.searchsorted(timestamps)
pre_adjustment_idxs = np.where(date_indexes <= split_adjusted_asof_idx)[0]
last_adjustment_split_asof_idx = -1
if len(pre_adjustment_idxs):
last_adjustment_split_asof_idx = pre_adjustment_idxs.max()
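        # Everything up to and including that position is a pre-asof
        # adjustment (values and date indexes only); the remainder, along
        # with their timestamps, are post-asof adjustments.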
pre_adjustments = (
adjustment_values[: last_adjustment_split_asof_idx + 1],
date_indexes[: last_adjustment_split_asof_idx + 1],
)
post_adjustments = (
adjustment_values[last_adjustment_split_asof_idx + 1 :],
date_indexes[last_adjustment_split_asof_idx + 1 :],
timestamps[last_adjustment_split_asof_idx + 1 :],
)
return pre_adjustments, post_adjustments
def _collect_adjustments(
self,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
):
pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments(
split_adjusted_asof_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns,
)
post_adjustments_dict = self.collect_post_asof_split_adjustments(
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns,
)
return pre_adjustments_dict, post_adjustments_dict
def merge_split_adjustments_with_overwrites(
self, pre, post, overwrites, requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites, pre[column_name][ts], column_name, ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites, post[column_name][ts], column_name, ts
)
class PreviousSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader
):
def collect_split_adjustments(
self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns,
)
class NextSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader
):
def collect_split_adjustments(
self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[column_name][overwrite_ts].extend(
[
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value,
)
for adjustment in pre_adjustments_dict[column_name][
split_ts
]
]
)
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates,
)
adjustments_for_sid[column_name][overwrite_ts].append(
Float64Multiply(
0, end_idx, sid_idx, sid_idx, adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns,
        )