| package | path | filename |
|---|---|---|
"""
Classes and functions for building timelines.
"""
from ..entity.asset import StoredFile
from ..util import as_id, as_collection
__all__ = [
'TimelineBuilder',
'VideoClip'
]
class VideoClip:
"""
Clips represent a prediction for a section of video.
"""
def __init__(self, data):
self._data = data
@property
def id(self):
"""The Asset id the clip is associated with."""
return self._data['id']
@property
def asset_id(self):
"""The Asset id the clip is associated with."""
return self._data['assetId']
@property
def timeline(self):
"""The name of the timeline, this is the same as the pipeline module."""
return self._data['timeline']
@property
def track(self):
"""The track name"""
return self._data['track']
@property
def content(self):
"""The content of the clip. This is the prediction"""
return self._data['content']
@property
def length(self):
"""The length of the clip"""
return self._data['length']
@property
def start(self):
"""The start time of the clip"""
return self._data['start']
@property
def stop(self):
"""The stop time of the clip"""
return self._data['stop']
@property
def score(self):
"""The prediction score"""
return self._data['score']
@property
def simhash(self):
"""A similarity hash, if any"""
return self._data.get('simhash')
@property
def files(self):
"""The array of associated files."""
return [StoredFile(f) for f in self._data.get('files', [])]
@staticmethod
def from_hit(hit):
"""
Converts an ElasticSearch hit into a VideoClip.
Args:
hit (dict): A raw ES document.
Returns:
VideoClip: The clip.
"""
data = {
'id': hit['_id'],
}
data.update(hit.get('_source', {}).get('clip', {}))
return VideoClip(data)
def __len__(self):
return self.length
def __str__(self):
return "<VideoClip id='{}'/>".format(self.id)
def __repr__(self):
return "<VideoClip id='{}' at {}/>".format(self.id, hex(id(self)))
def __eq__(self, other):
return other.id == self.id
def __hash__(self):
return hash(self.id)
class TimelineBuilder:
"""
The TimelineBuilder class is used for batch creation of video clips. Clips within a track
can be overlapping. Duplicate clips are automatically compacted to the highest score.
"""
def __init__(self, asset, name):
"""
Create a new timeline instance.
Args:
asset (mixed): The Asset, or its unique Id, that the timeline belongs to.
name (str): The name of the Timeline.
"""
self.asset = as_id(asset)
self.name = name
self.tracks = {}
def add_clip(self, track_name, start, stop, content, score=1, tags=None):
"""
Add a clip to the timeline.
Args:
track_name (str): The Track name.
start (float): The starting time.
stop (float): The end time.
content (str): The content.
score (float): The score, if any. Defaults to 1.
tags (list): A list of tags that describe the content.
Returns:
(dict): A clip entry.
"""
if stop < start:
raise ValueError("The stop time cannot be smaller than the start time.")
track = self.tracks.get(track_name)
if not track:
track = {'name': track_name, 'clips': []}
self.tracks[track_name] = track
clip = {
"start": start,
"stop": stop,
"content": [c.replace("\n", " ").strip() for c in as_collection(content)],
"score": score,
"tags": as_collection(tags)
}
track['clips'].append(clip)
return clip
def for_json(self):
return {
'name': self.name,
'assetId': self.asset,
'tracks': [track for track in self.tracks.values() if track['clips']]
}
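# A minimal usage sketch (hypothetical: assumes a configured ZmlpApp named
# `app`, an Asset `asset`, and an illustrative timeline name):
#
#   timeline = TimelineBuilder(asset, 'zvi-label-detection')
#   timeline.add_clip('dog', start=0.0, stop=4.5, content='dog', score=0.91)
#   timeline.add_clip('dog', start=10.0, stop=12.5, content='dog', score=0.87)
#   app.clips.create_clips(timeline)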
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/entity/clip.py | clip.py |
import logging
import os
import tempfile
from ..entity import Model, Job, ModelTypeInfo, AnalysisModule
from ..training import TrainingSetDownloader
from ..util import as_collection, as_id, zip_directory
logger = logging.getLogger(__name__)
__all__ = [
'ModelApp'
]
class ModelApp:
"""
Methods for manipulating models.
"""
def __init__(self, app):
self.app = app
def create_model(self, name, type):
"""
Create and return a new model.
Args:
name (str): The name of the model.
type (ModelType): The type of Model, see the ModelType class.
Returns:
Model: The new model.
"""
body = {
"name": name,
"type": type.name
}
return Model(self.app.client.post("/api/v3/models", body))
def get_model(self, id):
"""
Get a Model by Id
Args:
id (str): The model id.
Returns:
Model: The model.
"""
return Model(self.app.client.get("/api/v3/models/{}".format(as_id(id))))
def find_one_model(self, id=None, name=None, type=None):
"""
Find a single Model based on various properties.
Args:
id (str): The ID or list of Ids.
name (str): The model name or list of names.
type (str): The model type or list of types.
Returns:
Model: the matching Model.
"""
body = {
'names': as_collection(name),
'ids': as_collection(id),
'types': as_collection(type)
}
return Model(self.app.client.post("/api/v3/models/_find_one", body))
def find_models(self, id=None, name=None, type=None, limit=None, sort=None):
"""
Find Models matching various properties.
Args:
id (str): The ID or list of Ids.
name (str): The model name or list of names.
type (str): The model type or list of types.
limit (int): Limit results to the given size.
sort (list): An array of properties to sort by. Example: ["name:asc"]
Returns:
generator: A generator which will return matching Models when iterated.
"""
body = {
'names': as_collection(name),
'ids': as_collection(id),
'types': as_collection(type),
'sort': sort
}
return self.app.client.iter_paged_results('/api/v3/models/_search', body, limit, Model)
def train_model(self, model, deploy=False, **kwargs):
"""
Train the given Model by kicking off a model training job.
Args:
model (Model): The Model instance or a unique Model id.
deploy (bool): Deploy the model on your production data immediately after training.
**kwargs (kwargs): Model training arguments, which differ based on the model.
Returns:
Job: A model training job.
"""
model_id = as_id(model)
body = {
'deploy': deploy,
'args': dict(kwargs)
}
return Job(self.app.client.post('/api/v3/models/{}/_train'.format(model_id), body))
def deploy_model(self, model, search=None, file_types=None):
"""
Apply the model to the given search.
Args:
model (Model): A Model instance or a model unique Id.
search (dict): An arbitrary asset search, defaults to using the
deployment search associated with the model
file_types (list): An optional file type filter; can be a combination of
"images", "documents", and "videos".
Returns:
Job: The Job that is hosting the reprocess task.
"""
mid = as_id(model)
body = {
"search": search,
"fileTypes": file_types,
"jobId": os.environ.get("ZMLP_JOB_ID")
}
return Job(self.app.client.post(f'/api/v3/models/{mid}/_deploy', body))
def upload_trained_model(self, model, model_path, labels):
"""
Uploads a Tensorflow2/Keras model. For the 'model_path' arg you can either
pass the path to a Tensorflow saved model or a trained model instance itself.
Args:
model (Model): The Model or the unique Model ID.
model_path (mixed): The path to the model directory or a Tensorflow model instance.
labels (list): The list of labels.
Returns:
AnalysisModule: The AnalysisModule configured to use the model.
"""
if not labels:
raise ValueError("Uploading a model requires an array of labels")
# Check to see if it's a Keras model and, if so, save it to a temp dir.
if getattr(model_path, 'save', None):
tmp_path = tempfile.mkdtemp()
model_path.save(tmp_path)
model_path = tmp_path
with open(model_path + '/labels.txt', 'w') as fp:
for label in labels:
fp.write(f'{label}\n')
model_file = tempfile.mkstemp(prefix="model_", suffix=".zip")[1]
zip_file_path = zip_directory(model_path, model_file)
mid = as_id(model)
return AnalysisModule(self.app.client.send_file(
f'/api/v3/models/{mid}/_upload', zip_file_path))
def get_label_counts(self, model):
"""
Get a dictionary of the labels and how many times they occur.
Args:
model (Model): The Model or its unique Id.
Returns:
dict: a dictionary of label name to occurrence count.
"""
return self.app.client.get('/api/v3/models/{}/_label_counts'.format(as_id(model)))
def rename_label(self, model, old_label, new_label):
"""
Rename the given label to a new label name. The new label may already exist.
Args:
model (Model): The Model or its unique Id.
old_label (str): The old label name.
new_label (str): The new label name.
Returns:
dict: a dictionary containing the number of assets updated.
"""
body = {
"label": old_label,
"newLabel": new_label
}
return self.app.client.put('/api/v3/models/{}/labels'.format(as_id(model)), body)
def delete_label(self, model, label):
"""
Removes the label from all Assets.
Args:
model (Model): The Model or its unique Id.
label (str): The label name to remove.
Returns:
dict: a dictionary containing the number of assets updated.
"""
body = {
"label": label
}
return self.app.client.delete('/api/v3/models/{}/labels'.format(as_id(model)), body)
def download_labeled_images(self, model, style, dst_dir, validation_split=0.2):
"""
Get a TrainingSetDownloader instance which can be used to download all the
labeled images for a Model to local disk.
Args:
model (Model): The Model or its unique ID.
style (str): The structure style to build: labels_std, objects_keras, objects_coco
dst_dir (str): The destination dir to write the Assets into.
validation_split (float): The ratio of training images to validation images.
Defaults to 0.2.
"""
return TrainingSetDownloader(self.app, model, style, dst_dir, validation_split)
def get_model_type_info(self, model_type):
"""
Get additional properties concerning a specific model type.
Args:
model_type (ModelType): The model type Enum or name.
Returns:
ModelTypeInfo: Additional properties related to a model type.
"""
type_name = getattr(model_type, 'name', str(model_type))
return ModelTypeInfo(self.app.client.get(f'/api/v3/models/_types/{type_name}'))
def get_all_model_type_info(self):
"""
Get all available ModelTypeInfo options.
Returns:
list: A list of ModelTypeInfo
"""
return [ModelTypeInfo(info) for info in self.app.client.get('/api/v3/models/_types')]
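# A minimal end-to-end sketch (hypothetical: assumes a configured ZmlpApp named
# `app`; the ModelType enum lives in the entity package and the value shown is
# illustrative):
#
#   model = app.models.create_model('my-classifier', ModelType.ZVI_KNN_CLASSIFIER)
#   job = app.models.train_model(model, deploy=True)
#   print(app.models.get_label_counts(model))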
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/model_app.py | model_app.py |
from ..entity.field import CustomField
from ..util import as_id
class CustomFieldApp:
"""
An App instance for managing custom fields.
"""
def __init__(self, app):
self.app = app
def create_custom_field(self, name, type):
"""
Create a new Field. The name of the field will be used as the actual
ElasticSearch field name. The name must be alpha-numeric, underscores/dashes
are allowed, periods are not.
To reference your custom field in an ES query you must prepend the field name
with 'custom.'. For example if your field name is 'city', then you must use the
fully qualified name 'custom.city' in your query.
Args:
name (str): The name of the field.
type (str): The ES field type.
Returns:
CustomField: The new custom field.
"""
body = {
'name': name,
'type': type
}
return CustomField(self.app.client.post('/api/v3/custom-fields', body))
def get_custom_field(self, id):
"""
Get the record for the custom field.
Args:
id (str): The id of the field.
Returns:
CustomField: The custom field.
"""
id = as_id(id)
return CustomField(self.app.client.get(f'/api/v3/custom-fields/{id}'))
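# A minimal sketch of creating and querying a custom field (hypothetical:
# assumes a configured ZmlpApp named `app`). Note the required 'custom.' prefix
# when referencing the field in an ES query:
#
#   field = app.fields.create_custom_field('city', 'keyword')
#   results = app.assets.search({'query': {'term': {'custom.city': 'Wellington'}}})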
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/field_app.py | field_app.py |
from ..entity import DataSource, Job
from ..util import is_valid_uuid, as_collection
class DataSourceApp(object):
def __init__(self, app):
self.app = app
def create_datasource(self, name, uri, modules=None, file_types=None, credentials=None):
"""
Create a new DataSource.
Args:
name (str): The name of the data source.
uri (str): The URI where the data can be found.
modules (list): A list of AnalysisModules names to apply to the data.
file_types (list of str): a list of file extensions or general types like
'images', 'videos', 'documents'. Defaults to all file types.
credentials (list of str): A list of pre-created credentials blob names.
Returns:
DataSource: The created DataSource
"""
url = '/api/v1/data-sources'
body = {
'name': name,
'uri': uri,
'credentials': as_collection(credentials),
'fileTypes': file_types,
'modules': as_collection(modules)
}
return DataSource(self.app.client.post(url, body=body))
def get_datasource(self, name):
"""
Finds a DataSource by name or unique Id.
Args:
name (str): The unique name or unique ID.
Returns:
DataSource: The DataSource
"""
url = '/api/v1/data-sources/_findOne'
if is_valid_uuid(name):
body = {"ids": [name]}
else:
body = {"names": [name]}
return DataSource(self.app.client.post(url, body=body))
def import_files(self, ds, batch_size=25):
"""
Import all assets found at the given DataSource. If the
DataSource has already been imported then only new files will be
imported. New modules assigned to the datasource will
also be applied to existing assets as well as new assets.
Args:
ds (DataSource): A DataSource object.
batch_size (int): The number of Assets per batch. Must be at least 20.
Returns:
Job: Return the Job responsible for processing the files.
"""
body = {
"batchSize": batch_size
}
url = '/api/v1/data-sources/{}/_import'.format(ds.id)
return Job(self.app.client.post(url, body))
def delete_datasource(self, ds, remove_assets=False):
"""
Delete the given datasource. If remove_assets is true, then all
assets that were imported with a datasource are removed as well. This
cannot be undone.
Args:
ds (DataSource): A DataSource object.
remove_assets (bool): Set to true if Assets should be deleted as well.
Returns:
dict: Status object
"""
body = {
'deleteAssets': remove_assets
}
url = '/api/v1/data-sources/{}'.format(ds.id)
return self.app.client.delete(url, body)
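# A minimal sketch (hypothetical: assumes a configured ZmlpApp named `app` and
# a readable bucket URI; the module name is illustrative):
#
#   ds = app.datasource.create_datasource('my-images', 'gs://my-bucket/images',
#                                         modules=['zvi-label-detection'],
#                                         file_types=['images'])
#   job = app.datasource.import_files(ds)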
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/datasource_app.py | datasource_app.py |
import io
import os
import requests
from collections import namedtuple
from ..entity import Asset, StoredFile, FileUpload, FileTypes, Job, VideoClip
from ..search import AssetSearchResult, AssetSearchScroller, SimilarityQuery, SearchScroller
from ..util import as_collection, as_id_collection, as_id
class AssetApp(object):
def __init__(self, app):
self.app = app
def batch_import_files(self, files, modules=None):
"""
Import a list of FileImport instances.
Args:
files (list of FileImport): The list of files to import as Assets.
modules (list): A list of Pipeline Modules to apply to the data.
Notes:
Example return value:
{
"bulkResponse" : {
"took" : 15,
"errors" : false,
"items" : [ {
"create" : {
"_index" : "yvqg1901zmu5bw9q",
"_type" : "_doc",
"_id" : "dd0KZtqyec48n1q1fniqVMV5yllhRRGx",
"_version" : 1,
"result" : "created",
"forced_refresh" : true,
"_shards" : {
"total" : 1,
"successful" : 1,
"failed" : 0
},
"_seq_no" : 0,
"_primary_term" : 1,
"status" : 201
}
} ]
},
"failed" : [ ],
"created" : [ "dd0KZtqyec48n1q1fniqVMV5yllhRRGx" ],
"jobId" : "ba310246-1f87-1ece-b67c-be3f79a80d11"
}
Returns:
dict: A dictionary containing an ES bulk response, failed files,
and created asset ids.
"""
body = {
"assets": files,
"modules": modules
}
return self.app.client.post("/api/v3/assets/_batch_create", body)
def batch_upload_files(self, files, modules=None):
"""
Batch upload a list of files and return a structure which contains
an ES bulk response object, a list of failed file paths, a list of created
asset Ids, and a processing jobId.
Args:
files (list of FileUpload):
modules (list): A list of Pipeline Modules to apply to the data.
Notes:
Example return value:
{
"bulkResponse" : {
"took" : 15,
"errors" : false,
"items" : [ {
"create" : {
"_index" : "yvqg1901zmu5bw9q",
"_type" : "_doc",
"_id" : "dd0KZtqyec48n1q1fniqVMV5yllhRRGx",
"_version" : 1,
"result" : "created",
"forced_refresh" : true,
"_shards" : {
"total" : 1,
"successful" : 1,
"failed" : 0
},
"_seq_no" : 0,
"_primary_term" : 1,
"status" : 201
}
} ]
},
"failed" : [ ],
"created" : [ "dd0KZtqyec48n1q1fniqVMV5yllhRRGx" ],
"jobId" : "ba310246-1f87-1ece-b67c-be3f79a80d11"
}
Returns:
dict: A dictionary containing an ES bulk response, failed files,
and created asset ids.
"""
files = as_collection(files)
file_paths = [f.uri for f in files]
body = {
"assets": files,
"modules": modules
}
return self.app.client.upload_files("/api/v3/assets/_batch_upload",
file_paths, body)
def batch_upload_directory(self, path, file_types=None,
batch_size=50, modules=None, callback=None):
"""
Recursively upload all files in the given directory path.
This method takes an optional callback function which takes two
arguments, files and response. This callback is called for
each batch of files submitted.
Examples:
def batch_callback(files, response):
print("--processed files--")
for path in files:
print(path)
print("--zvi response--")
pprint.pprint(response)
app.assets.batch_upload_directory("/home", file_types=['images'],
callback=batch_callback)
Args:
path (str): A file path to a directory.
file_types (list): a list of file extensions and/or
categories(documents, images, videos)
batch_size (int): The number of files to upload per batch.
modules (list): An array of modules to apply to the files.
callback (func): A function to call for every batch
Returns:
dict: A dictionary containing batch operation counters.
"""
batch = []
totals = {
"file_count": 0,
"file_size": 0,
"batch_count": 0,
}
def process_batch():
totals['batch_count'] += 1
totals['file_count'] += len(batch)
totals['file_size'] += sum([os.path.getsize(f) for f in batch])
rsp = self.batch_upload_files(
[FileUpload(f) for f in batch], modules)
if callback:
callback(batch.copy(), rsp)
batch.clear()
file_types = FileTypes.resolve(file_types)
for root, dirs, files in os.walk(path):
for fname in files:
if fname.startswith("."):
continue
_, ext = os.path.splitext(fname)
if not ext:
continue
if ext[1:].lower() not in file_types:
continue
batch.append(os.path.abspath(os.path.join(root, fname)))
if len(batch) >= batch_size:
process_batch()
if batch:
process_batch()
return totals
def delete_asset(self, asset):
"""
Delete the given asset.
Args:
asset (mixed): unique Id or Asset instance.
Returns:
bool: True if the asset was deleted.
"""
asset_id = as_id(asset)
return self.app.client.delete("/api/v3/assets/{}".format(asset_id))['success']
def batch_delete_assets(self, assets):
"""
Batch delete the given list of Assets or asset ids.
Args:
assets (list): A list of Assets or unique asset ids.
Returns:
dict: A dictionary containing deleted and errored asset Ids.
"""
body = {
"assetIds": as_id_collection(assets)
}
return self.app.client.delete("/api/v3/assets/_batch_delete", body)
def search(self, search=None, fetch_source=True):
"""
Perform an asset search using the ElasticSearch query DSL.
See Also:
For search/query format.
https://www.elastic.co/guide/en/elasticsearch/reference/6.4/search-request-body.html
Args:
search (dict): The ElasticSearch search to execute.
fetch_source: (bool): If true, the full JSON document for each asset is returned.
Returns:
AssetSearchResult - an AssetSearchResult instance.
"""
if not fetch_source:
search = search or {}
search['_source'] = False
return AssetSearchResult(self.app, search)
def scroll_search(self, search=None, timeout="1m"):
"""
Perform an asset scrolled search using the ElasticSearch query DSL.
See Also:
For search/query format.
https://www.elastic.co/guide/en/elasticsearch/reference/6.4/search-request-body.html
Args:
search (dict): The ElasticSearch search to execute
timeout (str): The scroll timeout. Defaults to 1 minute.
Returns:
AssetSearchScroll - an AssetSearchScroller instance which is a generator
by nature.
"""
return AssetSearchScroller(self.app, search, timeout)
def reprocess_search(self, search, modules):
"""
Reprocess the given search with the supplied modules.
Args:
search (dict): An ElasticSearch search.
modules (list): A list of module names to apply.
Returns:
dict: Contains a Job and the number of assets to be processed.
"""
body = {
"search": search,
"modules": modules
}
rsp = self.app.client.post("/api/v3/assets/_search/reprocess", body)
return ReprocessSearchResponse(rsp["assetCount"], Job(rsp["job"]))
def scroll_search_clips(self, asset, search=None, timeout="1m"):
"""
Scroll through clips for given asset using the ElasticSearch query DSL.
Args:
asset (Asset): The asset or unique AssetId.
search (dict): The ElasticSearch search to execute
timeout (str): The scroll timeout. Defaults to 1 minute.
Returns:
SearchScroller a clip scroller instance for generating VideoClips.
"""
asset_id = as_id(asset)
return SearchScroller(
VideoClip, f'/api/v3/assets/{asset_id}/clips/_search', self.app, search, timeout
)
def reprocess_assets(self, assets, modules):
"""
Reprocess the given array of assets with the given modules.
Args:
assets (list): A list of Assets or asset unique Ids.
modules (list): A list of Pipeline module names or ids.
Returns:
dict: A dictionary containing the job and the number of assets to be processed.
"""
asset_ids = [getattr(asset, "id", asset) for asset in as_collection(assets)]
body = {
"search": {
"query": {
"terms": {
"_id": asset_ids
}
}
},
"modules": as_collection(modules)
}
return self.app.client.post("/api/v3/assets/_search/reprocess", body)
def get_asset(self, id):
"""
Return the asset with the given unique Id.
Args:
id (str): The unique ID of the asset.
Returns:
Asset: The Asset
"""
return Asset(self.app.client.get("/api/v3/assets/{}".format(id)))
def update_labels(self, assets, add_labels=None, remove_labels=None):
"""
Update the Labels on the given array of assets.
Args:
assets (mixed): An Asset, asset ID, or a list of either type.
add_labels (list[Label]): A Label or list of Label to add.
remove_labels (list[Label]): A Label or list of Label to remove.
Returns:
dict: A request status dict.
"""
ids = as_id_collection(assets)
body = {}
if add_labels:
body['add'] = dict([(a, as_collection(add_labels)) for a in ids])
if remove_labels:
body['remove'] = dict([(a, as_collection(remove_labels)) for a in ids])
if not body:
raise ValueError("Must pass at least and add_labels or remove_labels argument")
return self.app.client.put("/api/v3/assets/_batch_update_labels", body)
def update_custom_fields(self, asset, values):
"""
Set the values of custom metadata fields.
Args:
asset (Asset): The asset or unique Asset id.
values (dict): A dictionary of values.
Returns:
dict: A status dictionary with failures or success.
"""
body = {
"update": {
as_id(asset): values
}
}
return self.app.client.put("/api/v3/assets/_batch_update_custom_fields", body)
def batch_update_custom_fields(self, update):
"""
Set the values of custom metadata fields.
Examples:
{
"asset-id1": {"shoe": "nike"},
"asset-id2": {"country": "New Zealand"}
}
Args:
update (dict): A dict of dicts, keyed by asset id, which describes the field values to set.
Returns:
dict: A status dictionary with failures or success
"""
body = {
'update': update
}
return self.app.client.put('/api/v3/assets/_batch_update_custom_fields', body)
def download_file(self, stored_file, dst_file=None):
"""
Download given file and store results in memory, or optionally
a destination file. The stored_file ID can be specified as
either a string like "assets/<id>/proxy/image_450x360.jpg"
or a StoredFile instance can be used.
Args:
stored_file (mixed): The StoredFile instance or its ID.
dst_file (str): An optional destination file path.
Returns:
io.BytesIO instance containing the binary data or if
a destination path was provided the size of the
file is returned.
"""
if isinstance(stored_file, str):
path = stored_file
elif isinstance(stored_file, StoredFile):
path = stored_file.id
else:
raise ValueError("stored_file must be a string or StoredFile instance")
rsp = self.app.client.get("/api/v3/files/_stream/{}".format(path), is_json=False)
if dst_file:
with open(dst_file, 'wb') as fp:
fp.write(rsp.content)
return os.path.getsize(dst_file)
else:
return io.BytesIO(rsp.content)
def stream_file(self, stored_file, chunk_size=1024):
"""
Streams a file by iteratively returning chunks of the file using a generator. This
can be useful when developing web applications and a full download of the file
before continuing is not necessary.
Args:
stored_file (mixed): The StoredFile instance or its ID.
chunk_size (int): The byte size of each requested chunk. Defaults to 1024.
Yields:
generator (File-like Object): Content of the file.
"""
if isinstance(stored_file, str):
path = stored_file
elif isinstance(stored_file, StoredFile):
path = stored_file.id
else:
raise ValueError("stored_file must be a string or StoredFile instance")
url = self.app.client.get_url('/api/v3/files/_stream/{}'.format(path))
response = requests.get(url, verify=self.app.client.verify,
headers=self.app.client.headers(), stream=True)
for block in response.iter_content(chunk_size):
yield block
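# A minimal sketch of streaming a stored file to disk (hypothetical: assumes a
# configured ZmlpApp named `app` and an existing proxy file id):
#
#   with open('proxy.jpg', 'wb') as fp:
#       for chunk in app.assets.stream_file('assets/<id>/proxy/image_450x360.jpg'):
#           fp.write(chunk)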
def get_sim_hashes(self, images):
"""
Return a similarity hash for the given array of images.
Args:
images (mixed): Can be a file handle (opened with 'rb'), a
path to a file, or a list of either.
Returns:
list of str: A list of similarity hashes.
"""
return self.app.client.upload_files("/ml/v1/sim-hash",
as_collection(images), body=None)
def get_sim_query(self, images, min_score=0.75):
"""
Analyze the given image files and return a SimilarityQuery which
can be used in a search.
Args:
images (mixed): Can be a file handle (opened with 'rb'), a
path to a file, or a list of either.
min_score (float): A float between 0 and 1; the higher the value, the more
similar the results. Defaults to 0.75.
Returns:
SimilarityQuery: A configured SimilarityQuery
"""
return SimilarityQuery(self.get_sim_hashes(images), min_score)
"""
A named tuple to define a ReprocessSearchResponse
"""
ReprocessSearchResponse = namedtuple('ReprocessSearchResponse', ["asset_count", "job"])
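# A minimal sketch of reprocessing a search (hypothetical: assumes a configured
# ZmlpApp named `app`; the module name is illustrative):
#
#   rsp = app.assets.reprocess_search({'query': {'match_all': {}}},
#                                     ['zvi-object-detection'])
#   print(rsp.asset_count, rsp.job)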
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/asset_app.py | asset_app.py |
from ..util import as_id, as_id_collection
from ..search import VideoClipSearchResult, VideoClipSearchScroller
from ..entity import VideoClip
class VideoClipApp:
"""
An App instance for managing VideoClips.
"""
def __init__(self, app):
self.app = app
def create_clip(self, asset, timeline, track, start, stop, content):
"""
Create a new clip. If a clip with the same metadata already exists it will
simply be replaced.
Args:
asset (Asset): The asset or its unique Id.
timeline (str): The timeline name for the clip.
track (str): The track name for the clip.
start (float): The starting point for the clip in seconds.
stop (float): The ending point for the clip in seconds.
content (str): The content of the clip.
Returns:
Clip: The clip that was created.
"""
body = {
"assetId": as_id(asset),
"timeline": timeline,
"track": track,
"start": start,
"stop": stop,
"content": content
}
return VideoClip(self.app.client.post('/api/v1/clips', body))
def create_clips(self, timeline):
"""
Batch create clips using a TimelineBuilder.
Args:
timeline: (TimelineBuilder): A timeline builder.
Returns:
dict: A status dictionary
"""
return self.app.client.post('/api/v1/clips/_timeline', timeline)
def get_webvtt(self,
asset,
dst_file=None,
timeline=None,
track=None,
content=None):
"""
Get all clip data as a WebVTT file and filter by specified options.
Args:
asset (Asset): The asset or unique Id.
timeline: (str): A timeline name or collection of timeline names.
track: (str): A track name or collection of track names.
content (str): A content string to match.
dst_file (mixed): An optional writable file handle or path to file.
Returns:
mixed: The text of the webvtt or the size of the written file.
"""
body = {
'assetId': as_id(asset),
'timelines': as_id_collection(timeline),
'tracks': as_id_collection(track),
'content': as_id_collection(content)
}
rsp = self.app.client.post('/api/v1/clips/_webvtt', body=body, is_json=False)
return self.__handle_webvtt(rsp, dst_file)
def scroll_search(self, search=None, timeout="1m"):
"""
Perform a VideoClip scrolled search using the ElasticSearch query DSL.
See Also:
For search/query format.
https://www.elastic.co/guide/en/elasticsearch/reference/6.4/search-request-body.html
Args:
search (dict): The ElasticSearch search to execute
timeout (str): The scroll timeout. Defaults to 1 minute.
Returns:
VideoClipSearchScroller - a VideoClipSearchScroller instance which can be used as
a generator for paging results.
"""
return VideoClipSearchScroller(self.app, search, timeout)
def search(self, search=None):
"""
Perform a VideoClip search using the ElasticSearch query DSL.
See Also:
For search/query format.
https://www.elastic.co/guide/en/elasticsearch/reference/6.4/search-request-body.html
Args:
search (dict): The ElasticSearch search to execute.
Returns:
VideoClipSearchResult - A VideoClipSearchResult instance.
"""
return VideoClipSearchResult(self.app, search)
def get_clip(self, id):
"""
Get a VideoClip by unique Id.
Args:
id (str): The unique Id of the VideoClip.
Returns:
VideoClip: The clip with the given Id.
"""
return VideoClip(self.app.client.get(f'/api/v1/clips/{id}'))
def __handle_webvtt(self, rsp, dst_file):
"""
Handle a webvtt file response.
Args:
rsp (Response): A response from requests.
dst_file (mixed): An optional file path or file handle.
Returns:
(mixed): Return the content itself or the content size if written to file.
"""
if dst_file:
if isinstance(dst_file, str):
with open(dst_file, 'w') as fp:
fp.write(rsp.content.decode())
return len(rsp.content)
else:
dst_file.write(rsp.content.decode())
return len(rsp.content)
else:
return rsp.content.decode()
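# A minimal sketch of fetching clip data as WebVTT (hypothetical: assumes a
# configured ZmlpApp named `app` and a video Asset `asset`; the timeline name
# is illustrative):
#
#   vtt_text = app.clips.get_webvtt(asset, timeline='zvi-label-detection')
#   size = app.clips.get_webvtt(asset, dst_file='captions.vtt')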
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/clip_app.py | clip_app.py |
import logging
from ..entity import AnalysisModule
from ..util import as_collection, as_id
logger = logging.getLogger(__name__)
__all__ = [
'AnalysisModuleApp'
]
class AnalysisModuleApp:
"""
App class for querying Analysis Modules
"""
def __init__(self, app):
self.app = app
def get_analysis_module(self, id):
"""
Get an AnalysisModule by Id.
Args:
id (str): The AnalysisModule ID or an AnalysisModule instance.
Returns:
AnalysisModule: The matching AnalysisModule
"""
return AnalysisModule(self.app.client.get('/api/v1/pipeline-mods/{}'.format(as_id(id))))
def find_one_analysis_module(self, id=None, name=None, type=None, category=None, provider=None):
"""
Find a single AnalysisModule based on various properties.
Args:
id (str): The ID or list of Ids.
name (str): The model name or list of names.
type: (str): An AnalysisModule type or collection of types to filter on.
category (str): The category of the AnalysisModule.
provider (str): The provider of the AnalysisModule.
Returns:
AnalysisModule: The matching AnalysisModule.
"""
body = {
'names': as_collection(name),
'ids': as_collection(id),
'types': as_collection(type),
'categories': as_collection(category),
'providers': as_collection(provider)
}
return AnalysisModule(self.app.client.post('/api/v1/pipeline-mods/_find_one', body))
def find_analysis_modules(self, keywords=None, id=None, name=None, type=None,
category=None, provider=None, limit=None, sort=None):
"""
Search for AnalysisModule.
Args:
keywords (str): Keywords that match various fields on an AnalysisModule.
id (str): An ID or collection of IDs to filter on.
name (str): A name or collection of names to filter on.
type: (str): An AnalysisModule type or collection of types to filter on.
category (str): The category or collection of category names.
provider (str): The provider or collection provider names.
limit: (int) Limit the number of results.
sort: (list): A sort array, example: ["time_created:desc"]
Returns:
generator: A generator which will return matching AnalysisModules when iterated.
"""
body = {
'keywords': keywords,
'names': as_collection(name),
'ids': as_collection(id),
'types': as_collection(type),
'categories': as_collection(category),
'providers': as_collection(provider),
'sort': sort
}
return self.app.client.iter_paged_results(
'/api/v1/pipeline-mods/_search', body, limit, AnalysisModule)
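# A minimal sketch (hypothetical: assumes a configured ZmlpApp named `app`;
# the category value and the entity's name property are illustrative):
#
#   for module in app.analysis.find_analysis_modules(category='Zorroa ZVI'):
#       print(module.name)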
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/analysis_app.py | analysis_app.py |
import base64
import logging
import os
from . import AssetApp, DataSourceApp, ProjectApp, \
JobApp, ModelApp, AnalysisModuleApp, VideoClipApp, CustomFieldApp
from ..client import ZmlpClient, DEFAULT_SERVER
logger = logging.getLogger(__name__)
class ZmlpApp(object):
"""
Exposes the main ZMLP API.
"""
def __init__(self, apikey, server=None):
"""
Initialize a ZMLP Application instance.
Args:
apikey (mixed): An API key, can be either a key or file handle.
server (str): The URL to the ZMLP API server; defaults to the cloud API.
"""
logger.debug("Initializing ZMLP to {}".format(server))
self.client = ZmlpClient(apikey, server or
os.environ.get("ZMLP_SERVER", DEFAULT_SERVER))
self.assets = AssetApp(self)
self.datasource = DataSourceApp(self)
self.projects = ProjectApp(self)
self.jobs = JobApp(self)
self.models = ModelApp(self)
self.analysis = AnalysisModuleApp(self)
self.clips = VideoClipApp(self)
self.fields = CustomFieldApp(self)
def app_from_env():
"""
Create a ZmlpApp configured via environment variables. This method
will not throw if the environment is configured improperly; however,
attempting to use the ZmlpApp instance to make a request
will fail.
- ZMLP_APIKEY : A base64 encoded API key.
- ZMLP_APIKEY_FILE : A path to a JSON formatted API key.
- ZMLP_SERVER : The URL to the ZMLP API server.
Returns:
ZmlpApp: A configured ZmlpApp.
"""
apikey = None
if 'ZMLP_APIKEY' in os.environ:
apikey = os.environ['ZMLP_APIKEY']
elif 'ZMLP_APIKEY_FILE' in os.environ:
with open(os.environ['ZMLP_APIKEY_FILE'], 'rb') as fp:
apikey = base64.b64encode(fp.read())
return ZmlpApp(apikey, os.environ.get('ZMLP_SERVER'))
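# A minimal bootstrap sketch (assumes ZMLP_APIKEY or ZMLP_APIKEY_FILE, and
# optionally ZMLP_SERVER, are exported in the environment):
#
#   app = app_from_env()
#   project = app.projects.get_project()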
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/zmlp_app.py | zmlp_app.py |
# flake8: noqa
from .asset_app import AssetApp
from .datasource_app import DataSourceApp
from .project_app import ProjectApp
from .job_app import JobApp
from .model_app import ModelApp
from .analysis_app import AnalysisModuleApp
from .clip_app import VideoClipApp
from .field_app import CustomFieldApp
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/__init__.py | __init__.py |
from ..entity import Job, Task, TaskError
from ..util import as_collection, as_id_collection, as_id
class JobApp:
"""
An App instance for managing Jobs. Jobs are containers for async processes
such as data import or training.
"""
def __init__(self, app):
self.app = app
def get_job(self, id):
"""
Get a Job by its unique Id.
Args:
id (str): The Job id or Job object.
Returns:
Job: The Job
"""
return Job(self.app.client.get('/api/v1/jobs/{}'.format(as_id(id))))
def refresh_job(self, job):
"""
Refreshes the internals of the given job.
Args:
job (Job): The job to refresh.
"""
job._data = self.app.client.get('/api/v1/jobs/{}'.format(job.id))
def find_jobs(self, id=None, state=None, name=None, limit=None, sort=None):
"""
Find jobs matching the given criteria.
Args:
id (mixed): A job ID or IDs to filter on.
state (mixed): A Job state or list of states to filter on.
name (mixed): A Job name or list of names to filter on.
limit (int): The maximum number of jobs to return, None is no limit.
sort (list): A list of sort ordering phrases, like ["name:d", "time_created:a"]
Returns:
generator: A generator which will return matching jobs when iterated.
"""
body = {
'ids': as_collection(id),
'states': as_collection(state),
'names': as_collection(name),
'sort': sort
}
return self.app.client.iter_paged_results('/api/v1/jobs/_search', body, limit, Job)
def find_one_job(self, id=None, state=None, name=None):
"""
Find a single Job matching the given criteria. Raises an exception if more
than one result is found.
Args:
id (mixed): A job ID or IDs to filter on.
state (mixed): A Job state or list of states to filter on.
name (mixed): A Job name or list of names to filter on.
Returns:
Job: The job.
"""
body = {
'ids': as_collection(id),
'states': as_collection(state),
'names': as_collection(name)
}
return Job(self.app.client.post('/api/v1/jobs/_findOne', body))
def find_task_errors(self, query=None, job=None, task=None,
asset=None, path=None, processor=None, limit=None, sort=None):
"""
Find TaskErrors based on the supplied criterion.
Args:
query (str): keyword query to match various error properties.
job (mixed): A single Job, job id or list of either type.
task (mixed): A single Task, task id or list of either type.
asset (mixed): A single Asset, asset id or list of either type.
path (mixed): A file path or list of file path.
processor (mixed): A processor name or list of processors.
limit (int): Limit the number of results or None for all results.
sort (list): A list of sort ordering phrases, like ["name:d", "time_created:a"]
Returns:
generator: A generator which returns results when iterated.
"""
body = {
'keywords': query,
'jobIds': as_id_collection(job),
'taskIds': as_id_collection(task),
'assetIds': as_id_collection(asset),
'paths': as_collection(path),
'processor': as_collection(processor),
'sort': sort
}
return self.app.client.iter_paged_results(
'/api/v1/taskerrors/_search', body, limit, TaskError)
def pause_job(self, job):
"""
Pause scheduling for the given Job. Pausing a job simply removes the
job from scheduler consideration. All existing tasks will continue to run
and Analysts will move to new jobs as tasks complete.
Args:
job (Job): The Job to pause
Returns:
bool: True if the job was actually paused.
"""
# Resolve the job if we need to.
if isinstance(job, str):
job = self.get_job(job)
# Set the paused flag on the job data before updating, otherwise the PUT
# sends the unchanged job back to the server.
body = dict(job._data)
body['paused'] = True
if self.app.client.put('/api/v1/jobs/{}'.format(job.id), body)['success']:
job._data['paused'] = True
return True
return False
def resume_job(self, job):
"""
Resume scheduling for the given Job.
Args:
job (Job): The Job to resume
Returns:
bool: True if the job was actually resumed.
"""
if isinstance(job, str):
job = self.get_job(job)
# Clear the paused flag on the job data before updating, otherwise the PUT
# sends the unchanged job back to the server.
body = dict(job._data)
body['paused'] = False
if self.app.client.put('/api/v1/jobs/{}'.format(job.id), body)['success']:
job._data['paused'] = False
return True
return False
def cancel_job(self, job):
"""
Cancel the given Job. Canceling a job immediately kills all running Tasks
and removes the job from scheduler consideration.
Args:
job (Job): The Job to cancel, or the job's unique Id.
Returns:
bool: True if the job was actually canceled, False if the job was already cancelled.
"""
if isinstance(job, str):
job = self.get_job(job)
if self.app.client.put('/api/v1/jobs/{}/_cancel'.format(job.id)).get('success'):
self.refresh_job(job)
return True
return False
def restart_job(self, job):
"""
Restart a canceled job.
Args:
job (Job): The Job to restart
Returns:
bool: True if the job was actually restarted, False if the job was not cancelled.
"""
if isinstance(job, str):
job = self.get_job(job)
if self.app.client.put('/api/v1/jobs/{}/_restart'.format(job.id)).get('success'):
self.refresh_job(job)
return True
return False
def retry_all_failed_tasks(self, job):
"""
Retry all failed Tasks in the Job.
Args:
job (Job): The Job with failed tasks.
Returns:
bool: True if some failed tasks were restarted.
"""
if isinstance(job, str):
job = self.get_job(job)
if self.app.client.put(
'/api/v1/jobs/{}/_retryAllFailures'.format(job.id)).get('success'):
self.refresh_job(job)
return True
return False
def find_tasks(self, job=None, id=None, name=None, state=None, limit=None, sort=None):
"""
Find Tasks matching the given criteria.
Args:
job: (mixed): A single Job, job id or list of either type.
id (mixed): A single Task, task id or list of either type.
name (mixed): A task name or list of tasks names.
state (mixed): A task state or list of task states.
limit (int): Limit the number of results, None for no limit.
sort (list): A list of sort ordering phrases, like ["name:d", "time_created:a"]
Returns:
generator: A Generator that returns matching Tasks when iterated.
"""
body = {
'ids': as_collection(id),
'states': as_collection(state),
'names': as_collection(name),
'jobIds': as_id_collection(job),
'sort': sort
}
return self.app.client.iter_paged_results('/api/v1/tasks/_search', body, limit, Task)
def find_one_task(self, job=None, id=None, name=None, state=None):
"""
Find a single task matching the criterion.
Args:
job: (mixed): A single Job, job id or list of either type.
id (mixed): A single Task, task id or list of either type.
name (mixed): A task name or list of tasks names.
state (mixed): A task state or list of task states.
Returns:
Task: A single matching task.
"""
body = {
'ids': as_collection(id),
'states': as_collection(state),
'names': as_collection(name),
'jobIds': as_id_collection(job)
}
res = Task(self.app.client.post('/api/v1/tasks/_findOne', body))
return res
def get_task(self, task):
"""
Get a Task by its unique id.
Args:
task (str): The Task or task id.
Returns:
Task: The Task
"""
return Task(self.app.client.get('/api/v1/tasks/{}'.format(as_id(task))))
def refresh_task(self, task):
"""
Refreshes the internals of the given job.
Args:
task (Task): The Task
"""
task._data = self.app.client.get('/api/v1/tasks/{}'.format(task.id))
def skip_task(self, task):
"""
Skip the given task. A skipped task will not run.
Args:
task (str): The Task or task id.
Returns:
bool: True if the Task changed to the Skipped state.
"""
if isinstance(task, str):
task = self.get_task(task)
if self.app.client.put('/api/v1/tasks/{}/_skip'.format(task.id))['success']:
self.refresh_task(task)
return True
return False
def retry_task(self, task):
"""
Retry the given task. Retried tasks are set back to the waiting state.
Args:
task (str): The Task or task id.
Returns:
bool: True if the Task changed to the Waiting state.
"""
if isinstance(task, str):
task = self.get_task(task)
if self.app.client.put('/api/v1/tasks/{}/_retry'.format(task.id))['success']:
self.refresh_task(task)
return True
return False
def get_task_script(self, task):
"""
Return the given task's ZPS script.
Args:
task: (str): The Task or task id.
Returns:
dict: The script in dictionary form.
"""
return self.app.client.get('/api/v1/tasks/{}/_script'.format(as_id(task)))
def download_task_log(self, task, dst_path):
"""
Download the task log file to the given file path.
Args:
task: (str): The Task or task id.
dst_path (str): The path to the destination file.
Returns:
The result of streaming the log file to dst_path.
"""
return self.app.client.stream('/api/v1/tasks/{}/_log'.format(as_id(task)), dst_path)
def iterate_task_log(self, task):
"""
Return a generator that can be used to iterate a task log file.
Args:
task: (str): The Task or task id.
Returns:
generator: A generator which yields each line of a log file.
"""
return self.app.client.stream_text('/api/v1/tasks/{}/_log'.format(as_id(task)))
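# A minimal sketch of inspecting and retrying a job's failures (hypothetical:
# assumes a configured ZmlpApp named `app` and an existing job id `job_id`):
#
#   job = app.jobs.get_job(job_id)
#   for error in app.jobs.find_task_errors(job=job, limit=10):
#       print(error)
#   app.jobs.retry_all_failed_tasks(job)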
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/job_app.py | job_app.py |
from ..entity import Project
class ProjectApp(object):
def __init__(self, app):
self.app = app
def get_project(self):
"""
Return the current API Key's assigned project.
Returns:
Project
"""
return Project(self.app.client.get("/api/v1/project"))
| zvi-client | /zvi-client-1.1.3.tar.gz/zvi-client-1.1.3/pylib/zmlp/app/project_app.py | project_app.py |
#!/usr/bin/env python3
from setuptools import setup
from datetime import datetime
# See https://packaging.python.org/tutorials/packaging-projects/
# for details about packaging python projects
# Generating distribution archives (run from same directory as this file)
# python3 -m pip install --user --upgrade setuptools wheel
# python3 setup.py sdist bdist_wheel
requirements = [
'cmake',
'zvi-client',
'pandas',
'matplotlib',
'opencv-python',
'Pillow',
'ipython',
'scikit-learn',
'bokeh',
'holoviews',
'MulticoreTSNE'
]
setup(
name='zvi',
version='1.0.0',
description='Zorroa Visual Intelligence ML Environment',
url='https://www.zorroa.com',
license='Apache2',
package_dir={'': 'pylib'},
packages=['zvi'],
scripts=[],
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent'
],
include_package_data=True,
install_requires=requirements
)
| zvi | /zvi-1.0.0.tar.gz/zvi-1.0.0/setup.py | setup.py |
MIT License
Copyright (c) 2018-2022 Olexa Bilaniuk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/LICENSE.md | LICENSE.md |
[](https://pypi.python.org/pypi/zvit)
# Звіт / Zvit
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/README.md | README.md |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scripts import setup
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/setup.py | setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, sys, subprocess, time
from . import git
#
# Public Version.
#
# This is the master declaration of the version number for this project.
#
# We will obey PEP 440 (https://www.python.org/dev/peps/pep-0440/) here. PEP440
# recommends the pattern
# [N!]N(.N)*[{a|b|rc}N][.postN][.devN]
# We shall standardize on the ultracompact form
# [N!]N(.N)*[{a|b|rc}N][-N][.devN]
# which has a well-defined normalization.
#
verPublic = "0.0.11"
#
# Information computed from the public version.
#
regexMatch = re.match(r"""(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
(?P<preL>a|b|rc)
(?P<preN>[0-9]+)
)?
(?P<post> # post release
(?:-(?P<postN>[0-9]+))
)?
(?P<dev> # dev release
(?:\.dev(?P<devN>[0-9]+))
)?
)""", verPublic, re.X)
assert regexMatch
verEpoch = regexMatch.group("epoch") or ""
verRelease = regexMatch.group("release")
verPreRel = regexMatch.group("pre") or ""
verPostRel = regexMatch.group("post") or ""
verDevRel = regexMatch.group("dev") or ""
verNormal = verRelease+verPreRel+verPostRel+verDevRel
verIsRel = bool(not verPreRel and not verDevRel)
#
# Local Version.
#
# Uses POSIX time (Nominal build time as seconds since the Epoch) as obtained
# either from the environment variable SOURCE_DATE_EPOCH or the wallclock time.
# Also converts POSIX timestamp to ISO 8601.
#
verVCS = git.getGitVer()
verClean = bool((not verVCS) or (git.isGitClean()))
posixTime = int(os.environ.get("SOURCE_DATE_EPOCH", time.time()))
iso8601Time= time.strftime("%Y%m%dT%H%M%SZ", time.gmtime(posixTime))
verLocal = verPublic+"+"+iso8601Time
if verVCS:
verLocal += "."+verVCS
if not verClean:
verLocal += ".dirty"
#
# SemVer Version.
#
# Obeys Semantic Versioning 2.0.0, found at
# https://semver.org/spec/v2.0.0.html
#
verSemVer = ".".join((verRelease+".0.0").split(".")[:3])
identifiers= []
if verPreRel: identifiers.append(verPreRel)
if verDevRel: identifiers.append(verDevRel[1:])
if identifiers:
verSemVer += "-" + ".".join(identifiers)
metadata = []
if regexMatch.group("postN"):
metadata.append("post")
metadata.append(regexMatch.group("postN"))
metadata.append("buildtime")
metadata.append(iso8601Time)
if verVCS:
metadata.append("git")
metadata.append(verVCS)
if not verClean:
metadata.append("dirty")
if metadata:
verSemVer += "+" + ".".join(metadata)
#
# Version utilities
#
def synthesizeVersionPy():
templatePath = os.path.join(git.getSrcRoot(),
"scripts",
"version.py.in")
with open(templatePath, "r") as f:
return f.read().format(**globals())
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/scripts/versioning.py | versioning.py |
# -*- coding: utf-8 -*-
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/scripts/utils.py | utils.py |
# -*- coding: utf-8 -*-
#
# Imports
#
import os, subprocess
# Useful constants
EMPTYTREE_SHA1 = "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
ORIGINAL_ENV = os.environ.copy()
C_ENV = os.environ.copy()
C_ENV['LANGUAGE'] = C_ENV['LANG'] = C_ENV['LC_ALL'] = "C"
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
SRCROOT_PATH = None
GIT_VER = None
GIT_CLEAN = None
#
# Utility functions
#
def invoke(command,
cwd = SCRIPT_PATH,
env = C_ENV,
stdin = subprocess.DEVNULL,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
**kwargs):
return subprocess.Popen(
command,
stdin = stdin,
stdout = stdout,
stderr = stderr,
cwd = cwd,
env = env,
**kwargs
)
def getSrcRoot():
#
# Return the cached value if we know it.
#
global SRCROOT_PATH
if SRCROOT_PATH is not None:
return SRCROOT_PATH
#
# Our initial guess is `dirname(dirname(__file__))`.
#
root = os.path.dirname(SCRIPT_PATH)
try:
inv = invoke(["git", "rev-parse", "--show-toplevel"],
universal_newlines = True,)
streamOut, streamErr = inv.communicate()
if inv.returncode == 0:
root = streamOut[:-1]
except FileNotFoundError as err:
pass
finally:
SRCROOT_PATH = root
return root
def getGitVer():
#
# Return the cached value if we know it.
#
global GIT_VER
if GIT_VER is not None:
return GIT_VER
try:
gitVer = ""
inv = invoke(["git", "rev-parse", "HEAD"],
universal_newlines = True,)
streamOut, streamErr = inv.communicate()
if inv.returncode == 0 or inv.returncode == 128:
gitVer = streamOut[:-1]
except FileNotFoundError as err:
pass
finally:
if gitVer == "HEAD":
GIT_VER = EMPTYTREE_SHA1
else:
GIT_VER = gitVer
return GIT_VER
def isGitClean():
#
# Return the cached value if we know it.
#
global GIT_CLEAN
if GIT_CLEAN is not None:
return GIT_CLEAN
try:
gitVer = None
inv_nc = invoke(["git", "diff", "--quiet"],
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL,)
inv_c = invoke(["git", "diff", "--quiet", "--cached"],
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL,)
inv_nc = inv_nc.wait()
inv_c = inv_c .wait()
GIT_CLEAN = (inv_nc == 0) and (inv_c == 0)
except FileNotFoundError as err:
#
# If we don't have access to Git, assume it's a tarball, in which case
# it's always clean.
#
GIT_CLEAN = True
return GIT_CLEAN
| zvit | /zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/scripts/git.py | git.py |
# -*- coding: utf-8 -*-
#
# Imports
#
import os, sys, subprocess, time
from setuptools import setup, find_packages, Extension
packageName = "zvit"
githubURL = "https://github.com/obilaniu/Zvit"
#
# Restrict to Python 3.4+
#
if sys.version_info[:2] < (3, 4):
sys.stdout.write(packageName+" is Python 3.4+ only!\n")
sys.exit(1)
#
# Retrieve setup scripts
#
from . import git, versioning, utils
#
# Read long description
#
with open(os.path.join(git.getSrcRoot(),
"scripts",
"LONG_DESCRIPTION.txt"), "r") as f:
long_description = f.read()
#
# Synthesize version.py file
#
with open(os.path.join(git.getSrcRoot(),
"src",
packageName,
"version.py"), "w") as f:
f.write(versioning.synthesizeVersionPy())
#
# Perform setup.
#
setup(
name = packageName,
version = versioning.verPublic,
author = "Olexa Bilaniuk",
author_email = "[email protected]",
license = "MIT",
url = githubURL,
download_url = githubURL+"/archive/v{}.tar.gz".format(versioning.verRelease),
description = "A standalone, lightweight logging package that writes "
"TensorFlow tfevents files compatible with TensorBoard.",
long_description = long_description,
classifiers = [
"Development Status :: 1 - Planning",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Logging",
"Topic :: Utilities",
],
python_requires = '>=3.4',
install_requires = [
"nauka>=0.0.8",
"numpy>=1.10",
"Pillow>=4.0.0",
],
packages = find_packages("src"),
package_dir = {'': 'src'},
ext_modules = [
Extension("zvit.pb.crc_native",
[os.path.join("src", "zvit", "pb", "crc_native.c")],)
],
entry_points = {
"console_scripts": [
"zvit = zvit.__main__:main",
]
},
zip_safe = False,
)
|
zvit
|
/zvit-0.0.11+8ef0d2e7db5e6bb76f8186088141a78692691e7b.tar.gz/zvit-0.0.11/scripts/setup.py
|
setup.py
|
import unittest
from zvolv_sdk import divide_by_three
class TestDivideByThree(unittest.TestCase):
def test_divide_by_three(self):
self.assertEqual(divide_by_three(12), 4)
if __name__ == '__main__':
unittest.main()
| zvolv-sdk | /zvolv_sdk-0.0.2-py3-none-any.whl/tests/test_divide_by_three.py | test_divide_by_three.py |
def divide_by_three(num):
return num / 3
| zvolv-sdk | /zvolv_sdk-0.0.2-py3-none-any.whl/zvolv_sdk/divide_by_three.py | divide_by_three.py |
"""Check the latest version at https://pypi.org/project/zvolv-sdk/"""
__version__ = "0.0.2"
| zvolv-sdk | /zvolv_sdk-0.0.2-py3-none-any.whl/zvolv_sdk/version.py | version.py |
[](https://github.com/zvtvz/zvt-ccxt)
[](https://pypi.org/project/zvt-ccxt/)
[](https://pypi.org/project/zvt-ccxt/)
[](https://pypi.org/project/zvt-ccxt/)
[](https://travis-ci.org/zvtvz/zvt-ccxt)
[](http://hits.dwyl.io/zvtvz/zvt-ccxt)
## How to use
### 1.1 install
```
pip install zvt-ccxt
pip show zvt-ccxt
```
Make sure to use the latest version:
```
pip install --upgrade zvt-ccxt
```
### 1.2 use in zvt way
```
In [1]: from zvt_ccxt.domain import *
In [2]: Coin
Out[2]: zvt_ccxt.domain.coin_meta.Coin
In [3]: Coin.record_data()
Coin registered recorders:{'ccxt': <class 'zvt_ccxt.recorders.coin_recorder.CoinMetaRecorder'>}
2020-07-17 23:26:38,730 INFO MainThread init_markets for binance success
2020-07-17 23:26:40,941 INFO MainThread init_markets for huobipro success
In [4]: Coin.query_data()
Out[4]:
id entity_id timestamp entity_type exchange code name
0 coin_binance_BTC/USDT coin_binance_BTC/USDT None coin binance BTC/USDT BTC/USDT
1 coin_binance_ETH/USDT coin_binance_ETH/USDT None coin binance ETH/USDT ETH/USDT
2 coin_binance_EOS/USDT coin_binance_EOS/USDT None coin binance EOS/USDT EOS/USDT
3 coin_huobipro_BTC/USDT coin_huobipro_BTC/USDT None coin huobipro BTC/USDT BTC/USDT
4 coin_huobipro_ETH/USDT coin_huobipro_ETH/USDT None coin huobipro ETH/USDT ETH/USDT
5 coin_huobipro_EOS/USDT coin_huobipro_EOS/USDT None coin huobipro EOS/USDT EOS/USDT
In [2]: Coin1dKdata.record_data()
In [4]: Coin1dKdata.query_data(codes=['BTC/USDT'])
Out[4]:
id entity_id timestamp provider code name level open close high low volume turnover
0 coin_binance_BTC/USDT_2017-10-22 coin_binance_BTC/USDT 2017-10-22 ccxt BTC/USDT BTC/USDT 1d 6003.27 5950.02 6060.00 5720.03 1362.092216 None
1 coin_binance_BTC/USDT_2017-10-23 coin_binance_BTC/USDT 2017-10-23 ccxt BTC/USDT BTC/USDT 1d 5975.00 5915.93 6080.00 5621.03 1812.557715 None
2 coin_binance_BTC/USDT_2017-10-24 coin_binance_BTC/USDT 2017-10-24 ccxt BTC/USDT BTC/USDT 1d 5909.47 5477.03 5925.00 5450.00 2580.418767 None
3 coin_binance_BTC/USDT_2017-10-25 coin_binance_BTC/USDT 2017-10-25 ccxt BTC/USDT BTC/USDT 1d 5506.92 5689.99 5704.96 5286.98 2282.813205 None
4 coin_binance_BTC/USDT_2017-10-26 coin_binance_BTC/USDT 2017-10-26 ccxt BTC/USDT BTC/USDT 1d 5670.10 5861.77 5939.99 5650.00 1972.965882 None
.. ... ... ... ... ... ... ... ... ... ... ... ... ...
995 coin_binance_BTC/USDT_2020-07-13 coin_binance_BTC/USDT 2020-07-13 ccxt BTC/USDT BTC/USDT 1d 9303.31 9242.62 9343.82 9200.89 42740.069115 None
996 coin_binance_BTC/USDT_2020-07-14 coin_binance_BTC/USDT 2020-07-14 ccxt BTC/USDT BTC/USDT 1d 9242.61 9255.85 9279.54 9113.00 45772.552509 None
997 coin_binance_BTC/USDT_2020-07-15 coin_binance_BTC/USDT 2020-07-15 ccxt BTC/USDT BTC/USDT 1d 9255.85 9197.60 9276.49 9160.57 39053.579665 None
998 coin_binance_BTC/USDT_2020-07-16 coin_binance_BTC/USDT 2020-07-16 ccxt BTC/USDT BTC/USDT 1d 9197.60 9133.72 9226.15 9047.25 43375.571191 None
999 coin_binance_BTC/USDT_2020-07-17 coin_binance_BTC/USDT 2020-07-17 ccxt BTC/USDT BTC/USDT 1d 9133.72 9157.72 9186.83 9089.81 21075.560207 None
[1000 rows x 13 columns]
```
## 💌 Buy the author a coffee
If you find this project helpful, you can buy the author a coffee.
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/alipay-cn.png" width="25%" alt="Alipay">
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat-cn.png" width="25%" alt="Wechat">
## 🤝联系方式
个人微信:foolcage 添加暗号:zvt-ccxt
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat.jpeg" width="25%" alt="Wechat">
------
微信公众号:
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/gongzhonghao.jpg" width="25%" alt="Wechat">
知乎专栏:
https://zhuanlan.zhihu.com/automoney
## Thanks
<p><a href=https://www.jetbrains.com/?from=zvt><img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/jetbrains.png" width="25%" alt="jetbrains"></a></p>
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/README.md
|
README.md
|
# -*- coding: utf-8 -*-
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
try:
# for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
install_reqs = parse_requirements("requirements.txt", session=False)
try:
requirements = [str(ir.req) for ir in install_reqs]
except Exception:
requirements = [str(ir.requirement) for ir in install_reqs]
setup(
name="zvt-ccxt",
version='0.0.6',
description='ccxt cryptocurrency plugin for zvt',
install_requires=requirements,
entry_points={"zvt": ["ccxt = zvt_ccxt"]},
py_modules=["zvt_ccxt"],
packages=find_packages(),
package_data={
'zvt_ccxt.accounts': ['*.json']
},
long_description=long_description,
url='https://github.com/zvtvz/zvt-ccxt',
author='foolcage',
author_email='[email protected]',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='quant zvt cryptocurrency coin btc eos',
project_urls={ # Optional
'Bug Reports': 'https://github.com/zvtvz/zvt-ccxt/issues',
'Funding': 'https://github.com/zvtvz/zvt-ccxt',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/zvtvz/zvt-ccxt',
},
include_package_data=True,
long_description_content_type="text/markdown",
)
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
# ****** setting for crypto currency ****** #
COIN_EXCHANGES = ["binance", "huobipro"]
# COIN_BASE = ["BTC", "ETH", "XRP", "BCH", "EOS", "LTC", "XLM", "ADA", "IOTA", "TRX", "NEO", "DASH", "XMR",
# "BNB", "ETC", "QTUM", "ONT"]
COIN_BASE = ["BTC", "ETH", "EOS"]
COIN_PAIRS = [("{}/{}".format(item, "USDT")) for item in COIN_BASE] + \
[("{}/{}".format(item, "USD")) for item in COIN_BASE]
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/settings.py
|
settings.py
|
# -*- coding: utf-8 -*-
import zvt
from zvt_ccxt.accounts import CCXTAccount
@zvt.hookimpl
def zvt_setup_env(config: dict):
return "zvt_ccxt", CCXTAccount.exchange_conf
from zvt_ccxt.domain import *
from zvt_ccxt.recorders import *
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/__init__.py
|
__init__.py
|
from zvt.domain import KdataCommon, TickCommon
class CoinKdataCommon(KdataCommon):
pass
class CoinTickCommon(TickCommon):
pass
from .coin_meta import *
from .quotes import *
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/__init__.py
|
__init__.py
|
# Cryptocurrency
import pandas as pd
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract import EntityMixin
from zvt.contract.register import register_entity, register_schema
CoinMetaBase = declarative_base()
@register_entity(entity_type='coin')
class Coin(CoinMetaBase, EntityMixin):
__tablename__ = 'coin'
@classmethod
def get_trading_dates(cls, start_date=None, end_date=None):
return pd.date_range(start_date, end_date, freq='D')
@classmethod
def could_short(cls):
return True
@classmethod
def get_trading_t(cls):
return 0
@classmethod
def get_trading_intervals(cls):
return [('00:00', '23:59')]  # a (start, end) interval; crypto trades around the clock
register_schema(providers=['ccxt'], db_name='coin', schema_base=CoinMetaBase)
__all__ = ['Coin']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/coin_meta.py
|
coin_meta.py
|
from zvt.contract import IntervalLevel
from zvt.domain.quotes.gen_kdata_schema import gen_kdata_schema
if __name__ == '__main__':
gen_kdata_schema(pkg='zvt_ccxt', providers=['ccxt'], entity_type='coin',
levels=[level for level in IntervalLevel])
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/gen_kdata_schema.py
|
gen_kdata_schema.py
|
from zvt.domain import KdataCommon, TickCommon
class CoinKdataCommon(KdataCommon):
pass
class CoinTickCommon(TickCommon):
pass
from .coin import *
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin1monKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_1mon_kdata'
register_schema(providers=['ccxt'], db_name='coin_1mon_kdata', schema_base=KdataBase)
__all__ = ['Coin1monKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_1mon_kdata.py
|
coin_1mon_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin1wkKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_1wk_kdata'
register_schema(providers=['ccxt'], db_name='coin_1wk_kdata', schema_base=KdataBase)
__all__ = ['Coin1wkKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_1wk_kdata.py
|
coin_1wk_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin4hKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_4h_kdata'
register_schema(providers=['ccxt'], db_name='coin_4h_kdata', schema_base=KdataBase)
__all__ = ['Coin4hKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_4h_kdata.py
|
coin_4h_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin1hKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_1h_kdata'
register_schema(providers=['ccxt'], db_name='coin_1h_kdata', schema_base=KdataBase)
__all__ = ['Coin1hKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_1h_kdata.py
|
coin_1h_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin5mKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_5m_kdata'
register_schema(providers=['ccxt'], db_name='coin_5m_kdata', schema_base=KdataBase)
__all__ = ['Coin5mKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_5m_kdata.py
|
coin_5m_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin1mKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_1m_kdata'
register_schema(providers=['ccxt'], db_name='coin_1m_kdata', schema_base=KdataBase)
__all__ = ['Coin1mKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_1m_kdata.py
|
coin_1m_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinTickCommon
KdataBase = declarative_base()
class CoinTickKdata(KdataBase, CoinTickCommon):
__tablename__ = 'coin_tick_kdata'
register_schema(providers=['ccxt'], db_name='coin_tick_kdata', schema_base=KdataBase)
__all__ = ['CoinTickKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_tick_kdata.py
|
coin_tick_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin15mKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_15m_kdata'
register_schema(providers=['ccxt'], db_name='coin_15m_kdata', schema_base=KdataBase)
__all__ = ['Coin15mKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_15m_kdata.py
|
coin_15m_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from zvt_ccxt.domain.quotes.coin.coin_tick_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_1m_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_5m_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_15m_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_30m_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_1h_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_4h_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_1d_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_1wk_kdata import *
from zvt_ccxt.domain.quotes.coin.coin_1mon_kdata import *
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin1dKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_1d_kdata'
register_schema(providers=['ccxt'], db_name='coin_1d_kdata', schema_base=KdataBase)
__all__ = ['Coin1dKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_1d_kdata.py
|
coin_1d_kdata.py
|
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.contract.register import register_schema
from zvt_ccxt.domain.quotes import CoinKdataCommon
KdataBase = declarative_base()
class Coin30mKdata(KdataBase, CoinKdataCommon):
__tablename__ = 'coin_30m_kdata'
register_schema(providers=['ccxt'], db_name='coin_30m_kdata', schema_base=KdataBase)
__all__ = ['Coin30mKdata']
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/domain/quotes/coin/coin_30m_kdata.py
|
coin_30m_kdata.py
|
import pandas as pd
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt_ccxt.accounts import CCXTAccount
from zvt_ccxt.domain import Coin
from zvt_ccxt.settings import COIN_EXCHANGES, COIN_PAIRS
class CoinMetaRecorder(Recorder):
provider = 'ccxt'
data_schema = Coin
def __init__(self, batch_size=10, force_update=False, sleeping_time=10, exchanges=COIN_EXCHANGES) -> None:
super().__init__(batch_size, force_update, sleeping_time)
self.exchanges = exchanges
def run(self):
for exchange_str in self.exchanges:
exchange = CCXTAccount.get_ccxt_exchange(exchange_str)
try:
markets = exchange.fetch_markets()
df = pd.DataFrame()
# markets may be a dict keyed by symbol, or a list
markets_type = type(markets)
if markets_type not in (dict, list):
self.logger.exception("unknown return markets type {}".format(markets_type))
return
aa = []
for market in markets:
if markets_type == dict:
name = market
code = market
if markets_type == list:
code = market['symbol']
name = market['symbol']
if name not in COIN_PAIRS:
continue
aa.append(market)
security_item = {
'id': '{}_{}_{}'.format('coin', exchange_str, code),
'entity_id': '{}_{}_{}'.format('coin', exchange_str, code),
'exchange': exchange_str,
'entity_type': 'coin',
'code': code,
'name': name
}
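# e.g. id/entity_id 'coin_binance_BTC/USDT' for BTC/USDT on binance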
df = df.append(security_item, ignore_index=True)
# persist this exchange's coin list
if not df.empty:
df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=True)
self.logger.info("init_markets for {} success".format(exchange_str))
except Exception as e:
self.logger.exception(f"init_markets for {exchange_str} failed: {e}")
__all__ = ["CoinMetaRecorder"]
if __name__ == '__main__':
CoinMetaRecorder().run()
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/recorders/coin_recorder.py
|
coin_recorder.py
|
# -*- coding: utf-8 -*-
import argparse
from zvt import init_log
from zvt.api import get_kdata_schema, generate_kdata_id
from zvt.contract import IntervalLevel
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.utils.time_utils import to_pd_timestamp
from zvt_ccxt.accounts import CCXTAccount
from zvt_ccxt.domain import Coin, CoinTickCommon
from zvt_ccxt.settings import COIN_EXCHANGES, COIN_PAIRS
class CoinTickRecorder(FixedCycleDataRecorder):
provider = 'ccxt'
entity_provider = 'ccxt'
entity_schema = Coin
# only here to register the recorder to the data_schema
data_schema = CoinTickCommon
def __init__(self,
exchanges=['binance'],
entity_ids=None,
codes=None,
batch_size=10,
force_update=True,
sleeping_time=10,
default_size=2000,
real_time=True,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None,
kdata_use_begin_time=False,
close_hour=None,
close_minute=None,
level=IntervalLevel.LEVEL_TICK,
one_day_trading_minutes=24 * 60) -> None:
self.data_schema = get_kdata_schema(entity_type='coin', level=level)
super().__init__('coin', exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, IntervalLevel.LEVEL_TICK, kdata_use_begin_time, one_day_trading_minutes)
def generate_domain_id(self, entity, original_data):
return generate_kdata_id(entity_id=entity.id, timestamp=original_data['timestamp'], level=self.level)
def record(self, entity, start, end, size, timestamps):
if size < 20:
size = 20
ccxt_exchange = CCXTAccount.get_ccxt_exchange(entity.exchange)
if ccxt_exchange.has['fetchTrades']:
limit = CCXTAccount.get_tick_limit(entity.exchange)
limit = min(size, limit)
kdata_list = []
trades = ccxt_exchange.fetch_trades(entity.code, limit=limit)
for trade in trades:
kdata_json = {
'name': entity.name,
'provider': 'ccxt',
# 'id': trade['id'],
'level': 'tick',
'order': trade['order'],
'timestamp': to_pd_timestamp(trade['timestamp']),
'price': trade['price'],
'volume': trade['amount'],
'direction': trade['side'],
'order_type': trade['type'],
'turnover': trade['price'] * trade['amount']
}
kdata_list.append(kdata_json)
return kdata_list
else:
self.logger.warning("exchange:{} does not support fetchTrades".format(entity.exchange))
__all__ = ["CoinTickRecorder"]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exchanges', help='exchanges', default='binance', nargs='+',
choices=[item for item in COIN_EXCHANGES])
parser.add_argument('--codes', help='codes', default='EOS/USDT', nargs='+',
choices=[item for item in COIN_PAIRS])
args = parser.parse_args()
init_log('coin_tick_kdata.log')
CoinTickRecorder(codes=['EOS/USDT']).run()
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/recorders/coin_tick_recorder.py
|
coin_tick_recorder.py
|
# -*- coding: utf-8 -*-
import argparse
from zvt import init_log
from zvt.api import generate_kdata_id, get_kdata_schema
from zvt.contract import IntervalLevel
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.utils.time_utils import to_pd_timestamp
from zvt.utils.time_utils import to_time_str
from zvt_ccxt.accounts import CCXTAccount
from zvt_ccxt.domain import Coin, CoinKdataCommon
from zvt_ccxt.recorders import to_ccxt_trading_level
from zvt_ccxt.settings import COIN_EXCHANGES, COIN_PAIRS
class CoinKdataRecorder(FixedCycleDataRecorder):
provider = 'ccxt'
entity_provider = 'ccxt'
entity_schema = Coin
# only here to register the recorder to the data_schema
data_schema = CoinKdataCommon
def __init__(self,
exchanges=['binance'],
entity_ids=None,
codes=None,
batch_size=10,
force_update=True,
sleeping_time=10,
default_size=2000,
real_time=False,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None,
level=IntervalLevel.LEVEL_1DAY,
kdata_use_begin_time=True,
close_hour=None,
close_minute=None,
one_day_trading_minutes=24 * 60) -> None:
self.data_schema = get_kdata_schema(entity_type='coin', level=level)
self.ccxt_trading_level = to_ccxt_trading_level(level)
super().__init__('coin', exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
def generate_domain_id(self, entity, original_data):
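# produces ids like 'coin_binance_BTC/USDT_2017-10-22' at the 1d level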
return generate_kdata_id(entity_id=entity.id, timestamp=original_data['timestamp'], level=self.level)
def record(self, entity, start, end, size, timestamps):
start_timestamp = to_time_str(start)
ccxt_exchange = CCXTAccount.get_ccxt_exchange(entity.exchange)
if ccxt_exchange.has['fetchOHLCV']:
limit = CCXTAccount.get_kdata_limit(entity.exchange)
limit = min(size, limit)
kdata_list = []
if CCXTAccount.exchange_conf[entity.exchange]['support_since']:
kdatas = ccxt_exchange.fetch_ohlcv(entity.code,
timeframe=self.ccxt_trading_level,
since=start_timestamp)
else:
kdatas = ccxt_exchange.fetch_ohlcv(entity.code,
timeframe=self.ccxt_trading_level,
limit=limit)
for kdata in kdatas:
current_timestamp = kdata[0]
if self.level == IntervalLevel.LEVEL_1DAY:
current_timestamp = to_time_str(current_timestamp)
kdata_json = {
'timestamp': to_pd_timestamp(current_timestamp),
'open': kdata[1],
'high': kdata[2],
'low': kdata[3],
'close': kdata[4],
'volume': kdata[5],
'name': entity.name,
'provider': 'ccxt',
'level': self.level.value
}
kdata_list.append(kdata_json)
return kdata_list
else:
self.logger.warning("exchange:{} does not support fetchOHLCV".format(entity.exchange))
__all__ = ["CoinKdataRecorder"]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--level', help='trading level', default='1m', choices=[item.value for item in IntervalLevel])
parser.add_argument('--exchanges', help='exchanges', default='binance', nargs='+',
choices=[item for item in COIN_EXCHANGES])
parser.add_argument('--codes', help='codes', default='EOS/USDT', nargs='+',
choices=[item for item in COIN_PAIRS])
args = parser.parse_args()
level = IntervalLevel(args.level)
exchanges = args.exchanges
if not isinstance(exchanges, list):
exchanges = [exchanges]
codes = args.codes
if not isinstance(codes, list):
codes = [codes]
init_log(
'coin_{}_{}_{}_kdata.log'.format('-'.join(exchanges), '-'.join(codes).replace('/', ''), args.level))
CoinKdataRecorder(exchanges=exchanges, codes=codes, level=level, real_time=True).run()
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/recorders/coin_kdata_recorder.py
|
coin_kdata_recorder.py
|
# -*- coding: utf-8 -*-
from zvt.contract import IntervalLevel
def to_ccxt_trading_level(trading_level: IntervalLevel):
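# IntervalLevel values ('1m', '1h', '1d', ...) are used directly as ccxt timeframes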
return trading_level.value
from .coin_kdata_recorder import *
from .coin_tick_recorder import *
from .coin_recorder import *
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/recorders/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
from zvt_ccxt.accounts.ccxt_account import CCXTAccount
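# loads the packaged accounts/<exchange>.json configuration for each exchange at import time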
CCXTAccount.init()
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/accounts/__init__.py
|
__init__.py
|
# -*- coding: utf-8 -*-
import json
import ccxt
from zvt import zvt_env
from zvt_ccxt.settings import COIN_EXCHANGES
class CCXTAccount(object):
exchanges = COIN_EXCHANGES
exchange_conf = {}
@classmethod
def init(cls):
for exchange in cls.exchanges:
import pkg_resources
resource_package = 'zvt_ccxt'
resource_path = 'accounts/{}.json'.format(exchange)
config_file = pkg_resources.resource_filename(resource_package, resource_path)
with open(config_file) as f:
cls.exchange_conf[exchange] = json.load(f)
@classmethod
def get_tick_limit(cls, exchange):
return cls.exchange_conf[exchange]['tick_limit']
@classmethod
def get_kdata_limit(cls, exchange):
return cls.exchange_conf[exchange]['kdata_limit']
@classmethod
def get_safe_sleeping_time(cls, exchange):
return cls.exchange_conf[exchange]['safe_sleeping_time']
@classmethod
def get_ccxt_exchange(cls, exchange_str) -> ccxt.Exchange:
exchange = getattr(ccxt, exchange_str)()  # look up the exchange class by name
exchange.apiKey = cls.exchange_conf[exchange_str]['apiKey']
exchange.secret = cls.exchange_conf[exchange_str]['secret']
# set to your proxies if need
exchange.proxies = {'http': zvt_env['http_proxy'], 'https': zvt_env['https_proxy']}
return exchange
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/zvt_ccxt/accounts/ccxt_account.py
|
ccxt_account.py
|
# -*- coding: utf-8 -*-
def init_test_context():
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/tests/context.py
|
context.py
|
# -*- coding: utf-8 -*-
from .context import init_test_context
init_test_context()
from zvt_ccxt.domain import Coin
def test_coin():
try:
Coin.record_data()
Coin.query_data()
except Exception:
assert False
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/tests/test_coin.py
|
test_coin.py
|
# -*- coding: utf-8 -*-
|
zvt-ccxt
|
/zvt-ccxt-0.0.6.tar.gz/zvt-ccxt-0.0.6/tests/__init__.py
|
__init__.py
|
## 1. Naming
* snake_case applies to
* variable
* package
* module
* function
* method
* CamelCase applies to
* class
## 2. Public interfaces
The interfaces a module exposes should be declared explicitly in __all__. For example,
the [contract module](https://github.com/zvtvz/zvt/blob/master/zvt/contract/__init__.py) ends with:
```
__all__ = ['IntervalLevel', 'Mixin', 'NormalMixin', 'EntityMixin', 'NormalEntityMixin', 'zvt_context']
```
## 3. Imports
Do not use from zvt.module import *; import exactly what you need and do not pollute the namespace.
## 4. That's all
No api is the best api.
Code as comment.
|
zvt
|
/zvt-0.10.4.tar.gz/zvt-0.10.4/code_of_conduct.md
|
code_of_conduct.md
|
[](https://github.com/zvtvz/zvt)
[](https://pypi.org/project/zvt/)
[](https://pypi.org/project/zvt/)
[](https://pypi.org/project/zvt/)
[](https://github.com/zvtvz/zvt/actions/workflows/build.yml)
[](https://github.com/zvtvz/zvt/actions/workflows/package.yaml)
[](https://zvt.readthedocs.io/en/latest/?badge=latest)
[](https://codecov.io/github/zvtvz/zvt)
[](https://pepy.tech/project/zvt)
**Read this in other languages: [中文](README-cn.md).**
**Read the docs: [https://zvt.readthedocs.io/en/latest/](https://zvt.readthedocs.io/en/latest/)**
### Install
```
python3 -m pip install -U zvt
```
### Main UI
After the installation is complete, enter zvt on the command line:
```shell
zvt
```
open [http://127.0.0.1:8050/](http://127.0.0.1:8050/)
> The example shown here relies on data, factors and traders; please read the [docs](https://zvt.readthedocs.io/en/latest/)
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/zvt-factor.png'/></p>
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/zvt-trader.png'/></p>
> The core concepts of the system are visual, and the UI names correspond to them one-to-one, so the UI is uniform and extensible.
> You can write and run strategies in your favorite IDE, then view the related targets, factors, signals and performance on the UI.
### Behold, the power of zvt:
```
>>> from zvt.domain import Stock, Stock1dHfqKdata
>>> from zvt.ml import MaStockMLMachine
>>> Stock.record_data(provider="em")
>>> entity_ids = ["stock_sz_000001", "stock_sz_000338", "stock_sh_601318"]
>>> Stock1dHfqKdata.record_data(provider="em", entity_ids=entity_ids, sleeping_time=1)
>>> machine = MaStockMLMachine(entity_ids=["stock_sz_000001"], data_provider="em")
>>> machine.train()
>>> machine.predict()
>>> machine.draw_result(entity_id="stock_sz_000001")
```
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/pred_close.png'/></p>
> The few lines of code above have done: data capture, persistence, incremental update, machine learning, prediction, and result display.
> Once you are familiar with the core concepts of the system, you can apply them to any target in the market.
### Data
#### China stock
```
>>> from zvt.domain import *
>>> Stock.record_data(provider="em")
>>> df = Stock.query_data(provider="em", index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
000001 stock_sz_000001 stock_sz_000001 1991-04-03 stock sz 000001 平安银行 1991-04-03 None
000002 stock_sz_000002 stock_sz_000002 1991-01-29 stock sz 000002 万 科A 1991-01-29 None
000004 stock_sz_000004 stock_sz_000004 1990-12-01 stock sz 000004 国华网安 1990-12-01 None
000005 stock_sz_000005 stock_sz_000005 1990-12-10 stock sz 000005 世纪星源 1990-12-10 None
000006 stock_sz_000006 stock_sz_000006 1992-04-27 stock sz 000006 深振业A 1992-04-27 None
... ... ... ... ... ... ... ... ... ...
605507 stock_sh_605507 stock_sh_605507 2021-08-02 stock sh 605507 国邦医药 2021-08-02 None
605577 stock_sh_605577 stock_sh_605577 2021-08-24 stock sh 605577 龙版传媒 2021-08-24 None
605580 stock_sh_605580 stock_sh_605580 2021-08-19 stock sh 605580 恒盛能源 2021-08-19 None
605588 stock_sh_605588 stock_sh_605588 2021-08-12 stock sh 605588 冠石科技 2021-08-12 None
605589 stock_sh_605589 stock_sh_605589 2021-08-10 stock sh 605589 圣泉集团 2021-08-10 None
[4136 rows x 9 columns]
```
#### USA stock
```
>>> Stockus.record_data()
>>> df = Stockus.query_data(index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
A stockus_nyse_A stockus_nyse_A NaT stockus nyse A 安捷伦 None None
AA stockus_nyse_AA stockus_nyse_AA NaT stockus nyse AA 美国铝业 None None
AAC stockus_nyse_AAC stockus_nyse_AAC NaT stockus nyse AAC Ares Acquisition Corp-A None None
AACG stockus_nasdaq_AACG stockus_nasdaq_AACG NaT stockus nasdaq AACG ATA Creativity Global ADR None None
AACG stockus_nyse_AACG stockus_nyse_AACG NaT stockus nyse AACG ATA Creativity Global ADR None None
... ... ... ... ... ... ... ... ... ...
ZWRK stockus_nasdaq_ZWRK stockus_nasdaq_ZWRK NaT stockus nasdaq ZWRK Z-Work Acquisition Corp-A None None
ZY stockus_nasdaq_ZY stockus_nasdaq_ZY NaT stockus nasdaq ZY Zymergen Inc None None
ZYME stockus_nyse_ZYME stockus_nyse_ZYME NaT stockus nyse ZYME Zymeworks Inc None None
ZYNE stockus_nasdaq_ZYNE stockus_nasdaq_ZYNE NaT stockus nasdaq ZYNE Zynerba Pharmaceuticals Inc None None
ZYXI stockus_nasdaq_ZYXI stockus_nasdaq_ZYXI NaT stockus nasdaq ZYXI Zynex Inc None None
[5826 rows x 9 columns]
>>> Stockus.query_data(code='AAPL')
id entity_id timestamp entity_type exchange code name list_date end_date
0 stockus_nasdaq_AAPL stockus_nasdaq_AAPL None stockus nasdaq AAPL 苹果 None None
```
#### Hong Kong stock
```
>>> Stockhk.record_data()
>>> df = Stockhk.query_data(index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
00001 stockhk_hk_00001 stockhk_hk_00001 NaT stockhk hk 00001 长和 None None
00002 stockhk_hk_00002 stockhk_hk_00002 NaT stockhk hk 00002 中电控股 None None
00003 stockhk_hk_00003 stockhk_hk_00003 NaT stockhk hk 00003 香港中华煤气 None None
00004 stockhk_hk_00004 stockhk_hk_00004 NaT stockhk hk 00004 九龙仓集团 None None
00005 stockhk_hk_00005 stockhk_hk_00005 NaT stockhk hk 00005 汇丰控股 None None
... ... ... ... ... ... ... ... ... ...
09996 stockhk_hk_09996 stockhk_hk_09996 NaT stockhk hk 09996 沛嘉医疗-B None None
09997 stockhk_hk_09997 stockhk_hk_09997 NaT stockhk hk 09997 康基医疗 None None
09998 stockhk_hk_09998 stockhk_hk_09998 NaT stockhk hk 09998 光荣控股 None None
09999 stockhk_hk_09999 stockhk_hk_09999 NaT stockhk hk 09999 网易-S None None
80737 stockhk_hk_80737 stockhk_hk_80737 NaT stockhk hk 80737 湾区发展-R None None
[2597 rows x 9 columns]
>>> df[df.code=='00700']
id entity_id timestamp entity_type exchange code name list_date end_date
2112 stockhk_hk_00700 stockhk_hk_00700 None stockhk hk 00700 腾讯控股 None None
```
#### And more
```
>>> from zvt.contract import *
>>> zvt_context.tradable_schema_map
{'stockus': zvt.domain.meta.stockus_meta.Stockus,
'stockhk': zvt.domain.meta.stockhk_meta.Stockhk,
'index': zvt.domain.meta.index_meta.Index,
'etf': zvt.domain.meta.etf_meta.Etf,
'stock': zvt.domain.meta.stock_meta.Stock,
'block': zvt.domain.meta.block_meta.Block,
'fund': zvt.domain.meta.fund_meta.Fund}
```
The key is the tradable entity type and the value is its schema. The system provides unified **record (record_data)** and **query (query_data)** methods for every schema.
```
>>> Index.record_data()
>>> df=Index.query_data(filters=[Index.category=='scope',Index.exchange=='sh'])
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date publisher category base_point
0 index_sh_000001 index_sh_000001 1990-12-19 index sh 000001 上证指数 1991-07-15 None csindex scope 100.00
1 index_sh_000002 index_sh_000002 1990-12-19 index sh 000002 A股指数 1992-02-21 None csindex scope 100.00
2 index_sh_000003 index_sh_000003 1992-02-21 index sh 000003 B股指数 1992-08-17 None csindex scope 100.00
3 index_sh_000010 index_sh_000010 2002-06-28 index sh 000010 上证180 2002-07-01 None csindex scope 3299.06
4 index_sh_000016 index_sh_000016 2003-12-31 index sh 000016 上证50 2004-01-02 None csindex scope 1000.00
.. ... ... ... ... ... ... ... ... ... ... ... ...
25 index_sh_000020 index_sh_000020 2007-12-28 index sh 000020 中型综指 2008-05-12 None csindex scope 1000.00
26 index_sh_000090 index_sh_000090 2009-12-31 index sh 000090 上证流通 2010-12-02 None csindex scope 1000.00
27 index_sh_930903 index_sh_930903 2012-12-31 index sh 930903 中证A股 2016-10-18 None csindex scope 1000.00
28 index_sh_000688 index_sh_000688 2019-12-31 index sh 000688 科创50 2020-07-23 None csindex scope 1000.00
29 index_sh_931643 index_sh_931643 2019-12-31 index sh 931643 科创创业50 2021-06-01 None csindex scope 1000.00
[30 rows x 12 columns]
```
### EntityEvent
We have tradable entities, and then events about them.
#### Market quotes
The TradableEntity quote schema follows this rule:
```
{entity_schema}{level}{adjust_type}Kdata
```
* entity_schema
The TradableEntity class, e.g., Stock, Stockus.
* level
```
>>> for level in IntervalLevel:
print(level.value)
```
* adjust type
```
>>> for adjust_type in AdjustType:
print(adjust_type.value)
```
> Note: to stay compatible with historical data, pre-adjusted (qfq) is an exception: its {adjust_type} is left empty
qfq (pre-adjusted)
```
>>> Stock1dKdata.record_data(code='000338', provider='em')
>>> df = Stock1dKdata.query_data(code='000338', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stock_sz_000338_2007-04-30 stock_sz_000338 2007-04-30 None 000338 潍柴动力 1d 2.33 2.00 2.40 1.87 207375.0 1.365189e+09 3.2472 0.1182
1 stock_sz_000338_2007-05-08 stock_sz_000338 2007-05-08 None 000338 潍柴动力 1d 2.11 1.94 2.20 1.87 86299.0 5.563198e+08 -0.0300 0.0492
2 stock_sz_000338_2007-05-09 stock_sz_000338 2007-05-09 None 000338 潍柴动力 1d 1.90 1.81 1.94 1.66 93823.0 5.782065e+08 -0.0670 0.0535
3 stock_sz_000338_2007-05-10 stock_sz_000338 2007-05-10 None 000338 潍柴动力 1d 1.78 1.85 1.98 1.75 47720.0 2.999226e+08 0.0221 0.0272
4 stock_sz_000338_2007-05-11 stock_sz_000338 2007-05-11 None 000338 潍柴动力 1d 1.81 1.73 1.81 1.66 39273.0 2.373126e+08 -0.0649 0.0224
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3426 stock_sz_000338_2021-08-27 stock_sz_000338 2021-08-27 None 000338 潍柴动力 1d 19.39 20.30 20.30 19.25 1688497.0 3.370241e+09 0.0601 0.0398
3427 stock_sz_000338_2021-08-30 stock_sz_000338 2021-08-30 None 000338 潍柴动力 1d 20.30 20.09 20.31 19.78 1187601.0 2.377957e+09 -0.0103 0.0280
3428 stock_sz_000338_2021-08-31 stock_sz_000338 2021-08-31 None 000338 潍柴动力 1d 20.20 20.07 20.63 19.70 1143985.0 2.295195e+09 -0.0010 0.0270
3429 stock_sz_000338_2021-09-01 stock_sz_000338 2021-09-01 None 000338 潍柴动力 1d 19.98 19.68 19.98 19.15 1218697.0 2.383841e+09 -0.0194 0.0287
3430 stock_sz_000338_2021-09-02 stock_sz_000338 2021-09-02 None 000338 潍柴动力 1d 19.71 19.85 19.97 19.24 1023545.0 2.012006e+09 0.0086 0.0241
[3431 rows x 15 columns]
>>> Stockus1dKdata.record_data(code='AAPL', provider='em')
>>> df = Stockus1dKdata.query_data(code='AAPL', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stockus_nasdaq_AAPL_1984-09-07 stockus_nasdaq_AAPL 1984-09-07 None AAPL 苹果 1d -5.59 -5.59 -5.58 -5.59 2981600.0 0.000000e+00 0.0000 0.0002
1 stockus_nasdaq_AAPL_1984-09-10 stockus_nasdaq_AAPL 1984-09-10 None AAPL 苹果 1d -5.59 -5.59 -5.58 -5.59 2346400.0 0.000000e+00 0.0000 0.0001
2 stockus_nasdaq_AAPL_1984-09-11 stockus_nasdaq_AAPL 1984-09-11 None AAPL 苹果 1d -5.58 -5.58 -5.58 -5.58 5444000.0 0.000000e+00 0.0018 0.0003
3 stockus_nasdaq_AAPL_1984-09-12 stockus_nasdaq_AAPL 1984-09-12 None AAPL 苹果 1d -5.58 -5.59 -5.58 -5.59 4773600.0 0.000000e+00 -0.0018 0.0003
4 stockus_nasdaq_AAPL_1984-09-13 stockus_nasdaq_AAPL 1984-09-13 None AAPL 苹果 1d -5.58 -5.58 -5.58 -5.58 7429600.0 0.000000e+00 0.0018 0.0004
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
8765 stockus_nasdaq_AAPL_2021-08-27 stockus_nasdaq_AAPL 2021-08-27 None AAPL 苹果 1d 147.48 148.60 148.75 146.83 55802388.0 8.265452e+09 0.0072 0.0034
8766 stockus_nasdaq_AAPL_2021-08-30 stockus_nasdaq_AAPL 2021-08-30 None AAPL 苹果 1d 149.00 153.12 153.49 148.61 90956723.0 1.383762e+10 0.0304 0.0055
8767 stockus_nasdaq_AAPL_2021-08-31 stockus_nasdaq_AAPL 2021-08-31 None AAPL 苹果 1d 152.66 151.83 152.80 151.29 86453117.0 1.314255e+10 -0.0084 0.0052
8768 stockus_nasdaq_AAPL_2021-09-01 stockus_nasdaq_AAPL 2021-09-01 None AAPL 苹果 1d 152.83 152.51 154.98 152.34 80313711.0 1.235321e+10 0.0045 0.0049
8769 stockus_nasdaq_AAPL_2021-09-02 stockus_nasdaq_AAPL 2021-09-02 None AAPL 苹果 1d 153.87 153.65 154.72 152.40 71171317.0 1.093251e+10 0.0075 0.0043
[8770 rows x 15 columns]
```
hfq (post-adjusted)
```
>>> Stock1dHfqKdata.record_data(code='000338', provider='em')
>>> df = Stock1dHfqKdata.query_data(code='000338', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stock_sz_000338_2007-04-30 stock_sz_000338 2007-04-30 None 000338 潍柴动力 1d 70.00 64.93 71.00 62.88 207375.0 1.365189e+09 2.1720 0.1182
1 stock_sz_000338_2007-05-08 stock_sz_000338 2007-05-08 None 000338 潍柴动力 1d 66.60 64.00 68.00 62.88 86299.0 5.563198e+08 -0.0143 0.0492
2 stock_sz_000338_2007-05-09 stock_sz_000338 2007-05-09 None 000338 潍柴动力 1d 63.32 62.00 63.88 59.60 93823.0 5.782065e+08 -0.0313 0.0535
3 stock_sz_000338_2007-05-10 stock_sz_000338 2007-05-10 None 000338 潍柴动力 1d 61.50 62.49 64.48 61.01 47720.0 2.999226e+08 0.0079 0.0272
4 stock_sz_000338_2007-05-11 stock_sz_000338 2007-05-11 None 000338 潍柴动力 1d 61.90 60.65 61.90 59.70 39273.0 2.373126e+08 -0.0294 0.0224
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3426 stock_sz_000338_2021-08-27 stock_sz_000338 2021-08-27 None 000338 潍柴动力 1d 331.97 345.95 345.95 329.82 1688497.0 3.370241e+09 0.0540 0.0398
3427 stock_sz_000338_2021-08-30 stock_sz_000338 2021-08-30 None 000338 潍柴动力 1d 345.95 342.72 346.10 337.96 1187601.0 2.377957e+09 -0.0093 0.0280
3428 stock_sz_000338_2021-08-31 stock_sz_000338 2021-08-31 None 000338 潍柴动力 1d 344.41 342.41 351.02 336.73 1143985.0 2.295195e+09 -0.0009 0.0270
3429 stock_sz_000338_2021-09-01 stock_sz_000338 2021-09-01 None 000338 潍柴动力 1d 341.03 336.42 341.03 328.28 1218697.0 2.383841e+09 -0.0175 0.0287
3430 stock_sz_000338_2021-09-02 stock_sz_000338 2021-09-02 None 000338 潍柴动力 1d 336.88 339.03 340.88 329.67 1023545.0 2.012006e+09 0.0078 0.0241
[3431 rows x 15 columns]
```
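Putting the rule together, the schema names used above decompose like this (a reading aid, not extra API):
```
{entity_schema}{level}{adjust_type}Kdata
Stock1dKdata      # Stock + 1d, adjust_type empty (qfq implied)
Stock1dHfqKdata   # Stock + 1d + Hfq (post-adjusted)
Stockus1dKdata    # Stockus + 1d, adjust_type empty
```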
#### Finance factor
```
>>> FinanceFactor.record_data(code='000338')
>>> FinanceFactor.query_data(code='000338',columns=FinanceFactor.important_cols(),index='timestamp')
basic_eps total_op_income net_profit op_income_growth_yoy net_profit_growth_yoy roe rota gross_profit_margin net_margin timestamp
timestamp
2002-12-31 NaN 1.962000e+07 2.471000e+06 NaN NaN NaN NaN 0.2068 0.1259 2002-12-31
2003-12-31 1.27 3.574000e+09 2.739000e+08 181.2022 109.8778 0.7729 0.1783 0.2551 0.0766 2003-12-31
2004-12-31 1.75 6.188000e+09 5.369000e+08 0.7313 0.9598 0.3245 0.1474 0.2489 0.0868 2004-12-31
2005-12-31 0.93 5.283000e+09 3.065000e+08 -0.1463 -0.4291 0.1327 0.0603 0.2252 0.0583 2005-12-31
2006-03-31 0.33 1.859000e+09 1.079000e+08 NaN NaN NaN NaN NaN 0.0598 2006-03-31
... ... ... ... ... ... ... ... ... ... ...
2020-08-28 0.59 9.449000e+10 4.680000e+09 0.0400 -0.1148 0.0983 0.0229 0.1958 0.0603 2020-08-28
2020-10-31 0.90 1.474000e+11 7.106000e+09 0.1632 0.0067 0.1502 0.0347 0.1949 0.0590 2020-10-31
2021-03-31 1.16 1.975000e+11 9.207000e+09 0.1327 0.0112 0.1919 0.0444 0.1931 0.0571 2021-03-31
2021-04-30 0.42 6.547000e+10 3.344000e+09 0.6788 0.6197 0.0622 0.0158 0.1916 0.0667 2021-04-30
2021-08-31 0.80 1.264000e+11 6.432000e+09 0.3375 0.3742 0.1125 0.0287 0.1884 0.0653 2021-08-31
[66 rows x 10 columns]
```
#### Three financial tables
```
>>> BalanceSheet.record_data(code='000338')
>>> IncomeStatement.record_data(code='000338')
>>> CashFlowStatement.record_data(code='000338')
```
#### And more
```
>>> zvt_context.schemas
[zvt.domain.dividend_financing.DividendFinancing,
zvt.domain.dividend_financing.DividendDetail,
zvt.domain.dividend_financing.SpoDetail...]
```
All schemas are registered in zvt_context.schemas; a **schema** is a table, i.e. a data structure.
The fields and their meanings can be checked in the following ways:
* help
Type the schema name followed by a dot and press Tab to show its fields, or call .help():
```
>>> FinanceFactor.help()
```
* source code
Schemas are defined in [domain](https://github.com/zvtvz/zvt/tree/master/zvt/domain)
From the above examples, you now know the unified way of recording data:
> Schema.record_data(provider='your provider', codes='the codes')
Note the optional parameter provider, which represents the data provider.
A schema can have multiple providers, which is the cornerstone of system stability.
Check which providers have been implemented:
```
>>> Stock.provider_map_recorder
{'joinquant': zvt.recorders.joinquant.meta.jq_stock_meta_recorder.JqChinaStockRecorder,
'exchange': zvt.recorders.exchange.exchange_stock_meta_recorder.ExchangeStockMetaRecorder,
'em': zvt.recorders.em.meta.em_stock_meta_recorder.EMStockRecorder,
'eastmoney': zvt.recorders.eastmoney.meta.eastmoney_stock_meta_recorder.EastmoneyChinaStockListRecorder}
```
You can use any provider to get the data; the first one is used by default.
One more example, recording the stock sector data:
```
>>> Block.provider_map_recorder
{'eastmoney': zvt.recorders.eastmoney.meta.eastmoney_block_meta_recorder.EastmoneyChinaBlockRecorder,
'sina': zvt.recorders.sina.meta.sina_block_recorder.SinaBlockRecorder}
>>> Block.record_data(provider='sina')
Block registered recorders:{'eastmoney': <class 'zvt.recorders.eastmoney.meta.china_stock_category_recorder.EastmoneyChinaBlockRecorder'>, 'sina': <class 'zvt.recorders.sina.meta.sina_china_stock_category_recorder.SinaChinaBlockRecorder'>}
2020-03-04 23:56:48,931 INFO MainThread finish record sina blocks:industry
2020-03-04 23:56:49,450 INFO MainThread finish record sina blocks:concept
```
Learn more about record_data:
* The parameters code (single) and codes (multiple) specify the stock codes to record
* If neither code nor codes is set, the whole market is recorded (see the sketch below)
* This method stores the data locally and only does incremental updates
For scheduled recording, refer to [data runner](https://github.com/zvtvz/zvt/blob/master/examples/data_runner)
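For example, a sketch of both cases using the 'em' provider and the Stock1dHfqKdata schema from the examples above:
```
>>> Stock1dHfqKdata.record_data(provider='em', codes=['000338', '601318'])  # record the given codes
>>> Stock1dHfqKdata.record_data(provider='em')  # no code/codes: record the whole market, incrementally
```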
#### Market-wide stock selection
After recording the data of the whole market, you can quickly query the required data locally.
An example: the top 20 stocks with roe>8% and revenue growth>8% in the 2018 annual report
```
>>> df=FinanceFactor.query_data(filters=[FinanceFactor.roe>0.08,FinanceFactor.report_period=='year',FinanceFactor.op_income_growth_yoy>0.08],start_timestamp='2019-01-01',order=FinanceFactor.roe.desc(),limit=20,columns=["code"]+FinanceFactor.important_cols(),index='code')
code basic_eps total_op_income net_profit op_income_growth_yoy net_profit_growth_yoy roe rota gross_profit_margin net_margin timestamp
code
000048 000048 2.7350 4.919000e+09 1.101000e+09 0.4311 1.5168 0.7035 0.1988 0.5243 0.2355 2020-04-30
000912 000912 0.3500 4.405000e+09 3.516000e+08 0.1796 1.2363 4.7847 0.0539 0.2175 0.0795 2019-03-20
002207 002207 0.2200 3.021000e+08 5.189000e+07 0.1600 1.1526 1.1175 0.1182 0.1565 0.1718 2020-04-27
002234 002234 5.3300 3.276000e+09 1.610000e+09 0.8023 3.2295 0.8361 0.5469 0.5968 0.4913 2020-04-21
002458 002458 3.7900 3.584000e+09 2.176000e+09 1.4326 4.9973 0.8318 0.6754 0.6537 0.6080 2020-02-20
... ... ... ... ... ... ... ... ... ... ... ...
600701 600701 -3.6858 7.830000e+08 -3.814000e+09 1.3579 -0.0325 1.9498 -0.7012 0.4173 -4.9293 2020-04-29
600747 600747 -1.5600 3.467000e+08 -2.290000e+09 2.1489 -0.4633 3.1922 -1.5886 0.0378 -6.6093 2020-06-30
600793 600793 1.6568 1.293000e+09 1.745000e+08 0.1164 0.8868 0.7490 0.0486 0.1622 0.1350 2019-04-30
600870 600870 0.0087 3.096000e+07 4.554000e+06 0.7773 1.3702 0.7458 0.0724 0.2688 0.1675 2019-03-30
688169 688169 15.6600 4.205000e+09 7.829000e+08 0.3781 1.5452 0.7172 0.4832 0.3612 0.1862 2020-04-28
[20 rows x 11 columns]
```
So, you should be able to answer the following three questions now:
* What data is there?
* How to record data?
* How to query data?
For more advanced usage and extended data, please refer to the data section in the detailed document.
### Write strategy
Now we can write strategies based on TradableEntity and EntityEvent.
The so-called strategy backtesting is nothing but repeating the following process:
#### At a certain time, find the targets matching the conditions, buy and sell them, and see the performance.
Two modes for writing strategies:
* solo (free style)
At a given time, compute the conditions from the events, then buy and sell
* formal
The two-dimensional-index, multi-entity calculation model
#### A too simple, sometimes naive person (solo)
Well, this strategy really is too simple, sometimes naive, as we are most of the time.
> When the report comes out, I look at the report.
> If the institution increases its position by more than 5%, I will buy it, and if the institution reduces its position by more than 50%, I will sell it.
Show you the code:
```
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.api import get_recent_report_date
from zvt.contract import ActorType, AdjustType
from zvt.domain import StockActorSummary, Stock1dKdata
from zvt.trader import StockTrader
from zvt.utils import pd_is_not_null, is_same_date, to_pd_timestamp
class FollowIITrader(StockTrader):
finish_date = None
def on_time(self, timestamp: pd.Timestamp):
recent_report_date = to_pd_timestamp(get_recent_report_date(timestamp))
if self.finish_date and is_same_date(recent_report_date, self.finish_date):
return
filters = [StockActorSummary.actor_type == ActorType.raised_fund.value,
StockActorSummary.report_date == recent_report_date]
if self.entity_ids:
filters = filters + [StockActorSummary.entity_id.in_(self.entity_ids)]
df = StockActorSummary.query_data(filters=filters)
if pd_is_not_null(df):
self.logger.info(f'{df}')
self.finish_date = recent_report_date
long_df = df[df['change_ratio'] > 0.05]
short_df = df[df['change_ratio'] < -0.5]
try:
self.trade_the_targets(due_timestamp=timestamp, happen_timestamp=timestamp,
long_selected=set(long_df['entity_id'].to_list()),
short_selected=set(short_df['entity_id'].to_list()))
except Exception as e:
self.logger.error(e)
if __name__ == '__main__':
entity_id = 'stock_sh_600519'
Stock1dKdata.record_data(entity_id=entity_id, provider='em')
StockActorSummary.record_data(entity_id=entity_id, provider='em')
FollowIITrader(start_timestamp='2002-01-01', end_timestamp='2021-01-01', entity_ids=[entity_id],
provider='em', adjust_type=AdjustType.qfq, profit_threshold=None).run()
```
So, writing a strategy is not that complicated.
Just use your imagination and find the relation between prices and events.
Then refresh [http://127.0.0.1:8050/](http://127.0.0.1:8050/) and check the performance of your strategy.
More examples are in [Strategy example](https://github.com/zvtvz/zvt/tree/master/examples/trader)
#### Be serious (formal)
Simple calculations can be done through query_data.
Now it's time to introduce the two-dimensional-index, multi-entity calculation model.
Take technical factors as an example to illustrate the **calculation process**:
```
In [7]: from zvt.factors.technical_factor import *
In [8]: factor = BullFactor(codes=['000338','601318'],start_timestamp='2019-01-01',end_timestamp='2019-06-10', transformer=MacdTransformer())
```
### data_df
**two-dimensional index** DataFrame read from the schema by query_data.
```
In [11]: factor.data_df
Out[11]:
level high id entity_id open low timestamp close
entity_id timestamp
stock_sh_601318 2019-01-02 1d 54.91 stock_sh_601318_2019-01-02 stock_sh_601318 54.78 53.70 2019-01-02 53.94
2019-01-03 1d 55.06 stock_sh_601318_2019-01-03 stock_sh_601318 53.91 53.82 2019-01-03 54.42
2019-01-04 1d 55.71 stock_sh_601318_2019-01-04 stock_sh_601318 54.03 53.98 2019-01-04 55.31
2019-01-07 1d 55.88 stock_sh_601318_2019-01-07 stock_sh_601318 55.80 54.64 2019-01-07 55.03
2019-01-08 1d 54.83 stock_sh_601318_2019-01-08 stock_sh_601318 54.79 53.96 2019-01-08 54.54
... ... ... ... ... ... ... ... ...
stock_sz_000338 2019-06-03 1d 11.04 stock_sz_000338_2019-06-03 stock_sz_000338 10.93 10.74 2019-06-03 10.81
2019-06-04 1d 10.85 stock_sz_000338_2019-06-04 stock_sz_000338 10.84 10.57 2019-06-04 10.73
2019-06-05 1d 10.92 stock_sz_000338_2019-06-05 stock_sz_000338 10.87 10.59 2019-06-05 10.59
2019-06-06 1d 10.71 stock_sz_000338_2019-06-06 stock_sz_000338 10.59 10.49 2019-06-06 10.65
2019-06-10 1d 11.05 stock_sz_000338_2019-06-10 stock_sz_000338 10.73 10.71 2019-06-10 11.02
[208 rows x 8 columns]
```
### factor_df
**two-dimensional index** DataFrame calculated from data_df by a [transformer](https://github.com/zvtvz/zvt/blob/master/zvt/factors/factor.py#L18),
e.g., MacdTransformer.
```
In [12]: factor.factor_df
Out[12]:
level high id entity_id open low timestamp close diff dea macd
entity_id timestamp
stock_sh_601318 2019-01-02 1d 54.91 stock_sh_601318_2019-01-02 stock_sh_601318 54.78 53.70 2019-01-02 53.94 NaN NaN NaN
2019-01-03 1d 55.06 stock_sh_601318_2019-01-03 stock_sh_601318 53.91 53.82 2019-01-03 54.42 NaN NaN NaN
2019-01-04 1d 55.71 stock_sh_601318_2019-01-04 stock_sh_601318 54.03 53.98 2019-01-04 55.31 NaN NaN NaN
2019-01-07 1d 55.88 stock_sh_601318_2019-01-07 stock_sh_601318 55.80 54.64 2019-01-07 55.03 NaN NaN NaN
2019-01-08 1d 54.83 stock_sh_601318_2019-01-08 stock_sh_601318 54.79 53.96 2019-01-08 54.54 NaN NaN NaN
... ... ... ... ... ... ... ... ... ... ... ...
stock_sz_000338 2019-06-03 1d 11.04 stock_sz_000338_2019-06-03 stock_sz_000338 10.93 10.74 2019-06-03 10.81 -0.121336 -0.145444 0.048215
2019-06-04 1d 10.85 stock_sz_000338_2019-06-04 stock_sz_000338 10.84 10.57 2019-06-04 10.73 -0.133829 -0.143121 0.018583
2019-06-05 1d 10.92 stock_sz_000338_2019-06-05 stock_sz_000338 10.87 10.59 2019-06-05 10.59 -0.153260 -0.145149 -0.016223
2019-06-06 1d 10.71 stock_sz_000338_2019-06-06 stock_sz_000338 10.59 10.49 2019-06-06 10.65 -0.161951 -0.148509 -0.026884
2019-06-10 1d 11.05 stock_sz_000338_2019-06-10 stock_sz_000338 10.73 10.71 2019-06-10 11.02 -0.137399 -0.146287 0.017776
[208 rows x 11 columns]
```
### result_df
**two-dimensional index** DataFrame calculated from factor_df and/or data_df.
It's used by TargetSelector.
e.g., [macd](https://github.com/zvtvz/zvt/blob/master/zvt/factors/technical_factor.py#L56)
```
In [14]: factor.result_df
Out[14]:
filter_result
entity_id timestamp
stock_sh_601318 2019-01-02 False
2019-01-03 False
2019-01-04 False
2019-01-07 False
2019-01-08 False
... ...
stock_sz_000338 2019-06-03 False
2019-06-04 False
2019-06-05 False
2019-06-06 False
2019-06-10 False
[208 rows x 1 columns]
```
The format of result_df is as follows:
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/result_df.png'/></p>
filter_result is True or False; score_result ranges from 0 to 1.
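As a quick reading aid (plain pandas, not the TargetSelector API), the rows that pass the filter can be selected like this:
```
In [15]: factor.result_df[factor.result_df['filter_result']]
```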
Combining the stock picker and backtesting, the whole process is as follows:
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/flow.png'/></p>
## Env settings (optional)
```
>>> from zvt import *
>>> zvt_env
{'zvt_home': '/Users/foolcage/zvt-home',
'data_path': '/Users/foolcage/zvt-home/data',
'tmp_path': '/Users/foolcage/zvt-home/tmp',
'ui_path': '/Users/foolcage/zvt-home/ui',
'log_path': '/Users/foolcage/zvt-home/logs'}
>>> zvt_config
```
* jq_username JoinQuant data username
* jq_password JoinQuant data password
* smtp_host mail server host
* smtp_port mail server port
* email_username smtp email account
* email_password smtp email password
* wechat_app_id
* wechat_app_secrect
```
>>> init_config(current_config=zvt_config, jq_username='xxx', jq_password='yyy')
```
> config others this way: init_config(current_config=zvt_config, **kv)
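For example, to fill in the smtp settings above (placeholder values):
```
>>> init_config(current_config=zvt_config, smtp_host='smtp.example.com', smtp_port=465, email_username='xxx', email_password='yyy')
```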
### History data (optional)
baidu: https://pan.baidu.com/s/1kHAxGSxx8r5IBHe5I7MAmQ code: yb6c
google drive: https://drive.google.com/drive/folders/17Bxijq-PHJYrLDpyvFAm5P6QyhKL-ahn?usp=sharing
It contains daily/weekly post-adjusted (hfq) data, stock valuations, funds and their holdings, financial data, and more.
Unzip the downloaded data into the data_path of your environment (all db files go directly in this directory; there is no hierarchy).
The data can be updated incrementally; downloading the historical data just saves time, and you can also build it all yourself.
#### JoinQuant (optional)
The data can be updated from different providers, which makes the system stable.
https://www.joinquant.com/default/index/sdk?channelId=953cbf5d1b8683f81f0c40c9d4265c0d
> To add other providers, see the [Data extension tutorial](https://zvtvz.github.io/zvt/#/data_extending)
## Development
### Clone
```
git clone https://github.com/zvtvz/zvt.git
```
Set up a virtual env (python>=3.6) and install the requirements:
```
pip3 install -r requirements.txt
pip3 install pytest
```
### Tests
```shell
pytest ./tests
```
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/pytest.jpg'/></p>
Most of the features can be learned from the tests.
## Contribution
[code of conduct](https://github.com/zvtvz/zvt/blob/master/code_of_conduct.md)
1. Pass all unit tests; if it is a new feature, please add a unit test for it
2. Comply with the development conventions
3. If necessary, please update the corresponding documentation
Developers are also very welcome to provide more examples for zvt, and work together to improve the documentation.
## Buy me a coffee
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/alipay-cn.png" width="25%" alt="Alipay">
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat-cn.png" width="25%" alt="Wechat">
## Contact
WeChat: foolcage
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat.jpeg" width="25%" alt="Wechat">
------
WeChat official account:
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/gongzhonghao.jpg" width="25%" alt="Wechat">
Zhihu column:
https://zhuanlan.zhihu.com/automoney
## Thanks
<p><a href=https://www.jetbrains.com/?from=zvt><img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/jetbrains.png" width="25%" alt="jetbrains"></a></p>
|
zvt
|
/zvt-0.10.4.tar.gz/zvt-0.10.4/README.md
|
README.md
|
[](https://github.com/zvtvz/zvt)
[](https://pypi.org/project/zvt/)
[](https://pypi.org/project/zvt/)
[](https://pypi.org/project/zvt/)
[](https://github.com/zvtvz/zvt/actions/workflows/build.yml)
[](https://github.com/zvtvz/zvt/actions/workflows/package.yaml)
[](https://zvt.readthedocs.io/en/latest/?badge=latest)
[](https://codecov.io/github/zvtvz/zvt)
[](https://pepy.tech/project/zvt)
**Read this in other languages: [English](README.md).**
**Full documentation: [https://zvt.readthedocs.io/en/latest/](https://zvt.readthedocs.io/en/latest/)**
## Market model
ZVT abstracts the market into the following model:
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/view.png'/></p>
* TradableEntity (tradable instrument)
* ActorEntity (market participant)
* EntityEvent (events happening to tradable instruments and market participants)
## Quick start
### Install
```
python3 -m pip install -U zvt
```
### Usage demo
#### Main UI
After installation, enter zvt on the command line:
```shell
zvt
```
Open [http://127.0.0.1:8050/](http://127.0.0.1:8050/)
> The examples shown here depend on the historical data download below; see the later docs for data updates
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/zvt-factor.png'/></p>
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/zvt-trader.png'/></p>
> The core concepts of the system are visual, and the UI names correspond to them one-to-one, so the UI is uniform and extensible.
> You can write and run strategies in your favorite IDE, then view the related targets, factors, signals and net value on the UI.
#### Behold, the power of zvt
```
>>> from zvt.domain import Stock, Stock1dHfqKdata
>>> from zvt.ml import MaStockMLMachine
>>> Stock.record_data(provider="em")
>>> entity_ids = ["stock_sz_000001", "stock_sz_000338", "stock_sh_601318"]
>>> Stock1dHfqKdata.record_data(provider="em", entity_ids=entity_ids, sleeping_time=1)
>>> machine = MaStockMLMachine(entity_ids=["stock_sz_000001"], data_provider="em")
>>> machine.train()
>>> machine.predict()
>>> machine.draw_result(entity_id="stock_sz_000001")
```
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/pred_close.png'/></p>
> The few lines of code above have done: data capture, persistence, incremental update, machine learning, prediction, and result display.
> Once you are familiar with the core concepts of the system, you can apply them to any target in the market.
### Core concepts
```
>>> from zvt.domain import *
```
### TradableEntity (tradable instrument)
#### China A-share instruments
```
>>> Stock.record_data()
>>> df = Stock.query_data(index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
000001 stock_sz_000001 stock_sz_000001 1991-04-03 stock sz 000001 平安银行 1991-04-03 None
000002 stock_sz_000002 stock_sz_000002 1991-01-29 stock sz 000002 万 科A 1991-01-29 None
000004 stock_sz_000004 stock_sz_000004 1990-12-01 stock sz 000004 国华网安 1990-12-01 None
000005 stock_sz_000005 stock_sz_000005 1990-12-10 stock sz 000005 世纪星源 1990-12-10 None
000006 stock_sz_000006 stock_sz_000006 1992-04-27 stock sz 000006 深振业A 1992-04-27 None
... ... ... ... ... ... ... ... ... ...
605507 stock_sh_605507 stock_sh_605507 2021-08-02 stock sh 605507 国邦医药 2021-08-02 None
605577 stock_sh_605577 stock_sh_605577 2021-08-24 stock sh 605577 龙版传媒 2021-08-24 None
605580 stock_sh_605580 stock_sh_605580 2021-08-19 stock sh 605580 恒盛能源 2021-08-19 None
605588 stock_sh_605588 stock_sh_605588 2021-08-12 stock sh 605588 冠石科技 2021-08-12 None
605589 stock_sh_605589 stock_sh_605589 2021-08-10 stock sh 605589 圣泉集团 2021-08-10 None
[4136 rows x 9 columns]
```
#### US stock instruments
```
>>> Stockus.record_data()
>>> df = Stockus.query_data(index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
A stockus_nyse_A stockus_nyse_A NaT stockus nyse A 安捷伦 None None
AA stockus_nyse_AA stockus_nyse_AA NaT stockus nyse AA 美国铝业 None None
AAC stockus_nyse_AAC stockus_nyse_AAC NaT stockus nyse AAC Ares Acquisition Corp-A None None
AACG stockus_nasdaq_AACG stockus_nasdaq_AACG NaT stockus nasdaq AACG ATA Creativity Global ADR None None
AACG stockus_nyse_AACG stockus_nyse_AACG NaT stockus nyse AACG ATA Creativity Global ADR None None
... ... ... ... ... ... ... ... ... ...
ZWRK stockus_nasdaq_ZWRK stockus_nasdaq_ZWRK NaT stockus nasdaq ZWRK Z-Work Acquisition Corp-A None None
ZY stockus_nasdaq_ZY stockus_nasdaq_ZY NaT stockus nasdaq ZY Zymergen Inc None None
ZYME stockus_nyse_ZYME stockus_nyse_ZYME NaT stockus nyse ZYME Zymeworks Inc None None
ZYNE stockus_nasdaq_ZYNE stockus_nasdaq_ZYNE NaT stockus nasdaq ZYNE Zynerba Pharmaceuticals Inc None None
ZYXI stockus_nasdaq_ZYXI stockus_nasdaq_ZYXI NaT stockus nasdaq ZYXI Zynex Inc None None
[5826 rows x 9 columns]
>>> Stockus.query_data(code='AAPL')
id entity_id timestamp entity_type exchange code name list_date end_date
0 stockus_nasdaq_AAPL stockus_nasdaq_AAPL None stockus nasdaq AAPL 苹果 None None
```
#### Hong Kong stock instruments
```
>>> Stockhk.record_data()
>>> df = Stockhk.query_data(index='code')
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date
code
00001 stockhk_hk_00001 stockhk_hk_00001 NaT stockhk hk 00001 长和 None None
00002 stockhk_hk_00002 stockhk_hk_00002 NaT stockhk hk 00002 中电控股 None None
00003 stockhk_hk_00003 stockhk_hk_00003 NaT stockhk hk 00003 香港中华煤气 None None
00004 stockhk_hk_00004 stockhk_hk_00004 NaT stockhk hk 00004 九龙仓集团 None None
00005 stockhk_hk_00005 stockhk_hk_00005 NaT stockhk hk 00005 汇丰控股 None None
... ... ... ... ... ... ... ... ... ...
09996 stockhk_hk_09996 stockhk_hk_09996 NaT stockhk hk 09996 沛嘉医疗-B None None
09997 stockhk_hk_09997 stockhk_hk_09997 NaT stockhk hk 09997 康基医疗 None None
09998 stockhk_hk_09998 stockhk_hk_09998 NaT stockhk hk 09998 光荣控股 None None
09999 stockhk_hk_09999 stockhk_hk_09999 NaT stockhk hk 09999 网易-S None None
80737 stockhk_hk_80737 stockhk_hk_80737 NaT stockhk hk 80737 湾区发展-R None None
[2597 rows x 9 columns]
>>> df[df.code=='00700']
id entity_id timestamp entity_type exchange code name list_date end_date
2112 stockhk_hk_00700 stockhk_hk_00700 None stockhk hk 00700 腾讯控股 None None
```
#### And more
```
>>> from zvt.contract import *
>>> zvt_context.tradable_schema_map
{'stockus': zvt.domain.meta.stockus_meta.Stockus,
'stockhk': zvt.domain.meta.stockhk_meta.Stockhk,
'index': zvt.domain.meta.index_meta.Index,
'etf': zvt.domain.meta.etf_meta.Etf,
'stock': zvt.domain.meta.stock_meta.Stock,
'block': zvt.domain.meta.block_meta.Block,
'fund': zvt.domain.meta.fund_meta.Fund}
```
The key is the type of tradable instrument and the value is its schema; the system provides unified **record (record_data)** and **query (query_data)** methods for every schema.
```
>>> Index.record_data()
>>> df = Index.query_data(filters=[Index.category == 'scope', Index.exchange == 'sh'])
>>> print(df)
id entity_id timestamp entity_type exchange code name list_date end_date publisher category base_point
0 index_sh_000001 index_sh_000001 1990-12-19 index sh 000001 上证指数 1991-07-15 None csindex scope 100.00
1 index_sh_000002 index_sh_000002 1990-12-19 index sh 000002 A股指数 1992-02-21 None csindex scope 100.00
2 index_sh_000003 index_sh_000003 1992-02-21 index sh 000003 B股指数 1992-08-17 None csindex scope 100.00
3 index_sh_000010 index_sh_000010 2002-06-28 index sh 000010 上证180 2002-07-01 None csindex scope 3299.06
4 index_sh_000016 index_sh_000016 2003-12-31 index sh 000016 上证50 2004-01-02 None csindex scope 1000.00
.. ... ... ... ... ... ... ... ... ... ... ... ...
25 index_sh_000020 index_sh_000020 2007-12-28 index sh 000020 中型综指 2008-05-12 None csindex scope 1000.00
26 index_sh_000090 index_sh_000090 2009-12-31 index sh 000090 上证流通 2010-12-02 None csindex scope 1000.00
27 index_sh_930903 index_sh_930903 2012-12-31 index sh 930903 中证A股 2016-10-18 None csindex scope 1000.00
28 index_sh_000688 index_sh_000688 2019-12-31 index sh 000688 科创50 2020-07-23 None csindex scope 1000.00
29 index_sh_931643 index_sh_931643 2019-12-31 index sh 931643 科创创业50 2021-06-01 None csindex scope 1000.00
[30 rows x 12 columns]
```
### EntityEvent (events on tradable instruments)
With tradable instruments in place, we can record the events that happen to them.
#### Quote data
The **quote schemas** of a tradable instrument follow this naming rule:
```
{entity_schema}{level}{adjust_type}Kdata
```
* entity_schema
The TradableEntity described above, e.g. Stock, Stockus.
* level
```
>>> for level in IntervalLevel:
print(level.value)
```
* adjust type
```
>>> for adjust_type in AdjustType:
print(adjust_type.value)
```
> Note: for backward compatibility with historical data, pre-adjusted (qfq) quotes are the exception: {adjust_type} is left empty.
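For example, composing names by this rule (a trivial illustration using schema names that appear below):
```
>>> entity_schema, level, adjust_type = "Stock", "1d", "Hfq"
>>> f"{entity_schema}{level}{adjust_type}Kdata"
'Stock1dHfqKdata'
>>> f"{entity_schema}{level}Kdata"  # the qfq exception: {adjust_type} omitted
'Stock1dKdata'
```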
Pre-adjusted (qfq)
```
>>> Stock1dKdata.record_data(code='000338', provider='em')
>>> df = Stock1dKdata.query_data(code='000338', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stock_sz_000338_2007-04-30 stock_sz_000338 2007-04-30 None 000338 潍柴动力 1d 2.33 2.00 2.40 1.87 207375.0 1.365189e+09 3.2472 0.1182
1 stock_sz_000338_2007-05-08 stock_sz_000338 2007-05-08 None 000338 潍柴动力 1d 2.11 1.94 2.20 1.87 86299.0 5.563198e+08 -0.0300 0.0492
2 stock_sz_000338_2007-05-09 stock_sz_000338 2007-05-09 None 000338 潍柴动力 1d 1.90 1.81 1.94 1.66 93823.0 5.782065e+08 -0.0670 0.0535
3 stock_sz_000338_2007-05-10 stock_sz_000338 2007-05-10 None 000338 潍柴动力 1d 1.78 1.85 1.98 1.75 47720.0 2.999226e+08 0.0221 0.0272
4 stock_sz_000338_2007-05-11 stock_sz_000338 2007-05-11 None 000338 潍柴动力 1d 1.81 1.73 1.81 1.66 39273.0 2.373126e+08 -0.0649 0.0224
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3426 stock_sz_000338_2021-08-27 stock_sz_000338 2021-08-27 None 000338 潍柴动力 1d 19.39 20.30 20.30 19.25 1688497.0 3.370241e+09 0.0601 0.0398
3427 stock_sz_000338_2021-08-30 stock_sz_000338 2021-08-30 None 000338 潍柴动力 1d 20.30 20.09 20.31 19.78 1187601.0 2.377957e+09 -0.0103 0.0280
3428 stock_sz_000338_2021-08-31 stock_sz_000338 2021-08-31 None 000338 潍柴动力 1d 20.20 20.07 20.63 19.70 1143985.0 2.295195e+09 -0.0010 0.0270
3429 stock_sz_000338_2021-09-01 stock_sz_000338 2021-09-01 None 000338 潍柴动力 1d 19.98 19.68 19.98 19.15 1218697.0 2.383841e+09 -0.0194 0.0287
3430 stock_sz_000338_2021-09-02 stock_sz_000338 2021-09-02 None 000338 潍柴动力 1d 19.71 19.85 19.97 19.24 1023545.0 2.012006e+09 0.0086 0.0241
[3431 rows x 15 columns]
>>> Stockus1dKdata.record_data(code='AAPL', provider='em')
>>> df = Stockus1dKdata.query_data(code='AAPL', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stockus_nasdaq_AAPL_1984-09-07 stockus_nasdaq_AAPL 1984-09-07 None AAPL 苹果 1d -5.59 -5.59 -5.58 -5.59 2981600.0 0.000000e+00 0.0000 0.0002
1 stockus_nasdaq_AAPL_1984-09-10 stockus_nasdaq_AAPL 1984-09-10 None AAPL 苹果 1d -5.59 -5.59 -5.58 -5.59 2346400.0 0.000000e+00 0.0000 0.0001
2 stockus_nasdaq_AAPL_1984-09-11 stockus_nasdaq_AAPL 1984-09-11 None AAPL 苹果 1d -5.58 -5.58 -5.58 -5.58 5444000.0 0.000000e+00 0.0018 0.0003
3 stockus_nasdaq_AAPL_1984-09-12 stockus_nasdaq_AAPL 1984-09-12 None AAPL 苹果 1d -5.58 -5.59 -5.58 -5.59 4773600.0 0.000000e+00 -0.0018 0.0003
4 stockus_nasdaq_AAPL_1984-09-13 stockus_nasdaq_AAPL 1984-09-13 None AAPL 苹果 1d -5.58 -5.58 -5.58 -5.58 7429600.0 0.000000e+00 0.0018 0.0004
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
8765 stockus_nasdaq_AAPL_2021-08-27 stockus_nasdaq_AAPL 2021-08-27 None AAPL 苹果 1d 147.48 148.60 148.75 146.83 55802388.0 8.265452e+09 0.0072 0.0034
8766 stockus_nasdaq_AAPL_2021-08-30 stockus_nasdaq_AAPL 2021-08-30 None AAPL 苹果 1d 149.00 153.12 153.49 148.61 90956723.0 1.383762e+10 0.0304 0.0055
8767 stockus_nasdaq_AAPL_2021-08-31 stockus_nasdaq_AAPL 2021-08-31 None AAPL 苹果 1d 152.66 151.83 152.80 151.29 86453117.0 1.314255e+10 -0.0084 0.0052
8768 stockus_nasdaq_AAPL_2021-09-01 stockus_nasdaq_AAPL 2021-09-01 None AAPL 苹果 1d 152.83 152.51 154.98 152.34 80313711.0 1.235321e+10 0.0045 0.0049
8769 stockus_nasdaq_AAPL_2021-09-02 stockus_nasdaq_AAPL 2021-09-02 None AAPL 苹果 1d 153.87 153.65 154.72 152.40 71171317.0 1.093251e+10 0.0075 0.0043
[8770 rows x 15 columns]
```
Post-adjusted (hfq)
```
>>> Stock1dHfqKdata.record_data(code='000338', provider='em')
>>> df = Stock1dHfqKdata.query_data(code='000338', provider='em')
>>> print(df)
id entity_id timestamp provider code name level open close high low volume turnover change_pct turnover_rate
0 stock_sz_000338_2007-04-30 stock_sz_000338 2007-04-30 None 000338 潍柴动力 1d 70.00 64.93 71.00 62.88 207375.0 1.365189e+09 2.1720 0.1182
1 stock_sz_000338_2007-05-08 stock_sz_000338 2007-05-08 None 000338 潍柴动力 1d 66.60 64.00 68.00 62.88 86299.0 5.563198e+08 -0.0143 0.0492
2 stock_sz_000338_2007-05-09 stock_sz_000338 2007-05-09 None 000338 潍柴动力 1d 63.32 62.00 63.88 59.60 93823.0 5.782065e+08 -0.0313 0.0535
3 stock_sz_000338_2007-05-10 stock_sz_000338 2007-05-10 None 000338 潍柴动力 1d 61.50 62.49 64.48 61.01 47720.0 2.999226e+08 0.0079 0.0272
4 stock_sz_000338_2007-05-11 stock_sz_000338 2007-05-11 None 000338 潍柴动力 1d 61.90 60.65 61.90 59.70 39273.0 2.373126e+08 -0.0294 0.0224
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
3426 stock_sz_000338_2021-08-27 stock_sz_000338 2021-08-27 None 000338 潍柴动力 1d 331.97 345.95 345.95 329.82 1688497.0 3.370241e+09 0.0540 0.0398
3427 stock_sz_000338_2021-08-30 stock_sz_000338 2021-08-30 None 000338 潍柴动力 1d 345.95 342.72 346.10 337.96 1187601.0 2.377957e+09 -0.0093 0.0280
3428 stock_sz_000338_2021-08-31 stock_sz_000338 2021-08-31 None 000338 潍柴动力 1d 344.41 342.41 351.02 336.73 1143985.0 2.295195e+09 -0.0009 0.0270
3429 stock_sz_000338_2021-09-01 stock_sz_000338 2021-09-01 None 000338 潍柴动力 1d 341.03 336.42 341.03 328.28 1218697.0 2.383841e+09 -0.0175 0.0287
3430 stock_sz_000338_2021-09-02 stock_sz_000338 2021-09-02 None 000338 潍柴动力 1d 336.88 339.03 340.88 329.67 1023545.0 2.012006e+09 0.0078 0.0241
[3431 rows x 15 columns]
```
#### Finance factors
```
>>> FinanceFactor.record_data(code='000338')
>>> FinanceFactor.query_data(code='000338',columns=FinanceFactor.important_cols(),index='timestamp')
basic_eps total_op_income net_profit op_income_growth_yoy net_profit_growth_yoy roe rota gross_profit_margin net_margin timestamp
timestamp
2002-12-31 NaN 1.962000e+07 2.471000e+06 NaN NaN NaN NaN 0.2068 0.1259 2002-12-31
2003-12-31 1.27 3.574000e+09 2.739000e+08 181.2022 109.8778 0.7729 0.1783 0.2551 0.0766 2003-12-31
2004-12-31 1.75 6.188000e+09 5.369000e+08 0.7313 0.9598 0.3245 0.1474 0.2489 0.0868 2004-12-31
2005-12-31 0.93 5.283000e+09 3.065000e+08 -0.1463 -0.4291 0.1327 0.0603 0.2252 0.0583 2005-12-31
2006-03-31 0.33 1.859000e+09 1.079000e+08 NaN NaN NaN NaN NaN 0.0598 2006-03-31
... ... ... ... ... ... ... ... ... ... ...
2020-08-28 0.59 9.449000e+10 4.680000e+09 0.0400 -0.1148 0.0983 0.0229 0.1958 0.0603 2020-08-28
2020-10-31 0.90 1.474000e+11 7.106000e+09 0.1632 0.0067 0.1502 0.0347 0.1949 0.0590 2020-10-31
2021-03-31 1.16 1.975000e+11 9.207000e+09 0.1327 0.0112 0.1919 0.0444 0.1931 0.0571 2021-03-31
2021-04-30 0.42 6.547000e+10 3.344000e+09 0.6788 0.6197 0.0622 0.0158 0.1916 0.0667 2021-04-30
2021-08-31 0.80 1.264000e+11 6.432000e+09 0.3375 0.3742 0.1125 0.0287 0.1884 0.0653 2021-08-31
[66 rows x 10 columns]
```
#### The three financial statements
```
# balance sheet
>>> BalanceSheet.record_data(code='000338')
# income statement
>>> IncomeStatement.record_data(code='000338')
# cash flow statement
>>> CashFlowStatement.record_data(code='000338')
```
#### And more
```
>>> zvt_context.schemas
[zvt.domain.dividend_financing.DividendFinancing,
zvt.domain.dividend_financing.DividendDetail,
zvt.domain.dividend_financing.SpoDetail...]
```
zvt_context.schemas lists all schemas supported by the system. A schema is a table structure, i.e. the data itself. You can look up field meanings in two ways:
* help
Type schema. and press tab to list its fields, or call .help():
```
>>> FinanceFactor.help()
```
* Source code
The files under [domain](https://github.com/zvtvz/zvt/tree/master/zvt/domain) define the schemas; check the comments on the relevant fields.
From the examples above, you should now have the unified way of recording data:
> Schema.record_data(provider='your provider',codes='the codes')
Note the optional provider parameter, which names the data provider. A schema can have multiple providers, which is the cornerstone of the system's stability.
List the **implemented** providers:
```
>>> Stock.provider_map_recorder
{'joinquant': zvt.recorders.joinquant.meta.jq_stock_meta_recorder.JqChinaStockRecorder,
'exchange': zvt.recorders.exchange.exchange_stock_meta_recorder.ExchangeStockMetaRecorder,
'em': zvt.recorders.em.meta.em_stock_meta_recorder.EMStockRecorder,
'eastmoney': zvt.recorders.eastmoney.meta.eastmoney_stock_meta_recorder.EastmoneyChinaStockListRecorder}
```
You can fetch data with any of the providers; the first one is used by default.
Another example, fetching stock block (sector) data:
```
>>> Block.provider_map_recorder
{'eastmoney': zvt.recorders.eastmoney.meta.eastmoney_block_meta_recorder.EastmoneyChinaBlockRecorder,
'sina': zvt.recorders.sina.meta.sina_block_recorder.SinaBlockRecorder}
>>> Block.record_data(provider='sina')
Block registered recorders:{'eastmoney': <class 'zvt.recorders.eastmoney.meta.china_stock_category_recorder.EastmoneyChinaBlockRecorder'>, 'sina': <class 'zvt.recorders.sina.meta.sina_china_stock_category_recorder.SinaChinaBlockRecorder'>}
2020-03-04 23:56:48,931 INFO MainThread finish record sina blocks:industry
2020-03-04 23:56:49,450 INFO MainThread finish record sina blocks:concept
```
A bit more about record_data:
* The code (single) and codes (multiple) parameters specify the stock codes to fetch.
* If neither code nor codes is passed, the whole market is fetched.
* The method persists data locally and performs only incremental updates.
For updating on a schedule, see the [data runner examples](https://github.com/zvtvz/zvt/blob/master/examples/data_runner); a minimal sketch follows below.
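A minimal sketch of such a scheduled incremental update (a crude stdlib loop for illustration only; the linked examples show the project's actual runners):
```
import time
from zvt.domain import Stock, Stock1dHfqKdata

while True:
    Stock.record_data(provider="em")  # instrument list; incremental
    Stock1dHfqKdata.record_data(provider="em", sleeping_time=1)  # whole-market kdata
    time.sleep(24 * 60 * 60)  # rerun roughly once a day
```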
#### Whole-market stock screening
Queries go through the query_data method; once whole-market data has been recorded, you can quickly query whatever you need locally.
An example: the top 20 stocks from 2018 annual reports with roe > 8% and revenue growth > 8%
```
>>> df=FinanceFactor.query_data(filters=[FinanceFactor.roe>0.08,FinanceFactor.report_period=='year',FinanceFactor.op_income_growth_yoy>0.08],start_timestamp='2019-01-01',order=FinanceFactor.roe.desc(),limit=20,columns=["code"]+FinanceFactor.important_cols(),index='code')
code basic_eps total_op_income net_profit op_income_growth_yoy net_profit_growth_yoy roe rota gross_profit_margin net_margin timestamp
code
000048 000048 2.7350 4.919000e+09 1.101000e+09 0.4311 1.5168 0.7035 0.1988 0.5243 0.2355 2020-04-30
000912 000912 0.3500 4.405000e+09 3.516000e+08 0.1796 1.2363 4.7847 0.0539 0.2175 0.0795 2019-03-20
002207 002207 0.2200 3.021000e+08 5.189000e+07 0.1600 1.1526 1.1175 0.1182 0.1565 0.1718 2020-04-27
002234 002234 5.3300 3.276000e+09 1.610000e+09 0.8023 3.2295 0.8361 0.5469 0.5968 0.4913 2020-04-21
002458 002458 3.7900 3.584000e+09 2.176000e+09 1.4326 4.9973 0.8318 0.6754 0.6537 0.6080 2020-02-20
... ... ... ... ... ... ... ... ... ... ... ...
600701 600701 -3.6858 7.830000e+08 -3.814000e+09 1.3579 -0.0325 1.9498 -0.7012 0.4173 -4.9293 2020-04-29
600747 600747 -1.5600 3.467000e+08 -2.290000e+09 2.1489 -0.4633 3.1922 -1.5886 0.0378 -6.6093 2020-06-30
600793 600793 1.6568 1.293000e+09 1.745000e+08 0.1164 0.8868 0.7490 0.0486 0.1622 0.1350 2019-04-30
600870 600870 0.0087 3.096000e+07 4.554000e+06 0.7773 1.3702 0.7458 0.0724 0.2688 0.1675 2019-03-30
688169 688169 15.6600 4.205000e+09 7.829000e+08 0.3781 1.5452 0.7172 0.4832 0.3612 0.1862 2020-04-28
[20 rows x 11 columns]
```
By now you should be able to answer these three questions:
* What data is there?
* How do I record data?
* How do I query data?
For more advanced usage and for extending the data, see the data section of the detailed documentation.
### Writing a strategy
With **tradable instruments** and **the events that happen to them**, you can write strategies.
A strategy backtest is nothing more than repeating the following process:
#### At some point in time, find the instruments that meet your conditions, trade them, and watch how they perform.
The system supports two modes:
* solo (casual)
At some time, compute conditions from the events that occurred, then buy or sell.
* formal
The system's two-dimensionally indexed, multi-instrument computing model.
#### A casual one (solo)
Well, this strategy really is casual, just like what we do most of the time:
> When a report comes out, I take a look; if institutions added more than 5% to their positions I buy, and if they cut more than 50% I sell.
The code:
```
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.api import get_recent_report_date
from zvt.contract import ActorType, AdjustType
from zvt.domain import StockActorSummary, Stock1dKdata
from zvt.trader import StockTrader
from zvt.utils import pd_is_not_null, is_same_date, to_pd_timestamp
class FollowIITrader(StockTrader):
finish_date = None
def on_time(self, timestamp: pd.Timestamp):
recent_report_date = to_pd_timestamp(get_recent_report_date(timestamp))
if self.finish_date and is_same_date(recent_report_date, self.finish_date):
return
filters = [StockActorSummary.actor_type == ActorType.raised_fund.value,
StockActorSummary.report_date == recent_report_date]
if self.entity_ids:
filters = filters + [StockActorSummary.entity_id.in_(self.entity_ids)]
df = StockActorSummary.query_data(filters=filters)
if pd_is_not_null(df):
self.logger.info(f'{df}')
self.finish_date = recent_report_date
long_df = df[df['change_ratio'] > 0.05]
short_df = df[df['change_ratio'] < -0.5]
try:
self.trade_the_targets(due_timestamp=timestamp, happen_timestamp=timestamp,
long_selected=set(long_df['entity_id'].to_list()),
short_selected=set(short_df['entity_id'].to_list()))
except Exception as e:
self.logger.error(e)
if __name__ == '__main__':
entity_id = 'stock_sh_600519'
Stock1dKdata.record_data(entity_id=entity_id, provider='em')
StockActorSummary.record_data(entity_id=entity_id, provider='em')
FollowIITrader(start_timestamp='2002-01-01', end_timestamp='2021-01-01', entity_ids=[entity_id],
provider='em', adjust_type=AdjustType.qfq, profit_threshold=None).run()
```
So writing a strategy is actually pretty simple.
Use your imagination: buy whatever social-security funds are loading up on, buy whatever foreign capital is loading up on, sell when the chairman runs off with his sister-in-law......
Then refresh [http://127.0.0.1:8050/](http://127.0.0.1:8050/) to see the performance of your running strategy.
See the [strategy examples](https://github.com/zvtvz/zvt/tree/master/examples/trader) for more.
#### Getting serious (formal)
Simple computations can be done through query_data; this section is about the system's two-dimensionally indexed, multi-instrument computing model.
The **computing flow** is explained below using a technical factor as the example:
```
In [7]: from zvt.factors.technical_factor import *
In [8]: factor = BullFactor(codes=['000338','601318'],start_timestamp='2019-01-01',end_timestamp='2019-06-10', transformer=MacdTransformer())
```
### data_df
data_df is the factor's raw data, i.e. what query_data read from the database: a **two-dimensionally indexed** DataFrame.
```
In [11]: factor.data_df
Out[11]:
level high id entity_id open low timestamp close
entity_id timestamp
stock_sh_601318 2019-01-02 1d 54.91 stock_sh_601318_2019-01-02 stock_sh_601318 54.78 53.70 2019-01-02 53.94
2019-01-03 1d 55.06 stock_sh_601318_2019-01-03 stock_sh_601318 53.91 53.82 2019-01-03 54.42
2019-01-04 1d 55.71 stock_sh_601318_2019-01-04 stock_sh_601318 54.03 53.98 2019-01-04 55.31
2019-01-07 1d 55.88 stock_sh_601318_2019-01-07 stock_sh_601318 55.80 54.64 2019-01-07 55.03
2019-01-08 1d 54.83 stock_sh_601318_2019-01-08 stock_sh_601318 54.79 53.96 2019-01-08 54.54
... ... ... ... ... ... ... ... ...
stock_sz_000338 2019-06-03 1d 11.04 stock_sz_000338_2019-06-03 stock_sz_000338 10.93 10.74 2019-06-03 10.81
2019-06-04 1d 10.85 stock_sz_000338_2019-06-04 stock_sz_000338 10.84 10.57 2019-06-04 10.73
2019-06-05 1d 10.92 stock_sz_000338_2019-06-05 stock_sz_000338 10.87 10.59 2019-06-05 10.59
2019-06-06 1d 10.71 stock_sz_000338_2019-06-06 stock_sz_000338 10.59 10.49 2019-06-06 10.65
2019-06-10 1d 11.05 stock_sz_000338_2019-06-10 stock_sz_000338 10.73 10.71 2019-06-10 11.02
[208 rows x 8 columns]
```
### factor_df
factor_df is the data obtained by running the transformer over data_df. Designing a factor means extending [transformer](https://github.com/zvtvz/zvt/blob/master/zvt/factors/factor.py#L18); this example uses MacdTransformer().
```
In [12]: factor.factor_df
Out[12]:
level high id entity_id open low timestamp close diff dea macd
entity_id timestamp
stock_sh_601318 2019-01-02 1d 54.91 stock_sh_601318_2019-01-02 stock_sh_601318 54.78 53.70 2019-01-02 53.94 NaN NaN NaN
2019-01-03 1d 55.06 stock_sh_601318_2019-01-03 stock_sh_601318 53.91 53.82 2019-01-03 54.42 NaN NaN NaN
2019-01-04 1d 55.71 stock_sh_601318_2019-01-04 stock_sh_601318 54.03 53.98 2019-01-04 55.31 NaN NaN NaN
2019-01-07 1d 55.88 stock_sh_601318_2019-01-07 stock_sh_601318 55.80 54.64 2019-01-07 55.03 NaN NaN NaN
2019-01-08 1d 54.83 stock_sh_601318_2019-01-08 stock_sh_601318 54.79 53.96 2019-01-08 54.54 NaN NaN NaN
... ... ... ... ... ... ... ... ... ... ... ...
stock_sz_000338 2019-06-03 1d 11.04 stock_sz_000338_2019-06-03 stock_sz_000338 10.93 10.74 2019-06-03 10.81 -0.121336 -0.145444 0.048215
2019-06-04 1d 10.85 stock_sz_000338_2019-06-04 stock_sz_000338 10.84 10.57 2019-06-04 10.73 -0.133829 -0.143121 0.018583
2019-06-05 1d 10.92 stock_sz_000338_2019-06-05 stock_sz_000338 10.87 10.59 2019-06-05 10.59 -0.153260 -0.145149 -0.016223
2019-06-06 1d 10.71 stock_sz_000338_2019-06-06 stock_sz_000338 10.59 10.49 2019-06-06 10.65 -0.161951 -0.148509 -0.026884
2019-06-10 1d 11.05 stock_sz_000338_2019-06-10 stock_sz_000338 10.73 10.71 2019-06-10 11.02 -0.137399 -0.146287 0.017776
[208 rows x 11 columns]
```
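As an illustration, a custom transformer could look like the sketch below (the Transformer base class is taken from the factor module linked above; treat the exact hook signature as an assumption to verify against the source):
```
import pandas as pd
from zvt.factors.factor import Transformer  # import path per the link above

class Ma5Transformer(Transformer):
    """A hypothetical transformer adding a 5-day moving-average column."""

    def transform(self, input_df: pd.DataFrame) -> pd.DataFrame:
        # input_df is indexed by (entity_id, timestamp); group by entity_id
        # so the rolling window never crosses instruments
        input_df["ma5"] = input_df.groupby(level=0)["close"].transform(
            lambda s: s.rolling(5).mean()
        )
        return input_df
```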
### result_df
result_df is a **two-dimensionally indexed** DataFrame usable by the stock selector; it is computed from data_df or factor_df.
In this example, after computing MACD, factor_df is used: the value is True when the DIFF and DEA lines are above the 0 axis, and False otherwise ([code](https://github.com/zvtvz/zvt/blob/master/zvt/factors/technical_factor.py#L56)).
```
In [14]: factor.result_df
Out[14]:
score
entity_id timestamp
stock_sh_601318 2019-01-02 False
2019-01-03 False
2019-01-04 False
2019-01-07 False
2019-01-08 False
... ...
stock_sz_000338 2019-06-03 False
2019-06-04 False
2019-06-05 False
2019-06-06 False
2019-06-10 False
[208 rows x 1 columns]
```
The format of result_df is as follows:
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/result_df.png'/></p>
filter_result is True or False; score_result takes values from 0 to 1.
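For intuition, a toy result_df with this shape can be built directly with pandas (this only mimics the format; real ones come from the factor computation above):
```
import pandas as pd

index = pd.MultiIndex.from_product(
    [["stock_sz_000338"], pd.to_datetime(["2019-06-06", "2019-06-10"])],
    names=["entity_id", "timestamp"],
)
result_df = pd.DataFrame({"score": [False, True]}, index=index)  # filter-style result
```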
Combined with the stock selector and backtesting, the whole flow looks like this:
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/flow.png'/></p>
## Environment settings (optional)
```
>>> from zvt import *
>>> zvt_env
{'zvt_home': '/Users/foolcage/zvt-home',
'data_path': '/Users/foolcage/zvt-home/data',
'tmp_path': '/Users/foolcage/zvt-home/tmp',
'ui_path': '/Users/foolcage/zvt-home/ui',
'log_path': '/Users/foolcage/zvt-home/logs'}
>>> zvt_config
```
* jq_username: JoinQuant data username
* jq_password: JoinQuant data password
* smtp_host: mail server host
* smtp_port: mail server port
* email_username: SMTP mail account
* email_password: SMTP mail password
* wechat_app_id
* wechat_app_secrect
```
>>> init_config(current_config=zvt_config, jq_username='xxx', jq_password='yyy')
```
> The general configuration pattern is: init_config(current_config=zvt_config, **kv)
### Downloading historical data (optional)
Baidu Netdisk: https://pan.baidu.com/s/1kHAxGSxx8r5IBHe5I7MAmQ extraction code: yb6c
Google Drive: https://drive.google.com/drive/folders/17Bxijq-PHJYrLDpyvFAm5P6QyhKL-ahn?usp=sharing
The archive contains JoinQuant daily/weekly post-adjusted kdata, per-stock valuations, funds and their holdings, and Eastmoney financial data, among others.
Unzip the downloaded data into the data_path of your live environment (put all db files directly in that directory, with no subdirectories).
Updates are incremental; downloading the historical data only saves time, and updating everything yourself also works.
#### Registering with JoinQuant (optional)
The project's data supports multiple providers; on top of schema consistency, providers can be chosen and extended as needed. Free data from Sina, Eastmoney, the exchanges, and others is currently supported.
> By design, providers adapt to the schema rather than the other way around, so if one provider becomes unavailable, you can simply switch to another without affecting the rest of the system.
The drawbacks of free data are obvious, though: it is unstable, crawling and cleaning it is time-consuming, maintenance is costly, and it may stop working at any time.
Personal advice: for learning and research, free data is fine; if you are serious about quant investing, pick a reliable data provider.
The project supports JoinQuant data; you can apply for access via the link below (currently free for one year):
https://www.joinquant.com/default/index/sdk?channelId=953cbf5d1b8683f81f0c40c9d4265c0d
> Most of the free data in the project is currently quite stable and has been tested rigorously; the Eastmoney data in particular can be used with confidence.
> To add other data providers, see the [data extension tutorial](https://zvtvz.github.io/zvt/#/data_extending)
## Development
### Clone the code
```
git clone https://github.com/zvtvz/zvt.git
```
Set up the project's virtual env (python >= 3.6) and install the dependencies:
```
pip3 install -r requirements.txt
pip3 install pytest
```
### Test cases
Import the project into PyCharm (recommended; any other IDE works too), then run the test cases with pytest.
<p align="center"><img src='https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/pytest.jpg'/></p>
Most feature usage can be learned from the tests.
## Contributing
More developers joining zvt's development would be very welcome; I will make sure PRs are reviewed quickly and replied to promptly. Before submitting a PR, please make sure to
read the [1-minute code style guide](https://github.com/zvtvz/zvt/blob/master/code_of_conduct.md) first, and:
1. Pass all unit tests; if it is a new feature, add unit tests for it.
2. Follow the development conventions.
3. Update the corresponding documentation if needed.
Developers are also very welcome to contribute more examples to zvt and help improve the documentation together.
## Buy the author a coffee
If you find the project helpful, you can buy the author a coffee:
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/alipay-cn.png" width="25%" alt="Alipay">
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat-cn.png" width="25%" alt="Wechat">
## Contact
Add WeChat to join the group: foolcage (passphrase: zvt)
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/wechat.jpeg" width="25%" alt="Wechat">
------
WeChat official account:
<img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/gongzhonghao.jpg" width="25%" alt="Wechat">
Zhihu column:
https://zhuanlan.zhihu.com/automoney
## Thanks
<p><a href=https://www.jetbrains.com/?from=zvt><img src="https://raw.githubusercontent.com/zvtvz/zvt/master/docs/imgs/jetbrains.png" width="25%" alt="jetbrains"></a></p>
|
zvt
|
/zvt-0.10.4.tar.gz/zvt-0.10.4/README-cn.md
|
README-cn.md
|
#!/usr/bin/env python
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="zvt",
version="0.10.4",
description="unified, modular quant framework for human beings",
long_description=long_description,
url="https://github.com/zvtvz/zvt",
author="foolcage",
author_email="[email protected]",
classifiers=[ # Optional
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Customer Service",
"Intended Audience :: Education",
"Intended Audience :: Financial and Insurance Industry",
"Topic :: Software Development :: Build Tools",
"Topic :: Office/Business :: Financial :: Investment",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="quant stock finance fintech big-data zvt technical-analysis trading-platform pandas fundamental-analysis",
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.7, <4",
include_package_data=True,
install_requires=[
"requests == 2.20.1",
"SQLAlchemy == 1.4.20",
"pandas == 1.1.4",
"arrow == 1.2.1",
"xlrd == 1.2.0",
"demjson3 == 3.0.5",
"marshmallow-sqlalchemy == 0.23.1",
"marshmallow == 3.2.2",
"plotly==4.12.0",
"dash==1.17.0",
"simplejson==3.16.0",
"jqdatapy==0.1.6",
"dash-bootstrap-components==0.11.0",
"dash_daq==0.5.0",
"scikit-learn==1.0.1",
],
project_urls={ # Optional
"Bug Reports": "https://github.com/zvtvz/zvt/issues",
"Funding": "https://www.foolcage.com/zvt",
"Say Thanks!": "https://saythanks.io/to/foolcage",
"Source": "https://github.com/zvtvz/zvt",
},
long_description_content_type="text/markdown",
entry_points={
"console_scripts": [
"zvt = zvt.main:main",
"zvt_plugin = zvt.plugin:main",
"zvt_export = zvt.plugin:export",
],
},
license_file="LICENSE",
)
|
zvt
|
/zvt-0.10.4.tar.gz/zvt-0.10.4/setup.py
|
setup.py
|
# A brief note on zvtm
1. zvt is a great quant framework, but its documentation is not yet detailed enough. https://zvtvz.github.io/zvt/#/README is its introduction.
2. To work around zvt storing its data in sqlite3, which is inconvenient to use, zvtm modifies it so that data can be saved to a MySQL database.
3. Usage: simply edit config.json and put it in the local user's zvtm_home directory, and it is ready to use (see the sketch below).
4. record.py in the test directory provides a few simple demos; see the zvt notes above for details.
5. Existing data can be converted with sql3mysql.
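A hypothetical config.json along those lines (every key name here is an assumption for illustration; check the project source for the exact schema):
```json
{
  "db_engine": "mysql",
  "mysql_host": "127.0.0.1",
  "mysql_port": 3306,
  "mysql_user": "zvt",
  "mysql_password": "change-me"
}
```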
|
zvtm
|
/zvtm-0.0.11.tar.gz/zvtm-0.0.11/README.md
|
README.md
|
#!/usr/bin/env python
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
try:
# for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
install_reqs = parse_requirements("requirements.txt", session=False)
try:
requirements = [str(ir.req) for ir in install_reqs]
except AttributeError:  # newer pip renamed .req to .requirement
requirements = [str(ir.requirement) for ir in install_reqs]
setup(
name='zvtm',
version='0.0.11',
description='unified, modular quant framework for mysql',
long_description=long_description,
url='https://github.com/epoches/zvtm',
author='epoches',
author_email='[email protected]',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='quant stock finance fintech big-data zvt technical-analysis trading-platform pandas fundamental-analysis',
packages=find_packages(include=['zvtm.*', 'zvtm']),
python_requires='>=3.5, <4',
include_package_data=True,
install_requires=requirements,
project_urls={ # Optional
'Bug Reports': 'https://github.com/epoches/zvtm/issues',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/epoches/zvtm',
},
long_description_content_type="text/markdown",
entry_points={
'console_scripts': [
'zvtm = zvtm.main:main',
'zvtm_plugin = zvtm.plugin:main',
'zvtm_export = zvtm.plugin:export',
],
},
)
|
zvtm
|
/zvtm-0.0.11.tar.gz/zvtm-0.0.11/setup.py
|
setup.py
|
<div align="left">
<h1>ZvukoGram API <img src="https://zvukogram.com/design/img/dispic/zvuklogo.png" width=30 height=30></h1>
<p align="left" >
<a href="https://pypi.org/project/zvukogram/">
<img src="https://img.shields.io/pypi/v/zvukogram?style=flat-square" alt="PyPI">
</a>
<a href="https://pypi.org/project/zvukogram/">
<img src="https://img.shields.io/pypi/dm/zvukogram?style=flat-square" alt="PyPI">
</a>
</p>
</div>
A simple, yet powerful library for [ZvukoGram API](https://zvukogram.com/node/api/)
## Usage
With ``ZvukoGram API`` you can fully access the ZvukoGram API.
## Documentation
Official docs can be found on the [API's webpage](https://zvukogram.com/node/api/)
## Installation
```bash
pip install zvukogram
```
## Requirements
- ``Python 3.7+``
- ``aiohttp``
- ``pydantic``
## Features
- ``Asynchronous``
- ``Exception handling``
- ``Pydantic return model``
- ``LightWeight``
## Basic example
```python
import asyncio
from zvukogram import ZvukoGram, ZvukoGramError
api = ZvukoGram('token', 'email')
async def main():
try:
voices = await api.get_voices()
print(voices['Русский'].pop().voice)
except ZvukoGramError as exc:
print(exc)
generation = await api.tts(
voice='Бот Максим',
text='Привет!',
)
print(generation.file)
audio = await generation.download()
generation = await api.tts_long(
voice='Бот Максим',
text='Более длинный текст!',
)
while not generation.file:
await asyncio.sleep(1)
generation = await api.check_progress(generation.id)
print(generation.file)
asyncio.run(main())
```
Developed by Nikita Minaev (c) 2023
|
zvukogram
|
/zvukogram-1.0.1.tar.gz/zvukogram-1.0.1/README.md
|
README.md
|
import os
import codecs
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as file:
long_description = "\n" + file.read()
VERSION = '1.0.1'
DESCRIPTION = 'Asynchronous ZvukoGram API wrapper'
setup(
name="zvukogram",
version=VERSION,
author="Nikita Minaev",
author_email="<[email protected]>",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=long_description,
packages=find_packages(),
install_requires=['aiohttp', 'pydantic'],
keywords=['python', 'zvukogram', 'payments', 'async', 'asyncio', 'aiohttp', 'pydantic'],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
url='https://github.com/nikitalm8/zvukogram',
project_urls={
'Homepage': 'https://github.com/nikitalm8/zvukogram',
'Bug Tracker': 'https://github.com/nikitalm8/zvukogram/issues',
'API Docs': 'https://zvukogram.com/node/api/',
},
)
|
zvukogram
|
/zvukogram-1.0.1.tar.gz/zvukogram-1.0.1/setup.py
|
setup.py
|
from .zw_fast_quantile_py import *
__doc__ = zw_fast_quantile_py.__doc__
|
zw-fast-quantile-py
|
/zw_fast_quantile_py-0.2.0-cp39-cp39-macosx_11_0_arm64.whl/zw_fast_quantile_py/__init__.py
|
__init__.py
|
# zw_outliersdetec
This is a small outlier-detection package. See
[Github-vidiewei](https://github.com/vidiewei) for more about this package.
The package includes six algorithms for unsupervised outlier detection:
1. HOT-SAX 2. PAPR_RW 3. IntervalSets 4. iForest 5. RDOS 6. FastVOA
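A quick-start sketch using the HOT-SAX module, following the usage shown in the module docstrings (the CSV file name is an example taken from those docstrings):
```python
import numpy as np
from zw_outliersdetec.HOTSAX import HOTSAX

# a one-column, unlabeled time series (e.g. an ECG trace)
data = np.genfromtxt("ECG0606_1.csv", delimiter=",")
hs = HOTSAX(2)  # report two discords
discords, win_size = hs.hotsax(data)
print(discords, win_size)
```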
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/README.md
|
README.md
|
from setuptools import setup,find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="zw_outliersdetec",
version="0.0.1",  # first release
author="Zuo Wei",
author_email="[email protected]",
description="A small outliers-detection package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/vidiewei",
packages=find_packages(),
#include_package_data = True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/setup.py
|
setup.py
|
import numpy
from sklearn.preprocessing import scale
class PAPR:
splits = dict()  # lookup table of breakpoints per alphabet size
splits[2] = [0, float('inf')]  # float('inf') is positive infinity
splits[3] = [-0.43, 0.43, float('inf')]
splits[4] = [-0.67, 0, 0.67, float('inf')]
splits[5] = [-0.84, -0.25, 0.25, 0.84, float('inf')]
splits[6] = [-0.97, -0.43, 0, 0.43, 0.97, float('inf')]
splits[7] = [-1.07, -0.57, -0.18, 0.18, 0.57, 1.07, float('inf')]
splits[8] = [-1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15, float('inf')]
splits[9] = [-1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22, float('inf')]
splits[10] = [-1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28, float('inf')]
def __init__(self,m):
self.m=m
'''
Purpose: compute outlier scores with the PAPR_RW algorithm.
Input: data, a one-dimensional series read from a CSV file.
Output: scores, plus scores_index (subsequence indices sorted by score, ascending).
Example:
from PAPR import *
import numpy as np
import matplotlib.pyplot as plt
m = 250  # subsequence length; differs per dataset
P = PAPR(m)
# read data from a CSV file, without labels
data = np.genfromtxt("chfdb_1.csv", delimiter=',')
scores, scores_index = P.papr(data)
# visualize and mark the outlier
t = scores_index[0]  # index of the lowest-scoring subsequence, treated as the outlier
x0 = range(0, data.__len__())
plt.plot(x0, data)
x1 = range(t*m, t*m+m-1)
plt.plot(x1, data[t*m:t*m+m-1], color="red")
plt.show()
'''
def papr(self, data):
m = self.m  # subsequence length
# split the data into subsequences
data_matrix = self.split_data(data)
# normalize each subsequence
data_matrix = scale(data_matrix, axis=1)
# compute the width parameter of the Gaussian RBF for the subsequences
widths = self.find_best_w(data_matrix)
matrix = self.cal_matrix(data_matrix, 6)
sim_matrix = self.cal_similarity(matrix=matrix, wc=0.3, wd=0.4, wr=0.3, length=widths.__len__(), widths=widths)
scores = self.random_walk(sim_matrix, error=0.05)
scores_index = numpy.argsort(scores)  # outlier scores sorted from low to high
return scores, scores_index
'''
Input: data, a one-dimensional series.
Purpose: split the series into subsequences of the given length.
Output: data_matrix, a 2-D matrix with one subsequence per row.
'''
def split_data(self, data):
index = self.m
length = data.__len__()
data_matrix = list()
i = 0
while i < length:
sequence = data[i:i+index]
i = i + index
data_matrix.append(sequence)
return data_matrix
'''
Value-space partitioning and computation of the PAPR metrics.
'''
def cal_matrix(self,data, k):
points = self.splits[k]
new_data = list()
for item in data:
tmp_points = list()
for i in range(k):
tmp_points.append(list())
for p in item:
for w in range(k):
if p < points[w]:
tmp_points[w].append(p)
break
tmp_matrix = numpy.zeros((k, 3))  # k-by-3 zero matrix holding the PAPR features Mi = [di, ci, ri]
for w in range(k):
tmp_matrix[w, 0] = len(tmp_points[w])  # number of points in the sub value space
if tmp_matrix[w, 0] != 0:
tmp_matrix[w, 1] = numpy.mean(tmp_points[w])  # mean of the points in the sub value space
tmp_matrix[w, 2] = numpy.var(tmp_points[w])  # variance of the points in the sub value space
new_data.append(tmp_matrix)
return numpy.array(new_data)
'''
Compute the similarity matrix.
length is the number of subsequences; widths holds the δ values used to compute Scij and Srij.
'''
def cal_similarity(self,matrix, length, wd, wc, wr, widths):
index = range(length)
sim_matrix = numpy.zeros((length, length))  # length-by-length zero matrix
for r in index:
for c in index:
sd = self.cal_d_sim(matrix[r, :, 0], matrix[c, :, 0])
sc = self.cal_rc_sim(matrix[r, :, 1], matrix[c, :, 1], widths[r])
sr = self.cal_rc_sim(matrix[r, :, 2], matrix[c, :, 2], widths[r])
sim_matrix[r, c] = wd*sd + wc*sc + wr*sr
return sim_matrix
'''
Compute Sdij, the similarity between the point-count vectors di and dj.
'''
def cal_d_sim(self,one, two):
# m is the total length of subsequence one, m = Σ(k=1..q) dik
m = numpy.sum(one)
# length is the length of the feature vector Mi = [di, ci, ri], i.e. the number of sub value spaces
length = len(one)
s = 0
for l in range(length):
s += min(one[l], two[l])
return 1.0 * s / m
'''
Compute Scij and Srij (both use the same formula).
w is δ, the Gaussian RBF radius; it can be computed per dataset via information entropy.
'''
def cal_rc_sim(self,one, two, w=0.005):
return numpy.exp(-1.0 * numpy.linalg.norm(one - two, ord=2) / numpy.power(w, 2))
'''
The random-walk model; it converges to a probability distribution, which serves as the outlier scores.
'''
def random_walk(self,sim_matrix, error=0.1):
rows, cols = sim_matrix.shape
s_matrix = numpy.zeros((rows, cols))
for r in range(rows):
totSim = 0.0
for c in range(cols):
totSim += sim_matrix[r, c]
for c in range(cols):
s_matrix[r, c] = 1.0*sim_matrix[r, c] / totSim
damping_factor = 0.1
ct = numpy.array([1.0/rows]*rows)
recursive_err = error+1
times = 0
while recursive_err > error and times < 100:
ct1 = damping_factor/rows + numpy.dot(s_matrix.T, ct)
recursive_err = numpy.linalg.norm(ct-ct1, ord=1)
times += 1
ct = ct1[:]
return ct
'''
Compute the dataset's δ, the Gaussian RBF radius, via information entropy.
'''
def find_best_w(self,data_matrix):
alist, blist = numpy.zeros(data_matrix.__len__()), numpy.zeros(data_matrix.__len__())
r_index = range(data_matrix.__len__())
gama = (5**0.5-1)/2
coe = (2**0.5)/3
for i in r_index:
min_dist, max_dist = float('inf'), -float('inf')
for j in r_index:
if i == j:
continue
dist = numpy.linalg.norm(data_matrix[i]-data_matrix[j], ord=2)  # L2 norm
min_dist = min(dist, min_dist)
max_dist = max(dist, max_dist)
alist[i], blist[i] = coe*min_dist, coe*max_dist
left, right = cal_sig(alist, blist, gama)
ent_left = cal_entropy(left)
ent_right = cal_entropy(right)
epison = 1
times = 0
while numpy.linalg.norm(alist-blist) < 1 and times < 20:
if ent_left < ent_right:
blist, right = right.copy(), left.copy()
ent_right = ent_left
left = alist + (1-gama)*(blist-alist)
ent_left = cal_entropy(left)
else:
alist, left = left.copy(), right.copy()
ent_left = ent_right
right = alist + gama*(blist-alist)
ent_right = cal_entropy(right)
times += 1
if ent_left < ent_right:
return left
else:
return right
def cal_sig(alist, blist, gama):
length = len(alist)
index = range(length)
left, right = numpy.zeros(length), numpy.zeros(length)
for i in index:
left[i] = alist[i] + (1-gama)*(blist[i]-alist[i])
right[i] = alist[i] + gama*(blist[i]-alist[i])
return left, right
'''
Compute information entropy.
'''
def cal_entropy(list):
total = sum(list)
list /= total
log_list = numpy.log(list)
return -numpy.dot(list, log_list)
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/PAPR.py
|
PAPR.py
|
import numpy as np
from sklearn.neighbors import NearestNeighbors
from math import *
'''
The RDOS algorithm: estimates each object's kernel density from its k-nearest
neighbors, reverse nearest neighbors, and shared nearest neighbors, and derives
an outlier score from it.
Parameters: k - number of neighbors; h - width parameter of the Gaussian kernel.
'''
class RDOS:
# initialize with parameters and their defaults
def __init__(self,n_outliers=1,n_neighbors=5,h=2):
self.n_outliers=n_outliers
self.n_neighbors=n_neighbors
self.h=h
'''
RDOS
Input: data - dataset; n_outliers - number of outliers to report (default 1);
n_neighbors - number of neighbors; h - width parameter of the Gaussian kernel.
Output: the outlier scores, plus the indices sorted by outlier score.
Example:
import pandas as pd
from RDOS import *
# if the dataset carries a Label column, drop it first
data = pd.read_csv("hbk.csv", sep=',')
# data = data.drop('Label', axis=1)
data = np.array(data)
print(data.shape)
# run RDOS with the preset number of outliers
rdos = RDOS(n_outliers=10)
RDOS_score, RDOS_index = rdos.rdos(data)
'''
def rdos(self, data):
n_outliers = self.n_outliers
n_neighbors = self.n_neighbors
h = self.h
n = data.shape[0]  # n: number of samples in the dataset
d = data.shape[1]  # d: dimensionality, i.e. number of attributes
# validate the input parameters
if n_neighbors >= n or n_neighbors < 1:
print('n_neighbors input must be less than number of observations and greater than 0')
exit()
outliers = list()
# neighbor indices of each data object
Sknn = list()
Srnn = list()
Ssnn = list()
S = list()
P = list()
# compute Sknn
for X in data:
Sknn_temp = self.KNN(data, [X], return_distance=False)
Sknn_temp = np.squeeze(Sknn_temp)
Sknn.append(Sknn_temp[1:])
S.append(list(Sknn_temp[1:]))  # X's full neighbor set
# compute Srnn
for i in range(n):
Srnn_temp = list()  # reverse nearest neighbors of this object
for item in Sknn[i]:
item_neighbors = Sknn[item]
# if X's neighbor also has X in its k-NN set, that neighbor is a reverse neighbor of X
if i in item_neighbors:
Srnn_temp.append(item)
Srnn.append(Srnn_temp)
S[i].extend(Srnn_temp)  # extend X's full neighbor set
# compute Ssnn
for i in range(n):
Ssnn_temp = list()
for j in Sknn[i]:
kneighbor_rnn = Srnn[j]  # RNN set of the k-th neighbor
Ssnn_temp.extend(kneighbor_rnn)
Ssnn_temp = list(set(Ssnn_temp))  # deduplicate
if i in Ssnn_temp:
Ssnn_temp.remove(i)  # remove X's own index
Ssnn.append(Ssnn_temp)  # X's shared nearest neighbors
S[i].extend(Ssnn_temp)  # extend X's full neighbor set
S[i] = list(set(S[i]))  # deduplicate
P.append(self.getKernelDensity(data, i, S[i]))  # compute the P value from the paper
'''
# compute each data object's neighbor sets (earlier per-object variant, kept for reference)
for i in range(n):
Sknn_temp = self.KNN(data, [data[i]], return_distance=False)
Sknn_temp = np.squeeze(Sknn_temp)
print("Sknn:", Sknn_temp[1:])
Sknn.append(Sknn_temp[1:])  # drop the object itself, e.g. [[11 29 7 26 24]] -> [29 7 26 24]
Srnn.append(self.RNN(data, [data[i]], return_distance=False))  # e.g. [29 24]
Ssnn_temp = list()
for j in Sknn[i]:
kneighbor_rnn = self.RNN(data, [data[j]], return_distance=False)  # RNN set of the k-th neighbor
Ssnn_temp.extend(kneighbor_rnn)
Ssnn_temp = list(set(Ssnn_temp))  # deduplicate
if i in Ssnn_temp:
Ssnn_temp.remove(i)  # remove X's own index
Ssnn.append(Ssnn_temp)  # X's shared nearest neighbors
S.append(list(set(Ssnn_temp)))  # X's full neighbor set
'''
# print("S:", S[i])
# compute the RDOS outlier scores
RDOS_score = list()
for i in range(n):
S_RDOS = 0
for j in S[i]:  # sum the kernel densities over the neighbor set
S_RDOS = S_RDOS + P[j]
RDOS_score.append(S_RDOS / (len(S[i]) * P[i]))
RDOS_index = np.argsort(RDOS_score)  # sort outlier scores ascending; returns array indices
return RDOS_score, RDOS_index[::-1]  # scores, plus indices ordered from highest score to lowest
'''
Find the k nearest neighbors of each object in X and return their indices
(for k > 1 the object itself is included).
X can be a single point or a set of points; data is the full dataset.
With return_distance=True the distances are returned as well.
'''
def KNN(self, data, X, return_distance=False):
neigh = NearestNeighbors(n_neighbors=self.n_neighbors)
neigh.fit(data)
return neigh.kneighbors(X, return_distance=return_distance)
'''
Find X's reverse k-nearest-neighbor set and return the indices.
X is a single data object; data is the full dataset.
With return_distance=True the distances are returned as well.
def RNN(self, data, X, return_distance=False):
neigh = NearestNeighbors(n_neighbors=self.n_neighbors)
neigh.fit(data)
X_neighbors = neigh.kneighbors(X, return_distance=return_distance)
X_Srnn = list()  # indices of the reverse nearest neighbors
# scan X's neighbor set for reverse neighbors; item is a neighbor's index
index = X_neighbors[0, 1:]
X_index = X_neighbors[0, 0]  # index of X
# neighbor indices
for item in index:
item_neighbors = neigh.kneighbors([data[item]], return_distance=False)  # the neighbor's own k-NN set
# if X's neighbor also has X in its k-NN set, that neighbor is a reverse neighbor of X
if X_index in item_neighbors:
X_Srnn.append(item)
return np.array(X_Srnn)
'''
'''
Compute the kernel density.
Input: data - dataset; X_index - index of the data object; S - its neighbor set.
Output: P as defined in the paper.
'''
def getKernelDensity(self, data, X_index, S):
h = self.h  # Gaussian kernel parameter
d = data.shape[1]  # number of attributes
S_X = list(S)
S_X.append(X_index)
X_gaussian = 0
for i in S_X:
X_gaussian += (1/((2*pi)**(d/2)))*exp(-(np.linalg.norm(data[i]-data[X_index]))/(2*h**2))
S_len = S.__len__()
P = 1/(S_len+1)*(1/h**d)*X_gaussian
return P
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/RDOS.py
|
RDOS.py
|
import random
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
from sklearn.decomposition import TruncatedSVD
#Algorithm 2 RandomProjection(S; t)
def random_projection(S, t):
"""
Projects data space to random vector space
:param S: Train set
:param t: Amount of final dimension
:return: A list L = L1L2L3...Lt where Li is a list of points
ordered by their dot product with ri
"""
l = []
for i in range(0, t):
#ri = []
#for j in range(0, S.shape[1]):
#ri.append(random.randint(0, 1))
ri=list(np.random.normal(0,1,S.shape[1]))
l.append([])
for index, record in S.iterrows():
dotted = np.dot(record, ri)
l[i].append((index, dotted))
l[i] = sorted(l[i], key=lambda x: x[1])
return l
def prepare_projected_data(projected, t):
result = list()
for i in range(0, t):
l = sorted(projected[i], key=lambda x: x[0])
l = list(map(lambda x: x[1], l))
result.append(l)
result = list(map(list, zip(*result)))
return result
def pca(S, t, is_product):
pca = PCA(n_components=t)
X = np.array(S)
pca.fit(X)
if not is_product:
print("Accumulative Variance Ratio : ", pca.explained_variance_ratio_.cumsum())
return pd.DataFrame(pca.transform(X))
def SVD(S, t, is_product):
svd = TruncatedSVD(n_components=t)
X = np.array(S)
svd.fit(X)
if not is_product:
print("Accumulative Variance Ratio : ", svd.explained_variance_ratio_.cumsum())
return pd.DataFrame(svd.transform(X))
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/__dimension_reduction.py
|
__dimension_reduction.py
|
import numpy as np
from zw_outliersdetec.__sax_via_window import *
'''
Euclidean-distance helper.
'''
def euclidean(a, b):
"""Compute a Euclidean distance value."""
return np.sqrt(np.sum((a-b)**2))
'''
Implements the HOT-SAX algorithm for finding discord (anomalous) subsequences.
'''
class HOTSAX:
# num_discords: how many discords to report
def __init__(self,num_discords=2):
self.num_discords=num_discords
'''
Purpose: locate anomalous subsequences with the HOT-SAX algorithm.
Input: series - the data; win_size - window size (i.e. subsequence length, default 100); other parameters use their defaults.
Output: for the configured number of discords, the start position and score of each; the subsequence of length win_size starting there is considered anomalous.
Example:
import numpy as np
from HOTSAX import *
# ECG data, unlabeled, a single column of values
data = np.genfromtxt("ECG0606_1.csv", delimiter=',')
hs = HOTSAX(2)
discords, win_size = hs.hotsax(data)
print(discords, win_size)
'''
def hotsax(self,series, win_size=100, a_size=3,
paa_size=3, z_threshold=0.01):
"""HOT-SAX-driven discords discovery."""
discords = list()
globalRegistry = set()
while (len(discords) < self.num_discords):
bestDiscord =self.find_best_discord_hotsax(series, win_size, a_size,
paa_size, z_threshold,
globalRegistry)
if -1 == bestDiscord[0]:
break
discords.append(bestDiscord)
mark_start = bestDiscord[0] - win_size
if 0 > mark_start:
mark_start = 0
mark_end = bestDiscord[0] + win_size
'''if len(series) < mark_end:
mark_end = len(series)'''
for i in range(mark_start, mark_end):
globalRegistry.add(i)
return discords, win_size  # start positions of the requested discords, plus the window size
def find_best_discord_hotsax(self,series, win_size, a_size, paa_size,
znorm_threshold, globalRegistry): # noqa: C901
"""Find the best discord with hotsax."""
"""[1.0] get the sax data first"""
sax_none = sax_via_window(series, win_size, a_size, paa_size, "none", 0.01)
"""[2.0] build the 'magic' array"""
magic_array = list()
for k, v in sax_none.items():
magic_array.append((k, len(v)))
"""[2.1] sort it desc by the key"""
m_arr = sorted(magic_array, key=lambda tup: tup[1])
"""[3.0] define the key vars"""
bestSoFarPosition = -1
bestSoFarDistance = 0.
distanceCalls = 0
visit_array = np.zeros(len(series), dtype=int)  # np.int was removed in newer NumPy
"""[4.0] and we are off iterating over the magic array entries"""
for entry in m_arr:
"""[5.0] some moar of teh vars"""
curr_word = entry[0]
occurrences = sax_none[curr_word]
"""[6.0] jumping around by the same word occurrences makes it easier to
nail down the possibly small distance value -- so we can be efficient
and all that..."""
for curr_pos in occurrences:
if curr_pos in globalRegistry:
continue
"""[7.0] we don't want an overlapping subsequence"""
mark_start = curr_pos - win_size
mark_end = curr_pos + win_size
visit_set = set(range(mark_start, mark_end))
"""[8.0] here is our subsequence in question"""
cur_seq = znorm(series[curr_pos:(curr_pos + win_size)],
znorm_threshold)
"""[9.0] let's see what is NN distance"""
nn_dist = np.inf
do_random_search = 1
"""[10.0] ordered by occurrences search first"""
for next_pos in occurrences:
"""[11.0] skip bad pos"""
if next_pos in visit_set:
continue
else:
visit_set.add(next_pos)
"""[12.0] distance we compute"""
dist = euclidean(cur_seq, znorm(series[next_pos:(
next_pos+win_size)], znorm_threshold))
distanceCalls += 1
"""[13.0] keep the books up-to-date"""
if dist < nn_dist:
nn_dist = dist
if dist < bestSoFarDistance:
do_random_search = 0
break
"""[13.0] if not broken above,
we shall proceed with random search"""
if do_random_search:
"""[14.0] build that random visit order array"""
curr_idx = 0
for i in range(0, (len(series) - win_size)):
if not(i in visit_set):
visit_array[curr_idx] = i
curr_idx += 1
it_order = np.random.permutation(visit_array[0:curr_idx])
curr_idx -= 1
"""[15.0] and go random"""
while curr_idx >= 0:
rand_pos = it_order[curr_idx]
curr_idx -= 1
dist = euclidean(cur_seq, znorm(series[rand_pos:(
rand_pos + win_size)], znorm_threshold))
distanceCalls += 1
"""[16.0] keep the books up-to-date again"""
if dist < nn_dist:
nn_dist = dist
if dist < bestSoFarDistance:
nn_dist = dist
break
"""[17.0] and BIGGER books"""
if (nn_dist > bestSoFarDistance) and (nn_dist < np.inf):
bestSoFarDistance = nn_dist
bestSoFarPosition = curr_pos
return (bestSoFarPosition, bestSoFarDistance)
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/HOTSAX.py
|
HOTSAX.py
|
from __future__ import division
import numpy as np
from warnings import warn
from sklearn.utils.fixes import euler_gamma
from scipy.sparse import issparse
import numbers
from sklearn.externals import six
from sklearn.tree import ExtraTreeRegressor
from sklearn.utils import check_random_state, check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.base import OutlierMixin
from sklearn.ensemble.bagging import BaseBagging
__all__ = ["iForest"]
INTEGER_TYPES = (numbers.Integral, np.integer)
class iForest(BaseBagging, OutlierMixin):
"""Isolation Forest Algorithm
Return the anomaly score of each sample using the IsolationForest algorithm
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, optional (default=100)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default="auto")
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the decision function. If 'auto', the decision function threshold is
determined as in the original paper.
.. versionchanged:: 0.20
The default value of ``contamination`` will change from 0.1 in 0.20
to ``'auto'`` in 0.22.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=False)
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
behaviour : str, default='old'
Behaviour of the ``decision_function`` which can be either 'old' or
'new'. Passing ``behaviour='new'`` makes the ``decision_function``
change to match other anomaly detection algorithm API which will be
the default behaviour in the future. As explained in details in the
``offset_`` attribute documentation, the ``decision_function`` becomes
dependent on the contamination parameter, in such a way that 0 becomes
its natural threshold to detect outliers.
.. versionadded:: 0.20
``behaviour`` is added in 0.20 for back-compatibility purpose.
.. deprecated:: 0.20
``behaviour='old'`` is deprecated in 0.20 and will not be possible
in 0.22.
.. deprecated:: 0.22
``behaviour`` parameter will be deprecated in 0.22 and removed in
0.24.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : integer
The actual number of samples
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: ``decision_function = score_samples - offset_``.
Assuming behaviour == 'new', ``offset_`` is defined as follows.
When the contamination parameter is set to "auto", the offset is equal
to -0.5 as the scores of inliers are close to 0 and the scores of
outliers are close to -1. When a contamination parameter different
than "auto" is provided, the offset is defined in such a way we obtain
the expected number of outliers (samples with decision function < 0)
in training.
Assuming the behaviour parameter is set to 'old', we always have
``offset_ = -0.5``, making the decision function independent from the
contamination parameter.
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
"""
def __init__(self,
n_estimators=100,
max_samples="auto",
contamination="legacy",
max_features=1.,
bootstrap=False,
n_jobs=None,
behaviour='old',
random_state=None,
verbose=0):
super(iForest, self).__init__(
base_estimator=ExtraTreeRegressor(
max_features=1,
splitter='random',
random_state=random_state),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.behaviour = behaviour
self.contamination = contamination
'''
Purpose: compute outlier scores with the iForest algorithm.
Input: data_train - data read from a CSV file, without labels.
Output: the predicted label of each data object (-1 means outlier, 1 means normal),
plus the indices sorted by outlier score.
Example:
import pandas as pd
from iForest import *
data_train = pd.read_csv('hbk.csv', sep=',')
# select features; labels are not used and must be dropped if present
# data_train = data_train.drop('Label', axis=1)
# print(data_train.columns)
# n_estimators is the number of isolation trees
ift = iForest(n_estimators=100,
behaviour="new",
contamination="auto",
n_jobs=1,  # number of parallel jobs (-1 would use all CPUs)
# verbose=2,
)
# predict each data object's label with iForest
Label, Index = ift.iforest(data_train)
print(Label)
'''
def iforest(self, data_train):
# train
self.fit(data_train)
shape = data_train.shape[0]
batch = 10 ** 6
X_cols = data_train.columns
all_pred_label = []
all_pred_score = []
for i in range(int(shape / batch + 1)):
start = i * batch
end = (i + 1) * batch
test = data_train[X_cols][start:end]
# predict
pred_label, pred_score = self.predict(test)
all_pred_label.extend(pred_label)
all_pred_score.extend(pred_score)
return all_pred_label, np.argsort(all_pred_score)  # thresholded labels, plus indices sorted by ascending outlier score
# data_train.to_csv('outliers.csv', columns=["pred", ], header=False)
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
self : object
"""
if self.contamination == "legacy":
warn('default contamination parameter 0.1 will change '
'in version 0.22 to "auto". This will change the '
'predict method behavior.',
FutureWarning)
self._contamination = 0.1
else:
self._contamination = self.contamination
if self.behaviour == 'old':
warn('behaviour="old" is deprecated and will be removed '
'in version 0.22. Please use behaviour="new", which '
'makes the decision_function change to match '
'other anomaly detection algorithm API.',
FutureWarning)
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# check whether X is sparse
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
# keep the subsample size within valid bounds
n_samples = X.shape[0]
if isinstance(self.max_samples, six.string_types):
if self.max_samples == 'auto':
max_samples = min(256, n_samples)
else:
raise ValueError('max_samples (%s) is not supported.'
'Valid choices are: "auto", int or'
'float' % self.max_samples)
elif isinstance(self.max_samples, INTEGER_TYPES):
if self.max_samples > n_samples:
warn("max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples))
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not (0. < self.max_samples <= 1.):
raise ValueError("max_samples must be in (0, 1], got %r"
% self.max_samples)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super(iForest, self)._fit(X, y, max_samples,
max_depth=max_depth,
sample_weight=None)
if self.behaviour == 'old':
# in this case, decision_function = 0.5 + self.score_samples(X):
if self._contamination == "auto":
raise ValueError("contamination parameter cannot be set to "
"'auto' when behaviour == 'old'.")
self.offset_ = -0.5
self._threshold_ = np.percentile(self.decision_function(X),
100. * self._contamination)
return self
# else, self.behaviour == 'new':
if self._contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter, so that the
# threshold_ attribute is implicitly 0 and is not needed anymore:
# np.percentile computes an arbitrary percentile of an array
self.offset_ = np.percentile(self.score_samples(X),
100. * self._contamination)
return self
def predict(self, X):
# compute anomaly scores and decide whether each sample is an outlier
"""Predict if a particular sample is an outlier or not.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : array, shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
scores : array, shape (n_samples,)
The decision-function scores of the input samples.
"""
check_is_fitted(self, ["offset_"])
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)  # start with every sample marked as an inlier (+1)
threshold = self.threshold_ if self.behaviour == 'old' else 0
scores = self.decision_function(X)  # compute once; used for both the mask and the return value
is_inlier[scores < threshold] = -1  # scores below the threshold are outliers, marked -1
return is_inlier, scores
def decision_function(self, X):
# return the anomaly score of X (offset subtracted); predict() thresholds this value
"""Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
# compute the anomaly score following the algorithm in the original paper
"""Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
scores : array, shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self, ["estimators_"])
# Check that the input data is well-formed
X = check_array(X, accept_sparse='csr')
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# size of the input data set
n_samples = X.shape[0]
n_samples_leaf = np.zeros((n_samples, self.n_estimators), order="f")
# n_samples x n_estimators zero matrix; order="f" stores it column-major (Fortran order)
depths = np.zeros((n_samples, self.n_estimators), order="f")
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# enumerate() yields both the index and the value; zip() pairs each tree
# with the feature subset it was trained on
for i, (tree, features) in enumerate(zip(self.estimators_,
self.estimators_features_)):
# select the feature subset used by this tree
if subsample_features:
X_subset = X[:, features]
else:
X_subset = X
# tree.apply(X) returns the index of the leaf each sample ends up in
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)  # decision_path(X) returns the node-indicator matrix of each sample's path
n_samples_leaf[:, i] = tree.tree_.n_node_samples[leaves_index]  # number of training samples in the leaf each sample reaches
depths[:, i] = np.ravel(node_indicator.sum(axis=1))  # path length of each sample through the tree
depths[:, i] -= 1
# add the expected extra path length for leaves that hold several samples
depths += _average_path_length(n_samples_leaf)
# anomaly score as defined in the paper: s = 2 ** (-E[h(x)] / c(psi))
scores = 2 ** (-depths.mean(axis=1) / _average_path_length(
self.max_samples_))
# Take the opposite of the scores as bigger is better (here less abnormal);
# the lower the returned value, the more abnormal the sample
return -scores
@property
def threshold_(self):
if self.behaviour != 'old':
raise AttributeError("threshold_ attribute does not exist when "
"behaviour != 'old'")
warn("threshold_ attribute is deprecated in 0.20 and will"
" be removed in 0.22.", DeprecationWarning)
return self._threshold_
# corresponds to the c(φ) function in the paper
def _average_path_length(n_samples_leaf):
""" The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like, shape (n_samples, n_estimators), or int.
The number of training samples in each test sample leaf, for
each estimators.
Returns
-------
average_path_length : array, same shape as n_samples_leaf
"""
# isinstance() checks whether an object is of a given type and returns a bool
if isinstance(n_samples_leaf, INTEGER_TYPES):
if n_samples_leaf <= 1:
return 1.
else:
return 2. * (np.log(n_samples_leaf - 1.) + euler_gamma) - 2. * (
n_samples_leaf - 1.) / n_samples_leaf
else:
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask = (n_samples_leaf <= 1)
not_mask = np.logical_not(mask)
average_path_length[mask] = 1.
average_path_length[not_mask] = 2. * (
np.log(n_samples_leaf[not_mask] - 1.) + euler_gamma) - 2. * (
n_samples_leaf[not_mask] - 1.) / n_samples_leaf[not_mask]
return average_path_length.reshape(n_samples_leaf_shape)
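# Editor's sketch (hedged): a tiny, self-contained check of the scoring rule used
# above, s(x, psi) = 2 ** (-E[h(x)] / c(psi)), where c(psi) is _average_path_length.
# It only calls _average_path_length, defined in this file; nothing else is assumed.
if __name__ == '__main__':
    psi = 256                      # sub-sample size used to grow each iTree
    c = _average_path_length(psi)  # expected path length of an unsuccessful BST search
    for mean_depth in (1.0, c, 2 * c):
        # depths well below c(psi) give scores near 1 (anomalous), depths near
        # c(psi) give ~0.5, and deeper paths approach 0 (normal)
        print('E[h(x)]=%.2f -> s=%.3f' % (mean_depth, 2 ** (-mean_depth / c)))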
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/iForest.py
|
iForest.py
|
import numpy as np
import math
from operator import add
from zw_outliersdetec import __dimension_reduction as dim_red
class FastVOA:
# t is the number of random hyperplane projections
def __init__(self,t):
self.t=t
#Algorithm 3 FirstMomentEstimator(L; t; n)
def __first_moment_estimator(self,projected, t, n):
f1 = [0] * n
for i in range(0, t):
cl = [0] * n
cr = [0] * n
li = projected[i]
for j in range(0, n):
idx = li[j][0]
cl[idx] = j  # originally: cl[idx] = j - 1
cr[idx] = n - 1 - cl[idx]
for j in range(0, n):
f1[j] += cl[j] * cr[j]
return list(map(lambda x: x * ((2 * math.pi) / (t * (n - 1) * (n - 2))), f1))
#Algorithm 4 FrobeniusNorm(L; t; n)
def __frobenius_norm(self,projected, t, n):
f2 = [0] * n
sl = np.random.choice([-1, 1], size=(n,), p=None)
sr = np.random.choice([-1, 1], size=(n,), p=None)
for i in range(0, t):
amsl = [0] * n
amsr = [0] * n
li = projected[i]
for j in range(1, n):
idx1 = li[j][0]
idx2 = li[j - 1][0]
amsl[idx1] = amsl[idx2] + sl[idx2]
for j in range(n - 2, -1, -1):
idx1 = li[j][0]
idx2 = li[j + 1][0]
amsr[idx1] = amsr[idx2] + sr[idx2]
for j in range(0, n):
f2[j] += amsl[j] * amsr[j]
return f2
#Algorithm 1 FastVOA(S; t; s1; s2)
'''
Purpose: compute an angle-based outlier score for every data object.
Inputs: train - unlabelled data, n - number of data objects,
t - random-hyperplane-projection parameter, s1, s2 - averaging parameters.
Outputs: the angle score of every data object (scores) and the index order
obtained by sorting the scores (scores_index).
Example:
import pandas as pd
from FastVOA import *
# read the data; labels are not needed
data = pd.read_csv('isolet.csv', sep=',')
ytrain = data.iloc[:, -1]
train = data.drop('Label', axis=1)
# project the data onto random hyperplanes
DIMENSION = 600
t = DIMENSION
n = train.shape[0]
print(n)
# run FastVOA to compute the angle scores
fv = FastVOA(t)
scores, scores_index = fv.fastvoa(train, n, t, 1, 1)
'''
def fastvoa(self,train, n, t, s1, s2):
projected = dim_red.random_projection(train, t)
f1 = self.__first_moment_estimator(projected, t, n)
y = []
for i in range(0, s2):
s = [0] * n
for j in range(0, s1):
result = list(map(lambda x: x ** 2, self.__frobenius_norm(projected, t, n)))
s = list(map(add, s, result))
s = list(map(lambda x: x / s1, s))  # s has length n (result has length n)
y.append(s)  # y ends up with s2 rows and n columns
y = list(map(list, zip(*y)))  # transpose y
f2 = []
for i in range(0, n):
f2.append(np.average(y[i]))  # average over the s2 repetitions
var = [0] * n
for i in range(0, n):
f2[i] = (4 * (math.pi ** 2) / (t * (t - 1) * (n - 1) * (n - 2))) * f2[i] - (2 * math.pi * f1[i]) / (t - 1)
var[i] = f2[i] - (f1[i] ** 2)
# sort
scores=var
scores_index = np.argsort(scores)  # sort by angle score, ascending
return scores, scores_index[::-1]  # return the scores and the indices, highest score first
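# Editor's sketch (hedged): fastvoa() above estimates the variance of the angle
# as Var[theta] = E[theta^2] - (E[theta])^2 (f2 and f1 hold the two moments).
# A quick numerical check of that identity with plain numpy:
if __name__ == '__main__':
    theta = np.random.rand(1000)
    lhs = np.mean(theta ** 2) - np.mean(theta) ** 2
    assert abs(lhs - np.var(theta)) < 1e-10  # identical up to floating-point error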
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/FastVOA.py
|
FastVOA.py
|
import numpy as np
'''
Piecewise Aggregate Approximation (PAA).
'''
def paa(series, paa_segments):
"""PAA implementation."""
series_len = len(series)
# check for the trivial case
if (series_len == paa_segments):
return np.copy(series)
else:
res = np.zeros(paa_segments)
# check when we are even
if (series_len % paa_segments == 0):
inc = series_len // paa_segments
for i in range(0, series_len):
idx = i // inc
np.add.at(res, idx, series[i])
# res[idx] = res[idx] + series[i]
return res / inc
# and process when we are odd
else:
for i in range(0, paa_segments * series_len):
idx = i // series_len
pos = i // paa_segments
np.add.at(res, idx, series[pos])
# res[idx] = res[idx] + series[pos]
return res / series_len
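# Editor's sketch (hedged): paa() reduces a length-n series to paa_segments
# segment means. A quick check on an evenly divisible series:
if __name__ == '__main__':
    print(paa(np.array([1., 1., 3., 3., 5., 5.]), 3))  # -> [1. 3. 5.]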
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/__paa.py
|
__paa.py
|
import numpy
from interval import *
from math import *
class IntervalSets:
# m is the subsequence length; configurable, default 100
def __init__(self,m=100):
self.m=m
'''
Purpose: compute an anomaly-structure score for every subsequence with the
IntervalSets method.
Input: data - unlabelled, one-dimensional data read from a CSV file.
Output: each subsequence's anomaly-structure score and the indices obtained
by sorting the scores.
Example:
import numpy as np
import matplotlib.pyplot as plt
from IntervalSets import *
m = 400  # subsequence length; 400 works best for ECG, 250 for chfdb
# create the detector
IS = IntervalSets(m)
# read the data from a CSV file
data = np.genfromtxt('ECG108_2.csv', delimiter=',')
# run IntervalSets to find anomalies
SAS, SAS_index = IS.intervalsets(data)
print(SAS)  # the anomaly-structure scores
print("Most likely anomalous subsequence:", SAS_index[len(SAS_index)-1])
# plot and highlight the anomaly
x0 = range(0, data.__len__())
plt.plot(x0, data)
t = SAS_index[len(SAS_index)-1]
x1 = range(t*m, t*m+m-1)
plt.plot(x1, data[t*m:t*m+m-1], color="red")
plt.text(100, 1, "m=400", size=15, alpha=0.8)
plt.show()
'''
def intervalsets(self,data):
data_matrix = self.split_data(data)  # split the data into subsequences
n = len(data_matrix)  # number of subsequences
m = self.m  # subsequence length
Sp=self.cal_Sp(data_matrix)
SA=self.cal_SA(data_matrix)
# compute each subsequence's anomaly-structure score
S = list()  # similarity sums, one scalar per subsequence
SAS = list()  # the scores, one-dimensional
for i in range(n):
S_i=0
for j in range(n):
S_i=S_i+(0.5*SA[i][j]+0.5*Sp[i][j])
S.append(S_i)
for i in range(n):
SAS_i =0
for j in range(n):
SAS_i=SAS_i+(S[i]-S[j])**2
SAS.append(SAS_i/n)
# sort
SAS_index = numpy.argsort(SAS)
return SAS,SAS_index
'''
Input: data_matrix, two-dimensional data.
Purpose: private helper that computes the probabilistic similarity Sp between
subsequences; boundary is the boundary-interval parameter.
Output: Sp, a 2-D matrix whose row i holds the Sp_ij values for subsequence i.
'''
def cal_Sp(self,data_matrix,boundary=0.2):
Sp=list()
Amin = list()  # minimum of each subsequence
Amax = list()  # maximum of each subsequence
Pmin = list()  # probability mass near each subsequence's minimum
Pmax = list()  # probability mass near each subsequence's maximum
n = len(data_matrix)  # number of subsequences
widths=self.find_best_w(data_matrix)
# value range of each subsequence
for i in range(n):
Amin.append(min(data_matrix[i]))  # subsequence minimum
Amax.append(max(data_matrix[i]))  # subsequence maximum
# probability of points falling into the boundary intervals
for i in range(n):
count_min=0
count_max = 0
for item in data_matrix[i]:
if item>=Amin[i] and item<=Amin[i]+boundary*(Amax[i]-Amin[i]):
count_min=count_min+1
if item>=Amax[i]-boundary*(Amax[i]-Amin[i]) and item<=Amax[i]:
count_max = count_max + 1
Pmin.append(count_min/self.m)
Pmax.append(count_max/self.m)
# compute Sp from the boundary probabilities
for i in range(n):
Sp_i=list()
for j in range(n):
if i==j:
Sp_i.append(1)
else:
p=exp(-((Pmin[i]-Pmin[j])**2+(Pmax[i]-Pmax[j])**2)/widths[i]**2)
#p=numpy.exp(-1.0 * numpy.linalg.norm(one - two, ord=2) / numpy.power(w, 2))
Sp_i.append(p)
Sp.append(Sp_i)
return Sp
'''
Input: data_matrix, two-dimensional data.
Purpose: private helper that computes the interval similarity SA between
subsequences.
Output: SA, a 2-D matrix whose row i holds the SA_ij values for subsequence i.
'''
def cal_SA(self, data_matrix):
Amin = list()  # minimum of each subsequence
Amax = list()  # maximum of each subsequence
SA = list()  # interval similarities
n = len(data_matrix)  # number of subsequences
for i in range(n):
Amin.append(min(data_matrix[i]))  # subsequence minimum
Amax.append(max(data_matrix[i]))  # subsequence maximum
for i in range(n):
SA_i=list()
A_i=Interval(Amin[i],Amax[i])
#print(A_i)
for j in range(n):
A_j=Interval(Amin[j],Amax[j])
if not A_i.overlaps(A_j):  # case 1: the intervals do not overlap
SA_i.append(0)
else:  # case 2: the intervals overlap
A_ij = A_i.join(A_j)  # merge the two intervals
a=((A_i.upper_bound-A_i.lower_bound)+(A_j.upper_bound-A_j.lower_bound)-(A_ij.upper_bound-A_ij.lower_bound))/(A_ij.upper_bound-A_ij.lower_bound)
SA_i.append(a)
SA.append(SA_i)
return SA
'''
Input: data, one-dimensional.
Purpose: split the data into subsequences of the configured length.
Output: data_matrix, a 2-D matrix with one subsequence per row.
'''
def split_data(self, data):
index = self.m
length = data.__len__()
data_matrix = list()
sequence = list()
i = 0
while i < length:
sequence = data[i:i + index]
# print(sequence)
i = i + index
data_matrix.append(sequence)
return data_matrix
'''
Compute δ, the radius of the Gaussian radial basis function, for the data set
via an information-entropy search.
Returns an array with one entry per subsequence.
'''
def find_best_w(self, data_matrix):
alist, blist = numpy.zeros(data_matrix.__len__()), numpy.zeros(data_matrix.__len__())
r_index = range(data_matrix.__len__())
gama = (5 ** 0.5 - 1) / 2
coe = (2 ** 0.5) / 3
for i in r_index:
min_dist, max_dist = float('inf'), -float('inf')
for j in r_index:
if i == j:
continue
dist = numpy.linalg.norm(data_matrix[i] - data_matrix[j], ord=2)  # L2 norm
min_dist = min(dist, min_dist)
max_dist = max(dist, max_dist)
alist[i], blist[i] = coe * min_dist, coe * max_dist
left, right = cal_sig(alist, blist, gama)
ent_left = cal_entropy(left)
ent_right = cal_entropy(right)
epison = 1
times = 0
while numpy.linalg.norm(alist - blist) < 1 and times < 20:
if ent_left < ent_right:
blist, right = right.copy(), left.copy()
ent_right = ent_left
left = alist + (1 - gama) * (blist - alist)
ent_left = cal_entropy(left)
else:
alist, left = left.copy(), right.copy()
ent_left = ent_right
right = alist + gama * (blist - alist)
ent_right = cal_entropy(right)
times += 1
if ent_left < ent_right:
return left
else:
return right
def cal_sig(alist, blist, gama):
length = len(alist)
index = range(length)
left, right = numpy.zeros(length), numpy.zeros(length)
for i in index:
left[i] = alist[i] + (1 - gama) * (blist[i] - alist[i])
right[i] = alist[i] + gama * (blist[i] - alist[i])
return left, right
'''
Compute the information entropy.
'''
def cal_entropy(list):
total = sum(list)
list /= total
log_list = numpy.log(list)
return -numpy.dot(list, log_list)
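# Editor's sketch (hedged): cal_entropy() normalises a positive array and returns
# its Shannon entropy; a uniform array of length k yields log(k). Quick check:
if __name__ == '__main__':
    print(cal_entropy(numpy.ones(4)))  # -> log(4) ≈ 1.386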
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/IntervalSets.py
|
IntervalSets.py
|
name = "zw_outliersdetec"
__all__ = ['FastVOA','HOTSAX','iForest','IntervalSets','PAPR','RDOS']
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/__init__.py
|
__init__.py
|
import numpy as np
from collections import defaultdict
from zw_outliersdetec.__paa import *
'''
SAX (Symbolic Aggregate approXimation) over a sliding window.
'''
def sax_via_window(series, win_size, paa_size, alphabet_size=3,
nr_strategy='exact', z_threshold=0.01):
"""Simple via window conversion implementation."""
cuts = cuts_for_asize(alphabet_size)
sax = defaultdict(list)
prev_word = ''
for i in range(0, len(series) - win_size):
sub_section = series[i:(i+win_size)]
zn = znorm(sub_section, z_threshold)
paa_rep = paa(zn, paa_size)
curr_word = ts_to_string(paa_rep, cuts)
if '' != prev_word:
if 'exact' == nr_strategy and prev_word == curr_word:
continue
elif 'mindist' == nr_strategy and\
is_mindist_zero(prev_word, curr_word):
continue
prev_word = curr_word
sax[curr_word].append(i)
return sax
'''
Z-score normalisation.
'''
def znorm(series, znorm_threshold=0.01):
"""Znorm implementation."""
sd = np.std(series)
if (sd < znorm_threshold):
return series
mean = np.mean(series)
return (series - mean) / sd
def cuts_for_asize(a_size):
"""Generate a set of alphabet cuts for its size."""
""" Typically, we generate cuts in R as follows:
get_cuts_for_num <- function(num) {
cuts = c(-Inf)
for (i in 1:(num-1)) {
cuts = c(cuts, qnorm(i * 1/num))
}
cuts
}
get_cuts_for_num(3) """
options = {
2: np.array([-np.inf, 0.00]),
3: np.array([-np.inf, -0.4307273, 0.4307273]),
4: np.array([-np.inf, -0.6744898, 0, 0.6744898]),
5: np.array([-np.inf, -0.841621233572914, -0.2533471031358,
0.2533471031358, 0.841621233572914]),
6: np.array([-np.inf, -0.967421566101701, -0.430727299295457, 0,
0.430727299295457, 0.967421566101701]),
7: np.array([-np.inf, -1.06757052387814, -0.565948821932863,
-0.180012369792705, 0.180012369792705, 0.565948821932863,
1.06757052387814]),
8: np.array([-np.inf, -1.15034938037601, -0.674489750196082,
-0.318639363964375, 0, 0.318639363964375,
0.674489750196082, 1.15034938037601]),
9: np.array([-np.inf, -1.22064034884735, -0.764709673786387,
-0.430727299295457, -0.139710298881862, 0.139710298881862,
0.430727299295457, 0.764709673786387, 1.22064034884735]),
10: np.array([-np.inf, -1.2815515655446, -0.841621233572914,
-0.524400512708041, -0.2533471031358, 0, 0.2533471031358,
0.524400512708041, 0.841621233572914, 1.2815515655446]),
11: np.array([-np.inf, -1.33517773611894, -0.908457868537385,
-0.604585346583237, -0.348755695517045,
-0.114185294321428, 0.114185294321428, 0.348755695517045,
0.604585346583237, 0.908457868537385, 1.33517773611894]),
12: np.array([-np.inf, -1.38299412710064, -0.967421566101701,
-0.674489750196082, -0.430727299295457,
-0.210428394247925, 0, 0.210428394247925,
0.430727299295457, 0.674489750196082, 0.967421566101701,
1.38299412710064]),
13: np.array([-np.inf, -1.42607687227285, -1.0200762327862,
-0.736315917376129, -0.502402223373355,
-0.293381232121193, -0.0965586152896391,
0.0965586152896394, 0.293381232121194, 0.502402223373355,
0.73631591737613, 1.0200762327862, 1.42607687227285]),
14: np.array([-np.inf, -1.46523379268552, -1.06757052387814,
-0.791638607743375, -0.565948821932863, -0.36610635680057,
-0.180012369792705, 0, 0.180012369792705,
0.36610635680057, 0.565948821932863, 0.791638607743375,
1.06757052387814, 1.46523379268552]),
15: np.array([-np.inf, -1.50108594604402, -1.11077161663679,
-0.841621233572914, -0.622925723210088,
-0.430727299295457, -0.2533471031358, -0.0836517339071291,
0.0836517339071291, 0.2533471031358, 0.430727299295457,
0.622925723210088, 0.841621233572914, 1.11077161663679,
1.50108594604402]),
16: np.array([-np.inf, -1.53412054435255, -1.15034938037601,
-0.887146559018876, -0.674489750196082,
-0.488776411114669, -0.318639363964375,
-0.157310684610171, 0, 0.157310684610171,
0.318639363964375, 0.488776411114669, 0.674489750196082,
0.887146559018876, 1.15034938037601, 1.53412054435255]),
17: np.array([-np.inf, -1.5647264713618, -1.18683143275582,
-0.928899491647271, -0.721522283982343,
-0.541395085129088, -0.377391943828554,
-0.223007830940367, -0.0737912738082727,
0.0737912738082727, 0.223007830940367, 0.377391943828554,
0.541395085129088, 0.721522283982343, 0.928899491647271,
1.18683143275582, 1.5647264713618]),
18: np.array([-np.inf, -1.59321881802305, -1.22064034884735,
-0.967421566101701, -0.764709673786387,
-0.589455797849779, -0.430727299295457,
-0.282216147062508, -0.139710298881862, 0,
0.139710298881862, 0.282216147062508, 0.430727299295457,
0.589455797849779, 0.764709673786387, 0.967421566101701,
1.22064034884735, 1.59321881802305]),
19: np.array([-np.inf, -1.61985625863827, -1.25211952026522,
-1.00314796766253, -0.8045963803603, -0.633640000779701,
-0.47950565333095, -0.336038140371823, -0.199201324789267,
-0.0660118123758407, 0.0660118123758406,
0.199201324789267, 0.336038140371823, 0.47950565333095,
0.633640000779701, 0.8045963803603, 1.00314796766253,
1.25211952026522, 1.61985625863827]),
20: np.array([-np.inf, -1.64485362695147, -1.2815515655446,
-1.03643338949379, -0.841621233572914, -0.674489750196082,
-0.524400512708041, -0.385320466407568, -0.2533471031358,
-0.125661346855074, 0, 0.125661346855074, 0.2533471031358,
0.385320466407568, 0.524400512708041, 0.674489750196082,
0.841621233572914, 1.03643338949379, 1.2815515655446,
1.64485362695147]),
}
return options[a_size]
def ts_to_string(series, cuts):
"""A straightforward num-to-string conversion."""
a_size = len(cuts)
sax = list()
for i in range(0, len(series)):
num = series[i]
# if the number is below 0, start from the bottom; otherwise from the top
if(num >= 0):
j = a_size - 1
while ((j > 0) and (cuts[j] >= num)):
j = j - 1
sax.append(idx2letter(j))
else:
j = 1
while (j < a_size and cuts[j] <= num):
j = j + 1
sax.append(idx2letter(j-1))
return ''.join(sax)
def idx2letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
return chr(97 + idx)
else:
raise ValueError('A wrong idx value supplied.')
def is_mindist_zero(a, b):
"""Check mindist."""
if len(a) != len(b):
return 0
else:
for i in range(0, len(b)):
if abs(ord(a[i]) - ord(b[i])) > 1:
return 0
return 1
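# Editor's sketch (hedged): an end-to-end pass through the helpers above --
# z-normalise a series, compress it with PAA, then discretise it with a
# three-letter alphabet:
if __name__ == '__main__':
    s = znorm(np.arange(12, dtype=float))
    print(ts_to_string(paa(s, 4), cuts_for_asize(3)))  # -> 'aacc' for this input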
|
zw-outliersdetec
|
/zw_outliersdetec-0.0.1.tar.gz/zw_outliersdetec-0.0.1/zw_outliersdetec/__sax_via_window.py
|
__sax_via_window.py
|
==================
Zaehlwerk jsMath
==================
A basic integration of jsMath from
http://www.math.union.edu/~dpvc/jsmath/ into Zope3
using zc.resourcelibrary.
|
zw.jsmath
|
/zw.jsmath-3.6a-0.9.tar.gz/zw.jsmath-3.6a-0.9/README.txt
|
README.txt
|
import os, urllib2, zipfile
from setuptools import setup, find_packages
name = 'zw.jsmath'
version = '3.6a-0.9'
patch = 'jsmath-3.6a-xhtml.patch'
jsmath_src_name = 'jsMath-3.6a.zip'
jsmath_fonts_src_name = 'jsMath-fonts-1.3.zip'
url_base = 'http://downloads.sourceforge.net/jsmath'
dest = os.path.join(os.path.dirname(__file__),
'src', 'zw', 'jsmath', 'jsMath')
dest_fonts = os.path.join(dest, 'fonts')
extpaths = []
if not os.path.exists(dest):
if not os.path.exists(jsmath_src_name):
x = urllib2.urlopen( url_base+'/'+jsmath_src_name).read()
open(jsmath_src_name, 'w').write(x)
zfile = zipfile.ZipFile(jsmath_src_name, 'r')
prefix = 'jsMath/'
lprefix = len('jsMath')
for zname in sorted(zfile.namelist()):
assert zname.startswith(prefix)
dname = dest + zname[lprefix:]
if dname[-1:] == '/':
os.makedirs(dname)
else:
open(dname, 'w').write(zfile.read(zname))
#extpaths.append(zname)
#apply xhtml patch
os.system( 'patch -d '+dest+' < '+\
os.path.join( os.path.dirname(__file__), patch ) )
if not os.path.exists(dest_fonts):
# unpack fonts
if not os.path.exists(jsmath_fonts_src_name):
x = urllib2.urlopen( url_base+'/'+jsmath_fonts_src_name).read()
open(jsmath_fonts_src_name, 'w').write(x)
zfile = zipfile.ZipFile(jsmath_fonts_src_name, 'r')
prefix = 'jsMath/fonts/'
lprefix = len('jsMath/fonts')
for zname in sorted(zfile.namelist()):
try:
assert zname.startswith(prefix)
except AssertionError:
continue
dname = dest_fonts + zname[lprefix:]
if dname[-1:] == '/':
os.makedirs(dname)
else:
open(dname, 'w').write(zfile.read(zname))
#extpaths.append(zname)
# Add files for packaging
lbase = len(os.path.dirname(dest))+1
for path, dirs, files in os.walk(dest):
prefix = path[lbase:]
for file in files:
extpaths.append(os.path.join(prefix, file))
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(name=name,
# Fill in project info below
version=version,
description="jsMath integration into zope3",
long_description=(
read('README.txt') + \
'\n\n' +
read('CHANGES.txt')),
keywords='zope3',
author='Gregor Giesen',
author_email='[email protected]',
url='https://launchpad.net/'+name,
license='GPLv3',
# Get more from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Environment :: Web Environment',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Framework :: Zope3',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
packages=find_packages('src'),
package_dir = {'': 'src'},
include_package_data=True,
package_data = {'zw.jsmath': extpaths},
namespace_packages = ['zw'],
zip_safe=False,
extras_require = dict(
test = [ 'zope.testing',
'zope.app.testing',
'zope.app.zcmlfiles',
'zope.testbrowser',
], ),
install_requires = ['setuptools',
'zc.resourcelibrary',
],
)
|
zw.jsmath
|
/zw.jsmath-3.6a-0.9.tar.gz/zw.jsmath-3.6a-0.9/setup.py
|
setup.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
tmpeggs = tempfile.mkdtemp()
try:
import pkg_resources
except ImportError:
ez = {}
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
import pkg_resources
cmd = 'from setuptools.command.easy_install import main; main()'
if sys.platform == 'win32':
cmd = '"%s"' % cmd # work around spawn lamosity on windows
ws = pkg_resources.working_set
assert os.spawnle(
os.P_WAIT, sys.executable, sys.executable,
'-c', cmd, '-mqNxd', tmpeggs, 'zc.buildout',
dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse('setuptools')).location
),
) == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout')
import zc.buildout.buildout
zc.buildout.buildout.main(sys.argv[1:] + ['bootstrap'])
shutil.rmtree(tmpeggs)
|
zw.jsmath
|
/zw.jsmath-3.6a-0.9.tar.gz/zw.jsmath-3.6a-0.9/bootstrap.py
|
bootstrap.py
|
# THIS IS A NAMESPACE PACKAGE.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
zw.jsmath
|
/zw.jsmath-3.6a-0.9.tar.gz/zw.jsmath-3.6a-0.9/src/zw/__init__.py
|
__init__.py
|
# THIS IS A PYTHON PACKAGE.
|
zw.jsmath
|
/zw.jsmath-3.6a-0.9.tar.gz/zw.jsmath-3.6a-0.9/src/zw/jsmath/__init__.py
|
__init__.py
|
'''
@Author: your name
@Date: 2019-12-05 09:46:15
@LastEditTime: 2019-12-05 11:12:02
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \zw\setup.py
'''
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
setup(
name='zw',
version='0.0.1',
author='Vvgoder',
author_email='[email protected]',
url='https://zhuanlan.zhihu.com/p/26159930',
description=u'吃枣药丸',
packages=['zw'],
install_requires=[],
entry_points={
'console_scripts': [
'jujube=zw:jujube',
'pill=zw:pill'
]
}
)
|
zw
|
/zw-0.0.1.tar.gz/zw-0.0.1/setup.py
|
setup.py
|
======================================
Zaehlwerk's Incoming mail processing
======================================
This small module enables a Zope process to react to incoming mail,
reading a Maildir or (in the future) an IMAP folder.
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/README.txt
|
README.txt
|
import os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
name = 'zw.mail.incoming'
version = '0.1.2.3'
setup(name=name,
# Fill in project info below
version=version,
description="Zope processing incoming mail",
long_description=(
read('README.txt') + \
'\n\n' +
read('src', 'zw', 'mail', 'incoming','README.txt') + \
'\n\n' +
read('CHANGES.txt')),
keywords='zope3',
author='Gregor Giesen',
author_email='[email protected]',
url='https://launchpad.net/'+name,
license='GPLv3',
# Get more from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Environment :: Web Environment',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Framework :: Zope3',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
],
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['zw', 'zw.mail'],
include_package_data=True,
zip_safe=False,
extras_require = dict(
test = [ 'zope.testing',
'zope.app.testing',
'zope.app.zcmlfiles',
'zope.testbrowser',
# 'z3c.testsetup',
], ),
install_requires = ['setuptools',
'zope.component',
'zope.i18nmessageid',
'zope.interface',
'zope.schema',
'zope.sendmail',
'zope.app.appsetup',
'z3c.schema',
],
entry_points="""
[console_scripts]
dropmail=zw.mail.incoming.script:main
""",
scripts=[],
)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/setup.py
|
setup.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
tmpeggs = tempfile.mkdtemp()
try:
import pkg_resources
except ImportError:
ez = {}
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
import pkg_resources
cmd = 'from setuptools.command.easy_install import main; main()'
if sys.platform == 'win32':
cmd = '"%s"' % cmd # work around spawn lamosity on windows
ws = pkg_resources.working_set
assert os.spawnle(
os.P_WAIT, sys.executable, sys.executable,
'-c', cmd, '-mqNxd', tmpeggs, 'zc.buildout',
dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse('setuptools')).location
),
) == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout')
import zc.buildout.buildout
zc.buildout.buildout.main(sys.argv[1:] + ['bootstrap'])
shutil.rmtree(tmpeggs)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/bootstrap.py
|
bootstrap.py
|
# THIS IS A NAMESPACE PACKAGE.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/__init__.py
|
__init__.py
|
# THIS IS A NAMESPACE PACKAGE.
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/__init__.py
|
__init__.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
from zope.interface import classProvides
from zope.schema.interfaces import IVocabularyFactory
from zope.app.component.vocabulary import UtilityVocabulary
from zw.mail.incoming.interfaces import IInbox
class InboxNames(UtilityVocabulary):
classProvides(IVocabularyFactory)
interface = IInbox
nameOnly = True
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/vocabulary.py
|
vocabulary.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
from zope import schema
from zope.component import adapter, queryUtility
from zope.component.interface import provideInterface
from zope.component.zcml import handler
from zope.configuration.exceptions import ConfigurationError
from zope.configuration.fields import Path, Tokens
from zope.interface import Interface
from zope.app.appsetup.bootstrap import getInformationFromEvent
from zope.app.appsetup.interfaces import IDatabaseOpenedWithRootEvent
from zw.mail.incoming.interfaces import IInbox
from zw.mail.incoming.inbox import MaildirInbox
from zw.mail.incoming.processor import IncomingMailProcessor
class IIncomingMailProcessorDirective(Interface):
"""This directive register an event on IDataBaseOpenedWithRoot
to launch an incoming mail processor.
"""
name = schema.TextLine(
title = _( u'label-IIncomingMailProcessorDirective.name',
u"Name" ),
description = _( u'help-IIncomingMailProcessorDirective.name',
u"Specifies the name of the mail processor." ),
default = u"Incoming Mail",
required = False )
pollingInterval = schema.Int(
title = _( u"Polling Interval" ),
description = _( u"How often the mail sources are checked for "
u"new messages (in milliseconds)" ),
default = 5000 )
sources = Tokens(
title = _( u"Sources" ),
description = _( u"Iterable of names of IInbox utilities." ),
required = True,
value_type = schema.TextLine(
title = _( u"Inbox utility name" )
)
)
def incomingMailProcessor(_context, sources, pollingInterval = 5000,
name = u"Incoming Mail" ):
@adapter(IDatabaseOpenedWithRootEvent)
def createIncomingMailProcessor(event):
db, conn, root, root_folder = getInformationFromEvent(event)
inboxes = []
for source_name in sources:
inbox = queryUtility(IInbox, source_name)
if inbox is None:
raise ConfigurationError("Inbox %r is not defined." % source_name)
inboxes.append(inbox)
thread = IncomingMailProcessor(root_folder, pollingInterval, inboxes)
thread.start()
_context.action(
discriminator = None,
callable = handler,
args = ('registerHandler',
createIncomingMailProcessor, (IDatabaseOpenedWithRootEvent,),
u'', _context.info),
)
_context.action(
discriminator = None,
callable = provideInterface,
args = ('', IDatabaseOpenedWithRootEvent)
)
class IInboxDirective(Interface):
"""A generic directive registering an inbox.
"""
name = schema.TextLine(
title = _( u'label-IInboxDirective.name',
u"Name" ),
description = _( u'help-IInboxDirective.name',
u"Specifies the Inbox name of the utility." ),
required = True )
class IMaildirInboxDirective(IInboxDirective):
"""Registers a new maildir inbox.
"""
path = Path(
title = _( u'label-IMaildirInboxDirective.path',
u"Maildir Path" ),
description = _( u'help-IMaildirInboxDirective.path',
u"Defines the path to the inbox maildir directory." ),
required = True )
def maildirInbox(_context, name, path):
_context.action(
discriminator = ('utility', IInbox, name),
callable = handler,
args = ('registerUtility',
MaildirInbox(path), IInbox, name)
)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/zcml.py
|
zcml.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
from zope import schema
from zope.interface import Attribute, Interface
from z3c.schema.email import RFC822MailAddress
class IInbox(Interface):
"""An inbox provides a very simple interface for our needs."""
def pop():
"""Return an email.message.Message converted message and remove it.
"""
def __iter__():
"""Iterate through all messages.
"""
def next():
"""Return an email.message.Message converted message.
"""
def delete(msg):
"""Delete msg from inbox.
"""
class IMaildirInbox(IInbox):
"""An inbox that receives its messages by an Maildir folder.
"""
queuePath = schema.TextLine(
title = _( u"Queue Path" ),
description = _( u"Pathname of the Maildir directory." ) )
class IIMAPInbox(IInbox):
"""An inbox that receives its message via an IMAP connection.
"""
class IIncomingMailProcessor(Interface):
"""A mail queue processor that raise IIncomingMailEvent on new messages.
"""
pollingInterval = schema.Int(
title = _( u"Polling Interval" ),
description = _( u"How often the mail sources are checked for "
u"new messages (in milliseconds)" ),
default = 5000 )
sources = schema.FrozenSet(
title = _( u"Sources" ),
description = _( u"Iterable of inbox utilities." ),
required = True,
value_type = schema.Object(
title = _( u"Inbox source" ),
schema = IInbox
)
)
class IIncomingEmailEvent(Interface):
"""A new mail arrived.
"""
message = Attribute(u"""The new email.message message.""")
inbox = schema.Object(
title = _( u"The inbox" ),
description = _( u"The mail folder the message is contained in" ),
schema = IInbox )
root = Attribute(u"""The root object""")
class IIncomingEmailFailureEvent(IIncomingEmailEvent):
"""A new mail arrived with a failure.
"""
failures = schema.List(
title = _( u"Failure addresses" ),
description = _( u"Extracted list of failure addresses." ),
value_type = RFC822MailAddress(
title = u"Failure address" ),
)
delivery_report = Attribute(u"""The delivery report as email.message.Message.""")
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/interfaces.py
|
interfaces.py
|
zw.mail.incoming can be seen as the counterpart to zope.sendmail
but for processing incoming emails.
..
Local Variables:
mode: doctest
indent-tabs-mode: nil
sentence-end-double-space: t
fill-column: 70
End:
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/README.txt
|
README.txt
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
import email
from os import unlink
from zope.interface import implements
from zope.sendmail.maildir import Maildir
from zw.mail.incoming.interfaces import IMaildirInbox
class MaildirInbox(object):
implements(IMaildirInbox)
def __init__(self, path):
self.queuePath = path
self.maildir = Maildir(path, True)
def __repr__(self):
return '<%s %r>' % ( self.__class__.__name__, self.queuePath )
def pop(self):
filename = iter(self.maildir).next()
msg = email.message_from_file(
open(filename, 'r') )
unlink(filename)
return msg
def next(self):
filename = iter(self.maildir).next()
return email.message_from_file(
open(filename, 'r') )
def __iter__(self):
return iter(
[ email.message_from_file(open(filename, 'r')) \
for filename in self.maildir ] )
def delete(self, msg):
for filename in self.maildir:
if email.message_from_file(
open(filename, 'r') ).as_string() == msg.as_string():
unlink(filename)
break
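# Editor's sketch (hedged): a minimal read-only pass over a MaildirInbox; the
# maildir path below is a hypothetical example, not part of the package.
if __name__ == '__main__':
    inbox = MaildirInbox('/var/mail/example/Maildir')
    for message in inbox:
        print(message['Message-Id'])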
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/inbox.py
|
inbox.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""Usage: dropmail maildir_path
dropmail simply reads a mail from stdin and write it to maildir_path.
"""
__docformat__ = 'reStructuredText'
import sys
from zope.sendmail.maildir import Maildir
def usage(code, msg=''):
print >> sys.stderr, __doc__
if msg:
print >> sys.stderr, msg
sys.exit(code)
def main(*argv):
if len(argv) != 1:
usage(0)
md = Maildir(argv[0])
msg = sys.stdin.read()
writer = md.newMessage()
writer.write(msg)
writer.commit()
if __name__ == '__main__':
main(*sys.argv[1:])
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/script.py
|
script.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
import atexit
from time import sleep
from threading import Thread
import logging
import transaction
from zope.component import getUtility
from zope.component.interfaces import ComponentLookupError
from zope.event import notify
from zope.interface import implements
from mailman.Bouncers.BouncerAPI import ScanMessages
from zw.mail.incoming.events import NewEmailEvent, NewEmailFailureEvent
from zw.mail.incoming.interfaces import IIncomingMailProcessor, IInbox
class IncomingMailProcessor(Thread):
implements(IIncomingMailProcessor)
log = logging.getLogger("IncomingMailProcessorThread")
__stopped = False
def __init__(self, root, interval, inboxes):
Thread.__init__(self)
self.context = root
self.pollingInterval = interval
self.sources = tuple(inboxes)
def run(self, forever=True):
atexit.register(self.stop)
while not self.__stopped:
for box in self.sources:
msg = None
try:
msg = box.next()
failures = ScanMessages(None, msg)
if failures:
notify( NewEmailFailureEvent( msg, box, failures, self.context ) )
else:
notify( NewEmailEvent( msg, box, self.context ) )
except StopIteration:
# That's fine.
pass
except:
# Catch up any other exception to let this thread survive.
if msg is None:
self.log.error(
"Cannot access next message from inbox '%r'.",
box )
else:
self.log.error(
"Cannot process message '%s' from inbox '%r'.",
msg['Message-Id'], box )
else:
self.log.info(
"Message '%s' from inbox '%r' processed.",
msg['Message-Id'], box )
transaction.commit()
else:
if forever:
sleep(self.pollingInterval/1000.)
if not forever:
break
def stop(self):
self.__stopped = True
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/processor.py
|
processor.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('zw.mail.incoming')
try:
from email.message import Message
except ImportError: #Py24 compability
from email.Message import Message
from zope.interface import implements
from zw.mail.incoming.interfaces import IIncomingEmailEvent, \
IIncomingEmailFailureEvent
class NewEmailEvent(object):
implements(IIncomingEmailEvent)
def __init__(self, msg, inbox, root):
self.message = msg
self.inbox = inbox
self.root = root
def extractDeliveryReport(msg):
assert isinstance(msg, Message), \
"Message is not an instance of 'email.message.Message'."
for part in msg.walk():
if part['Content-Type'] == 'message/delivery-status':
return part
class NewEmailFailureEvent(NewEmailEvent):
implements(IIncomingEmailFailureEvent)
def __init__(self, msg, inbox, failures, root):
super(NewEmailFailureEvent, self).__init__(msg, inbox, root)
self.failures = failures
self.delivery_report = extractDeliveryReport(msg)
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/events.py
|
events.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
from zw.mail.incoming.events import NewEmailEvent, NewEmailFailureEvent
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/__init__.py
|
__init__.py
|
#-*- coding: utf-8 -*-
#############################################################################
# #
# Copyright (c) 2007-2009 Gregor Giesen <[email protected]> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
"""
$Id$
"""
__docformat__ = 'reStructuredText'
import doctest
import unittest
from zope.testing.doctestunit import DocFileSuite
from z3c.form import testing
def test_suite():
return unittest.TestSuite((
DocFileSuite(
'README.txt',
setUp=testing.setUp, tearDown=testing.tearDown,
optionflags = doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS,
),
))
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/zw/mail/incoming/tests.py
|
tests.py
|
# Copyright (C) 2001-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""LLNL's custom Sendmail bounce message."""
import re
import email
acre = re.compile(r',\s*(?P<addr>\S+@[^,]+),', re.IGNORECASE)
def process(msg):
for line in email.Iterators.body_line_iterator(msg):
mo = acre.search(line)
if mo:
return [mo.group('addr')]
return []
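# Editor's sketch (hedged): a quick check of the address pattern above against a
# made-up Sendmail-style log line; the address is illustrative only.
if __name__ == '__main__':
    mo = acre.search('record deleted, [email protected], from the alias database')
    print(mo.group('addr') if mo else None)  # -> [email protected]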
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/LLNL.py
|
LLNL.py
|
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Microsoft's `SMTPSVC' nears I kin tell."""
import re
from cStringIO import StringIO
scre = re.compile(r'transcript of session follows', re.IGNORECASE)
def process(msg):
if msg.get_content_type() <> 'multipart/mixed':
return None
# Find the first subpart, which has no MIME type
try:
subpart = msg.get_payload(0)
except IndexError:
# The message *looked* like a multipart but wasn't
return None
data = subpart.get_payload()
if isinstance(data, list):
# The message is a multi-multipart, so not a matching bounce
return None
body = StringIO(data)
state = 0
addrs = []
while 1:
line = body.readline()
if not line:
break
if state == 0:
if scre.search(line):
state = 1
if state == 1:
if '@' in line:
addrs.append(line)
return addrs
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/Microsoft.py
|
Microsoft.py
|
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Something which claims
X-Mailer: <SMTP32 vXXXXXX>
What the heck is this thing? Here's a recent host:
% telnet 207.51.255.218 smtp
Trying 207.51.255.218...
Connected to 207.51.255.218.
Escape character is '^]'.
220 X1 NT-ESMTP Server 208.24.118.205 (IMail 6.00 45595-15)
"""
import re
import email
ecre = re.compile('original message follows', re.IGNORECASE)
acre = re.compile(r'''
( # several different prefixes
user\ mailbox[^:]*: # have been spotted in the
|delivery\ failed[^:]*: # wild...
|unknown\ user[^:]*:
|undeliverable\ +to
|delivery\ userid[^:]*:
)
\s* # space separator
(?P<addr>[^\s]*) # and finally, the address
''', re.IGNORECASE | re.VERBOSE)
def process(msg):
mailer = msg.get('x-mailer', '')
if not mailer.startswith('<SMTP32 v'):
return
addrs = {}
for line in email.Iterators.body_line_iterator(msg):
if ecre.search(line):
break
mo = acre.search(line)
if mo:
addrs[mo.group('addr')] = 1
return addrs.keys()
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/SMTP32.py
|
SMTP32.py
|
# Copyright (C) 2002-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Recognizes (some) Microsoft Exchange formats."""
import re
import email.Iterators
scre = re.compile('did not reach the following recipient')
ecre = re.compile('MSEXCH:')
a1cre = re.compile('SMTP=(?P<addr>[^;]+); on ')
a2cre = re.compile('(?P<addr>[^ ]+) on ')
def process(msg):
addrs = {}
it = email.Iterators.body_line_iterator(msg)
# Find the start line
for line in it:
if scre.search(line):
break
else:
return []
# Search each line until we hit the end line
for line in it:
if ecre.search(line):
break
mo = a1cre.search(line)
if not mo:
mo = a2cre.search(line)
if mo:
addrs[mo.group('addr')] = 1
return addrs.keys()
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/Exchange.py
|
Exchange.py
|
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""This appears to be the format for Novell GroupWise and NTMail
X-Mailer: Novell GroupWise Internet Agent 5.5.3.1
X-Mailer: NTMail v4.30.0012
X-Mailer: Internet Mail Service (5.5.2653.19)
"""
import re
from email.Message import Message
from cStringIO import StringIO
acre = re.compile(r'<(?P<addr>[^>]*)>')
def find_textplain(msg):
if msg.get_content_type() == 'text/plain':
return msg
    if msg.is_multipart():
for part in msg.get_payload():
if not isinstance(part, Message):
continue
ret = find_textplain(part)
if ret:
return ret
return None
def process(msg):
    if msg.get_content_type() != 'multipart/mixed' or not msg['x-mailer']:
return None
addrs = {}
# find the first text/plain part in the message
textplain = find_textplain(msg)
if not textplain:
return None
body = StringIO(textplain.get_payload())
    while True:
line = body.readline()
if not line:
break
mo = acre.search(line)
if mo:
addrs[mo.group('addr')] = 1
        elif '@' in line:
            # No <addr> brackets; assume the address is the first word.
            i = line.find(' ')
            if i == 0:
                # Continuation line (starts with a space) -- skip it.
                continue
            if i < 0:
                # The whole line is the address; drop the trailing newline.
                addrs[line.strip()] = 1
            else:
                addrs[line[:i]] = 1
return addrs.keys()
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/GroupWise.py
|
GroupWise.py
|
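A quick sketch of the recursive text/plain search in action, assuming the module above is importable as GroupWise and using Python 2's MIME classes to build a synthetic message:

# Synthetic multipart/mixed message to exercise find_textplain() and process().
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from GroupWise import find_textplain, process   # hypothetical import

outer = MIMEMultipart('mixed')
outer['X-Mailer'] = 'Novell GroupWise Internet Agent 5.5.3.1'
outer.attach(MIMEText('<p>status report</p>', 'html'))
outer.attach(MIMEText('<bounced@example.com>', 'plain'))

part = find_textplain(outer)          # recurses past the text/html part
print part.get_payload()              # prints <bounced@example.com>
print process(outer)                  # prints ['bounced@example.com']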
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Yahoo! has its own weird format for bounces."""
import re
import email.Iterators
from email.Utils import parseaddr
tcre = re.compile(r'message\s+from\s+yahoo\.\S+', re.IGNORECASE)
acre = re.compile(r'<(?P<addr>[^>]*)>:')
ecre = re.compile(r'--- Original message follows')
def process(msg):
# Yahoo! bounces seem to have a known subject value and something called
# an x-uidl: header, the value of which seems unimportant.
sender = parseaddr(msg.get('from', '').lower())[1] or ''
if not sender.startswith('mailer-daemon@yahoo'):
return None
addrs = []
# simple state machine
# 0 == nothing seen
# 1 == tag line seen
state = 0
for line in email.Iterators.body_line_iterator(msg):
line = line.strip()
if state == 0 and tcre.match(line):
state = 1
elif state == 1:
mo = acre.match(line)
if mo:
addrs.append(mo.group('addr'))
continue
mo = ecre.match(line)
if mo:
# we're at the end of the error response
break
return addrs
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/Yahoo.py
|
Yahoo.py
|
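The two-state scan is easiest to see on a complete synthetic bounce (hypothetical addresses; assumes the module above is importable as Yahoo):

# Synthetic Yahoo!-style bounce fed to the recognizer above.
import email
from Yahoo import process             # hypothetical import

raw = """\
From: MAILER-DAEMON@yahoo.com
To: list-bounces@example.com

Message from yahoo.com.
Unable to deliver message to the following address(es).

<first@example.com>:
Sorry your message cannot be delivered.

--- Original message follows.
<quoted@example.com>: must not be collected
"""
msg = email.message_from_string(raw)
print process(msg)                    # prints ['first@example.com']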
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Parse mystery style generated by MTA at caiwireless.net."""
import re
import email.Iterators
tcre = re.compile(r'the following recipients did not receive this message:',
re.IGNORECASE)
acre = re.compile(r'<(?P<addr>[^>]*)>')
def process(msg):
    if msg.get_content_type() != 'multipart/mixed':
return None
# simple state machine
# 0 == nothing seen
# 1 == tag line seen
state = 0
# This format thinks it's a MIME, but it really isn't
for line in email.Iterators.body_line_iterator(msg):
line = line.strip()
if state == 0 and tcre.match(line):
state = 1
elif state == 1 and line:
mo = acre.match(line)
if not mo:
return None
return [mo.group('addr')]
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/Caiwireless.py
|
Caiwireless.py
|
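Because these bounces advertise multipart/mixed without actually being MIME, a synthetic test message can itself be ill-formed (assumes the module above is importable as Caiwireless):

# Synthetic caiwireless-style bounce: multipart/mixed header, plain body.
import email
from Caiwireless import process       # hypothetical import

raw = """\
Content-Type: multipart/mixed
From: postmaster@caiwireless.net

The following recipients did not receive this message:
<someone@example.com>
"""
msg = email.message_from_string(raw)  # parses with a no-boundary defect
print process(msg)                    # prints ['someone@example.com']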
# Copyright (C) 1998-2009 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Parse bounce messages generated by Exim.
Exim adds an X-Failed-Recipients: header to bounce messages containing
an `addresslist' of failed addresses.
"""
from email.Utils import getaddresses
def process(msg):
    # Collect every X-Failed-Recipients: header and parse out the addresses.
    headers = msg.get_all('x-failed-recipients', [])
    return [addr for realname, addr in getaddresses(headers)]
|
zw.mail.incoming
|
/zw.mail.incoming-0.1.2.3.tar.gz/zw.mail.incoming-0.1.2.3/src/mailman/Bouncers/Exim.py
|
Exim.py
|
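Since Exim records failures purely in headers, the recognizer can be exercised with nothing but a header block (assumes the module above is importable as Exim):

# Exim marks failures in headers only, so a bare header block suffices.
import email
from Exim import process              # hypothetical import

msg = email.message_from_string(
    'X-Failed-Recipients: one@example.com, two@example.com\n\n')
print process(msg)                    # prints ['one@example.com', 'two@example.com']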