import os
import sys
import hashlib
import file_constants
import boto
import boto.s3
from boto.s3.key import Key
from colorama import init
from colorama import Fore
init()
IMAGE_FORMATS = file_constants.IMAGE_FORMATS
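# Typical usage (hypothetical article id), once the AWS environment
# variables read by get_aws_keys() below are set:
#   push_to_cloudfront("1234")
#   delete_from_cloudfront("1234")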
def push_to_cloudfront(article_id):
# Get the environment variable keys to connect with CloudFront.
bucket_name, access_key, secret_key = get_aws_keys()
# Establish connection with S3.
conn = boto.connect_s3(access_key, secret_key)
# Access the files in the folder for this article_id.
files_in_folder = next(os.walk("posts/" + article_id))[2]
    for file_name in files_in_folder:
        if file_name.endswith(IMAGE_FORMATS):
            # Push each image.
            push(article_id, file_name, conn, bucket_name)
def push(article_id, img_name, conn, bucket_name):
img_path = article_id + "/" + img_name
post_path = "posts/" + img_path
if not conn.lookup(bucket_name):
# Create the bucket and connect to it if it doesn't exist.
bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
else:
# Connect to the bucket.
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
# Give the key the same name as the image.
k.key = img_path
    # If the image path exists, check if the image has been modified.
    if k.exists():
        # Find the local md5.
        local_hash = hash_check(post_path)
# Access cloudfront md5.
cloudfront_hash = bucket.get_key(img_path).etag[1:-1]
if local_hash != cloudfront_hash:
            print('Updating ' + img_path + ' in Amazon S3 bucket ' + bucket_name)
k.set_contents_from_filename(post_path)
else:
# If the image doesn't exist, add it.
        print('Uploading ' + img_path + ' to Amazon S3 bucket ' + bucket_name)
k.set_contents_from_filename(post_path)
def delete_from_cloudfront(article_id):
bucket_name, access_key, secret_key = get_aws_keys()
# Establish connection with S3.
conn = boto.connect_s3(access_key, secret_key)
if not conn.lookup(bucket_name):
# If the bucket doesn't exist, there is nothing to delete.
return
else:
# Connect to the bucket.
bucket = conn.get_bucket(bucket_name)
# Access the images in the S3 directory for this article_id.
for image in bucket.list(prefix = article_id):
if not os.path.exists("posts/" + image.name):
# If the image doesn't exist locally, delete it on CloudFront.
print("Deleting " + image.name + " from CloudFront")
bucket.delete_key(image.name)
def hash_check(post_path):
# Computes the local image's md5.
hasher = hashlib.md5()
    with open(post_path, 'rb') as image_file:
        hasher.update(image_file.read())
return hasher.hexdigest()
def get_aws_keys():
# Get the AWS bucket name.
try:
bucket = os.environ["ZENDESK_BUCKET_NAME"]
except KeyError:
print(Fore.RED + "Please set the environment variable ZENDESK_BUCKET_NAME" + Fore.RESET)
sys.exit(1)
# Get the AWS access key.
try:
access = os.environ["AWS_ACCESS_KEY"]
except KeyError:
print(Fore.RED + "Please set the environment variable AWS_ACCESS_KEY" + Fore.RESET)
sys.exit(1)
# Get the AWS secret key.
try:
secret = os.environ["AWS_SECRET_KEY"]
except KeyError:
print(Fore.RED + "Please set the environment variable AWS_SECRET_KEY" + Fore.RESET)
sys.exit(1)
return [bucket, access, secret] | zendesk-help-center-backer | /zendesk-help-center-backer-0.2.tar.gz/zendesk-help-center-backer-0.2/zendesk/scripts/cloudfront_images.py | cloudfront_images.py |
import sys
import click
import requests
@click.group()
@click.pass_context
@click.option(
"-o",
"--organization",
help=(
"Zendesk organization name. "
"Usually the zendesk.com subdomain (obscura in obscura.zendesk.com)"
),
required=True,
envvar="ZREDACTOR_ORGANIZATION",
)
@click.option(
"-e",
"--email",
help=(
"A Zendesk agent email address. "
"Usually the email you use to login to Zendesk as an agent"
),
required=True,
envvar="ZREDACTOR_EMAIL",
)
@click.option(
"-t",
"--token",
help="Zendesk API token",
required=True,
envvar="ZREDACTOR_TOKEN",
)
@click.option("-i", "--ticket-id", help="Zendesk ticket ID", required=True)
@click.option(
"-r",
"--dry-run",
help="Only show what would be done, but do not actually do anything",
is_flag=True,
)
def redact(ctx, organization, email, token, ticket_id, dry_run):
ctx.ensure_object(dict)
ctx.obj[
"base_url"
] = f"https://{organization}.zendesk.com/api/v2/tickets/{ticket_id}/comments"
ctx.obj["auth"] = f"{email}/token", token
ctx.obj["dry_run"] = dry_run
@redact.command()
@click.pass_context
@click.argument("snippets", nargs=-1, required=False)
@click.option("-f", "--file", type=click.File("r"), required=False)
def snippets(ctx, snippets, file):
if not snippets and not file:
raise click.exceptions.UsageError(
"Either 'SNIPPETS' argument(s) or '-f' / '--file' option must be specified"
)
if snippets and file:
raise click.exceptions.UsageError(
"Only one of 'SNIPPETS' argument(s) or '-f' / '--file' option must be specified"
)
base_url = ctx.obj["base_url"]
auth = ctx.obj["auth"]
dry_run = ctx.obj["dry_run"]
if file:
snippets = [s.strip() for s in file.readlines() if s.strip()]
snippets_found = False
for s in snippets:
snippet_found = False
for c in get_comments(base_url, auth):
if s in c["body"]:
snippet_found = True
snippets_found = True
print(f"Found snippet '{s}' in comment {c['id']}")
if dry_run:
print("Running dry, skipping")
else:
print("Redacting snippet")
handle_resp(
requests.put(
f"{base_url}/{c['id']}/redact.json",
json={"text": s},
auth=auth,
),
error="Redaction failed",
)
if not snippet_found:
print(f"Snippet '{s}' not found in ticket")
if not snippets_found:
print("None of the provided text snippets were found in the ticket")
@redact.command()
@click.pass_context
def attachments(ctx):
base_url = ctx.obj["base_url"]
auth = ctx.obj["auth"]
dry_run = ctx.obj["dry_run"]
attachments_found = False
for c in get_comments(base_url, auth):
for a in c["attachments"]:
attachments_found = True
print(f"Found attachment {a['id']} in comment {c['id']}")
if dry_run:
print("Running dry, skipping")
else:
print("Redacting attachment")
handle_resp(
requests.put(
f"{base_url}/{c['id']}/attachments/{a['id']}/redact.json",
auth=auth,
),
error="Redaction failed",
)
if not attachments_found:
print("Ticket contains not attachments")
def get_comments(base_url, auth):
resp = requests.get(base_url, auth=auth)
handle_resp(resp, "Failed to get ticket comments", fail=True)
return resp.json().get("comments", [])
def handle_resp(resp, error, fail=False):
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
print(f"{error}: {e}")
if fail:
sys.exit(1) | zendesk-redactor | /zendesk-redactor-0.0.3.tar.gz/zendesk-redactor-0.0.3/redactor.py | redactor.py |
# zendesk-redactor
`zendesk-redactor` is a command-line interface for the Zendesk [Ticket
Redaction](https://www.zendesk.com/apps/support/ticket-redaction/) app.
## Motivation
The usability of the Ticket Redaction app is not so good. The redaction form does not support submitting multi-line
input, so if you have multiple text snippets that you need to redact, you'd have to input them into the field and click
the `Redact` button for each of them separately, one-by-one. This is very inefficient and time-consuming.
This tool tries to remedy this usability oversight by allowing you to redact multiple text snippets at once by either
providing them as command-line arguments, or by writing them into a file and supplying the path to this file as a
command-line option.
The tool also allows you to redact attachments, although it is currently not possible to do this selectively - it's
either all of them or none of them.
This tool is not a silver bullet though. If you have many text snippets that you need to redact, picking them out of the
ticket is still a cumbersome manual process. But this tool will at least allow you to redact all of them at once,
instead of clicking `Redact` for each of them separately.
## Installation
It's Python, so make sure you have Python 3 installed, and run:
```
$ pip install zendesk-redactor
```
Although not strictly necessary, you might want to either create a virtualenv or use something like
[pipx](https://github.com/pipxproject/pipx).
## Usage
### Authentication
In order to authenticate to Zendesk API you must provide the organization name, which usually is the subdomain part of
your Zendesk URL before `zendesk.com` (e.g. `obscura` for `https://obscura.zendesk.com`), your agent email address, and
an API [token](https://developer.zendesk.com/rest_api/docs/support/introduction#using-a-zendesk-api-token).
These can be provided in two ways.
1. As command-line options:
```
$ zredactor --organization obscura --email [email protected] --token BigSecret42
```
2. As environment variables:
```
export ZREDACTOR_ORGANIZATION=obscura
export [email protected]
export ZREDACTOR_TOKEN=BigSecret42
```
### Redacting text snippets
The following command will redact all occurrences of the text snippets `foo`, `bar`, `baz` in the ticket with ID 1742:
```
$ zredactor --organization obscura --email [email protected] --token BigSecret42 --ticket-id 1742 snippets foo bar baz
```
Alternatively, if you use environment variables for authentication:
```
$ zredactor --ticket-id 1742 snippets foo bar baz
```
The following command will redact all occurrences of the text snippets provided in a file `/tmp/to_redact.txt` in the
ticket with ID 1742:
```
$ zredactor --organization obscura --email [email protected] --token BigSecret42 --ticket-id 1742 snippets -f /tmp/to_redact.txt
```
Alternatively, with authentication environment variables set:
```
$ zredactor --ticket-id 1742 snippets -f /tmp/to_redact.txt
```
The file must contain one snippet per line.
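For example, a hypothetical `/tmp/to_redact.txt` might look like this:
```
[email protected]
4111 1111 1111 1111
my-secret-token
```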
### Redacting attachments
The following command will redact all attachments in ticket with ID 1742:
```
$ zredactor --organization obscura --email [email protected] --token BigSecret42 --ticket-id 1742 attachments
```
Alternatively, with authentication environment variables set:
```
$ zredactor --ticket-id 1742 attachments
```
Note that currently it is not possible to redact attachments selectively.
| zendesk-redactor | /zendesk-redactor-0.0.3.tar.gz/zendesk-redactor-0.0.3/README.md | README.md |
from zendesk_ticket_viewer.ticket import ZendeskTicket
from zendesk_ticket_viewer.api import ZendeskApi
from zdesk import Zendesk
import os
''' Module description
The entry point for the application drives core functionality:
Display a single zendesk ticket
Display all zendesk tickets
'''
BORDER = '==================================================='
DISPLAY_SINGLE_TICKET = '1'
DISPLAY_ALL_TICKETS = '2'
EXIT = '9'
CONNECTION_ERROR_MSG = ''
def main():
set_up()
while True:
main_menu()
selection = input(': ')
        if selection == DISPLAY_SINGLE_TICKET:
            display_single_ticket()
        elif selection == DISPLAY_ALL_TICKETS:
            display_all_tickets()
        elif selection == EXIT:
print('\nGood Bye - Have a nice day ^_^')
break
else:
print('Invalid selection\n')
# Welcome user and attempts to authenticate connection with zendesk api
def set_up():
print('Welcome to the Zendesk Ticket Viewer')
print('...Establishing connection')
ZendeskApi.attempt_connection(os, Zendesk)
# Handles getting a ticket's raw data from the Zendesk api and then attempts to display it
def display_single_ticket():
id_number = input('Enter ticket id number: ')
raw_response = ZendeskApi.single_ticket(id_number)
if raw_response is False:
return False
display(raw_response)
# Handles getting all tickets raw data from the Zendesk api and then attempts to display them
def display_all_tickets():
all_tickets_raw_data = ZendeskApi.all_tickets()
if all_tickets_raw_data is False:
return False
    for raw_ticket_data in all_tickets_raw_data:
        display(raw_ticket_data)
# Formats raw ticket data received from api, into a user friendly format
def display(raw_response):
try:
formatted_ticket = ZendeskTicket(raw_response)
print(formatted_ticket)
return True
except ValueError:
pass
return False
def main_menu():
print('{}\n{}\n\n{}\n{}\n{}\n\n{}\n{}' .format(BORDER,
'Main Menu',
'Please enter one of the following options:',
'1. Display a single ticket',
'2. Display all tickets',
'9. Quit out of program',
BORDER))
if __name__ == '__main__':
main() | zendesk-ticket-viewer | /zendesk_ticket_viewer-0.5.tar.gz/zendesk_ticket_viewer-0.5/zendesk_ticket_viewer/main.py | main.py |
import abc
from zdesk.zdesk import AuthenticationError, ZendeskError, RateLimitError
from requests.exceptions import ConnectionError
''' Module description
Deals with Zdesk (api wrapper) calls and prepares authentication details
currently implements
ticket_show
ticket_list
Handles custom exceptions raised by Zdesk
AuthenticationError
ZendeskError
ConnectionError
Uses os.getenv to get authentication details from the local environment variables
'''
# Environment variables used for authentication
ZENDESK_ACCESS_TOKEN = 'ZENDESK_ACCESS_TOKEN'
ZENDESK_EMAIL_ADDRESS = 'ZENDESK_EMAIL_ADDRESS'
ZENDESK_SUBDOMAIN = 'ZENDESK_SUBDOMAIN'
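# Example shell setup (hypothetical values) before launching the viewer:
#   export ZENDESK_ACCESS_TOKEN=your-zendesk-api-token
#   export [email protected]
#   export ZENDESK_SUBDOMAIN=yourcompany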
# Interface to be used for tickets
class Api(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def attempt_connection(self, os, Zendesk) -> bool:
        pass

    @abc.abstractmethod
    def all_tickets(self) -> bool:
        pass

    @abc.abstractmethod
    def single_ticket(self, id) -> bool:
        pass
# Handles establishing authentication with Zendesk and deals with making api requests
class ZendeskApi(Api):
connection = None
access_token = None
zendesk_email_address = None
zendesk_subdomain = None
# Attempts to authenticate with Zendesk and then checks if the attempt was successful.
@classmethod
def attempt_connection(cls, os, Zendesk):
if not ZendeskApi.set_credentials(os):
print('\nSystem variables have not been set or have been incorrectly named.\n'
'System variable names should be: '
'{}\n {}\n {}\n' .format(ZENDESK_ACCESS_TOKEN, ZENDESK_EMAIL_ADDRESS, ZENDESK_SUBDOMAIN))
return False
# Attempt to authenticate
ZendeskApi.connection = \
Zendesk(cls.zendesk_subdomain, cls.zendesk_email_address, cls.access_token, True)
# Test if the authentication/connection was successful
if cls.make_request(cls.connection.tickets_list, None) is not False:
return True
return False
# Returns all existing tickets from currently authenticated account
@classmethod
def all_tickets(cls):
response = cls.make_request(cls.connection.tickets_list, None)
if response is False:
return False
return response['tickets']
# Returns a single ticket found by id
@classmethod
def single_ticket(cls, id):
response = cls.make_request(cls.connection.ticket_show, id)
if response is False:
return False
return response['ticket']
# Helpers
@classmethod
def set_credentials(cls, os):
cls.access_token = os.getenv(ZENDESK_ACCESS_TOKEN)
cls.zendesk_email_address = os.getenv(ZENDESK_EMAIL_ADDRESS)
cls.zendesk_subdomain = os.getenv(ZENDESK_SUBDOMAIN)
# Checking required system variables exist and are set
if (cls.access_token is None or cls.zendesk_email_address is None
or cls.zendesk_subdomain is None):
return False
return True
# Attempts to process an api request and handle any errors raised by the zendesk api wrapper
@staticmethod
def make_request(request, params):
try:
            if params is None:
return request()
return request(params)
except AuthenticationError as e:
print("\nIncorrect authentication details - See raw response for more information")
print("%s %s" % ('Raw response: ', e))
return False
except ZendeskError as e:
print("\nBad HTTP response - See raw response for more information.\n")
print("%s %s" % ('Raw response: ', e))
return False
except ConnectionError as e:
print("\nNo internet connection - See raw response for more information.")
print("%s %s" % ('Raw response: ', e))
return False
except RateLimitError as e:
print("\nRate limit Reached - See raw response for more information.")
print("%s %s" % ('Raw response: ', e))
return False
        print('Unknown error encountered when making api request')
return False | zendesk-ticket-viewer | /zendesk_ticket_viewer-0.5.tar.gz/zendesk_ticket_viewer-0.5/zendesk_ticket_viewer/api.py | api.py |
# Hive Zendesk API
# Copyright (c) 2008-2017 Hive Solutions Lda.
#
# This file is part of Hive Zendesk API.
#
# Hive Zendesk API is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Zendesk API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Zendesk API. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2017 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import base64
import appier
from . import user
from . import ticket
from . import ticket_field
DOMAIN = "domain.zendesk.com"
""" The default domain to be used when no other
domain value is provided to the constructor """
class API(
appier.API,
user.UserAPI,
ticket.TicketAPI,
ticket_field.TicketFieldAPI
):
def __init__(self, *args, **kwargs):
appier.API.__init__(self, *args, **kwargs)
self.domain = appier.conf("ZD_DOMAIN", DOMAIN)
self.username = appier.conf("ZD_USERNAME", None)
self.token = appier.conf("ZD_TOKEN", None)
self.username = kwargs.get("username", self.username)
self.token = kwargs.get("token", self.token)
self.base_url = "https://%s/api/v2/" % self.domain
def build(
self,
method,
url,
data = None,
data_j = None,
data_m = None,
headers = None,
params = None,
mime = None,
kwargs = None
):
auth = kwargs.pop("auth", True)
if auth: headers["Authorization"] = self.get_authorization()
def get_authorization(self):
        if not self.username or not self.token: return None
payload = "%s/token:%s" % (self.username, self.token)
payload = appier.legacy.bytes(payload)
authorization = base64.b64encode(payload)
authorization = appier.legacy.str(authorization)
return "Basic %s" % authorization | zendesk_api | /zendesk_api-0.1.5.tar.gz/zendesk_api-0.1.5/src/zendesk/base.py | base.py |
==================================================================
zendesk: Communicator with the Zendesk API
==================================================================
TODO: Modify the whole file as necessary.
This is a "long description" file for the package that you are creating.
If you submit your package to PyPi, this text will be presented on the `public page <http://pypi.python.org/pypi/python_package_boilerplate>`_ of your package.
Note: This README has to be written using `reStructured Text <http://docutils.sourceforge.net/rst.html>`_, otherwise PyPi won't format it properly.
Installation
------------
The easiest way to install most Python packages is via ``easy_install`` or ``pip``::
$ easy_install zendesk
Usage
-----
TODO: This is a good place to start with a couple of concrete examples of how the package should be used.
The boilerplate code provides a dummy ``main`` function that prints out the word 'Hello'::
>> from zendesk import main
>> main()
When the package is installed via ``easy_install`` or ``pip`` this function will be bound to the ``zendesk`` executable in the Python installation's ``bin`` directory (on Windows - the ``Scripts`` directory).
| zendesk_integration | /zendesk_integration-0.2.4.tar.gz/zendesk_integration-0.2.4/README.rst | README.rst |
import requests
import json
from inflection import singularize
from helper import separete_into_groups
from custom_exceptions import BulkExceededLimit, RequestException
class BaseZenDesk(object):
def __init__(self, hostname, user, password, timeout=15):
self.host = "https://{}.zendesk.com/api/v2/".format(hostname)
self.auth = (user, password)
self.timeout = timeout
def _request(self, resource, method='get', **kwargs):
        '''
        Send an HTTP request to the Zendesk API and return the raw
        requests response.
        '''
_method = getattr(requests, method.lower())
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "{host}{resource}".format(host=self.host, resource=resource)
return _method(url, auth=self.auth, data=json.dumps(kwargs),
timeout=self.timeout, headers=headers)
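# Example usage (hypothetical credentials; passing `dict` as the model class
# is just for this sketch - any callable accepting keyword args works):
#   base = BaseZenDesk('yourcompany', '[email protected]', 'secret')
#   users = BaseRest(base, 'users', dict)
#   first_page = users.get(page=1, per_page=10)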
class BaseRest(object):
def __init__(self, base, resource, class_object):
self.base = base
self.resource = resource
self.class_object = class_object
def get(self, resource=None, page=1, per_page=10, **kwargs):
resource = resource or self.resource
endpoint = "{}.json?page={}&per_page={}".format(
resource, page, per_page)
resp = self.base._request(endpoint, **kwargs)
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
resp = resp.json()
items = resp.pop(self.resource)
        resp.update(items=[self.class_object(**x) for x in items])
return resp
def get_one(self, id_object, resource=None):
resource = resource or self.resource
url = "{}/{}.json".format(resource, id_object)
resp = self.base._request(url)
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
return self.class_object(**resp.json())
def get_one_query(self, query, resource=None):
resource = resource or self.resource
endpoint = "{}.json?query={}".format(resource, query)
resp = self.base._request(endpoint)
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
resp_json = resp.json()
if resp_json.get('count') != 1:
            resp_json.update({'error': 'Search returned multiple objects!'})
raise RequestException(resp.status_code, content=resp_json)
item = resp_json.pop(self.resource)[0]
return self.class_object(**item)
def show_many(self, resource=None, name_field='ids', fields=[]):
if len(fields) > 100:
            raise Exception('The identifiers limit is 100!')
resource = resource or self.resource
url = "{}/show_many.json?{}={}".format(
            resource, name_field, ','.join(fields))
resp = self.base._request(url)
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
resp = resp.json()
items = resp.pop(self.resource)
        resp.update(items=[self.class_object(**x) for x in items])
return resp
def create(self, resource=None, **kwargs):
resource = resource or self.resource
url = "{}.json".format(resource)
singular_resource = singularize(resource)
data = {
singular_resource: kwargs,
}
resp = self.base._request(url, 'POST', **data)
if resp.status_code != 201:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
return self.class_object(**resp.json().get(singularize(resource)))
def create_many(self, list_objects, resource=None):
jobs = []
resource = resource or self.resource
url = "{}/create_many.json".format(resource)
groups = separete_into_groups(list_objects, 100)
        has_pending_groups = True
        while has_pending_groups:
            errors = []
            for group in groups:
                try:
                    data = {resource: group}
                    resp = self.base._request(url, 'POST', **data)
                    jobs.append(resp.json())
                except Exception:
                    errors.append(group)
            if errors:
                # Retry only the groups that failed.
                groups = errors
            else:
                has_pending_groups = False
return jobs
def put(self, id_object, resource=None, **kwargs):
resource = resource or self.resource
url = "{}/{}.json".format(resource, id_object)
data = {singularize(resource): kwargs}
resp = self.base._request(url, 'PUT', **data)
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
return self.class_object(**resp.json())
def bulk_put_many(self, documents, resource=None, limit=100):
if limit > 100:
raise BulkExceededLimit
resource = resource or self.resource
url = "{}/update_many.json".format(resource)
groups = separete_into_groups(documents, limit)
        has_pending_groups = True
        jobs = []
        while has_pending_groups:
            errors = []
            for group in groups:
                try:
                    data = {resource: group}
                    resp = self.base._request(url, 'PUT', **data)
                    jobs.append(resp.json())
                except Exception:
                    errors.append(group)
            if errors:
                # Retry only the groups that failed.
                groups = errors
            else:
                has_pending_groups = False
return jobs
def put_many(self):
raise exceptions.NotImplemented("Method not implemented!")
def delete(self, id_object, resource=None):
resource = resource or self.resource
url = "{}/{}.json".format(resource, id_object)
resp = self.base._request(url, 'DELETE')
if resp.status_code != 200:
            content = resp.json() if resp.content else {}
raise RequestException(resp.status_code, content=content)
def delete_many(self, list_ids, resource=None, name_field='ids', limit=100):
resource = resource or self.resource
groups = separete_into_groups(list_ids, limit)
        has_pending_groups = True
        responses = []
        while has_pending_groups:
            errors = []
            for group in groups:
                try:
                    url = "{}/destroy_many.json?{}={}".format(
                        resource, name_field, ','.join(group))
                    resp = self.base._request(url, 'DELETE')
                    responses.append(resp.json())
                except Exception:
                    errors.append(group)
            if errors:
                # Retry only the groups that failed.
                groups = errors
            else:
                has_pending_groups = False
return responses | zendesk_integration | /zendesk_integration-0.2.4.tar.gz/zendesk_integration-0.2.4/zendesk/base.py | base.py |
<p align="center">
<img src="https://github.com/Mjvolk3/Zendron/raw/main/notes/assets/drawio/logo.drawio.png" />
</p>
[](https://badge.fury.io/py/zendron)
## Showcase

1. Show how you can structure a paper using note references in Dendron.
2. Install Zendron and import references from the relevant library.
3. Cite while you write, and view all relevant Zotero metadata, annotations, and comment notes with hover.
4. Compile paper to `.docx`, `.pdf`, and `.html` with Pandoc.
5. Find relevant papers via VsCode search.
## Introduction
- This package was developed for porting Zotero annotations and metadata to markdown. These markdown notes are then brought into a [Dendron](https://www.dendron.so/) hierarchy for integration with vault notes. We recommend using the package within [Visual Studio Code](https://code.visualstudio.com/). The end goal is to get a two-way sync between notes in Zotero and notes in Dendron, but this has some difficulties and limitations that are taking some time to address. For now only a one-way sync from Zotero to Dendron is supported.
## Install Instructions
- It is recommended to build a [conda env](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) for installation.
- Install [Dendron CLI](https://wiki.dendron.so/notes/RjBkTbGuKCXJNuE4dyV6G/).
- `npm install -g @dendronhq/dendron-cli@latest`
- Install the zendron
- `python -m pip install zendron`
## Zotero Local Setup
- To start you need [Better BibTeX for Zotero](https://retorque.re/zotero-better-bibtex/installation/)
- This allows pinning of BibTeX keys.
- Go to `Zotero > Settings... > Advanced > General > Config Editor`
- Accept the risks 
- In the Search, type `autoPinDelay` and change the integer value from 0 (default) to 1. 
## Zotero API key
- [Zotero API key](https://www.zotero.org/settings/keys)
- We recommend setting up your Zotero API key with the following settings to allow for full functionality.
- Personal Library
- [x] Allow library access.
- [x] Allow notes access.
- [x] Allow write access.
- Default Group Permissions
- [x] Read/Write

- This key can then be copy-pasted into the configuration file. You should add your key to `.gitignore` to prevent others from accessing your Zotero database. If the key is lost you can always generate a new one.
## Zotero and File Import Configuration
All zendron configuration is handled in [config.yml](https://github.com/Mjvolk3/Zendron/raw/main/conf/config.yaml).
```yml
library_id : 4932032 # Zotero library ID
library_type : group # [user, group] library
api_key : FoCauOvWMlNdYmpYuY5JplTw # Zotero API key
collection: null # Name of Zotero Collection, null for entire library
item_types: [journalArticle, book, preprint, conferencePaper, report] # List of item types according to [pyzotero](https://pyzotero.readthedocs.io/en/latest/)
local_image_path: /Users/<username>/Zotero/cache # Local path for importing annotated images
dendron_limb: zendron.import # Dendron import limb e.g. zendron.import.paper-title.annotations.md
zotero_comment_title: zendron comment # fixed for now... needed for eventual 2-way sync.
pod_path: zotero_pod # Name of dendron pod, removed after completion of import. We will later add configuration for this to remain, which will allow non-Dendron users to import markdown Zotero notes in a structured hierarchy.
```
- `library_id` - Integer identifier of library. This is the number that matches the name of a library.
- [User ID](https://www.zotero.org/settings/keys).
- For group ID visit [Zotero Groups](https://www.zotero.org/groups/), click on your desired group, and copy the id from the URL.
- `library_type`: `group` for group libraries and `user` for user library.
- `api_key`: Use the API Key obtained from [Zotero API KEY](README.md#zotero-api-key).
- `collection`: This can be the name of any collection or subcollection in the specified library. If there are multiple collections or subcollections with the same name, the import will arbitrarily choose one. To make sure you are importing the desired collection, make sure the name of the collection is unique in the Zotero library.
- `item_types`: Zotero item types to import according to [pyzotero](https://pyzotero.readthedocs.io/en/latest/) syntax.
- `local_image_path`: Path to annotated images. `/Users/<username>/Zotero/cache` is the default path on MacOS.
- `dendron_limb`: This is the period-delimited hierarchy prefix to all file imports for Dendron, e.g. `root.zotero.import.<paper_title>.annotations.md`.
- `zotero comment title` - IGNORE FOR NOW. Eventually needed for 2-way sync.
- `pod_path` - IGNORE FOR NOW. Eventually needed for markdown only import, without Dendron integration.
## Basic Usage
There are only two basic commands that work as of now.
- `zendron`
- This command should only be run in the root directory of the workspace.
- This command imports notes according to a defined [config.yml](https://github.com/Mjvolk3/Zendron/raw/main/conf/config.yaml). Once the command is run the first time the user needs to modify their configuration `./conf/config.yaml`. All required configs are marked with a comment `# STARTER CONFIG` upon initialization.
- Notes are imported with a `## Time Created` heading. This allows for stable references from other notes within the note vault. We autogenerate a `*.comments.md` that should be used for taking any additional notes within Dendron. Additional notes taken within the metadata file (`notes/zendron.import.<paper-title>.md`) or the `*.annotations.md` will be overwritten the next time `zendron` is run. All files downstream of the import except `*.comments.md` should be treated as read only. We have plans to explicitly make them read only soon.
- Upon import, notes and tags are left as stubs. To create these notes run `> Dendron: Doctor` then `createMissingLinkedNotes`. It is best practice to correct tag warnings before doing this.
- `zendron remove=true`
- This command removes imported notes and associated links. It works by removing all notes downstream of `dendron_limb`, except for `comments.md`. There is some difficulty removing other files that get created, because they are separate from the `dendron_limb`. These files include `user.*.md`, which comes from bibtex keys, and `tags.*.md`, which comes from metadata and annotation tags. For now, we don't remove tags, but we do remove bibtex keys (`<user.bibtex_key>.md`).
- There are more complicated removals that could be desired, so we plan to eventually change this from a `bool` to a `str`. A typical session is shown below.
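A typical session, run from the workspace root, might look like this (a minimal sketch; edit `./conf/config.yaml` between the first and second runs, as described above):
```bash
# first run initializes ./conf/config.yaml with STARTER CONFIG values
zendron
# after editing the config, import your Zotero notes
zendron
# later, remove the imported notes and associated links
zendron remove=true
```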
## Miscellaneous
- The `zendron_cache` is used for removal of `<user.bibtex_key>.md`. If it is deleted and you run remove, the `<user.bibtex_key>.md` files will not be removed. In this case you can run `zendron` again, then run `zendron remove=true` again.
- If a run fails, sometimes a `.hydra` directory with the given configuration will be generated in the root dir. This isn't an issue, but it contains the API information and should therefore be added to the `.gitignore` as a safeguard. In addition, these files can be used to inspect the reason for the failure.
- `__main__.log` is generated after running `zendron`; this can also be deleted as you please. It is also useful for inspecting any failures to import.
## Troubleshooting
- If you are having trouble with startup, you can use the [Zendron-Test](https://github.com/Mjvolk3/Zendron-Test) template and try to reproduce your issues there. Simply click on `Use this template`, clone the repo, and try to run `zendron` there. This will allow us to catch things we overlooked for different user workspace configurations, etc. Once you have tried to reproduce your issues there, please submit an issue on [Zendron](https://github.com/Mjvolk3/Zendron).
| zendron | /zendron-1.1.13.tar.gz/zendron-1.1.13/README.md | README.md |
# Zenduty Python SDK
Python SDK to communicate with zenduty endpoints
## Installing
Installation can be done through pip, as follows:
```
$ pip install zenduty-api
```
or you may grab the latest source code from GitHub:
```
$ git clone https://github.com/Zenduty/zenduty-python-sdk
$ python3 setup.py install
```
## Contents
1) zenduty/api : contains the functions to communicate with zenduty API endpoints
2) zenduty/ : contains the common required files
3) bin/ : contains sample script to run zenduty functions
## Getting started
Before you begin making use of the SDK, make sure you have your Zenduty Access Token.
You can then import the package into your python script.
```
import zenduty
```
Based on the endpoint you want to communicate with, create an object of the required class. For example, to create an incident:
```
api_obj = zenduty.IncidentsApi(zenduty.ApiClient('your-access-token'))
body = {"service":"c7fff4c5-2def-41e8-9120-c63f649a825c",
"escalation_policy":"a70244c8-e343-4dd0-8d87-2f767115568a",
"user":null,
"title":"Name of trial",
"summary":"summary of trial"}
response = api_obj.create_incident(body)
print(response.data)
print(response.status_code)
```
Refer to the comments under each function for a detailed description of its parameters.
It is important to note that each function returns a urllib3.response.HTTPResponse object.
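Since each call returns a raw `urllib3` response, you typically decode the body yourself. A minimal sketch building on the example above (the `json` import and the status check are our own additions, not part of the SDK):
```
import json

response = api_obj.create_incident(body)
if response.status in (200, 201):
    # response.data is raw bytes; decode and parse the JSON payload.
    incident = json.loads(response.data.decode('utf-8'))
    print(incident)
```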
## Running tests
There is a sample skeleton code in bin/. Add your access token to it and modify the object and function name for testing purposes.
## License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details
| zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/README.md | README.md |
import six
class OpenApiException(Exception):
"""The base exception class for all OpenAPIExceptions"""
class ApiTypeError(OpenApiException, TypeError):
def __init__(self, msg, path_to_item=None, valid_classes=None,
key_type=None):
""" Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
            path_to_item (list): a list of keys and indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset
"""
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list) the path to the exception in the
received_data dict. None if unset
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiValueError, self).__init__(full_msg)
class ApiKeyError(OpenApiException, KeyError):
def __init__(self, msg, path_to_item=None):
"""
Args:
msg (str): the exception message
Keyword Args:
path_to_item (None/list) the path to the exception in the
received_data dict
"""
self.path_to_item = path_to_item
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiKeyError, self).__init__(full_msg)
class ApiException(OpenApiException):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
def render_path(path_to_item):
"""Returns a string representation of a path"""
result = ""
for pth in path_to_item:
if isinstance(pth, six.integer_types):
result += "[{0}]".format(pth)
else:
result += "['{0}']".format(pth)
return result | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/exceptions.py | exceptions.py |
from zenduty.api_client import ApiClient
class IntegrationsApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def get_integrations_in_service(self,team_id,service_id):
#Returns the integrations in a service
#params str team_id: unique id of team
#params str service_id: unique id of service
return self.api_client.call_api('GET','/api/account/teams/{}/services/{}/integrations/'.format(team_id,service_id))
def create_integration(self,team_id,service_id,body):
#Creates a new integration for a given service in a team
#params str team_id: unique id of team
#params str service_id: unique id of service
#params dict body: contains the details of the new integration
# Sample body:
# {"name":"asdf",
# "summary":"asdf",
# "application":"27c9800c-2856-490d-8119-790be1308dd4"}
return self.api_client.call_api('POST','/api/account/teams/{}/services/{}/integrations/'.format(team_id,service_id),body=body)
def get_integrations_by_id(self,team_id,service_id,integration_id):
#Returns an integration belonging to a service in a team, identified by id
#params str team_id: unique id of team
#params str service_id: unique id of service
#params str integration_id: unique id of integration
return self.api_client.call_api('GET','/api/account/teams/{}/services/{}/integrations/{}/'.format(team_id,service_id,integration_id))
def get_alerts_in_integration(self,team_id,service_id,integration_id):
#Retruns alerts in a particular integration
#params str team_id: unique id of team
#params str service_id: unique id of service
#params str integration_id: unique id of integration
return self.api_client.call_api('GET','/api/account/teams/{}/services/{}/integrations/{}/alerts/'.format(team_id,service_id,integration_id)) | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/api/integrations_api.py | integrations_api.py |
from zenduty.api_client import ApiClient
class IncidentsApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def get_incidents(self,body):
#Returns the incidents from your zenduty account
#params dict body: contains all the required details of your account
# Sample body:
# {'page':1,
# 'status':5,
# 'team_id':['a2c6322b-4c1b-4884-8f7a-a7f270de98cb'],
# 'service_ids':[],
# 'user_ids':[]}
return self.api_client.call_api('GET','/api/incidents/',body=body)
def get_incidents_by_number(self,incident_number):
#Returns the incidents belonging to a given incident number
#params int incident_number: incident number of event
return self.api_client.call_api('GET','/api/incidents/{}/'.format(incident_number))
def get_incident_alerts(self,incident_number):
#Returns all alerts of a particular incident
#params int incident_number: incident number of event
return self.api_client.call_api('GET','/api/incidents/{}/alerts/'.format(incident_number))
def get_incident_notes(self,incident_number):
#Gets the notes regarding an incident, identified by incident number
#params int incident_number: incident number of event
return self.api_client.call_api('GET','/api/incidents/{}/note/'.format(incident_number))
def acknowledge_or_resolve_incidents(self,incident_number,body):
#Used to acknowledge or resolve incident, identified by incident number
        #params int incident_number: incident number of event
#params dict body: contains the changed values of incident
# Sample body:
# {'status':3,
# 'incident_number':12}
return self.api_client.call_api('PATCH','/api/incidents/{}/'.format(incident_number),body=body)
def create_incident(self,body):
#Used to create an incident for a particular service, identified by id
#params dict body: contains necessary details for creating incident
# Sample body:
# {"service":"c7fff4c5-2def-41e8-9120-c63f649a825c",
# "escalation_policy":"a70244c8-e343-4dd0-8d87-2f767115568a",
# "user":null,
# "title":"Name of trial",
# "summary":"summary of trial"}
# escalation_policy,service, title and summary are required fields.
# if escalation_policy is not set (set to None then), then assigned_to is required, as follows
# {"service":"b1559a26-c51f-45a1-886d-f6caeaf0fc7e",
# "escalation_policy":null,
# "assigned_to":"826032d6-7ccd-4d58-b114-f",
# "title":"Name of trial",
# "summary":"Summary of trial"}
return self.api_client.call_api('POST','/api/incidents/',body=body) | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/api/incidents_api.py | incidents_api.py |
from zenduty.api_client import ApiClient
class EscalationPoliciesApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def get_escalation_policies(self,team_id):
#Returns the escalation policies belonging to one team
#params str team_id: unique id of team
return self.api_client.call_api('GET','/api/account/teams/{}/escalation_policies/'.format(team_id))
def create_escalation_policy(self,team_id,body):
#Creates an escalation policy for one team
#params str team_id: unique id of team
#params dict body: contains the required details for creating escalation policy
#Sample body:
# {'name':name,
# 'summary':summary,
# 'description':description,
# 'rules':rules,
# 'unique_id':unique_id,
# 'team':team_id}
return self.api_client.call_api('POST','/api/account/teams/{}/escalation_policies/'.format(team_id),body=body)
def get_escalation_policy_by_id(self,team_id,ep_id):
#Returns escalation_policy identified by id
#params str team_id: unique id of team
#params str ep_id: unique id of escalation policy
return self.api_client.call_api('GET','/api/account/teams/{}/escalation_policies/{}/'.format(team_id,ep_id))
def update_escalation_policy(self,team_id,ep_id,body):
#Updates escalation policy, identified by id
#params str team_id: unique id of team
        #params str ep_id: unique id of escalation policy
#params dict body: contains all the updated values
# 'rules' is a required part of the body
#Sample body:
# body={'summary':'changes description',
# 'rules':[{"delay":1,
# "targets":[{"target_type":2,
# "target_id":"826032d6-7ccd-4d58-b114-f"}],
# "position":1,
# "unique_id":"c0dad09b-321b-491e-9c23-f816c7bd0339"}]}
return self.api_client.call_api('PATCH','/api/account/teams/{}/escalation_policies/{}/'.format(team_id,ep_id),body=body)
def delete_escalation_policy(self,team_id,ep_id):
#Deletes escalation policy, identified by id
#params str team_id: unique id of team
#params str ep_id: unique id of escalation policy
return self.api_client.call_api('DELETE','/api/account/teams/{}/escalation_policies/{}/'.format(team_id,ep_id)) | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/api/escalationpolicies_api.py | escalationpolicies_api.py |
from zenduty.api_client import ApiClient
class ServicesApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def get_service_for_team(self,team_id):
#Returns all the services in a team
        #params str team_id: unique id of team
return self.api_client.call_api('GET','/api/account/teams/{}/services/'.format(team_id))
def add_new_service_in_team(self,team_id,body):
        #Adds a new service to a given team, identified by id
#params str team_id: unique id of team
#params dict body: contains the details of the new service to be added
#Sample body
#{"name":"Name of service",
# "description":"Description of service",
# "integrations":[{"application":"27c9800c-2856-490d-8119-790be1308dd4",
# "name":"API",
# "summary":"Edit summary for this integration"}],
# "escalation_policy":"5c9b6288-c105-418d-970b-91a93d0e919a",
# "acknowledgement_timeout":1,
# "auto_resolve_timeout":1}
return self.api_client.call_api('POST','/api/account/teams/{}/services/'.format(team_id),body=body)
def get_services_by_id(self,team_id,service_id):
#Returns a particular service from a team, identified by id
#params str team_id: unique id of team
#params str service_id: unique id of service
return self.api_client.call_api('GET','/api/account/teams/{}/services/{}/'.format(team_id,service_id))
def update_service(self,team_id,service_id,body):
#Updates the existing service in a team
#params str team_id: unique id of team
#params str service_id: unique id of service
#params dict body: contains the updated details of services
#Sample body:
#{"unique_id":"bc808ce3-46c0-41d0-bf1f-f405fdd0c1c3",
# "auto_resolve_timeout":0,
# "acknowledgement_timeout":0,
# "status":1,
# "escalation_policy":"5c9b6288-c105-418d-970b-91a93d0e919a"}
return self.api_client.call_api('PATCH','/api/account/teams/{}/services/{}/'.format(team_id,service_id),body=body)
def delete_service_from_team(self,team_id,service_id):
#Deletes a particular service from a team
#params str team_id: unique id of team
        #params str service_id: unique id of service
return self.api_client.call_api('DELETE','/api/account/teams/{}/services/{}/'.format(team_id,service_id)) | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/api/services_api.py | services_api.py |
from zenduty.api_client import ApiClient
class SchedulesApi(object):
def __init__(self,api_client=None):
if api_client is None:
api_client=ApiClient()
self.api_client = api_client
def get_schedules(self,team_id):
#Returns the schedules in a particular team, identified by id
#params str team_id: unique id of a team
return self.api_client.call_api('GET','/api/account/teams/{}/schedules/'.format(team_id))
def create_schedule(self, team_id,body):
#Creates a schedule for a team
#params str team_id: unique id of team
#params dict body: contains the details of the schedule to be created
#Sample body:
#{"name":"Name of schedule",
# "summary":"summar of schedule",
# "time_zone":"Asia/Kolkata",
# "team":"d4a777db-5bce-419c-a725-420ebb505c54",
# "layers":[]}
return self.api_client.call_api('POST','/api/account/teams/{}/schedules/'.format(team_id),body=body)
def get_schedule_by_id(self,team_id,schedule_id):
        #Returns a particular schedule from a team, identified by id
#params str team_id: unique id of a team
        #params str schedule_id: unique id of schedule
return self.api_client.call_api('GET','/api/account/teams/{}/schedules/{}/'.format(team_id,schedule_id))
def update_schedule(self,team_id,schedule_id,body):
#Updates the schedule details for a given team, identified by id
#params str team_id: unique id of a team
        #params str schedule_id: unique id of schedule
#params dict body: contains the updated values of schedule
# 'unique_id' and 'team' are required. Other fields are just those which have been changed
#Sample body:
#{"name":"Name of schedule",
# "summary":"summar of schedule",
# "time_zone":"Asia/Kamchatka",
# "team":"d4a777db-5bce-419c-a725-420ebb505c54",
# "unique_id":"f9b34bd3-818a-4b98-9d8a-04d8bd501cd0",
# "layers":[]}
return self.api_client.call_api('PATCH','/api/account/teams/{}/schedules/{}/'.format(team_id,schedule_id),body=body)
def delete_schedule(self,team_id,schedule_id):
#Deletes a schedule from a team
#params str team_id:unique id of team
#params str schedule_id: unique id of schedule
return self.api_client.call_api('DELETE','/api/account/teams/{}/schedules/{}/'.format(team_id,schedule_id)) | zenduty-api | /zenduty-api-0.2.tar.gz/zenduty-api-0.2/zenduty/api/schedules_api.py | schedules_api.py |
import requests
import logging
import json
logging.basicConfig(level=logging.NOTSET)
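# Example usage (hypothetical access token):
#   client = ZenefitsClient("your-zenefits-access-token")
#   departments = client.get_department()
#   ada = client.get_people("Ada_Lovelace")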
class ZenefitsClient():
"""
Our base client. You should generate an Access Token as described in the Zenefits
API docs
"""
def __init__(self, access_token, sandbox=False):
self.sandbox = sandbox
self.access_token = access_token
# Caches to prevent redundant api calls
self.cache = {}
########################################################################
# Caching Functions #
########################################################################
def check_cache(self, ref_object_str):
"""
Check the cache if items exist
Args:
ref_object_str (string): zenefits ref_object of the objects (i.e. "/core/people")
Returns:
bool: Whether the string is actually in the cache, used to determine if we've called a url
"""
return True if ref_object_str in list(self.cache.keys()) else False
def get_from_cache(self, ref_object_str, object_id=None):
"""
Grab the objects (or object) from cache
Args:
            ref_object_str (string): zenefits ref_object of the objects (i.e. "/core/people")
object_id (string): string id of the zenefits object
Returns:
dict: Return the dictionary of the ids, or just the object if given id
"""
items = self.cache.get(ref_object_str)
if items:
if object_id:
return items.get(object_id)
return items
return {}
def insert_into_cache(self, ref_object_str, zenefit_objects):
"""
Populate our client cache with the given ref_object and a list of zenefit objects.
This function will fully replace the cached ref_object
{
ref_object_str: {
id: zenefit_object,
id: zenefit_object
}
}
Should be called after get_url_objects to prevent redundant calls
Args:
ref_object_str (string): The cache ref_object_str, zenefits ref_object_str of the objects (i.e. "/core/people")
zenefit_objects (list): List of the zenefit objects we want to cache
"""
cached_items = {}
for zenefit_object in zenefit_objects:
obj_id = zenefit_object.get("id")
cached_items[obj_id] = zenefit_object
self.cache[ref_object_str] = cached_items
logging.info(f"Cached updated for {ref_object_str}")
def populate_cache(self, zenefits_reference_obj):
"""
Populate the cache with the items from the given Zenefits reference object.
Will replace existing items in the cache.
Args:
zenefits_reference_obj (dict): Zenefits Reference Object
"""
# We always call all items under the assumption that querying one item
# through this client will lead to additional queries
# TODO implement a faster one item pull method
# Fetch all my objects
ref_object_str = zenefits_reference_obj.get("ref_object")
data = self.get_url_objects("https://api.zenefits.com" + ref_object_str)
self.insert_into_cache(ref_object_str, data)
logging.info(f"Populating Cache for {ref_object_str}")
########################################################################
# Helper Functions #
########################################################################
def populate_reference_object(self, zenefits_reference_object):
"""
Populate the given Zenefits object (one level deep only)
Args:
zenefits_reference_object (dict): A single Zenefits reference object to populate
example:
{
"object": "/meta/ref/list",
"ref_object": "/core/people",
"url": "https://api.zenefits.com/core/departments/1/people"
}
Returns:
[dict]: The populated Zenefits reference object
example:
{
"name": {},
"name2": {}
}
"""
# First check our cache if we've called our cache before
in_cache = self.check_cache(zenefits_reference_object.get("ref_object"))
# If we haven't populated our cache, populate our cache
if not in_cache:
self.populate_cache(zenefits_reference_object)
# Grab our cached objects
cached_object = self.get_from_cache(zenefits_reference_object.get("ref_object"))
# Determine what type of object we want to return (list or detail)
object_type = zenefits_reference_object.get("object").split("/")[-1]
# If we are a list type object, return the cached entry
if object_type == "list":
return cached_object
# If I'm a detail object, return just the detailed object
elif object_type == "detail":
# Grab the ID from the url
# Sometimes it's after a /, sometimes it's after a =
if not zenefits_reference_object.get("url"):
return {}
object_id = zenefits_reference_object.get("url").split("/")[-1]
if '=' in object_id:
object_id = zenefits_reference_object.get("url").split("=")[-1]
return cached_object.get(object_id)
########################################################################
# API Call Functions #
########################################################################
def get_url_objects(self, url, includes=None):
"""
Retrieve the base objects returned from the given url. Takes into account paginated requests and pulls
all objects. Can pass in a list of parameters in includes to populate the reference items
Note that Zenefits API restricts what fields can be in includes, which is why for our main client
calls we don't use it.
Args:
url (str): URL that we want to pull from
includes (list): List of parameter strings telling Zenefits to populate objects
Returns:
zenefit_objects (list): List of zenefit objects
"""
zenefit_objects = []
payload = {}
headers = {
'Authorization': f"Bearer {self.access_token}",
"Content-Type": "application/json; charset=utf-8"
}
# Populate the given fields from zenefits
if includes:
includes_string = includes[0]
for parameter in includes[1:]:
includes_string = includes_string + " " + parameter
payload["includes"] = includes_string
response = requests.get(url, auth=None, params=payload, headers=headers, verify=True, timeout=10.000)
if response.status_code != 200:
logging.ERROR("Objects not fetched")
return response
# Yes they return a nested object with another nested object called data
data = json.loads(response.content.decode("utf-8"))['data']
        # Check to see if there is a second nested object called 'data'.
        # Check membership in keys to account for the case where data['data'] is
        # an empty list, i.e. as long as data['data'] exists, even if empty, use it.
        if 'data' in list(data.keys()):
            zenefit_objects.extend(data['data'])
        else:
            zenefit_objects.extend(data)
        # Check our paginated url; while one exists, continue extending the list until I have all objects
next_url = data.get('next_url')
if next_url:
zenefit_objects.extend(self.get_url_objects(next_url))
return zenefit_objects
########################################################################
# Core Client Call Functions #
########################################################################
def get_department(self, name=None, populate=True):
"""
Retrieve all departments unless name specified. Includes populated references as objects.
We only populate one level deep (i.e. department: {people: { company }} will have the info
for department and people, but company will be the url that Zenefits sends back.
Args:
name (str, optional): Name of department to return. Defaults to None.
populate (bool, optional): Populate reference objects. Defaults to True.
Returns:
[dict]: Dictionary of department names and values
"""
url = "http://api.zenefits.com/core/departments"
data = self.get_url_objects(url)
# Data is a list of objects. For related items (i.e. People) Zenefits returns a URL. We need to populate them.
self.populate_cache(
{
"object": "/meta/ref/list",
"ref_object": "/core/departments",
"url": f"{url}"
}
)
populated_data = {}
for department in data:
# Populate my references to other objects
populated_department = {}
for key, value in department.items():
# If my value is an object and I want to populate (include objects)
if type(value) is dict and populate:
populated_department[key] = self.populate_reference_object(value)
# If my value isn't an object, just set to the value
else:
populated_department[key] = value
# Add my newly populated dictionary to my populated data
populated_data[department['name']] = populated_department
if name:
return populated_data[name]
return populated_data
def get_people(self, name=None, populate=True):
"""
        Retrieve all people unless a name is specified. Includes populated references as objects.
        We only populate one level deep (i.e. department: {people: {company}} will have the info
        for department and people, but company will be the url that Zenefits sends back).
Args:
name (str, optional): FirstName_LastName of a person. Defaults to None.
populate (bool, optional): Populate reference objects. Defaults to True.
Returns:
[dict]: Dictionary of peoples FirstName_LastName and values
"""
url = "http://api.zenefits.com/core/people"
data = self.get_url_objects(url)
# Populate our cache
self.populate_cache(
{
"object": "/meta/ref/list",
"ref_object": "/core/people",
"url": f"{url}"
}
)
populated_data = {}
for people in data:
# Populate my references to other objects
populated_people = {}
for key, value in people.items():
# If my value is an object and I want to populate (include objects)
                if isinstance(value, dict) and populate:
populated_people[key] = self.populate_reference_object(value)
# If my value isn't an object, just set to the value
else:
populated_people[key] = value
# Add my newly populated dictionary to my populated data
full_name = populated_people['first_name'] + '_' + populated_people['last_name']
populated_data[full_name] = populated_people
if name:
return populated_data[name]
return populated_data | zenefits-client | /zenefits_client-0.0.2.tar.gz/zenefits_client-0.0.2/zenefits_client/client.py | client.py |
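
# Illustrative usage (sketch). `client` stands for an instance of the client
# class these methods belong to, constructed with a valid access token; the
# variable name is illustrative.
#
#     engineering = client.get_department(name="Engineering")
#     everyone = client.get_people(populate=False)
#     person = client.get_people(name="Jane_Doe")  # keys are FirstName_LastName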
ZenFeed
=======
Zen feed reader - inspired by zencancan.
Documentation is available at <http://fspot.github.io/zenfeed/>.
Screenshots
-----------
Feed list :

Feed view :

Article view :

Layout on smartphones :

Todo
----
- add import/export OPML feature
- test zenfeed with mysql and postgresql
- actually remove oldest entries when threshold is exceeded
| zenfeed | /zenfeed-0.4.0.tar.gz/zenfeed-0.4.0/README.md | README.md |
==============================================================================
zenfilter. Filter stdin to avoid excessive output
==============================================================================
:Info: This is the README file for zenfilter.
:Author: Shlomi Fish <[email protected]>
:Copyright: © 2016, Shlomi Fish.
:Date: 2021-09-01
:Version: 0.6.5
.. index: README
.. image:: https://travis-ci.org/shlomif/zenfilter.svg?branch=master
:target: https://travis-ci.org/shlomif/zenfilter
PURPOSE
-------
This small script filters long STDIN output, performing several functions
to keep track of the important parts and progress, which would be hard to
do with a shell script.
It is useful for filtering the output of verbose
Travis-CI ( https://travis-ci.org/ ) commands, but may be useful in other
contexts where there is a limit to the amount of kept output.
All arguments are optional:
* `--count-step=n`: displays `COUNT <tab> <step>` every n lines.
* `--last=n`: displays the last n lines prefixed with "LAST\t"
* `--filter=<regex pattern>`: displays matching lines with a "FOUND\t" prefix.
* `--suppress-last-on=<regex>`: suppress the last lines if their concatenated output matches the regex.
Examples:
::
python zenfilter.py --count-step=10 --last=200 --filter="[0-9]+"
python zenfilter.py --last=20
python zenfilter.py --last=25 --count-step=15
A use case scenario:
::
make 2>&1 | python zenfilter.py [args]
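
For illustration, feeding the three lines `alpha`, `beta 42` and `gamma`
through `zenfilter.py --filter="[0-9]+" --last=1` would produce output shaped
like this (tab-separated prefixes as documented above):

::

    FOUND   beta 42
    LAST    gamma
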
COPYRIGHT
---------
Copyright © 2016, Shlomi Fish.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the author of this software nor the names of
contributors to this software may be used to endorse or promote
products derived from this software without specific prior written
consent.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| zenfilter | /zenfilter-0.6.5.tar.gz/zenfilter-0.6.5/README.rst | README.rst |
=====================
Appendix C. Changelog
=====================
:Info: This is the changelog for zenfilter.
:Author: Shlomi Fish <[email protected]>
:Copyright: © 2016, Shlomi Fish.
:License: BSD (see /LICENSE or :doc:`Appendix B <LICENSE>`.)
:Date: 2021-09-01
:Version: 0.6.5
.. index:: CHANGELOG
GitHub holds releases, too
==========================
More information can be found on GitHub in the `releases section
<https://github.com/shlomif/zenfilter/releases>`_.
Version History
===============
0.6.5
The zenfilter() function now flushes stdout before exit (just in case).
0.6.4
print(flush=True) where appropriate.
0.6.3
Fix the script by using zenfilter/__init__.py
0.6.2
Rename the script from "zenfilter.py" to simply "zenfilter"
- Thanks to https://stackoverflow.com/questions/45114076/python-setuptools-using-scripts-keyword-in-setup-py
0.6.1
Reenable the tests and fix the script's shebang.
0.6.0
Converted to pydistman
0.4.0
Earlier versions based on the work of https://github.com/SpiritualForest/
Thanks!
| zenfilter | /zenfilter-0.6.5.tar.gz/zenfilter-0.6.5/CHANGELOG.rst | CHANGELOG.rst |
==============================
Appendix A. Contribution rules
==============================
:Info: Those are the contribution rules for zenfilter.
:Copyright: © 2012-2021, Chris Warrick.
:License: 3-clause BSD
.. index:: contributing
Do you want to contribute to this project? Great! I’d love to see some help,
but you must comply with some rules.
The key words “MUST”, “MUST NOT”, “REQUIRED”, “SHALL”, “SHALL
NOT”, “SHOULD”, “SHOULD NOT”, “RECOMMENDED”, “MAY”, and
“OPTIONAL” in this document are to be interpreted as described in
RFC 2119.
---------------
Issue reporting
---------------
.. index:: issues
GitHub Issues are the recommended way to report an issue. If you do not have an
account there, get one or mail me.
When pasting console sessions, you must paste them fully, *prompt-to-prompt*,
to see all the messages and your input. Trim only stuff that you are 1000%
sure that is not related to the project in question.
--------------------------------------------
General preparations, rules and pull process
--------------------------------------------
Prepare
=======
A GitHub account is recommended. Patches by mail are accepted, but I’d prefer
to work via GitHub.
.. _Rules:
Rules
=====
1. Commits must have short, informative and logical messages. Signoffs and
long messages are recommended. “Fix #xxx” is required if an issue
exists.
2. The following fancy Unicode characters should be used when
needed: ``— “ ” ‘ ’``. ``…`` should not appear in console output, but may
appear elsewhere.
3. For Python code, use the PEP 8 coding style and PEP 257 documentation style.
For other languages, K&R style applies. Braces are mandatory in all blocks
(even one-line blocks). Braces are on the same lines as class names and
function signatures. Use 4-space indents.
Request a Pull
==============
Done? Go hit the **Pull Request** button over on GitHub! And if you don’t
use GitHub, ``git format-patch``. Other formats are not accepted.
Your commit should be pulled up in a (longer) while. If I like it. Because
some commits may be bad. So, do your best not to do those bad commits.
| zenfilter | /zenfilter-0.6.5.tar.gz/zenfilter-0.6.5/docs/CONTRIBUTING.rst | CONTRIBUTING.rst |
=====================
Appendix C. Changelog
=====================
:Info: This is the changelog for zenfilter.
:Author: Shlomi Fish <[email protected]>
:Copyright: © 2016, Shlomi Fish.
:License: BSD (see /LICENSE or :doc:`Appendix B <LICENSE>`.)
:Date: 2021-09-01
:Version: 0.6.5
.. index:: CHANGELOG
GitHub holds releases, too
==========================
More information can be found on GitHub in the `releases section
<https://github.com/shlomif/zenfilter/releases>`_.
Version History
===============
0.1.0
Initial release.
| zenfilter | /zenfilter-0.6.5.tar.gz/zenfilter-0.6.5/docs/CHANGELOG.rst | CHANGELOG.rst |
=======================================================
Appendix B. License for zenfilter
=======================================================
:Info: This is the license for zenfilter.
:Author: Shlomi Fish <[email protected]>
:Copyright: © 2016, Shlomi Fish.
:License: BSD (see /LICENSE or :doc:`Appendix B <LICENSE>`.)
:Date: 2021-09-01
:Version: 0.6.5
.. index:: LICENSE
Copyright © 2016, Shlomi Fish.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the author of this software nor the names of
contributors to this software may be used to endorse or promote
products derived from this software without specific prior written
consent.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| zenfilter | /zenfilter-0.6.5.tar.gz/zenfilter-0.6.5/docs/LICENSE.rst | LICENSE.rst |
==============================
ZenginCode
==============================
|circle| |version|
The python implementation of ZenginCode.
ZenginCode is datasets of bank codes and branch codes for japanese.
Installation
==============
.. code-block:: bash
pip install zengin_code
Usage
==============
.. code-block:: python
from zengin_code import Bank
Bank.all # => OrderedDict([(u'0001', <zengin_code.bank.Bank object at 0x1044173d0>), ...
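
Individual banks can then be inspected. The attribute names used below
(``name`` and ``branches``) are assumptions about the library's object model,
not taken from the example above:

.. code-block:: python

    from zengin_code import Bank

    bank = Bank.all[u'0001']
    print(bank.name)
    for code, branch in bank.branches.items():
        print(code, branch.name)
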
Contributing
===============
Bug reports and pull requests are welcome on GitHub at https://github.com/zengin-code/zengin-py
License
===============
The package is available as open source under the terms of the `MIT License <http://opensource.org/licenses/MIT>`_ .
.. |circle| image:: https://img.shields.io/circleci/project/zengin-code/zengin-py.svg
:target: https://circleci.com/gh/zengin-code/zengin-py
.. |version| image:: https://img.shields.io/pypi/v/zengin_code.svg
:target: http://pypi.python.org/pypi/zengin_code/
:alt: latest version
| zengin-code | /zengin_code-1.1.0.20230802.tar.gz/zengin_code-1.1.0.20230802/README.rst | README.rst |
ZEngine Web Service Framework
=============================
Zengine is a BPMN-workflow-based framework with Tornado, RabbitMQ (AMQP), advanced permissions, extensible scaffolding features and more.
Built on top of the following major components:
- SpiffWorkflow_: Powerful workflow engine with BPMN 2.0 support.
- Tornado_: Tornado is a Python web framework and asynchronous networking library.
- Pyoko_: Django esque ORM for Riak KV store.
- RabbitMQ_: Fast, ultrasharp AMQP server written in Erlang.
.. _SpiffWorkflow: https://github.com/knipknap/SpiffWorkflow
.. _Tornado: http://www.tornadoweb.org/en/stable/
.. _Pyoko: https://github.com/zetaops/pyoko
.. _RabbitMQ: https://www.rabbitmq.com/
API Documentation
-----------------
`API Documentation`_
.. _`API Documentation`: http://zengine.readthedocs.org/en/latest/api-documentation.html
Troubleshooting
-------------------
`Troubleshooting for Common Problems`_
.. _`Troubleshooting for Common Problems`: http://zengine.readthedocs.org/en/latest/troubleshooting.html
Support
-------
Feel free to fork this and send back Pull Requests for any
defects or features that you want to contribute back.
Opening issues here is also recommended.
If you need to get the attention of the ZetaOps team send an email
to info ~at~ zetaops.io.
Commercial support from ZetaOps_ requires a valid support contract.
.. _ZetaOps: http://zetaops.io
Authors
-------
* Samuel Abels
* Gökhan Boranalp
* Ali Rıza Keleş
* Evren Esat Özkan
* Furkan Uyar
* Anıl Can Aydın
License
-------
ZEngine is licensed under the `GPL v3.0`_
.. _GPL v3.0: http://www.gnu.org/licenses/gpl-3.0.html
| zengine | /zengine-0.8.3.tar.gz/zengine-0.8.3/README.rst | README.rst |
import re
import struct
import textwrap
FORMAT = {
'2u1': ('uint8x2', 2),
'4u1': ('uint8x4', 4),
'2i1': ('sint8x2', 2),
'4i1': ('sint8x4', 4),
'2nu1': ('unorm8x2', 2),
'4nu1': ('unorm8x4', 4),
'2ni1': ('snorm8x2', 2),
'4ni1': ('snorm8x4', 4),
'2u2': ('uint16x2', 4),
'4u2': ('uint16x4', 8),
'2i2': ('sint16x2', 4),
'4i2': ('sint16x4', 8),
'2nu2': ('unorm16x2', 4),
'4nu2': ('unorm16x4', 8),
'2ni2': ('snorm16x2', 4),
'4ni2': ('snorm16x4', 8),
'2h': ('float16x2', 4),
'4h': ('float16x4', 8),
'1f': ('float32', 4),
'2f': ('float32x2', 8),
'3f': ('float32x3', 12),
'4f': ('float32x4', 16),
'1u': ('uint32', 4),
'2u': ('uint32x2', 8),
'3u': ('uint32x3', 12),
'4u': ('uint32x4', 16),
'1i': ('sint32', 4),
'2i': ('sint32x2', 8),
'3i': ('sint32x3', 12),
'4i': ('sint32x4', 16),
}
CULL_FACE = {
'front': 0x0404,
'back': 0x0405,
'front_and_back': 0x0408,
'none': 0,
}
MIN_FILTER = {
'nearest': 0x2600,
'linear': 0x2601,
'nearest_mipmap_nearest': 0x2700,
'linear_mipmap_nearest': 0x2701,
'nearest_mipmap_linear': 0x2702,
'linear_mipmap_linear': 0x2703,
}
MAG_FILTER = {
'nearest': 0x2600,
'linear': 0x2601,
}
TEXTURE_WRAP = {
'repeat': 0x2901,
'clamp_to_edge': 0x812F,
'mirrored_repeat': 0x8370,
}
COMPARE_MODE = {
'ref_to_texture': 0x884E,
'none': 0,
}
COMPARE_FUNC = {
'never': 0x0200,
'less': 0x0201,
'equal': 0x0202,
'lequal': 0x0203,
'greater': 0x0204,
'notequal': 0x0205,
'gequal': 0x0206,
'always': 0x0207,
}
BLEND_FUNC = {
'add': 0x8006,
'subtract': 0x800A,
'reverse_subtract': 0x800B,
'min': 0x8007,
'max': 0x8008,
}
BLEND_CONSTANT = {
'zero': 0,
'one': 1,
'src_color': 0x0300,
'one_minus_src_color': 0x0301,
'src_alpha': 0x0302,
'one_minus_src_alpha': 0x0303,
'dst_alpha': 0x0304,
'one_minus_dst_alpha': 0x0305,
'dst_color': 0x0306,
'one_minus_dst_color': 0x0307,
'src_alpha_saturate': 0x0308,
'constant_color': 0x8001,
'one_minus_constant_color': 0x8002,
'constant_alpha': 0x8003,
'one_minus_constant_alpha': 0x8004,
'src1_alpha': 0x8589,
'src1_color': 0x88F9,
'one_minus_src1_color': 0x88FA,
'one_minus_src1_alpha': 0x88FB,
}
STENCIL_OP = {
'zero': 0,
'keep': 0x1E00,
'replace': 0x1E01,
'incr': 0x1E02,
'decr': 0x1E03,
'invert': 0x150A,
'incr_wrap': 0x8507,
'decr_wrap': 0x8508,
}
STEP = {
'vertex': 0,
'instance': 1,
}
VERTEX_SHADER_BUILTINS = {
'gl_VertexID',
'gl_InstanceID',
'gl_DrawID',
'gl_BaseVertex',
'gl_BaseInstance',
}
UNIFORM_PACKER = {
0x1404: (1, 'i'),
0x8B53: (2, 'i'),
0x8B54: (3, 'i'),
0x8B55: (4, 'i'),
0x8B56: (1, 'i'),
0x8B57: (2, 'i'),
0x8B58: (3, 'i'),
0x8B59: (4, 'i'),
0x1405: (1, 'I'),
0x8DC6: (2, 'I'),
0x8DC7: (3, 'I'),
0x8DC8: (4, 'I'),
0x1406: (1, 'f'),
0x8B50: (2, 'f'),
0x8B51: (3, 'f'),
0x8B52: (4, 'f'),
0x8B5A: (4, 'f'),
0x8B65: (6, 'f'),
0x8B66: (8, 'f'),
0x8B67: (6, 'f'),
0x8B5B: (9, 'f'),
0x8B68: (12, 'f'),
0x8B69: (8, 'f'),
0x8B6A: (12, 'f'),
0x8B5C: (16, 'f'),
}
def loader(headless=False):
import glcontext
mode = 'standalone' if headless else 'detect'
return glcontext.default_backend()(glversion=330, mode=mode)
def calcsize(layout):
nodes = layout.split(' ')
if nodes[-1] == '/i':
nodes.pop()
stride = 0
for node in nodes:
if node[-1] == 'x':
stride += int(node[:-1])
continue
stride += FORMAT[node][1]
return stride
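
# For example: calcsize('3f 3f 2f') == 32 bytes per vertex, a pad node adds
# raw bytes (calcsize('3f 12x') == 24), and a trailing '/i' marker does not
# affect the computed stride.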
def bind(buffer, layout, *attributes):
nodes = layout.split(' ')
step = 'vertex'
if nodes[-1] == '/i':
step = 'instance'
nodes.pop()
res = []
offset = 0
idx = 0
for node in nodes:
if node[-1] == 'x':
offset += int(node[:-1])
continue
if len(attributes) == idx:
raise ValueError(f'Not enough vertex attributes for format "{layout}"')
location = attributes[idx]
format, size = FORMAT[node]
if location >= 0:
res.append({
'location': location,
'buffer': buffer,
'format': format,
'offset': offset,
'step': step,
})
offset += size
idx += 1
if len(attributes) != idx:
raise ValueError(f'Too many vertex attributes for format "{layout}"')
for x in res:
x['stride'] = offset
return res
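
# For example: bind(buf, '2f 2f', 0, 1) yields two attribute dicts sharing
# stride 16, with location 0 at offset 0 and location 1 at offset 8, both
# stepping per vertex; a layout ending in '/i' marks every attribute as
# per-instance (`buf` is an illustrative placeholder).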
def vertex_array_bindings(vertex_buffers, index_buffer):
res = [index_buffer]
for obj in vertex_buffers:
res.extend([obj['buffer'], obj['location'], obj['offset'], obj['stride'], STEP[obj['step']], obj['format']])
return tuple(res)
def resource_bindings(resources):
uniform_buffers = []
for obj in sorted((x for x in resources if x['type'] == 'uniform_buffer'), key=lambda x: x['binding']):
binding = obj['binding']
buffer = obj['buffer']
offset = obj.get('offset', 0)
size = obj.get('size', buffer.size - offset)
uniform_buffers.extend([binding, buffer, offset, size])
storage_buffers = []
for obj in sorted((x for x in resources if x['type'] == 'storage_buffer'), key=lambda x: x['binding']):
binding = obj['binding']
buffer = obj['buffer']
offset = obj.get('offset', 0)
size = obj.get('size', buffer.size - offset)
storage_buffers.extend([binding, buffer, offset, size])
samplers = []
for obj in sorted((x for x in resources if x['type'] == 'sampler'), key=lambda x: x['binding']):
params = (
MIN_FILTER[obj.get('min_filter', 'linear')],
MAG_FILTER[obj.get('mag_filter', 'linear')],
float(obj.get('min_lod', -1000.0)),
float(obj.get('max_lod', 1000.0)),
float(obj.get('lod_bias', 0.0)),
TEXTURE_WRAP[obj.get('wrap_x', 'repeat')],
TEXTURE_WRAP[obj.get('wrap_y', 'repeat')],
TEXTURE_WRAP[obj.get('wrap_z', 'repeat')],
COMPARE_MODE[obj.get('compare_mode', 'none')],
COMPARE_FUNC[obj.get('compare_func', 'never')],
float(obj.get('max_anisotropy', 1.0)),
)
samplers.extend([obj['binding'], obj['image'], params])
images = []
for obj in sorted((x for x in resources if x['type'] == 'image'), key=lambda x: x['binding']):
images.extend([obj['binding'], obj['image']])
return tuple(uniform_buffers), tuple(storage_buffers), tuple(samplers), tuple(images)
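
# Sketch: resource_bindings([
#     {'type': 'uniform_buffer', 'binding': 0, 'buffer': ubo},
#     {'type': 'sampler', 'binding': 0, 'image': tex, 'wrap_x': 'clamp_to_edge'},
# ]) returns the (uniform_buffers, storage_buffers, samplers, images) tuples
# in binding order; omitted sampler fields fall back to the defaults above
# (`ubo` and `tex` are illustrative placeholders).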
def framebuffer_attachments(size, attachments):
if not attachments:
if size is None:
raise ValueError('Missing framebuffer')
return size, (), None
attachments = [x.face() if hasattr(x, 'face') else x for x in attachments]
size = attachments[0].size
samples = attachments[0].samples
for attachment in attachments:
if attachment.size != size:
raise ValueError('Attachments must be images with the same size')
if attachment.samples != samples:
raise ValueError('Attachments must be images with the same number of samples')
depth_stencil_attachment = None
if not attachments[-1].flags & 1:
depth_stencil_attachment = attachments[-1]
attachments = attachments[:-1]
for attachment in attachments:
if not attachment.flags & 1:
raise ValueError('The depth stencil attachments must be the last item in the framebuffer')
return size, tuple(attachments), depth_stencil_attachment
def settings(cull_face, depth, stencil, blend, attachments):
res = [len(attachments[1]), CULL_FACE[cull_face]]
if depth is None:
depth = {}
if attachments[2] is not None and attachments[2].flags & 2:
res.extend([True, COMPARE_FUNC[depth.get('func', 'less')], bool(depth.get('write', True))])
else:
res.append(False)
if stencil is None:
stencil = {}
if attachments[2] is not None and attachments[2].flags & 4:
front = stencil.get('front', stencil.get('both', {}))
back = stencil.get('back', stencil.get('both', {}))
res.extend([
True,
STENCIL_OP[front.get('fail_op', 'keep')],
STENCIL_OP[front.get('pass_op', 'keep')],
STENCIL_OP[front.get('depth_fail_op', 'keep')],
COMPARE_FUNC[front.get('compare_op', 'always')],
int(front.get('compare_mask', 0xff)),
int(front.get('write_mask', 0xff)),
int(front.get('reference', 0)),
STENCIL_OP[back.get('fail_op', 'keep')],
STENCIL_OP[back.get('pass_op', 'keep')],
STENCIL_OP[back.get('depth_fail_op', 'keep')],
COMPARE_FUNC[back.get('compare_op', 'always')],
int(back.get('compare_mask', 0xff)),
int(back.get('write_mask', 0xff)),
int(back.get('reference', 0)),
])
else:
res.append(False)
if blend is not None:
res.append(True)
for obj in blend:
res.extend([
int(obj.get('enable', True)),
BLEND_FUNC[obj.get('op_color', 'add')],
BLEND_FUNC[obj.get('op_alpha', 'add')],
BLEND_CONSTANT[obj.get('src_color', 'one')],
BLEND_CONSTANT[obj.get('dst_color', 'zero')],
BLEND_CONSTANT[obj.get('src_alpha', 'one')],
BLEND_CONSTANT[obj.get('dst_alpha', 'zero')],
])
else:
res.append(False)
return tuple(res)
def program(includes, *shaders):
def include(match):
name = match.group(1)
content = includes.get(name)
if content is None:
raise KeyError(f'cannot include "{name}"')
return content
res = []
for shader, type in shaders:
shader = textwrap.dedent(shader).strip()
shader = re.sub(r'#include\s+"([^"]+)"', include, shader)
shader = shader.encode().replace(b'\r', b'')
res.append((shader, type))
return tuple(res)
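
# Sketch: program({'common': common_src}, (vertex_src, 0x8B31), (fragment_src, 0x8B30))
# dedents each source, substitutes #include "common" directives, and returns
# ((source_bytes, type), ...) pairs; an unknown include name raises KeyError.
# (`common_src`, `vertex_src` and `fragment_src` are illustrative placeholders.)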
def compile_error(shader: bytes, shader_type: int, log: bytes):
name = {0x8b31: 'Vertex Shader', 0x8b30: 'Fragment Shader', 0x91b9: 'Compute Shader'}[shader_type]
log = log.rstrip(b'\x00').decode()
raise ValueError(f'{name} Error\n\n{log}')
def linker_error(vertex_shader: bytes, fragment_shader: bytes, log: bytes):
log = log.rstrip(b'\x00').decode()
raise ValueError(f'Linker Error\n\n{log}')
def compute_linker_error(compute_shader: bytes, log: bytes):
log = log.rstrip(b'\x00').decode()
raise ValueError(f'Linker Error\n\n{log}')
def flatten(iterable):
try:
for x in iterable:
yield from flatten(x)
except TypeError:
yield iterable
def clean_glsl_name(name):
    # Array uniforms are reported with an '[0]' suffix; strip it.
    if name.endswith('[0]'):
        return name[:-3]
    return name
def uniforms(interface, values):
data = bytearray()
uniform_map = {clean_glsl_name(obj['name']): obj for obj in interface if obj['type'] == 'uniform'}
for name, value in values.items():
if name not in uniform_map:
raise KeyError(f'Uniform "{name}" does not exist')
value = tuple(flatten(value))
location = uniform_map[name]['location']
size = uniform_map[name]['size']
gltype = uniform_map[name]['gltype']
if gltype not in UNIFORM_PACKER:
raise ValueError(f'Uniform "{name}" has an unknown type')
items, format = UNIFORM_PACKER[gltype]
count = len(value) // items
if len(value) > size * items:
raise ValueError(f'Uniform "{name}" must be {size * items} long at most')
if len(value) % items:
raise ValueError(f'Uniform "{name}" must have a length divisible by {items}')
data.extend(struct.pack('4i', len(value), location, count, gltype))
        for item in value:
            data.extend(struct.pack(format, item))
data.extend(struct.pack('4i', 0, 0, 0, 0))
return list(values), data
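
# Sketch of the packed layout produced above, inferred from the struct calls:
# each uniform contributes an int32 header (length, location, count, gltype)
# followed by its raw values, and the stream is terminated by an all-zero
# header for the consumer to stop on.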
def validate(interface, resources, vertex_buffers, attachments, limits):
errors = []
unique = set((obj['type'], obj['binding']) for obj in resources)
if len(resources) != len(unique):
for obj in resources:
key = (obj['type'], obj['binding'])
if key not in unique:
binding = obj['binding']
rtype = obj['type']
errors.append(f'Duplicate resource entry for "{rtype}" with binding = {binding}')
unique.discard(key)
unique = set(obj['location'] for obj in vertex_buffers)
if len(vertex_buffers) != len(unique):
for obj in vertex_buffers:
location = obj['location']
if location not in unique:
errors.append(f'Duplicate vertex attribute entry with location = {location}')
unique.discard(location)
expected = set(obj['location'] + i for obj in interface if obj['type'] == 'input' for i in range(obj['size']))
provided = set(obj['location'] for obj in vertex_buffers)
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for location in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'input' and obj['location'] == location)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing vertex buffer binding for "{name}" with location = {location}')
if extra:
for location in sorted(extra):
errors.append(f'Unknown vertex attribute with location = {location}')
expected = set(obj['location'] + i for obj in interface if obj['type'] == 'output' for i in range(obj['size']))
provided = set(range(len(attachments)))
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for location in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'output' and obj['location'] == location)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing framebuffer attachment for "{name}" with location = {location}')
if extra:
for location in sorted(extra):
errors.append(f'Unknown framebuffer attachment with location = {location}')
expected = set(obj['binding'] for obj in interface if obj['type'] == 'uniform_buffer')
provided = set(obj['binding'] for obj in resources if obj['type'] == 'uniform_buffer')
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for binding in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'uniform_buffer' and obj['binding'] == binding)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing uniform buffer binding for "{name}" with binding = {binding}')
if extra:
for binding in sorted(extra):
errors.append(f'Unknown uniform buffer with binding = {binding}')
expected = set(obj['binding'] for obj in interface if obj['type'] == 'storage_buffer')
provided = set(obj['binding'] for obj in resources if obj['type'] == 'storage_buffer')
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for binding in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'storage_buffer' and obj['binding'] == binding)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing storage buffer binding for "{name}" with binding = {binding}')
if extra:
for binding in sorted(extra):
errors.append(f'Unknown storage buffer with binding = {binding}')
expected = set(obj['binding'] + i for obj in interface if obj['type'] == 'sampler' for i in range(obj['size']))
provided = set(obj['binding'] for obj in resources if obj['type'] == 'sampler')
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for binding in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'sampler' and obj['binding'] == binding)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing sampler binding for "{name}" with binding = {binding}')
if extra:
for binding in sorted(extra):
errors.append(f'Unknown sampler with binding = {binding}')
expected = set(obj['binding'] + i for obj in interface if obj['type'] == 'image' for i in range(obj['size']))
provided = set(obj['binding'] for obj in resources if obj['type'] == 'image')
if expected ^ provided:
missing = expected - provided
extra = provided - expected
if missing:
for binding in sorted(missing):
obj = next(obj for obj in interface if obj['type'] == 'image' and obj['binding'] == binding)
name = clean_glsl_name(obj['name'])
errors.append(f'Missing image binding for "{name}" with binding = {binding}')
if extra:
for binding in sorted(extra):
errors.append(f'Unknown image with binding = {binding}')
if errors:
raise ValueError('Program Validation Error\n\n' + '\n'.join(errors)) | zengl | /zengl-2.0.0a2-cp36-cp36m-win_amd64.whl/_zengl.py | _zengl.py |
.. image:: https://badge.fury.io/py/zenipy.png
:target: http://badge.fury.io/py/zenipy
.. image:: https://readthedocs.org/projects/zenipy/badge/?version=latest
:target: http://zenipy.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Zenipy
******
Zenipy is a library for Python which is inspired by Zenity. When you write scripts,
you can use Zenipy to create simple dialogs that interact graphically with the user.
Requirements
============
* Python 2 or 3
* GTK+3
* python3-gi or python-gi (for python2)
Installation
============
Install using pip :
.. code-block:: bash
$ pip install zenipy
Or clone the repo :
.. code-block:: bash
$ git clone https://github.com/poulp/zenipy.git
$ cd ./zenipy
$ python setup.py install
Example
=======
Simple dialog :
.. code-block:: python
from zenipy import calendar
result = calendar(title="Awesome Calendar", text="Your birthday?")
print(result)
This code shows a calendar dialog:
.. image:: docs/images/screen.png
:align: center
And displays the result:
.. code-block:: bash
$ python test.py
$ (year=2017, month=6, day=4)
API
===
.. code-block:: python
zenipy.zenipy.message(title='', text='', width=330, height=120, timeout=None)
Display a simple message
Parameters:
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
.. code-block:: python
zenipy.zenipy.error(title='', text='', width=330, height=120, timeout=None)
Display a simple error
Parameters:
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
.. code-block:: python
zenipy.zenipy.warning(title='', text='', width=330, height=120, timeout=None)
Display a simple warning
Parameters:
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
.. code-block:: python
zenipy.zenipy.question(title='', text='', width=330, height=120, timeout=None)
Display a question, possible answer are yes/no.
Parameters:
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
The answer as a boolean
Return type:
bool
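
For example, based on the signature above, `question` can gate an action
on the user's confirmation:

.. code-block:: python

    from zenipy import question

    if question(title="Confirm", text="Delete the report?"):
        print("deleting...")
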
.. code-block:: python
zenipy.zenipy.entry(text='', placeholder='', title='', width=330, height=120, timeout=None)
Display a text input
Parameters:
* **text** (*str*) – text inside the window
* **placeholder** (*str*) – placeholder for the input
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
The content of the text input
Return type:
str
.. code-block:: python
zenipy.zenipy.password(text='', placeholder='', title='', width=330, height=120, timeout=None)
Display a text input with hidden characters
Parameters:
* **text** (*str*) – text inside the window
* **placeholder** (*str*) – placeholder for the input
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
The content of the text input
Return type:
str
.. code-block:: python
zenipy.zenipy.zlist(columns, items, print_columns=None, text='', title='', width=330, height=120, timeout=None)
Display a list of values
Parameters:
* **columns** (*list of strings*) – a list of columns name
* **items** (*list of strings*) – a list of values
* **print_columns** (*int*, optional) – index of the column whose
  values are returned (None returns all columns)
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
A row of values from the table
Return type:
list
.. code-block:: python
zenipy.zenipy.file_selection(multiple=False, directory=False, save=False, confirm_overwrite=False, filename=None, title='', width=330, height=120, timeout=None)
Open a file selection window
Parameters:
* **multiple** (*bool*) – allow multiple file selection
* **directory** (*bool*) – only directory selection
* **save** (*bool*) – save mode
* **confirm_overwrite** (*bool*) – confirm when a file is
overwritten
* **filename** (*str*) – placeholder for the filename
* **text** (*str*) – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
path of files selected.
Return type:
string or list if multiple enabled
.. code-block:: python
zenipy.zenipy.calendar(text='', day=None, month=None, title='', width=330, height=120, timeout=None)
Display a calendar
Parameters:
* **text** (*str*) – text inside the window
* **day** (*int*) – default day
* **month** (*int*) – default month
* **text** – text inside the window
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
(year, month, day)
Return type:
tuple
.. code-block:: python
zenipy.zenipy.color_selection(show_palette=False, opacity_control=False, title='', width=330, height=120, timeout=None)
Display a color selection dialog
Parameters:
* **show_palette** (*bool*) – hide/show the palette with
preselected colors
* **opacity_control** (*bool*) – allow to control opacity
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
the color selected by the user
Return type:
str
.. code-block:: python
zenipy.zenipy.scale(text='', value=0, min=0, max=100, step=1, draw_value=True, title='', width=330, height=120, timeout=None)
Select a number with a range widget
Parameters:
* **text** (*str*) – text inside window
* **value** (*int*) – current value
* **min** (*int*) – minimum value
* **max** (*int*) – maximum value
* **step** (*int*) – incrementation value
* **draw_value** (*bool*) – hide/show cursor value
* **title** (*str*) – title of the window
* **width** (*int*) – window width
* **height** (*int*) – window height
* **timeout** (*int*) – close the window after n seconds
Returns:
The value selected by the user
Return type:
float
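
For example, based on the signature above:

.. code-block:: python

    from zenipy import scale

    volume = scale(text="Volume", value=50, min=0, max=100, step=5)
    print(volume)
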
| zenipy | /zenipy-0.1.5.tar.gz/zenipy-0.1.5/README.rst | README.rst |
.. :changelog:
History
-------
2.2.6 (2014-02-28)
++++++++++++++++++
* Use atexit to make sure gevent greenlets die.
2.2.5 (2014-02-26)
++++++++++++++++++
* Fix pubsub parser to check for proper message type.
2.2.4 (2012-06-28)
++++++++++++++++++
* added services command
2.2.3 (2012-04-17)
++++++++++++++++++
* removed extra ' in library
2.2.2 (2012-04-17)
++++++++++++++++++
* Switched to gevent instead of Thread to make killing services easier.
2.2.1 (2012-04-17)
++++++++++++++++++
* Add symlink for LICENSE so it includes properly when making the package.
2.2.0 (2012-04-17)
++++++++++++++++++
* Add MANIFEST.in
* Version bump to match major/minor step with ZenIRCBot.
2.1.3 (2012-04-11)
++++++++++++++++++
* Creating history
| zenircbot_api | /zenircbot_api-2.2.6.tar.gz/zenircbot_api-2.2.6/HISTORY.rst | HISTORY.rst |
# These are at the top to ensure gevent can monkey patch before
# threading gets imported.
from gevent import monkey
monkey.patch_all()
import atexit
import json
import gevent
from redis import StrictRedis
def load_config(name):
""" Loads a JSON file and returns an object.
:param string name: The JSON file to load.
:returns: An native object with the contents of the JSON file.
This is a helper so you don't have to do the file IO and JSON
parsing yourself.
"""
with open(name) as f:
return json.loads(f.read())
__version__ = '2.2.6'
class ZenIRCBot(object):
"""Instantiates a new ZenIRCBot API object.
:param string host: Redis hostname (default: 'localhost')
:param integer port: Redis port (default: 6379)
:param integer db: Redis DB number (default: 0)
:returns: ZenIRCBot instance
Takes Redis server parameters to use for instantiating Redis
clients.
"""
def __init__(self, host='localhost', port=6379, db=0):
self.host = host
self.port = port
self.db = db
self.redis = StrictRedis(host=self.host,
port=self.port,
db=self.db)
def send_privmsg(self, to, message):
"""Sends a message to the specified channel(s)
:param to: A list or a string, if it is a list it will send to
all the people or channels listed.
:param string message: The message to send.
This is a helper so you don't have to handle the JSON or the
envelope yourself.
"""
if isinstance(to, basestring):
to = (to,)
for channel in to:
self.get_redis_client().publish('out',
json.dumps({
'version': 1,
'type': 'privmsg',
'data': {
'to': channel,
'message': message,
}}))
def send_action(self, to, message):
"""Sends an "ACTION" message to the specified channel(s)
:param to: A list or a string, if it is a list it will send to
all the people or channels listed.
:param string message: The message to send.
This is a helper so you don't have to handle the JSON or the
envelope yourself.
"""
if isinstance(to, basestring):
to = (to,)
for channel in to:
self.get_redis_client().publish('out',
json.dumps({
'version': 1,
'type': 'privmsg_action',
'data': {
'to': channel,
'message': message,
}}))
def send_admin_message(self, message):
"""
:param string message: The message to send.
This is a helper function that sends the message to all of the
channels defined in ``admin_spew_channels``.
"""
admin_channels = self.redis.get('zenircbot:admin_spew_channels')
if admin_channels:
self.send_privmsg(admin_channels, message)
def non_blocking_redis_subscribe(self, func, args=[], kwargs={}):
pubsub = self.get_redis_client().pubsub()
pubsub.subscribe('in')
for msg in pubsub.listen():
if msg['type'] == 'message':
message = json.loads(msg['data'])
func(message=message, *args, **kwargs)
def register_commands(self, service, commands):
"""
:param string script: The script with extension that you are
registering.
:param list commands: A list of objects with name and description
attributes used to reply to
a commands query.
This will notify all ``admin_spew_channels`` of the script
coming online when the script registers itself. It will also
setup a subscription to the 'out' channel that listens for
'commands' to be sent to the bot and responds with the list of
script, command name, and command description for all
registered scripts.
"""
self.send_admin_message(service + ' online!')
if commands:
def registration_reply(message, service, commands):
if message['version'] == 1:
if message['type'] == 'directed_privmsg':
if message['data']['message'] == 'commands':
for command in commands:
self.send_privmsg(message['data']['sender'],
'%s: %s - %s' % (
service,
command['name'],
command['description']
))
elif message['data']['message'] == 'services':
self.send_privmsg(message['data']['sender'],
service)
greenlet = gevent.spawn(self.non_blocking_redis_subscribe,
func=registration_reply,
kwargs={
'service': service,
'commands': commands
})
# Ensures that the greenlet is cleaned up.
atexit.register(lambda gl: gl.kill(), greenlet)
def get_redis_client(self):
""" Get redis client using values from instantiation time."""
return StrictRedis(host=self.host,
port=self.port,
db=self.db) | zenircbot_api | /zenircbot_api-2.2.6.tar.gz/zenircbot_api-2.2.6/zenircbot_api.py | zenircbot_api.py |
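
# Illustrative usage (sketch), using only the API defined above:
#
#     bot = ZenIRCBot()  # defaults: localhost:6379, db 0
#     bot.register_commands("example.py", [
#         {"name": "ping", "description": "replies with pong"},
#     ])
#     bot.send_privmsg("#zenircbot", "hello from a service")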
import argparse
from .filing import Filing
from .settings import KNOWN_SCHEDULES, IRS_READER_ROOT
from .xmlrunner import XMLRunner
from .text_format_utils import *
def get_parser():
parser = argparse.ArgumentParser("irsx")
parser.add_argument(
'object_ids',
metavar='object_ids',
type=int,
nargs='+',
help='object ids'
)
parser.add_argument(
'--verbose',
dest='verbose',
action='store_const',
const=True, default=False,
help='Verbose output'
)
parser.add_argument(
"--schedule",
choices=KNOWN_SCHEDULES,
default=None,
help='Get only that schedule'
)
parser.add_argument(
"--xpath",
dest='documentation',
action='store_const',
const=True, default=False,
help='show xpath in text format'
)
parser.add_argument(
"--format",
choices=['json', 'csv', 'txt'],
default='json',
help='Output format'
)
parser.add_argument(
"--file",
default=None,
help='Write result to file'
)
parser.add_argument(
'--list_schedules',
dest='list_schedules',
action='store_const',
const=True,
default=False,
help='Only list schedules'
)
return parser
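
# Illustrative invocations accepted by this parser (sketch; the schedule name
# must be one of KNOWN_SCHEDULES):
#
#     irsx 201612439349300006
#     irsx --schedule IRS990 --format csv --file out.csv 201612439349300006
#     irsx --list_schedules 201612439349300006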
def run_main(args_read):
    csv_format = args_read.format in ('csv', 'txt')
xml_runner = XMLRunner(
documentation=args_read.documentation,
csv_format=csv_format
)
# Use the standardizer that was init'ed by XMLRunner
standardizer = xml_runner.get_standardizer()
for object_id in args_read.object_ids:
if args_read.verbose:
print("Processing filing %s" % object_id)
if args_read.file:
print("Printing result to file %s" % args_read.file)
if args_read.list_schedules:
this_filing = Filing(object_id)
this_filing.process()
print(this_filing.list_schedules())
return True # we're done, ignore any other commands
else:
if args_read.schedule:
parsed_filing = xml_runner.run_sked(
object_id,
args_read.schedule,
verbose=args_read.verbose
)
else:
parsed_filing = xml_runner.run_filing(
object_id,
verbose=args_read.verbose
)
if args_read.format == 'json':
to_json(parsed_filing.get_result(), outfilepath=args_read.file)
elif args_read.format == 'csv':
to_csv(
parsed_filing,
object_id=object_id,
standardizer=standardizer,
documentation=args_read.documentation,
outfilepath=args_read.file
)
elif args_read.format == 'txt':
to_txt(
parsed_filing,
standardizer=standardizer,
documentation=args_read.documentation,
outfilepath=args_read.file
)
def main(args=None):
parser = get_parser()
args_read = parser.parse_args()
run_main(args_read)
print("\n")
if __name__ == "__main__":
main() | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/irsx_cli.py | irsx_cli.py |
# Object ids for 2017 filings, generated with:
#     csvcut -c 9 index_2017.csv | head -n 1000
object_ids_2017 = ['201612439349300006', '201612439349300026', '201612439349300341', '201612439349300516', '201612439349300546', '201612439349300601', '201612439349300621', '201612439349300746', '201612439349300861', '201612449349100601', '201612449349100706', '201612449349200001', '201612449349200101', '201612449349200111', '201612449349200121', '201612449349200756', '201612449349200761', '201612449349200806', '201612449349200906', '201612449349300216', '201612449349300336', '201612449349300406', '201612449349300606', '201612449349300636', '201612449349300756', '201612449349300786', '201612449349301061', '201612449349301071', '201612449349301076', '201612459349100321', '201612459349200111', '201612459349200206', '201612459349200321', '201612459349200431', '201612459349200506', '201612459349200711', '201612459349300241', '201612459349300301', '201632519349300958', '201632469349300108', '201632519349300313', '201642509349300319', '201642509349300704', '201642509349300209', '201642469349300244', '201632529349300833', '201632519349301153', '201642519349300434', '201602379349100415', '201602379349100605', '201602379349300230', '201602379349300530', '201602389349100605', '201602389349300545', '201602389349300845', '201602399349200620', '201602399349200715', '201602399349300310', '201602399349300330', '201602399349300410', '201602399349300605', '201602399349300610', '201602399349300625', '201602399349300700', '201602399349300725', '201602399349300800', '201602399349300900', '201602399349300915', '201602409349200000', '201602409349200200', '201602409349200700', '201612459349300346', '201612459349300426', '201612459349300431', '201612459349300536', '201612459349300626', '201612459349300816', '201612459349300971', '201612459349300976', '201612459349301101', '201612459349301111', '201612469349200411', '201612469349300206', '201612469349300231', '201612469349300426', '201612469349300501', '201612469349300601', '201612509349300101', '201612509349300166', '201612509349300211', '201612509349300356', '201612509349300476', '201612509349300501', '201612509349300621', '201612519349200021', '201612519349200611', '201612519349200736', '201612519349300136', '201612519349300141', '201612519349300146', '201612519349300206', '201612519349300601', '201612519349300766', '201612519349300826', '201612519349300901', '201612519349300951', '201612529349200701', '201612529349300801', '201612529349300836', '201612529349301251', '201622449349300427', '201602429349200000', '201602429349200100', '201602429349200310', '201602429349200615', '201602429349200640', '201602439349300045', '201602439349300415', '201602439349300535', '201602449349300910', '201602449349301055', '201602449349301090', '201602449349301405', '201602459349300345', '201602459349300405', '201602459349300715', '201602459349301055', '201602509349300350', '201602529349200405', '201602529349301100', '201602529349301155', '201602589349100020', '201602599349100130', '201612539349100751', '201602589349100610', '201602589349100210', '201622569349100422', '201622579349100302', '201602599349100000', '201602589349100310', '201602589349100515', '201602599349100405', '201602589349100120', '201622569349100452', '201622579349100117', '201602589349100300', '201602589349100205', '201602599349100630', '201602599349100310', '201622569349100312', '201602579349100515', '201602589349100105', '201602579349100705', '201602569349100600', '201622579349100017', '201602149349301245', '201602159349300625', '201642089349300449', '201642049349300519', '201632099349301278', 
'201632049349300243', '201612099349301021', '201641979349301154', '201641599349300219', '201631619349300243', '201612109349301306', '201642029349300864', '201602149349300040', '201602049349300800', '201632089349300823', '201622109349300627', '201602079349301200', '201632099349301353', '201632079349301368', '201632039349300953', '201602159349300225', '201602599349100435', '201612539349100406', '201602599349100030', '201602599349100640', '201602599349100430', '201602599349100625', '201602599349100205', '201602579349100200', '201602579349100115', '201602579349100105', '201602569349100085', '201622539349100702', '201622569349100122', '201612549349100401', '201612539349100001', '201612539349100311', '201622599349100022', '201602589349100625', '201602579349100715', '201602579349100100', '201602579349100005', '201602539349200810', '201602549349200005', '201602539349200000', '201602539349200510', '201602539349200800', '201602539349200230', '201602539349200805', '201602549349200600', '201602539349200310', '201622579349200247', '201602539349200010', '201602539349200135', '201602539349200605', '201602539349200005', '201602549349200105', '201602539349200600', '201602549349200100', '201602539349200525', '201602539349200300', '201602539349200520', '201602539349200235', '201602539349200710', '201602539349200115', '201602539349200515', '201602599349100215', '201612569349100466', '201602589349100400', '201612599349100301', '201612599349100116', '201602589349100810', '201612569349100501', '201602589349100005', '201622539349100802', '201612569349100401', '201602599349100515', '201602599349100210', '201602589349100805', '201602589349100000', '201612589349100511', '201622549349100002', '201622539349100312', '201612599349100136', '201612579349100706', '201612589349100421', '201602579349100000', '201602579349100210', '201612589349100411', '201612589349100406', '201612589349100326', '201602549349200700', '201602539349200110', '201602539349200225', '201612599349100106', '201612539349100616', '201602599349100530', '201622579349100702', '201622539349100502', '201622579349100412', '201612549349100501', '201612539349100401', '201602589349100750', '201602599349100135', '201602589349100510', '201602599349100420', '201602589349100110', '201602599349100535', '201602589349100015', '201622539349100757', '201622549349100302', '201622599349100002', '201622579349100802', '201622569349100617', '201622539349100007', '201622569349100512', '201612599349100211', '201612569349100616', '201612569349200726', '201612579349200761', '201612599349200636', '201612569349200621', '201612599349200606', '201612599349200131', '201612589349200501', '201622589349200342', '201622589349200132', '201612569349200136', '201612599349200121', '201612569349200011', '201612569349200751', '201612569349200001', '201612579349200641', '201612599349200711', '201612599349201111', '201612569349200616', '201612599349200611', '201612599349200541', '201612589349200441', '201642239349302074', '201632249349301788', '201632199349300108', '201602289349302985', '201602259349301770', '201642239349302474', '201602319349300950', '201642249349303004', '201632249349303053', '201632249349301818', '201642229349301129', '201602259349303350', '201602289349301880', '201632229349302013', '201622249349302207', '201632229349300443', '201642219349301344', '201642229349300884', '201642229349301644', '201602289349303935', '201632239349302363', '201642249349301329', '201602599349100635', '201612539349100301', '201602539349100760', '201612589349100626', '201612569349100606', 
'201602599349100525', '201602599349100400', '201612569349100506', '201622539349100767', '201612569349100461', '201602549349100400', '201602579349100510', '201602579349100800', '201612539349100306', '201602589349100710', '201602599349100020', '201602599349100010', '201602589349100410', '201612599349100201', '201612599349100006', '201612589349100601', '201622589349100717', '201612579349100511', '201602189349300440', '201642169349301589', '201632289349203713', '201632289349203468', '201632289349203398', '201632289349204603', '201632289349203743', '201632289349203373', '201632289349203233', '201632289349203158', '201632289349202553', '201632289349204143', '201632289349204153', '201632289349204003', '201632289349204313', '201632289349203663', '201632289349203628', '201632289349203563', '201632289349202753', '201632289349202633', '201632319349200013', '201632289349204248', '201632289349203993', '201632179349301418', '201612229349301241', '201602239349301585', '201602189349300985', '201602189349300760', '201642169349300104', '201612179349300641', '201622159349300027', '201642159349300334', '201602239349302575', '201602189349300865', '201632169349300528', '201622169349301667', '201622159349300787', '201602229349301885', '201612149349301421', '201642179349301574', '201602169349300300', '201602179349301510', '201622119349301027', '201642149349301339', '201622159349301767', '201612189349300641', '201612249349300436', '201612229349301486', '201612219349301091', '201602249349300445', '201602229349301920', '201602219349300525', '201602239349302375', '201602249349302260', '201622229349301242', '201612229349301306', '201602199349300500', '201622219349301032', '201622189349300897', '201602219349302255', '201602189349300950', '201602249349301185', '201612229349300146', '201612249349301301', '201632289349203813', '201632289349203808', '201632289349203568', '201632289349202108', '201632289349204358', '201632289349203223', '201632289349202923', '201642259349201179', '201642259349200709', '201642259349200424', '201632319349200113', '201632319349200033', '201632299349201003', '201632299349200933', '201632289349204413', '201632289349202683', '201632289349202558', '201632289349201388', '201632289349201243', '201632289349200923', '201632269349200133', '201642259349200549', '201632309349200418', '201632289349204273', '201632289349204258', '201632289349203703', '201632289349202983', '201632289349201408', '201632289349201353', '201632289349200228', '201642289349202399', '201642289349201969', '201642289349201744', '201642319349200329', '201642289349202324', '201642289349201379', '201642289349203804', '201642289349203564', '201642289349202134', '201642289349201074', '201642319349200319', '201642289349203174', '201642289349202924', '201642289349202874', '201642289349201979', '201642289349200829', '201642319349200719', '201612229349301736', '201612189349301406', '201622229349300527', '201602239349301695', '201612569349100081', '201622569349100457', '201612579349100001', '201622539349100412', '201612579349100206', '201612599349100311', '201612599349100506', '201612599349100401', '201612589349100206', '201612589349100016', '201622579349100312', '201622579349100402', '201602579349100610', '201612569349100076', '201612569349100006', '201612569349100001', '201622569349100072', '201622559349100002', '201612589349100211', '201612589349100126', '201602599349200910', '201602569349200640', '201602599349200745', '201602539349200405', '201602569349200635', '201602579349200715', '201602569349200860', '201602559349200050', 
'201602569349200135', '201602569349200405', '201602569349200000', '201602569349200530', '201602569349200240', '201602569349200865', '201602589349200120', '201602589349200520', '201602589349200300', '201602589349201005', '201602589349200725', '201602589349200005', '201602559349200000', '201602569349200125', '201602549349200500', '201642289349203299', '201642289349202499', '201642289349201729', '201632299349200913', '201632279349200808', '201632319349200038', '201632289349202428', '201632289349201613', '201632269349200003', '201632309349200718', '201632299349200903', '201632289349202593', '201642259349201084', '201642259349200719', '201632319349200128', '201632309349200438', '201632289349200938', '201632289349200853', '201632269349200613', '201642259349200334', '201632289349202218', '201632289349201508', '201632289349200208', '201632309349200528', '201632289349202408', '201632289349201643', '201632289349200413', '201632289349204608', '201632289349202943', '201632289349202363', '201632289349202008', '201632289349201988', '201632289349201773', '201632289349201578', '201632289349200003', '201632289349204233', '201632289349203433', '201632289349200433', '201632289349202213', '201632289349200223', '201632289349204103', '201632289349203348', '201632289349204238', '201632289349203138', '201632289349201733', '201632289349203003', '201632289349202803', '201632289349204378', '201602599349200945', '201602599349200920', '201602599349200325', '201602309349301000', '201602299349300525', '201632259349301783', '201602289349305800', '201612259349302576', '201602289349306480', '201602289349303740', '201602079349300950', '201612089349301256', '201612109349301301', '201642039349300014', '201632079349301123', '201642019349300124', '201641619349300539', '201642089349301274', '201632099349300403', '201602289349305745', '201612259349303741', '201612259349302571', '201612259349301956', '201602259349301840', '201642229349301139', '201602289349305390', '201632259349301883', '201622569349100052', '201612599349100141', '201612549349100001', '201612589349100021', '201612579349100011', '201612599349100411', '201622569349100132', '201622589349100422', '201622589349100507', '201602599349100510', '201602599349100140', '201602599349100105', '201622569349100302', '201622569349100002', '201622569349100082', '201622569349100077', '201622569349100612', '201612599349100216', '201612599349100121', '201612579349100016', '201632289349200143', '201632269349200603', '201631609349200708', '201642179349200249', '201642179349200439', '201602289349202390', '201602259349201435', '201612049349200001', '201612149349200121', '201612169349200301', '201612219349200731', '201612249349202001', '201602239349200910', '201632289349201078', '201632289349201113', '201632279349200038', '201632289349202003', '201632289349201973', '201632289349201143', '201632289349200028', '201632279349200223', '201632279349200713', '201632269349200233', '201632239349300138', '201622249349302802', '201622249349301907', '201642229349301629', '201632249349301433', '201632229349301808', '201642229349301344', '201602289349301630', '201642249349301799', '201642239349301989', '201642249349302259', '201642249349300714', '201602259349302660', '201632219349300933', '201632229349301128', '201622249349303002', '201632229349301373', '201642219349302219', '201632249349302148', '201622229349301197', '201622249349301172', '201612569349100086', '201632289349102063', '201632289349100543', '201632289349100508', '201632289349101548', '201632289349100748', '201632319349100103', 
'201632289349100003', '201622239349301852', '201632289349303273', '201632289349305258', '201632289349306863', '201632289349303708', '201632289349306598', '201632289349306363', '201632289349302613', '201632289349301978', '201632319349300748', '201632289349305448', '201632299349300208', '201632289349304968', '201632289349306638', '201632289349307128', '201632289349305023', '201632289349304128', '201632289349305693', '201632289349303163', '201632289349303643', '201632289349301418', '201622579349100512', '201612539349100811', '201622589349100512', '201622569349100467', '201622569349100307', '201622589349100407', '201622589349100002', '201622579349100507', '201622569349100117', '201622589349100427', '201602579349100600', '201602579349100400', '201622559349100152', '201622539349100407', '201612539349100851', '201622569349100112', '201622569349100102', '201602599349100425', '201622579349100012', '201622589349100807', '201622569349100552', '201622589349100417', '201622589349100327', '201622589349100207', '201622579349100307', '201612329349100001', '201612329349100501', '201612329349100506', '201612329349100706', '201612329349200006', '201612329349200226', '201612329349200306', '201612329349200526', '201612329349200801', '201612329349300301', '201632289349101028', '201632289349101583', '201632289349102008', '201632289349100418', '201642259349100004', '201632299349100303', '201632309349100303', '201642259349100544', '201632299349100003', '201642259349101114', '201632289349100938', '201632289349100623', '201632289349101358', '201632289349101918', '201632289349101198', '201632289349100913', '201632289349101578', '201632289349100733', '201632269349100403', '201642289349101929', '201642289349100929', '201642289349101909', '201642289349101564', '201602329349100705', '201642289349101289', '201642269349100304', '201642289349101274', '201602329349100805', '201602339349100100', '201642299349100714', '201642299349100709', '201642289349100944', '201642289349101879', '201642289349101519', '201642289349100734', '201642289349100414', '201642289349101689', '201642289349100224', '201642289349100349', '201642289349100214', '201642229349301184', '201642249349301549', '201632249349300013', '201642229349300839', '201642229349300909', '201632249349302078', '201632249349300513', '201632249349300503', '201632249349301498', '201642249349302379', '201642249349302039', '201642249349301544', '201642249349300729', '201642249349301614', '201642229349301239', '201642229349301324', '201642229349301054', '201642229349301179', '201642219349301764', '201642219349302074', '201642219349301564', '201602579349300515', '201612579349300236', '201602569349300880', '201602569349300420', '201602579349300825', '201602569349300315', '201602579349300310', '201602579349300315', '201602579349301210', '201602569349300900', '201602569349300785', '201602559349300155', '201602569349300110', '201602559349300350', '201612539349300006', '201612589349301136', '201602579349301265', '201602579349300520', '201602579349300855', '201602579349300400', '201602579349300895', '201602579349300720', '201602579349300885', '201602579349300820', '201602569349301265', '201602569349301000', '201612329349300631', '201612329349300646', '201612329349301101', '201612329349301151', '201612329349301206', '201612339349100201', '201612339349200301', '201612339349200501', '201612349349200251', '201612359349100351', '201612359349100361', '201612359349100411', '201612359349200041', '201612359349200256', '201612359349200501', '201612359349300216', '201612359349300461', 
'201612359349300501', '201612359349300721', '201612359349300901', '201612359349301006', '201612369349100756', '201612369349200111', '201612369349200406', '201612369349200416', '201612369349200531', '201612369349300001', '201612369349300011', '201612369349300201', '201612369349300301', '201612369349300336', '201612369349300346', '201612369349300506', '201612369349300611', '201612369349300726', '201612369349300906', '201612369349301156', '201612379349100001', '201602539349100765', '201602539349100210', '201602539349100805', '201602569349100465', '201602539349100610', '201602539349100100', '201602549349100000', '201602539349100005', '201602539349100200', '201602539349100105', '201602539349100800', '201602539349100615', '201622569349100067', '201622569349100057', '201602539349100205', '201602539349100605', '201602539349100600', '201602539349100110', '201602539349100500', '201602539349100810', '201602539349100415', '201602539349100410', '201602549349100500', '201602539349100405', '201602539349100000', '201602569349100300', '201602569349100415', '201642259349100034', '201632289349101218', '201632289349100243', '201632289349100218', '201632269349100003', '201632289349100838', '201632289349100038', '201632289349100523', '201632289349101158', '201632289349101043', '201632289349100048', '201632289349100408', '201632289349101668', '201632289349101213', '201632289349100333', '201632269349100203', '201632289349102003', '201632289349101658', '201632289349100933', '201632289349101513', '201632289349101018', '201632279349100403', '201642259349101004', '201642259349100509', '201632319349100708', '201612379349100116', '201612379349100411', '201612379349100606', '201612379349200001', '201612379349200231', '201612379349200616', '201612379349200706', '201612379349200711', '201612379349200721', '201612379349300016', '201612379349300036', '201612379349300126', '201612379349300226', '201612379349300301', '201612379349300331', '201612379349300401', '201612379349300606', '201612379349300631', '201612379349300716', '201612379349300766', '201612389349100501', '201612389349100506', '201612389349200006', '201612389349200126', '201612389349200206', '201612389349200401', '201612389349200606', '201612389349200726', '201612389349300001', '201612389349300016', '201612389349300031', '201612389349300121', '201612389349300141', '201612389349300206', '201612389349300411', '201612389349300501', '201612389349300541', '201612389349300631', '201642459349300959', '201642459349300939', '201642459349300969', '201642439349300724', '201642449349300534', '201632509349300238', '201632509349300003', '201632509349300023', '201642449349300544', '201642459349300949', '201642459349301054', '201642449349300104', '201642519349300709', '201642509349300384', '201642519349300319', '201642509349300219', '201642469349300624', '201642469349300204', '201632529349301013', '201612389349300816', '201612399349200316', '201612399349200506', '201612399349300011', '201612399349300041', '201612399349300211', '201612399349300426', '201612399349300516', '201612399349300626', '201612399349300726', '201612399349300736', '201612409349200401', '201612419349200301', '201612419349300051', '201612429349200131', '201612429349200146', '201612429349200201', '201612429349200216', '201612429349200511', '201612429349200716', '201612429349200751', '201612429349300111', '201612429349300576', '201612429349300586', '201612429349300591', '201612429349300596', '201612429349300691', '201612429349300721', '201612429349300811', '201612429349300966', '201612429349301051', 
'201612429349301161', '201612439349100201', '201612439349100306', '201612439349100606', '201612439349200401', '201612439349200516', '201612439349200621', '201612439349200631', '201602519349100005', '201602509349100165', '201602509349100060', '201632529349100603', '201642519349100314', '201642459349100104', '201632519349100508', '201602509349100000', '201602509349100210', '201602469349100505', '201622519349100512', '201612519349100311', '201642459349100659', '201602509349100310', '201642509349100359', '201622519349100507', '201622519349100702', '201612509349100356', '201602519349100205', '201602519349100315', '201642459349100654', '201602469349100205', '201642449349100409', '201642469349100709', '201602469349100310', '201602469349100010']
# csvcut -c 9 index_2016.csv | head -n 1000 > returns_2016.txt
object_ids_2016 = ['201543159349100344', '201543109349200219', '201513089349200226', '201513089349200236', '201523229349300327', '201543089349301829', '201533179349306298', '201533179349201108', '201533179349203783', '201533209349304768', '201533179349307343', '201533209349204083', '201533209349204123', '201533209349204128', '201533209349204148', '201533209349204153', '201533209349204178', '201533209349204198', '201533209349204208', '201533209349204223', '201533209349204228', '201533189349300608', '201523069349301367', '201533069349300963', '201523099349300542', '201533099349301033', '201533099349301043', '201523169349304367', '201533099349301803', '201523069349300142', '201533109349300348', '201503069349100380', '201513089349100601', '201523039349200407', '201543039349301204', '201523039349200632', '201523039349200637', '201523089349301462', '201533069349300788', '201533079349300238', '201543149349201279', '201543159349100504', '201543169349201334', '201543169349201349', '201543109349200229', '201533169349100748', '201533169349100808', '201513069349200601', '201523209349314227', '201523209349314257', '201523209349311332', '201533179349302173', '201533179349307048', '201523219349200632', '201533179349201623', '201533179349201643', '201543109349100104', '201533209349302633', '201533179349200538', '201533179349200618', '201533179349203683', '201533179349203728', '201533209349306188', '201533209349204843', '201533099349301103', '201533099349301113', '201523039349300127', '201523079349301652', '201533039349300813', '201533139349300148', '201533139349300208', '201533069349301413', '201533079349300003', '201523039349200827', '201523079349200027', '201523079349200237', '201523069349300957', '201523079349301387', '201533079349200823', '201523209349310937', '201523209349310947', '201543089349201054', '201533179349306528', '201533179349303278', '201543079349200609', '201543079349200529', '201533179349306278', '201523099349201102', '201523239349300002', '201533209349205278', '201533209349205353', '201533209349201488', '201533209349203893', '201533209349203908', '201533209349203913', '201533209349203923', '201533209349201753', '201533209349201808', '201533209349302303', '201533179349307818', '201533179349307828', '201523209349311892', '201533179349309453', '201533209349301728', '201533209349301738', '201533189349100703', '201533209349102838', '201533209349102858', '201533209349101368', '201533209349101373', '201533179349307538', '201533209349203328', '201533209349203503', '201533209349306423', '201533209349306438', '201533209349203508', '201533209349203518', '201533179349307838', '201533189349300223', '201533179349309083', '201533189349300233', '201543099349200889', '201503099349201105', '201513079349201106', '201513089349200936', '201513089349100726', '201523069349301172', '201533069349300408', '201543169349201379', '201523209349314732', '201523209349313972', '201533179349202718', '201533179349202668', '201533179349202733', '201533099349200108', '201523209349311802', '201533209349205593', '201533209349101563', '201533179349308913', '201533209349102818', '201533179349309088', '201533189349300443', '201533209349101958', '201533209349206523', '201533179349309208', '201533209349305433', '201533209349305438', '201533209349206623', '201533179349309223', '201533179349309268', '201533209349102023', '201533209349102028', '201533179349309348', '201533199349100413', '201533209349102278', '201543099349301839', '201503069349200845', '201513069349200231', '201503069349200970', '201503069349200980', 
'201513069349200316', '201513099349201151', '201513079349100106', '201513099349100201', '201523069349301047', '201523069349301057', '201523069349301112', '201523069349301117', '201523069349301127', '201523069349301132', '201523069349301157', '201503039349100615', '201523089349300312', '201523089349300317', '201523089349300322', '201533069349300508', '201533069349300708', '201533069349300718', '201533069349300778', '201533069349300803', '201543159349200314', '201543159349200319', '201543089349301374', '201523209349316527', '201533169349101073', '201543069349100569', '201533179349305958', '201523229349300237', '201533179349303308', '201533179349305888', '201533179349305893', '201533179349305903', '201533179349305923', '201533179349306913', '201543079349301154', '201523219349200727', '201523219349200747', '201543109349200224', '201543109349200234', '201543109349200304', '201533169349100833', '201533069349300443', '201533069349300868', '201533179349301803', '201523209349311197', '201523069349100357', '201533179349305948', '201523229349300307', '201523209349314597', '201523209349314607', '201523209349315117', '201523209349312647', '201523209349312657', '201523209349310667', '201523219349301067', '201523209349313657', '201533179349201693', '201543109349100329', '201523219349100207', '201533179349100403', '201533179349100418', '201533179349100508', '201533179349100513', '201523219349200132', '201533179349200718', '201533209349302068', '201533179349100823', '201533179349100938', '201533179349101128', '201533179349101688', '201533179349101693', '201533179349307523', '201533209349306058', '201543069349301474', '201543149349201919', '201543149349202034', '201543089349200014', '201543099349201214', '201543159349100614', '201543159349100629', '201543149349201549', '201513089349201211', '201503089349201250', '201513099349100036', '201513099349100041', '201523069349301752', '201543139349101009', '201523229349300507', '201543109349100009', '201533209349302098', '201533209349301008', '201533209349301023', '201533179349203653', '201533179349203853', '201533179349203858', '201533179349308653', '201533179349203888', '201533189349200133', '201533179349102213', '201533179349102453', '201523069349301392', '201533099349302063', '201533099349302068', '201533099349302103', '201523079349300547', '201523079349300607', '201523079349300707', '201523039349301092', '201533099349301028', '201523039349301227', '201533089349300648', '201513089349100511', '201503099349201100', '201543089349100729', '201523229349300122', '201523229349300142', '201533179349303898', '201533179349303908', '201533179349303913', '201523209349310247', '201533179349300928', '201523209349314977', '201533179349302118', '201523209349313352', '201523209349313387', '201523209349313392', '201533179349303713', '201533179349303723', '201533179349302818', '201523229349200117', '201533179349201233', '201523219349200142', '201523219349200147', '201523219349200207', '201523039349300942', '201523039349300977', '201523039349300982', '201533039349300948', '201543039349300044', '201543039349301154', '201533079349300213', '201533089349300538', '201533079349300328', '201533089349200978', '201523079349201057', '201533039349200308', '201533099349301313', '201523079349200447', '201523079349200627', '201533079349301248', '201533079349301328', '201533049349300403', '201543069349201204', '201543169349201579', '201543109349200424', '201523209349311052', '201523239349300612', '201523209349312367', '201543109349100014', '201543109349100124', '201533099349301538', 
'201523089349301817', '201523089349301927', '201533089349300633', '201523069349200307', '201533069349200328', '201533069349200338', '201533069349200428', '201533069349200433', '201523069349201167', '201523069349201202', '201523069349201257', '201523069349201302', '201533209349302198', '201523239349300107', '201533209349203173', '201523239349300202', '201533209349206183', '201533209349206208', '201533179349300543', '201533209349204003', '201533199349200413', '201533199349200648', '201533209349205633', '201533209349205643', '201533209349302503', '201523209349311932', '201533179349308283', '201533179349308293', '201533179349308308', '201533209349202273', '201533209349303908', '201533209349306913', '201533209349103568', '201543159349200914', '201543159349200919', '201543099349200334', '201543099349200624', '201523239349300307', '201533179349303018', '201533179349306963', '201533089349100613', '201533089349100623', '201533179349306108', '201533179349203403', '201533209349301973', '201533179349101908', '201533209349304733', '201533189349200148', '201533189349200213', '201533189349200223', '201533209349201793', '201523049349300007', '201523049349300107', '201533099349301308', '201503099349300215', '201503099349300225', '201533109349300523', '201533089349301458', '201523099349300347', '201543039349200339', '201523069349200962', '201523069349201002', '201543079349301354', '201533209349203958', '201533209349203963', '201533209349203978', '201533209349203983', '201533209349205873', '201533209349103663', '201533209349103678', '201533209349103238', '201533189349100513', '201503069349201110', '201503069349201115', '201503069349201150', '201503069349201165', '201503069349201215', '201503039349200340', '201513069349200956', '201513099349201116', '201523069349301482', '201543169349201394', '201543169349201369', '201543139349100604', '201523229349300047', '201523209349310187', '201523209349310192', '201523209349312807', '201523209349314007', '201533089349100528', '201523209349314152', '201523219349200627', '201523219349200712', '201523219349200732', '201523219349200802', '201523219349200907', '201533179349202178', '201533179349203378', '201533179349203398', '201533209349300603', '201533209349300608', '201533209349300618', '201533209349300623', '201523069349301327', '201523069349300707', '201523089349301977', '201533039349300038', '201513089349100326', '201523089349301827', '201533089349200923', '201523079349301177', '201533039349300823', '201533039349300828', '201543159349101159', '201543139349100014', '201543139349100024', '201543139349100029', '201513089349201101', '201513089349201106', '201513089349201116', '201513089349201156', '201513089349201161', '201513069349200331', '201503079349200795', '201503089349100030', '201543089349301464', '201533179349303958', '201533179349303998', '201523209349312622', '201533179349101313', '201533209349201068', '201523099349200307', '201523069349200337', '201523219349201152', '201533179349202388', '201533179349202413', '201533179349202453', '201523209349314792', '201543079349301174', '201543079349301194', '201543079349301214', '201543079349301309', '201533209349205268', '201533209349201613', '201533209349204028', '201533209349204038', '201533209349204053', '201533209349305963', '201533179349308908', '201533199349300033', '201533209349101623', '201533209349101633', '201533209349101643', '201543119349200204', '201543119349200304', '201543159349200519', '201543159349200529', '201543149349201779', '201543159349200609', '201543149349201799', '201543159349200614', 
'201543159349200704', '201543159349200734', '201543159349200749', '201543149349201439', '201543159349100804', '201543159349100809', '201543099349200234', '201543099349200339', '201503099349200960', '201513039349100006', '201523209349308842', '201523209349308857', '201533179349306558', '201543079349300119', '201533179349302383', '201533179349302388', '201533179349302413', '201533179349302423', '201533179349101993', '201533179349101998', '201533209349304753', '201533179349305163', '201533209349305143', '201533209349305178', '201533189349300433', '201523089349301007', '201523099349301812', '201533099349301168', '201533099349301173', '201523059349300002', '201523059349300007', '201523209349310582', '201523219349300902', '201523219349300927', '201523219349300932', '201533179349303218', '201543079349200724', '201533179349303963', '201533179349201488', '201533179349201513', '201523209349315257', '201533209349301013', '201523069349300737', '201523039349300142', '201523089349301962', '201523039349300912', '201523089349300147', '201523099349301322', '201533089349200928', '201523079349301522', '201523079349301562', '201533079349300603', '201533209349304958', '201533209349202433', '201533209349103903', '201533209349307203', '201533079349200308', '201503089349100620', '201533089349301298', '201543149349303884', '201513069349100376', '201543159349302009', '201543169349301754', '201543159349302049', '201533089349300118', '201543089349200419', '201543169349301814', '201543099349200014', '201543099349200239', '201543159349200949', '201543149349201409', '201543149349201429', '201543099349201109', '201543109349200404', '201543139349100044', '201543139349100104', '201543139349100109', '201523209349313967', '201523209349313982', '201523039349301162', '201533039349301308', '201523099349301372', '201533099349300408', '201533099349300433', '201533099349300513', '201533099349300623', '201523099349301587', '201533079349300708', '201533089349301623', '201533079349301718', '201533099349300003', '201533099349300008', '201533089349301943', '201533089349301953', '201523099349200332', '201523099349200402', '201533209349304158', '201533179349202848', '201533179349202858', '201533179349202873', '201533179349202903', '201533209349202243', '201533209349202258', '201533209349202298', '201533209349202313', '201533209349202343', '201513079349200921', '201523209349310107', '201523209349313822', '201533179349304353', '201533179349200728', '201523209349315237', '201533209349200208', '201533209349201173', '201533209349201178', '201533209349302323', '201523209349311942', '201533209349306063', '201533209349307733', '201533179349302083', '201533179349302108', '201533209349309743', '201533209349310963', '201533209349310968', '201533209349310998', '201533209349310378', '201533209349308923', '201533209349310413', '201533209349311033', '201533209349308968', '201533209349310418', '201533209349311133', '201533209349314503', '201543179349306014', '201533209349309328', '201533209349314523', '201533209349314548', '201533209349314553', '201533209349310808', '201533209349314683', '201533209349314693', '201533209349310838', '201533209349314718', '201533209349314723', '201503089349200640', '201513099349200901', '201513079349200321', '201503069349200425', '201503069349200430', '201503069349200515', '201503069349200530', '201503069349200755', '201503069349200900', '201503099349200145', '201503099349200210', '201513079349200541', '201513089349200131', '201513089349200206', '201513079349100601', '201523089349300307', '201523209349308707', 
'201523209349311307', '201523209349314277', '201523069349100212', '201523209349310987', '201543089349201069', '201523209349311582', '201523209349311657', '201523039349100802', '201523079349100007', '201543089349301989', '201533179349201188', '201533209349300923', '201533179349203573', '201533209349306638', '201523099349301782', '201523099349301787', '201523089349300032', '201503069349300805', '201503069349300815', '201523039349301212', '201523099349300047', '201523099349300112', '201533139349300103', '201533089349300748', '201543099349301914', '201503069349201300', '201543089349301469', '201533179349301748', '201533179349301758', '201523209349313117', '201523209349315172', '201523209349315217', '201523209349314172', '201533179349302358', '201533179349302363', '201523239349100202', '201523239349100307', '201523239349100402', '201533079349300148', '201533089349300513', '201543039349300509', '201543039349300519', '201523069349300892', '201533039349200338', '201533039349200343', '201533039349200418', '201543039349300624', '201523069349300217', '201523069349300247', '201523069349300432', '201523099349300237', '201533089349300048', '201533079349200303', '201533079349200403', '201523069349200972', '201533209349205148', '201533209349206148', '201533209349201633', '201533209349201643', '201533199349200318', '201533209349201398', '201533209349101543', '201533189349100603', '201533209349306018', '201533199349100708', '201533199349100718', '201533069349300238', '201533179349306763', '201523209349312602', '201523209349312607', '201533179349301013', '201533179349303838', '201543089349301684', '201543089349301719', '201543089349301724', '201543089349301814', '201543089349301909', '201543089349301924', '201543089349301949', '201523239349200117', '201533209349307668', '201533209349307673', '201533179349203813', '201533209349204963', '201533209349204988', '201533179349202458', '201533209349102443', '201533179349309068', '201533209349300428', '201533199349301023', '201533199349301028', '201533179349309583', '201533209349307183', '201533179349306873', '201533069349200213', '201533069349200233', '201543069349201104', '201543159349100639', '201543149349201534', '201543139349100004', '201543139349100034', '201523209349308717', '201543089349200729', '201523209349316562', '201523209349313147', '201533179349302053', '201533169349101053', '201533179349306148', '201543079349300814', '201523209349312297', '201523209349312322', '201523209349312327', '201523209349312337', '201533179349201983', '201523099349301962', '201533089349301428', '201523069349300127', '201523219349201107', '201533179349202373', '201533209349302993', '201533209349103268', '201533209349103288', '201533209349306393', '201533179349309508', '201533199349301408', '201533179349303488', '201533179349305663', '201533179349305693', '201533179349305708', '201533179349306808', '201533069349200343', '201543089349200229', '201543089349200244', '201543089349200249', '201543099349201204', '201543169349201574', '201543139349100419', '201523209349314662', '201523209349314712', '201523209349313887', '201533179349303978', '201523209349312442', '201533179349201388', '201533179349202838', '201523219349200307', '201523209349312307', '201523229349100702', '201533209349304743', '201533209349204813', '201523069349300797', '201523069349300812', '201523069349300827', '201503099349300235', '201533089349301583', '201533089349300433', '201533079349301213', '201533079349301223', '201533209349204298', '201533209349100828', '201533209349206463', '201533209349206818', 
'201533099349200443', '201533099349200518', '201523089349200247', '201523099349200437', '201523089349200507', '201523089349200512', '201523089349200547', '201523039349200832', '201533089349200738', '201533169349100923', '201533179349303518', '201543069349100369', '201543069349100379', '201543069349100414', '201543069349100524', '201543079349200629', '201533179349307778', '201533209349302523', '201533209349307303', '201533209349307333', '201533209349307363', '201523239349300862', '201533209349303283', '201533209349303298', '201533209349303303', '201533209349303773', '201523099349200432', '201523079349300612', '201503089349100510', '201533039349300213', '201513099349300511', '201543159349100124', '201543159349100134', '201543159349100139', '201543149349100034', '201543149349100049', '201543149349303704', '201543149349303804', '201543149349101224', '201543169349301619', '201533159349304133', '201543039349100624', '201543039349100629', '201533099349100038', '201533089349300043', '201543089349200044', '201543089349200104', '201543089349200119', '201543089349200139', '201543069349200314', '201523209349310152', '201523209349313832', '201533179349201673', '201533179349201678', '201533179349201723', '201543109349100404', '201523209349312222', '201523209349312232', '201533209349303123', '201533209349303163', '201533179349307363', '201533209349302293', '201533209349204648', '201533209349205588', '201533209349307383', '201533179349202793', '201533179349202843', '201533179349202853', '201533179349202863', '201533179349308278', '201533179349308328', '201533209349203058', '201533209349102923', '201533209349206783', '201533209349303308', '201533209349303358', '201533209349303388', '201533209349303708', '201533209349303418', '201503069349200810', '201503069349200830', '201503069349200990', '201513089349200511', '201513089349200526', '201543079349300739', '201543079349300834', '201523209349313357', '201523209349312217', '201523239349200227', '201523239349200312', '201533179349200708', '201533179349300308', '201533179349300323', '201533179349300333', '201533179349300338', '201533209349302678', '201533179349203833', '201523209349311832', '201533209349103233', '201533209349101553', '201533209349306053', '201503039349300600', '201543159349100039', '201543169349301719', '201523089349100002', '201523089349100017', '201523089349100022', '201523089349100207', '201533099349100613', '201533209349310883', '201533209349310893', '201533209349310923', '201533209349310928', '201543169349201529', '201543149349202024', '201543159349201019', '201543159349200724', '201543109349200209', '201543109349200314', '201543109349200504', '201543109349200434', '201543099349200224', '201543099349200429', '201543099349301959', '201513079349201066', '201513079349201101', '201513039349200211', '201503099349100735', '201533069349300143', '201533069349300618', '201523099349301612', '201543139349100749', '201543139349100754', '201543139349100764', '201523209349308752', '201523209349308762', '201523209349310132', '201523209349308852', '201523209349311232', '201523069349100322', '201523069349100327', '201543079349100204', '201523209349313587', '201533179349305998', '201543099349100009', '201523239349100407', '201523239349100502', '201533179349102208', '201523099349301772', '201533099349301158', '201503099349300130', '201523079349301702', '201523079349301712', '201543069349201114', '201543069349201304', '201543159349200134', '201523209349312642', '201533179349306993', '201533169349100218', '201533179349201218', '201533179349203553', 
'201523209349312382', '201533209349301128', '201533179349305138', '201523099349300012', '201543039349301004', '201533109349300648', '201533039349300848', '201543039349301109', '201523079349301527', '201523079349200032', '201533089349200743', '201543039349300544', '201533069349301108', '201533089349300023', '201533039349200213', '201533079349300818', '201533039349300923', '201523099349200737', '201533079349300903', '201523069349200342', '201533209349202168', '201533209349202178', '201533209349302238', '201533209349201453', '201533209349201548', '201523209349312842', '201533209349201278', '201533209349201418', '201533199349300638', '201533209349306133', '201533209349103323', '201533209349103328', '201543159349200419', '201523209349313022']
# csvcut -c 9 index_2015.csv | head -n 1000 > returns_2015.txt
object_ids_2015 = ['201542399349300614', '201542399349300619', '201542399349300629', '201542399349300634', '201542399349300719', '201542399349300724', '201542399349300739', '201522369349300102', '201522369349300112', '201522369349300117', '201522369349300122', '201522369349300127', '201522369349300132', '201522369349300137', '201522369349300142', '201522369349300147', '201522369349300202', '201522369349300207', '201522369349300212', '201522369349300227', '201522369349300307', '201522369349300317', '201532299349304913', '201532299349304953', '201542379349300864', '201542379349300874', '201542379349300884', '201542379349301004', '201542379349301009', '201532299349302418', '201532299349302423', '201532299349302433', '201532299349302443', '201532299349302473', '201532299349302483', '201532299349302488', '201532299349302498', '201532299349302503', '201532299349302518', '201532299349302523', '201532299349302543', '201532299349302558', '201542399349200309', '201542399349200319', '201542399349200324', '201542399349200334', '201542399349200339', '201542399349200609', '201542399349200614', '201542399349200709', '201542399349200714', '201542399349200814', '201542399349200909', '201542399349201004', '201522379349200037', '201522379349200127', '201522379349200202', '201522379349200212', '201522379349200307', '201522379349200312', '201522379349200322', '201522379349200402', '201522379349200612', '201522379349200712', '201522379349200722', '201532369349200018', '201532369349200023', '201532399349201003', '201542399349200019', '201542399349200104', '201542399349200119', '201542399349200129', '201542399349200504', '201542399349200509', '201502649349200005', '201502649349200010', '201502649349200100', '201502649349200105', '201502649349200110', '201502649349200120', '201502649349200125', '201502649349200200', '201502649349200205', '201502649349200210', '201502649349200215', '201502649349200225', '201502649349200250', '201502649349200255', '201502649349200265', '201502649349200300', '201502649349200315', '201502649349200330', '201502649349200355', '201522679349200002', '201522679349200022', '201522679349200032', '201522679349200102', '201502669349301050', '201502669349301070', '201522659349300002', '201522659349300012', '201522659349300042', '201532649349300343', '201532649349300348', '201532649349300433', '201532649349300438', '201532649349300538', '201532649349300603', '201532649349300658', '201532649349300663', '201542649349300034', '201542649349300039', '201542649349300184', '201542649349300234', '201542649349300329', '201542649349300409', '201512659349200121', '201512659349200131', '201512659349200201', '201512659349200211', '201512659349200316', '201512659349200326', '201512659349200501', '201512659349200526', '201512659349200616', '201512659349200621', '201512659349200716', '201522649349200267', '201522649349200357', '201522649349200372', '201522649349200402', '201522649349200407', '201532649349200008', '201532649349200013', '201532649349200103', '201542619349200304', '201542619349200514', '201542619349200809', '201512669349301106', '201522669349300002', '201522669349300017', '201522669349300022', '201532659349300538', '201532659349300548', '201532659349300608', '201532659349300628', '201532659349300638', '201532659349300728', '201532659349300753', '201532659349300953', '201532659349301003', '201532659349301053', '201532659349301058', '201542659349300224', '201542659349300234', '201542659349300239', '201542659349300314', '201512649349300026', '201512649349300036', '201532299349302568', 
'201532299349304203', '201532299349304208', '201532299349304223', '201522369349300517', '201522369349300527', '201522369349300532', '201522369349300537', '201522369349300542', '201522369349300547', '201522369349300602', '201522369349300607', '201522369349300612', '201522369349300617', '201522369349300622', '201522369349300632', '201522369349300637', '201522369349300652', '201522369349300667', '201522369349300687', '201522369349300802', '201532339349300003', '201532339349300008', '201532339349300018', '201542389349300009', '201542389349300014', '201542269349301474', '201542269349301479', '201542269349301489', '201542269349301499', '201542269349301554', '201542269349301564', '201542269349301574', '201542269349301584', '201542269349301589', '201542269349301599', '201542269349301659', '201542269349301664', '201542269349301674', '201542269349301679', '201542269349301694', '201542269349301699', '201542279349300039', '201542399349200819', '201542399349200904', '201542399349200914', '201532299349100643', '201532299349100648', '201532299349100703', '201532299349100713', '201532299349100718', '201532299349100723', '201542269349100129', '201542269349100134', '201542269349100139', '201542269349100144', '201542269349100149', '201542269349100204', '201542269349100209', '201542269349100219', '201542269349100224', '201542269349100234', '201542269349100239', '201542269349100249', '201542269349100304', '201542269349100314', '201542269349100324', '201542269349100329', '201542269349100339', '201502649349300020', '201542669349300224', '201542669349300234', '201542669349300244', '201542669349300304', '201542669349300309', '201542669349300324', '201542669349300329', '201542669349300409', '201542669349300414', '201542669349300429', '201542669349300434', '201542669349300444', '201542669349300509', '201542669349300514', '201542669349300524', '201542669349300529', '201542669349300534', '201502379349200000', '201502379349200010', '201502379349200020', '201502379349200040', '201502379349200115', '201502379349200135', '201502379349200305', '201502379349200310', '201502379349200320', '201502379349200620', '201502389349200410', '201502389349200415', '201502389349200520', '201502389349200630', '201502389349200705', '201502389349200730', '201502389349200760', '201512379349200136', '201512379349200316', '201512379349200711', '201502389349200300', '201512379349200016', '201512379349200026', '201512379349200031', '201512379349200041', '201512379349200101', '201512379349200111', '201512379349200116', '201512379349200126', '201512379349200201', '201512379349200211', '201512379349200216', '201512379349200226', '201512379349200301', '201512379349200306', '201512379349200321', '201512379349200401', '201512379349200406', '201512379349200416', '201512379349200601', '201512379349200611', '201512649349300051', '201512649349300151', '201512649349300166', '201512649349300176', '201512649349300196', '201512649349300226', '201512649349300231', '201512649349300301', '201512649349300316', '201512649349300326', '201512649349300341', '201542679349300134', '201542679349300149', '201542679349300214', '201542679349300219', '201542679349300239', '201542679349300244', '201542679349300314', '201542679349300319', '201532319349200118', '201532319349200323', '201542269349201814', '201542269349201874', '201542269349201884', '201542269349201969', '201542269349201979', '201542299349200214', '201542299349200244', '201542299349200329', '201542299349200429', '201542299349200509', '201542299349200524', '201542309349200034', '201542309349200104', 
'201542309349200134', '201542309349200244', '201542309349200404', '201502349349300000', '201502349349300200', '201502349349300700', '201502359349300100', '201502359349300400', '201502369349300000', '201542279349300104', '201542279349300114', '201542279349300119', '201512399349300006', '201512399349300016', '201512399349300021', '201512399349300031', '201512399349300036', '201512399349300116', '201512399349300121', '201512399349300136', '201542339349300119', '201542339349300124', '201542339349300129', '201542339349300134', '201542339349300204', '201542339349300214', '201542339349300234', '201542339349300309', '201542339349300314', '201542339349300334', '201542339349300404', '201512399349300321', '201512399349300336', '201512399349300341', '201512399349300411', '201532369349300503', '201532369349300508', '201532369349300513', '201532369349300518', '201532369349300523', '201542339349300419', '201542339349300504', '201542339349300519', '201542339349300529', '201542339349300609', '201542339349300614', '201542339349300624', '201542339349300804', '201542339349300814', '201542339349300819', '201542349349300204', '201542359349300304', '201502619349300005', '201502619349300010', '201502619349300015', '201502619349300020', '201502619349300025', '201502619349300100', '201502619349300105', '201502619349300110', '201502619349300125', '201502619349300135', '201502619349300200', '201502619349300205', '201502619349300215', '201502619349300225', '201502619349300230', '201542649349300619', '201542649349300624', '201542649349300634', '201542649349300639', '201542649349300654', '201542649349300664', '201542649349300684', '201542649349300754', '201512619349300911', '201512619349301001', '201512619349301006', '201512619349301101', '201512629349300101', '201512679349300716', '201512679349300731', '201512679349300736', '201512679349300746', '201512679349300801', '201512679349300816', '201512679349300826', '201512679349300836', '201512679349300901', '201512679349300906', '201512679349300921', '201512679349301011', '201512679349301101', '201502649349300615', '201502649349300620', '201512679349100006', '201512679349100101', '201512679349100106', '201512679349100201', '201512679349100206', '201512679349100306', '201522669349100002', '201522669349100102', '201522669349100202', '201522669349100402', '201522669349100602', '201522669349100702', '201532659349100503', '201532659349100508', '201532659349100603', '201542659349100009', '201542659349100104', '201542659349100204', '201542659349100404', '201542659349100504', '201542659349100604', '201522649349300162', '201522649349300172', '201522649349300177', '201522649349300182', '201522649349300187', '201532619349300708', '201532619349300718', '201532619349300808', '201532619349300813', '201532619349300903', '201532619349300908', '201532619349300918', '201532619349301103', '201532639349300053', '201502369349300005', '201512369349300001', '201512369349300031', '201512369349300111', '201512369349300116', '201512369349300126', '201512369349300221', '201512369349300231', '201512369349300246', '201512369349300336', '201512369349300451', '201512369349300496', '201512369349300521', '201512369349300546', '201512369349300621', '201532319349200423', '201532319349200433', '201532319349200518', '201532319349200543', '201532319349200608', '201532319349200713', '201532319349200833', '201542269349202034', '201542269349202039', '201542269349202059', '201542269349202074', '201542269349202094', '201542299349200609', '201542299349200619', '201542299349200724', '201542299349200814', 
'201542299349200844', '201542309349200524', '201542309349200534', '201542309349200619', '201532269349200713', '201532269349200738', '201532269349201038', '201532269349201043', '201532269349201053', '201532269349201078', '201532269349201108', '201532339349300508', '201532339349300513', '201532339349300528', '201532339349300533', '201532339349300608', '201532339349300613', '201532339349300623', '201532339349300633', '201532339349300808', '201532339349300823', '201532349349300203', '201532349349300303', '201532399349300238', '201532399349300243', '201532399349300308', '201542389349300844', '201542389349300849', '201542389349300914', '201522339349300127', '201522339349300137', '201522339349300202', '201522339349300217', '201522339349300222', '201522339349300227', '201522339349300232', '201522339349300302', '201522339349300307', '201522339349300317', '201522339349300322', '201522339349300327', '201522339349300407', '201522339349300417', '201522339349300427', '201522339349300517', '201522339349300522', '201542369349300244', '201542369349300249', '201542369349300324', '201542369349300404', '201542369349300454', '201532379349300883', '201532379349300903', '201542369349300474', '201502649349300640', '201502649349300650', '201502649349300655', '201502649349300660', '201502649349300670', '201502649349300675', '201502649349300700', '201522679349300612', '201522679349300617', '201522679349300702', '201522679349300712', '201522679349300717', '201532679349300013', '201532679349300023', '201532679349300033', '201532679349300108', '201532679349300113', '201532679349300133', '201502659349300105', '201502659349300110', '201502659349300120', '201502659349300135', '201502659349300140', '201512649349300416', '201512649349300421', '201512649349300431', '201512649349300436', '201512649349300501', '201512649349300511', '201512649349300521', '201512649349300531', '201512649349300541', '201512649349300546', '201512649349300611', '201512649349300631', '201512649349300641', '201502649349300535', '201502649349300610', '201502649349300625', '201502649349300635', '201502649349300665', '201502649349300680', '201502649349300690', '201532639349300203', '201542619349300109', '201542619349300114', '201542619349300119', '201542619349300124', '201542619349300134', '201542619349300204', '201542619349300209', '201542619349300219', '201512669349300326', '201512669349300331', '201512669349300336', '201512669349300411', '201512669349300416', '201532299349201658', '201532299349201668', '201532299349201703', '201532299349201708', '201542269349202354', '201542269349202364', '201542299349200919', '201542299349201129', '201542299349201139', '201542309349200804', '201542309349200964', '201542309349200974', '201542309349201104', '201532269349201478', '201532269349201518', '201532269349201543', '201532269349201563', '201532269349201603', '201532269349201633', '201532289349200113', '201532289349200118', '201532289349200313', '201532289349200603', '201532289349200608', '201532299349201993', '201532329349200028', '201532329349200113', '201532329349200303', '201532329349200403', '201532329349200423', '201542299349201424', '201542299349201434', '201542299349201554', '201542299349201569', '201542299349201604', '201532269349201783', '201532269349201793', '201532269349201803', '201532269349201818', '201532299349202373', '201532299349202388', '201532299349202393', '201542369349300484', '201542369349300489', '201542369349300509', '201542369349300524', '201542369349300529', '201542369349300539', '201542369349300544', '201542369349300609', 
'201542369349300619', '201542369349300624', '201542369349300634', '201542369349300649', '201542369349300654', '201542369349300669', '201542369349300674', '201502379349300400', '201502379349300500', '201502379349300600', '201502379349300610', '201502379349300615', '201502379349300620', '201502379349300700', '201502379349300705', '201502379349300710', '201502379349300720', '201502379349300725', '201502379349300730', '201502379349300740', '201502379349300745', '201502379349300855', '201502379349300860', '201502379349300875', '201502379349300905', '201502379349300910', '201502379349301005', '201542379349300729', '201542379349300734', '201532269349302848', '201532269349302868', '201532269349302918', '201532269349302923', '201532269349302973', '201532269349302993', '201502649349300710', '201502649349300750', '201502649349300800', '201512629349300601', '201512649349300006', '201512649349300011', '201522619349300007', '201522619349300012', '201522619349300127', '201522679349300227', '201522679349300232', '201522679349300412', '201522679349300517', '201522679349300722', '201532679349300028', '201532679349300118', '201502679349200720', '201502679349200750', '201522669349200002', '201522669349200017', '201522669349200022', '201522669349200107', '201522669349200117', '201522669349200122', '201522669349200202', '201522669349200217', '201522669349200222', '201542659349200424', '201542659349200504', '201542659349200524', '201542659349200529', '201542659349200614', '201542659349200619', '201542659349200709', '201542659349200714', '201512659349200001', '201512659349200006', '201512659349200011', '201512659349200021', '201512659349200101', '201512659349200111', '201512659349200126', '201512669349300431', '201512669349300441', '201512669349300446', '201512669349300511', '201512669349300531', '201512669349300601', '201512669349300621', '201512669349300626', '201522659349300722', '201522659349300757', '201522659349300762', '201522659349300802', '201522659349300812', '201522659349300907', '201522659349301002', '201522659349301052', '201512659349300041', '201512659349300046', '201512659349300101', '201512659349300121', '201512659349300126', '201512659349300206', '201512659349300216', '201512659349300221', '201512659349300236', '201512659349300241', '201512659349300301', '201512659349300311', '201512659349300326', '201512659349300401', '201512659349300416', '201512659349300431', '201522649349300547', '201522649349300602', '201542639349300204', '201502659349300235', '201502659349300300', '201502659349300330', '201502659349300415', '201502659349300430', '201502659349300545', '201502659349300625', '201512649349300661', '201532299349202433', '201532299349202508', '201532299349202518', '201532299349202533', '201532299349202538', '201532299349202568', '201542279349200014', '201542279349200139', '201542279349200229', '201542279349200324', '201542279349200429', '201542299349201924', '201542299349201984', '201542299349202154', '201532299349202613', '201532299349202618', '201532299349202668', '201532299349202678', '201532299349202723', '201532299349202763', '201532299349202778', '201542279349200634', '201542279349200714', '201542279349200724', '201542279349200734', '201542279349200809', '201542299349202209', '201542299349202259', '201542299349202334', '201542299349202359', '201542299349202369', '201542299349202379', '201542299349202394', '201532299349100018', '201532299349100023', '201532299349100028', '201532299349100033', '201532299349100038', '201532299349100043', '201532299349100048', '201532299349100103', 
'201532299349100108', '201532269349303013', '201532269349303018', '201532299349302438', '201532299349302453', '201532299349302463', '201532299349302468', '201532299349302538', '201532299349302553', '201532299349304308', '201532299349304318', '201532299349304353', '201532299349304423', '201532299349304443', '201532299349304463', '201512399349300731', '201512399349300741', '201522389349300042', '201522389349300047', '201522389349300102', '201522389349300107', '201522389349300202', '201522389349300207', '201522389349300212', '201522389349300242', '201522389349300247', '201522389349300307', '201522389349300312', '201522389349300317', '201522389349300322', '201522389349300327', '201532369349300608', '201532369349300658', '201532369349300673', '201532369349300683', '201532369349300758', '201532369349300803', '201532269349303203', '201532269349303233', '201532299349300113', '201532299349300128', '201532299349302648', '201532299349302703', '201512659349200216', '201522649349200317', '201522649349200322', '201522649349200332', '201522649349200362', '201522649349200367', '201542619349200504', '201542619349200509', '201542619349200604', '201542619349200704', '201542619349200804', '201542619349200904', '201502669349300410', '201502669349300420', '201502669349300425', '201502669349300440', '201502669349300445', '201502669349300500', '201502669349300515', '201502669349300525', '201502669349300530', '201502669349300535', '201502669349300600', '201502669349300605', '201502669349300610', '201502669349300615', '201502669349300620', '201502669349300630', '201502669349300645', '201502669349300800', '201512379349300911', '201512379349301006', '201512379349301016', '201512379349301021', '201522669349300952', '201522669349301057', '201522669349301067', '201522669349301102', '201532669349300003', '201532669349300008', '201532669349300023', '201532669349300108', '201532669349300113', '201512649349300681', '201512649349300701', '201522649349300027', '201522649349300152', '201522649349300167', '201532619349300203', '201532619349300213', '201532619349300238', '201532619349300303', '201532619349300418', '201532619349300513', '201532619349300603', '201542679349300919', '201502639349200250', '201512619349200001', '201512619349200011', '201512619349200101', '201512619349200206', '201512619349200211', '201512619349200306', '201512619349200311', '201512619349200316', '201512619349200321', '201512619349200401', '201512619349200411', '201512619349200501', '201512619349200601', '201512619349200606', '201512619349200701', '201512619349200711', '201512619349200801', '201512619349200811', '201532299349100113', '201532299349100118', '201532299349100123', '201532299349100128', '201532299349100133', '201532299349100138', '201532299349100203', '201532299349100208', '201532299349100213', '201532299349100218', '201532299349100223', '201532299349100233', '201532299349100238', '201532299349100248', '201532299349100303', '201542269349100004', '201542269349100014', '201542299349101134', '201542299349101154', '201542299349101159', '201542299349101169', '201542299349101174', '201542299349101184', '201542299349101189', '201542299349101209', '201542299349101219', '201542299349101224', '201542299349101254', '201542299349101259', '201542299349101269', '201542309349100004', '201542309349100104', '201542309349100109', '201542309349100304', '201542309349100314', '201532299349300603', '201532299349300613', '201532299349300618', '201532299349300633', '201532299349300638', '201532299349300648', '201532299349300713', '201532299349302723', 
'201532299349302753', '201532299349304498', '201532299349304578', '201532299349304603', '201532299349304628', '201532299349304638', '201532299349304653', '201532299349304668', '201542269349301714', '201542269349301719', '201542269349301809', '201542269349301814', '201522389349300607', '201522389349300612', '201522389349300617', '201522389349300622', '201522389349300632', '201522389349300637', '201522389349300642', '201522389349300647', '201522389349300702', '201522389349300707', '201522389349300712', '201522389349300717', '201522389349300727', '201522389349300732', '201522389349300802', '201522389349300822', '201522389349300827', '201522389349300902', '201522389349300907', '201522389349300917', '201532379349300013', '201522399349300032'] | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/object_ids.py | object_ids.py |
import os
import sys
import io
import xmltodict
import json
from collections import OrderedDict
from xml.parsers.expat import ExpatError
from .type_utils import dictType, orderedDictType, listType, \
unicodeType, noneType, strType
from .file_utils import stream_download, validate_object_id, \
get_local_path
from .settings import KNOWN_SCHEDULES, IRS_READER_ROOT
class InvalidXMLException(Exception):
pass
class FileMissingException(Exception):
pass
class Filing(object):
def __init__(self, object_id, filepath=None, URL=None, json=None):
""" Filepath is the location of the file locally;
URL is it's remote location (if not default)
Ignore these and defaults will be used.
If filepath is set, URL is ignored.
json is a json representation of the data, so if given,
no file will be downloaded.
"""
self.raw_irs_dict = None # The parsed xml will go here
self.version_string = None # Version number here
self.object_id = validate_object_id(object_id)
self.result = None
self.processed = False
self.keyerrors = None
self.csv_result = None
if json:
self.json = json
self.input_type = 'json'
else:
self.json = None
self.input_type = 'xml'
if filepath:
self.filepath = filepath
else:
self.filepath = get_local_path(self.object_id)
        if URL:
            self.URL = URL
        else:
            # Avoid an AttributeError later if no URL was supplied.
            self.URL = None
def _download(self, force_overwrite=False, verbose=False):
"""
Files are no longer downloadable.
"""
if os.path.isfile(self.filepath):
return True
else:
raise FileMissingException(
"Filing not available, try downloading with irsx_retrieve [ YEAR ]"
)
def _denamespacify(self,entity):
"""
It's legal to include namespaces in the xml tags, e.g. irs:Return instead of Return
This is very rare; see 201940149349301304_public.xml for an example.
"""
thisentitytype = type(entity)
if thisentitytype == orderedDictType or thisentitytype == dictType:
newOD = OrderedDict()
for key in entity.keys():
newkey = key
if ":" in key:
newkey = key.split(":")[1]
newvalue = entity[key]
if type(newvalue) == listType or type(newvalue) == orderedDictType or type(newvalue) == dictType:
newvalue = self._denamespacify(newvalue)
newOD[newkey] = newvalue
return newOD
elif thisentitytype == listType:
newlist = list()
for item in entity:
newvalue = item
if type(newvalue) == listType or type(newvalue) == orderedDictType:
newvalue = self._denamespacify(newvalue)
newlist.append(newvalue)
return newlist
else:
return entity
def _set_dict_from_xml(self):
# io works across python2 and 3, and allows an encoding arg
with io.open(self.filepath, 'r', encoding='utf-8-sig') as fh:
raw_file = fh.read()
try:
self.raw_irs_dict = self._denamespacify(xmltodict.parse(raw_file))
except ExpatError:
raise InvalidXMLException(
"\nXML Parse error in " + self.filepath \
+ "\nFile may be damaged or incomplete.\n"\
+ "Try erasing this file and downloading again."
)
try:
self.raw_irs_dict['Return']
except KeyError:
raise InvalidXMLException(
"'Return' element not located in" + self.filepath \
+ "\nFile may be damaged or incomplete.\n" \
+ "Try erasing this file and downloading again."
)
def _set_dict_from_json(self):
self.raw_irs_dict = self.json
def _set_version(self):
self.version_string = self.raw_irs_dict['Return']['@returnVersion']
def _set_ein(self):
self.ein = self.raw_irs_dict['Return']['ReturnHeader']['Filer']['EIN']
def _set_schedules(self):
""" Attach the known and unknown schedules """
self.schedules = ['ReturnHeader990x', ]
self.otherforms = []
for sked in self.raw_irs_dict['Return']['ReturnData'].keys():
if not sked.startswith("@"):
if sked in KNOWN_SCHEDULES:
self.schedules.append(sked)
else:
self.otherforms.append(sked)
def get_object_id(self):
return self.object_id
def get_schedule(self, skedname):
if skedname == 'ReturnHeader990x':
return self.raw_irs_dict['Return']['ReturnHeader']
elif skedname in self.schedules:
return self.raw_irs_dict['Return']['ReturnData'][skedname]
else:
return None
def get_ein(self):
return self.ein
def get_otherform(self, skedname):
if skedname in self.otherforms:
return self.raw_irs_dict['Return']['ReturnData'][skedname]
else:
return None
def get_filepath(self):
return self.filepath
def get_version(self):
return self.version_string
def get_raw_irs_dict(self):
return self.raw_irs_dict
def list_schedules(self):
return self.schedules
def set_result(self, result):
self.result = result
def get_result(self):
return self.result
def set_csv_result(self, csv_result):
self.csv_result = csv_result
def get_csv_result(self):
return self.csv_result
def set_keyerrors(self, keyerrorlist):
self.keyerrors = keyerrorlist
def get_keyerrors(self):
return self.keyerrors
def get_unparsed_json(self):
""" Json dicts are unordered """
return json.dumps(self.raw_irs_dict)
def get_type(self):
if 'IRS990' in self.schedules:
return 'IRS990'
elif 'IRS990EZ' in self.schedules:
return 'IRS990EZ'
elif 'IRS990PF' in self.schedules:
return 'IRS990PF'
else:
raise Exception("Missing 990/990EZ/990PF-is this filing valid?")
def get_parsed_sked(self, skedname):
""" Returns an array because multiple sked K's are allowed"""
if not self.processed:
raise Exception("Filing must be processed to return parsed sked")
if skedname in self.schedules:
matching_skeds = []
for sked in self.result:
if sked['schedule_name']==skedname:
matching_skeds.append(sked)
return matching_skeds
else:
return []
def process(self, verbose=False):
# don't reprocess inadvertently
if not self.processed:
self.processed=True
if self.json:
self._set_dict_from_json()
else:
self._download(verbose=verbose)
self._set_dict_from_xml()
self._set_version()
self._set_ein()
self._set_schedules() | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/filing.py | filing.py |
import sys
import os
from .dir_utils import mkdir_p
IRS_READER_ROOT = os.path.abspath(os.path.dirname(__file__))
# This is the URL to amazon's bucket, could use another synced to it
IRS_XML_HTTP_BASE = "https://s3.amazonaws.com/irs-form-990"
# It can be hard to locate this.
IRSX_SETTINGS_LOCATION = (os.path.join(IRS_READER_ROOT, "settings.py"))
# Defaults to the same directory as this settings file, but you can override
# with the `IRSX_CACHE_DIRECTORY` environment variable
IRSX_CACHE_DIRECTORY = os.environ.get("IRSX_CACHE_DIRECTORY", IRS_READER_ROOT)
# The directory we put files in while we're processing them
WORKING_DIRECTORY = os.environ.get(
"IRSX_WORKING_DIRECTORY", os.path.join(IRSX_CACHE_DIRECTORY, "XML"))
# Helpful to keep these around for lookup purposes
INDEX_DIRECTORY = os.environ.get(
"IRSX_INDEX_DIRECTORY", os.path.join(IRSX_CACHE_DIRECTORY, "CSV"))
IRS_INDEX_BASE = "https://apps.irs.gov/pub/epostcard/990/xml/%s/index_%s.csv"
KNOWN_SCHEDULES = [
"IRS990", "IRS990EZ", "IRS990PF", "IRS990ScheduleA",
"IRS990ScheduleB", "IRS990ScheduleC", "IRS990ScheduleD",
"IRS990ScheduleE", "IRS990ScheduleF", "IRS990ScheduleG",
"IRS990ScheduleH", "IRS990ScheduleI", "IRS990ScheduleJ",
"IRS990ScheduleK", "IRS990ScheduleL", "IRS990ScheduleM",
"IRS990ScheduleN", "IRS990ScheduleO", "IRS990ScheduleR",
"ReturnHeader990x"
]
# these could get pushed to metadata directory?
ALLOWED_VERSIONSTRINGS = [
'2013v3.0', '2013v3.1', '2013v4.0',
'2014v5.0', '2014v6.0',
'2015v2.0', '2015v2.1', '2015v3.0',
'2016v3.0', '2016v3.1',
'2017v2.0', '2017v2.1', '2017v2.2', '2017v2.3',
'2018v3.0', '2018v3.1', '2018v3.2', '2018v3.3',
'2019v5.0', '2019v5.1', '2019v5.2',
'2020v1.0', '2020v1.1','2020v1.2','2020v1.3', '2020v2.0', '2020v3.0', '2020v4.0','2020v4.1', '2020v4.2',
'2021v4.0','2021v4.1','2021v4.2','2021v4.3',
'2022v4.0','2022v4.1','2022v5.0'
]
# 2020 is experimental
# see https://www.irs.gov/charities-non-profits/ty2020-xml-schemas-and-business-rules-for-exempt-organizations-modernized-e-file
# We can capture the group structure for these so it doesn't break
# but these versions ARE NOT supported and aren't mapped to IRSx variables
CSV_ALLOWED_VERSIONSTRINGS = ALLOWED_VERSIONSTRINGS + [
'2010v3.2', '2010v3.4', '2010v3.6', '2010v3.7', '2011v1.2', '2011v1.3',
'2011v1.4', '2011v1.5', '2012v2.0', '2012v2.1', '2012v2.2', '2012v2.3',
'2012v3.0'
]
METADATA_DIRECTORY = (os.path.join(IRS_READER_ROOT, "metadata"))
KEYERROR_LOG = os.path.join(IRS_READER_ROOT, "keyerrors.log")
LOG_KEY = 'xml'
mkdir_p([WORKING_DIRECTORY, INDEX_DIRECTORY])
try:
from .local_settings import *
except ImportError:
pass | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/settings.py | settings.py |
from .filing import Filing
from .standardizer import Standardizer, Documentizer, VersionDocumentizer
from .sked_dict_reader import SkedDictReader
# from .log_utils import configure_logging
from .type_utils import listType
from .settings import WORKING_DIRECTORY, ALLOWED_VERSIONSTRINGS, CSV_ALLOWED_VERSIONSTRINGS
class XMLRunner(object):
""" Load a Standardizer just once while running multiple filings
Return Filing objects with results, keyerrors set
"""
def __init__(self, documentation=False, standardizer=None, csv_format=False):
self.documentation = documentation
self.csv_format = csv_format
if documentation:
if not standardizer:
self.standardizer = Documentizer()
else:
if standardizer:
self.standardizer = standardizer
else:
self.standardizer = Standardizer()
self.group_dicts = self.standardizer.get_groups()
self.whole_filing_data = []
self.filing_keyerr_data = []
def get_standardizer(self):
return self.standardizer
def _run_schedule_k(self, sked, object_id, sked_dict, path_root, ein):
assert sked == 'IRS990ScheduleK'
if type(sked_dict) == listType:
for individual_sked in sked_dict:
doc_id = individual_sked['@documentId']
reader = SkedDictReader(
self.standardizer,
self.group_dicts,
object_id,
ein,
documentId=doc_id,
documentation=self.documentation,
csv_format=self.csv_format,
)
result = reader.parse(individual_sked, parent_path=path_root)
self.whole_filing_data.append({
'schedule_name': sked,
'groups': result['groups'],
'schedule_parts': result['schedule_parts'],
'csv_line_array':result['csv_line_array']
})
else:
reader = SkedDictReader(
self.standardizer,
self.group_dicts,
object_id,
ein,
documentation=self.documentation,
csv_format=self.csv_format,
)
result = reader.parse(sked_dict, parent_path=path_root)
self.whole_filing_data.append({
'schedule_name': sked,
'groups': result['groups'],
'schedule_parts': result['schedule_parts'],
'csv_line_array':result['csv_line_array']
})
def _run_schedule(self, sked, object_id, sked_dict, ein):
path_root = "/" + sked
# Only sked K (bonds) is allowed to repeat
if sked == 'IRS990ScheduleK':
self._run_schedule_k(sked, object_id, sked_dict, path_root, ein)
else:
reader = SkedDictReader(
self.standardizer,
self.group_dicts,
object_id,
ein,
documentation=self.documentation,
csv_format=self.csv_format,
)
if sked == 'ReturnHeader990x':
path_root = "/ReturnHeader"
result = reader.parse(sked_dict, parent_path=path_root)
self.whole_filing_data.append({
'schedule_name': sked,
'groups': result['groups'],
'schedule_parts': result['schedule_parts'],
'csv_line_array':result['csv_line_array']
})
if len(result['group_keyerrors']) > 0 or len(result['keyerrors'])> 0:
self.filing_keyerr_data.append({
'schedule_name': sked,
'group_keyerrors':result['group_keyerrors'],
'keyerrors':result['keyerrors'],
})
def run_filing(self, object_id, verbose=False):
self.whole_filing_data = []
self.filing_keyerr_data = []
this_filing = Filing(object_id)
this_filing.process(verbose=verbose)
this_version = this_filing.get_version()
if verbose:
print("Filing %s is version %s" % (object_id, this_version))
if this_version in ALLOWED_VERSIONSTRINGS or ( self.csv_format and this_version in CSV_ALLOWED_VERSIONSTRINGS ):
this_version = this_filing.get_version()
schedules = this_filing.list_schedules()
ein = this_filing.get_ein()
self.whole_filing_data = []
for sked in schedules:
sked_dict = this_filing.get_schedule(sked)
self._run_schedule(sked, object_id, sked_dict, ein)
this_filing.set_result(self.whole_filing_data)
this_filing.set_keyerrors(self.filing_keyerr_data)
if verbose and not self.csv_format: # csv format works on years with many, many keyerrors,
if len(self.filing_keyerr_data)>0:
print("In %s keyerrors: %s" % (object_id, self.filing_keyerr_data))
else:
print("No keyerrors found")
return this_filing
else:
print("Filing version %s isn't supported for this operation" % this_version )
return this_filing
"""
def run_from_filing_obj(self, this_filing, verbose=False):
#Run from a pre-created filing object.
self.whole_filing_data = []
self.filing_keyerr_data = []
this_filing.process(verbose=verbose)
object_id = this_filing.get_object_id()
this_version = this_filing.get_version()
if this_version in ALLOWED_VERSIONSTRINGS:
this_version = this_filing.get_version()
schedules = this_filing.list_schedules()
ein = this_filing.get_ein()
for sked in schedules:
sked_dict = this_filing.get_schedule(sked)
self._run_schedule(sked, object_id, sked_dict, ein)
this_filing.set_result(self.whole_filing_data)
this_filing.set_keyerrors(self.filing_keyerr_data)
return this_filing
else:
return this_filing
"""
def run_sked(self, object_id, sked, verbose=False):
"""
sked is the proper name of the schedule:
IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc.
"""
self.whole_filing_data = []
self.filing_keyerr_data = []
this_filing = Filing(object_id)
this_filing.process(verbose=verbose)
this_version = this_filing.get_version()
if this_version in ALLOWED_VERSIONSTRINGS or ( self.csv_format and this_version in CSV_ALLOWED_VERSIONSTRINGS ):
this_version = this_filing.get_version()
ein = this_filing.get_ein()
sked_dict = this_filing.get_schedule(sked)
self._run_schedule(sked, object_id, sked_dict, ein)
this_filing.set_result(self.whole_filing_data)
this_filing.set_keyerrors(self.filing_keyerr_data)
return this_filing
else:
print("Filing version %s isn't supported for this operation" % this_version )
return this_filing | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/xmlrunner.py | xmlrunner.py |
import sys
import os
import argparse
from zipfile import ZipFile
from .file_utils import stream_download
from .settings import WORKING_DIRECTORY
IRS_location = "https://apps.irs.gov/pub/epostcard/990/xml/%s/download990xml_%s"
ref_url = "https://www.irs.gov/charities-non-profits/form-990-series-downloads"
# How many files are available per year?
# https://www.irs.gov/charities-non-profits/form-990-series-downloads
number_of_files = {
'2022':0,
'2021':6,
'2020':8,
'2019':8,
'2018':7,
'2017':7,
'2016':6,
'2015':2
}
def get_cli_retrieve_parser():
parser = argparse.ArgumentParser("Irsreader")
parser.add_argument(
"year",
nargs='+',
help='4-digit year to retrieve, '
)
parser.add_argument(
'--verbose',
dest='verbose',
action='store_const',
const=True, default=False,
help='Verbose output'
)
return parser
def download_unzip_erase(remote_url, verbose=False):
local_name = remote_url.split("/")[-1]
local_path = os.path.join(WORKING_DIRECTORY, local_name)
if verbose:
print("Downloading %s to %s" % (remote_url, local_path))
stream_download(remote_url, local_path, verbose=verbose)
with ZipFile(local_path, 'r') as zipObj:
# Extract all the contents of zip file in different directory
print('Unzipping %s to %s' % (local_path, WORKING_DIRECTORY))
zipObj.extractall(WORKING_DIRECTORY)
print("Cleaning up, removing raw file.")
os.remove(local_path)
def unload_zipfile_by_year(year, verbose=False):
print("Retrieving zipfiles for year %s" % year)
if verbose:
print("Running verbose")
num_files = number_of_files[year]
location_base = IRS_location % (year, year)
file_list = []
if num_files == 0:
file_list.append(location_base + ".zip")
if num_files > 0:
for i in range(1, num_files+1):
file_list.append(location_base + "_" + str(i) + ".zip")
for this_file in file_list:
download_unzip_erase(this_file, verbose=verbose)
def run_cli_retrieve_main(args_read):
print("""
Please visit https://www.irs.gov/charities-non-profits/form-990-series-downloads
To see if any additional files are available.
""")
for year in args_read.year:
print("Processing %s files for year %s" % (year, number_of_files[year]))
unload_zipfile_by_year(year, verbose=args_read.verbose)
def main(args=None):
parser = get_cli_retrieve_parser()
args = parser.parse_args()
run_cli_retrieve_main(args)
if __name__ == "__main__":
main() | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/irsx_retrieve_cli.py | irsx_retrieve_cli.py |
import os
import sys
import collections
#import logging
from datetime import datetime
from .settings import METADATA_DIRECTORY, KEYERROR_LOG
from .sked_dict_reader import SkedDictReader
from .type_utils import listType
if sys.version_info >= (3, 0):
import csv
else:
import unicodecsv as csv
class Standardizer(object):
"""
This reads metadata .csv files, which it uses to standardize
ordered dicts. For documentation, see Documentizer below.
"""
def __init__(self):
#self.show_documentation = documentation
self.groups = {}
self.variables = {}
self.schedule_parts = {}
# This is overridden for Documentizer class below
self.variable_columns =['db_table', 'db_name']
self._make_groups()
self._make_variables()
def _make_groups(self):
group_filepath = os.path.join(METADATA_DIRECTORY, 'groups.csv')
with open(group_filepath, 'r') as reader_fh:
reader = csv.DictReader(reader_fh)
for row in reader:
self.groups[row['xpath']] = row
return True
def _make_variables(self):
variable_filepath = os.path.join(METADATA_DIRECTORY, 'variables.csv')
with open(variable_filepath, 'r') as variable_fh:
reader = csv.DictReader(variable_fh)
for row in reader:
vardict = {}
for col in self.variable_columns:
vardict[col]=row[col]
self.variables[row['xpath']] = vardict
return True
def get_groups(self):
return self.groups
def get_var(self, var_xpath, version=None):
if version:
raise Exception("Version checking is not implemented")
return (self.variables[var_xpath])
def get_documentation_status(self):
return False
class Documentizer(Standardizer):
""" Like Standardizer, but returns canonical documentation info from 2016 version """
def __init__(self, versions=False):
self.groups = {}
self.variables = {}
self.schedule_parts = {}
self.variable_columns =[
'db_table', 'db_name', 'ordering',
'line_number', 'description', 'db_type',
'irs_type', 'xpath'
]
if versions:
self.variable_columns = self.variable_columns + ['version_start', 'version_end']
self._make_schedule_parts()
self._make_groups()
self._make_variables()
def get_documentation_status(self):
return True
def _make_schedule_parts(self):
part_filepath = os.path.join(METADATA_DIRECTORY, 'schedule_parts.csv')
with open(part_filepath, 'r') as reader_fh:
reader = csv.DictReader(reader_fh)
for row in reader:
self.schedule_parts[row['parent_sked_part']] = {
'name': row['part_name'],
'ordering': row['ordering'],
'parent_sked': row['parent_sked'],
'parent_sked_part': row['parent_sked_part'],
'is_shell': row['is_shell']
}
return True
def get_schedule_parts(self):
return self.schedule_parts
def part_ordering(self, partname):
try:
result = int(self.schedule_parts[partname]['ordering'])
return result
except KeyError:
return None
def group_ordering(self, groupname):
try:
return self.groups[groupname]['ordering']
except KeyError:
return None
def get_groups_by_sked(self, sked):
groups = []
for thisgroup in self.groups.keys():
if self.groups[thisgroup]['parent_sked'] == sked:
groups.append(self.groups[thisgroup])
return groups
def get_parts_by_sked(self, sked):
parts = []
for thispart in self.schedule_parts.keys():
#print(self.schedule_parts[thispart])
if self.schedule_parts[thispart]['parent_sked'] == sked:
parts.append(self.schedule_parts[thispart])
return parts
def get_variables(self):
return self.variables
class VersionDocumentizer(object):
"""
Returns version-specific line number and documentation.
"""
def __init__(self):
self.line_numbers = {}
self.descriptions = {}
self._make_line_numbers()
self._make_descriptions()
def check_version(self, versionstring, start_year, end_year):
versionyear = int(versionstring.split("v")[0])
valid_start = versionyear >= int(start_year)
valid_end = not end_year or versionyear <= int(end_year)
result = valid_start and valid_end
return result
def _make_line_numbers(self):
filepath = os.path.join(METADATA_DIRECTORY, 'line_numbers.csv')
with open(filepath, 'r') as reader_fh:
reader = csv.DictReader(reader_fh)
for row in reader:
try:
self.line_numbers[row['xpath']]
self.line_numbers[row['xpath']].append(row)
except KeyError:
self.line_numbers[row['xpath']] = [row]
def _make_descriptions(self):
filepath = os.path.join(METADATA_DIRECTORY, 'descriptions.csv')
with open(filepath, 'r') as reader_fh:
reader = csv.DictReader(reader_fh)
for row in reader:
try:
self.descriptions[row['xpath']]
self.descriptions[row['xpath']].append(row)
except KeyError:
self.descriptions[row['xpath']] = [row]
def get_line_number(self, xpath, version_string):
candidate_rows = []
try:
candidate_rows = self.line_numbers[xpath]
except KeyError:
return None
for row in candidate_rows:
if self.check_version(version_string, row['version_start'], row['version_end']):
return row['line_number']
return None
def get_description(self, xpath, version_string):
candidate_rows = []
try:
candidate_rows = self.descriptions[xpath]
except KeyError:
return None
for row in candidate_rows:
if self.check_version(version_string, row['version_start'], row['version_end']):
return row['description']
return None | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/standardizer.py | standardizer.py |
from .type_utils import dictType, orderedDictType, listType, \
unicodeType, noneType, strType
from .flatten_utils import flatten
from .keyerror_utils import ignorable_keyerror
from .settings import LOG_KEY
class SkedDictReader(object):
"""
We get an ordered dict back from xmltodict, but we want to "flatten" it
into xpath-ed variables and repeated structures.
Will also work on reading xmltodict that was previously turned into json
"""
def __init__(
self,
standardizer,
groups,
object_id,
ein,
documentId=None,
documentation=False,
csv_format=False
):
self.standardizer = standardizer
self.object_id = object_id
self.ein = ein
self.documentId = documentId
self.schedule_parts = {} # allows one entry per filing
self.repeating_groups = {} # multiple per filing
self.csv_format = csv_format # Do we need to generate ordered csv
self.for_csv_list = [] # keep record of elements, line by line
self.groups = groups
self.documentation = documentation
self.variable_keyerrors = [] # record any unexpected variables
self.group_keyerrors = [] # or unexpected groups
if self.documentation and not self.standardizer.get_documentation_status():
# Todo: split out documenter entirely so we don't have to do this
raise Exception(
"Standardizer must be initialized with the \
documentation flag to load documentation"
)
def _get_table_start(self):
""" prefill the columns we need for all tables """
if self.documentation:
standardized_table_start = {
'object_id': {
'value': self.object_id,
'ordering': -1,
'line_number': 'NA',
'description': 'IRS-assigned object id',
'db_type': 'String(18)'
},
'ein': {
'value': self.ein,
'ordering': -2,
'line_number': 'NA',
'description': 'IRS employer id number',
'db_type': 'String(9)'
}
}
if self.documentId:
standardized_table_start['documentId'] = {
'value': self.documentId,
'description': 'Document ID',
'ordering': 0
}
else:
standardized_table_start = {
'object_id': self.object_id,
'ein': self.ein
}
if self.documentId:
standardized_table_start['documentId'] = self.documentId
return standardized_table_start
def _process_group(self, json_node, path, this_group):
for node_index, node in enumerate(json_node):
#print("_process_group %s " % (this_group['db_name']))
this_node_type = type(node)
flattened_list_item = None
if this_node_type == unicodeType:
#print("_pg: unicodeType %s ")
flattened_list_item = {path: node}
else:
#print("_pg: NOT unicodeType")
flattened_list_item = flatten(node, parent_key=path, sep='/')
table_name = None
standardized_group_dict = self._get_table_start()
for xpath in flattened_list_item.keys():
if '@' in xpath:
continue
else:
xpath = xpath.replace("/#text", "")
value = flattened_list_item[xpath]
if self.csv_format:
this_var = {
'xpath':xpath,
'value':value,
'in_group':True,
'group_name':this_group['db_name'],
'group_index':node_index
}
self.for_csv_list.append(this_var)
try:
this_var_data = self.standardizer.get_var(xpath)
except KeyError:
if not ignorable_keyerror(xpath):
self.variable_keyerrors.append(
{'element_path':xpath}
)
continue
this_var_value = flattened_list_item[xpath]
this_var_name = this_var_data['db_name']
table_name = this_var_data['db_table']
if self.documentation:
result = {
'value': this_var_value,
'ordering': this_var_data['ordering'],
'line_number': this_var_data['line_number'],
'description': this_var_data['description'],
'db_type': this_var_data['db_type']
}
standardized_group_dict[this_var_name] = result
else:
standardized_group_dict[this_var_name] = this_var_value
try:
self.repeating_groups[table_name].append(standardized_group_dict)
except KeyError:
self.repeating_groups[table_name] = [standardized_group_dict]
def _parse_json(self, json_node, parent_path=""):
this_node_type = type(json_node)
element_path = parent_path
if this_node_type == listType:
#print("List type %s" % element_path)
this_group = None
try:
this_group = self.groups[element_path]
except KeyError:
self.group_keyerrors.append(
{'element_path':element_path}
)
self._process_group(json_node, parent_path, this_group)
elif this_node_type == unicodeType:
# but ignore it if is an @.
if '@' in element_path:
pass
else:
element_path = element_path.replace("/#text", "")
try:
# is it a group?
this_group = self.groups[element_path]
self._process_group(
[{parent_path: json_node}],
'',
this_group
)
except KeyError:
# It's not a group so it should be a variable we know about
if self.csv_format:
this_var = {
'xpath':element_path,
'value':json_node,
'in_group':False,
'group_name':None,
'group_index':None
}
self.for_csv_list.append(this_var)
# It's not a group so it should be a variable we know about
try:
var_data = self.standardizer.get_var(element_path)
var_found = True
except KeyError:
# pass through for some common key errors
# [ TODO: FIX THE KEYERRORS! ]
if not ignorable_keyerror(element_path):
self.variable_keyerrors.append(
{'element_path':element_path}
)
var_found = False
if var_found:
table_name = var_data['db_table']
var_name = var_data['db_name']
result = json_node
if self.documentation:
result = {
'value': json_node,
'ordering': var_data['ordering'],
'line_number': var_data['line_number'],
'description': var_data['description'],
'db_type': var_data['db_type']
}
try:
self.schedule_parts[table_name][var_name] = result
except KeyError:
self.schedule_parts[table_name] = self._get_table_start()
self.schedule_parts[table_name][var_name] = result
elif this_node_type == orderedDictType or this_node_type == dictType:
try:
# is it a singleton group?
this_group = self.groups[element_path]
self._process_group([{parent_path: json_node}], '', this_group)
except KeyError:
keys = json_node.keys()
for key in keys:
new_path = parent_path + "/" + key
self._parse_json(json_node[key], parent_path=new_path)
elif this_node_type == noneType:
pass
elif this_node_type == strType:
msg = "String '%s'" % json_node
#self.logging.debug(msg)
else:
raise Exception("Unhandled type: %s" % (type(json_node)))
def parse(self, raw_ordered_dict, parent_path=""):
self._parse_json(raw_ordered_dict, parent_path=parent_path)
return ({
'schedule_parts': self.schedule_parts,
'groups': self.repeating_groups,
'csv_line_array':self.for_csv_list, # This is empty if not csv
'keyerrors':self.variable_keyerrors,
'group_keyerrors':self.group_keyerrors
}) | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/sked_dict_reader.py | sked_dict_reader.py |
import re
import os
import requests
from datetime import datetime
from .settings import IRS_XML_HTTP_BASE, WORKING_DIRECTORY, INDEX_DIRECTORY, IRS_INDEX_BASE
OBJECT_ID_RE = re.compile(r'20\d{16}')
# Not sure how much detail we need to go into here
OBJECT_ID_MSG = """
This appears not to be an IRS object id.
The ID should be 18 digits long and start with
the four digit year, e.g. 201642229349300909
To find the object id, see the yearly index csv files.
"""
def stream_download(url, target_path, verbose=False):
""" Download a large file without loading it into memory. """
response = requests.get(url, stream=True)
handle = open(target_path, "wb")
if verbose:
print("Beginning streaming download of %s" % url)
start = datetime.now()
try:
content_length = int(response.headers['Content-Length'])
content_MB = content_length/1048576.0
print("Total file size: %.2f MB" % content_MB)
except KeyError:
pass # allow Content-Length to be missing
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
if verbose:
print(
"Download completed to %s in %s" %
(target_path, datetime.now() - start))
def validate_object_id(object_id):
""" It's easy to make a mistake entering these, validate the format """
result = re.match(OBJECT_ID_RE, str(object_id))
if not result:
print("'%s' appears not to be a valid 990 object_id" % object_id)
raise RuntimeError(OBJECT_ID_MSG)
return object_id
# Files are no longer available on S3
# def get_s3_URL(object_id):
# return ("%s/%s_public.xml" % (IRS_XML_HTTP_BASE, object_id))
def get_local_path(object_id):
file_name = "%s_public.xml" % object_id
return os.path.join(WORKING_DIRECTORY, file_name)
def get_index_file_URL(year):
index_file = IRS_INDEX_BASE % (year, year)
print("index file %s %s" % (year, index_file))
return index_file
def get_local_index_path(year):
csv_file_name = "index_%s.csv" % year
return os.path.join(INDEX_DIRECTORY, csv_file_name) | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/file_utils.py | file_utils.py |
ignorable_keyerrors = ['/ReturnHeader/BuildTS']
## Todo: put in 2013 / 2015 series canonicals.
# 2013 vars that no longer exist
discontinued_2013_vars = [ '/IRS990ScheduleA/CertificationInd', '/IRS990ScheduleA/Contribution35ControlledInd', '/IRS990ScheduleA/ContributionControllerInd', '/IRS990ScheduleA/ContributionFamilyInd', '/IRS990ScheduleA/Form990ScheduleAPartIVGrp/ExplanationTxt', '/IRS990ScheduleA/SupportedOrgInformationGrp/SupportedOrgNotifiedInd', '/IRS990ScheduleA/SupportedOrgInformationGrp/USOrganizedInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AdoptBudgetInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AdoptImplementationStrategyInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AllNeedsAddressedInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AttachedToInvoiceInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AvailableOnRequestInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/BodyAttachmentsInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/DevelopCommunityWidePlanInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/ExecCommunityWidePlanInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/ExecImplementationStrategyInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FPGUsedDeterEligFreeCareInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FPGUsedDetermEligDscntCareInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/IncludeOperationalPlanInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/LawsuitInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/LiensOnResidencesInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/MedicaidMedicareInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/OtherNeedsAddressedInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PermitBodyAttachmentsInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PermitLawsuitInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PermitLienOnResidenceInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PostedInAdmissionOfficeInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PostedInEmergencyRoomInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PrioritizeHealthNeedsInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/PrioritizeServicesInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/ProvidedOnAdmissionInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/StateRegulationInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/UninsuredDiscountInd']
# 2015 skedh vars removed
discontinued_2015_vars = ['/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/AverageNegotiatedRatesInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/DocumentedEligDeterminationInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FAPNoticeDisplayedInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FAPNotifiedAllPatientsInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FAPNotifiedBeforeDischargeInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/FAPNotifiedUponAdmissionInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/InformationGapsInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/LowestNegotiatedRatesInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/MedicareRatesInd', '/IRS990ScheduleH/HospitalFcltyPoliciesPrctcGrp/OtherMethodUsedInd']
ignorable = {}
for key in ignorable_keyerrors + discontinued_2013_vars + discontinued_2015_vars:
ignorable[key] = 1
def ignorable_keyerror(xpath):
try:
ignorable[xpath]
return True
except KeyError:
return False | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/keyerror_utils.py | keyerror_utils.py |
import json
import sys
import codecs
import re
import csv
import unicodecsv
from .standardizer import Standardizer, Documentizer, VersionDocumentizer
BRACKET_RE = re.compile(r'\[.*?\]')
ASTERISKS = "****************"
def debracket(string):
""" Eliminate the bracketed var names in doc, line strings """
result = re.sub(BRACKET_RE, ';', str(string))
result = result.lstrip(';')
result = result.lstrip(' ')
result = result.replace('; ;',';')
return result
def most_recent(semicolon_delimited_string):
result = semicolon_delimited_string.split(";")[-1]
return result
def to_json(data, outfilepath=None):
if data:
if outfilepath:
with open(outfilepath, 'w') as outfile:
json.dump(data, outfile)
else:
if hasattr(sys.stdout, "buffer"):
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer, "strict")
json.dump(data, sys.stdout)
else:
json.dump(data, sys.stdout)
def to_csv(parsed_filing, object_id=None, standardizer=None, documentation=True, vd=None, outfilepath=None):
if not vd:
vd = VersionDocumentizer()
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
if outfilepath:
stdout = open(outfilepath, 'wb') # or 'wb' ?
fieldnames = []
fieldnames = [
'object_id', 'form', 'line_number', 'description', 'value', 'variable_name',
'xpath', 'in_group', 'group_name', 'group_index'
]
writer = unicodecsv.DictWriter(
stdout,
fieldnames=fieldnames,
encoding='utf-8',
quoting=csv.QUOTE_MINIMAL
)
writer.writeheader() # this fails in python3?
results = parsed_filing.get_result()
if results:
for result in results:
for this_result in result['csv_line_array']:
vardata = None
try:
vardata = standardizer.get_var(this_result['xpath'])
except KeyError:
pass
if vardata:
this_result['variable_name'] = vardata['db_table'] + "." + vardata['db_name']
raw_line_num = vd.get_line_number(
this_result['xpath'],
parsed_filing.get_version()
)
this_result['line_number'] = debracket(raw_line_num)
raw_description = vd.get_description(
this_result['xpath'],
parsed_filing.get_version()
)
this_result['description'] = debracket(raw_description)
this_result['form'] = this_result['xpath'].split("/")[1]
this_result['object_id'] = object_id
writer.writerow(this_result)
def to_txt(parsed_filing, standardizer=None, documentation=True, vd=None, outfilepath=None):
if not vd:
vd = VersionDocumentizer()
results = parsed_filing.get_result()
this_sked_name = None
if outfilepath:
outfile = open(outfilepath, 'w')
if results:
for result in results:
for this_result in result['csv_line_array']:
#### Collect the variables we need
vardata = None
textoutput = "\n" # This is what we'll eventually write out
this_result['form'] = this_result['xpath'].split("/")[1]
try:
vardata = standardizer.get_var(this_result['xpath'])
except KeyError:
pass
if vardata:
this_result['variable_name'] = vardata['db_table'] + "." + vardata['db_name']
raw_line_num = vd.get_line_number(
this_result['xpath'],
parsed_filing.get_version()
)
this_result['line_number'] = debracket(raw_line_num)
raw_description = vd.get_description(
this_result['xpath'],
parsed_filing.get_version()
)
this_result['description'] = debracket(raw_description)
#### Write the output, now that we've got the vars
if this_sked_name != this_result['form']:
textoutput += "\n\n\n" + ASTERISKS + "\tSchedule %s\n" % this_result['form']
this_sked_name = this_result['form']
textoutput += "\n" + ASTERISKS + "\n Value: '%s'\nForm: %s\nLine:%s\nDescription:%s" % (
this_result['value'],
this_result['form'],
this_result['line_number'],
this_result['description'],
)
if documentation:
textoutput += "\nXpath:%s" % (this_result['xpath'])
if this_result['in_group']:
textoutput += "\nGroup: %s group_index %s" % (
this_result['group_name'],
this_result['group_index']
)
else:
textoutput += "\nGroup:"
if outfilepath:
outfile.write(textoutput)
else:
sys.stdout.write(textoutput)
if outfilepath:
outfile.close() | zenitechcyber-irsx | /zenitechcyber_irsx-0.2-py3-none-any.whl/irsx/text_format_utils.py | text_format_utils.py |
import json
import sys
import requests
class Kintone:
def __init__(self,authText,domain,app):
self.authText = authText
self.rootURL = 'https://{}.cybozu.com/k/v1/'.format(domain)
self.app = app
self.headers = {
'X-Cybozu-Authorization': self.authText,
'Content-Type': 'application/json'
}
self.property = self.get_property()
def requestKintone(self, method, url, json):
if method == 'GET':
try:
response = requests.get(url, json=json, headers=self.headers)
#ステータスコードチェック
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException:
raise Exception(response.json())
if method == 'POST':
try:
response = requests.post(url, json=json, headers=self.headers)
#ステータスコードチェック
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException:
raise Exception(response.json())
if method == 'PUT':
try:
response = requests.put(url, json=json, headers=self.headers)
#ステータスコードチェック
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException:
raise Exception(response.json())
if method == 'DELETE':
try:
response = requests.delete(url, json=json, headers=self.headers)
#ステータスコードチェック
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException:
raise Exception(response.json())
def get_property(self):
params = {
'app': self.app,
'lang': 'default'
}
url = self.rootURL + 'app/form/fields.json'
response = self.requestKintone(method='GET', url=url, json=params)
properties = response['properties']
return properties
def selectAll(self,where=None, fields=None):
"""
selectAll is function to get records from Kintone.\n
Get all fields if "field" is not specified.\n
If you do not specify "where", get all records.\n
Always get "id" and "revision".
"""
params = {
'app' : self.app,
'query' : '',
'totalCount': True,
}
if fields is not None:
params['fields'] = list(set(fields + ['$id', '$revision']))
url = self.rootURL + 'records.json'
lastRecID = '0'
records = []
while True:
if where is None:
params['query'] = '($id > ' + lastRecID + ' ) order by $id asc limit 500'
else:
params['query'] = '($id > ' + lastRecID + ' ) and (' + where + ') order by $id asc limit 500'
response = self.requestKintone(method='GET', url=url, json=params)
totalCount = int(response['totalCount'])
if totalCount == 0:
break
#レコードIDの最大値を取得
lastRecID = response['records'][-1]['$id']['value']
records.extend(response['records'])
if totalCount <= 500:
break
result = []
for record in records:
tmp_record = {}
for field_code, value in record.items():
field_type = value['type' ]
field_value = value['value']
if field_type == 'NUMBER' and field_value is not None and field_value != "":
#intかfloatにキャスト
try:
field_value = int(field_value)
except ValueError:
field_value = float(field_value)
if field_type == 'SUBTABLE':
subtable = []
for sub_rec in field_value:
subtable_record = {}
subtable_record['id'] = sub_rec['id']
for sub_field_code, sub_value in sub_rec['value'].items():
if sub_value['type'] == 'NUMBER' and field_value is not None:
#intかfloatにキャスト
try:
sub_value['value'] = int(sub_value['value'])
except ValueError:
sub_value['value'] = float(sub_value['value'])
subtable_record[sub_field_code] = sub_value['value']
subtable.append(subtable_record)
field_value = subtable
tmp_record[field_code] = field_value
result.append(tmp_record)
return result
def select(self,where=None, order=None , limit=None, fields=None):
"""
select is function to get records from Kintone.\n
Get all fields if "field" is not specified.\n
If you do not specify "where", get all records.\n
Always get "id" and "revision".
"""
params = {
'app' : self.app,
'query' : '($id > 0)',
'totalCount': True,
}
if fields is not None:
params['fields'] = list(set(fields + ['$id', '$revision']))
url = self.rootURL + 'records.json'
if where is not None:
params['query'] += ' and (' + where + ')'
if order is not None:
params['query'] += ' ' + order
if limit is not None:
params['query'] += ' limit ' + str(limit)
response = self.requestKintone(method='GET', url=url, json=params)
records = response['records']
result = []
for record in records:
tmp_record = {}
for field_code, value in record.items():
field_type = value['type' ]
field_value = value['value']
if field_type == 'NUMBER' and field_value is not None and field_value != "":
#intかfloatにキャスト
try:
field_value = int(field_value)
except ValueError:
field_value = float(field_value)
if field_type == 'SUBTABLE':
subtable = []
for sub_rec in field_value:
subtable_record = {}
subtable_record['id'] = sub_rec['id']
for sub_field_code, sub_value in sub_rec['value'].items():
if sub_value['type'] == 'NUMBER' and field_value is not None:
#intかfloatにキャスト
try:
sub_value['value'] = int(sub_value['value'])
except ValueError:
sub_value['value'] = float(sub_value['value'])
subtable_record[sub_field_code] = sub_value['value']
subtable.append(subtable_record)
field_value = subtable
tmp_record[field_code] = field_value
result.append(tmp_record)
return result
def selectRec(self,recordID):
"""
selectRec is function to get 1 record of Kintone.\n
recordID = Specify the record ID.
"""
params = {
'app': self.app,
'id' : recordID
}
url = self.rootURL + 'record.json'
record = self.requestKintone(method='GET', url=url, json=params)
result = {}
for field_code, value in record['record'].items():
field_type = value['type' ]
field_value = value['value']
if field_type == 'NUMBER' and field_value is not None and field_value != "":
#intかfloatにキャスト
try:
field_value = int(field_value)
except ValueError:
field_value = float(field_value)
if field_type == 'SUBTABLE':
subtable = []
for sub_rec in field_value:
subtable_record = {}
subtable_record['id'] = sub_rec['id']
for sub_field_code, sub_value in sub_rec['value'].items():
if sub_value['type'] == 'NUMBER' and field_value is not None:
#intかfloatにキャスト
try:
sub_value['value'] = int(sub_value['value'])
except ValueError:
sub_value['value'] = float(sub_value['value'])
subtable_record[sub_field_code] = sub_value['value']
subtable.append(subtable_record)
field_value = subtable
result[field_code] = field_value
return result
def insert(self,records:list):
"""
insert is Function for inserting records in kintone.\n
records = Specify the record information (field code and field value) in the list.\n
records = [
{
'field_code': 'value'
}
]
"""
if type(records) != list:
raise Exception('Argument is not a list')
params = {
'app': self.app,
"records": [
]
}
tmp_param = {}
url = self.rootURL + 'records.json'
parameter = self.property
resp = []
for record in records:
#100件づつKintoneに登録する
if len(params['records']) == 100:
resp = self.requestKintone(method='POST', url=url, json=params)
params['records'] = []
for key, value in record.items():
if key not in parameter:
continue
if parameter[key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in value:
codes.append({'code':val})
tmp_param[key] = {
'value': codes
}
continue
elif parameter[key]['type'] == 'FILE':
fileKeys = []
for val in value:
fileKeys.append({'fileKey': val})
tmp_param[key] = {
'value': fileKeys
}
continue
elif parameter[key]['type'] == 'SUBTABLE':
tmp_param[key] = {
'value': []
}
for sub_rec in value:
sub_dict = {}
for sub_key, sub_value in sub_rec.items():
if parameter[key]['fields'][sub_key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in sub_value:
codes.append({'code': val})
sub_dict[sub_key] = {
'value':codes
}
elif parameter[key]['fields'][sub_key]['type'] == 'FILE':
fileKeys = []
for val in sub_value:
fileKeys.append({'fileKey': val})
sub_dict[sub_key] = {
'value': fileKeys
}
else:
sub_dict[sub_key] = {
'value':sub_value
}
tmp_param[key]['value'].append({
'value':sub_dict
})
continue
else:
tmp_param[key] = {
'value': value
}
continue
params['records'].append(tmp_param)
tmp_param = {}
#最後に残りを追加する
if params['records']:
resp = self.requestKintone(method='POST', url=url, json=params)
return resp
def insertRec(self,record:dict):
"""
insertRec is Function for inserting 1 record in kintone.\n
record = Specify the record information (field code and field value) in the dict.\n
record = {
'field_code': 'value'
}
"""
#引数の型チェック
if type(record) != dict:
raise Exception('Argument is not a dict')
params = {
'app': self.app,
"record": {
}
}
url = self.rootURL + 'record.json'
parameter = self.property
for key, value in record.items():
if key not in parameter:
continue
if parameter[key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in value:
codes.append({'code': val})
params['record'][key] = {
'value': codes
}
continue
elif parameter[key]['type'] == 'FILE':
fileKeys = []
for val in value:
fileKeys.append({'fileKey': val})
params['record'][key] = {
'value': fileKeys
}
continue
elif parameter[key]['type'] == 'SUBTABLE':
params['record'][key] = {
'value': []
}
for sub_rec in value:
sub_dict = {}
for sub_key, sub_value in sub_rec.items():
if parameter[key]['fields'][sub_key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in sub_value:
codes.append({'code': val})
sub_dict[sub_key] = {
'value': codes
}
elif parameter[key]['fields'][sub_key]['type'] == 'FILE':
fileKeys = []
for val in sub_value:
fileKeys.append({'fileKey': val})
sub_dict[sub_key] = {
'value': fileKeys
}
else:
sub_dict[sub_key] = {
'value': sub_value
}
params['record'][key]['value'].append({
'value': sub_dict
})
continue
else:
params['record'][key] = {
'value': value
}
continue
resp = self.requestKintone(method='POST', url=url, json=params)
return resp
def update(self, records:list):
"""
update is function for updating records in kintone.\n
records = data to update in kintone.\n
records = [
{
'$id':'1'
or
'updateKey': {
'field':'No duplication field code',
'value':'value'
}
'field_code': 'value'
}
]
"""
if type(records) != list:
raise Exception('Argument is not a list')
params = {
'app': self.app,
"records": [
]
}
tmp_param = {}
url = self.rootURL + 'records.json'
parameter = self.property
resp = []
for record in records:
#100件づつKintoneに登録する
if len(params['records']) == 100:
resp = self.requestKintone(method='PUT', url=url, json=params)
params['records'] = []
tmp_param['record'] = {}
for key, value in record.items():
if key == '$revision':
continue
#レコードIDを取得する
if key in ('id','$id'):
tmp_param['id'] = value
continue
elif key == 'updateKey':
tmp_param['updateKey'] = {
'field':value['field'],
'value':value['value']
}
continue
elif parameter[key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in value:
codes.append({'code': val})
tmp_param['record'][key] = {
'value': codes
}
continue
elif parameter[key]['type'] == 'FILE':
fileKeys = []
for val in value:
fileKeys.append({'fileKey': val})
tmp_param['record'][key] = {
'value': fileKeys
}
continue
elif parameter[key]['type'] == 'SUBTABLE':
tmp_param['record'][key] = {
'value': []
}
for sub_rec in value:
sub_dict = {}
sub_id = ''
for sub_key, sub_value in sub_rec.items():
if sub_key == 'id':
sub_id = sub_value
continue
elif parameter[key]['fields'][sub_key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in sub_value:
codes.append({'code': val})
sub_dict[sub_key] = {
'value': codes
}
elif parameter[key]['fields'][sub_key]['type'] == 'FILE':
fileKeys = []
for val in sub_value:
fileKeys.append({'fileKey': val})
sub_dict[sub_key] = {
'value': fileKeys
}
else:
sub_dict[sub_key] = {
'value': sub_value
}
if sub_id:
tmp_param['record'][key]['value'].append({
'id' : sub_id,
'value': sub_dict
})
else:
tmp_param['record'][key]['value'].append({
'value': sub_dict
})
continue
else:
tmp_param['record'][key] = {
'value': value
}
continue
params['records'].append(tmp_param)
tmp_param = {}
#最後に残りを追加する
if params['records']:
resp = self.requestKintone(method='PUT', url=url, json=params)
return resp
def updateRec(self,record:dict):
"""
updateRec is function for updating 1 record in kintone.\n
record = data to update in kintone.\n
record = {
'$id':'1'
or
'updateKey': {
'field':'No duplication field code',
'value':'value'
}
'field_code': 'value'
}
"""
if type(record) != dict:
raise Exception('Argument is not a dict')
params = {
'app': self.app,
"record": {
}
}
url = self.rootURL + 'record.json'
parameter = self.property
for key, value in record.items():
if key == '$revision':
continue
#レコードIDを取得する
if key in ('id', '$id'):
params['id'] = value
continue
elif key == 'updateKey':
params['updateKey'] = {
'field': value['field'],
'value': value['value']
}
continue
elif parameter[key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in value:
codes.append({'code': val})
params['record'][key] = {
'value': codes
}
continue
elif parameter[key]['type'] == 'FILE':
fileKeys = []
for val in value:
fileKeys.append({'fileKey': val})
params['record'][key] = {
'value': fileKeys
}
continue
elif parameter[key]['type'] == 'SUBTABLE':
params['record'][key] = {
'value': []
}
for sub_rec in value:
sub_dict = {}
sub_id = ''
for sub_key, sub_value in sub_rec.items():
if sub_key == 'id':
sub_id = sub_value
continue
elif parameter[key]['fields'][sub_key]['type'] in ('USER_SELECT', 'ORGANIZATION_SELECT', 'GROUP_SELECT'):
codes = []
for val in sub_value:
codes.append({'code': val})
sub_dict[sub_key] = {
'value': codes
}
elif parameter[key]['fields'][sub_key]['type'] == 'FILE':
fileKeys = []
for val in sub_value:
fileKeys.append({'fileKey': val})
sub_dict[sub_key] = {
'value': fileKeys
}
else:
sub_dict[sub_key] = {
'value': sub_value
}
if sub_id:
params['record'][key]['value'].append({
'id': sub_id,
'value': sub_dict
})
else:
params['record'][key]['value'].append({
'value': sub_dict
})
continue
else:
params['record'][key] = {
'value': value
}
continue
resp = self.requestKintone(method='PUT', url=url, json=params)
return resp
def delete(self,ids:list,revisions=None):
"""
delete is function for deleting records in kintone.\n
ids = Specify the record ID of the record to be deleted as a list.\n
revisions = The expected revision number.
The order is the same as ids (the revision number expected in the first record of ids is the first number of revisions).
If it does not match the actual revision number, an error will occur (no record will be deleted).
However, if the value is -1 or not specified, the revision number will not be verified.
"""
if type(ids) != list:
raise Exception('Argument is not a list')
params = {
'app': self.app,
'ids': [
]
}
response = {}
url = self.rootURL + 'records.json'
if revisions is not None:
params['revisions'] = revisions
while len(ids) > 100:
params['ids'] = ids[0:100]
response = self.requestKintone(method='DELETE', url=url, json=params)
del ids[0:100]
if ids:
params['ids'] = ids[0:100]
response = self.requestKintone(method='DELETE', url=url, json=params)
return response
def postComment(self, recordID, text:str, mentions=None):
"""
postComment is function for posting comments to kintone.
"""
if type(text) != str:
raise Exception('Argument is not a str')
if mentions is not None and type(mentions) != list:
raise Exception('Argument is not a list')
params = {
'app' : self.app,
'record' : recordID,
'comment': {
'text' : text,
'mentions': mentions if mentions is not None else []
}
}
url = self.rootURL + 'record/comment.json'
resp = self.requestKintone(method='POST', url=url, json=params)
return resp
def deleteComment(self, recordID, commentID):
"""
deleteComment is function to delete the comment of kintone.
"""
params = {
'app' : self.app,
'record' : recordID,
'comment':commentID
}
url = self.rootURL + 'record/comment.json'
resp = self.requestKintone(method='DELETE', url=url, json=params)
return resp
def selectComment(self, recordID, order=None, offset=None, limit=None):
"""
selectComment function to get the comments of kintone records at once.
"""
params = {
'app' : self.app,
'record' : recordID
}
url = self.rootURL + 'record/comments.json'
if order is not None:
params['order'] = order
if offset is not None:
params['offset'] = offset
if limit is not None:
params['limit'] = limit
resp = self.requestKintone(method='GET', url=url, json=params)
return resp | zenkPytone | /zenkPytone-2.0.2-py3-none-any.whl/pytone/kintone.py | kintone.py |
from ..kaku.machine import (
IO,
Conn,
FeatureIdxStepX,
Idx,
LearningMachine,
State,
update_io,
)
from ..tansaku.assessors import XPopulationAssessor
from ..tansaku.core import Individual
from ..tansaku.modifiers import SlopeModifier
from ..tansaku.populators import BinaryPopulator, GaussianPopulator, PopulationLimiter
from ..tansaku.selectors import BestSelectorFeature
class HillClimbStepX(FeatureIdxStepX):
def __init__(
self,
learner: LearningMachine,
k: int,
std: float = 0.1,
lr: float = 1e-2,
momentum: float = 0.5,
maximize: bool = False,
):
"""use a hill climbing algorithm to update the input values
Args:
learner (LearningMachine):
"""
super().__init__()
self.learner = learner
self.limiter = PopulationLimiter()
self.populator = GaussianPopulator(k, std=std)
self.modifier = SlopeModifier(momentum, lr, maximize=maximize)
self.assessor = XPopulationAssessor(self.learner, ["x"], "loss", "mean", k)
def step_x(self, conn: Conn, state: State, feature_idx: Idx = None) -> Conn:
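        """Update x by assessing Gaussian perturbations of it and moving along the slope

        Args:
            conn (Conn): the connection to update with
            state (State): the current learning state
            feature_idx (Idx, optional): the features to update. Defaults to None.

        Returns:
            Conn: the updated connection
        """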
individual = Individual(x=conn.step_x.x[0])
population = self.limiter(
individual,
self.populator(individual),
feature_idx.tolist() if feature_idx is not None else None,
)
population = self.assessor(population, conn.step_x.t)
selected = self.modifier(individual, population)
update_io(IO(selected["x"], detach=True), conn.step_x.x)
conn.tie_inp()
return conn
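

# A framework-free sketch of the hill-climbing idea behind HillClimbStepX
# (illustrative only; `loss` is any callable mapping an input to a scalar).
# This simplified variant keeps the best candidate, whereas the class above
# moves along the assessed slope with momentum.
def _gaussian_hill_climb_sketch(x, loss, k: int = 8, std: float = 0.1, iters: int = 10):
    import torch  # assumed available alongside this package

    for _ in range(iters):
        candidates = x[None] + std * torch.randn(k, *x.shape)  # k perturbed copies
        losses = torch.stack([loss(c) for c in candidates])
        best = candidates[losses.argmin()]
        if loss(best) < loss(x):
            x = best  # move only when the best candidate improves the loss
    return x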
class HillClimbBinaryStepX(FeatureIdxStepX):
def __init__(self, learner: LearningMachine, k: int = 8, keep_p: float = 0.9):
"""use a hill climbing algorithm to update the input values
Args:
learner (LearningMachine):
"""
super().__init__()
self.learner = learner
self.populator = BinaryPopulator(k, keep_p)
        self.selector = BestSelectorFeature()
self.limiter = PopulationLimiter()
self.assessor = XPopulationAssessor(self.learner, ["x"], "loss", "mean", k)
    def update_populator(self, k: int, keep_p: float):
        """Update the populator used to generate binary candidates"""
        self.populator = BinaryPopulator(k, keep_p)
def step_x(self, conn: Conn, state: State, feature_idx: Idx = None) -> Conn:
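        """Update x by assessing binary perturbations of it and keeping the best candidate

        Args:
            conn (Conn): the connection to update with
            state (State): the current learning state
            feature_idx (Idx, optional): the features to update. Defaults to None.

        Returns:
            Conn: the updated connection
        """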
individual = Individual(x=conn.step_x.x[0])
# pass conn.limit into the populator
# perhaps call this a kind of modifier?
population = self.limiter(
individual,
self.populator(individual),
feature_idx.tolist() if feature_idx is not None else None,
)
population = self.assessor(population, conn.step_x.t)
selected = self.selector(population)
update_io(IO(selected["x"], detach=True), conn.step_x.x)
conn.tie_inp()
return conn | zenkai | /kikai/hill.py | hill.py |
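

# A framework-free sketch of the binary variant (illustrative only), assuming
# inputs in {0, 1} and that keep_p is the probability of leaving each value
# unchanged: each candidate flips the values that are not kept, and the best
# candidate by loss is returned.
def _binary_hill_climb_sketch(x, loss, k: int = 8, keep_p: float = 0.9):
    import torch  # assumed available alongside this package

    keep = (torch.rand(k, *x.shape) < keep_p).float()
    candidates = keep * x[None] + (1 - keep) * (1 - x[None])  # flip where not kept
    losses = torch.stack([loss(c) for c in candidates])
    return candidates[losses.argmin()]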
import typing
from abc import ABC, abstractmethod
# 3rd party
import numpy as np
import scipy.linalg
import torch
import torch.nn as nn
from ..kaku.machine import (
IO,
AssessmentDict,
Conn,
LearningMachine,
State,
StepTheta,
StepX,
ThLoss,
update_io,
)
# local
from ..utils import to_np, to_th_as
# TODO: Move to itadaki
class LeastSquaresSolver(ABC):
    """Base class for strategies to solve least squares problems"""

    @abstractmethod
    def solve(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        pass
class LeastSquaresStandardSolver(LeastSquaresSolver):
"""Solve least squares"""
def __init__(self, bias: bool = False):
"""initializer
Args:
bias (bool, optional): Whether there is a bias. Defaults to False.
"""
if bias:
self._prepare = self._prepare_with_bias
else:
self._prepare = self._prepare_without_bias
def _prepare_without_bias(
self, a: np.ndarray, b: np.ndarray
) -> typing.Tuple[np.ndarray, np.ndarray]:
return a, b
def _prepare_with_bias(
self, a: np.ndarray, b: np.ndarray
) -> typing.Tuple[np.ndarray, np.ndarray]:
m, _ = np.shape(a)
a = np.hstack([a, np.ones((m, 1))])
return a, b
def solve(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""Solve least squares between a and b
Args:
a (torch.Tensor): input
b (torch.Tensor): target
Returns:
torch.Tensor: the least squares solution
"""
new_, _, _, _ = scipy.linalg.lstsq(*self._prepare(to_np(a), to_np(b)))
return to_th_as(new_, a).T
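

# A minimal sketch of using the solver directly (illustrative helper, not part
# of the library). solve() returns the transposed solution, so the result `w`
# satisfies a @ w.T ~= b.
def _example_standard_solver() -> torch.Tensor:
    solver = LeastSquaresStandardSolver(bias=False)
    a = torch.randn(16, 4)
    w_true = torch.randn(2, 4)
    b = a @ w_true.T
    w = solver.solve(a, b)  # approximately recovers w_true
    return w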
class LeastSquaresRidgeSolver(LeastSquaresSolver):
"""Solve least squares using ridge regression"""
    def __init__(self, lam: float = 1e-1, bias: bool = False):
        """initializer

        Args:
            lam (float, optional): the weight on the ridge (L2) regularization term. Defaults to 1e-1.
            bias (bool, optional): Whether there is a bias. Defaults to False.
        """
        self._bias = bias
        self._lambda = lam
        if self._bias:
            self._prepare = self._prepare_with_bias
        else:
            self._prepare = self._prepare_without_bias
def _prepare_without_bias(self, a: np.ndarray, b: np.ndarray):
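        # Ridge via augmentation: appending sqrt(lambda) * I below `a` and a
        # matching block of zeros below `b` makes an ordinary least squares
        # solve minimize ||a w - b||^2 + lambda * ||w||^2.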
_, n = np.shape(a)
lower_half = np.zeros((n, n))
np.fill_diagonal(lower_half, np.sqrt(self._lambda))
return (np.vstack((a, lower_half)), np.vstack([b, np.zeros((n, b.shape[1]))]))
def _prepare_with_bias(self, a: np.ndarray, b: np.ndarray):
m, n = np.shape(a)
        # the bias column appended here is left unregularized below
upper_half = np.hstack([a, np.ones((m, 1))])
lower = np.zeros((n, n))
np.fill_diagonal(lower, np.sqrt(self._lambda))
lower_half = np.hstack([lower, np.zeros((n, 1))])
return (
np.vstack((upper_half, lower_half)),
np.vstack([b, np.zeros((n, b.shape[1]))]),
)
def solve(self, a: torch.Tensor, b: torch.Tensor):
"""Solve least squares between a and b
Args:
a (torch.Tensor): input
b (torch.Tensor): target
Returns:
torch.Tensor: the least squares solution
"""
A, B = self._prepare(to_np(a), to_np(b))
new_, _, _, _ = scipy.linalg.lstsq(A.T @ A, A.T @ B)
return to_th_as(new_, a).T
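# Example (sketch) of using a solver directly; shapes follow the code above,
# where solve(a, b) returns the solution transposed:
#
#   solver = LeastSquaresRidgeSolver(lam=1e-3, bias=False)
#   a = torch.randn(128, 16)   # (n_samples, n_features)
#   b = torch.randn(128, 4)    # (n_samples, n_targets)
#   w = solver.solve(a, b)     # (n_targets, n_features)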
class LeastSquaresStepTheta(StepTheta):
    def __init__(
        self,
        linear: nn.Linear,
        solver: LeastSquaresSolver = LeastSquaresStandardSolver(),
        optimize_dw: bool = False,
        lr: typing.Optional[float] = None,
    ):
        """initializer

        Args:
            linear (nn.Linear): The linear layer whose parameters are updated
            solver (LeastSquaresSolver, optional): The solver used to compute the new parameters. Defaults to LeastSquaresStandardSolver().
            optimize_dw (bool, optional): Whether to solve for the change in the weights rather than the weights themselves. Defaults to False.
            lr (typing.Optional[float], optional): The learning rate. Defaults to None.
        """
        super().__init__()
        self.linear = linear
        self.solver = solver
        self._optimize = self._optimize_dw if optimize_dw else self._optimize_w
        self._lr = lr
def split(self, p: torch.Tensor) -> typing.Tuple[torch.Tensor, torch.Tensor]:
if self.linear.bias is None:
return p, None
return p[:, :-1], p[:, -1]
def _optimize_dw(self, x: torch.Tensor, t: torch.Tensor):
t_delta = t - self.linear.forward(x)
dweight, dbias = self.split(self.solver.solve(x, t_delta))
self.linear.weight.data = self.linear.weight.data + dweight
if dbias is not None:
self.linear.bias.data = self.linear.bias.data + dbias
def _optimize_w(self, x: torch.Tensor, t: torch.Tensor):
weight, bias = self.split(self.solver.solve(x, t))
self.linear.weight.data = weight
if bias is not None:
self.linear.bias.data = bias
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
self._optimize(conn.step.x[0], conn.step.t[0])
return conn.connect_in(from_)
class LeastSquaresStepX(StepX):
    def __init__(
        self,
        linear: nn.Linear,
        solver: LeastSquaresSolver = LeastSquaresStandardSolver(),
        optimize_dx: bool = False,
        lr: typing.Optional[float] = None,
    ):
        """initializer

        Args:
            linear (nn.Linear): The linear layer to compute the new inputs for
            solver (LeastSquaresSolver, optional): The solver used to compute the new inputs. Defaults to LeastSquaresStandardSolver().
            optimize_dx (bool, optional): Whether to solve for the change in the inputs rather than the inputs themselves. Defaults to False.
            lr (typing.Optional[float], optional): The learning rate. Defaults to None.
        """
        super().__init__()
        self.linear = linear
        self.solver = solver
        self._optimize = self._optimize_dx if optimize_dx else self._optimize_x
        self._lr = lr
def _optimize_dx(self, x: torch.Tensor, t: torch.Tensor):
y = self.linear(x)
if self.linear.bias is not None:
t = t - self.linear.bias[None]
y = y - self.linear.bias[None]
t_delta = t - y
dx = self.solver.solve(self.linear.weight, t_delta.T)
return x + dx
def _optimize_x(self, x: torch.Tensor, t: torch.Tensor):
if self.linear.bias is not None:
t = t - self.linear.bias[None]
return self.solver.solve(self.linear.weight, t.T)
def step_x(self, conn: Conn, state: State) -> Conn:
"""Update x
Args:
conn (Conn): The connection to update with
state (State): The current learning state
Returns:
Conn: The connection
"""
x = self._optimize(conn.step_x.x[0], conn.step_x.t[0])
update_io(IO(x), conn.step_x.x)
conn.tie_inp()
return conn
class LeastSquaresLearner(LearningMachine):
"""Learner that uses least squares"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
optimize_dx: bool = True,
x_lr: float = 1e-2,
x_reduction: str = "mean",
):
super().__init__()
self._linear = nn.Linear(in_features, out_features, bias)
self._loss = ThLoss("mse", "mean")
self._step_x = LeastSquaresStepX(
self._linear, LeastSquaresRidgeSolver(1e-4, False), optimize_dx
)
self._step_theta = LeastSquaresStepTheta(
self._linear, LeastSquaresRidgeSolver(1e-3, bias=bias), optimize_dx
)
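    # Example (sketch): the learner wires the ridge solvers above to a single
    # linear layer. Calling it via the nn.Module __call__ convention is an
    # assumption here:
    #
    #   learner = LeastSquaresLearner(in_features=16, out_features=4)
    #   y = learner(IO(torch.randn(32, 16)), State())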
def assess_y(self, y: IO, t: IO, reduction_override: str = None) -> AssessmentDict:
return self._loss.assess_dict(y[0], t[0], reduction_override=reduction_override)
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
conn = self._step_theta.step(conn, state, from_)
return conn
def step_x(self, conn: Conn, state: State) -> Conn:
conn = self._step_x.step_x(conn, state)
return conn
def forward(self, x: IO, state: State, detach: bool = True) -> IO:
x.freshen(False)
        return IO(self._linear(x[0]), detach=detach)

| zenkai | /kikai/least_squares.py | least_squares.py |
import typing
from ..kaku import IO, AssessmentDict, Conn, LearningMachine, State, ThLoss
# local
from ..utils import Reversible, SequenceReversible
class ReversibleMachine(LearningMachine):
def __init__(
self,
reversible: typing.Union[Reversible, typing.List[Reversible]],
loss: ThLoss,
maximize: bool = False,
):
"""initializer
Args:
reversible (typing.Union[Reversible, typing.List[Reversible]]): Reversible module to adapt
loss (ThLoss): The loss
maximize (bool, optional): _description_. Defaults to False.
"""
super().__init__(maximize)
if isinstance(reversible, typing.List):
reversible = SequenceReversible(*reversible)
self.reversible = reversible
self.loss = loss
def assess_y(self, y: IO, t: IO, reduction_override: str = None) -> AssessmentDict:
return self.loss.assess_dict(y, t, reduction_override).transfer(
"loss", self.validation_name
)
def step_x(self, conn: Conn, state: State) -> Conn:
"""Update x
Args:
conn (Conn): The connection to update based on
state (State): The learning state
Returns:
Conn: The connection with an updated target for step
"""
conn.step_x.x_(self.reversible.reverse(conn.step_x.t[0]))
conn.tie_inp(True)
return conn
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
"""These layers do not have parameters so the internal mechanics are not updated
Args:
conn (Conn): The connection for the layer
state (State): The learning state
from_ (IO, optional): The input to the previous layer. Defaults to None.
Returns:
Conn: the connection for the preceding layer
"""
return conn.connect_in(from_)
def forward(self, x: IO, state: State, detach: bool = True) -> IO:
        return IO(self.reversible(x[0]), detach=detach)

| zenkai | /kikai/reversible.py | reversible.py |
import typing
import torch
# 3rd Party
import torch.nn as nn
from ..kaku import AssessmentDict, OptimFactory, ThLoss
# Local
from ..kaku import (
IO,
BatchIdxStepTheta,
BatchIdxStepX,
Conn,
Idx,
LearningMachine,
State,
StepTheta,
StepX,
idx_io,
)
class GradStepTheta(StepTheta):
"""Update theta with the loss between y and t on the forward pass"""
Y_NAME = "y"
def __init__(
self,
learner: LearningMachine,
optim_factory: OptimFactory,
reduction: str = "mean",
):
super().__init__()
self.learner = learner
self.optim = optim_factory(learner.parameters())
self.reduction = reduction
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
        x, t, _ = conn.step
        # use the output cached during the forward pass, if there is one
        y = state.get(self, self.Y_NAME)
stepped = state.get(self, "stepped", False)
if stepped or y is None:
y = self.learner(x, state, detach=False)
self.optim.zero_grad()
assessment = self.learner.assess_y(y, t)
assessment.backward("loss")
state.store(self, "stepped", True)
self.optim.step()
return conn.connect_in(from_)
class NullStepTheta(StepTheta):
"""Do not update theta"""
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
        return conn.connect_in(from_)
class GradLoopStepTheta(BatchIdxStepTheta):
"""Update theta with the loss between y and t after passing forward again"""
def __init__(
self,
learner: LearningMachine,
optim_factory: OptimFactory,
reduction: str = "mean",
loss_name: str = "loss",
):
super().__init__()
self.learner = learner
self.optim = optim_factory(learner.parameters())
self.reduction = reduction
self.loss_name = loss_name
def step(
self, conn: Conn, state: State, from_: IO = None, batch_idx: Idx = None
) -> Conn:
x = idx_io(conn.step.x, batch_idx, False)
t = idx_io(conn.step.t, batch_idx, False)
y = self.learner(x, state, False)
self.optim.zero_grad()
assessment = self.learner.assess_y(y, t, self.reduction)
assessment[self.loss_name].backward()
self.optim.step()
return conn.connect_in(from_)
class GradStepX(StepX):
"""Update x with the loss between y and t based on the grad value of step_x.x"""
Y_NAME = "y"
def step_x(self, conn: Conn, state: State) -> Conn:
x = conn.step_x.x[0]
x = x - x.grad
x.grad = None
# TODO: Debug. This is causing problems in backpropagation
# due to the inplace operation
# update_io(IO(x), conn.step_x.x)
conn.step_x.x = IO(x, detach=True)
conn = conn.tie_step(True)
return conn
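# Note: GradStepX assumes the gradient of the loss with respect to the input
# has already been populated (e.g. by an assessment.backward(...) call
# upstream, as in GradStepTheta) so that conn.step_x.x[0].grad is set when
# step_x runs.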
class GradLoopStepX(BatchIdxStepX):
"""Update x with the loss between y and t after passing x forward again and getting the grad of x"""
def __init__(
self,
learner: LearningMachine,
optim_factory: OptimFactory,
reduction: str = "mean",
loss_name: str = "loss",
):
"""initializer
Args:
learner (LearningMachine):
optim_factory (OptimFactory): OptimFactory for "optimizing" x
reduction (str, optional): The loss reduction to use. Defaults to 'mean'.
loss_name (str, optional): Name of the loss. Defaults to 'loss'.
"""
super().__init__()
self.learner = learner
self.optim_factory = optim_factory
self.reduction = reduction
self.loss_name = loss_name
def step_x(self, conn: Conn, state: State, batch_idx: Idx = None) -> Conn:
my_state = conn.state.mine(self)
if "optim" not in my_state:
my_state.optim = self.optim_factory([*conn.step_x.x])
x = idx_io(conn.step_x.x, batch_idx)
t = idx_io(conn.step_x.t, batch_idx)
my_state.optim.zero_grad()
        y = self.learner(x, state, detach=False)
assessment = self.learner.assess_y(y, t, self.reduction)
assessment.backward(self.loss_name)
my_state.optim.step()
# TODO: Detach
return conn.tie_inp()
class GradLearner(LearningMachine):
"""Standard gradient learner"""
VALIDATION_NAME = "validation"
LOSS_NAME = "loss"
Y_NAME = "y"
def __init__(
self,
sequence: typing.List[nn.Module],
loss: ThLoss,
optim_factory: OptimFactory,
theta_reduction: str = "mean",
):
super().__init__()
self._sequence = nn.Sequential(*sequence)
self._loss = loss
self._theta_step = GradStepTheta(self, optim_factory, theta_reduction)
self._x_step = GradStepX()
def assess_y(self, y: IO, t: IO, reduction_override: str = None) -> AssessmentDict:
assessment = self._loss.assess_dict(y[0], t[0], reduction_override)
assessment[self.VALIDATION_NAME] = assessment[self.LOSS_NAME]
return assessment
def step(self, conn: Conn, state: State, from_: IO = None) -> Conn:
return self._theta_step.step(conn, state, from_)
def step_x(self, conn: Conn, state: State) -> Conn:
return self._x_step.step_x(conn, state)
def forward(self, x: IO, state: State, detach: bool = True) -> IO:
x.freshen(False)
y = state[self, self.Y_NAME] = IO(self._sequence(*x.vals), detach=False)
return y.out(detach)
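# Example (sketch) of assembling a GradLearner. The OptimFactory arguments and
# the ability to call the learner directly (nn.Module __call__ convention) are
# assumptions here; ThLoss("mse", "mean") follows the usage elsewhere in this
# package.
#
#   learner = GradLearner(
#       sequence=[nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4)],
#       loss=ThLoss("mse", "mean"),
#       optim_factory=OptimFactory("sgd", lr=1e-2),  # hypothetical arguments
#   )
#   y = learner(IO(torch.randn(8, 16)), State())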
class GradLoopLearner(LearningMachine, BatchIdxStepX, BatchIdxStepTheta):
"""Gradient learner designed for multiple loops"""
LOSS_NAME = "loss"
VALIDATION_NAME = "validation"
Y_NAME = "y"
def __init__(
self,
sequence: typing.List[nn.Module],
loss: ThLoss,
theta_optim_factory: OptimFactory,
x_optim_factory: OptimFactory,
theta_reduction: str = "mean",
x_reduction: str = "mean",
):
super().__init__()
self._sequence = nn.Sequential(*sequence)
self._loss = loss
self._theta_step = GradLoopStepTheta(self, theta_optim_factory, theta_reduction)
self._x_step = GradLoopStepX(self, x_optim_factory, x_reduction)
def assess_y(self, y: IO, t: IO, reduction_override: str = None) -> AssessmentDict:
assessment = self._loss.assess_dict(y[0], t[0], reduction_override)
assessment[self.VALIDATION_NAME] = assessment[self.LOSS_NAME]
return assessment
def step(
self, conn: Conn, state: State, from_: IO = None, batch_idx: Idx = None
) -> Conn:
return self._theta_step.step(conn, state, from_, batch_idx)
def step_x(self, conn: Conn, state: State, batch_idx: Idx = None) -> Conn:
return self._x_step.step_x(conn, state, batch_idx)
def forward(self, x: IO, state: State, detach: bool = True) -> IO:
x.freshen(False)
y = state[self, self.Y_NAME] = IO(self._sequence(*x.vals), detach=False)
return y.out(detach)
def update_x(
x: IO, lr: float = 1.0, detach: bool = False, zero_grad: bool = False
) -> IO:
"""Updates x by subtracting the gradient from x times the learning rate
Args:
x (IO): the IO to update. Grad must not be 0
lr (float, optional): multipler to multiple the gradient by. Defaults to 1.0.
detach (bool, optional): whether to detach the output. Defaults to False.
zero_grad (bool, optional): whether the gradient should be set to none. Defaults to True.
Returns:
IO: updated x
"""
updated = []
for x_i in x:
if isinstance(x_i, torch.Tensor):
x_i = x_i - lr * x_i.grad
if zero_grad:
x_i.grad = None
updated.append(x_i)
    return IO(*updated, detach=detach)

| zenkai | /kikai/grad.py | grad.py |
=============================
Zenlayer Cloud SDK for Python
=============================
Zenlayer Cloud Python SDK is the official software development kit, which allows Python developers to write software that makes use of Zenlayer Cloud services like BMC and VM.
The SDK works on Python versions:
* 2.7 and greater, including 3.x
Quick Start
-----------
First, install the library:
.. code-block:: sh
$ pip install zenlayercloud-sdk-python
or download the source code from GitHub and install it:
.. code-block:: sh
$ git clone https://github.com/zenlayer/zenlayercloud-sdk-python.git
$ cd zenlayercloud-sdk-python
$ python setup.py install
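
After installation, construct a client and call an API. This is a minimal
sketch: the credential module, its constructor arguments, and the
``vm_client`` module name are assumptions and may differ in your SDK version.

.. code-block:: python

    from zenlayercloud.common import credential
    from zenlayercloud.vm.v20230313 import vm_client, models

    cred = credential.Credential("<accessKeyId>", "<accessKeyPassword>")
    client = vm_client.VmClient(cred)

    request = models.DescribeZonesRequest()
    response = client.DescribeZones(request)
    print(response.requestId)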
| zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/README.rst | README.rst |
from zenlayercloud.common.abstract_model import AbstractModel
class DescribeZonesRequest(AbstractModel):
def __init__(self):
self.zoneIds = None
def _deserialize(self, params):
self.zoneIds = params.get("zoneIds")
class DescribeZonesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.zoneSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("zoneSet") is not None:
self.zoneSet = []
for item in params.get("zoneSet"):
obj = ZoneInfo(item)
self.zoneSet.append(obj)
class ZoneInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.zoneName = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.zoneName = params.get("zoneName")
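# Example (sketch): model classes are populated from the plain dicts returned
# by the API, so they can also be constructed by hand, e.g. for tests:
#
#   zone = ZoneInfo({"zoneId": "SEL-A", "zoneName": "Seoul Zone A"})
#   assert zone.zoneId == "SEL-A"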
class DescribeZoneInstanceConfigInfosRequest(AbstractModel):
def __init__(self):
self.instanceChargeType = None
self.zoneId = None
self.instanceType = None
def _deserialize(self, params):
self.instanceChargeType = params.get("instanceChargeType")
self.zoneId = params.get("zoneId")
self.instanceType = params.get("instanceType")
class DescribeZoneInstanceConfigInfosResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceTypeQuotaSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instanceTypeQuotaSet") is not None:
self.instanceTypeQuotaSet = []
for item in params.get("instanceTypeQuotaSet"):
obj = InstanceTypeQuotaItem(item)
self.instanceTypeQuotaSet.append(obj)
class InstanceTypeQuotaItem(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.instanceType = None
self.cpuCount = None
self.memory = None
self.frequency = None
self.internetMaxBandwidthOutLimit = None
self.instanceTypeName = None
self.internetChargeTypes = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceType = params.get("instanceType")
self.cpuCount = params.get("cpuCount")
self.memory = params.get("memory")
self.frequency = params.get("frequency")
self.internetMaxBandwidthOutLimit = params.get("internetMaxBandwidthOutLimit")
self.instanceTypeName = params.get("instanceTypeName")
self.internetChargeTypes = params.get("internetChargeTypes")
class InquiryPriceCreateInstanceRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.instanceType = None
self.instanceChargeType = None
self.internetChargeType = None
self.instanceChargePrepaid = None
self.trafficPackageSize = None
self.internetMaxBandwidthOut = None
self.systemDisk = None
self.dataDisks = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceType = params.get("instanceType")
self.instanceChargeType = params.get("instanceChargeType")
self.internetChargeType = params.get("internetChargeType")
if params.get("instanceChargePrepaid") is not None:
self.instanceChargePrepaid = ChargePrepaid(params.get("instanceChargePrepaid"))
self.trafficPackageSize = params.get("trafficPackageSize")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
if params.get("systemDisk") is not None:
self.systemDisk = SystemDisk(params.get("systemDisk"))
if params.get("dataDisks") is not None:
self.dataDisks = []
for item in params.get("dataDisks"):
obj = DataDisk(item)
self.dataDisks.append(obj)
class InquiryPriceCreateInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instancePrice = None
self.bandwidthPrice = None
self.systemDiskPrice = None
self.dataDiskPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.instancePrice = Price(params.get("instancePrice"))
if params.get("bandwidthPrice") is not None:
self.bandwidthPrice = []
for item in params.get("bandwidthPrice"):
obj = Price(item)
self.bandwidthPrice.append(obj)
self.systemDiskPrice = Price(params.get("systemDiskPrice"))
if params.get("dataDiskPrice") is not None:
self.dataDiskPrice = Price(params.get("dataDiskPrice"))
class ChargePrepaid(AbstractModel):
"""描述了实例的计费模式
"""
def __init__(self, params=None):
"""
:param period: 购买实例的时长,单位:月。
:type period: int
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.period = None
def _deserialize(self, params):
self.period = params.get("period")
class SystemDisk(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.diskId = None
self.diskSize = None
self.diskCategory = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
self.diskSize = params.get("diskSize")
self.diskCategory = params.get("diskCategory")
class DataDisk(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.diskId = None
self.diskSize = None
self.diskName = None
self.diskCategory = None
self.portable = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
self.diskSize = params.get("diskSize")
self.diskName = params.get("diskName")
self.diskCategory = params.get("diskCategory")
self.portable = params.get("portable")
class Price(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.originalPrice = None
self.discountPrice = None
self.discount = None
self.unitPrice = None
self.discountUnitPrice = None
self.chargeUnit = None
self.stepPrices = None
def _deserialize(self, params):
self.originalPrice = params.get("originalPrice")
self.discountPrice = params.get("discountPrice")
self.discount = params.get("discount")
self.unitPrice = params.get("unitPrice")
self.discountUnitPrice = params.get("discountUnitPrice")
self.chargeUnit = params.get("chargeUnit")
if params.get("stepPrices") is not None:
self.stepPrices = []
for item in params.get("stepPrices"):
obj = StepPrice(item)
self.stepPrices.append(obj)
class StepPrice(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.stepStart = None
self.stepEnd = None
self.unitPrice = None
self.discountUnitPrice = None
def _deserialize(self, params):
self.stepStart = params.get("stepStart")
self.stepEnd = params.get("stepEnd")
self.unitPrice = params.get("unitPrice")
self.discountUnitPrice = params.get("discountUnitPrice")
class CreateInstancesRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.instanceChargeType = None
self.instanceChargePrepaid = None
self.instanceType = None
self.imageId = None
self.resourceGroupId = None
self.instanceName = None
self.instanceCount = None
self.password = None
self.internetChargeType = None
self.internetMaxBandwidthOut = None
self.trafficPackageSize = None
self.subnetId = None
self.systemDisk = None
self.dataDisks = None
self.securityGroupId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceChargeType = params.get("instanceChargeType")
if params.get("instanceChargePrepaid") is not None:
self.instanceChargePrepaid = ChargePrepaid(params.get("instanceChargePrepaid"))
self.instanceType = params.get("instanceType")
self.imageId = params.get("imageId")
self.resourceGroupId = params.get("resourceGroupId")
self.instanceName = params.get("instanceName")
self.instanceCount = params.get("instanceCount")
self.password = params.get("password")
self.internetChargeType = params.get("internetChargeType")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
self.trafficPackageSize = params.get("trafficPackageSize")
self.subnetId = params.get("subnetId")
if params.get("systemDisk") is not None:
self.systemDisk = SystemDisk(params.get("systemDisk"))
if params.get("dataDisks") is not None:
self.dataDisks = []
for item in params.get("dataDisks"):
obj = DataDisk(item)
self.dataDisks.append(obj)
self.securityGroupId = params.get("securityGroupId")
class CreateInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
self.instanceIdSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
self.instanceIdSet = params.get("instanceIdSet")
class DescribeInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.zoneId = None
self.resourceGroupId = None
self.instanceType = None
self.internetChargeType = None
self.imageId = None
self.subnetId = None
self.instanceStatus = None
self.instanceName = None
self.securityGroupId = None
self.publicIpAddresses = None
self.privateIpAddresses = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.zoneId = params.get("zoneId")
self.resourceGroupId = params.get("resourceGroupId")
self.instanceType = params.get("instanceType")
self.internetChargeType = params.get("internetChargeType")
self.imageId = params.get("imageId")
self.subnetId = params.get("subnetId")
self.instanceStatus = params.get("instanceStatus")
self.instanceName = params.get("instanceName")
self.securityGroupId = params.get("securityGroupId")
self.publicIpAddresses = params.get("publicIpAddresses")
self.privateIpAddresses = params.get("privateIpAddresses")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = InstanceInfo(item)
self.dataSet.append(obj)
class InstanceInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.zoneId = None
self.instanceName = None
self.instanceType = None
self.cpuCount = None
self.memory = None
self.imageId = None
self.imageName = None
self.instanceChargeType = None
self.internetMaxBandwidthOut = None
self.internetChargeType = None
self.period = None
self.publicIpAddresses = None
self.privateIpAddresses = None
self.subnetId = None
self.createTime = None
self.expiredTime = None
self.resourceGroupId = None
self.resourceGroupName = None
self.instanceStatus = None
self.trafficPackageSize = None
self.securityGroupIds = None
self.systemDisk = None
self.dataDisks = None
self.autoRenew = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.zoneId = params.get("zoneId")
self.instanceName = params.get("instanceName")
self.instanceType = params.get("instanceType")
self.cpuCount = params.get("cpuCount")
self.memory = params.get("memory")
self.imageId = params.get("imageId")
self.imageName = params.get("imageName")
self.instanceChargeType = params.get("instanceChargeType")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
self.internetChargeType = params.get("internetChargeType")
self.period = params.get("period")
self.publicIpAddresses = params.get("publicIpAddresses")
self.privateIpAddresses = params.get("privateIpAddresses")
self.subnetId = params.get("subnetId")
self.createTime = params.get("createTime")
self.expiredTime = params.get("expiredTime")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
self.instanceStatus = params.get("instanceStatus")
self.trafficPackageSize = params.get("trafficPackageSize")
self.securityGroupIds = params.get("securityGroupIds")
if params.get("systemDisk") is not None:
self.systemDisk = SystemDisk(params.get("systemDisk"))
if params.get("dataDisks") is not None:
self.dataDisks = []
for item in params.get("dataDisks"):
obj = DataDisk(item)
self.dataDisks.append(obj)
self.autoRenew = params.get("autoRenew")
class DescribeInstancesStatusRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeInstancesStatusResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = InstanceStatus(item)
self.dataSet.append(obj)
class InstanceStatus(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.instanceStatus = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.instanceStatus = params.get("instanceStatus")
class StartInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class StartInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class StopInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class StopInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RebootInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class RebootInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ResetInstancesPasswordRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.password = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.password = params.get("password")
class ResetInstancesPasswordResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ResetInstanceRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.imageId = None
self.password = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.imageId = params.get("imageId")
self.password = params.get("password")
class ResetInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class TerminateInstanceRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class TerminateInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReleaseInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class ReleaseInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ModifyInstancesAttributeRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.instanceName = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.instanceName = params.get("instanceName")
class ModifyInstancesAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceInstanceBandwidthRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.internetMaxBandwidthOut = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
class InquiryPriceInstanceBandwidthResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.bandwidthPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("bandwidthPrice") is not None:
self.bandwidthPrice = []
for item in params.get("bandwidthPrice"):
obj = Price(item)
self.bandwidthPrice.append(obj)
class ModifyInstanceBandwidthRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.internetMaxBandwidthOut = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
class ModifyInstanceBandwidthResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
class CancelInstanceBandwidthDowngradeRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class CancelInstanceBandwidthDowngradeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceInstanceTrafficPackageRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.trafficPackageSize = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.trafficPackageSize = params.get("trafficPackageSize")
class InquiryPriceInstanceTrafficPackageResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.trafficPackagePrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("trafficPackagePrice") is not None:
self.trafficPackagePrice = []
for item in params.get("trafficPackagePrice"):
obj = Price(item)
self.trafficPackagePrice.append(obj)
class ModifyInstanceTrafficPackageRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.trafficPackageSize = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.trafficPackageSize = params.get("trafficPackageSize")
class ModifyInstanceTrafficPackageResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
class CancelInstanceTrafficPackageDowngradeRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class CancelInstanceTrafficPackageDowngradeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeInstanceInternetStatusRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class DescribeInstanceInternetStatusResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceId = None
self.instanceName = None
self.internetMaxBandwidthOut = None
self.modifiedInternetMaxBandwidthOut = None
self.modifiedBandwidthStatus = None
self.trafficPackageSize = None
self.modifiedTrafficPackageSize = None
self.modifiedTrafficPackageStatus = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.instanceId = params.get("instanceId")
self.instanceName = params.get("instanceName")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
self.modifiedInternetMaxBandwidthOut = params.get("modifiedInternetMaxBandwidthOut")
self.modifiedBandwidthStatus = params.get("modifiedBandwidthStatus")
self.trafficPackageSize = params.get("trafficPackageSize")
self.modifiedTrafficPackageSize = params.get("modifiedTrafficPackageSize")
self.modifiedTrafficPackageStatus = params.get("modifiedTrafficPackageStatus")
class ModifyInstancesResourceGroupRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.resourceGroupId = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.resourceGroupId = params.get("resourceGroupId")
class ModifyInstancesResourceGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeInstanceTrafficRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.startTime = None
self.endTime = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.startTime = params.get("startTime")
self.endTime = params.get("endTime")
class DescribeInstanceTrafficResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.dataList = None
self.in95 = None
self.in95Time = None
self.inAvg = None
self.inMax = None
self.inMin = None
self.inTotal = None
self.maxBandwidth95ValueMbps = None
self.out95 = None
self.out95Time = None
self.outAvg = None
self.outMax = None
self.outMin = None
self.outTotal = None
self.totalUnit = None
self.unit = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("dataList") is not None:
self.dataList = []
for item in params.get("dataList"):
obj = InstanceTrafficData(item)
self.dataList.append(obj)
self.in95 = params.get("in95")
self.in95Time = params.get("in95Time")
self.inAvg = params.get("inAvg")
self.inMax = params.get("inMax")
self.inMin = params.get("inMin")
self.inTotal = params.get("inTotal")
self.maxBandwidth95ValueMbps = params.get("maxBandwidth95ValueMbps")
self.out95 = params.get("out95")
self.out95Time = params.get("out95Time")
self.outAvg = params.get("outAvg")
self.outMax = params.get("outMax")
self.outMin = params.get("outMin")
self.outTotal = params.get("outTotal")
self.totalUnit = params.get("totalUnit")
self.unit = params.get("unit")
class InstanceTrafficData(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.internetRX = None
self.internetTX = None
self.time = None
def _deserialize(self, params):
self.internetRX = params.get("internetRX")
self.internetTX = params.get("internetTX")
self.time = params.get("time")
class DescribeInstanceCpuMonitorRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.startTime = None
self.endTime = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.startTime = params.get("startTime")
self.endTime = params.get("endTime")
class DescribeInstanceCpuMonitorResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.dataList = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("dataList") is not None:
self.dataList = []
for item in params.get("dataList"):
obj = InstanceCpuMonitorData(item)
self.dataList.append(obj)
class InstanceCpuMonitorData(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.cpu = None
self.time = None
def _deserialize(self, params):
self.cpu = params.get("cpu")
self.time = params.get("time")
class CreateDisksRequest(AbstractModel):
def __init__(self):
self.chargeType = None
self.chargePrepaid = None
self.diskName = None
self.diskSize = None
self.diskCategory = None
self.instanceId = None
self.zoneId = None
self.diskAmount = None
self.resourceGroupId = None
def _deserialize(self, params):
self.chargeType = params.get("chargeType")
if params.get("chargePrepaid") is not None:
self.chargePrepaid = ChargePrepaid(params.get("chargePrepaid"))
self.diskName = params.get("diskName")
self.diskSize = params.get("diskSize")
self.diskCategory = params.get("diskCategory")
self.instanceId = params.get("instanceId")
self.zoneId = params.get("zoneId")
self.diskAmount = params.get("diskAmount")
self.resourceGroupId = params.get("resourceGroupId")
class CreateDisksResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
self.diskIds = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
self.diskIds = params.get("diskIds")
class DescribeDisksRequest(AbstractModel):
def __init__(self):
self.diskIds = None
self.diskName = None
self.diskStatus = None
self.diskType = None
self.diskSize = None
self.diskCategory = None
self.portable = None
self.instanceId = None
self.zoneId = None
self.pageSize = None
self.pageNum = None
    def _deserialize(self, params):
        self.diskIds = params.get("diskIds")
        self.diskName = params.get("diskName")
        self.diskStatus = params.get("diskStatus")
        self.diskType = params.get("diskType")
        self.diskSize = params.get("diskSize")
        self.diskCategory = params.get("diskCategory")
        self.portable = params.get("portable")
        self.instanceId = params.get("instanceId")
        self.zoneId = params.get("zoneId")
        self.pageSize = params.get("pageSize")
        self.pageNum = params.get("pageNum")
class DescribeDisksResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = DiskInfo(item)
self.dataSet.append(obj)
class DiskInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.diskId = None
self.diskName = None
self.zoneId = None
self.diskType = None
self.portable = None
self.diskCategory = None
self.diskSize = None
self.diskStatus = None
self.instanceId = None
self.instanceName = None
self.chargeType = None
self.createTime = None
self.expiredTime = None
self.period = None
self.autoRenew = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
self.diskName = params.get("diskName")
self.zoneId = params.get("zoneId")
self.diskType = params.get("diskType")
self.portable = params.get("portable")
self.diskCategory = params.get("diskCategory")
self.diskSize = params.get("diskSize")
self.diskStatus = params.get("diskStatus")
self.instanceId = params.get("instanceId")
self.instanceName = params.get("instanceName")
self.chargeType = params.get("chargeType")
self.createTime = params.get("createTime")
self.expiredTime = params.get("expiredTime")
self.period = params.get("period")
self.autoRenew = params.get("autoRenew")
class AttachDisksRequest(AbstractModel):
def __init__(self):
self.diskIds = None
self.instanceId = None
def _deserialize(self, params):
self.diskIds = params.get("diskIds")
self.instanceId = params.get("instanceId")
class AttachDisksResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ChangeDisksAttachRequest(AbstractModel):
def __init__(self):
self.diskIds = None
self.instanceId = None
def _deserialize(self, params):
self.diskIds = params.get("diskIds")
self.instanceId = params.get("instanceId")
class ChangeDisksAttachResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DetachDisksRequest(AbstractModel):
def __init__(self):
self.diskIds = None
def _deserialize(self, params):
self.diskIds = params.get("diskIds")
class DetachDisksResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ModifyDisksAttributesRequest(AbstractModel):
def __init__(self):
self.diskIds = None
self.diskName = None
def _deserialize(self, params):
self.diskIds = params.get("diskIds")
self.diskName = params.get("diskName")
class ModifyDisksAttributesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceCreateDisksRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.diskSize = None
self.diskAmount = None
self.chargeType = None
self.chargePrepaid = None
self.diskCategory = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.diskSize = params.get("diskSize")
self.diskAmount = params.get("diskAmount")
self.chargeType = params.get("chargeType")
if params.get("chargePrepaid") is not None:
self.chargePrepaid = ChargePrepaid(params.get("chargePrepaid"))
self.diskCategory = params.get("diskCategory")
class InquiryPriceCreateDisksResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.dataDiskPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("dataDiskPrice") is not None:
self.dataDiskPrice = Price(params.get("dataDiskPrice"))
class TerminateDiskRequest(AbstractModel):
def __init__(self):
self.diskId = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
class TerminateDiskResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReleaseDiskRequest(AbstractModel):
def __init__(self):
self.diskId = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
class ReleaseDiskResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RenewDiskRequest(AbstractModel):
def __init__(self):
self.diskId = None
def _deserialize(self, params):
self.diskId = params.get("diskId")
class RenewDiskResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
class DescribeDiskCategoryRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.instanceChargeType = None
self.diskCategory = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceChargeType = params.get("instanceChargeType")
self.diskCategory = params.get("diskCategory")
class DescribeDiskCategoryResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.categoryZoneSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("categoryZoneSet") is not None:
self.categoryZoneSet = []
for item in params.get("categoryZoneSet"):
obj = DiskCategory(item)
self.categoryZoneSet.append(obj)
class DiskCategory(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.categorySet = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.categorySet = params.get("categorySet")
class ModifyDisksResourceGroupRequest(AbstractModel):
def __init__(self):
self.diskIds = None
self.resourceGroupId = None
def _deserialize(self, params):
self.diskIds = params.get("diskIds")
self.resourceGroupId = params.get("resourceGroupId")
class ModifyDisksResourceGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeImagesRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.imageIds = None
self.imageName = None
self.category = None
self.imageType = None
self.osType = None
self.imageStatus = None
self.pageNum = None
self.pageSize = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.imageIds = params.get("imageIds")
self.imageName = params.get("imageName")
self.category = params.get("category")
self.imageType = params.get("imageType")
self.osType = params.get("osType")
self.imageStatus = params.get("imageStatus")
self.pageNum = params.get("pageNum")
self.pageSize = params.get("pageSize")
class DescribeImagesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = ImageInfo(item)
self.dataSet.append(obj)
class ImageInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.imageId = None
self.imageName = None
self.imageType = None
self.imageSize = None
self.imageDescription = None
self.imageVersion = None
self.imageStatus = None
self.category = None
self.osType = None
def _deserialize(self, params):
self.imageId = params.get("imageId")
self.imageName = params.get("imageName")
self.imageType = params.get("imageType")
self.imageSize = params.get("imageSize")
self.imageDescription = params.get("imageDescription")
self.imageVersion = params.get("imageVersion")
self.imageStatus = params.get("imageStatus")
self.category = params.get("category")
self.osType = params.get("osType")
class ModifyImagesAttributesRequest(AbstractModel):
def __init__(self):
self.imageIds = None
self.imageName = None
self.imageDescription = None
def _deserialize(self, params):
self.imageIds = params.get("imageIds")
self.imageName = params.get("imageName")
self.imageDescription = params.get("imageDescription")
class ModifyImagesAttributesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DeleteImagesRequest(AbstractModel):
def __init__(self):
self.imageIds = None
def _deserialize(self, params):
self.imageIds = params.get("imageIds")
class DeleteImagesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class CreateImageRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.imageName = None
self.imageDescription = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.imageName = params.get("imageName")
self.imageDescription = params.get("imageDescription")
class CreateImageResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.imageId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.imageId = params.get("imageId")
class DescribeImageQuotaRequest(AbstractModel):
def __init__(self):
self.zoneIds = None
def _deserialize(self, params):
        self.zoneIds = params.get("zoneIds")
class DescribeImageQuotaResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.images = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("images") is not None:
self.images = []
for item in params.get("images"):
obj = ImageQuotaInfo(item)
self.images.append(obj)
class ImageQuotaInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.count = None
self.maxCount = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.count = params.get("count")
self.maxCount = params.get("maxCount")
class DescribeSecurityGroupsRequest(AbstractModel):
def __init__(self):
self.securityGroupIds = None
self.securityGroupName = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.securityGroupIds = params.get("securityGroupIds")
self.securityGroupName = params.get("securityGroupName")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeSecurityGroupsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = SecurityGroupInfo(item)
self.dataSet.append(obj)
class SecurityGroupInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.securityGroupId = None
self.securityGroupName = None
self.securityGroupStatus = None
self.createTime = None
self.description = None
self.instanceIds = None
self.ruleInfos = None
self.isDefault = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
self.securityGroupName = params.get("securityGroupName")
self.securityGroupStatus = params.get("securityGroupStatus")
self.createTime = params.get("createTime")
self.description = params.get("description")
self.instanceIds = params.get("instanceIds")
if params.get("ruleInfos") is not None:
self.ruleInfos = []
for item in params.get("ruleInfos"):
obj = RuleInfo(item)
self.ruleInfos.append(obj)
self.isDefault = params.get("isDefault")
class RuleInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.direction = None
self.policy = None
self.priority = None
self.ipProtocol = None
self.portRange = None
self.cidrIp = None
def _deserialize(self, params):
self.direction = params.get("direction")
self.policy = params.get("policy")
self.priority = params.get("priority")
self.ipProtocol = params.get("ipProtocol")
self.portRange = params.get("portRange")
self.cidrIp = params.get("cidrIp")
class ModifySecurityGroupsAttributeRequest(AbstractModel):
def __init__(self):
self.securityGroupIds = None
self.securityGroupName = None
self.description = None
def _deserialize(self, params):
self.securityGroupIds = params.get("securityGroupIds")
self.securityGroupName = params.get("securityGroupName")
self.description = params.get("description")
class ModifySecurityGroupsAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeInstanceAvailableSecurityGroupResourcesRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class DescribeInstanceAvailableSecurityGroupResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceAvailableSecurityGroups = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instanceAvailableSecurityGroups") is not None:
self.instanceAvailableSecurityGroups = []
for item in params.get("instanceAvailableSecurityGroups"):
obj = InstanceAvailableSecurityGroup(item)
self.instanceAvailableSecurityGroups.append(obj)
class InstanceAvailableSecurityGroup(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.securityGroupId = None
self.securityGroupName = None
self.isDefault = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
self.securityGroupName = params.get("securityGroupName")
self.isDefault = params.get("isDefault")
class CreateSecurityGroupRequest(AbstractModel):
def __init__(self):
self.securityGroupName = None
self.ruleInfos = None
self.description = None
def _deserialize(self, params):
self.securityGroupName = params.get("securityGroupName")
if params.get("ruleInfos") is not None:
self.ruleInfos = []
for item in params.get("ruleInfos"):
obj = RuleInfo(item)
self.ruleInfos.append(obj)
self.description = params.get("description")
class CreateSecurityGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.securityGroupId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.securityGroupId = params.get("securityGroupId")
class DeleteSecurityGroupRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
class DeleteSecurityGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AuthorizeSecurityGroupRulesRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.ruleInfos = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
if params.get("ruleInfos") is not None:
self.ruleInfos = []
for item in params.get("ruleInfos"):
obj = RuleInfo(item)
self.ruleInfos.append(obj)
class AuthorizeSecurityGroupRulesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ConfigureSecurityGroupRulesRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.ruleInfos = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
if params.get("ruleInfos") is not None:
self.ruleInfos = []
for item in params.get("ruleInfos"):
obj = RuleInfo(item)
self.ruleInfos.append(obj)
class ConfigureSecurityGroupRulesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AuthorizeSecurityGroupRuleRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.direction = None
self.policy = None
self.priority = None
self.ipProtocol = None
self.portRange = None
self.cidrIp = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
self.direction = params.get("direction")
self.policy = params.get("policy")
self.priority = params.get("priority")
self.ipProtocol = params.get("ipProtocol")
self.portRange = params.get("portRange")
self.cidrIp = params.get("cidrIp")
class AuthorizeSecurityGroupRuleResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RevokeSecurityGroupRulesRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.ruleInfos = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
if params.get("ruleInfos") is not None:
self.ruleInfos = []
for item in params.get("ruleInfos"):
obj = RuleInfo(item)
self.ruleInfos.append(obj)
class RevokeSecurityGroupRulesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AssociateSecurityGroupInstanceRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.instanceId = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
self.instanceId = params.get("instanceId")
class AssociateSecurityGroupInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class UnAssociateSecurityGroupInstanceRequest(AbstractModel):
def __init__(self):
self.securityGroupId = None
self.instanceId = None
def _deserialize(self, params):
self.securityGroupId = params.get("securityGroupId")
self.instanceId = params.get("instanceId")
class UnAssociateSecurityGroupInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class CreateSubnetRequest(AbstractModel):
def __init__(self):
self.cidrBlock = None
self.subnetName = None
self.zoneId = None
self.subnetDescription = None
def _deserialize(self, params):
self.cidrBlock = params.get("cidrBlock")
self.subnetName = params.get("subnetName")
self.zoneId = params.get("zoneId")
self.subnetDescription = params.get("subnetDescription")
class CreateSubnetResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.subnetId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.subnetId = params.get("subnetId")
class DeleteSubnetRequest(AbstractModel):
def __init__(self):
self.subnetId = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
class DeleteSubnetResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ModifySubnetsAttributeRequest(AbstractModel):
def __init__(self):
self.subnetIds = None
self.subnetName = None
def _deserialize(self, params):
self.subnetIds = params.get("subnetIds")
self.subnetName = params.get("subnetName")
class ModifySubnetsAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeSubnetsRequest(AbstractModel):
def __init__(self):
self.subnetIds = None
self.cidrBlock = None
self.zoneId = None
self.subnetStatus = None
self.subnetName = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.subnetIds = params.get("subnetIds")
self.cidrBlock = params.get("cidrBlock")
self.zoneId = params.get("zoneId")
self.subnetStatus = params.get("subnetStatus")
self.subnetName = params.get("subnetName")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeSubnetsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = SubnetInfo(item)
self.dataSet.append(obj)
class SubnetInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.subnetId = None
self.zoneId = None
self.subnetName = None
self.subnetStatus = None
self.subnetDescription = None
self.cidrBlock = None
self.cidrBlockList = None
self.usageIpCount = None
self.totalIpCount = None
self.createTime = None
self.instanceIdList = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
self.zoneId = params.get("zoneId")
self.subnetName = params.get("subnetName")
self.subnetStatus = params.get("subnetStatus")
self.subnetDescription = params.get("subnetDescription")
self.cidrBlock = params.get("cidrBlock")
self.cidrBlockList = params.get("cidrBlockList")
self.usageIpCount = params.get("usageIpCount")
self.totalIpCount = params.get("totalIpCount")
self.createTime = params.get("createTime")
self.instanceIdList = params.get("instanceIdList") | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/vm/v20230313/models.py | models.py |
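# Illustrative sketch (not part of the SDK): how a raw DescribeSubnets payload is
# turned into typed SubnetInfo objects via _deserialize. All values are made up.
def _example_describe_subnets_deserialize():
    resp = DescribeSubnetsResponse()
    resp._deserialize({
        "requestId": "req-example",
        "totalCount": 1,
        "dataSet": [{
            "subnetId": "subnet-example",
            "zoneId": "zone-example",
            "subnetName": "demo",
            "cidrBlock": "10.0.0.0/24",
        }],
    })
    # resp.dataSet now holds SubnetInfo instances.
    return [(s.subnetId, s.cidrBlock) for s in resp.dataSet]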
from zenlayercloud.vm.v20230313 import models
from zenlayercloud.common.abstract_client import AbstractClient
class VmClient(AbstractClient):
_api_version = "2023-03-13"
_service = "vm"
def DescribeZones(self, request):
response = self._api_call("DescribeZones", request)
model = models.DescribeZonesResponse()
model._deserialize(response)
return model
def DescribeZoneInstanceConfigInfos(self, request):
response = self._api_call("DescribeZoneInstanceConfigInfos", request)
model = models.DescribeZoneInstanceConfigInfosResponse()
model._deserialize(response)
return model
def InquiryPriceCreateInstance(self, request):
response = self._api_call("InquiryPriceCreateInstance", request)
model = models.InquiryPriceCreateInstanceResponse()
model._deserialize(response)
return model
def CreateInstances(self, request):
response = self._api_call("CreateInstances", request)
model = models.CreateInstancesResponse()
model._deserialize(response)
return model
def DescribeInstances(self, request):
response = self._api_call("DescribeInstances", request)
model = models.DescribeInstancesResponse()
model._deserialize(response)
return model
def DescribeInstancesStatus(self, request):
response = self._api_call("DescribeInstancesStatus", request)
model = models.DescribeInstancesStatusResponse()
model._deserialize(response)
return model
def StartInstances(self, request):
response = self._api_call("StartInstances", request)
model = models.StartInstancesResponse()
model._deserialize(response)
return model
def StopInstances(self, request):
response = self._api_call("StopInstances", request)
model = models.StopInstancesResponse()
model._deserialize(response)
return model
def RebootInstances(self, request):
response = self._api_call("RebootInstances", request)
model = models.RebootInstancesResponse()
model._deserialize(response)
return model
def ResetInstancesPassword(self, request):
response = self._api_call("ResetInstancesPassword", request)
model = models.ResetInstancesPasswordResponse()
model._deserialize(response)
return model
def ResetInstance(self, request):
response = self._api_call("ResetInstance", request)
model = models.ResetInstanceResponse()
model._deserialize(response)
return model
def TerminateInstance(self, request):
response = self._api_call("TerminateInstance", request)
model = models.TerminateInstanceResponse()
model._deserialize(response)
return model
def ReleaseInstances(self, request):
response = self._api_call("ReleaseInstances", request)
model = models.ReleaseInstancesResponse()
model._deserialize(response)
return model
def ModifyInstancesAttribute(self, request):
response = self._api_call("ModifyInstancesAttribute", request)
model = models.ModifyInstancesAttributeResponse()
model._deserialize(response)
return model
def InquiryPriceInstanceBandwidth(self, request):
response = self._api_call("InquiryPriceInstanceBandwidth", request)
model = models.InquiryPriceInstanceBandwidthResponse()
model._deserialize(response)
return model
def ModifyInstanceBandwidth(self, request):
response = self._api_call("ModifyInstanceBandwidth", request)
model = models.ModifyInstanceBandwidthResponse()
model._deserialize(response)
return model
def CancelInstanceBandwidthDowngrade(self, request):
response = self._api_call("CancelInstanceBandwidthDowngrade", request)
model = models.CancelInstanceBandwidthDowngradeResponse()
model._deserialize(response)
return model
def InquiryPriceInstanceTrafficPackage(self, request):
response = self._api_call("InquiryPriceInstanceTrafficPackage", request)
model = models.InquiryPriceInstanceTrafficPackageResponse()
model._deserialize(response)
return model
def ModifyInstanceTrafficPackage(self, request):
response = self._api_call("ModifyInstanceTrafficPackage", request)
model = models.ModifyInstanceTrafficPackageResponse()
model._deserialize(response)
return model
def CancelInstanceTrafficPackageDowngrade(self, request):
response = self._api_call("CancelInstanceTrafficPackageDowngrade", request)
model = models.CancelInstanceTrafficPackageDowngradeResponse()
model._deserialize(response)
return model
def DescribeInstanceInternetStatus(self, request):
response = self._api_call("DescribeInstanceInternetStatus", request)
model = models.DescribeInstanceInternetStatusResponse()
model._deserialize(response)
return model
def ModifyInstancesResourceGroup(self, request):
response = self._api_call("ModifyInstancesResourceGroup", request)
model = models.ModifyInstancesResourceGroupResponse()
model._deserialize(response)
return model
def DescribeInstanceTraffic(self, request):
response = self._api_call("DescribeInstanceTraffic", request)
model = models.DescribeInstanceTrafficResponse()
model._deserialize(response)
return model
def DescribeInstanceCpuMonitor(self, request):
response = self._api_call("DescribeInstanceCpuMonitor", request)
model = models.DescribeInstanceCpuMonitorResponse()
model._deserialize(response)
return model
def CreateDisks(self, request):
response = self._api_call("CreateDisks", request)
model = models.CreateDisksResponse()
model._deserialize(response)
return model
def DescribeDisks(self, request):
response = self._api_call("DescribeDisks", request)
model = models.DescribeDisksResponse()
model._deserialize(response)
return model
def AttachDisks(self, request):
response = self._api_call("AttachDisks", request)
model = models.AttachDisksResponse()
model._deserialize(response)
return model
def ChangeDisksAttach(self, request):
response = self._api_call("ChangeDisksAttach", request)
model = models.ChangeDisksAttachResponse()
model._deserialize(response)
return model
def DetachDisks(self, request):
response = self._api_call("DetachDisks", request)
model = models.DetachDisksResponse()
model._deserialize(response)
return model
def ModifyDisksAttributes(self, request):
response = self._api_call("ModifyDisksAttributes", request)
model = models.ModifyDisksAttributesResponse()
model._deserialize(response)
return model
def InquiryPriceCreateDisks(self, request):
response = self._api_call("InquiryPriceCreateDisks", request)
model = models.InquiryPriceCreateDisksResponse()
model._deserialize(response)
return model
def TerminateDisk(self, request):
response = self._api_call("TerminateDisk", request)
model = models.TerminateDiskResponse()
model._deserialize(response)
return model
def ReleaseDisk(self, request):
response = self._api_call("ReleaseDisk", request)
model = models.ReleaseDiskResponse()
model._deserialize(response)
return model
def RenewDisk(self, request):
response = self._api_call("RenewDisk", request)
model = models.RenewDiskResponse()
model._deserialize(response)
return model
def DescribeDiskCategory(self, request):
response = self._api_call("DescribeDiskCategory", request)
model = models.DescribeDiskCategoryResponse()
model._deserialize(response)
return model
def ModifyDisksResourceGroup(self, request):
response = self._api_call("ModifyDisksResourceGroup", request)
model = models.ModifyDisksResourceGroupResponse()
model._deserialize(response)
return model
def DescribeImages(self, request):
response = self._api_call("DescribeImages", request)
model = models.DescribeImagesResponse()
model._deserialize(response)
return model
def ModifyImagesAttributes(self, request):
response = self._api_call("ModifyImagesAttributes", request)
model = models.ModifyImagesAttributesResponse()
model._deserialize(response)
return model
def DeleteImages(self, request):
response = self._api_call("DeleteImages", request)
model = models.DeleteImagesResponse()
model._deserialize(response)
return model
def CreateImage(self, request):
response = self._api_call("CreateImage", request)
model = models.CreateImageResponse()
model._deserialize(response)
return model
def DescribeImageQuota(self, request):
response = self._api_call("DescribeImageQuota", request)
model = models.DescribeImageQuotaResponse()
model._deserialize(response)
return model
def DescribeSecurityGroups(self, request):
response = self._api_call("DescribeSecurityGroups", request)
model = models.DescribeSecurityGroupsResponse()
model._deserialize(response)
return model
def ModifySecurityGroupsAttribute(self, request):
response = self._api_call("ModifySecurityGroupsAttribute", request)
model = models.ModifySecurityGroupsAttributeResponse()
model._deserialize(response)
return model
def DescribeInstanceAvailableSecurityGroupResources(self, request):
response = self._api_call("DescribeInstanceAvailableSecurityGroupResources", request)
model = models.DescribeInstanceAvailableSecurityGroupResourcesResponse()
model._deserialize(response)
return model
def CreateSecurityGroup(self, request):
response = self._api_call("CreateSecurityGroup", request)
model = models.CreateSecurityGroupResponse()
model._deserialize(response)
return model
def DeleteSecurityGroup(self, request):
response = self._api_call("DeleteSecurityGroup", request)
model = models.DeleteSecurityGroupResponse()
model._deserialize(response)
return model
def AuthorizeSecurityGroupRules(self, request):
response = self._api_call("AuthorizeSecurityGroupRules", request)
model = models.AuthorizeSecurityGroupRulesResponse()
model._deserialize(response)
return model
def ConfigureSecurityGroupRules(self, request):
response = self._api_call("ConfigureSecurityGroupRules", request)
model = models.ConfigureSecurityGroupRulesResponse()
model._deserialize(response)
return model
def AuthorizeSecurityGroupRule(self, request):
response = self._api_call("AuthorizeSecurityGroupRule", request)
model = models.AuthorizeSecurityGroupRuleResponse()
model._deserialize(response)
return model
def RevokeSecurityGroupRules(self, request):
response = self._api_call("RevokeSecurityGroupRules", request)
model = models.RevokeSecurityGroupRulesResponse()
model._deserialize(response)
return model
def AssociateSecurityGroupInstance(self, request):
response = self._api_call("AssociateSecurityGroupInstance", request)
model = models.AssociateSecurityGroupInstanceResponse()
model._deserialize(response)
return model
def UnAssociateSecurityGroupInstance(self, request):
response = self._api_call("UnAssociateSecurityGroupInstance", request)
model = models.UnAssociateSecurityGroupInstanceResponse()
model._deserialize(response)
return model
def CreateSubnet(self, request):
response = self._api_call("CreateSubnet", request)
model = models.CreateSubnetResponse()
model._deserialize(response)
return model
def DeleteSubnet(self, request):
response = self._api_call("DeleteSubnet", request)
model = models.DeleteSubnetResponse()
model._deserialize(response)
return model
def ModifySubnetsAttribute(self, request):
response = self._api_call("ModifySubnetsAttribute", request)
model = models.ModifySubnetsAttributeResponse()
model._deserialize(response)
return model
def DescribeSubnets(self, request):
response = self._api_call("DescribeSubnets", request)
model = models.DescribeSubnetsResponse()
model._deserialize(response)
return model | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/vm/v20230313/vm_client.py | vm_client.py |
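# Illustrative usage sketch (not part of the SDK): every VmClient method follows the
# same pattern -- build a request model, call the method, read the typed response.
# Client construction is inherited from AbstractClient and is not shown here; how
# `client` is created and authenticated is left to the caller.
def _example_list_subnets(client):
    req = models.DescribeSubnetsRequest()
    req.pageSize = 20
    req.pageNum = 1
    resp = client.DescribeSubnets(req)
    return [(s.subnetId, s.cidrBlock) for s in (resp.dataSet or [])]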
from zenlayercloud.bmc.v20221120 import models
from zenlayercloud.common.abstract_client import AbstractClient
class BmcClient(AbstractClient):
_api_version = "2022-11-20"
_service = "bmc"
def DescribeZones(self, request):
response = self._api_call("DescribeZones", request)
model = models.DescribeZonesResponse()
model._deserialize(response)
return model
def DescribeImages(self, request):
response = self._api_call("DescribeImages", request)
model = models.DescribeImagesResponse()
model._deserialize(response)
return model
def DescribeInstances(self, request):
response = self._api_call("DescribeInstances", request)
model = models.DescribeInstancesResponse()
model._deserialize(response)
return model
def CreateInstances(self, request):
""" 本接口 (CreateInstances) 用于创建一个或多个指定配置的BMC实例。
:param request: Request instance for CreateInstances.
:return:
"""
response = self._api_call("CreateInstances", request)
model = models.CreateInstancesResponse()
model._deserialize(response)
return model
def StartInstances(self, request):
response = self._api_call("StartInstances", request)
model = models.StartInstancesResponse()
model._deserialize(response)
return model
def StopInstances(self, request):
response = self._api_call("StopInstances", request)
model = models.StopInstancesResponse()
model._deserialize(response)
return model
def RebootInstances(self, request):
response = self._api_call("RebootInstances", request)
model = models.RebootInstancesResponse()
model._deserialize(response)
return model
def ReinstallInstance(self, request):
response = self._api_call("ReinstallInstance", request)
model = models.ReinstallInstanceResponse()
model._deserialize(response)
return model
def TerminateInstance(self, request):
response = self._api_call("TerminateInstance", request)
model = models.TerminateInstanceResponse()
model._deserialize(response)
return model
def ReleaseInstances(self, request):
response = self._api_call("ReleaseInstances", request)
model = models.ReleaseInstancesResponse()
model._deserialize(response)
return model
def RenewInstance(self, request):
response = self._api_call("RenewInstance", request)
model = models.RenewInstanceResponse()
model._deserialize(response)
return model
def ModifyInstancesAttribute(self, request):
response = self._api_call("ModifyInstancesAttribute", request)
model = models.ModifyInstancesAttributeResponse()
model._deserialize(response)
return model
def InquiryPriceCreateInstance(self, request):
response = self._api_call("InquiryPriceCreateInstance", request)
model = models.InquiryPriceCreateInstanceResponse()
model._deserialize(response)
return model
def DescribeInstanceTypes(self, request):
response = self._api_call("DescribeInstanceTypes", request)
model = models.DescribeInstanceTypesResponse()
model._deserialize(response)
return model
def DescribeAvailableResources(self, request):
response = self._api_call("DescribeAvailableResources", request)
model = models.DescribeAvailableResourcesResponse()
model._deserialize(response)
return model
def ModifyInstanceBandwidth(self, request):
response = self._api_call("ModifyInstanceBandwidth", request)
model = models.ModifyInstanceBandwidthResponse()
model._deserialize(response)
return model
def CancelInstanceBandwidthDowngrade(self, request):
response = self._api_call("CancelInstanceBandwidthDowngrade", request)
model = models.CancelInstanceBandwidthDowngradeResponse()
model._deserialize(response)
return model
def InquiryPriceInstanceBandwidth(self, request):
response = self._api_call("InquiryPriceInstanceBandwidth", request)
model = models.InquiryPriceInstanceBandwidthResponse()
model._deserialize(response)
return model
def ModifyInstanceTrafficPackage(self, request):
response = self._api_call("ModifyInstanceTrafficPackage", request)
model = models.ModifyInstanceTrafficPackageResponse()
model._deserialize(response)
return model
def CancelInstanceTrafficPackageDowngrade(self, request):
response = self._api_call("CancelInstanceTrafficPackageDowngrade", request)
model = models.CancelInstanceTrafficPackageDowngradeResponse()
model._deserialize(response)
return model
def InquiryPriceInstanceTrafficPackage(self, request):
response = self._api_call("InquiryPriceInstanceTrafficPackage", request)
model = models.InquiryPriceInstanceTrafficPackageResponse()
model._deserialize(response)
return model
def DescribeInstanceInternetStatus(self, request):
response = self._api_call("DescribeInstanceInternetStatus", request)
model = models.DescribeInstanceInternetStatusResponse()
model._deserialize(response)
return model
def ModifyInstancesResourceGroup(self, request):
response = self._api_call("ModifyInstancesResourceGroup", request)
model = models.ModifyInstancesResourceGroupResponse()
model._deserialize(response)
return model
def DescribeInstanceTraffic(self, request):
response = self._api_call("DescribeInstanceTraffic", request)
model = models.DescribeInstanceTrafficResponse()
model._deserialize(response)
return model
    # EIP APIs
def DescribeEipAddresses(self, request):
response = self._api_call("DescribeEipAddresses", request)
model = models.DescribeEipAddressesResponse()
model._deserialize(response)
return model
def DescribeEipAvailableResources(self, request):
response = self._api_call("DescribeEipAvailableResources", request)
model = models.DescribeEipAvailableResourcesResponse()
model._deserialize(response)
return model
def AllocateEipAddresses(self, request):
response = self._api_call("AllocateEipAddresses", request)
model = models.AllocateEipAddressesResponse()
model._deserialize(response)
return model
def TerminateEipAddress(self, request):
response = self._api_call("TerminateEipAddress", request)
model = models.TerminateEipAddressResponse()
model._deserialize(response)
return model
def ReleaseEipAddresses(self, request):
response = self._api_call("ReleaseEipAddresses", request)
model = models.ReleaseEipAddressesResponse()
model._deserialize(response)
return model
def RenewEipAddress(self, request):
response = self._api_call("RenewEipAddress", request)
model = models.RenewEipAddressResponse()
model._deserialize(response)
return model
def AssociateEipAddress(self, request):
response = self._api_call("AssociateEipAddress", request)
model = models.AssociateEipAddressResponse()
model._deserialize(response)
return model
def UnAssociateEipAddress(self, request):
response = self._api_call("UnAssociateEipAddress", request)
model = models.UnAssociateEipAddressResponse()
model._deserialize(response)
return model
def InquiryPriceCreateEipAddress(self, request):
response = self._api_call("InquiryPriceCreateEipAddress", request)
model = models.InquiryPriceCreateEipAddressResponse()
model._deserialize(response)
return model
def DescribeInstanceAvailableEipResources(self, request):
response = self._api_call("DescribeInstanceAvailableEipResources", request)
model = models.DescribeInstanceAvailableEipResourcesResponse()
model._deserialize(response)
return model
def ModifyEipAddressesResourceGroup(self, request):
response = self._api_call("ModifyEipAddressesResourceGroup", request)
model = models.ModifyEipAddressesResourceGroupResponse()
model._deserialize(response)
return model
    # CIDR block APIs
def DescribeCidrBlocks(self, request):
response = self._api_call("DescribeCidrBlocks", request)
model = models.DescribeCidrBlocksResponse()
model._deserialize(response)
return model
def DescribeCidrBlockIps(self, request):
response = self._api_call("DescribeCidrBlockIps", request)
model = models.DescribeCidrBlockIpsResponse()
model._deserialize(response)
return model
def DescribeAvailableIpv4Resources(self, request):
response = self._api_call("DescribeAvailableIpv4Resources", request)
model = models.DescribeAvailableIpv4ResourcesResponse()
model._deserialize(response)
return model
def DescribeAvailableIpv6Resources(self, request):
response = self._api_call("DescribeAvailableIpv6Resources", request)
model = models.DescribeAvailableIpv6ResourcesResponse()
model._deserialize(response)
return model
def DescribeInstanceAvailableCidrBlock(self, request):
response = self._api_call("DescribeInstanceAvailableCidrBlock", request)
model = models.DescribeInstanceAvailableCidrBlockResponse()
model._deserialize(response)
return model
def InquiryPriceCreateIpv4Block(self, request):
response = self._api_call("InquiryPriceCreateIpv4Block", request)
model = models.InquiryPriceCreateIpv4BlockResponse()
model._deserialize(response)
return model
def CreateIpv4Block(self, request):
response = self._api_call("CreateIpv4Block", request)
model = models.CreateIpv4BlockResponse()
model._deserialize(response)
return model
def CreateIpv6Block(self, request):
response = self._api_call("CreateIpv6Block", request)
model = models.CreateIpv6BlockResponse()
model._deserialize(response)
return model
def ModifyCidrBlocksAttribute(self, request):
response = self._api_call("ModifyCidrBlocksAttribute", request)
model = models.ModifyCidrBlocksAttributeResponse()
model._deserialize(response)
return model
def RenewCidrBlock(self, request):
response = self._api_call("RenewCidrBlock", request)
model = models.RenewCidrBlockResponse()
model._deserialize(response)
return model
def TerminateCidrBlock(self, request):
response = self._api_call("TerminateCidrBlock", request)
model = models.TerminateCidrBlockResponse()
model._deserialize(response)
return model
def ReleaseCidrBlocks(self, request):
response = self._api_call("ReleaseCidrBlocks", request)
model = models.ReleaseCidrBlocksResponse()
model._deserialize(response)
return model
def BindCidrBlockIps(self, request):
response = self._api_call("BindCidrBlockIps", request)
model = models.BindCidrBlockIpsResponse()
model._deserialize(response)
return model
def UnbindCidrBlockIps(self, request):
response = self._api_call("UnbindCidrBlockIps", request)
model = models.UnbindCidrBlockIpsResponse()
model._deserialize(response)
return model
    # VPC and subnet APIs
def DescribeVpcAvailableRegions(self, request):
response = self._api_call("DescribeVpcAvailableRegions", request)
model = models.DescribeVpcAvailableRegionsResponse()
model._deserialize(response)
return model
def ModifyVpcsAttribute(self, request):
response = self._api_call("ModifyVpcsAttribute", request)
model = models.ModifyVpcsAttributeResponse()
model._deserialize(response)
return model
def DescribeVpcs(self, request):
response = self._api_call("DescribeVpcs", request)
model = models.DescribeVpcsResponse()
model._deserialize(response)
return model
def CreateVpc(self, request):
response = self._api_call("CreateVpc", request)
model = models.CreateVpcResponse()
model._deserialize(response)
return model
def DeleteVpc(self, request):
response = self._api_call("DeleteVpc", request)
model = models.DeleteVpcResponse()
model._deserialize(response)
return model
def DescribeSubnets(self, request):
response = self._api_call("DescribeSubnets", request)
model = models.DescribeSubnetsResponse()
model._deserialize(response)
return model
def ModifySubnetsAttribute(self, request):
response = self._api_call("ModifySubnetsAttribute", request)
model = models.ModifySubnetsAttributeResponse()
model._deserialize(response)
return model
def CreateSubnet(self, request):
response = self._api_call("CreateSubnet", request)
model = models.CreateSubnetResponse()
model._deserialize(response)
return model
def DeleteSubnet(self, request):
response = self._api_call("DeleteSubnet", request)
model = models.DeleteSubnetResponse()
model._deserialize(response)
return model
def AssociateSubnetInstances(self, request):
response = self._api_call("AssociateSubnetInstances", request)
model = models.AssociateSubnetInstancesResponse()
model._deserialize(response)
return model
def UnAssociateSubnetInstance(self, request):
response = self._api_call("UnAssociateSubnetInstance", request)
model = models.UnAssociateSubnetInstanceResponse()
model._deserialize(response)
return model
def AssociateVpcSubnet(self, request):
response = self._api_call("AssociateVpcSubnet", request)
model = models.AssociateVpcSubnetResponse()
model._deserialize(response)
return model
def DescribeSubnetAvailableResources(self, request):
response = self._api_call("DescribeSubnetAvailableResources", request)
model = models.DescribeSubnetAvailableResourcesResponse()
model._deserialize(response)
return model | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/bmc/v20221120/bmc_client.py | bmc_client.py |
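# Illustrative usage sketch (not part of the SDK): BmcClient calls follow the same
# request/response pattern as the rest of the SDK. `client` is assumed to be an
# already constructed and authenticated BmcClient; the status value is hypothetical.
def _example_list_instances(client):
    req = models.DescribeInstancesRequest()
    req.instanceStatus = "RUNNING"  # assumed status value
    req.pageSize = 50
    req.pageNum = 1
    resp = client.DescribeInstances(req)
    return [(i.instanceId, i.instanceName) for i in (resp.dataSet or [])]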
from zenlayercloud.common.abstract_model import AbstractModel
class CreateInstancesRequest(AbstractModel):
"""CreateInstances请求参数结构体
"""
def __init__(self):
self.zoneId = None
self.instanceChargeType = None
self.instanceChargePrepaid = None
self.instanceTypeId = None
self.imageId = None
self.resourceGroupId = None
self.instanceName = None
self.hostname = None
self.amount = None
self.password = None
self.sshKeys = None
self.internetChargeType = None
self.internetMaxBandwidthOut = None
self.trafficPackageSize = None
self.subnetId = None
self.raidConfig = None
self.partitions = None
self.nic = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceChargeType = params.get("instanceChargeType")
if params.get("instanceChargePrepaid") is not None:
self.instanceChargePrepaid = ChargePrepaid(params.get("instanceChargePrepaid"))
self.instanceTypeId = params.get("instanceTypeId")
self.imageId = params.get("imageId")
        self.resourceGroupId = params.get("resourceGroupId")
self.instanceName = params.get("instanceName")
self.hostname = params.get("hostname")
self.amount = params.get("amount")
self.password = params.get("password")
self.sshKeys = params.get("sshKeys")
self.internetChargeType = params.get("internetChargeType")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
self.trafficPackageSize = params.get("trafficPackageSize")
self.subnetId = params.get("subnetId")
if params.get("raidConfig") is not None:
self.raidConfig = RaidConfig(params.get("raidConfig"))
if params.get("partitions") is not None:
self.partitions = []
for item in params.get("partitions"):
obj = Partition(item)
self.partitions.append(obj)
if params.get("nic") is not None:
self.nic = Nic(params.get("nic"))
class CreateInstancesResponse(AbstractModel):
"""CreateInstances返回参数结构体
"""
def __init__(self):
self.instanceIdSet = None
self.requestId = None
def _deserialize(self, params):
self.instanceIdSet = params.get("instanceIdSet")
self.requestId = params.get("requestId")
class ChargePrepaid(AbstractModel):
"""描述了实例的计费模式
"""
def __init__(self, params=None):
"""
:param period: 购买实例的时长,单位:月。
:type period: int
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.period = None
def _deserialize(self, params):
self.period = params.get("period")
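# Illustrative sketch (not part of the SDK): request models are populated by plain
# attribute assignment; _deserialize is only needed when the data arrives as a dict.
# All IDs and values below are made up.
def _example_build_create_instances_request():
    req = CreateInstancesRequest()
    req.zoneId = "zone-example"                  # hypothetical zone ID
    req.instanceTypeId = "type-example"          # hypothetical instance type ID
    req.imageId = "img-example"                  # hypothetical image ID
    req.instanceChargeType = "PREPAID"           # assumed charge-type value
    req.instanceChargePrepaid = ChargePrepaid({"period": 1})  # one month
    req.amount = 1
    req.hostname = "demo-host"
    req.password = "change-me"                   # never hard-code real credentials
    return req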
class Zone(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.zoneName = None
self.cityName = None
self.areaName = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.zoneName = params.get("zoneName")
self.cityName = params.get("cityName")
self.areaName = params.get("areaName")
class RaidConfig(AbstractModel):
"""描述了Raid的配置信息
"""
def __init__(self, params=None):
r"""
:param raidType: Raid类型。
该配置进行快捷raid配置,支持0, 1, 5, 10。
raidType和customRaids只能指定其中一个参数。
:type raidType: int
:param customRaids: 自定义Raid配置。
自定义磁盘进行raid的配置。
raidType和customRaids只能指定其中一个参数。
:type customRaids: list of CustomRaid
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.raidType = None
self.customRaids = None
def _deserialize(self, params):
self.raidType = params.get("raidType")
if params.get("customRaids") is not None:
self.customRaids = []
for item in params.get("customRaids"):
obj = CustomRaid(item)
self.customRaids.append(obj)
class CustomRaid(AbstractModel):
"""进行自定义Raid配置时需要的raid级别和指定的磁盘序号。
"""
def __init__(self, params=None):
r"""
:param raidType: Raid类型。
支持0, 1, 5, 10。
:type raidType: int
:param diskSequence: 磁盘序号。
根据机型里的磁盘从1开始顺序编号。如果是多个磁盘序号,则必须连续。
:type diskSequence: list of int
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.raidType = None
self.diskSequence = None
def _deserialize(self, params):
self.raidType = params.get("raidType")
self.diskSequence = params.get("diskSequence")
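# Illustrative sketch (not part of the SDK): the two mutually exclusive ways to
# express a RAID layout documented above. Disk sequence numbers are made up.
def _example_raid_configs():
    quick = RaidConfig({"raidType": 10})  # quick config: RAID 10
    custom = RaidConfig({
        "customRaids": [
            {"raidType": 1, "diskSequence": [1, 2]},     # mirror the first two disks
            {"raidType": 5, "diskSequence": [3, 4, 5]},  # RAID 5 over the next three
        ]
    })
    return quick, custom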
class Partition(AbstractModel):
"""分区配置信息。包括文件类型, 分区大小等。
"""
def __init__(self, params=None):
r"""
:param fsType: 分区的文件类型。
linux系统:支持的值ext2,ext3, ext4, ext类型必须要有。
windows系统: 只能为NTFS。
:type fsType: int
:param fsPath: 分区盘符。
linux系统:必须为/开头,且第一个为系统分区必须为/。
windows系统:支持C~H,第一个系统分区必须指定为C。
:type fsPath: str
:param size: 分区大小。
单位为GB。
:type size: str
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.fsType = None
self.fsPath = None
self.size = None
def _deserialize(self, params):
self.fsType = params.get("fsType")
self.fsPath = params.get("fsPath")
self.size = params.get("size")
class Nic(AbstractModel):
"""分区配置信息。包括文件类型, 分区大小等。
"""
def __init__(self, params=None):
r"""
:param wanName: 公网网卡名称。
只能是数字和大小写字母,且必须以字母开头,长度限制为4-10。
非高可用机型,默认的公网网卡名称为wan0。且不能为lan开头。
高可用机型,默认的公网网卡名称为bond0。
公网名称和内网名称不能相同。
:type wanName: str
:param lanName: 内网网卡名称。
只能是数字和大小写字母,且必须以字母开头,长度限制为4-10。
非高可用机型,默认的内网网卡名称为lan0。且不能为wan开头。
高可用机型,默认的内网网卡名称为bond1。
公网名称和内网名称不能相同。
:type lanName: str
"""
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.wanName = None
self.lanName = None
def _deserialize(self, params):
self.wanName = params.get("wanName")
self.lanName = params.get("lanName")
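# Illustrative sketch (not part of the SDK): a Linux partition layout plus NIC names
# that satisfy the constraints documented above. Sizes and names are made up.
def _example_partitions_and_nic():
    partitions = [
        Partition({"fsType": "ext4", "fsPath": "/", "size": "50"}),  # system partition
        Partition({"fsType": "ext4", "fsPath": "/data", "size": "400"}),
    ]
    nic = Nic({"wanName": "wan0", "lanName": "lan0"})  # defaults for non-HA types
    return partitions, nic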
class DescribeZonesRequest(AbstractModel):
def __init__(self):
self.acceptLanguage = None
def _deserialize(self, params):
self.acceptLanguage = params.get("acceptLanguage")
class DescribeZonesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.zoneSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("zoneSet") is not None:
self.zoneSet = []
for item in params.get("zoneSet"):
obj = Zone(item)
self.zoneSet.append(obj)
class DescribeImagesRequest(AbstractModel):
def __init__(self):
self.imageIds = None
self.imageName = None
self.catalog = None
self.imageType = None
self.osType = None
self.instanceTypeId = None
def _deserialize(self, params):
self.imageIds = params.get("imageIds")
self.imageName = params.get("imageName")
self.catalog = params.get("catalog")
self.imageType = params.get("imageType")
self.osType = params.get("osType")
self.instanceTypeId = params.get("instanceTypeId")
class DescribeImagesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.images = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("images") is not None:
self.images = []
for item in params.get("images"):
obj = ImageInfo(item)
self.images.append(obj)
class ImageInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.imageId = None
self.imageName = None
self.catalog = None
self.imageType = None
self.osType = None
def _deserialize(self, params):
self.imageId = params.get("imageId")
self.imageName = params.get("imageName")
self.catalog = params.get("catalog")
self.imageType = params.get("imageType")
self.osType = params.get("osType")
class DescribeInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.zoneId = None
self.resourceGroupId = None
self.instanceTypeId = None
self.internetChargeType = None
self.imageId = None
self.subnetId = None
self.instanceStatus = None
self.instanceName = None
self.hostname = None
self.publicIpAddresses = None
self.privateIpAddresses = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.zoneId = params.get("zoneId")
self.resourceGroupId = params.get("resourceGroupId")
self.instanceTypeId = params.get("instanceTypeId")
self.internetChargeType = params.get("internetChargeType")
self.imageId = params.get("imageId")
self.subnetId = params.get("subnetId")
self.instanceStatus = params.get("instanceStatus")
self.instanceName = params.get("instanceName")
self.hostname = params.get("hostname")
self.publicIpAddresses = params.get("publicIpAddresses")
self.privateIpAddresses = params.get("privateIpAddresses")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = InstanceInfo(item)
self.dataSet.append(obj)
class InstanceInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.zoneId = None
self.instanceName = None
self.hostname = None
self.instanceTypeId = None
self.imageId = None
self.imageName = None
self.instanceChargeType = None
self.bandwidthOutMbps = None
self.internetChargeType = None
self.period = None
self.publicIpAddresses = None
self.privateIpAddresses = None
self.ipv6Addresses = None
self.subnetIds = None
self.createTime = None
self.expiredTime = None
self.resourceGroupId = None
self.resourceGroupName = None
self.instanceStatus = None
self.primaryPublicIpAddress = None
self.trafficPackageSize = None
self.raidConfig = None
self.partitions = None
self.nic = None
self.autoRenew = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.zoneId = params.get("zoneId")
self.instanceName = params.get("instanceName")
self.hostname = params.get("hostname")
self.instanceTypeId = params.get("instanceTypeId")
self.imageId = params.get("imageId")
self.imageName = params.get("imageName")
self.instanceChargeType = params.get("instanceChargeType")
self.bandwidthOutMbps = params.get("bandwidthOutMbps")
self.internetChargeType = params.get("internetChargeType")
self.period = params.get("period")
self.publicIpAddresses = params.get("publicIpAddresses")
self.privateIpAddresses = params.get("privateIpAddresses")
self.ipv6Addresses = params.get("ipv6Addresses")
self.subnetIds = params.get("subnetIds")
self.createTime = params.get("createTime")
self.expiredTime = params.get("expiredTime")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
self.instanceStatus = params.get("instanceStatus")
self.primaryPublicIpAddress = params.get("primaryPublicIpAddress")
self.trafficPackageSize = params.get("trafficPackageSize")
if params.get("raidConfig") is not None:
self.raidConfig = RaidConfig(params.get("raidConfig"))
if params.get("partitions") is not None:
self.partitions = []
for item in params.get("partitions"):
obj = Partition(item)
self.partitions.append(obj)
if params.get("nic") is not None:
self.nic = Nic(params.get("nic"))
self.autoRenew = params.get("autoRenew")
class StartInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class StartInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class StopInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class StopInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RebootInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class RebootInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReinstallInstanceRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.imageId = None
self.hostname = None
self.password = None
self.sshKeys = None
self.raidConfig = None
self.partitions = None
self.nic = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.imageId = params.get("imageId")
self.hostname = params.get("hostname")
self.password = params.get("password")
self.sshKeys = params.get("sshKeys")
if params.get("raidConfig") is not None:
self.raidConfig = RaidConfig(params.get("raidConfig"))
if params.get("partitions") is not None:
self.partitions = []
for item in params.get("partitions"):
obj = Partition(item)
self.partitions.append(obj)
if params.get("nic") is not None:
self.nic = Nic(params.get("nic"))
class ReinstallInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class TerminateInstanceRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class TerminateInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReleaseInstancesRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
class ReleaseInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RenewInstanceRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class RenewInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ModifyInstancesAttributeRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.instanceName = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.instanceName = params.get("instanceName")
class ModifyInstancesAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceCreateInstanceRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.instanceTypeId = None
self.instanceChargeType = None
self.internetChargeType = None
self.instanceChargePrepaid = None
self.trafficPackageSize = None
self.internetMaxBandwidthOut = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.instanceTypeId = params.get("instanceTypeId")
self.instanceChargeType = params.get("instanceChargeType")
self.internetChargeType = params.get("internetChargeType")
if params.get("instanceChargePrepaid") is not None:
self.instanceChargePrepaid = ChargePrepaid(params.get("instanceChargePrepaid"))
self.trafficPackageSize = params.get("trafficPackageSize")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
class InquiryPriceCreateInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instancePrice = None
self.bandwidthPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instancePrice") is not None:
self.instancePrice = Price(params.get("instancePrice"))
if params.get("bandwidthPrice") is not None:
self.bandwidthPrice = []
for item in params.get("bandwidthPrice"):
obj = Price(item)
self.bandwidthPrice.append(obj)
class Price(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.originalPrice = None
self.discountPrice = None
self.discount = None
self.unitPrice = None
self.discountUnitPrice = None
self.chargeUnit = None
self.stepPrices = None
def _deserialize(self, params):
self.originalPrice = params.get("originalPrice")
self.discountPrice = params.get("discountPrice")
self.discount = params.get("discount")
self.unitPrice = params.get("unitPrice")
self.discountUnitPrice = params.get("discountUnitPrice")
self.chargeUnit = params.get("chargeUnit")
if params.get("stepPrices") is not None:
self.stepPrices = []
for item in params.get("stepPrices"):
obj = StepPrice(item)
self.stepPrices.append(obj)
class StepPrice(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.stepStart = None
self.stepEnd = None
self.unitPrice = None
self.discountUnitPrice = None
def _deserialize(self, params):
self.stepStart = params.get("stepStart")
self.stepEnd = params.get("stepEnd")
self.unitPrice = params.get("unitPrice")
self.discountUnitPrice = params.get("discountUnitPrice")
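# Illustrative sketch (not part of the SDK): picking the per-unit price for a given
# usage level from tiered stepPrices. The exact semantics of stepStart/stepEnd are
# an assumption based on the field names.
def _example_unit_price_for_usage(price, usage):
    if not price.stepPrices:
        return price.discountUnitPrice if price.discountUnitPrice is not None else price.unitPrice
    for step in price.stepPrices:
        if step.stepStart <= usage and (step.stepEnd is None or usage <= step.stepEnd):
            return step.discountUnitPrice if step.discountUnitPrice is not None else step.unitPrice
    return None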
class DescribeInstanceTypesRequest(AbstractModel):
def __init__(self):
self.instanceTypeIds = None
self.minimumCpuCoreCount = None
self.maximumCpuCoreCount = None
self.minimumMemorySize = None
self.maximumMemorySize = None
self.minimumBandwidth = None
self.supportRaids = None
self.supportSubnet = None
self.minimumDiskSize = None
self.maximumDiskSize = None
self.isHA = None
self.imageId = None
def _deserialize(self, params):
self.instanceTypeIds = params.get("instanceTypeIds")
self.minimumCpuCoreCount = params.get("minimumCpuCoreCount")
self.maximumCpuCoreCount = params.get("maximumCpuCoreCount")
self.minimumMemorySize = params.get("minimumMemorySize")
self.maximumMemorySize = params.get("maximumMemorySize")
self.minimumBandwidth = params.get("minimumBandwidth")
self.supportRaids = params.get("supportRaids")
self.supportSubnet = params.get("supportSubnet")
self.minimumDiskSize = params.get("minimumDiskSize")
self.maximumDiskSize = params.get("maximumDiskSize")
self.isHA = params.get("isHA")
self.imageId = params.get("imageId")
class DescribeInstanceTypesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceTypes = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instanceTypes") is not None:
self.instanceTypes = []
for item in params.get("instanceTypes"):
obj = InstanceType(item)
self.instanceTypes.append(obj)
class InstanceType(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceTypeId = None
self.description = None
self.cpuCoreCount = None
self.memorySize = None
self.supportRaids = None
self.supportSubnet = None
self.maximumBandwidth = None
self.diskInfo = None
self.imageIds = None
self.isHA = None
def _deserialize(self, params):
self.instanceTypeId = params.get("instanceTypeId")
self.description = params.get("description")
self.cpuCoreCount = params.get("cpuCoreCount")
self.memorySize = params.get("memorySize")
self.supportRaids = params.get("supportRaids")
self.supportSubnet = params.get("supportSubnet")
self.maximumBandwidth = params.get("maximumBandwidth")
if params.get("diskInfo") is not None:
self.diskInfo = InstanceDiskInfo(params.get("diskInfo"))
self.imageIds = params.get("imageIds")
self.isHA = params.get("isHA")
class InstanceDiskInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.totalDiskSize = None
self.diskDescription = None
self.disks = None
def _deserialize(self, params):
self.totalDiskSize = params.get("totalDiskSize")
self.diskDescription = params.get("diskDescription")
if params.get("disks") is not None:
self.disks = []
for item in params.get("disks"):
obj = Disk(item)
self.disks.append(obj)
class Disk(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.diskSize = None
self.diskCount = None
def _deserialize(self, params):
self.diskSize = params.get("diskSize")
self.diskCount = params.get("diskCount")
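# Illustrative sketch (not part of the SDK): flattening an InstanceDiskInfo into a
# short human-readable summary. The payload below is made up.
def _example_disk_summary():
    info = InstanceDiskInfo({
        "totalDiskSize": 1920,
        "disks": [{"diskSize": 480, "diskCount": 4}],
    })
    parts = ["%dx %dGB" % (d.diskCount, d.diskSize) for d in (info.disks or [])]
    return ", ".join(parts)  # -> "4x 480GB"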
class DescribeAvailableResourcesRequest(AbstractModel):
def __init__(self):
self.instanceChargeType = None
self.zoneId = None
self.instanceTypeId = None
self.sellStatus = None
def _deserialize(self, params):
self.instanceChargeType = params.get("instanceChargeType")
self.zoneId = params.get("zoneId")
self.instanceTypeId = params.get("instanceTypeId")
self.sellStatus = params.get("sellStatus")
class DescribeAvailableResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.availableResources = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("availableResources") is not None:
self.availableResources = []
for item in params.get("availableResources"):
obj = AvailableResource(item)
self.availableResources.append(obj)
class AvailableResource(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.sellStatus = None
self.internetChargeTypes = None
self.instanceTypeId = None
self.maximumBandwidthOut = None
self.defaultBandwidthOut = None
self.defaultTrafficPackageSize = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.sellStatus = params.get("sellStatus")
self.internetChargeTypes = params.get("internetChargeTypes")
self.instanceTypeId = params.get("instanceTypeId")
self.maximumBandwidthOut = params.get("maximumBandwidthOut")
self.defaultBandwidthOut = params.get("defaultBandwidthOut")
self.defaultTrafficPackageSize = params.get("defaultTrafficPackageSize")
class ModifyInstanceBandwidthRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.bandwidthOutMbps = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.bandwidthOutMbps = params.get("bandwidthOutMbps")
class ModifyInstanceBandwidthResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
class CancelInstanceBandwidthDowngradeRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class CancelInstanceBandwidthDowngradeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceInstanceBandwidthRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.bandwidthOutMbps = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.bandwidthOutMbps = params.get("bandwidthOutMbps")
class InquiryPriceInstanceBandwidthResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.bandwidthPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("bandwidthPrice") is not None:
self.bandwidthPrice = []
for item in params.get("bandwidthPrice"):
obj = Price(item)
self.bandwidthPrice.append(obj)
class ModifyInstanceTrafficPackageRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.trafficPackageSize = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.trafficPackageSize = params.get("trafficPackageSize")
class ModifyInstanceTrafficPackageResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
class CancelInstanceTrafficPackageDowngradeRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class CancelInstanceTrafficPackageDowngradeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceInstanceTrafficPackageRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.trafficPackageSize = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.trafficPackageSize = params.get("trafficPackageSize")
class InquiryPriceInstanceTrafficPackageResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.trafficPackagePrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("trafficPackagePrice") is not None:
self.trafficPackagePrice = []
for item in params.get("trafficPackagePrice"):
obj = Price(item)
self.trafficPackagePrice.append(obj)
class DescribeInstanceInternetStatusRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class DescribeInstanceInternetStatusResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceId = None
self.instanceName = None
self.internetMaxBandwidthOut = None
self.modifiedInternetMaxBandwidthOut = None
self.modifiedBandwidthStatus = None
self.trafficPackageSize = None
self.modifiedTrafficPackageSize = None
self.modifiedTrafficPackageStatus = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.instanceId = params.get("instanceId")
self.instanceName = params.get("instanceName")
self.internetMaxBandwidthOut = params.get("internetMaxBandwidthOut")
self.modifiedInternetMaxBandwidthOut = params.get("modifiedInternetMaxBandwidthOut")
self.modifiedBandwidthStatus = params.get("modifiedBandwidthStatus")
self.trafficPackageSize = params.get("trafficPackageSize")
self.modifiedTrafficPackageSize = params.get("modifiedTrafficPackageSize")
self.modifiedTrafficPackageStatus = params.get("modifiedTrafficPackageStatus")
class ModifyInstancesResourceGroupRequest(AbstractModel):
def __init__(self):
self.instanceIds = None
self.resourceGroupId = None
def _deserialize(self, params):
self.instanceIds = params.get("instanceIds")
self.resourceGroupId = params.get("resourceGroupId")
class ModifyInstancesResourceGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeInstanceTrafficRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.startTime = None
self.endTime = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.startTime = params.get("startTime")
self.endTime = params.get("endTime")
class DescribeInstanceTrafficResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.dataList = None
self.in95 = None
self.in95Time = None
self.inAvg = None
self.inMax = None
self.inMin = None
self.inTotal = None
self.maxBandwidth95ValueMbps = None
self.out95 = None
self.out95Time = None
self.outAvg = None
self.outMax = None
self.outMin = None
self.outTotal = None
self.totalUnit = None
self.unit = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("dataList") is not None:
self.dataList = []
for item in params.get("dataList"):
obj = InstanceTrafficData(item)
self.dataList.append(obj)
self.in95 = params.get("in95")
self.in95Time = params.get("in95Time")
self.inAvg = params.get("inAvg")
self.inMax = params.get("inMax")
self.inMin = params.get("inMin")
self.inTotal = params.get("inTotal")
self.maxBandwidth95ValueMbps = params.get("maxBandwidth95ValueMbps")
self.out95 = params.get("out95")
self.out95Time = params.get("out95Time")
self.outAvg = params.get("outAvg")
self.outMax = params.get("outMax")
self.outMin = params.get("outMin")
self.outTotal = params.get("outTotal")
self.totalUnit = params.get("totalUnit")
self.unit = params.get("unit")
class InstanceTrafficData(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.internetRX = None
self.internetTX = None
self.time = None
def _deserialize(self, params):
self.internetRX = params.get("internetRX")
self.internetTX = params.get("internetTX")
self.time = params.get("time")
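# Illustrative sketch (not part of the SDK): summarising the 95th-percentile and
# total-traffic fields of a DescribeInstanceTrafficResponse. `resp` is assumed to
# be the return value of BmcClient.DescribeInstanceTraffic.
def _example_traffic_summary(resp):
    return {
        "in95": (resp.in95, resp.unit),
        "out95": (resp.out95, resp.unit),
        "inTotal": (resp.inTotal, resp.totalUnit),
        "outTotal": (resp.outTotal, resp.totalUnit),
    }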
class DescribeEipAddressesRequest(AbstractModel):
def __init__(self):
self.eipChargeType = None
self.eipIds = None
self.eipStatus = None
self.instanceId = None
self.instanceName = None
self.ipAddress = None
self.zoneId = None
self.resourceGroupId = None
self.pageNum = None
self.pageSize = None
def _deserialize(self, params):
self.eipChargeType = params.get("eipChargeType")
self.eipIds = params.get("eipIds")
self.eipStatus = params.get("eipStatus")
self.instanceId = params.get("instanceId")
self.instanceName = params.get("instanceName")
self.ipAddress = params.get("ipAddress")
self.zoneId = params.get("zoneId")
self.resourceGroupId = params.get("resourceGroupId")
self.pageNum = params.get("pageNum")
self.pageSize = params.get("pageSize")
class DescribeEipAddressesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = EipAddress(item)
self.dataSet.append(obj)
class EipAddress(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.eipId = None
self.zoneId = None
self.ipAddress = None
self.instanceId = None
self.instanceName = None
self.eipChargeType = None
self.period = None
self.createTime = None
self.expiredTime = None
self.eipStatus = None
self.resourceGroupId = None
self.resourceGroupName = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
self.zoneId = params.get("zoneId")
self.ipAddress = params.get("ipAddress")
self.instanceId = params.get("instanceId")
self.instanceName = params.get("instanceName")
self.eipChargeType = params.get("eipChargeType")
self.period = params.get("period")
self.createTime = params.get("createTime")
self.expiredTime = params.get("expiredTime")
self.eipStatus = params.get("eipStatus")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
class DescribeEipAvailableResourcesRequest(AbstractModel):
def __init__(self):
self.eipChargeType = None
self.zoneId = None
def _deserialize(self, params):
self.eipChargeType = params.get("eipChargeType")
self.zoneId = params.get("zoneId")
class DescribeEipAvailableResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.eipResources = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("eipResources") is not None:
self.eipResources = []
for item in params.get("eipResources"):
obj = EipAvailable(item)
self.eipResources.append(obj)
class EipAvailable(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.status = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.status = params.get("status")
class AllocateEipAddressesRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.eipChargeType = None
self.eipChargePrepaid = None
self.amount = None
self.resourceGroupId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.eipChargeType = params.get("eipChargeType")
if params.get("eipChargePrepaid") is not None:
self.eipChargePrepaid = ChargePrepaid(params.get("eipChargePrepaid"))
self.amount = params.get("amount")
self.resourceGroupId = params.get("resourceGroupId")
class AllocateEipAddressesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
self.eipIdSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
self.eipIdSet = params.get("eipIdSet")
class TerminateEipAddressRequest(AbstractModel):
def __init__(self):
self.eipId = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
class TerminateEipAddressResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReleaseEipAddressesRequest(AbstractModel):
def __init__(self):
self.eipIds = None
def _deserialize(self, params):
self.eipIds = params.get("eipIds")
class ReleaseEipAddressesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RenewEipAddressRequest(AbstractModel):
def __init__(self):
self.eipId = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
class RenewEipAddressResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AssociateEipAddressRequest(AbstractModel):
def __init__(self):
self.eipId = None
self.instanceId = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
self.instanceId = params.get("instanceId")
class AssociateEipAddressResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class UnAssociateEipAddressRequest(AbstractModel):
def __init__(self):
self.eipId = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
class UnAssociateEipAddressResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class InquiryPriceCreateEipAddressRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.eipChargeType = None
self.eipChargePrepaid = None
self.amount = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.eipChargeType = params.get("eipChargeType")
if params.get("eipChargePrepaid") is not None:
self.eipChargePrepaid = ChargePrepaid(params.get("eipChargePrepaid"))
self.amount = params.get("amount")
class InquiryPriceCreateEipAddressResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.eipPrice = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("eipPrice") is not None:
self.eipPrice = Price(params.get("eipPrice"))
class DescribeInstanceAvailableEipResourcesRequest(AbstractModel):
def __init__(self):
self.instanceId = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
class DescribeInstanceAvailableEipResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceEipResources = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instanceEipResources") is not None:
self.instanceEipResources = []
for item in params.get("instanceEipResources"):
obj = InstanceAvailableEip(item)
self.instanceEipResources.append(obj)
class InstanceAvailableEip(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.eipId = None
self.ipAddress = None
def _deserialize(self, params):
self.eipId = params.get("eipId")
self.ipAddress = params.get("ipAddress")
class ModifyEipAddressesResourceGroupRequest(AbstractModel):
def __init__(self):
self.eipIds = None
self.resourceGroupId = None
def _deserialize(self, params):
self.eipIds = params.get("eipIds")
self.resourceGroupId = params.get("resourceGroupId")
class ModifyEipAddressesResourceGroupResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeCidrBlocksRequest(AbstractModel):
def __init__(self):
self.cidrBlockIds = None
self.cidrBlock = None
self.cidrBlockName = None
self.zoneId = None
self.cidrBlockType = None
self.gateway = None
self.chargeType = None
self.resourceGroupId = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.cidrBlockIds = params.get("cidrBlockIds")
self.cidrBlock = params.get("cidrBlock")
self.cidrBlockName = params.get("cidrBlockName")
self.zoneId = params.get("zoneId")
self.cidrBlockType = params.get("cidrBlockType")
self.gateway = params.get("gateway")
self.chargeType = params.get("chargeType")
self.resourceGroupId = params.get("resourceGroupId")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeCidrBlocksResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = CidrBlockInfo(item)
self.dataSet.append(obj)
class CidrBlockInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.cidrBlockId = None
self.cidrBlockType = None
self.cidrBlockName = None
self.zoneId = None
self.cidrBlock = None
self.gateway = None
self.availableIpStart = None
self.availableIpEnd = None
self.availableIpCount = None
self.instanceIds = None
self.status = None
self.chargeType = None
self.createTime = None
self.expireTime = None
self.resourceGroupId = None
self.resourceGroupName = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
self.cidrBlockType = params.get("cidrBlockType")
self.cidrBlockName = params.get("cidrBlockName")
self.zoneId = params.get("zoneId")
self.cidrBlock = params.get("cidrBlock")
self.gateway = params.get("gateway")
self.availableIpStart = params.get("availableIpStart")
self.availableIpEnd = params.get("availableIpEnd")
self.availableIpCount = params.get("availableIpCount")
self.instanceIds = params.get("instanceIds")
self.status = params.get("status")
self.chargeType = params.get("chargeType")
self.createTime = params.get("createTime")
self.expireTime = params.get("expireTime")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
class DescribeCidrBlockIpsRequest(AbstractModel):
def __init__(self):
self.cidrBlockId = None
self.instanceId = None
self.ip = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
self.instanceId = params.get("instanceId")
self.ip = params.get("ip")
class DescribeCidrBlockIpsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.cidrBlockIps = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("cidrBlockIps") is not None:
self.cidrBlockIps = []
for item in params.get("cidrBlockIps"):
obj = CidrBlockIp(item)
self.cidrBlockIps.append(obj)
class CidrBlockIp(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.cidrBlockId = None
self.cidrBlockType = None
self.ip = None
self.instanceId = None
self.status = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
self.cidrBlockType = params.get("cidrBlockType")
self.ip = params.get("ip")
self.instanceId = params.get("instanceId")
self.status = params.get("status")
class DescribeAvailableIpv4ResourcesRequest(AbstractModel):
def __init__(self):
self.chargeType = None
self.zoneId = None
def _deserialize(self, params):
self.chargeType = params.get("chargeType")
self.zoneId = params.get("zoneId")
class DescribeAvailableIpv4ResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.availableIpv4Resources = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("availableIpv4Resources") is not None:
self.availableIpv4Resources = []
for item in params.get("availableIpv4Resources"):
obj = AvailableIpv4Resource(item)
self.availableIpv4Resources.append(obj)
class AvailableIpv4Resource(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.netmask = None
self.sellStatus = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.netmask = params.get("netmask")
self.sellStatus = params.get("sellStatus")
class DescribeAvailableIpv6ResourcesRequest(AbstractModel):
def __init__(self):
self.zoneId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
class DescribeAvailableIpv6ResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.availableIpv6Resources = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("availableIpv6Resources") is not None:
self.availableIpv6Resources = []
for item in params.get("availableIpv6Resources"):
obj = AvailableIpv6Resource(item)
self.availableIpv6Resources.append(obj)
class AvailableIpv6Resource(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.zoneId = None
self.sellStatus = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.sellStatus = params.get("sellStatus")
class DescribeInstanceAvailableCidrBlockRequest(AbstractModel):
def __init__(self):
self.instanceId = None
self.cidrBlockType = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.cidrBlockType = params.get("cidrBlockType")
class DescribeInstanceAvailableCidrBlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.instanceAvailableCidrBlocks = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("instanceAvailableCidrBlocks") is not None:
self.instanceAvailableCidrBlocks = []
for item in params.get("instanceAvailableCidrBlocks"):
obj = InstanceAvailableCidrBlock(item)
self.instanceAvailableCidrBlocks.append(obj)
class InstanceAvailableCidrBlock(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.cidrBlockId = None
self.zoneId = None
self.cidrBlockIpType = None
self.cidrBlock = None
self.availableIps = None
self.availableIpCount = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
self.zoneId = params.get("zoneId")
self.cidrBlockIpType = params.get("cidrBlockIpType")
self.cidrBlock = params.get("cidrBlock")
self.availableIps = params.get("availableIps")
self.availableIpCount = params.get("availableIpCount")
class InquiryPriceCreateIpv4BlockRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.chargeType = None
self.chargePrepaid = None
self.netmask = None
self.amount = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.chargeType = params.get("chargeType")
if params.get("chargePrepaid") is not None:
self.chargePrepaid = ChargePrepaid(params.get("chargePrepaid"))
self.netmask = params.get("netmask")
self.amount = params.get("amount")
class InquiryPriceCreateIpv4BlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.price = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("price") is not None:
self.price = Price(params.get("price"))
class CreateIpv4BlockRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.name = None
self.chargeType = None
self.chargePrepaid = None
self.netmask = None
self.amount = None
self.resourceGroupId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.name = params.get("name")
self.chargeType = params.get("chargeType")
if params.get("chargePrepaid") is not None:
self.chargePrepaid = ChargePrepaid(params.get("chargePrepaid"))
self.netmask = params.get("netmask")
self.amount = params.get("amount")
self.resourceGroupId = params.get("resourceGroupId")
class CreateIpv4BlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.orderNumber = None
self.cidrBlockIds = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.orderNumber = params.get("orderNumber")
self.cidrBlockIds = params.get("cidrBlockIds")
class CreateIpv6BlockRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.name = None
self.amount = None
self.resourceGroupId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.name = params.get("name")
self.amount = params.get("amount")
self.resourceGroupId = params.get("resourceGroupId")
class CreateIpv6BlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.cidrBlockIds = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.cidrBlockIds = params.get("cidrBlockIds")
class ModifyCidrBlocksAttributeRequest(AbstractModel):
def __init__(self):
self.cidrBlockIds = None
self.name = None
def _deserialize(self, params):
self.cidrBlockIds = params.get("cidrBlockIds")
self.name = params.get("name")
class ModifyCidrBlocksAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class RenewCidrBlockRequest(AbstractModel):
def __init__(self):
self.cidrBlockId = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
class RenewCidrBlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class TerminateCidrBlockRequest(AbstractModel):
def __init__(self):
self.cidrBlockId = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
class TerminateCidrBlockResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class ReleaseCidrBlocksRequest(AbstractModel):
def __init__(self):
self.cidrBlockIds = None
def _deserialize(self, params):
self.cidrBlockIds = params.get("cidrBlockIds")
class ReleaseCidrBlocksResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class BindCidrBlockIpsRequest(AbstractModel):
def __init__(self):
self.cidrBlockId = None
self.ipBindList = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
if params.get("ipBindList") is not None:
self.ipBindList = []
for item in params.get("ipBindList"):
obj = IpBindParam(item)
self.ipBindList.append(obj)
class BindCidrBlockIpsResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class IpBindParam(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.ip = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.ip = params.get("ip")
class UnbindCidrBlockIpsRequest(AbstractModel):
def __init__(self):
self.cidrBlockId = None
self.ipList = None
def _deserialize(self, params):
self.cidrBlockId = params.get("cidrBlockId")
self.ipList = params.get("ipList")
class UnbindCidrBlockIpsResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeVpcAvailableRegionsRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.vpcRegionId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.vpcRegionId = params.get("vpcRegionId")
class DescribeVpcAvailableRegionsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.vpcRegionSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("vpcRegionSet") is not None:
self.vpcRegionSet = []
for item in params.get("vpcRegionSet"):
obj = VpcRegionInfo(item)
self.vpcRegionSet.append(obj)
class VpcRegionInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.vpcRegionId = None
self.vpcRegionName = None
self.zoneIds = None
def _deserialize(self, params):
self.vpcRegionId = params.get("vpcRegionId")
self.vpcRegionName = params.get("vpcRegionName")
self.zoneIds = params.get("zoneIds")
class ModifyVpcsAttributeRequest(AbstractModel):
def __init__(self):
self.vpcIds = None
self.vpcName = None
def _deserialize(self, params):
self.vpcIds = params.get("vpcIds")
self.vpcName = params.get("vpcName")
class ModifyVpcsAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeVpcsRequest(AbstractModel):
def __init__(self):
self.vpcIds = None
self.cidrBlock = None
self.vpcStatus = None
self.vpcName = None
self.vpcRegionId = None
self.resourceGroupId = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.vpcIds = params.get("vpcIds")
self.cidrBlock = params.get("cidrBlock")
self.vpcStatus = params.get("vpcStatus")
self.vpcName = params.get("vpcName")
self.vpcRegionId = params.get("vpcRegionId")
self.resourceGroupId = params.get("resourceGroupId")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeVpcsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = VpcInfo(item)
self.dataSet.append(obj)
class VpcInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.vpcId = None
self.vpcRegionId = None
self.vpcRegionName = None
self.vpcName = None
self.cidrBlock = None
self.createTime = None
self.resourceGroupId = None
self.resourceGroupName = None
self.vpcStatus = None
def _deserialize(self, params):
self.vpcId = params.get("vpcId")
self.vpcRegionId = params.get("vpcRegionId")
self.vpcRegionName = params.get("vpcRegionName")
self.vpcName = params.get("vpcName")
self.cidrBlock = params.get("cidrBlock")
self.createTime = params.get("createTime")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
self.vpcStatus = params.get("vpcStatus")
class CreateVpcRequest(AbstractModel):
def __init__(self):
self.vpcRegionId = None
self.cidrBlock = None
self.vpcName = None
self.resourceGroupId = None
def _deserialize(self, params):
self.vpcRegionId = params.get("vpcRegionId")
self.cidrBlock = params.get("cidrBlock")
self.vpcName = params.get("vpcName")
self.resourceGroupId = params.get("resourceGroupId")
class CreateVpcResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.vpcId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.vpcId = params.get("vpcId")
class DeleteVpcRequest(AbstractModel):
def __init__(self):
self.vpcId = None
def _deserialize(self, params):
self.vpcId = params.get("vpcId")
class DeleteVpcResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeSubnetsRequest(AbstractModel):
def __init__(self):
self.subnetIds = None
self.cidrBlock = None
self.zoneId = None
self.subnetStatus = None
self.subnetName = None
self.resourceGroupId = None
self.vpcId = None
self.pageSize = None
self.pageNum = None
def _deserialize(self, params):
self.subnetIds = params.get("subnetIds")
self.cidrBlock = params.get("cidrBlock")
self.zoneId = params.get("zoneId")
self.subnetStatus = params.get("subnetStatus")
self.subnetName = params.get("subnetName")
self.resourceGroupId = params.get("resourceGroupId")
self.vpcId = params.get("vpcId")
self.pageSize = params.get("pageSize")
self.pageNum = params.get("pageNum")
class DescribeSubnetsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.totalCount = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.totalCount = params.get("totalCount")
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = SubnetInfo(item)
self.dataSet.append(obj)
class SubnetInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.subnetId = None
self.subnetName = None
self.zoneId = None
self.availableIpCount = None
self.cidrBlock = None
self.subnetStatus = None
self.createTime = None
self.vpcSubnetStatus = None
self.vpcId = None
self.vpcName = None
self.resourceGroupId = None
self.resourceGroupName = None
self.subnetInstanceSet = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
self.subnetName = params.get("subnetName")
self.zoneId = params.get("zoneId")
self.availableIpCount = params.get("availableIpCount")
self.cidrBlock = params.get("cidrBlock")
self.subnetStatus = params.get("subnetStatus")
self.createTime = params.get("createTime")
self.vpcSubnetStatus = params.get("vpcSubnetStatus")
self.vpcId = params.get("vpcId")
self.vpcName = params.get("vpcName")
self.resourceGroupId = params.get("resourceGroupId")
self.resourceGroupName = params.get("resourceGroupName")
if params.get("subnetInstanceSet") is not None:
self.subnetInstanceSet = []
for item in params.get("subnetInstanceSet"):
obj = SubnetInstance(item)
self.subnetInstanceSet.append(obj)
class SubnetInstance(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.privateIpAddress = None
self.privateIpStatus = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.privateIpAddress = params.get("privateIpAddress")
self.privateIpStatus = params.get("privateIpStatus")
class ModifySubnetsAttributeRequest(AbstractModel):
def __init__(self):
self.subnetIds = None
self.subnetName = None
def _deserialize(self, params):
self.subnetIds = params.get("subnetIds")
self.subnetName = params.get("subnetName")
class ModifySubnetsAttributeResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class CreateSubnetRequest(AbstractModel):
def __init__(self):
self.zoneId = None
self.cidrBlock = None
self.subnetName = None
self.resourceGroupId = None
self.vpcId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
self.cidrBlock = params.get("cidrBlock")
self.subnetName = params.get("subnetName")
self.resourceGroupId = params.get("resourceGroupId")
self.vpcId = params.get("vpcId")
class CreateSubnetResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.subnetId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.subnetId = params.get("subnetId")
class DeleteSubnetRequest(AbstractModel):
def __init__(self):
self.subnetId = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
class DeleteSubnetResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AssociateSubnetInstancesRequest(AbstractModel):
def __init__(self):
self.subnetId = None
self.subnetInstanceList = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
if params.get("subnetInstanceList") is not None:
self.subnetInstanceList = []
for item in params.get("subnetInstanceList"):
obj = AssociateSubnetInstance(item)
self.subnetInstanceList.append(obj)
class AssociateSubnetInstancesResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AssociateSubnetInstance(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.instanceId = None
self.privateIpAddress = None
def _deserialize(self, params):
self.instanceId = params.get("instanceId")
self.privateIpAddress = params.get("privateIpAddress")
class UnAssociateSubnetInstanceRequest(AbstractModel):
def __init__(self):
self.subnetId = None
self.instanceId = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
self.instanceId = params.get("instanceId")
class UnAssociateSubnetInstanceResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class AssociateVpcSubnetRequest(AbstractModel):
def __init__(self):
self.subnetId = None
self.vpcId = None
def _deserialize(self, params):
self.subnetId = params.get("subnetId")
self.vpcId = params.get("vpcId")
class AssociateVpcSubnetResponse(AbstractModel):
def __init__(self):
self.requestId = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
class DescribeSubnetAvailableResourcesRequest(AbstractModel):
def __init__(self):
self.zoneId = None
def _deserialize(self, params):
self.zoneId = params.get("zoneId")
class DescribeSubnetAvailableResourcesResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.zoneIdSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
self.zoneIdSet = params.get("zoneIdSet") | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/bmc/v20221120/models.py | models.py |
from zenlayercloud.common.abstract_model import AbstractModel
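
# Request/response models for the DescribeLogs (operation log) API.
# The 'cursor' field appears to be an opaque pagination token (a plain
# list) echoed between request and response.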
class DescribeLogsRequest(AbstractModel):
def __init__(self):
self.startTime = None
self.endTime = None
self.resUid = None
self.resEvent = None
self.clientIP = None
self.size = None
self.cursor = None
def _deserialize(self, params):
self.startTime = params.get("startTime")
self.endTime = params.get("endTime")
self.resUid = params.get("resUid")
self.resEvent = params.get("resEvent")
self.clientIP = params.get("clientIP")
self.size = params.get("size")
if params.get("cursor") is not None:
self.cursor = []
for item in params.get("cursor"):
obj = item
self.cursor.append(obj)
class DescribeLogsResponse(AbstractModel):
def __init__(self):
self.requestId = None
self.cursor = None
self.dataSet = None
def _deserialize(self, params):
self.requestId = params.get("requestId")
if params.get("cursor") is not None:
self.cursor = []
for item in params.get("cursor"):
obj = item
self.cursor.append(obj)
if params.get("dataSet") is not None:
self.dataSet = []
for item in params.get("dataSet"):
obj = LogInfo(item)
self.dataSet.append(obj)
class LogInfo(AbstractModel):
def __init__(self, params=None):
if params is None:
params = {}
if len(params) > 0:
self._deserialize(params)
return
self.resUid = None
self.resType = None
self.resEvent = None
self.opsTime = None
self.eventSource = None
self.apiVersion = None
self.opsUser = None
self.clientIP = None
self.request = None
self.response = None
def _deserialize(self, params):
self.resUid = params.get("resUid")
self.resType = params.get("resType")
self.resEvent = params.get("resEvent")
self.opsTime = params.get("opsTime")
self.eventSource = params.get("eventSource")
self.apiVersion = params.get("apiVersion")
self.opsUser = params.get("opsUser")
self.clientIP = params.get("clientIP")
self.request = params.get("request")
self.response = params.get("response") | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/zls/v20230804/models.py | models.py |
import json
import time
import zenlayercloud
from zenlayercloud.common.abstract_model import AbstractModel
from zenlayercloud.common.config import Config
from zenlayercloud.common.excpetion import error_code
from zenlayercloud.common.excpetion.zenlayer_cloud_sdk_exception import ZenlayerCloudSdkException
from zenlayercloud.common.request import BaseRequest, ApiProxyClient
from zenlayercloud.common.response import BaseResponse
from zenlayercloud.common.utils import sha256hex, hmac_sha256
_json_content_type = 'application/json'
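
# Minimal usage sketch (the subclass name, constructor arguments and action
# name are illustrative, not defined in this module):
#
#   from zenlayercloud.common.credential import Credential
#   client = SomeServiceClient(Credential(access_key_id, access_key_password))
#   data = client._api_call("DescribeInstances", request_model)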
class AbstractClient(object):
_api_version = ''
_service = ''
_sdk_version = 'SDK_PYTHON_%s' % zenlayercloud.__version__
_signature_method = "ZC2-HMAC-SHA256"
    def __init__(self, credential, config=None):
        """Base client for interacting with Zenlayer Cloud.

        :param credential: the zenlayer cloud credential
        :type credential: zenlayercloud.common.credential.Credential
        :param config: the additional config for client
        :type config: zenlayercloud.common.config.Config
        """
        self.credential = credential
        self.config = Config() if config is None else config

        is_http = self.config.scheme == "http"
        self.proxy = ApiProxyClient(host=self.config.domain,
                                    timeout=self.config.request_timeout,
                                    proxy=self.config.proxy, certification=self.config.certification,
                                    is_http=is_http, debug=self.config.debug, keep_alive=self.config.keep_alive)
    def _api_call(self, action, request, method="POST", headers=None) -> dict:
        if not isinstance(request, AbstractModel):
            raise ZenlayerCloudSdkException(
                code=error_code.SDK_INVALID_REQUEST,
                message="Request must be AbstractModel"
            )
        uri = "/api/v2/%s" % self._service
        req = BaseRequest(host=self.config.domain, method=method, uri=uri, header=headers)

        # Fill in the x-zc-* metadata headers required by the API.
        header = req.header
        timestamp = int(time.time())
        header["x-zc-version"] = self._api_version
        header["x-zc-signature-method"] = self._signature_method
        header["x-zc-timestamp"] = str(timestamp)
        header["x-zc-service"] = self._service
        header["x-zc-action"] = action
        header["x-zc-sdk-version"] = self._sdk_version
        header["x-zc-sdk-lang"] = "Python"
        req.set_host(self._get_endpoint())
        req.set_content_type(_json_content_type)
        req.data = json.dumps(request.serialize())

        # Sign the request, send it and unwrap the response payload.
        authorization = self._build_zc2_authorization(req)
        req.header["Authorization"] = authorization
        resp = self._send_request(req)
        return self._handle_response(resp)
    def _build_zc2_authorization(self, req):
        # Step 1: build the canonical request from the HTTP method, URI,
        # query string, the signed headers (content-type and host) and the
        # SHA-256 hash of the payload.
        canonical_querystring = ""
        canonical_uri = "/"
        request_payload = req.data
        http_request_method = req.method
        canonical_headers = "content-type:%s\nhost:%s\n" % (
            req.get_content_type(),
            req.get_host()
        )
        signed_headers = "content-type;host"
        hashed_request_payload = sha256hex(request_payload)
        canonical_request = '%s\n%s\n%s\n%s\n%s\n%s' % (http_request_method,
                                                        canonical_uri,
                                                        canonical_querystring,
                                                        canonical_headers,
                                                        signed_headers,
                                                        hashed_request_payload)

        # Step 2: build the string to sign from the signature method, the
        # request timestamp and the hashed canonical request.
        hashed_canonical_request = sha256hex(canonical_request)
        timestamp = req.header["x-zc-timestamp"]
        string2sign = '%s\n%s\n%s' % (self._signature_method,
                                      timestamp,
                                      hashed_canonical_request)

        # Step 3: sign with HMAC-SHA256 using the access key password and
        # assemble the Authorization header value.
        access_key_id = self.credential.access_key_id
        access_key_password = self.credential.access_key_password
        signature = hmac_sha256(access_key_password, string2sign)
        return "%s Credential=%s, SignedHeaders=%s, Signature=%s" % (
            self._signature_method, access_key_id, signed_headers, signature)
def _get_endpoint(self):
return self.config.domain
def _send_request(self, req):
try:
http_resp = self.proxy.request(req)
headers = dict(http_resp.headers)
response = BaseResponse(status=http_resp.status_code,
header=headers,
data=http_resp.text)
return response
except Exception as e:
raise ZenlayerCloudSdkException("NETWORK_ERROR", str(e))
    @staticmethod
    def _handle_response(resp):
        resp_data = json.loads(resp.data)
        if resp.status != 200:
            # Error responses carry an envelope with code, message and requestId.
            code = resp_data["code"]
            message = resp_data["message"]
            request_id = resp_data["requestId"]
            raise ZenlayerCloudSdkException(code, message, request_id)
return resp_data['response'] | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/common/abstract_client.py | abstract_client.py |
import logging
import os
from urllib.parse import urlparse
import certifi
import requests
logger = logging.getLogger("zenlayercloud_sdk_common")
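
# Thin HTTP transport layer: BaseRequest models an outgoing API request and
# ApiProxyClient sends it with the 'requests' library, honoring proxy
# environment variables, TLS verification and optional keep-alive.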
class ApiProxyClient(object):
    def __init__(self, host, timeout=60, proxy=None, certification=None, is_http=False, debug=False, keep_alive=False):
        self.host = host
        # Prefix the scheme if the host was given without one.
        url = urlparse(host)
        if not url.hostname:
            if is_http:
                host = "http://" + host
            else:
                host = "https://" + host
        self.request_host = host

        # Fall back to certifi's CA bundle when no certificate is given.
        self.certification = certification
        if certification is None:
            self.certification = certifi.where()
        self.timeout = timeout
        self.proxy = None
        self.debug = debug
        self.keep_alive = keep_alive

        # Pick up proxy settings from the environment unless given explicitly.
        if is_http:
            proxy = proxy or _get_proxy_from_env(host, varname="HTTP_PROXY")
        else:
            proxy = proxy or _get_proxy_from_env(host, varname="HTTPS_PROXY")
        if proxy:
            self.proxy = {"http": proxy, "https": proxy}
def request(self, req):
if self.keep_alive:
req.header["Connection"] = "Keep-Alive"
if self.debug:
logger.debug("Send request = %s" % req)
url = '%s%s' % (self.request_host, req.uri)
response = requests.request(method=req.method,
url=url,
data=req.data,
headers=req.header,
timeout=self.timeout,
verify=self.certification,
proxies=self.proxy)
if self.debug:
logger.debug("Http response = %s" % response)
return response
def _get_proxy_from_env(host, varname="HTTPS_PROXY"):
    # Respect NO_PROXY; note this is a simple substring match on the host.
    no_proxy = os.environ.get("NO_PROXY") or os.environ.get("no_proxy")
    if no_proxy and host in no_proxy:
        return None
    return os.environ.get(varname.lower()) or os.environ.get(varname.upper())
class BaseRequest(object):
def __init__(self, host="", uri="", method="", header=None, data=""):
self.header = {} if header is None else header
self.method = method
self.host = host
self.uri = uri
self.data = data
def __str__(self):
headers = "\n".join("%s: %s" % (k, v) for k, v in self.header.items())
return ("Host: %s\nMethod: %s\nUri: %s\nHeader: %s\nData: %s\n"
% (self.host, self.method, self.uri, headers, self.data))
def get_content_type(self):
return self.header["Content-Type"]
def get_host(self):
return self.header["host"]
def set_host(self, host):
self.header["host"] = host
def set_content_type(self, content_type: str):
self.header["Content-Type"] = content_type | zenlayercloud-sdk-python | /zenlayercloud-sdk-python-2.0.2.tar.gz/zenlayercloud-sdk-python-2.0.2/zenlayercloud/common/request.py | request.py |
zenmai
========================================
.. image:: https://travis-ci.org/podhmo/zenmai.svg?branch=master
:target: https://travis-ci.org/podhmo/zenmai
A toy language on top of YAML or JSON.
command line example
----------------------------------------
main.yaml
.. code-block:: yaml
code:
$import: ./filters.py
as: f
definitions:
$let:
nums: {$load: ./nums.yaml#/definitions/nums0/enum}
odds:
type: integer
enum:
$f.odds: {$get: nums}
even:
type: integer
enum:
$f.evens: {$get: nums}
nums.yaml
.. code-block:: yaml
definitions:
nums0:
type: integer
enum:
[1, 2, 3, 4, 5, 6]
nums1:
type: integer
enum:
[1, 2, 3, 5, 7, 11]
filters.py
.. code-block:: python
def odds(nums):
return [n for n in nums if n % 2 == 1]
def evens(nums):
return [n for n in nums if n % 2 == 0]
run.
.. code-block:: bash
$ zenmai examples/readme2/main.yaml
output
.. code-block:: yaml
definitions:
odds:
type: integer
enum:
- 1
- 3
- 5
even:
type: integer
enum:
- 2
- 4
- 6
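
Here ``$import`` loads ``filters.py`` under the alias ``f``, ``$load`` pulls
the ``nums0`` enum from ``nums.yaml``, ``$let``/``$get`` bind and reference
the ``nums`` value, and ``$f.odds``/``$f.evens`` apply the Python filters to
produce the final enums.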
config loader
----------------------------------------
Using zenmai as a config loader:

.. code-block:: python

    from zenmai.loader import load

    with open("config.yaml") as rf:
        d = load(rf)
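
Here ``d`` should hold the evaluated configuration as plain Python data
(dicts and lists), with zenmai directives such as ``$import`` and ``$load``
already resolved.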
| zenmai | /zenmai-0.3.0.tar.gz/zenmai-0.3.0/README.rst | README.rst |
ZenMake
=======
|Licence| |Python| |PythonImpl| |PyPI| |Docs| |GithubCI| |coveralls|
|ProjectStatus|
ZenMake is a cross-platform build system for C/C++ and some other languages.
Main features
-------------
- Build config as python (.py) or as yaml file; a minimal sketch follows this list.
- Distribution as zip application or as system package (pip).
- Automatic reconfiguring: no need to run command 'configure'.
- Compiler autodetection.
- Building and running functional/unit tests, including the ability to
  build and run tests only on changes.
- Build configs in sub directories.
- Building external dependencies.
- Supported platforms: GNU/Linux, macOS, MS Windows. Some other
  platforms like OpenBSD/FreeBSD should work as well but they
  haven't been tested.
- Supported languages:
- C: gcc, clang, msvc, icc, xlc, suncc, irixcc
- C++: g++, clang++, msvc, icpc, xlc++, sunc++
- D: dmd, ldc2, gdc; MS Windows is not supported yet
- Fortran: gfortran, ifort (should work but not tested)
- Assembler: gas (GNU Assembler)
- Supported toolkits/frameworks: SDL2, GTK3, Qt5
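
For example, a minimal ``buildconf.yaml`` could look like this (the task
name, sources and flags are illustrative only; see the documentation below
for the full format):

.. code-block:: yaml

    tasks:
      myapp:
        features: cxxprogram
        source: 'src/**/*.cpp'

    buildtypes:
      debug   : { cxxflags: -O0 -g }
      release : { cxxflags: -O2 }
      default : debug
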
Documentation
-------------
For full documentation, including installation, tutorials and PDF documents,
please see https://zenmake.readthedocs.io
Project links
-------------
- Primary git repository: https://github.com/pustotnik/zenmake
- Secondary git repository: https://gitlab.com/pustotnik/zenmake
- Issue tracker: https://github.com/pustotnik/zenmake/issues
- Pypi package: https://pypi.org/project/zenmake
- Documentation: https://zenmake.readthedocs.io
.. |Licence| image:: https://img.shields.io/pypi/l/zenmake.svg
:target: https://pypi.org/project/zenmake/
.. |Python| image:: https://img.shields.io/pypi/pyversions/zenmake.svg
:target: https://pypi.org/project/zenmake/
.. |PythonImpl| image:: https://img.shields.io/pypi/implementation/zenmake.svg
:target: https://pypi.org/project/zenmake/
.. |PyPI| image:: https://img.shields.io/pypi/v/zenmake.svg
:target: https://pypi.org/project/zenmake/
.. |Docs| image:: https://readthedocs.org/projects/zenmake/badge/?version=latest
:target: https://zenmake.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. |GithubCI| image:: https://img.shields.io/github/workflow/status/pustotnik/zenmake/CI
:target: https://github.com/pustotnik/zenmake/actions
:alt: GitHub Workflow CI Status
.. |coveralls| image:: https://coveralls.io/repos/github/pustotnik/zenmake/badge.svg
:target: https://coveralls.io/github/pustotnik/zenmake
.. |ProjectStatus| image:: https://img.shields.io/pypi/status/zenmake.svg
:target: https://pypi.org/project/zenmake/
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/README.rst | README.rst |
import sys
import os
import io
import re
import subprocess
import shutil
if sys.hexversion < 0x3050000:
raise ImportError('Python >= 3.5 is required')
here = os.path.dirname(os.path.abspath(__file__))
os.chdir(here)
import setup
#sys.path.append(os.path.join(here, setup.SRC_DIR))
from zenmake.zm import pyutils
version = setup.version
GIT_EXE = shutil.which('git')
PY_EXE = sys.executable
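
# Release helper: checks the changelog, bumps the version file, commits and
# tags it in git, builds the distribution and optionally publishes it to
# PyPI and GitHub. Usage: python make-release.py x.y.z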
def _runInShell(cmd):
try:
output = subprocess.check_output(cmd, shell = True)
except subprocess.CalledProcessError as ex:
print(ex)
sys.exit(ex.returncode)
return output.decode(sys.stdout.encoding)
def _runPyScript(args, tryPy3 = True):
python = shutil.which('python3') if tryPy3 else PY_EXE
if not python:
python = PY_EXE
return _runInShell('%s %s' % (python, args))
def _runGitCmd(args):
return _runInShell('git %s' % args)
def _getAnswerYesNo(question):
    while True:
        answer = input(question + ' [y/n]').strip().lower()
        if not answer or answer[0] == 'n':
            return False
        if answer != 'y' and answer != 'yes':
            print('Invalid answer %r' % answer)
            continue
        return True
def _writeVersionFile(ver):
filePath = version.VERSION_FILE_PATH
with io.open(filePath, 'wt') as file:
file.write(pyutils.texttype(ver))
def _bumpVersion(ver):
output = _runGitCmd("tag")
existingVers = [x[1:] for x in output.split() if x and x[0] == 'v']
if ver in existingVers:
print("Version %r already exists in git tags. Stopped." % ver)
sys.exit(1)
_writeVersionFile(ver)
verFilePath = os.path.relpath(version.VERSION_FILE_PATH, here)
_runGitCmd("add %s" % verFilePath)
_runGitCmd("commit -m 'bump version'")
_runGitCmd("tag -a v%s -m 'version %s'" % (ver, ver))
#_runGitCmd("push")
#_runGitCmd("push --tags")
_runGitCmd("push --follow-tags")
def _writeNewDevVersion(baseVer):
parsed = version.parseVersion(baseVer)
gr = parsed.groups()
nextVer = '.'.join([gr[0], gr[1], str(int(gr[2])+1)])
nextVer += '-dev'
_writeVersionFile(nextVer)
return nextVer
def _checkChangeLog(newVer):
    """Return True if the changelog contains a record for the new version."""
    filePath = os.path.join(here, 'CHANGELOG.rst')
    if not os.path.isfile(filePath):
        print("File %r doesn't exist" % filePath)
        sys.exit(1)

    pattern = r'Version\s+%s\s+' % newVer
    pattern = re.compile(pattern)

    with io.open(filePath, 'rt') as file:
        for line in file:
            if pattern.search(line):
                break
        else:
            return False
    return True
def main():
""" do main work """
cmdArgs = sys.argv[1:]
if not cmdArgs:
msg = "There is no version in args. Current version: "
msg += version.current()
print(msg)
if GIT_EXE:
print("Result of 'git describe': ")
print(_runGitCmd('describe'))
msg = "\nUsage: " + sys.argv[0] + " x.y.z where x,y,z are numbers"
print(msg)
return 0
newVer = cmdArgs[0]
if not version.checkFormat(newVer):
print('Version %r has invalid format' % newVer)
return 1
if not GIT_EXE:
print("There is no 'git'. Install 'git' to use this script.")
return 2
if not _checkChangeLog(newVer):
print('There is no records for the version %r in changelog file' % newVer)
return 3
question = 'Bump version to %s?'
question += ' It will write the version to file,'
question += '\nadd it to git repo, commit it and add git tag with the version.'
answer = _getAnswerYesNo(question % newVer)
if not answer:
return 0
print("Bumping version to %r .." % newVer)
_bumpVersion(newVer)
print("Building distribution ..")
_runPyScript('setup.py clean sdist bdist_wheel')
answer = _getAnswerYesNo('Distribution was built successfully. Publish it to pypi?')
if answer:
print("Publishing distribution ..")
_runPyScript('setup.py publish')
print("Distribution was published.")
answer = _getAnswerYesNo('Publish release to github?')
if answer:
scriptPath = os.path.join('scripts', 'publish-github-release.py')
args = '%s %s' % (scriptPath, newVer)
print("Publishing release to github ..")
_runPyScript(args, tryPy3 = True)
print("Release was published on github.")
print("Writing new dev version to file %r .." % version.VERSION_FILE_PATH)
nextVer = _writeNewDevVersion(newVer)
print("New dev version %r was written to file." % nextVer)
return 0
if __name__ == '__main__':
sys.exit(main()) | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/make-release.py | make-release.py |
Changelog
=========
Version 0.11.0 (2022-09-04)
----------------------------
Added
- embed pyyaml
- add value 'all' for variable 'for' in the 'byfilter' parameter
- add buildconf parameter export-* for libpath, stlibpath and all \*flags
- add the 'cleanall' command as replacement for the 'distclean' command
- remake/improve/extend substitutions (buildconf variables inside a text)
- add some syntactic sugar for buildconf
- get rid of ${TARGET} and rewrite substitution of ${SRC} and ${TGT}
- add ability to use 'and', 'or' and 'not' in the '\*.select'
- add 'host-os' and 'distro' for the '\*.select' conditions
- add 'if' for the 'byfilter' parameter
- add the 'run' command
- support qt5 for c++ (almost done) #31
- enable absolute paths in path patterns
- add runtime lib paths for the 'run' command and for the 'run' feature
- support python 3.10
Changed
- update waf to 2.0.23
- fix bug with auto detection of interpreter in 'runcmd'
- rename 'include' to 'incl' and 'exclude' to 'excl' for buildconf parameter 'source'
- rename buildconf parameter 'matrix' to 'byfilter'
- rename 'export-config-actions' to 'export-config-results'
- rename buildconf parameter 'config-actions' to 'configure'
- remake and improve the buildconf parameters 'export-*'
- prioritize yaml buildconf format
- fix bug of no automatic reconfiguration with changed env/cli args for install/uninstall
- rename buildconf 'features' to 'general'
- fix bug with 'enabled.select'
- improve buildconf validator
- extend/improve install directory vars
- fix problem when not all values from buildconf.cliopts have effect
- fix order of reading config values from env, cli and config file
- fix terminal width detection in CLI
- improve system libraries detection
- fix bug when zenmake could not find toolchain from sys env vars like CC, CXX, etc
- fix problem with found zero-byte executables (mostly windows problem)
- fix problem with short file names (8.3 filename) on windows
- fix bug where removing CXX from the cmd line did not trigger a reconfigure
- stop the child process in the 'run' command on keyboard interrupt
- many other fixes
Removed
- drop python 2.x, 3.4 and pypy
- remove task features aliases: more problems than profits
- remove redundant 'default-buildtype' parameter
- remove the 'platforms' parameter
Version 0.10.0 (2020-09-23)
----------------------------
Added
- support Fortran language
- add basic D language support
- add selectable parameters for buildconf task parameters
- support external dependencies
- add 'tryall' and 'after'/'before' for parallel configuration actions
- add correct buildconf validation for nested types
- add configuration action 'call-pyfunc' ('check-by-pyfunc') to parallel actions
- add configuration action 'check-code'
- add configuration actions 'pkgconfig' and 'toolconfig' (support pkg-config and other \*-config tools)
- add configuration action 'find-file'
- add 'remove-defines' for configuration action 'write-config-header'
- add option to add extra files to monitor ('monitor-files')
- add buildconf task parameters 'stlibs' and 'stlibpath'
- add buildconf task parameters 'monitlibs' and 'monitstlibs'
- add buildconf task parameter 'export-config-actions'
- add buildconf task parameter 'enabled'
- add buildconf task parameter 'group-dependent-tasks'
- add add buildconf task parameter 'install-files'
- add parameter 'build-work-dir-name' to buildconf 'features'
- add simplified form of patterns using for buildconf task parameter 'source'
- add custom substitution variables
- add detection of msvc, gfortran, ifort and D compilers for command 'sysinfo'
- add number of CPUs for command 'sysinfo'
- add 'not-for' condition for config var 'matrix'
- add ability to set compiler flags in buildconf parameter 'toolchains'
- add ability to use 'run' in buildconf as a string or function
- add cdmline options --verbose-configure (-A) and --verbose-build (-B)
- add cmdline option '--force-edeps'
- add c++ demo project with boost libraries
- add demo project with luac
- add demo project with 'strip' utility on linux
- add demo project with dbus-binding-tool
- add demo projects for gtk3
- add demo project for sdl2
- add codegen demo project
Changed
- improve support of spaces in values (paths, etc)
- improve unicode support
- use sha1 by default for hashes
- correct some english text in documentation
- detach build obj files from target files
- remove locks in parallel configuration actions
- small optimization of configuration actions
- improve validation for parallel configuration actions
- improve error handling for configuration actions with python funcs
- improve buildconf errors handling
- improve use of buildconf parameter 'project.version'
- remake/improve handling of cache/db files (see buildconf parameter 'db-format')
- reduce size of zenmake.pyz by ignoring some unused waf modules
- apply solution from waf issue 2272 to fix max path limit on windows with msvc
- rename '--build-tests' to '--with-tests', enable it for 'configure' and add ability to use -t and -T as flags
- rename 'sys-lib-path' to 'libpath' and fix bug with incorrect value
- rename 'sys-libs' to 'libs'
- rename 'conftests' to 'config-actions'
- rename config action 'check-programs' to 'find-program' and change behaviour
- make ordered configuration actions
- disable ':' in task names
- refactor code to support task features in separated python modules
- don't merge buildconf parameter 'project' in sub buildconfs (see 'subdirs')
- fix bug with toolchain supported more than one language
- fix some bugs with env vars
- fix compiling problem with the same files in different tasks
- fix bug with object file indexes
- fix command 'clean' for case when build dir is symlink
- fix Waf bug of broken 'vnum' for some toolchains
- fix parsing of cmd line in 'runcmd' on windows
- fix processing of destdir, prefix, bindir, libdir
Removed
- remove configuration action (test) 'check'
Version 0.9.0 (2019-12-10)
----------------------------
Added
- add config parameter 'startdir'
- add config parameter 'subdirs' to support sub configs
- add 'buildroot' as the command-line arg and the environment variable
- print header with some project info
- add parallel configuration tests
Changed
- fix default command-line command
- fix problem of too long paths in configuration tests on Windows
- fix some small bugs in configuration tests
- get rid of the wscript file during building
- improve buildconf validator
- improve checking of the task features
- update Waf to version 2.0.19
Removed
- remove config parameters 'project.root' and 'srcroot' | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/CHANGELOG.rst | CHANGELOG.rst |
ZenMake demo projects
=====================
This directory contains demo projects which are used for testing on different
platforms with different toolchains, libraries, etc. They also demonstrate
the use of ZenMake and can serve as examples.

These projects have many dependencies because they exercise many different
toolchains, libraries and frameworks. The full list of dependencies used for
regular testing in CI can be found in the file 'ci.yml' in the repository
directory '.github/workflows'. Different demo projects have different
dependencies; you need all of them only if you want to run every example.
Not every project can be run on all platforms.

At the time of writing the dependencies are as follows:
Linux:
- python 3.x
- pyyaml (optional)
- gcc
- clang
- nasm
- yasm
- dmd
- gdc
- ldc
- boost
- lua (5.1)
- dbus-glib (libdbus-glib-1-dev)
- gfortran
- gtk3 (libgtk-3-dev)
- sdl2 (libsdl2-dev)
- qt5 (qt5-default)
- qt5 tools: qmake, uic, rcc, lrelease, lupdate
macOS:
- python 3.x
- pyyaml (optional)
- clang
- dmd
- ldc
- boost
- qt5
MS Windows:
- python 3.x
- pyyaml (optional)
- msvc (Microsoft Visual C++)
- boost
- qt5 (msvc and mingw versions in C:\Qt)
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/demos/README.rst | README.rst |
import os
import sys
import getpass
import tempfile
joinpath = os.path.join
#username = getpass.getuser() # portable way to get user name
#tmpdir = tempfile.gettempdir() # portable way to get temp directory
iswin32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt'
#realbuildroot = joinpath(tmpdir, username, 'projects', 'complex-unittest', 'build')
project = {
'name' : 'zm-complex-unittest',
}
LS_CMD = 'dir /B' if iswin32 else 'ls'
EXE = 'exe'
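# LS_CMD and EXE are referenced below as ${LS_CMD} and ${EXE} -- custom
# substitution variables that ZenMake apparently picks up from this
# buildconf module.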
def somefunc(args):
print("somefunc: buildtype = %r" % args['buildtype'])
tasks = {
'shlib' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
'includes' : '.',
'run' : "echo 'This is runcmd in task \"shlib\"'",
#'configure' : [
# dict(do = 'check-headers', names = 'iostream'),
#],
# testing of 'configure.select' feature
'configure.select' : {
'default' : [
dict(do = 'check-headers', names = 'iostream'),
],
'linux' : [
dict(do = 'check-headers', names = 'iostream cstdio'),
]
}
},
'stlib' : {
'features' : 'cxxstlib',
'source' : 'stlib/**/*.cpp',
'includes' : '.',
'configure' : [
dict(do = 'check-headers', names = 'cstdio'),
],
},
'shlibmain' : {
'features' : 'cxxshlib',
'source' : 'shlibmain/**/*.cpp',
'includes' : '.',
'use' : 'shlib stlib ls',
},
'complex' : {
'features' : 'cxxprogram runcmd',
'source' : 'prog/**/*.cpp',
'includes' : '.',
'use' : 'shlibmain',
'run' : "echo 'This is runcmd in task \"complex\"'",
'install-path' : '$(prefix)/${EXE}',
},
'echo' : {
'run' : {
'cmd' : "echo say hello",
'repeat' : 2,
},
'use' : 'shlibmain',
'target' : '',
},
'ls' : {
'run' : {
'cmd' : '${LS_CMD}',
# a different way for the same result
#'cmd' : iswin32 and "dir /B" or "ls",
'cwd' : '.',
},
'target' : '',
},
'test.py' : {
'run' : {
'cmd' : '${PYTHON} tests/test.py',
'cwd' : '.',
'env' : { 'JUST_ENV_VAR' : 'qwerty', },
'shell' : False,
},
'use' : 'shlibmain',
'configure' : [ dict(do = 'find-program', names = 'python python3'), ],
'target' : '',
},
'altscript' : {
'run' : { 'cmd' : '"alt script.py"', 'cwd' : '.' },
'target' : '',
},
'pyfunc' : {
'run': somefunc,
'target' : '',
},
#### tasks for build/run tests
'stlib-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_stlib.cpp',
'use' : 'stlib testcmn',
},
'test from script' : {
'features' : 'test',
'run' : {
'cmd' : 'tests/test.py',
#'cmd' : '${PYTHON} tests/test.py',
'cwd' : '.',
'shell' : False,
},
'use' : 'complex',
'configure' : [ dict(do = 'find-program', names = 'python python3'), ]
},
'testcmn' : {
'features' : 'cxxshlib test',
'source' : 'tests/common.cpp',
'includes' : '.',
},
'shlib-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_shlib.cpp',
'use' : 'shlib testcmn',
'run' : {
'cmd' : '$(tgt) a b c',
#'cwd' : '.', # can be path relative to current project root path
#'cwd' : '.1',
'env' : { 'AZ' : '111', 'BROKEN_TEST' : 'false'},
'repeat' : 2,
'timeout' : 10, # in seconds, Python 3 only
'shell' : False,
},
'configure' : [ dict(do = 'check-headers', names = 'vector'), ]
},
'shlibmain-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_shlibmain.cpp',
'use' : 'shlibmain testcmn',
},
    #### these tasks would always fail, but they're disabled: it's to check the 'enabled' param
'always-failed' : {
'run': "asdfghjklzxcvb",
'enabled' : False,
},
'always-failed2' : {
'run': "asdfghjklzxcvb2",
'enabled.select' : { 'default': False }
},
}
buildtypes = {
# -fPIC is necessary to compile static lib
'debug' : {
'toolchain.select' : {
'default': 'g++',
'macos' : 'clang++',
'windows': 'msvc',
},
'cxxflags.select' : {
'g++ or clang++' : '-fPIC -O0 -g',
'msvc' : '/Od /EHsc',
},
'linkflags.select' : {
'g++': '-Wl,--as-needed',
},
},
'release' : {
'toolchain.select' : {
'default': 'g++',
'macos' : 'clang++',
'windows': 'msvc',
},
'cxxflags.select' : {
'g++ or clang++' : '-fPIC -O2',
'msvc' : '/O2 /EHsc',
},
'linkflags.select' : {
'g++': '-Wl,--as-needed',
},
},
'default' : 'debug',
}
byfilter = [
#{ 'for' : 'all', 'set' : { 'rpath' : '.', } },
] | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/demos/cpp/09-complex-unittest/buildconf.py | buildconf.py |
buildtypes = {
# -fPIC is necessary to compile static lib
'debug-gcc' : { 'cxxflags' : '-fPIC -O0 -g' },
'release-gcc' : { 'cxxflags' : '-fPIC -O2' },
'debug-clang' : { 'cxxflags' : '-fPIC -O0 -g' },
'release-clang': { 'cxxflags' : '-fPIC -O2' },
'debug-msvc' : { 'cxxflags' : '/Od /EHsc' },
'release-msvc' : { 'cxxflags' : '/O2 /EHsc' },
'default' : {
'linux': 'debug-gcc',
'darwin': 'debug-clang',
'windows': 'debug-msvc',
},
}
byfilter = [
{
'for' : 'all',
'set' : {
'includes' : '.',
'rpath' : '.',
}
},
{
'for' : { 'task' : 'shlib shlibmain', },
'set' : { 'features' : 'cxxshlib', }
},
{
'for' : { 'task' : 'shlib', },
'set' : { 'source' : 'shlib/**/*.cpp', }
},
{
'for' : { 'task' : 'stlib', },
'set' : {
'features' : 'cxxstlib',
'source' : 'stlib/**/*.cpp',
}
},
{
'for' : { 'task' : 'shlibmain', },
'set' : {
'source' : 'shlibmain/**/*.cpp',
'use' : 'shlib stlib',
}
},
{
'for' : { 'task' : 'test', },
'set' : {
'features' : 'cxxprogram',
'source' : 'prog/**/*.cpp',
'use' : 'shlibmain',
}
},
{
'for' : { 'buildtype' : ['debug-gcc', 'release-gcc'], 'platform' : 'linux', },
'set' : {
'toolchain' : 'g++',
'linkflags' : '-Wl,--as-needed',
}
},
{
'for' : { 'buildtype' : 'release-gcc', 'platform' : 'linux', },
'set' : { 'cxxflags' : '-fPIC -O3', }
},
{
'for' : { 'buildtype' : ['debug-clang', 'release-clang'], 'platform' : 'linux darwin', },
'set' : {
'toolchain' : 'clang++',
}
},
{
'for' : { 'buildtype' : ['debug-msvc', 'release-msvc'], 'platform' : 'windows', },
'set' : {
'toolchain' : 'msvc',
},
},
] | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/demos/cpp/06-complex-alt-py/buildconf.py | buildconf.py |
from buildconf_tools import *
cliopts = {
#'color': 'no',
'jobs' : { 'build' : 4 },
#'progress' : {'any': False, 'build': True },
'verbose-build' : 1,
}
general = {
'monitor-files' : 'buildconf_tools.py',
'build-work-dir-name' : 'wrk',
}
tasks = {
'shlib' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
'includes' : 'include',
'defines' : 'ABC=1 DOIT MY_LONG_STRING="some long string"',
'export' : 'includes defines',
'install-path' : False,
'configure' : [
{
'do' : 'parallel', 'actions' : [
{ 'do' : 'check-headers', 'names' : 'cstdio iostream', 'id' : 'first' },
{ 'do' : 'check-headers', 'names' : 'stdlib.h', 'after' : 'first' },
{ 'do' : 'check-headers', 'names' : 'stdlibasd.h', 'mandatory' : False },
{ 'do' : 'check-libs', 'names' : 'boost_random', 'mandatory' : not iswin32 },
],
'tryall' : True,
},
],
},
'stlib' : {
'features' : 'cxxstlib',
'source' : 'stlib/**/*.cpp',
},
'shlibmain' : {
'features' : 'cxxshlib',
'source' : 'shlibmain/**/*.cpp',
'use' : 'shlib stlib',
'install-path' : '$(prefix)/lbr',
},
'main' : {
'features' : 'cxxprogram',
'source' : 'prog/**/*.cpp',
'use' : 'shlibmain',
'target' : '@bld', # to check 'build-work-dir-name'
},
}
buildtypes = {
'debug-gcc' : {
'toolchain' : 'g++',
'cxxflags' : '-O0 -g',
},
'release-gcc' : {
'toolchain' : 'g++',
'cxxflags' : '-O2',
},
'debug-clang' : {
'toolchain' : 'clang++',
'cxxflags' : '-O0 -g',
},
'release-clang' : {
'toolchain' : 'clang++',
'cxxflags' : '-O2',
},
'debug-msvc' : {
'toolchain' : 'msvc',
'cxxflags' : '/Od',
},
'release-msvc' : {
'toolchain' : 'msvc',
'cxxflags' : '/O2',
},
'default' : {
'linux': 'debug-gcc',
'darwin': 'debug-clang',
'windows': 'debug-msvc',
},
}
toolchains = {
'g++': {
'LINKFLAGS' : '-Wl,--as-needed',
'CXXFLAGS' : '-fPIC -Wall',
},
'clang++': {
'CXXFLAGS' : '-fPIC',
},
'msvc': {
'CXXFLAGS' : '/EHsc',
},
}
general = {
'db-format' : 'py',
'provide-edep-targets' : True,
}
foolibdir = '../foo-lib'
def triggerConfigure(**kwargs):
#print(kwargs)
return False
edeps = {
'foo-lib-d' : {
'rootdir': foolibdir,
'export-includes' : foolibdir,
'targets': {
'shared-lib' : {
'dir' : foolibdir + '/_build_/debug',
'type': 'shlib',
'name': 'fooutil',
},
'static-lib' : {
'dir' : foolibdir + '/_build_/debug',
'type': 'stlib',
'name': 'fooutil',
},
},
'rules' : {
'configure': { # just for testing
'cmd' : './configure',
'shell' : True,
'trigger' : {
'paths-dont-exist' : dict(
startdir = foolibdir,
incl = '**/*.label',
),
'func' : triggerConfigure,
},
},
'build' : 'make debug',
#'build' : {
# 'cmd' : 'make debug',
# 'shell' : False,
#},
#'clean' : 'make cleandebug',
'clean' : {
# clean 'foo-lib' with the 'clean' command; it's just a demo
'cmd' : 'make cleandebug',
'shell' : False,
'zm-commands' : 'clean',
},
},
},
'foo-lib-r' : {
'rootdir': foolibdir,
'export-includes' : foolibdir,
'targets': {
'shared-lib' : {
'dir' : foolibdir + '/_build_/release',
'type': 'shlib',
'name': 'fooutil',
},
'static-lib' : {
'dir' : foolibdir + '/_build_/release',
'type': 'stlib',
'name': 'fooutil',
},
},
'rules' : {
'configure': '',
'build' : 'make release',
'clean' : 'make cleanrelease',
},
},
}
tasks = {
'util' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
'use.select' : {
'debug' : 'foo-lib-d:static-lib',
'release' : 'foo-lib-r:static-lib',
},
'configure' : [
{ 'do' : 'check-headers', 'names' : 'cstdio iostream' },
],
},
'program' : {
'features' : 'cxxprogram',
'source' : 'prog/**/*.cpp',
'use.select' : {
'debug' : 'util foo-lib-d:shared-lib',
'release' : 'util foo-lib-r:shared-lib',
},
},
}
buildtypes = {
'debug' : {
'cxxflags' : '-O0 -g',
},
'release' : {
'cxxflags' : '-O2',
},
'default' : 'debug',
}
def check():
# some checking
return True
def check2(**kwargs):
task = kwargs['taskname']
buildtype = kwargs['buildtype']
# some checking
#return True
return False
tasks = {
'extra' : {
'features' : 'cxxshlib',
'source' : 'src/extra.cpp',
'includes' : 'src',
'use' : 'corelib',
'ver-num' : '0.3.0',
'configure' : [
dict(do = 'check-headers', names = 'cstdio iostream'),
check,
dict(do = 'check-headers', names = 'iostream'), # for test only
dict(do = 'write-config-header'),
],
},
'engine' : {
'features' : 'cxxshlib',
'source' : dict( incl = 'src/**/*.cpp', excl = 'src/extra*' ),
'includes' : 'src',
'use' : 'extra',
'ver-num' : '0.3.1',
'configure' : [
dict( do = 'check-headers', names = 'stdio.h iostream' ),
dict( do = 'parallel', actions = [
dict(do = 'check-headers', names = 'cstdio iostream', id = 'first'),
dict(do = 'check-headers', names = 'stdlib.h', after = 'first'),
dict(do = 'check-headers', names = 'stdlibasd.h', mandatory = False),
# for test only
dict(do = 'check-headers', names = 'iostream'),
dict(do = 'check-headers', names = 'iostream'),
dict(do = 'check-headers', names = 'iostream'),
check,
dict(do = 'call-pyfunc', func = check2, mandatory = False),
],
#tryall = True,
tryall = False,
#mandatory = False,
),
dict( do = 'write-config-header'),
dict( do = 'check-headers', names = 'string vector' ),
],
'export' : 'includes config-results',
'defines' : '$${VAR_A_NAME}="test" $${VAR_B_NAME}="check"', # for test only
},
'extra-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_extra.cpp',
'includes' : 'src ../../tests/src',
'use' : 'extra testcmn',
},
}
buildtypes = {
'debug' : {
'cxxflags.select': {
'default' : '-O1 -g',
#'msvc' : '/Od /EHsc',
},
},
}
.. include:: global.rst.inc
.. highlight:: python
.. _buildconf-edep-params:
Build config: edeps
=============================
The config parameter ``edeps`` is a :ref:`dict<buildconf-dict-def>` with
configurations of external non-system dependencies.
A general description of external dependencies is :ref:`here<dependencies-external>`.
Each such dependency can have its own unique name and parameters:
.. _buildconf-edep-params-rootdir:
rootdir
"""""""""""""""""""""
A path to the root of the dependency project. It should be a path to the directory
with the build script of the dependency project.
This path can be relative to the :ref:`startdir<buildconf-startdir>` or absolute.
targets
"""""""""""""""""""""
A :ref:`dict<buildconf-dict-def>` with descriptions of targets of the
dependency project. Each target has a reference name which can be used in
:ref:`use<buildconf-taskparams-use>` in the format
``dependency-name:target-reference-name`` and the following parameters:
:dir:
A path to the directory with the current target file. Usually it's some build directory.
This path can be relative to the :ref:`startdir<buildconf-startdir>` or absolute.
:type:
It's the type of the target file. This type affects the linking of
the build tasks and some other things. Supported types:
:stlib:
The target file is a static library.
:shlib:
The target file is a shared library.
:program:
The target file is an executable file.
:file:
The target file is any file.
:name:
It is the base name of the target file which is used
for detecting the resulting target file name depending on the destination
operating system, selected toolchain, value of ``type``, etc.
If it's not set, the target reference name is used.
:ver-num:
It's a version number for the target file if it is a shared library.
It can affect the resulting target file name.
:fname:
It's the real file name of the target. Usually it's detected by ZenMake
from other parameters. You can set it manually, but it's not
recommended unless you really need it.
If the parameter ``type`` is equal to ``file``, the value of this parameter
is equal to the value of the parameter ``name`` by default.
Example in YAML format for non-ZenMake dependency:
.. code-block:: yaml
targets:
# 'shared-lib' and 'static-lib' are target reference names
shared-lib:
dir : ../foo-lib/_build_/debug
type: shlib
name: fooutil
static-lib:
dir : ../foo-lib/_build_/debug
type: stlib
name: fooutil
Example in Python format for non-ZenMake dependency:
.. code-block:: python
'targets': {
# 'shared-lib' and 'static-lib' are target reference names
'shared-lib' : {
'dir' : '../foo-lib/_build_/debug',
'type': 'shlib',
'name': 'fooutil',
},
'static-lib' : {
'dir' : '../foo-lib/_build_/debug',
'type': 'stlib',
'name': 'fooutil',
},
},
.. _buildconf-edep-params-export-includes:
export-includes
"""""""""""""""""""""
A list of paths with 'includes' for C/C++/D/Fortran compilers to export from
the dependency project for all build tasks which depend on the current dependency.
Paths should be relative to the :ref:`startdir<buildconf-startdir>` or
absolute, but the latter variant is not recommended.
If paths contain spaces and all these paths are listed
in one string then each such path must be in quotes.
rules
"""""""""""""""""""""
A :ref:`dict<buildconf-dict-def>` with descriptions of rules to produce
target files of the dependency. Each rule has its own reserved name and
parameters to run. The allowed rule names are:
``configure``, ``build``, ``test``, ``clean``, ``install``, ``uninstall``.
The parameters for each rule can be a string with a command line to run or
a dict with attributes:
:cmd:
A command line to run. It can be any suitable command line.
:cwd:
A working directory where to run ``cmd``. By default it's
the :ref:`rootdir<buildconf-edep-params-rootdir>`.
This path can be relative to the :ref:`startdir<buildconf-startdir>` or absolute.
:env:
Environment variables for ``cmd``. It's a ``dict`` where each
key is a variable name and each value is the value of the env variable.
:timeout:
A timeout for ``cmd`` in seconds. By default there is no timeout.
:shell:
If shell is True, the specified command will be executed through
the shell. By default it is False.
In some cases it can be set to True by ZenMake even though you
set it to False.
:trigger:
A dict that describes conditions to run the rule.
If any configured trigger returns True then the rule will be run.
You can configure one or more triggers for each rule.
ZenMake supports the following types of trigger:
:always:
If it's True then the rule will always be run. If it's False and
there are no other triggers then the rule will not be run automatically.
:paths-exist:
This trigger returns True only if configured paths exist on
a file system. You can set paths as a string, list of strings or as
a dict like for config task parameter
:ref:`source<buildconf-taskparams-source>`.
Examples in YAML format:
.. code-block:: yaml
trigger:
paths-exist: /etc/fstab
trigger:
paths-exist: [ /etc/fstab, /tmp/somefile ]
trigger:
paths-exist:
startdir: '../foo-lib'
incl: '**/*.label'
Examples in Python format:
.. code-block:: python
'trigger': {
'paths-exist' : '/etc/fstab',
}
'trigger': {
'paths-exist' : ['/etc/fstab', '/tmp/somefile'],
}
'trigger': {
'paths-exist' : dict(
startdir = '../foo-lib',
incl = '**/*.label',
),
}
:paths-dont-exist:
This trigger is the same as ``paths-exist`` but returns True if
configured paths don't exist.
:env:
This trigger returns True only if all configured environment variables
exist and are equal to the configured values. The format is simple:
it's a ``dict`` where each key is a variable name and each value
is the value of the environment variable.
:no-targets:
If it is True, this trigger returns True only if any of the target files
for the current dependency doesn't exist. It can be useful to detect
the need to run the 'build' rule.
This trigger cannot be used in the ZenMake command 'configure'.
:func:
This trigger is a custom Python function that must return True or False.
This function gets the following parameters as arguments:
:zmcmd:
It's the name of the current ZenMake command that has been used
to run the rule.
:targets:
A list of configured/detected targets. It can be None if the rule
has been run from the command 'configure'.
It's better to use `**kwargs` in this function because some new
parameters can be added in the future.
This trigger cannot be used in a YAML buildconf file.
.. note::
For any non-ZenMake dependency there are the following
default triggers for rules:
configure: { always: true }
build: { no-targets: true }
Any other rule: { always: false }
.. note::
You can use command line option ``-E``/``--force-edeps`` to run
rules for external dependencies without checking triggers.
:zm-commands:
A list with names of ZenMake commands in which the selected rule will be run.
By default each rule can be run only in the ZenMake command with the same name.
For example, rule 'configure' by default can be run with the command
'configure' and rule 'build' with the command 'build', etc.
But here you can set up a different behavior.
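For illustration, a sketch of a ``build`` rule using several of these
attributes in YAML format (the command and the timeout value are just examples):

.. code-block:: yaml

    rules:
      build:
        cmd : make release
        timeout : 60
        shell : false
        trigger :
          no-targets: true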
.. _buildconf-edep-params-buildtypes-map:
buildtypes-map
"""""""""""""""""""""
This parameter is used only for external dependencies which are other
ZenMake projects. By default ZenMake uses the value of the current ``buildtype``
for all such dependencies to run rules, but in some cases the buildtype names
may not match. For example, the current project can have buildtypes
``debug`` and ``release`` while the dependency project can have
buildtypes ``dbg`` and ``rls``. In this case
you can use this parameter to set up a map of these buildtype names.
Example in YAML format:
.. code-block:: yaml
buildtypes-map:
debug : dbg
release : rls
Example in Python format:
.. code-block:: python
'buildtypes-map': {
'debug' : 'dbg',
'release' : 'rls',
}
Some examples can be found in the directory 'external-deps'
in the repository `here <repo_demo_projects_>`_.
.. include:: global.rst.inc
.. highlight:: console
.. _dependencies:
Dependencies
============
ZenMake supports several types of dependencies for build projects:
.. contents::
:local:
System libraries
------------------------------
System libraries can be specified by using the config parameter
:ref:`libs<buildconf-taskparams-libs>`.
Usually you don't need to set paths to system libraries but you can set them
using the config parameter :ref:`libpath<buildconf-taskparams-libpath>`.
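For instance, a minimal sketch in YAML format (the task and library names
are just examples):

.. code-block:: yaml

    tasks:
      myprog:
        features : cxxprogram
        source   : 'src/**/*.cpp'
        # link against the system libraries 'm' and 'pthread'
        libs     : m pthread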
Local libraries
------------------------------
Local libraries are libraries from your project. Use the config parameter
:ref:`use<buildconf-taskparams-use>` to specify such dependencies.
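For example, a hypothetical program task can be linked against a local library
task from the same project (YAML format):

.. code-block:: yaml

    tasks:
      corelib:
        features : cxxshlib
        source   : 'lib/**/*.cpp'
      myprog:
        features : cxxprogram
        source   : 'prog/**/*.cpp'
        # link against the local 'corelib' build task
        use      : corelib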
.. _dependencies-subdirs:
Sub buildconfs
------------------------------
You can organize building of your project by using more than one
:ref:`buildconf<buildconf>` file in some sub directories of your project.
In this case ZenMake merges parameters from all such buildconf files.
But you must specify these sub directories by using the config parameter
:ref:`subdirs<buildconf-subdirs>`.
Parameters in a sub buildconf can always overwrite matching parameters
from the parent :ref:`buildconf<buildconf>`. But some parameters are not changed.
These parameters can be set only in the top-level buildconf:
``buildroot``, ``realbuildroot``, ``project``, ``general``, ``cliopts``
Also the default build type can be set only in the top-level buildconf.
These parameters are always used without merging with parent buildconfs:
``startdir``, ``subdirs``, ``tasks``
ZenMake doesn't merge your own variables in your buildconf
files if you use some of them.
Other variables are merged, including ``byfilter``. But build tasks in
the ``byfilter`` which are not from the current buildconf are ignored,
except explicitly specified ones.
Some examples can be found in the directory 'subdirs'
in the repository `here <repo_demo_projects_>`_.
.. _dependencies-external:
External dependencies
------------------------------
A few basic types of external dependencies can be used:
- :ref:`Depending on other ZenMake projects<dependencies-external-zenmake>`
- :ref:`Depending on non-ZenMake projects<dependencies-external-non-zenmake>`
See full description of buildconf parameters for external dependencies
:ref:`here<buildconf-edep-params>`.
.. _dependencies-external-zenmake:
ZenMake projects
""""""""""""""""""""
Configuration for this type of dependency is simple in most cases: you set up
the config variable :ref:`edeps<buildconf-edeps>` with
the :ref:`rootdir<buildconf-edep-params-rootdir>` and
the :ref:`export-includes<buildconf-edep-params-export-includes>` (if necessary)
and then specify this dependency in :ref:`use<buildconf-taskparams-use>`, using existing
task names from the dependency buildconf.
Example in YAML format:
.. code-block:: yaml
edeps:
zmdep:
rootdir: ../zmdep
export-includes: ../zmdep
tasks:
myutil:
features : cxxshlib
source : 'shlib/**/*.cpp'
# Names 'calclib' and 'printlib' are existing tasks in 'zmdep' project
use: zmdep:calclib zmdep:printlib
Example in Python format:
.. code-block:: python
edeps = {
'zmdep' : {
'rootdir': '../zmdep',
'export-includes' : '../zmdep',
},
}
tasks = {
'myutil' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
# Names 'calclib' and 'printlib' are existing tasks in 'zmdep' project
'use' : 'zmdep:calclib zmdep:printlib',
},
}
Additionally, in some cases, the parameter
:ref:`buildtypes-map<buildconf-edep-params-buildtypes-map>` can be useful.
Also it's recommended to always use the same version of ZenMake for all such projects.
Otherwise some compatibility problems can occur.
.. note::
Command line options ``--force-edeps`` and ``--buildtype``
for the current project will affect rules for its external dependencies
while all other command line options will be ignored.
You can use :ref:`environment variables<envvars>`
to have effect on all external dependencies. And, of course, you can set up
each buildconf in the dependencies to have the desired behavior.
.. _dependencies-external-non-zenmake:
Non-ZenMake projects
"""""""""""""""""""""
You can use external dependencies from some other build systems but in this
case you need to set up more parameters in the config
variable :ref:`edeps<buildconf-edeps>`. Full description of these
parameters can be found :ref:`here<buildconf-edep-params>`. Only one parameter
``buildtypes-map`` is not used for such dependencies.
If it's necessary to set up different targets for different buildtypes you
can use :ref:`selectable parameters<buildconf-select>` in build tasks of your
ZenMake project.
Example in Python format:
.. code-block:: python
foolibdir = '../foo-lib'
edeps = {
'foo-lib-d' : {
'rootdir': foolibdir,
'export-includes' : foolibdir,
'targets': {
'shared-lib' : {
'dir' : foolibdir + '/_build_/debug',
'type': 'shlib',
'name': 'fooutil',
},
},
'rules' : {
'build' : 'make debug',
},
},
'foo-lib-r' : {
'rootdir': foolibdir,
'export-includes' : foolibdir,
'targets': {
'shared-lib' : {
'dir' : foolibdir + '/_build_/release',
'type': 'shlib',
'name': 'fooutil',
},
},
'rules' : {
'build' : 'make release',
},
},
}
tasks = {
'util' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
},
'program' : {
'features' : 'cxxprogram',
'source' : 'prog/**/*.cpp',
'use.select' : {
'debug' : 'util foo-lib-d:shared-lib',
'release' : 'util foo-lib-r:shared-lib',
},
},
}
Common notes
"""""""""""""""""""""
You can use command line option ``-E``/``--force-edeps`` to run rules for
external dependencies without checking triggers.
Some examples can be found in the directory 'external-deps'
in the repository `here <repo_demo_projects_>`_.
.. include:: global.rst.inc
.. highlight:: console
.. _faq:
FAQ
================================
**Why is Python used?**
It's mostly because Waf_ is implemented in Python.
**Can I use buildconf.py as a usual Python script?**
Yes, you can. Such behavior is supported as long as you don't try to use
reserved config variable names for inappropriate purposes.
**I want to install my project via zenmake without 'bin' and 'lib64', everything in one directory**
Example on Linux::
DESTDIR=your_install_path PREFIX=/ BINDIR=/ LIBDIR=/ zenmake install
or::
PREFIX=your_install_path BINDIR=your_install_path LIBDIR=your_install_path zenmake install
.. include:: global.rst.inc
.. highlight:: console
.. _envvars:
Environment variables
=====================
ZenMake supports some environment variables that can be used. Most of examples
are for POSIX platforms (Linux/MacOS) with ``gcc`` and ``clang`` installed.
Also see :ref:`bash-like substitutions<buildconf-substitutions-vars>`.
AR
Set the archive-maintaining program.
CC
Set C compiler. It can be a name of an installed system compiler or any path
to an existing compiler. It overrides values from :ref:`build config<buildconf>`
if present. Example::
CC=clang zenmake build -B
CXX
Set C++ compiler. It can be a name of an installed system compiler or any path
to an existing compiler. It overrides values from :ref:`build config<buildconf>`
if present. Example::
CXX=clang++ zenmake build -B
DC
Set D compiler. It can be a name of an installed system compiler or any path
to an existing compiler. It overrides values from :ref:`build config<buildconf>`
if present. Example::
DC=ldc2 zenmake build -B
FC
Set Fortran compiler. It can be a name of an installed system compiler or any path
to an existing compiler. It overrides values from :ref:`build config<buildconf>`
if present. Example::
FC=gfortran zenmake build -B
AS
Set Assembler. It can be a name of an installed system compiler or any path
to an existing compiler. It overrides values from :ref:`build config<buildconf>`
if present. Example::
AS=gcc zenmake build -B
ARFLAGS
Flags to give the archive-maintaining program.
CFLAGS
Extra flags to give to the C compiler. Example::
CFLAGS='-O3 -fPIC' zenmake build -B
CXXFLAGS
Extra flags to give to the C++ compiler. Example::
CXXFLAGS='-O3 -fPIC' zenmake build -B
CPPFLAGS
Extra flags added at the end of compilation commands for C/C++.
DFLAGS
Extra flags to give to the D compiler. Example::
DFLAGS='-O' zenmake build -B
FCFLAGS
Extra flags to give to the Fortran compiler. Example::
FCFLAGS='-O0' zenmake build -B
ASFLAGS
Extra flags to give to the Assembler. Example::
ASFLAGS='-Os' zenmake build -B
LINKFLAGS
Extra list of linker flags for C/C++/D/Fortran. Example::
LINKFLAGS='-Wl,--as-needed' zenmake build -B
LDFLAGS
Extra list of linker flags at the end of the link command for C/C++/D/Fortran. Example::
LDFLAGS='-Wl,--as-needed' zenmake build -B
ASLINKFLAGS
Extra list of linker flags for Assembler files. Example::
ASLINKFLAGS='-s' zenmake build -B
JOBS
Default value for the amount of parallel jobs. Has no effect when ``-j`` is
provided on the command line. Example::
JOBS=2 zenmake build
NUMBER_OF_PROCESSORS
Default value for the amount of parallel jobs when the JOBS environment
variable is not provided; it is usually set on windows systems. Has no
effect when ``-j`` is provided on the command line.
NOCOLOR
When set to a non-empty value, colors in console outputs are disabled.
Has no effect when ``--color`` is provided on the command line. Example::
NOCOLOR=1 zenmake build
NOSYNC
When set to a non-empty value, console outputs are displayed in an
asynchronous manner; console text outputs may appear faster on some
platforms. Example::
NOSYNC=1 zenmake build
BUILDROOT
A path to the root of a project build directory.
The path can be absolute or relative to the current directory.
See also :ref:`buildroot<buildconf-buildroot>`.
Example::
BUILDROOT=bld zenmake build
DESTDIR
Default installation base directory when ``--destdir`` is not provided on
the command line. It's mostly for installing to a temporary directory.
For example it can be used to create deb/rpm/etc packages.
Example::
DESTDIR=dest zenmake install
.. _envvars-prefix:
PREFIX
Set value of built-in variable :ref:`prefix<buildconf-builtin-vars-prefix>`
as the installation prefix.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``. Example::
PREFIX=/usr zenmake install
.. _envvars-execprefix:
EXEC_PREFIX
Set value of built-in variable :ref:`execprefix<buildconf-builtin-vars-execprefix>`
as the installation prefix for machine-specific files.
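Example::

    EXEC_PREFIX=/usr zenmake install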
.. _envvars-bindir:
BINDIR
Set value of built-in variable :ref:`bindir<buildconf-builtin-vars-bindir>`
as the directory for installing executable programs that users can run.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
Example::
BINDIR=/usr/bin zenmake install
.. _envvars-sbindir:
SBINDIR
Set value of built-in variable :ref:`sbindir<buildconf-builtin-vars-sbindir>`
as the directory for installing executable programs that can be run, but are
only generally useful to system administrators.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-libexecdir:
LIBEXECDIR
Set value of built-in variable :ref:`libexecdir<buildconf-builtin-vars-libexecdir>`
as the directory for installing executable programs to be run by other programs
rather than by users.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-libdir:
LIBDIR
Set value of built-in variable :ref:`libdir<buildconf-builtin-vars-libdir>`
as the installation directory for object files and libraries of object code.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-sysconfdir:
SYSCONFDIR
Set value of built-in variable :ref:`sysconfdir<buildconf-builtin-vars-sysconfdir>`
as the installation directory for read-only single-machine data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-sharedstatedir:
SHAREDSTATEDIR
Set value of built-in variable :ref:`sharedstatedir<buildconf-builtin-vars-sharedstatedir>`
as the installation directory for modifiable architecture-independent data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-localstatedir:
LOCALSTATEDIR
Set value of built-in variable :ref:`localstatedir<buildconf-builtin-vars-localstatedir>`
as the installation directory for modifiable single-machine data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-includedir:
INCLUDEDIR
Set value of built-in variable :ref:`includedir<buildconf-builtin-vars-includedir>`
as the installation directory for C header files.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-datarootdir:
DATAROOTDIR
Set value of built-in variable :ref:`datarootdir<buildconf-builtin-vars-datarootdir>`
as the installation root directory for read-only architecture-independent data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-datadir:
DATADIR
Set value of built-in variable :ref:`datadir<buildconf-builtin-vars-datadir>`
as the installation directory for read-only architecture-independent data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-appdatadir:
APPDATADIR
Set value of built-in variable :ref:`appdatadir<buildconf-builtin-vars-appdatadir>`
as the installation directory for read-only architecture-independent application data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-docdir:
DOCDIR
Set value of built-in variable :ref:`docdir<buildconf-builtin-vars-docdir>`
as the installation directory for documentation.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-mandir:
MANDIR
Set value of built-in variable :ref:`mandir<buildconf-builtin-vars-mandir>`
as the installation directory for man documentation.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-infodir:
INFODIR
Set value of built-in variable :ref:`infodir<buildconf-builtin-vars-infodir>`
as the installation directory for info documentation.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-localedir:
LOCALEDIR
Set value of built-in variable :ref:`localedir<buildconf-builtin-vars-localedir>`
as the installation directory for locale-dependent data.
This path is always considered as an absolute path or
as a relative path to ``DESTDIR``.
.. _envvars-qt5bindir:
QT5_BINDIR
Set the bin directory of the installed Qt5 toolkit. This directory must
contain tools such as qmake, moc, uic, etc.
This path must be an absolute native path or a path relative
to the current working directory, but the latter variant is not recommended.
This variable can be especially useful for standalone installation of Qt5,
for example on Windows.
The ``PATH`` and ``QT5_SEARCH_ROOT`` environment variables are ignored
if ``QT5_BINDIR`` is not empty.
.. _envvars-qt5libdir:
QT5_LIBDIR
Set the library directory of the installed Qt5 toolkit.
This path must be an absolute native path or a path relative
to the current working directory, but the latter variant is not recommended.
Usually you don't need to use this variable if you
set the ``QT5_BINDIR`` variable.
.. _envvars-qt5includes:
QT5_INCLUDES
Set the directory with 'includes' of the installed Qt5 toolkit.
This path must be an absolute native path or a path relative
to the current working directory, but the latter variant is not recommended.
Usually you don't need to use this variable if you
set the ``QT5_BINDIR`` variable. This variable has no effect
on systems with pkg-config/pkgconf installed (unless you
turn on the :ref:`QT5_NO_PKGCONF<envvars-qt5nopkgconf>`).
.. _envvars-qt5searchroot:
QT5_SEARCH_ROOT
Set the root directory to search for installed Qt5 toolkit(s).
ZenMake will try to find the bin directories of all Qt5 toolkits in this
directory recursively. Do not set this variable to a path like ``/`` or ``C:\``
because it will slow down the detection very much.
Qt5 toolkits found in this directory have priority over values from
the ``PATH`` environment variable.
You can set more than one directory using the path separator
(``;`` on Windows and ``:`` on other OS) like this::
QT5_SEARCH_ROOT=/usr/local/qt:/usr/local/opt/qt zenmake
It defaults to ``C:\Qt`` on Windows.
Usually you don't need to use this variable on Linux.
.. _envvars-qt5minver:
QT5_MIN_VER
Set minimum version of Qt5. For example it can be ``5.1`` or ``5.1.2``.
.. _envvars-qt5maxver:
QT5_MAX_VER
Set maximum version of Qt5. For example it can be ``5.12`` or ``5.12.2``.
.. _envvars-qt5usehighestver:
QT5_USE_HIGHEST_VER
By default ZenMake will use the first suitable version of Qt5.
When this variable is set to 'True', 'true', 'yes' or a non-zero number,
ZenMake will try to use the highest version of Qt5 among the found versions.
.. _envvars-qt5nopkgconf:
QT5_NO_PKGCONF
When set to 'True', 'true', 'yes' or a non-zero number,
ZenMake will not use pkg-config/pkgconf
to configure building with Qt5.
Usually you don't need to use this variable.
.. _envvars-qt5tools:
QT5_{MOC,UIC,RCC,LRELEASE,LUPDATE}
These variables can be used to specify full file paths to Qt5 tools
``moc``, ``uic``, ``rcc``, ``lrelease`` and ``lupdate``.
Usually you don't need to use these variables.
ZM_CACHE_CFGACTIONS
When set to 'True', 'true', 'yes' or a non-zero number, ZenMake tries
to use a cache for some :ref:`configuration actions<config-actions>`.
Has no effect when ``--cache-cfg-actions`` is provided on the command line.
It can speed up the next runs of some configuration actions but it can also ignore
changes in toolchains, system paths, etc. In general, it is safe to use
if there were no changes in the current system. Example::
ZM_CACHE_CFGACTIONS=1 zenmake configure
.. include:: global.rst.inc
.. highlight:: python
.. _buildconf-extended-syntax:
Build config: extended syntax
===================================
For convenience, ZenMake supports some syntax extensions in buildconf files.
.. _buildconf-syntactic-sugar:
Syntactic sugar
-----------------------------------
There are some syntactic sugar constructions that can be used to make a buildconf
a little shorter.
configure
""""""""""
It can be used as a replacement for :ref:`configure<buildconf-taskparams-configure>` task param.
For example you have (in YAML format):
.. code-block:: yaml
tasks:
util:
features : cshlib
source : shlib/**/*.c
configure:
- do: check-headers
names : stdio.h
test:
features : cprogram
source : prog/**/*.c
use : util
configure:
- do: check-headers
names : stdio.h
So it can be converted into this:
.. code-block:: yaml
tasks:
util:
features : cshlib
source : shlib/**/*.c
test:
features : cprogram
source : prog/**/*.c
use : util
configure:
- do: check-headers
names : stdio.h
The ``configure`` above is the same as the following construction:
.. code-block:: yaml
byfilter:
- for: all
set:
configure:
- do: check-headers
names : stdio.h
In addition to regular arguments for :ref:`configure<buildconf-taskparams-configure>`
task param you can use ``for``/``not-for``/``if`` in the same way as in
the :ref:`byfilter<buildconf-byfilter>`.
Example:
.. code-block:: yaml
tasks:
# .. skipped
configure:
- do: check-headers
names : stdio.h
not-for: { task: mytask }
install
""""""""""
Like the previous ``configure``, this can be used as a replacement for
the :ref:`install-files<buildconf-taskparams-install-files>` task param.
Example:
.. code-block:: yaml
tasks:
# .. skipped
install:
- for: { task: gui }
src: 'some/src/path/ui.res'
dst: '$(prefix)/share/$(prjname)'
.. _buildconf-substitutions:
Substitutions
-----------------------------------
There are two types of substitutions in ZenMake: bash-like variables
with ability to use system environment variables and built-in variables.
.. _buildconf-substitutions-vars:
Bash-like variables
"""""""""""""""""""""""
ZenMake supports substitution variables with a syntax similar to the syntax
of bash variables.
Both $VAR and ${VAR} syntax are supported. These variables can be used in any
buildconf parameter value of string/text type.
In YAML format:
.. code-block:: yaml
param: '${VAR}/some-string'
In Python format:
.. code-block:: python
'param' : '${VAR}/some-string'
ZenMake looks for such variables in environment variables first and
then in the buildconf file. You can use a $$ (double-dollar sign) to prevent
the use of environment variables.
Example in YAML format:
.. code-block:: yaml
# set 'fragment' variable
fragment: |
program
end program
# set 'GCC_BASE_FLAGS' variable
GCC_BASE_FLAGS: -std=f2018 -Wall
tasks:
# ... skipped values
test:
features : fcprogram
source : src/calculator.f90 src/main.f90
includes : src/inc
use : staticlib sharedlib
configure:
- do: check-code
text: $$fragment # <-- substitution without env
label: fragment
buildtypes:
# GCC_BASE_FLAGS can be overwritten by environment variable with the same name
debug : { fcflags: $GCC_BASE_FLAGS -O0 }
release: { fcflags: $GCC_BASE_FLAGS -O2 }
default: debug
.. note::
These substitution variables inherit values from the parent buildconf in
:ref:`subdirs<dependencies-subdirs>`.
Also values for such variables can be set by some :ref:`configuration actions<config-actions>`.
For example see ``var`` in configuration action ``find-program``. But in this case
these values are not visible everywhere.
For the YAML format there are some constraints with the ${VAR} form due to the YAML specification:
.. code-block:: yaml
debug : { fcflags: $GCC_BASE_FLAGS -O0 } # works
debug : { fcflags: "$GCC_BASE_FLAGS -O0" } # works
debug : { fcflags: ${GCC_BASE_FLAGS} -O0 } # doesn't work
debug : { fcflags: "${GCC_BASE_FLAGS} -O0" } # works
debug :
fcflags: ${GCC_BASE_FLAGS} -O0 # works
.. _buildconf-builtin-vars:
Built-in variables
"""""""""""""""""""""""
ZenMake has some built-in variables that can be used as substitutions.
To avoid possible conflicts with environment and bash-like variables the syntax of
substitutions is a little bit different in this case:
In YAML format:
.. code-block:: yaml
param: '$(var)/some-string'
In Python format:
.. code-block:: python
'param' : '$(var)/some-string'
List of built-in variables:
.. _buildconf-builtin-vars-prjname:
prjname
Name of the current project.
It can be changed via ``name`` from :ref:`here<buildconf-project>`.
.. _buildconf-builtin-vars-topdir:
topdir
Absolute path of :ref:`startdir<buildconf-startdir>` of the top-level buildconf file.
Usually it is the root directory of the current project.
.. _buildconf-builtin-vars-buildrootdir:
buildrootdir
Absolute path of :ref:`buildroot<buildconf-buildroot>`.
.. _buildconf-builtin-vars-buildtypedir:
buildtypedir
Absolute path of the current buildtype directory. It is the
current value of :ref:`buildroot<buildconf-buildroot>` plus the current buildtype.
.. _buildconf-builtin-vars-prefix:
prefix
The installation prefix. It is a directory that is prepended onto all
install directories and it defaults to ``/usr/local`` on UNIX and
``C:/Program Files/$(prjname)`` on Windows.
It can be changed via environment variable :ref:`PREFIX<envvars-prefix>`
or via ``--prefix`` on the command line.
.. _buildconf-builtin-vars-execprefix:
execprefix
The installation prefix for machine-specific files. In most cases it is
the same as the ``$(prefix)`` variable.
It was introduced mostly for compatibility with GNU standard:
https://www.gnu.org/prep/standards/html_node/Directory-Variables.html.
It can be changed via environment variable :ref:`EXEC_PREFIX<envvars-execprefix>`
or via ``--execprefix`` on the command line.
.. _buildconf-builtin-vars-bindir:
bindir
The directory for installing executable programs that users can run.
It defaults to ``$(execprefix)/bin`` on UNIX and ``$(execprefix)`` on Windows.
It can be changed via environment variable :ref:`BINDIR<envvars-bindir>`
or via ``--bindir`` on the command line.
.. _buildconf-builtin-vars-sbindir:
sbindir
The directory for installing executable programs that can be run, but
are only generally useful to system administrators.
It defaults to ``$(execprefix)/sbin`` on UNIX and ``$(execprefix)`` on Windows.
It can be changed via environment variable :ref:`SBINDIR<envvars-sbindir>`
or via ``--sbindir`` on the command line.
.. _buildconf-builtin-vars-libexecdir:
libexecdir
The directory for installing executable programs to be run by other
programs rather than by users.
It defaults to ``$(execprefix)/libexec`` on UNIX and ``$(execprefix)`` on Windows.
It can be changed via environment variable :ref:`LIBEXECDIR<envvars-libexecdir>`
or via ``--libexecdir`` on the command line.
.. _buildconf-builtin-vars-libdir:
libdir
The installation directory for object files and libraries of object code.
It defaults to ``$(execprefix)/lib`` or ``$(execprefix)/lib64`` on UNIX
and ``$(execprefix)`` on Windows.
On Debian/Ubuntu, it may be ``$(execprefix)/lib/<multiarch-tuple>``.
It can be changed via environment variable :ref:`LIBDIR<envvars-libdir>`
or via ``--libdir`` on the command line.
.. _buildconf-builtin-vars-sysconfdir:
sysconfdir
The installation directory for read-only single-machine data.
It defaults to ``$(prefix)/etc`` on UNIX and ``$(prefix)`` on Windows.
It can be changed via environment variable :ref:`SYSCONFDIR<envvars-sysconfdir>`
or via ``--sysconfdir`` on the command line.
.. _buildconf-builtin-vars-sharedstatedir:
sharedstatedir
The installation directory for modifiable architecture-independent data.
It defaults to ``/var/lib`` on UNIX and ``$(prefix)`` on Windows.
It can be changed via environment variable :ref:`SHAREDSTATEDIR<envvars-sharedstatedir>`
or via ``--sharedstatedir`` on the command line.
.. _buildconf-builtin-vars-localstatedir:
localstatedir
The installation directory for modifiable single-machine data.
It defaults to ``$(prefix)/var``.
It can be changed via environment variable :ref:`LOCALSTATEDIR<envvars-localstatedir>`
or via ``--localstatedir`` on the command line.
.. _buildconf-builtin-vars-includedir:
includedir
The installation directory for C header files.
It defaults to ``$(prefix)/include``.
It can be changed via environment variable :ref:`INCLUDEDIR<envvars-includedir>`
or via ``--includedir`` on the command line.
.. _buildconf-builtin-vars-datarootdir:
datarootdir
The installation root directory for read-only architecture-independent data.
It defaults to ``$(prefix)/share`` on UNIX and ``$(prefix)`` on Windows.
It can be changed via environment variable :ref:`DATAROOTDIR<envvars-datarootdir>`
or via ``--datarootdir`` on the command line.
.. _buildconf-builtin-vars-datadir:
datadir
The installation directory for read-only architecture-independent data.
It defaults to ``$(datarootdir)``.
It can be changed via environment variable :ref:`DATADIR<envvars-datadir>`
or via ``--datadir`` on the command line.
.. _buildconf-builtin-vars-appdatadir:
appdatadir
The installation directory for read-only architecture-independent application data.
It defaults to ``$(datarootdir)/$(prjname)`` on UNIX
and ``$(datarootdir)`` on Windows.
It can be changed via environment variable :ref:`APPDATADIR<envvars-appdatadir>`
or via ``--appdatadir`` on the command line.
.. _buildconf-builtin-vars-docdir:
docdir
The installation directory for documentation.
It defaults to ``$(datarootdir)/doc/$(prjname)`` on UNIX
and ``$(datarootdir)/doc`` on Windows.
It can be changed via environment variable :ref:`DOCDIR<envvars-docdir>`
or via ``--docdir`` on the command line.
.. _buildconf-builtin-vars-mandir:
mandir
The installation directory for man documentation.
It defaults to ``$(datarootdir)/man``.
It can be changed via environment variable :ref:`MANDIR<envvars-mandir>`
or via ``--mandir`` on the command line.
.. _buildconf-builtin-vars-infodir:
infodir
The installation directory for info documentation.
It defaults to ``$(datarootdir)/info``.
It can be changed via environment variable :ref:`INFODIR<envvars-infodir>`
or via ``--infodir`` on the command line.
.. _buildconf-builtin-vars-localedir:
localedir
The installation directory for locale-dependent data.
It defaults to ``$(datarootdir)/locale``.
It can be changed via environment variable :ref:`LOCALEDIR<envvars-localedir>`
or via ``--localedir`` on the command line.
In some cases some extra variables are provided. For example,
variables ``src`` and ``tgt`` are provided
for the ``cmd`` in the task parameter :ref:`run<buildconf-taskparams-run>`.
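As an illustrative sketch, built-in variables can be used in regular task
parameters; the task name and the command below are just examples:

.. code-block:: yaml

    tasks:
      show-info:
        run: 'echo "project: $(prjname), build dir: $(buildtypedir)"'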
Built-in variables cannot be used in buildconf parameters which are used to
determine the values of those built-in variables. These parameters are:
- :ref:`startdir<buildconf-startdir>`, :ref:`buildroot<buildconf-buildroot>`,
:ref:`realbuildroot<buildconf-realbuildroot>`
- **buildtypedir** only: the ``default`` in the :ref:`buildtypes<buildconf-buildtypes>`
- **buildtypedir** only: the ``buildtypes``, ``platform`` and ``task`` in
the :ref:`byfilter<buildconf-byfilter>`
.. include:: global.rst.inc
.. highlight:: python
.. _buildconf-taskparams:
Build config: task parameters
=============================
It's a :ref:`dict<buildconf-dict-def>` as a collection of build task parameters
for a build task. This collection is used in :ref:`tasks<buildconf-tasks>`,
:ref:`buildtypes<buildconf-buildtypes>` and :ref:`byfilter<buildconf-byfilter>`.
And it's a core buildconf element.
Also see :ref:`substitutions<buildconf-substitutions>` for string/text values.
.. _buildconf-taskparams-features:
features
"""""""""""""""""""""
It describes type of a build task. Can be one value or list
of values. Supported values:
:c:
Means that the task has a C code. Optional in most cases.
Also it's 'lang' feature for C language.
:cxx:
Means that the task has a C++ code. Optional in most cases.
Also it's 'lang' feature for C++ language.
:d:
Means that the task has a D code. Optional in most cases.
Also it's 'lang' feature for D language.
:fc:
Means that the task has a Fortran code. Optional in most cases.
Also it's 'lang' feature for Fortran language.
:asm:
Means that the task has an Assembler code. Optional in most cases.
Also it's 'lang' feature for Assembler language.
:<lang>stlib:
Means that result of the task is a static library for the <lang> code.
For example: ``cstlib``, ``cxxstlib``, etc.
:<lang>shlib:
Means that result of the task is a shared library for the <lang> code.
For example: ``cshlib``, ``cxxshlib``, etc.
:<lang>program:
Means that result of the task is an executable file for the <lang> code.
For example: ``cprogram``, ``cxxprogram``, etc.
:runcmd:
Means that the task has the ``run`` parameter and should run some
command. It's optional because ZenMake detects this feature
automatically by the presence of ``run`` in the task parameters.
You need to set it explicitly only if you want to try to run a
<lang>program task without the parameter ``run``.
:test:
Means that the task is a test. More details about
tests are :ref:`here<buildtests>`. There is no need to add ``runcmd``
to this feature because ZenMake adds ``runcmd`` itself if necessary.
:qt5:
Means that the task has Qt5 code.
More details are :ref:`here<toolkits_qt5>`.
Some features can be mixed. For example ``cxxprogram`` can be mixed
with ``cxx`` for C++ build tasks but it's not necessary because ZenMake
adds ``cxx`` for ``cxxprogram`` itself. The ``cxxshlib`` feature cannot be
mixed, for example, with the ``cxxprogram`` in one build task because they
are different types of build task targets. Using features such as
``c`` or ``cxx`` doesn't make sense without
\*stlib/\*shlib/\*program features in most cases.
The ``runcmd`` and ``test`` features can be mixed with any feature.
Examples in YAML format:
.. code-block:: yaml
features : cprogram
features : cxxshlib
features : cxxprogram runcmd
features : cxxprogram test
Examples in Python format:
.. code-block:: python
'features' : 'cprogram'
'features' : 'cxxshlib'
'features' : 'cxxprogram runcmd'
'features' : 'cxxprogram test'
.. _buildconf-taskparams-target:
target
"""""""""""""""""""""
Name of the resulting file. The target will have a different extension and
name depending on the platform but you don't need to declare this
difference explicitly. It will be generated automatically. For example
the ``sample`` for a \*shlib task will be converted into
``sample.dll`` on Windows and into ``libsample.so`` on Linux.
By default it's equal to the name of the build task. So in most cases
it is not needed to be set explicitly.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
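For instance, a minimal sketch in YAML format (the task name and the target
name are just examples):

.. code-block:: yaml

    tasks:
      util:
        features : cshlib
        source   : 'shlib/**/*.c'
        # produces 'libmyutil.so' on Linux and 'myutil.dll' on Windows
        target   : myutil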
.. _buildconf-taskparams-source:
source
"""""""""""""""""""""
One or more source files for compiler/toolchain/toolkit.
It can be:
- a string with one or more paths separated by space
- a :ref:`dict<buildconf-dict-def>`, description see below
- a list of items where each item is a string with one or more paths or a dict
The ``dict`` type is used for the ``ant_glob`` Waf_ function. The format of patterns
for ``ant_glob`` can be found here: https://waf.io/book/.
The most significant details from there:
- Patterns may contain wildcards such as \* or \?, but they are
`Ant patterns <https://ant.apache.org/manual/dirtasks.html>`_,
not regular expressions.
- The symbol ``**`` enables recursion. Complex folder hierarchies may
take a lot of time, so use with care.
- The '..' sequence does not represent the parent directory.
So such a ``dict`` can contain fields:
:incl:
Ant pattern or list of patterns to include, required field.
:excl:
Ant pattern or list of patterns to exclude, optional field.
:ignorecase:
Ignore case while matching (False by default), optional field.
:startdir:
Start directory for patterns, optional field. It must be relative to
the :ref:`startdir<buildconf-startdir>` or an absolute path.
By default it's '.', that is, it's equal to
:ref:`startdir<buildconf-startdir>`.
ZenMake always adds several patterns to exclude files for any ant pattern.
These patterns include `Default Excludes` from
`Ant patterns <https://ant.apache.org/manual/dirtasks.html>`_ and some additional
patterns like ``**/*.swp``.
There is a simplified form of using ant patterns: if a string value contains
'*' or '?' it will be converted into the ``dict`` form to use patterns.
See examples below.
Any path or pattern should be relative to the :ref:`startdir<buildconf-startdir>`.
But for a pattern (in a dict) a custom ``startdir`` parameter can be used.
.. note::
If paths contain spaces and all these paths are listed
in one string then each such path must be in quotes.
*YAML*: You can write a string without quotes (as a plain scalar) in many
cases but there are some special symbols which cannot be used at the
beginning without quotes, for example ``*`` and ``?<space>``.
So a value like ``**/*.cpp`` must be always in qoutes (``'`` or ``"``).
See details here: https://www.yaml.info/learn/quote.html.
Examples in YAML format:
.. code-block:: yaml
# just one file
source : test.cpp
# list of two files
source : main.c about.c
# or
source : [main.c, about.c]
# get all *.cpp files in the 'startdir' recursively
source : { incl: '**/*.cpp' }
# or
source :
incl: '**/*.cpp'
# or (shortest record with the same result)
source : '**/*.cpp'
# get all *.c and *.cpp files in the 'startdir' recursively
source : { incl: '**/*.c **/*.cpp' }
# or (shorter record with the same result)
source : '**/*.c **/*.cpp'
# get all *.cpp files in the 'startdir'/mylib recursively
source : mylib/**/*.cpp
# get all *.cpp files in the 'startdir'/src recursively
# but don't include files according pattern 'src/extra*'
source :
incl: src/**/*.cpp
excl: src/extra*
# get all *.c files in the 'src' and in '../others' recursively
source :
- src/**/*.c
- incl: '**/*.c'
startdir: ../others
# pattern with space, it's necessary to use both types of quotes here:
source : '"my prog/**/*.c"'
# two file paths with spaces
source : '"my shlib/my util.c" "my shlib/my util2.c"'
Examples in Python format:
.. code-block:: python
# just one file
'source' : 'test.cpp'
# list of two files
'source' : 'main.c about.c'
'source' : ['main.c', 'about.c'] # the same result
# get all *.cpp files in the 'startdir' recursively
'source' : dict( incl = '**/*.cpp' )
# or
'source' : { 'incl': '**/*.cpp' }
# or (shortest record with the same result)
'source' : '**/*.cpp'
# get all *.c and *.cpp files in the 'startdir' recursively
'source' : { 'incl': ['**/*.c', '**/*.cpp'] }
# or (shorter record with the same result)
'source' : ['**/*.c', '**/*.cpp']
# get all *.cpp files in the 'startdir'/mylib recursively
'source' : 'mylib/**/*.cpp'
# get all *.cpp files in the 'startdir'/src recursively
# but don't include files according pattern 'src/extra*'
'source' : dict( incl = 'src/**/*.cpp', excl = 'src/extra*' )
# get all *.c files in the 'src' and in '../others' recursively
'source' : [
'src/**/*.c',
{ 'incl': '**/*.c', 'startdir' : '../others' },
]
# pattern with space:
'source' : '"my prog/**/*.c"'
# two file paths with spaces
'source' : '"my shlib/my util.c" "my shlib/my util2.c"'
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-includes:
includes
"""""""""""""""""""""
Include paths are used by the C/C++/D/Fortran compilers for finding headers/files.
Paths should be relative to :ref:`startdir<buildconf-startdir>` or absolute,
but the latter variant is not recommended.
If paths contain spaces and all these paths are listed
in one string then each such path must be in quotes.
Examples in YAML format:
.. code-block:: yaml
includes : myinclude
includes : include myinclude
includes : [ include, myinclude ]
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
This parameter can be :ref:`exported<buildconf-taskparams-export>`.
.. _buildconf-taskparams-toolchain:
toolchain
"""""""""""""""""""""
Name of toolchain/compiler to use in the task. It can be any system
compiler that is supported by ZenMake or a toolchain from custom
:ref:`toolchains<buildconf-toolchains>`.
There are also special names for autodetection in the format
``auto-*`` where ``*`` is a 'lang' feature for a programming language,
for example ``auto-c``, ``auto-c++``, etc.
| Known names for C: ``auto-c``, ``gcc``, ``clang``, ``msvc``,
``icc``, ``xlc``, ``suncc``, ``irixcc``.
| Known names for C++: ``auto-c++``, ``g++``, ``clang++``, ``msvc``,
``icpc``, ``xlc++``, ``sunc++``.
| Known names for D: ``auto-d``, ``ldc2``, ``gdc``, ``dmd``.
| Known names for Fortran: ``auto-fc``, ``gfortran``, ``ifort``.
| Known names for Assembler: ``auto-asm``, ``gas``, ``nasm``.
.. note::
If you don't set ``toolchain`` then ZenMake will try to
set ``auto-*`` itself
according to the values in `features <buildconf-taskparams-features_>`_.
In some rare cases this parameter can contain more than one value as a
string with values separated by spaces or as a list. For example, for the case
when C and Assembler files are used in one task, it can be ``"gcc gas"``.
If a toolchain from custom :ref:`toolchains<buildconf-toolchains>` or some
system toolchain contains spaces in its name and all these toolchains are
listed in one string then each
such toolchain must be in quotes.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
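Example in YAML format (a sketch; the task name is just an example):

.. code-block:: yaml

    tasks:
      mylib:
        features  : cxxshlib
        source    : 'src/**/*.cpp'
        toolchain : clang++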
cflags
"""""""""""""""""""""
One or more compiler flags for C.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
cxxflags
"""""""""""""""""""""
One or more compiler flags for C++.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
dflags
"""""""""""""""""""""
One or more compiler flags for D.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
fcflags
"""""""""""""""""""""
One or more compiler flags for Fortran.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
asflags
"""""""""""""""""""""
One or more compiler flags for Assembler.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
cppflags
"""""""""""""""""""""
One or more compiler flags added at the end of compilation commands for C/C++.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
linkflags
"""""""""""""""""""""
One or more linker flags for C/C++/D/Fortran.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
ldflags
"""""""""""""""""""""
One or more linker flags for C/C++/D/Fortran at the end of the link command.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
aslinkflags
"""""""""""""""""""""
One or more linker flags for Assembler.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
arflags
"""""""""""""""""""""
Flags to give the archive-maintaining program.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
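As an illustrative sketch, such flags are often set per buildtype rather than
per task; the flag values below are just examples:

.. code-block:: yaml

    buildtypes:
      debug:
        cxxflags  : -O0 -g
        linkflags : '-Wl,--as-needed'
      release:
        cxxflags  : -O2
        linkflags : '-Wl,--as-needed'
      default: debug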
.. _buildconf-taskparams-defines:
defines
"""""""""""""""""""""
One or more defines for C/C++/Assembler/Fortran.
Examples in YAML format:
.. code-block:: yaml
defines : MYDEFINE
defines : [ ABC=1, DOIT ]
defines :
- ABC=1
- DOIT
defines : 'ABC=1 DOIT AAA="some long string"'
Examples in Python format:
.. code-block:: python
'defines' : 'MYDEFINE'
'defines' : ['ABC=1', 'DOIT']
'defines' : 'ABC=1 DOIT AAA="some long string"'
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
.. _buildconf-taskparams-use:
use
"""""""""""""""""""""
This attribute enables linking against libraries (static or shared).
It can be used for local libraries from other tasks or to declare
dependencies between build tasks. Also it can be used to declare the use of
:ref:`external dependencies<dependencies-external>`.
For external dependencies the format of any dependency in ``use`` must be:
``dependency-name:target-reference-name``.
It can contain one or more other task names.
If a task name contains spaces and all these names are listed in one
string then each such name must be in quotes.
Examples in YAML format:
.. code-block:: yaml
use : util
use : util mylib
use : [util, mylib]
use : 'util "my lib"'
use : ['util', 'my lib']
use : util mylib someproject:somelib
Examples in Python format:
.. code-block:: python
'use' : 'util'
'use' : 'util mylib'
'use' : ['util', 'mylib']
'use' : 'util "my lib"'
'use' : ['util', 'my lib']
'use' : 'util mylib someproject:somelib'
It can be used to specify libraries of qt5 as well.
More details are :ref:`here<toolkits_qt5>`.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-libs:
libs
"""""""""""""""""""""
One or more names of existing shared libraries as dependencies,
without prefix or extension. Usually it's used to set system libraries.
If you use this parameter to specify non-system shared libraries for some
task, you may need to specify the same libraries for all other tasks which
depend on the current task. For example, if you set the library 'mylib'
for the task A while the task B has the parameter ``use`` with 'A',
then it's recommended to add 'mylib' to the parameter ``libs`` for the
task B. Otherwise you can get a link error like ``... undefined reference to ...``
or something similar.
Some other ways to solve this problem include using the environment variable
``LD_LIBRARY_PATH`` or changing the /etc/ld.so.conf file. But usually the last
method is not recommended.
Example in YAML format:
.. code-block:: yaml
libs : m rt
Example in Python format:
.. code-block:: python
'libs' : 'm rt'
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-libpath:
libpath
"""""""""""""""""""""
One or more additional paths to find libraries. Usually you don't need to
set it.
If paths contain spaces and all these paths are listed
in one string then each such path must be enclosed in quotes.
Paths should be absolute or relative to :ref:`startdir<buildconf-startdir>`.
Examples in YAML format:
.. code-block:: yaml
libpath : /local/lib
libpath : '/local/lib "my path"' # in case of spaces in a path
Examples in Python format:
.. code-block:: python
'libpath' : '/local/lib'
'libpath' : '/local/lib "my path"' # in case of spaces in a path
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
monitlibs
"""""""""""""""""""""
One or more names from ``libs`` to monitor for changes.
For example, suppose a project uses the system library 'superlib' and this
library is upgraded by a system package manager. After that, building
the project will not relink with the new version of 'superlib'
if there are no changes in the project itself which could trigger such a relink.
Usually this is not a problem, because during development a project changes
much more frequently than system libraries are upgraded.
Any names not listed in ``libs`` are ignored.
It can also be True or False. If it is True then the value of ``libs``
is used. If it is False then it means an empty list.
By default it's False.
Using this parameter can slow down the build of projects
with many values in this parameter.
ZenMake uses sha1/md5 hashes to detect changes of every library file.
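Example in YAML format (the library name 'superlib' is illustrative):
.. code-block:: yaml
    libs : m rt superlib
    monitlibs : superlib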
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
stlibs
"""""""""""""""""""""
The same as ``libs`` but for static libraries.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
stlibpath
"""""""""""""""""""""
The same as ``libpath`` but for static libraries.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Also this parameter can be :ref:`exported<buildconf-taskparams-export>`.
monitstlibs
"""""""""""""""""""""
The same as ``monitlibs`` but for static libraries, which means it is
affected by the parameter ``stlibs``.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-moc:
moc
"""""""""""""""""""""
One or more header files (.h) with C++ class declarations with Q_OBJECT.
These files are handled with Qt Meta-Object Compiler, moc.
Format for this parameter is the same as for
the :ref:`source<buildconf-taskparams-source>` parameter.
You can also specify header files without Q_OBJECT here, because ZenMake
filters out such files by itself. So, if you wish, you can simply specify
all .h files of your header directory.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
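A minimal sketch in YAML format (the task name and file patterns are illustrative):
.. code-block:: yaml
    myapp:
      features : cxxprogram qt5
      source   : 'src/*.cpp'
      moc      : 'src/*.h'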
.. _buildconf-taskparams-rclangprefix:
rclangprefix
"""""""""""""""""""""
Value of ``qresource prefix`` in the generated .qrc file for a qt5 task.
When .ts files are specified in the :ref:`source<buildconf-taskparams-source>`
parameter, ZenMake compiles these files into .qm files.
If you set the ``rclangprefix`` parameter, ZenMake will put all
compiled .qm files into a .qrc file to embed them as internal binary
resources inside the compiled task target file.
The value of this parameter can then be used as the 'directory' argument
of the QTranslator::load method in your Qt5 code.
The :ref:`bld-langprefix<buildconf-taskparams-bld-langprefix>`,
:ref:`unique-qmpaths<buildconf-taskparams-unique-qmpaths>`
and :ref:`install-langdir<buildconf-taskparams-install-langdir>`
parameters are ignored if the ``rclangprefix`` is set.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
.. _buildconf-taskparams-langdir-defname:
langdir-defname
"""""""""""""""""""""
Name of a define to set for your Qt5 code to detect the current directory with
compiled .qm files to use in the QTranslator::load method.
When .ts files are specified in the :ref:`source<buildconf-taskparams-source>`
parameter, ZenMake compiles these files into .qm files. But when you use
the ``install`` command, ZenMake copies these files from the build directory
into the install directory. So the directory with .qm files is different
for a regular build and for the installed application.
The value of the define named by ``langdir-defname`` is
the install directory of the .qm files for the ``install`` command and
the build directory of the .qm files in other cases.
This parameter is ignored if
:ref:`rclangprefix<buildconf-taskparams-rclangprefix>` is set.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
.. _buildconf-taskparams-bld-langprefix:
bld-langprefix
"""""""""""""""""""""
Set build directory path prefix for compiled .qm files.
It is relative to :ref:`buildtypedir<buildconf-builtin-vars-buildtypedir>`
and defaults to ``@translations``.
Usually you don't need to use this parameter.
This parameter is ignored if
:ref:`rclangprefix<buildconf-taskparams-rclangprefix>` is set.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
.. _buildconf-taskparams-unique-qmpaths:
unique-qmpaths
"""""""""""""""""""""
Make unique file paths for compiled .qm files by adding the name of the
current build task according to the pattern:
``$(buildtypedir)/<bld-langprefix>/<task name>_<original .qm file name>``
where :ref:`buildtypedir<buildconf-builtin-vars-buildtypedir>` is
the built-in variable.
Usually you don't need to use this parameter.
This parameter is ignored if
:ref:`rclangprefix<buildconf-taskparams-rclangprefix>` is set.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
rpath
"""""""""""""""""""""
One or more paths to hard-code into the binary at
link time. It's ignored on platforms that do not support it.
If paths contain spaces and all these paths are listed
in one string then each such path must be enclosed in quotes.
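Example in YAML format (the path is illustrative):
.. code-block:: yaml
    rpath : /usr/local/lib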
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-ver-num:
ver-num
"""""""""""""""""""""
Enforce version numbering on shared libraries. It can be used with
\*shlib ``features`` for example. It can be ignored on platforms that do
not support it.
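Example in YAML format (the version number is illustrative):
.. code-block:: yaml
    ver-num : '0.3.1'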
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-run:
run
"""""""""""""""""""""
A :ref:`dict<buildconf-dict-def>` with parameters to run something in
the task. It's used with the task features ``runcmd`` and ``test``. It can
also be just a string or a python function (for buildconf.py only); in that case
it's the same as using a dict with the single parameter ``cmd``.
:cmd:
Command line to run. It can be any suitable command line.
For convenience special :ref:`built-in substitution<buildconf-builtin-vars>`
variables ``src`` and ``tgt`` can be used here.
The ``tgt`` variable contains a string with the absolute path to the
resulting target file of the current task,
and ``src`` contains a string with all source files of the task.
Environment variables can also be used here, but see
:ref:`bash-like substitutions<buildconf-substitutions-vars>`.
For the python variant of buildconf it can be a python function as well.
In this case such a function gets one argument: a python dict
with the following parameters:
:taskname:
Name of current build task
:startdir:
Current :ref:`startdir<buildconf-startdir>`
:buildroot:
Root directory for building
:buildtype:
Current buildtype
:target:
Absolute path to the resulting target. It may not exist yet.
:waftask:
Object of Waf class Task. It's for advanced use.
:cwd:
Working directory where to run ``cmd``. By default it's the build
directory for the current buildtype. The path can be absolute or
relative to the :ref:`startdir<buildconf-startdir>`.
:env:
Environment variables for ``cmd``. It's a ``dict`` where each
key is the name of a variable and the value is the value of that env variable.
:timeout:
Timeout for ``cmd`` in seconds. It works only when ZenMake is run
with python 3. By default there is no timeout.
:shell:
If shell is True, the specified command will be executed through
the shell. By default it is True, to avoid some common problems.
In many cases, however, it's safe to set it to False,
which avoids the overhead of using the shell.
In some cases ZenMake/Waf can set it back to True even though you
set it to False.
:repeat:
The number of times to run ``cmd``. It's mostly for tests.
By default it's 1.
If the current task has the parameter ``run`` with an empty ``features`` or with only ``runcmd``
in the ``features`` then it is a standalone runcmd task.
If the current task is not a standalone runcmd task then the command from the parameter
``run`` will be run after compilation and linking. If you want to have
a command that is called before compilation and linking, you can make
another standalone runcmd task and specify this new task in the parameter
``use`` of the current task.
By default ZenMake expects that any build task produces a target file,
and if it doesn't find this file when the task is finished
it will throw an error. This is true for standalone runcmd tasks as well.
If you want to create a standalone runcmd task which doesn't produce a target
file, you can set the task parameter
:ref:`target<buildconf-taskparams-target>` to an empty string.
Examples in YAML format:
.. code-block:: yaml
echo:
run: "echo 'say hello'"
target: ''
test.py:
run:
cmd : python tests/test.py
cwd : .
env : { JUST_ENV_VAR: qwerty }
shell : false
target: ''
configure :
- do: find-program
names: python
shlib-test:
features : cxxprogram test
# ...
run:
cmd : '$(tgt) a b c'
env : { ENV_VAR1: '111', ENV_VAR2: 'false' }
repeat : 2
timeout : 10 # in seconds
shell : false
foo.luac:
source : foo.lua
configure : [ { do: find-program, names: luac } ]
run: '${LUAC} -s -o $(tgt) $(src)'
Examples in Python format:
.. code-block:: python
'echo' : {
'run' : "echo 'say hello'",
'target': '',
},
'test.py' : {
'run' : {
'cmd' : 'python tests/test.py',
'cwd' : '.',
'env' : { 'JUST_ENV_VAR' : 'qwerty', },
'shell' : False,
},
'target': '',
'configure' : [ dict(do = 'find-program', names = 'python'), ]
},
'shlib-test' : {
'features' : 'cxxprogram test',
# ...
'run' : {
'cmd' : '$(tgt) a b c',
'env' : { 'ENV_VAR1' : '111', 'ENV_VAR2' : 'false'},
'repeat' : 2,
'timeout' : 10, # in seconds, Python 3 only
'shell' : False,
},
},
'foo.luac' : {
'source' : 'foo.lua',
'configure' : [ dict(do = 'find-program', names = 'luac'), ],
'run': '${LUAC} -s -o $(tgt) $(src)',
},
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-configure:
configure
"""""""""""""""""""""
A list of configuration actions (configuration checks and others).
Details are :ref:`here<config-actions>`.
These actions are called on **configure** step (in command **configure**).
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
Results of these configuration actions
can be :ref:`exported<buildconf-taskparams-export>` with the name `config-results`.
.. _buildconf-taskparams-export:
export-<param> / export
""""""""""""""""""""""""
Some task parameters can be exported to all dependent build tasks.
There are two forms: ``export-<param>`` and ``export``.
In the first form ``<param>`` is the name of the exported task parameter.
The value to export can be the boolean True/False or
a specific valid value for the ``<param>``.
If the value is True then ZenMake exports the value of the parameter from the
current task to all dependent build tasks. If the value is False then ZenMake
exports nothing.
Supported names: ``includes``, ``defines``, ``config-results``,
``libpath``, ``stlibpath``, ``moc`` and all ``*flags``.
Note that the parameter ``export-config-results`` accepts a boolean True/False value only.
In the second form it must be a string or list with the names of the parameters to export.
The second form is a simplified variant of the first form where all values are True,
and it cannot be used to set a specific value to export.
.. note::
By default ZenMake exports nothing (all values are False).
Exported values are inserted at the beginning of the corresponding parameter values
in dependent tasks. This makes it possible to override parent values.
For example, task A has ``defines`` with the value ``AAA=q`` and task B depends
on task A and has ``defines`` with the value ``BBB=v``. So if task A has
``export-defines`` set to True, then the actual value of ``defines`` in task B will
be ``AAA=q BBB=v``.
Examples in YAML format:
.. code-block:: yaml
# export all includes from current task
export-includes: true
# the same result:
export: includes
# export all includes and defines from current task
export-includes: true
export-defines: true
# the same result:
export: includes defines
# export specific includes, value of parameter 'includes' from current
# task is not used
export-includes: incl1 incl2
# export specific defines, value of parameter 'defines' from current
# task is not used
export-defines : 'ABC=1 DOIT AAA="some long string"'
# export results of all configuration actions from current task
export-config-results: true
# export all includes, defines and results of configuration actions
export: includes defines config-results
Specific remarks:
:includes:
If specified paths contain spaces and all these paths are listed
in one string then each such path must be enclosed in quotes.
:defines:
Defines from :ref:`configuration actions<config-actions>`
are not exported. Use ``export-config-results`` or
``export`` with ``config-results`` for that.
It's possible to use :ref:`selectable parameters<buildconf-select>`
(in strings) to set this parameter.
.. _buildconf-taskparams-install-path:
install-path
"""""""""""""""""""""
String representing the installation directory for the task
:ref:`target<buildconf-taskparams-target>` file.
It is used in the ``install`` and ``uninstall`` commands.
This path must be absolute.
To disable installation, set it to False or to an empty string.
If it's absent then the built-in ``prefix``, ``bindir``
and ``libdir`` variables will be used to detect the path.
You can use any :ref:`built-in substitution<buildconf-builtin-vars>` variable
including ``prefix``, ``bindir`` and ``libdir`` here
like this:
Example in YAML format:
.. code-block:: yaml
install-path : '$(prefix)/bin'
This parameter is false for standalone runcmd tasks by default.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-install-files:
install-files
"""""""""""""""""""""
A list of additional files to install.
Each item in this list must be a :ref:`dict<buildconf-dict-def>` with
following parameters:
:do:
It is what to do and it can be ``copy``, ``copy-as`` or ``symlink``.
The ``copy`` value means copying specified files to a directory from the ``dst``.
The ``copy-as`` value means copying one specified file to a path from the ``dst``
so you can use a different file name.
The ``symlink`` value means creation of symlink. It's for POSIX platforms only
and does nothing on MS Windows.
In some cases this parameter can be omitted.
If this parameter is absent:
- It's ``symlink`` if the parameter ``symlink`` exists in the current dict.
- It's ``copy`` in all other cases.
:src:
If ``do`` is ``copy`` then rules for this parameter are the same as for
`source <buildconf-taskparams-source_>`_ but with one addition: you can
specify one or more paths to directory if you don't use any ant pattern.
In this case all files from specified directory will be copied
recursively with directories hierarchy.
If ``do`` is ``copy-as``, it must be one path to a file. And it must be relative
to the :ref:`startdir<buildconf-startdir>` or an absolute path.
If ``do`` is ``symlink``, it must be one path to a file. Created symbolic
link will point to this path. Also it must be relative
to the :ref:`startdir<buildconf-startdir>` or an absolute path.
:dst:
If ``do`` is ``copy`` then it must be a path to a directory.
If ``do`` is ``copy-as``, it must be one path to a file.
If ``do`` is ``symlink``, this parameter cannot be used. See parameter ``symlink``.
It must be relative to the :ref:`startdir<buildconf-startdir>` or
an absolute path.
Any path here will be prefixed with the value of ``destdir``
if ``destdir`` is set to a non-empty value.
This ``destdir`` can be set with the command line argument ``--destdir`` or with the
environment variable ``DESTDIR``; it is not set by default.
:symlink:
It is like ``dst`` for ``copy-as`` but for creating a symlink.
This parameter can be used only if ``do`` is ``symlink``.
It must be relative to the :ref:`startdir<buildconf-startdir>` or
an absolute path.
:chmod:
Change file mode bits. It's for POSIX platforms only
and does nothing on MS Windows.
And it cannot be used for ``do`` = ``symlink``.
It must be an integer or a string. If it is an integer, it must be a valid value
for the python function os.chmod, for example: 0o755.
If it is a string, the value will be interpreted as the octal
representation of an integer;
for example, '755' will be converted to 493 (755 in octal).
By default it is 0o644.
:user:
Change file owner. It's for POSIX platforms only
and does nothing on MS Windows.
It must be the name of an existing user.
It is not set by default, in which case the owner of the original file is kept.
:group:
Change file user's group. It's for POSIX platforms only
and does nothing on MS Windows.
It must be the name of an existing user's group.
It is not set by default, in which case the group of the original file is kept.
:follow-symlinks:
Follow symlinks from ``src`` if ``do`` is ``copy`` or ``copy-as``.
If it is false, symbolic links in the paths from ``src`` are
represented as symbolic links in the ``dst``, but the metadata of the
original links is NOT copied; if true or omitted, the contents and
metadata of the linked files are copied to the new destination.
It's true by default.
:relative:
This parameter can be used only if ``do`` is ``symlink``.
If it is true, a relative symlink will be created.
It's false by default.
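A short sketch in YAML format (all paths and names here are illustrative):
.. code-block:: yaml
    install-files:
      # 'do' is absent and there is no 'symlink', so it's 'copy'
      - src : 'doc/*.html'
        dst : $(prefix)/share/doc/myapp
      - do    : copy-as
        src   : scripts/myapp.sh
        dst   : $(prefix)/bin/myapp
        chmod : '755'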
Some examples can be found in the directory 'mixed/01-cshlib-cxxprogram'
in the repository `here <repo_demo_projects_>`_.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. _buildconf-taskparams-install-langdir:
install-langdir
"""""""""""""""""""""
Installation directory for .qm files.
It defaults to ``$(appdatadir)/translations`` where
:ref:`appdatadir<buildconf-builtin-vars-appdatadir>` is the built-in variable.
This parameter is ignored if
:ref:`rclangprefix<buildconf-taskparams-rclangprefix>` is set.
It can be used only for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
normalize-target-name
"""""""""""""""""""""
Convert the ``target`` name to ensure it is suitable as a file name
and has no potential problems.
For example, it replaces all space characters. Experimental.
By default it is False.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
enabled
"""""""""""""""""""""
If it's False then the current task will not be used at all.
By default it is True.
It makes sense mostly with
:ref:`selectable parameters<buildconf-select>` or with
:ref:`byfilter<buildconf-byfilter>`. With this parameter you can make a build
task which is used, for example, only on Linux, or only for a specific
toolchain, or under some other condition.
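For example, in YAML format, using a built-in condition:
.. code-block:: yaml
    enabled.select:
      linux   : true
      default : false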
group-dependent-tasks
"""""""""""""""""""""
Although runtime jobs for the tasks may be executed in parallel, some
preparation is made before this in one thread. It includes, for example,
analyzing the task dependencies and the file paths in :ref:`source<buildconf-taskparams-source>`.
Such a list of tasks is called a `build group` and, by default, there is only one
build group for each project which uses ZenMake. If this parameter is true,
ZenMake creates a new build group for all dependent tasks, and
preparation for these dependent tasks will be run only when all jobs for the current
task, including all dependencies, are done.
For example, if some task produces source files (\*.c, \*.cpp, etc) that
don't exist at the time
of such a preparation, you can get a problem because ZenMake cannot find
non-existent files. It is not a problem if such a
file is declared in :ref:`target<buildconf-taskparams-target>` and then this
file is specified without use of an ant pattern in the ``source`` of dependent tasks.
In other cases you can solve the problem by setting this parameter to True
for the task which produces these source files.
By default it is False. Don't set it to True without a reason, because it
can slow the build down.
objfile-index
"""""""""""""""""""""
Counter for the object file extension.
By default it's calculated automatically as a unique index number for each
build task.
If you set this for one task but not for others in the same project, and your
index number matches one of the automatically generated indexes,
it can cause compilation errors if different tasks use the same files in
the parameter ``source``.
You can also set the same value for all build tasks; often it's not a
problem as long as different tasks use different files in
the parameter ``source``.
Set this parameter only if you know what you are doing.
It's possible to use :ref:`selectable parameters<buildconf-select>`
to set this parameter.
.. note::
More examples of buildconf files can be found in repository
`here <repo_demo_projects_>`_. | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/buildconf-taskparams.rst | buildconf-taskparams.rst |
.. include:: global.rst.inc
.. highlight:: python
.. _buildconf-select:
Build config: selectable parameters
===================================
ZenMake provides the ability to select values for parameters in
:ref:`task params<buildconf-taskparams>` depending on some conditions.
This feature is similar to `Configurable attributes` from
the Bazel build system and the main idea was borrowed from that system, but
the implementation is different.
It can be used for selecting different source files, includes, compiler flags
and others on different platforms, different toolchains, etc.
Example in YAML format:
.. code-block:: yaml
tasks:
# ...
conditions:
windows-msvc:
platform: windows
toolchain: msvc
buildtypes:
debug: {}
release:
cxxflags.select:
windows-msvc: /O2
default: -O2
Example in Python format:
.. code-block:: python
tasks = {
# ...
}
conditions = {
'windows-msvc' : {
'platform' : 'windows',
'toolchain' : 'msvc',
},
}
buildtypes = {
'debug' : {
},
'release' : {
'cxxflags.select' : {
'windows-msvc': '/O2',
'default': '-O2',
},
},
}
In this example, for the build type 'release' we set 'cxxflags' to '/O2'
if the toolchain 'msvc' is used on MS Windows, and to '-O2' in all other cases.
This method can be used for any parameter in :ref:`task params<buildconf-taskparams>`
excluding :ref:`features<buildconf-taskparams-features>` in the form:
YAML format:
.. code-block:: yaml
<parameter name>.select:
<condition name1>: <value>
<condition name2>: <value>
...
default: <value>
Python format:
.. code-block:: python
'<parameter name>.select' : {
'<condition name1>' : <value>,
'<condition name2>' : <value>,
...
'default' : <value>,
}
A <parameter name> here is a parameter from :ref:`task params<buildconf-taskparams>`.
Examples: 'toolchain.select', 'source.select', 'use.select', etc.
Each condition name must refer to a key in :ref:`conditions<buildconf-conditions>`
or to one of the built-in conditions (see below).
There is also a special optional key ``default`` which means the default value if none
of the conditions has been selected. If the key ``default`` doesn't exist then ZenMake
tries to use the value of <parameter name> if it exists. If none of the
conditions has been selected and there is no default value for the parameter, then this
parameter will not be used.
Keys in :ref:`conditions<buildconf-conditions>` are just strings consisting of
latin characters, digits and the symbols '+', '-', '_'. The value for each condition
is a dict with one or more of the following parameters:
:platform:
Selected platform like 'linux', 'windows', 'darwin', etc.
Valid values are the same as for ``default`` in
the :ref:`buildtypes<buildconf-buildtypes>`.
It can be one value or list of values or string with more than one
value separated by spaces like this: 'linux windows'.
:host-os:
Selected basic name of a host operating system. It is almost the same as the
``platform`` parameter but for the MSYS2 and cygwin platforms
it is always 'windows' and for the darwin platform it is 'macos'.
:distro:
Name of a Linux distribution like 'debian', 'fedora', etc.
This name is empty string for other operating systems.
:cpu-arch:
Selected current CPU architecture. Actually it's the result of the python
function platform.machine(). See https://docs.python.org/library/platform.html.
Some possible values are: arm, i386, i686, x86_64, AMD64.
The real value depends on the platform. For example, on Windows you can get
AMD64 while on Linux you get x86_64 on the same host.
The current value can also be obtained with the command ``zenmake sysinfo``.
It can be one value or list of values or string with more than one value
separated by spaces like this: 'i686 x86_64'.
:toolchain:
Selected/detected toolchain.
It can be one value or list of values or string with more than one value
separated by spaces like this: 'gcc clang'.
:task:
Selected build task name.
It can be one value or list of values or string with more than one value
separated by spaces like this: 'mylib myprogram'.
:buildtype:
Selected buildtype.
It can be one value or list of values or string with more than one value
separated by spaces like this: 'debug release'.
:env:
Check system environment variables. It's a dict of pairs <variable> : <value>.
Example in YAML format:
.. code-block:: yaml
conditions:
my-env:
env:
TEST: 'true' # use 'true' as a string
CXX: gcc
Example in Python format:
.. code-block:: python
conditions = {
'my-env' : {
'env' : {
'TEST' : 'true',
'CXX' : 'gcc',
}
},
}
If a parameter in a condition contains more than one value then any of these
values will fulfill the condition. It means that if some condition, for example,
has ``platform`` containing ``'linux windows'`` and no other parameters, then
this condition will be selected on any of these platforms (on GNU/Linux and
on MS Windows). But with the parameter ``env`` the situation is different. This
parameter can contain more than one environment variable and the condition will be
selected only when all of these variables are equal to existing variables in the
system environment. If you want a condition that is selected by any of such
variables, you can do it by making separate conditions in
:ref:`conditions<buildconf-conditions>`.
.. note::
There is a constraint for ``toolchain.select``: it's not possible to use
a condition with the 'toolchain' parameter inside ``toolchain.select``.
Only one record from ``*.select`` for each parameter can be selected for each task
during configuring, but a condition name in ``*.select`` can be a string with more than
one name from ``conditions``. Such names can be combined with 'and', 'or', 'not'
and '()' to form compound conditions in ``*.select``.
Example in YAML format:
.. code-block:: yaml
conditions:
linux:
platform: linux
g++:
toolchain: g++
buildtypes:
debug: {}
release:
cxxflags.select:
# will be selected only on linux with selected/detected toolchain g++
linux and g++: -Ofast
# will be selected in all other cases
default: -O2
Example in Python format:
.. code-block:: python
conditions = {
'linux' : {
'platform' : 'linux',
},
'g++' : {
'toolchain' : 'g++',
},
}
buildtypes = {
'debug' : {
},
'release' : {
'cxxflags.select' : {
# will be selected only on linux with selected/detected toolchain g++
'linux and g++': '-Ofast',
# will be selected in all other cases
'default': '-O2',
},
},
}
For convenience there are ready-to-use built-in conditions for known platforms and
supported toolchains. So in the example above the ``conditions`` variable is not needed
at all, because conditions with the names ``linux`` and ``g++`` already exist:
in YAML format:
.. code-block:: yaml
# no declaration of conditions
buildtypes:
debug: {}
release:
cxxflags.select:
# will be selected only on linux with selected/detected toolchain g++
linux and g++: -Ofast
# will be selected in all other cases
default: -O2
in Python format:
.. code-block:: python
# no declaration of conditions
buildtypes = {
'debug' : {
},
'release' : {
'cxxflags.select' : {
# will be selected only on linux with selected/detected toolchain g++
'linux and g++': '-Ofast',
# will be selected in all other cases
'default': '-O2',
},
},
}
You can also use built-in conditions for supported buildtypes. But if the name
of a supported buildtype is the same as one of the known platforms or supported
toolchains then such a buildtype cannot be used as a built-in condition.
For example, you may want to make/use the buildtype 'linux'; that is possible,
but in this case you have to declare a different name to use it in conditions,
because the value 'linux' is one of the known platforms.
There is one detail about built-in conditions for toolchains: only toolchains
supported for the current build tasks can be used. ZenMake detects them from
all ``features`` of all existing build tasks in the current project during configuring.
For example, if tasks exist for the C language only, then supported toolchains for
all other languages cannot be used as built-in conditions.
If you declare a condition in :ref:`conditions<buildconf-conditions>` with the
same name as a built-in condition then your condition will be used instead
of the built-in one.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/buildconf-select.rst | buildconf-select.rst |
.. ZenMake documentation master file, created by
sphinx-quickstart on Thu Sep 26 14:16:40 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. include:: global.rst.inc
ZenMake documentation
===================================
ZenMake is a build system based on the meta build system Waf_.
.. toctree::
:maxdepth: 2
:glob:
introduction
why
quickstart
installation
buildconf
buildconf-taskparams
buildconf-select
buildconf-edeps
buildconf-ext-syntax
commands
envvars
langs
toolkits
config-actions
dependencies
tests
perftips
faq
changelog
license
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/index.rst | index.rst |
.. include:: global.rst.inc
.. highlight:: console
.. _commands:
Commands
=====================
Here are descriptions of the general commands. You can get the list of all
commands with a short description via ``zenmake help`` or ``zenmake --help``.
To get help on a selected command you
can use ``zenmake help <selected command>`` or
``zenmake <selected command> --help``. Some commands have short aliases.
For example you can use ``bld`` instead of ``build`` and ``dc``
instead of ``distclean``.
configure
Configure the project. In most cases you don't need to call this command
directly. The ``build`` command calls this command by itself if necessary.
This command processes most of the values from the :ref:`buildconf<buildconf>`
of a project. Any change in the :ref:`buildconf<buildconf>` leads to a call
of this command. You can change this behaviour with the parameter ``autoconfig``
in the buildconf :ref:`general features<buildconf-general>`.
build
Build the project in the current directory. It's the main command. To see all
possible parameters use ``zenmake help build`` or
``zenmake build --help``. For example you can use ``-v`` to see more info
about the building process, or ``-p`` to use a progress bar instead of text logging.
By default it calls the ``configure`` command by itself if necessary.
test
Build (if necessary) and run tests in the current directory. If the project
has no tests it's almost the same as running the ``build`` command.
The ``test`` command builds and runs tests by default while
the ``build`` command doesn't.
run
Build the project (if necessary) and run one executable target from the
build directory.
You can specify the build task/target to run if the project has more than
one executable target, or omit it if the project has only one
executable target. To provide command line args directly to your program
you can put them after '--' on the command line, after all args for ZenMake.
This command is for quickly checking the built project.
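For example, a quick sketch of an invocation, where the task name ``myapp``
and the program arguments are illustrative::
    zenmake run myapp -- --input data.txt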
clean
Remove build files for selected ``buildtype`` of the project.
It doesn't touch other build files.
cleanall
Remove the build directory of the project with everything in it.
install
Install the build targets in some destination directory using installation
prefix. It builds targets by itself if necessary.
You can control paths with :ref:`environment variables<envvars>`
or command line parameters (see ``zenmake help install``).
Overall it is similar to the classic ``make install``.
uninstall
Remove the build targets installed with the ``install`` command.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/commands.rst | commands.rst |
.. include:: global.rst.inc
.. highlight:: console
.. _installation:
Installation
============
**Dependencies**
* Python_ >=3.5. Python must have threading support.
  Python is built with threading in almost all cases, since hardly anybody uses
  ``--without-threads`` for building Python. Python >= 3.7 always has threading.
* `PyYAML <https://pyyaml.org/>`_. It's optional and needed only
  if you use a yaml :ref:`buildconf<buildconf>`. ZenMake can be used with a yaml
  buildconf file even if PyYAML is not installed in the operating system, because
  ZenMake has an internal copy of the PyYAML python library. This copy is used only
  if PyYAML is not installed in the operating system.
There are different ways to install/use ZenMake:
.. contents::
:local:
Via python package (pip)
------------------------
ZenMake has its `own python package <pypipkg_>`_. You can install it by::
pip install zenmake
In this way pip will install PyYAML if it's not installed already.
.. note::
``POSIX``: It requires root and will install it system-wide.
Alternatively, you can use::
pip install --user zenmake
which will install it for your user
and does not require any special privileges. This will install the package
in ~/.local/, so you will have to add ~/.local/bin to your PATH.
``Windows``: It doesn't always require administrator rights.
.. note::
You need to have ``pip`` installed. Most of the modern Linux distributions
have pip in their packages. On Windows you can use, for example,
`chocolatey <https://chocolatey.org/>`_ to install pip.
Common instructions to install pip can be found
`here <https://pip.pypa.io/en/stable/installing/>`_.
.. note::
You can install ZenMake with pip and virtualenv_. In this case you don't
touch system packages and it doesn't require root privileges.
After installing you can run ZenMake just by typing::
zenmake
.. _installation-via-git:
Via git
----------
You can use ZenMake from the Git repository, but the branch ``master`` can be
broken. You can switch to a required version using a git tag; each
version of ZenMake has a git tag. The body of the ZenMake application is located in
the ``src/zenmake`` path in the repository. You don't need the other directories and
files in the repository and you can remove them if you want. Then you can make a symlink
to ``src/zenmake/zmrun.py``, a shell alias, or an executable
.sh script (for Linux/MacOS/..) or .bat file (for Windows) to
run ZenMake. Example for Linux (``zmrepo`` is a custom directory)::
$ mkdir zmrepo
$ cd zmrepo
$ git clone https://github.com/pustotnik/zenmake.git .
The next step is optional: switch to an existing version, for example to 0.7.0::
$ git checkout v0.7.0
Now you can make a symlink/alias/script to run ZenMake.
Other options to run ZenMake::
$ <path-to-zenmake-repo>/src/zenmake/zmrun.py
or::
$ python <path-to-zenmake-repo>/src/zenmake
As a zip application
------------------------
ZenMake can be run as an executable python zip application, and ZenMake can
make such a zipapp with the command ``zipapp``.
Following the steps from `Via Git <installation-via-git_>`_ you can run::
$ python src/zenmake zipapp
$ ls *.pyz
zenmake.pyz
$ ./zenmake.pyz
...
The resulting file ``zenmake.pyz`` can be run standalone without the repository and pip.
You can copy ``zenmake.pyz`` to the root of your project and distribute this
file with your project. It can be used on any supported platform and doesn't
require any additional access or changes in your system.
.. note::
Since ZenMake 0.10.0 you can download ready to use ``zenmake.pyz`` from
GitHub `releases <github_releases_>`_.
.. _virtualenv: https://pypi.python.org/pypi/virtualenv/
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/installation.rst | installation.rst |
.. include:: global.rst.inc
.. _introduction:
Introduction
============
What is it?
-----------
ZenMake is a cross-platform build system for C/C++ and some other languages.
It uses the meta build system Waf_ as a framework.
Some reasons to create this project can be found :ref:`here<why>`.
Main features
-------------
- Build config as python (.py) or as yaml file.
Details are :ref:`here<buildconf>`.
- Distribution as zip application or as system package (pip).
See :ref:`Installation<installation>`.
- Automatic reconfiguring: no need to run command 'configure'.
- Compiler autodetection.
- Building and running functional/unit tests including an ability to
build and run tests only on changes. Details are :ref:`here<buildtests>`.
- Build configs in sub directories.
- Building external dependencies.
- Supported platforms: GNU/Linux, MacOS, MS Windows. Some other
  platforms like OpenBSD/FreeBSD should work as well but they
  haven't been tested.
- Supported languages:
- C: gcc, clang, msvc, icc, xlc, suncc, irixcc
- C++: g++, clang++, msvc, icpc, xlc++, sunc++
- D: dmd, ldc2, gdc; MS Windows is not supported yet
- Fortran: gfortran, ifort (should work but not tested)
- Assembler: gas (GNU Assembler)
- Supported toolkits/frameworks:
- SDL v2 (Linux only)
- GTK v3 (Linux only)
- Qt v5
Plans to do
------------
There is no clear roadmap for this project. I add features that I think are
needed to include.
Project links
-------------
.. include:: projectlinks.rst
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/introduction.rst | introduction.rst |
.. include:: global.rst.inc
.. highlight:: python
.. _config-actions:
Configuration actions
=====================
ZenMake supports a set of configuration actions. They can be used to check system
libraries, headers, etc. To set configuration actions, use the ``configure`` parameter
in :ref:`task params<buildconf-taskparams>`. The value of the ``configure`` parameter
must be a list of such actions. An item in the list
can be a ``dict`` where ``do`` specifies what to do, in other words the type of
the configuration action. It's like a function where ``do`` is the name of
the function and the other parameters are its arguments.
In the python format of the buildconf file such an item can also be
a python function which must return True/False on success/failure.
If such a function raises an exception then ZenMake interprets it
as if the function returned False. This function can be without arguments or
with the named arguments ``taskname`` and ``buildtype``.
It's better to use `**kwargs` to have a universal way to work with any input arguments.
And they all are called on the **configure** step (in command **configure**).
Results of the same configuration actions are cached when it's possible
but not between runnings of ZenMake.
These configuration actions in ``dict`` format:
``do`` = ``check-headers``
*Parameters*: ``names``, ``defname`` = '', ``defines`` = [],
``mandatory`` = True.
*Supported languages*: C, C++.
Check existence of C/C++ headers from the ``names`` list.
The ``defname`` parameter is a name of a define to set
for your code when the action is over. By default the name for each header
is generated in the form 'HAVE_<HEADER NAME>=1'. For example, if you set
'cstdio' in the ``names`` then the define 'HAVE_CSTDIO=1' will be generated.
If you set 'stdio.h' in the ``names`` then the define 'HAVE_STDIO_H=1'
will be generated.
The ``defines`` parameter can be used to set additional C/C++ defines
to use in compiling of the action.
These defines will not be set for your code, only for the action.
The :ref:`toolchain<buildconf-taskparams-toolchain>`,
:ref:`includes<buildconf-taskparams-includes>`
and :ref:`libpath<buildconf-taskparams-libpath>` task parameters
affect this type of action.
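Example in YAML format (the header names are illustrative):
.. code-block:: yaml
    configure:
      - do: check-headers
        names : stdio.h stdlib.h
      - do: check-headers
        names : mylib.h
        mandatory : false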
``do`` = ``check-libs``
*Parameters*: ``names`` = [], ``fromtask`` = True, ``defines`` = [],
``autodefine`` = False, ``mandatory`` = True.
*Supported languages*: C, C++.
Check existence of the shared libraries from the ``libs`` task
parameter or/and from the ``names`` list.
If ``fromtask`` is set to False then names of libraries from the ``libs``
task parameter will not be used for checking.
If ``autodefine`` is set to True then a
C/C++ define like ``HAVE_LIB_LIBNAME=1`` is generated for each library.
The ``defines`` parameter can be used to set additional C/C++ defines
to use in compiling of the action.
These defines will not be set for your code, only for the action.
The :ref:`toolchain<buildconf-taskparams-toolchain>`,
:ref:`includes<buildconf-taskparams-includes>`
and :ref:`libpath<buildconf-taskparams-libpath>` task parameters
affect this type of action.
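Example in YAML format (the library name is illustrative):
.. code-block:: yaml
    configure:
      - do: check-libs
        names : pthread
        autodefine : true
        mandatory : false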
``do`` = ``check-code``
*Parameters*: ``text`` = '', ``file`` = '', ``label`` = '',
``defines`` = [], ``defname`` = '', ``execute`` = False, ``mandatory`` = True.
*Supported languages*: C, C++, D, Fortran.
Provide a piece of code for the test. Code can be provided with
the ``text`` parameter as plain text or with the ``file`` parameter
as a path to the file with the code. This path can be absolute or relative to
the :ref:`startdir<buildconf-startdir>`. At least one of the
``text`` or ``file`` parameters must be set.
The ``label`` parameter can be used to mark the message of the test.
If the ``execute`` parameter is True, the resulting binary
will be executed and its result will affect the current configuration action.
The ``defname`` parameter is a name of C/C++/D/Fortran define to set
for your code when the test is over. There is no such a name by default.
The ``defines`` parameter can be used to set additional C/C++/D/Fortran defines
to use in compiling of the test.
These defines will not be set for your code, only for the test.
The :ref:`toolchain<buildconf-taskparams-toolchain>`,
:ref:`includes<buildconf-taskparams-includes>`
and :ref:`libpath<buildconf-taskparams-libpath>` task parameters
affect this type of action.
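A short sketch in YAML format; the code and the define name are illustrative:
.. code-block:: yaml
    configure:
      - do: check-code
        label : 'check snprintf'
        defname : HAVE_SNPRINTF
        text : |
          #include <stdio.h>
          int main() { char buf[8]; return snprintf(buf, 8, "%d", 1) < 0; }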
``do`` = ``find-program``
*Parameters*: ``names``, ``paths``, ``var`` = '', ``mandatory`` = True.
*Supported languages*: all languages supported by ZenMake.
Find a program.
The ``names`` parameter must be used to specify one or more possible file
names for the program. Do not add an extension for portability.
This action does nothing if ``names`` is empty.
The ``paths`` parameter can be used to set paths to find
the program, but usually you don't need to use it because by default
the ``PATH`` system environment variable is used. Also the Windows Registry
is used on MS Windows if the program was not found.
The ``var`` parameter can be used to set
:ref:`substitution<buildconf-substitutions-vars>` variable name.
By default it's a first name from the ``names`` in upper case and without
symbols '-' and '.'.
If this name is found in environment variables, ZenMake will use it instead of
trying to find the program. Also this name can be used in parameter
:ref:`run <buildconf-taskparams-run>` like this:
in YAML format:
.. code-block:: yaml
foo.luac:
source : foo.lua
configure : [ { do: find-program, names: luac } ]
# var 'LUAC' will be set in 'find-program' if 'luac' is found.
run: '${LUAC} -s -o $(tgt) $(src)'
in Python format:
.. code-block:: python
'foo.luac' : {
'source' : 'foo.lua',
'configure' : [ dict(do = 'find-program', names = 'luac'), ],
# var 'LUAC' will be set in 'find-program' if 'luac' is found.
'run': '${LUAC} -s -o $(tgt) $(src)',
},
``do`` = ``find-file``
*Parameters*: ``names``, ``paths``, ``var`` = '', ``mandatory`` = True.
*Supported languages*: all languages supported by ZenMake.
Find a file on file system.
The ``names`` parameter must be used to specify one or more possible file
names.
This action does nothing if ``names`` is empty.
The ``paths`` parameter must be used to set paths to find
the file. Each path can be absolute or relative to
the :ref:`startdir<buildconf-startdir>`.
By default it's '.' which means :ref:`startdir<buildconf-startdir>`.
The ``var`` parameter can be used to set
:ref:`substitution<buildconf-substitutions-vars>` variable name.
By default it's a first name from the ``names`` in upper case and without
symbols '-' and '.'.
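Example in YAML format (the file name and paths are illustrative):
.. code-block:: yaml
    configure:
      - do: find-file
        names : mylib.h
        paths : /usr/include /usr/local/include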
``do`` = ``call-pyfunc``
*Parameters*: ``func``, ``mandatory`` = True.
*Supported languages*: any but only in python format of buildconf file.
Call a python function. It's another way to use a python
function as an action.
With this form you can use the ``mandatory`` parameter.
``do`` = ``pkgconfig``
*Parameters*: ``toolname`` = 'pkg-config', ``toolpaths``,
``packages``, ``cflags`` = True, ``libs`` = True, ``static`` = False,
``defnames`` = True, ``def-pkg-vars``, ``tool-atleast-version``,
``pkg-version`` = False, ``mandatory`` = True.
*Supported languages*: C, C++.
Execute ``pkg-config`` or compatible tool (for example ``pkgconf``) and
use results. The ``toolname`` parameter can be used to set name of the
tool and it is 'pkg-config' by default.
The ``toolpaths`` parameter can be used to set
paths to find the tool, but usually you don't need to use it.
The ``packages`` parameter is a required parameter to set one or more names of
packages known to pkg-config. Each package name can be used
with '>', '<', '=', '<=' or '>=' to check the version of a package.
The parameters ``cflags`` (default: True), ``libs`` (default: True) and
``static`` (default: False) are used to set the corresponding command line
parameters ``--cflags``, ``--libs``, ``--static`` for 'pkg-config' to
get compiler/linker options. If ``cflags`` or ``libs`` is True then the
obtained compiler/linker options are used by ZenMake in the build task.
The parameter ``static`` means forcing of static libraries and
it is ignored if ``cflags`` and ``libs`` are False.
The ``defnames`` parameter is used to set C/C++ defines. It can be True/False
or a ``dict``. If it's True then default names for the defines will be used.
If it's False then no defines will be used. If it's a ``dict`` then its keys
must be names of used packages and its values must be dicts with the keys ``have``
and ``version`` and names of defines as values. By default it's True.
Each package can have a 'HAVE_PKGNAME' and a 'PKGNAME_VERSION' define,
where PKGNAME is the package name in upper case; these are the default patterns.
But you can set custom defines. The name 'PKGNAME_VERSION' is used only
if ``pkg-version`` is True.
The ``pkg-version`` parameter can be used to get a 'define' with the version of
a package. It can be True or False. If it's True then the 'define' will be set;
if it's False then the corresponding 'define' will not be set. It's False by default.
This parameter will not set the 'define' if ``defnames`` is False.
The ``def-pkg-vars`` parameter can be used to set custom values of variables
for 'pkg-config'. It must be a ``dict`` where the keys and values are the names and
values of these variables. ZenMake uses the command line option ``--define-variable``
for this parameter. It's empty by default.
The ``tool-atleast-version`` parameter can be used to check minimum version
of selected tool (pkg-config).
Examples in YAML format:
.. code-block:: yaml
# Elements like 'tasks' and other task params are skipped
# ZenMake will check package 'gtk+-3.0' and set define 'HAVE_GTK_3_0=1'
configure:
- do: pkgconfig
packages: gtk+-3.0
# ZenMake will check packages 'gtk+-3.0' and 'pango' and
# will check 'gtk+-3.0' version > 1 and <= 100.
# Before checking of packages ZenMake will check that 'pkg-config' version
# is greater than 0.1.
# Also it will set defines 'WE_HAVE_GTK3=1', 'HAVE_PANGO=1',
# GTK3_VER="gtk3-ver" and LIBPANGO_VER="pango-ver" where 'gtk3-ver'
# and 'pango-ver' are values of current versions of
# 'gtk+-3.0' and 'pango'.
configure:
- do: pkgconfig
packages: 'gtk+-3.0 > 1 pango gtk+-3.0 <= 100'
tool-atleast-version: '0.1'
pkg-version: true
defnames:
gtk+-3.0: { have: WE_HAVE_GTK3, version: GTK3_VER }
pango: { version: LIBPANGO_VER }
Examples in Python format:
.. code-block:: python
# Elements like 'tasks' and other task params are skipped
# ZenMake will check package 'gtk+-3.0' and set define 'HAVE_GTK_3_0=1'
'configure' : [
{ 'do' : 'pkgconfig', 'packages' : 'gtk+-3.0' },
]
# ZenMake will check packages 'gtk+-3.0' and 'pango' and
# will check 'gtk+-3.0' version > 1 and <= 100.
# Before checking of packages ZenMake will check that 'pkg-config' version
# is greater than 0.1.
# Also it will set defines 'WE_HAVE_GTK3=1', 'HAVE_PANGO=1',
# GTK3_VER="gtk3-ver" and LIBPANGO_VER="pango-ver" where 'gtk3-ver'
# and 'pango-ver' are values of current versions of
# 'gtk+-3.0' and 'pango'.
'configure' : [
{
'do' : 'pkgconfig',
'packages' : 'gtk+-3.0 > 1 pango gtk+-3.0 <= 100 ',
'tool-atleast-version' : '0.1',
'pkg-version' : True,
'defnames' : {
'gtk+-3.0' : { 'have' : 'WE_HAVE_GTK3', 'version': 'GTK3_VER' },
'pango' : { 'version': 'LIBPANGO_VER' },
},
},
],
``do`` = ``toolconfig``
*Parameters*: ``toolname`` = 'pkg-config', ``toolpaths``,
``args`` = '\-\-cflags \-\-libs', ``static`` = False,
``parse-as`` = 'flags-libs', ``defname``, ``var``, ``msg``,
``mandatory`` = True.
*Supported languages*: any.
Execute any ``*-config`` tool. It can be pkg-config, sdl-config,
sdl2-config, mpicc, etc.
ZenMake doesn't know which tool will be used and therefore this action
can be used in any task including standalone runcmd task.
The ``toolname`` parameter must be used to set name of such a tool.
The ``toolpaths`` parameter can be used to set
paths to find the tool, but usually you don't need to use it.
The ``args`` parameter can be used to set command line arguments. By default
it is '\-\-cflags \-\-libs'.
The ``static`` parameter means forcing of static libraries and
it is ignored if ``parse-as`` is not set to 'flags-libs'.
The ``parse-as`` parameter describes how to parse the output. If it is 'none'
then the output will not be parsed. If it is 'flags-libs' then ZenMake will
try to parse the output for compiler/linker options; note that ZenMake knows how
to parse C/C++ compiler/linker options only, other languages are not
supported for this value. And if it is 'entire'
then the output will not be parsed, but the output value will be assigned to the
define name from ``defname`` and/or to the ``var`` if they are defined.
By default ``parse-as`` is set to 'flags-libs'.
The ``defname`` parameter can be used to set a C/C++ define. If ``parse-as``
is set to 'flags-libs' then ZenMake will derive the define name from the
value of the ``toolname``, discarding the '-config' part if it exists. For example,
if the ``toolname`` is 'sdl2-config' then 'HAVE_SDL2=1' will be used.
For other values of ``parse-as`` there is no default value for ``defname``,
but you can set some custom define name.
The ``var`` parameter can be used to set
:ref:`substitution<buildconf-substitutions-vars>` variable name. This parameter
is ignored if value of ``parse-as`` is not 'entire'.
By default it is not defined.
The ``msg`` parameter can be used to set custom message for this action.
Examples in YAML format:
.. code-block:: yaml
tasks:
myapp:
# other task params are skipped
configure:
# ZenMake will get compiler/linker options for SDL2 and
# set define to 'HAVE_SDL2=1'
- do: toolconfig
toolname: sdl2-config
# ZenMake will get SDL2 version and put it in the define 'SDL2_VERSION'
- do: toolconfig
toolname: sdl2-config
msg: Getting SDL2 version
args: --version
parse-as: entire
defname: SDL2_VERSION
Examples in Python format:
.. code-block:: python
tasks = {
'myapp' : {
# other task params are skipped
'configure' : [
# ZenMake will get compiler/linker options for SDL2 and
# set define to 'HAVE_SDL2=1'
{ 'do' : 'toolconfig', 'toolname' : 'sdl2-config' },
# ZenMake will get SDL2 version and put it in the define 'SDL2_VERSION'
{
'do' : 'toolconfig',
'toolname' : 'sdl2-config',
'msg' : 'Getting SDL2 version',
'args' : '--version',
'parse-as' : 'entire',
'defname' : 'SDL2_VERSION',
},
]
},
}
``do`` = ``write-config-header``
*Parameters*: ``file`` = '', ``guard`` = '', ``remove-defines`` = True,
``mandatory`` = True.
*Supported languages*: C, C++.
Write a configuration header in the build directory after some
configuration actions.
By default the file name is ``<task name>_config.h``.
The ``guard`` parameter can be used to change the C/C++ header guard.
The ``remove-defines`` parameter means removing the defines after they are
added into the configuration header file; it is True by default.
In your C/C++ code you can just include this file like that:
.. code-block:: c++
#include "yourconfig.h"
You can override file name by using the ``file`` parameter.
``do`` = ``parallel``
*Parameters*: ``actions``, ``tryall`` = False, ``mandatory`` = True.
*Supported languages*: all languages supported by ZenMake.
Run configuration actions from the ``actions`` parameter
in parallel. Not all types of actions are supported.
Allowed actions are ``check-headers``, ``check-libs``,
``check-code`` and ``call-pyfunc``.
If you use ``call-pyfunc`` in ``actions`` you should understand that
the python function must be thread safe. If you don't use any shared data
in such a function you don't need to worry about concurrency.
If the ``tryall`` parameter is True then all configuration actions
from the ``actions`` parameter will be executed despite errors.
By default ``tryall`` is False.
You can control order of the actions by using the parameters ``before``
and ``after`` with the parameter ``id``. For example, one action can have
``id = 'base'`` and then another action can have ``after = 'base'``.
Any configuration action has the ``mandatory`` parameter, which is True by default.
It also takes effect for any action inside the ``actions``
of parallel actions and for the whole bundle of parallel actions as well.
All results (defines and some other values) of configuration actions
(excluding ``call-pyfunc``) in one build
task can be exported to all dependent build tasks.
Use :ref:`export<buildconf-taskparams-export>` with the name `config-results`
for this ability. It allows you to avoid writing the same config actions in
dependent tasks and reduces configuration time.
Example in python format:
.. code-block:: python
def check(**kwargs):
buildtype = kwargs['buildtype']
# some checking
return True
tasks = {
'myapp' : {
'features' : 'cxxshlib',
'libs' : ['m', 'rt'],
# ...
'configure' : [
# do checking in function 'check'
check,
# Check libs from param 'libs'
# { 'do' : 'check-libs' },
{ 'do' : 'check-headers', 'names' : 'cstdio', 'mandatory' : True },
{ 'do' : 'check-headers', 'names' : 'cstddef stdint.h', 'mandatory' : False },
# Each lib will have define 'HAVE_LIB_<LIBNAME>' if autodefine = True
{ 'do' : 'check-libs', 'names' : 'pthread', 'autodefine' : True,
'mandatory' : False },
{ 'do' : 'find-program', 'names' : 'python' },
{ 'do' : 'parallel',
'actions' : [
{ 'do' : 'check-libs', 'id' : 'syslibs' },
{ 'do' : 'check-headers', 'names' : 'stdlib.h iostream' },
{ 'do' : 'check-headers', 'names' : 'stdlibasd.h', 'mandatory' : False },
{ 'do' : 'check-headers', 'names' : 'string', 'after' : 'syslibs' },
],
'mandatory' : False,
#'tryall' : True,
},
#{ 'do' : 'write-config-header', 'file' : 'myapp_config.h' }
{ 'do' : 'write-config-header' },
],
},
}
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/config-actions.rst | config-actions.rst |
.. include:: global.rst.inc
.. _languages:
Supported languages
===================
C/C++
-----------
C and C++ are the main languages that ZenMake supports,
and most of the ZenMake features were made for these languages.
Supported compilers:
- C:
- GCC C (gcc): regularly tested
- CLANG C from LLVM (clang): regularly tested
- Microsoft Visual C/C++ (msvc): regularly tested
- Intel C/C++ (icc): should work but not tested
- IBM XL C/C++ (xlc): should work but not tested
- Oracle/Sun C (suncc): should work but not tested
- IRIX/MIPSpro C (irixcc): may be works, not tested
- C++:
- GCC C++ (g++): regularly tested
- CLANG C++ from LLVM (clang++): regularly tested
- Microsoft Visual C/C++ (msvc): regularly tested
- Intel C/C++ (icpc): should work but not tested
- IBM XL C/C++ (xlc++): should work but not tested
- Oracle/Sun C++ (sunc++): should work but not tested
Examples of projects can be found in the directory ``c`` and ``cpp``
in the repository `here <repo_demo_projects_>`_.
Assembler
-----------
ZenMake supports gas (GNU Assembler) and has experimental support for nasm/yasm.
Examples of projects can be found in the directory ``asm``
in the repository `here <repo_demo_projects_>`_.
D
-----------
ZenMake supports compiling for D language. You can configure and build D code
like C/C++ code but there are some limits:
- There is no support for MS Windows yet.
- There is no support for D package manager DUB.
While nobody uses ZenMake for D, there are no plans to resolve these issues.
Supported compilers:
- DMD Compiler - official D compiler (dmd): regularly tested
- GCC D Compiler (gdc): regularly tested
- LLVM D compiler (ldc2): regularly tested
Examples of projects can be found in the directory ``d``
in the repository `here <repo_demo_projects_>`_.
FORTRAN
-----------
ZenMake supports compiling for Fortran language.
Supported compilers:
- GCC Fortran Compiler (gfortran): regularly tested
- Intel Fortran Compiler (ifort): should work but not tested
Examples of projects can be found in the directory ``fortran``
in the repository `here <repo_demo_projects_>`_.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/langs.rst | langs.rst |
.. include:: global.rst.inc
.. highlight:: python
.. _buildconf:
Build config
============
ZenMake uses build configuration file with name ``buildconf.py`` or
``buildconf.yaml``/``buildconf.yml``. First variant is a regular python file and second one is
an YAML file. ZenMake doesn't use both files in one directory at the same time.
If both files exist in one directory then only ``buildconf.py`` will be used.
Common name ``buildconf`` is used in this manual.
The format for both config files is the same. YAML variant is a little more
readable but in python variant you can add a custom python code if you wish.
Simplified scheme of buildconf is:
.. parsed-literal::
startdir_ = path
buildroot_ = path
realbuildroot_ = path
project_ = { ... }
:ref:`buildconf-general` = { ... }
cliopts_ = { ... }
conditions_ = { ... }
tasks_ = { name: :ref:`task parameters<buildconf-taskparams>` }
buildtypes_ = { name: :ref:`task parameters<buildconf-taskparams>` }
toolchains_ = { name: parameters }
byfilter_ = [ { for: {...}, set: :ref:`task parameters<buildconf-taskparams>` }, ... ]
:ref:`buildconf-subdirs` = []
edeps_ = { ... }
Also see :ref:`syntactic sugar<buildconf-syntactic-sugar>`.
.. _buildconf-dict-def:
Where symbols '{}' mean an associative array/dictionary and symbols '[]'
mean a list. In python notation '{}' is known as dictionary. In some other
languages it's called an associative array including YAML (Of course YAML is not
programming language but it's markup language). For shortness it's called
a ``dict`` here.
Not all variables are required in the scheme above but buildconf cannot be
empty. All variables have reserved names and they all are described here.
Other names in buildconf are just ignored by ZenMake
(excluding :ref:`substitution variables<buildconf-substitutions>`) if present and it means
they can be used for some custom purposes.
.. note::
**About paths in general.**
You can use native paths but it's recommended to use wherever possible
POSIX paths (Symbol ``/`` is used as a separator in a path).
With POSIX paths you will ensure the same paths on
different platforms/operating systems. POSIX paths will be
converted into native paths automatically, but not vice versa.
For example, path 'my/path' will be converted into 'my\\path' on Windows.
Also it's recommended to use relative paths wherever possible.
.. warning::
``Windows only``: do NOT use short filename notation (8.3 filename) for
paths in buildconf files. It can cause unexpected errors.
Below is the detailed description of each buildconf variable.
.. _buildconf-startdir:
startdir
""""""""
A start path for all paths in a buildconf.
It is ``.`` by default. The path can be absolute or relative to directory
where current buildconf file is located. It means that by default all other
relative paths in the current buildconf file are considered as the paths
relative to directory with the current buildconf file.
But you can change this by setting a different value to this variable.
.. _buildconf-buildroot:
buildroot
"""""""""
A path to the root of a project build directory. By default it is
directory 'build' in the directory with the top-level buildconf file of
the project. Path can be absolute or relative to the startdir_.
It is important to be able to remove the build
directory safely, so it should never be given as ``.`` or ``..``.
.. note::
If you change value of ``buildroot`` with already using/existing
build directory then ZenMake will not touch previous build directory.
You can remove previous build directory manually or run
command ``distclean`` before changing of ``buildroot``.
ZenMake cannot do it because it stores all
meta information in current build directory and if you change this
directory it will lose all such an information.
This can be changed in the future by storing extra information in some
other place like user home directory but now it is.
.. _buildconf-realbuildroot:
realbuildroot
"""""""""""""
A path to the real root of a project build directory and by default it is
equal to value of ``buildroot``. It is optional parameter and if
``realbuildroot`` has different value from ``buildroot`` then
``buildroot`` will be symlink to ``realbuildroot``. Using ``realbuildroot``
makes sense mostly on linux where '/tmp' is usually on tmpfs filesystem
nowadays and it can used to make building in memory. Such a way can improve
speed of building. Note that on Windows OS the process of ZenMake needs to be
started with enabled "Create symbolic links" privilege and usual user
doesn't have a such privilege.
Path can be absolute or relative to the startdir_.
It is important to be able to remove the build directory safely,
so it should never be given as ``.`` or ``..``.
.. _buildconf-project:
project
"""""""
A `dict <buildconf-dict-def_>`_ with some parameters for the project.
Supported values:
:name: The name of the project. It's name of the top-level startdir_
directory by default.
:version: The version of the project. It's empty by default.
It's used as default value for
:ref:`ver-num<buildconf-taskparams-ver-num>` field if not empty.
.. _buildconf-general:
general
""""""""
A `dict <buildconf-dict-def_>`_ array with some general features.
Supported values:
:autoconfig: Execute the command ``configure`` automatically in
the command ``build`` if it's necessary.
It's ``True`` by default. Usually you don't need to change
this value.
:monitor-files: Set extra file paths to check changes in them. You can use
additional files with your buildconf file(s). For example
it can be extra python module with some tools. But in this
case ZenMake doesn't know about such files when it checks
buildconf file(s) for changes to detect if it must call
command ``configure`` for feature ``autoconfig``. You
can add such files to this variable and ZenMake will check
them for changes as it does so for regular buildconf file(s).
If paths contain spaces and all these paths are listed
in one string then each such a path must be in quotes.
:hash-algo: Set hash algorithm to use in ZenMake. It can be ``sha1`` or
``md5``. By default ZenMake uses sha1 algorithm to control
changes of config/built files and for some other things.
Sha1 has much less collisions than md5
and that's why it's used by default. Modern CPUs often has support
for this algorithm and sha1 show better or almost the same
performance than md5 in this cases. But in some cases md5 can be
faster and you can set here this variant. However, don't expect big
difference in performance of ZenMake. Also, if a rare
"FIPS compliant" build of Python is used it's always sha1 anyway.
:db-format: Set format for internal ZenMake db/cache files.
Use one of possible values: ``py``, ``pickle``, ``msgpack``.
The value ``py`` means text file with python syntax. It is not fastest
format but it is human readable one.
The value ``pickle`` means python pickle binary format. It has
good performance and python always supports this format.
The value ``msgpack`` means msgpack binary
format by using python module ``msgpack``. Using of this format can
decrease ZenMake overhead in building of some big projects because
it has the best performance among all supported formats.
If the package ``msgpack`` doesn't exist in the current system then
the ``pickle`` value will be used.
Note: ZenMake doesn't try to install package ``msgpack``.
This package must be installed in some other way.
The default value is ``pickle``.
:provide-edep-targets: Provide target files of
:ref:`external dependencies<dependencies-external>`
in the :ref:`buildroot<buildconf-buildroot>` directory.
It is useful to run built files from the build directory without
the need to use such a thing as LD_LIBRARY_PATH for each dependency.
Only existing and used target files are provided.
Static libraries are also ignored because they are not needed
to run built files.
On Windows ZenMake copies these files while on other OS
(Linux, MacOS, etc) it makes symlinks.
It's ``False`` by default.
:build-work-dir-name: Set a name of work directory which is used mostly for
object files during compilation. This directory seperates
resulting target files from other files in a buildtype directory to
avoid file/directory conflicts. Usually you don't need to set this
parameter until some target name has conflict with default value of
this parameter.
The default value is ``@bld``.
.. _buildconf-cliopts:
cliopts
""""""""
A `dict <buildconf-dict-def_>`_ array with default values for command
line options. It can be any existing command line option that ZenMake has.
If you want to set an option for selected commands then you can set it in
the format of a `dict <buildconf-dict-def_>`_ where key is a name of
specific command or special value 'any' which means any command.
If some command doesn't have selected option then it will be ignored.
Example in YAML format:
.. code-block:: yaml
cliopts:
verbose: 1
jobs : { build : 4 }
progress :
any: false
build: true
.. note::
Selected command here is a command that is used on command line.
It means if you set an option for the ``build`` command and zenmake calls
the ``configure`` command before this command by itself then this option will
be applied for both ``configure`` and ``build`` commands. In other words
it is like you are running this command with this option on command line.
.. _buildconf-conditions:
conditions
"""""""""""
A `dict <buildconf-dict-def_>`_ with conditions for
:ref:`selectable parameters<buildconf-select>`.
.. _buildconf-tasks:
tasks
"""""
A `dict <buildconf-dict-def_>`_ with build tasks. Each task has own
unique name and :ref:`parameters<buildconf-taskparams>`. Name of task can
be used as dependency id for other build tasks. Also this name is used as a
base for resulting target file name if parameter ``target`` is not set in
:ref:`task parameters<buildconf-taskparams>`.
In this variable you can set up build parameters particularly for each build task.
Example in YAML format:
.. code-block:: yaml
tasks:
mylib :
# some task parameters
myexe :
# some task parameters
use : mylib
.. note::
Parameters in this variable can be overridden by parameters in
buildtypes_ and/or byfilter_.
.. note::
Name of a task cannot contain symbol ``:``. You can use
parameter ``target`` if you want to have this symbol in
resulting target file names.
.. _buildconf-buildtypes:
buildtypes
""""""""""
A `dict <buildconf-dict-def_>`_ with build types like ``debug``, ``release``,
``debug-gcc`` and so on. Here is also a special value with name ``default``
that is used to set default build type if nothing is specified. Names of
these build types are just names, they can be any name but not ``default``.
Also you should remember that these names are used as
directory names. So don't use
incorrect symbols if you don't want a problem with it.
This variable can be empty or absent. In this case current buildtype is
always just an empty string.
Possible parameters for each build type are described in
:ref:`task parameters<buildconf-taskparams>`.
Special value ``default`` must be a string with the name of one of the
build types or a `dict <buildconf-dict-def_>`_ where keys are supported name
of the host operating system and values are strings with the names of one of the
build types. Special key '_' or 'no-match' can be used in the ``default`` to
define a value that will be used if the name of the current host operating system
is not found among the keys in the ``default``.
Valid host operating system names: ``linux``, ``windows``, ``darwin``, ``freebsd``,
``openbsd``, ``sunos``, ``cygwin``, ``msys``, ``riscos``, ``atheos``,
``os2``, ``os2emx``, ``hp-ux``, ``hpux``, ``aix``, ``irix``.
.. note::
Only ``linux``, ``windows`` and ``darwin`` are tested.
Examples in YAML format:
.. code-block:: yaml
buildtypes:
debug : { toolchain: auto-c++ }
debug-gcc : { toolchain: g++, cxxflags: -fPIC -O0 -g }
release-gcc : { toolchain: g++, cxxflags: -fPIC -O2 }
debug-clang : { toolchain: clang++, cxxflags: -fPIC -O0 -g }
release-clang: { toolchain: clang++, cxxflags: -fPIC -O2 }
debug-msvc : { toolchain: msvc, cxxflags: /Od /EHsc }
release-msvc : { toolchain: msvc, cxxflags: /O2 /EHsc }
default: debug
buildtypes:
debug:
toolchain.select:
default: g++
darwin: clang++
windows: msvc
cxxflags.select:
default : -O0 -g
msvc : /Od /EHsc
release:
toolchain.select:
default: g++
darwin: clang++
windows: msvc
cxxflags.select:
default : -O2
msvc : /O2 /EHsc
default: debug
buildtypes:
debug-gcc : { cxxflags: -O0 -g }
release-gcc : { cxxflags: -O2 }
debug-clang : { cxxflags: -O0 -g }
release-clang: { cxxflags: -O2 }
debug-msvc : { cxxflags: /Od /EHsc }
release-msvc : { cxxflags: /O2 /EHsc }
default:
_: debug-gcc
linux: debug-gcc
darwin: debug-clang
windows: debug-msvc
.. note::
Parameters in this variable override corresponding parameters in tasks_ and
can be overridden by parameters in byfilter_.
.. _buildconf-toolchains:
toolchains
""""""""""
A `dict <buildconf-dict-def_>`_ with custom toolchain setups. It's useful
for simple cross builds for example, or for custom settings for existing
toolchains. Each value has unique name and parameters. Parameters are also
dict with names of environment variables and
special name ``kind`` that is used to specify the type of
toolchain/compiler. Environment variables are usually such variables as
``CC``, ``CXX``, ``AR``, etc that are used to specify
name or path to existing toolchain/compiler. Path can be absolute or
relative to the startdir_. Also such variables as ``CFLAGS``,
``CXXFLAGS``, etc can be set here.
Names of toolchains from this parameter can be used as a value for the
``toolchain`` in :ref:`task parameters<buildconf-taskparams>`.
Example in YAML format:
.. code-block:: yaml
toolchains:
custom-g++:
kind : auto-c++
CXX : custom-toolchain/gccemu/g++
AR : custom-toolchain/gccemu/ar
custom-clang++:
kind : clang++
CXX : custom-toolchain/clangemu/clang++
AR : custom-toolchain/clangemu/llvm-ar
g++:
LINKFLAGS : -Wl,--as-needed
.. _buildconf-byfilter:
byfilter
""""""""
This variable describes extra/alternative way to set up build tasks.
It's a list of `dicts <buildconf-dict-def_>`_ with attributes
``set`` and ``for``, ``not-for`` and/or ``if``.
Attributes ``for``/``not-for``/``if`` describe conditions for parameters
in attribute ``set``, that is, a filter to set some build task parameters.
The attribute ``for`` is like a ``if a`` and the attribute
``not-for`` is like a ``if not b`` where ``a`` and ``b`` are some conditions.
And they are like a ``if a and if not b`` when both of them exist in the
same item. The attribute ``not-for`` has higher priority in the case of the
same condition in the both of them.
Since v0.11 ZenMake supports ``if`` attribute where you can set a string with
python like expression.
The ``for``/``not-for`` are dicts and ``if`` is an expression.
In ``for``/``not-for``/``if`` you can use such variables as dict keys in
``for``/``not-for`` and as keywords within an expression:
:task: Build task name or list of build task names.
It can be existing task(s) from tasks_ or new (only in ``for``).
:buildtype: Build type or list of build types.
It can be existing build type(s) from buildtypes_ or new (only in ``for``).
:platform: Name of a host platform/operating system or list of them.
Valid values are the same as for ``default`` in buildtypes_.
The ``if`` is a real python expression with some builtin functions.
You can use standard python operators as '(', ')', 'and', 'or', 'not', '==',
'!=' and 'in'. ZenMake supports a little set of extensions as well:
=========================== ==========================================================
Name Description
=========================== ==========================================================
true The same as python 'True'.
false The same as python 'False'.
startswith(str, prefix) Returns true if 'str' starts with the specified 'prefix'.
endswith(str, prefix) Returns true if 'str' ends with the specified 'suffix'.
=========================== ==========================================================
The attribute ``set`` has value of the :ref:`task parameters<buildconf-taskparams>`.
Other features:
- If some key parameter is not specified in ``for``/``not-for``/``if`` it means that
this is for all possible values of this kind of condition. For example
if it has no ``task`` it means 'for all existing tasks'.
Special word ``all`` (without any other parameters) can be used to indicate
that current item must be applied to all build tasks.
Empty dict (i.e. ``{}``) in ``for``/``not-for`` can be used for the same reason as well.
- Variable 'byfilter' overrides all matched values defined in
tasks_ and buildtypes_.
- Items in ``set`` with the same names and the same conditions in
``for``/``not-for``/``if`` override items defined before.
- If ``for``/``not-for`` and ``if`` are used for the same ``set`` then
result will be the intersection of resulting sets from ``for``/``not-for`` and ``if``.
- When ``set`` is empty or not defined it does nothing.
.. note::
ZenMake applies every item in the ``byfilter`` in the order as they were written.
It's possible to use ``byfilter`` without tasks_ and buildtypes_.
Example in YAML format:
.. code-block:: yaml
GCC_BASE_CXXFLAGS: -std=c++11 -fPIC
buildtypes:
debug-gcc : { cxxflags: $GCC_BASE_CXXFLAGS -O0 -g }
release-gcc : { cxxflags: $GCC_BASE_CXXFLAGS -O2 }
debug-clang : { cxxflags: $GCC_BASE_CXXFLAGS -O0 -g }
release-clang: { cxxflags: $GCC_BASE_CXXFLAGS -O2 }
debug-msvc : { cxxflags: /Od /EHsc }
release-msvc : { cxxflags: /O2 /EHsc }
default:
_: debug-gcc
linux: debug-gcc
darwin: debug-clang
windows: debug-msvc
byfilter:
- for: all
set: { includes: '.', rpath : '.', }
- for: { task: shlib shlibmain }
set: { features: cxxshlib }
- for: { buildtype: debug-gcc release-gcc, platform: linux }
set:
toolchain: g++
linkflags: -Wl,--as-needed
- for: { buildtype: release-gcc }
not-for : { platform : windows }
set: { cxxflags: -fPIC -O3 }
- for: { buildtype: [debug-clang, release-clang], platform: linux darwin }
set: { toolchain: clang++ }
- if: endswith(buildtype, '-gcc') and platform == 'linux'
set:
toolchain: g++
linkflags: -Wl,--as-needed
- if: buildtype == 'release-gcc' and platform == 'linux'
set:
cxxflags: $GCC_BASE_CXXFLAGS -O3
- if: endswith(buildtype, '-clang') and platform in ('linux', 'darwin')
set:
toolchain: clang++
- if: endswith(buildtype, '-msvc') and platform == 'windows'
set:
toolchain: msvc
.. note::
Parameters in this variable override corresponding parameters in tasks_
and in buildtypes_.
.. _buildconf-subdirs:
subdirs
"""""""
This variable controls including buildconf files from other sub directories
of the project.
- If it is list of paths then ZenMake will try to use this list as paths
to sub directories with the buildconf files and will use all found ones.
Paths can be absolute or relative to the :ref:`startdir<buildconf-startdir>`.
- If it is an empty list or just absent at all
then ZenMake will not try to use any
sub directories of the project to find buildconf files.
Example in Python format:
.. code-block:: python
subdirs = [
'libs/core',
'libs/engine',
'main',
]
Example in YAML format:
.. code-block:: yaml
subdirs:
- libs/core
- libs/engine
- main
See some details :ref:`here<dependencies-subdirs>`.
.. _buildconf-edeps:
edeps
""""""""""""
A `dict <buildconf-dict-def_>`_ with configurations of external non-system
dependencies. Each such a dependency has own unique name which can be used in
task parameter :ref:`use<buildconf-taskparams-use>`.
See full description of parameters :ref:`here<buildconf-edep-params>`.
Description of external dependencies is :ref:`here<dependencies-external>`.
.. note::
More examples of buildconf files can be found in repository
`here <repo_demo_projects_>`_.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/buildconf.rst | buildconf.rst |
.. include:: global.rst.inc
.. highlight:: console
.. _perftips:
Performance tips
================
Here are some tips which can help to improve performance of ZenMake in some cases.
Hash algorithm
"""""""""""""""""""""
By default ZenMake uses sha1 algorithm to control changes of config/built files
and for some other things. Modern CPUs often have support
for this algorithm and sha1 shows better or almost the same
performance as md5 in this cases. But in some other cases md5 can be
faster and you can switch to use this hash algorithm. However, don't expect a big
difference in performance of ZenMake.
It's recommended to check if it really has positive effect before using of md5.
To change hash algorithm you can use parameter ``hash-algo`` in buildconf
:ref:`general features<buildconf-general>`.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/perftips.rst | perftips.rst |
.. include:: global.rst.inc
.. highlight:: bash
.. _quickstart:
Quickstart guide
================
To use ZenMake you need :ref:`ZenMake<installation>` and
:ref:`buildconf<buildconf>` file in the root of your project.
Let's consider an example with this structure:
.. code-block:: shell
testproject
├── buildconf.yml
├── prog
│ └── test.cpp
└── shlib
├── util.cpp
└── util.h
For this project ``buildconf.yml`` can be like that:
.. code-block:: yaml
:linenos:
tasks:
util :
features : cxxshlib
source : 'shlib/**/*.cpp'
includes : '.'
program :
features : cxxprogram
source : 'prog/**/*.cpp'
includes : '.'
use : util
buildtypes:
debug :
toolchain : clang++
cxxflags : -O0 -g
release :
toolchain : g++
cxxflags : -O2
default : debug
===== =======================================================================
Lines Description
===== =======================================================================
1 Section with build tasks
2,6 Names of build tasks. By default they are used as target names.
Resulting target names will be adjusted depending on a platform.
For example, on Windows 'program' will result to 'program.exe'.
3 Mark build task as a C++ shared library.
4 Specify all \*.cpp files in the directory 'shlib' recursively.
5,9 Specify the path for C/C++ headers relative to the project root directory.
In this example, this parameter is optional as ZenMake adds the
project root directory itself. But it's an example.
7 Mark build task as a C++ executable.
8 Specify all \*.cpp files in the directory 'prog' recursively.
10 Specify task 'util' as dependency to task 'program'.
12 Section with build types.
13,16 Names of build types. They can be almost any.
14 Specify Clang C++ compiler for debug.
15 Specify C++ compiler flags for debug.
17 Specify g++ compiler (from GCC) for release.
18 Specify C++ compiler flags for release.
19 Special case: specify default build type that is used when no build
type was specified for ZenMake command.
===== =======================================================================
In case of using python config the file ``buildconf.py`` with the same values as above
would look like this:
.. code-block:: python
tasks = {
'util' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
'includes' : '.',
},
'program' : {
'features' : 'cxxprogram',
'source' : 'prog/**/*.cpp',
'includes' : '.',
'use' : 'util',
},
}
buildtypes = {
'debug' : {
'toolchain' : 'clang++',
'cxxflags' : '-O0 -g',
},
'release' : {
'toolchain' : 'g++',
'cxxflags' : '-O2',
},
'default' : 'debug',
}
Once you have the config, run ``zenmake`` in the root of the project and
ZenMake does the build:
.. code-block:: console
$ zenmake
* Project name: 'testproject'
* Build type: 'debug'
Setting top to : /tmp/testproject
Setting out to : /tmp/testproject/build
Checking for 'clang++' : /usr/lib/llvm/11/bin/clang++
[1/4] Compiling shlib/util.cpp
[2/4] Compiling prog/test.cpp
[3/4] Linking build/debug/libutil.so
[4/4] Linking build/debug/program
'build' finished successfully (0.531s)
Running ZenMake without any parameters in a directory with ``buildconf.py`` or
``buildconf.yml`` is the same as running ``zenmake build``. Otherwise it's
the same as ``zenmake help``.
Get the list of all commands with a short description using
``zenmake help`` or ``zenmake --help``. To get help on selected command you
can use ``zenmake help <selected command>`` or
``zenmake <selected command> --help``
For example to build ``release`` of the project above such a command can
be used:
.. code-block:: console
$ zenmake build -b release
* Project name: 'testproject'
* Build type: 'release'
Setting top to : /tmp/testproject
Setting out to : /tmp/testproject/build
Checking for 'g++' : /usr/bin/g++
[1/4] Compiling shlib/util.cpp
[2/4] Compiling prog/test.cpp
[3/4] Linking build/release/libutil.so
[4/4] Linking build/release/program
'build' finished successfully (0.498s)
Here is some possible variant of extended version of the config from above:
.. code-block:: yaml
:emphasize-lines: 6,22-25
tasks:
util :
features : cxxshlib
source : 'shlib/**/*.cpp'
includes : '.'
libs : boost_timer # <-- Add the boost timer library as dependency
program :
features : cxxprogram
source : 'prog/**/*.cpp'
includes : '.'
use : util
buildtypes:
debug :
toolchain : clang++
cxxflags : -O0 -g
release :
toolchain : g++
cxxflags : -O2
default : debug
configure:
- do: check-headers
names : cstdio iostream # <-- Check C++ 'cstdio' and 'iostream' headers
- do: check-libs # <-- Check all libraries from the 'libs' parameter
One of the effective and simple ways to learn something is to use
real examples. So it is recommended to look at examples in ``demos`` directory
which can be found in the repository `here <repo_demo_projects_>`_.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/quickstart.rst | quickstart.rst |
.. include:: global.rst.inc
.. highlight:: none
.. _why:
Why?
============
Short answer: because I could and wanted.
Long answer is below.
https://news.ycombinator.com/item?id=18789162
::
Cool. One more "new" build system...
Yes, I know, we already have a lot of them. I decided to create this project
because I couldn’t find a build tool for Linux which is quick and easy to use,
flexible, ready to use, with declarative configuration, without the need to learn one more
special language and suitable for my needs.
I know about lots of build systems and I have tried some of them.
Well, a little story of the project. In 2010 year I developed a build system in a
company where I was working that time. It was a build system based on Waf and it was
used successfully for linux projects several years. But that system had a lot of
internal problems and I wanted to remake it from scratch.
And in `2013 <https://bitbucket.org/pustotnik/zenmake.old/src/master/>`_
year I tried to begin a new project. But I had no time to develop it at that time.
Then, in 2019 year I decided to make some own opensorce project and was selecting
a build system for my project. I was considering only opensource cross-platform build
systems that can build C/C++ projects on GNU/Linux. Firstly I tried CMake, then
Meson and Waf. Also I was looking at some other build systems like Bazel.
Eventually, I concluded that I had to try to make my own build tool.
I would do it mostly for myself, but I would be glad if my tool was useful
for others.
..
All text below was hiding and TAB was added
Below there is very small comparison of ZenMake with some of existing popular
build systems. Remember, it's not complete technical comparison.
**CMake**
- ZenMake uses YAML and/or python language for build config files. CMake uses own language.
- ZenMake uses mostly declarative syntax. CMake uses imperative syntax.
- ZenMake can be used as embedded build system or as installed in an OS build system.
CMake must be installed in an OS.
- ZenMake supports gathering of source files with wildcards.
CMake doesn't recommend to use wildcards due to a problem::
We do not recommend using GLOB to collect a list of source files from your
source tree. If no CMakeLists.txt file changes when a source is added or
removed then the generated build system cannot know when to ask CMake to regenerate.
**Meson**
- ZenMake uses YAML and/or python language for build config files.
Meson uses some dialect of python language.
- ZenMake can be used as embedded build system or as installed in an OS build system.
Meson must be installed in an OS.
- ZenMake supports gathering of source files with wildcards.
Meson doesn't support wildcards for performance reasons:
https://mesonbuild.com/FAQ.html#why-cant-i-specify-target-files-with-a-wildcard
**Bazel**
- ZenMake uses YAML and/or python language for build config files.
Bazel uses some dialect of python language with name 'Starlark'.
- ZenMake can be used as embedded build system or as installed in an OS build system.
Bazel must be installed in an OS.
- Bazel is large build system and therefore it almost is not used for
opensorce projects. ZenMake is small and has minimum dependencies.
**Waf**
- ZenMake uses Waf as internal framework.
- ZenMake uses YAML and/or python language for build config files.
Waf uses python language.
- Waf is very flexible but build scripts for Waf are often not easy to create/use
if you don't know Waf.
Build configs for ZenMake are easier to create/use.
- ZenMake can be used as embedded build system or as installed in an OS build system.
Waf is not considered for installing in an OS by the author of Waf.
There are many other build systems like Make, Autotools, SCons, xmake, etc.
But I was lazy to make comparison for all existing build systems.
Anyway ZenMake is very young project and has no large number of features. | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/why.rst | why.rst |
.. include:: global.rst.inc
.. _toolkits:
Supported toolkits
===================
.. _toolkits_qt5:
Qt5
-----------
To build C++ project with Qt5 you can put ``qt5``
in :ref:`features<buildconf-taskparams-features>`.
In such tasks in the :ref:`source<buildconf-taskparams-source>` parameter
not only .cpp files but .qrc, .ui and .ts files can be specified as well.
There are additional task parameters for Qt5 tasks:
:ref:`moc<buildconf-taskparams-moc>`,
:ref:`rclangprefix<buildconf-taskparams-rclangprefix>`,
:ref:`langdir-defname<buildconf-taskparams-langdir-defname>`,
:ref:`bld-langprefix<buildconf-taskparams-bld-langprefix>`,
:ref:`unique-qmpaths<buildconf-taskparams-unique-qmpaths>`,
:ref:`install-langdir<buildconf-taskparams-install-langdir>`.
There are also several additional environment variables for Qt5 toolkit such as:
:ref:`QT5_BINDIR<envvars-qt5bindir>`,
:ref:`QT5_SEARCH_ROOT<envvars-qt5searchroot>`,
:ref:`QT5_LIBDIR<envvars-qt5libdir>` and some others.
ZenMake tries to find Qt5 with ``qmake`` and searches for it in
``QT5_SEARCH_ROOT`` and in the
system ``PATH`` environment variables.
You can use ``QT5_BINDIR`` to set directory path
with ``qmake`` in it.
The ``PATH`` and ``QT5_SEARCH_ROOT`` environment variables are ignored
in this case.
You can specify minimum/maximum version of Qt5 with the
:ref:`QT5_MIN_VER<envvars-qt5minver>` and :ref:`QT5_MAX_VER<envvars-qt5maxver>`
environment variables.
To specify needed Qt5 modules you should use the
:ref:`use<buildconf-taskparams-use>` parameter like this:
.. code-block:: yaml
use : QtWidgets QtDBus # original title case of Qt5 modules must be used
ZenMake always adds ``QtCore`` module to the ``use`` for tasks with ``qt5``
in :ref:`features<buildconf-taskparams-features>` because every
other Qt5 module depends on ``QtCore`` module.
So you don't need to specify ``QtCore`` to the ``use`` parameter.
Simple Qt5 task can be like that:
.. code-block:: yaml
tasks:
myqt5app:
features : cxxprogram qt5
source : prog/**/*.cpp prog/**/*.qrc prog/**/*.ui prog/**/*.ts
moc : prog/**/*.h
use : QtWidgets
Also it is recommended to look at examples in the ``qt5`` directory
in the repository `here <repo_demo_projects_>`_.
| zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/docs/toolkits.rst | toolkits.rst |
# pylint: disable = invalid-name, missing-function-docstring
"""
Copyright (c) 2020, Alexander Magola
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import subprocess
import tempfile
import shutil
import atexit
import mimetypes
import requests
import keyring
ZMDIR = os.path.dirname(os.path.abspath(__file__))
ZMDIR = os.path.abspath(os.path.join(ZMDIR, os.path.pardir))
os.chdir(ZMDIR)
REPO_OWNER = 'pustotnik'
REPO_NAME = 'zenmake'
REPO_RELEASES_URL = 'https://api.github.com/repos/{0}/{1}/releases'.format(REPO_OWNER, REPO_NAME)
ZENMAKE_BIN_DIR = os.path.join(ZMDIR, 'src', 'zenmake')
PYTHON_EXE = sys.executable
TMP_DIR = tempfile.mkdtemp()
sys.path.append(ZENMAKE_BIN_DIR)
@atexit.register
def _removeTmpDir():
shutil.rmtree(TMP_DIR)
def prepareAssets():
files = []
cmd = [PYTHON_EXE, ZENMAKE_BIN_DIR, 'zipapp', '--destdir', TMP_DIR]
devnull = open(os.devnull, 'w')
subprocess.call(cmd, stdout = devnull)
from zm.zipapp import ZIPAPP_NAME
files.append(os.path.join(TMP_DIR, ZIPAPP_NAME))
return files
def makeAuthHeader(token):
return { 'Authorization': 'token {0}'.format(token) }
def makeRelease(tag, token):
ver = tag[1:] if tag[0] == 'v' else tag
response = requests.post(
REPO_RELEASES_URL,
json = {
'tag_name': tag,
#'target_commitish': 'master',
'name': tag,
'body': 'version {0}'.format(ver),
'prerelease': False,
'draft': False,
},
headers = makeAuthHeader(token)
)
return response
def getRelease(tag, token):
url = '{0}/tags/{1}'.format(REPO_RELEASES_URL, tag)
response = requests.get(url, headers = makeAuthHeader(token))
return response
def getReleaseAssets(releaseId, token):
url = '{0}/{1}/assets'.format(REPO_RELEASES_URL, releaseId)
response = requests.get(url, headers = makeAuthHeader(token))
response.raise_for_status()
return response
def deleteReleaseAsset(assetId, token):
url = '{0}/assets/{1}'.format(REPO_RELEASES_URL, assetId)
response = requests.delete(url, headers = makeAuthHeader(token))
return response
def uploadReleaseAsset(path, uploadUrl, token):
contentType, _ = mimetypes.guess_type(path)
if contentType is None:
contentType = 'application/octet-stream'
headers = makeAuthHeader(token)
headers['Content-Type'] = contentType
params = { 'name': os.path.basename(path) }
with open(path, "rb") as file:
response = requests.post(
uploadUrl,
data = file,
params = params,
headers = headers
)
response.raise_for_status()
return response
def obtainRelease(tag, token):
response = makeRelease(tag, token)
responseJson = response.json()
errors = responseJson.get('errors', [])
created = not any(err.get('code') == 'already_exists' for err in errors)
if created:
response.raise_for_status()
else:
response = getRelease(tag, token)
response.raise_for_status()
responseJson = response.json()
return responseJson
def uploadAssets(releaseJson, files, token):
releaseId = releaseJson['id']
assets = getReleaseAssets(releaseId, token).json()
for asset in assets:
deleteReleaseAsset(asset['id'], token)
uploadUrl = releaseJson['upload_url']
uploadUrl = uploadUrl[0:uploadUrl.index('{')]
for file in files:
path = os.path.join(ZMDIR, file)
uploadReleaseAsset(path, uploadUrl, token)
def main():
""" do main work """
cmdArgs = sys.argv[1:]
if not cmdArgs:
print("There is no tag version in args")
return 1
tagVer = cmdArgs[0]
if tagVer[0] != 'v':
tagVer = 'v' + tagVer
#print("Publishing release {0} to github ...".format(tagVer))
files = prepareAssets()
#keyring.set_password("github-zenmake", "deploy-token", "token value")
token = keyring.get_password("github-zenmake", "deploy-token")
releaseJson = obtainRelease(tagVer, token)
uploadAssets(releaseJson, files, token)
return 0
if __name__ == '__main__':
sys.exit(main()) | zenmake | /zenmake-0.11.0.tar.gz/zenmake-0.11.0/scripts/publish-github-release.py | publish-github-release.py |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[[email protected]](mailto:[email protected]).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
at [https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/inclusion
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations | zenml | /zenml-0.44.1.tar.gz/zenml-0.44.1/CODE-OF-CONDUCT.md | CODE-OF-CONDUCT.md |
<!-- PROJECT SHIELDS -->
<!--
*** I'm using markdown "reference style" links for readability.
*** Reference links are enclosed in brackets [ ] instead of parentheses ( ).
*** See the bottom of this document for the declaration of the reference variables
*** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use.
*** https://www.markdownguide.org/basic-syntax/#reference-style-links
-->
[![PyPi][pypi-shield]][pypi-url]
[![PyPi][pypiversion-shield]][pypi-url]
[![PyPi][downloads-shield]][downloads-url]
[![Contributors][contributors-shield]][contributors-url]
[![License][license-shield]][license-url]
<!-- [![Build][build-shield]][build-url] -->
<!-- [![CodeCov][codecov-shield]][codecov-url] -->
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[pypi-shield]: https://img.shields.io/pypi/pyversions/zenml?style=for-the-badge
[pypi-url]: https://pypi.org/project/zenml/
[pypiversion-shield]: https://img.shields.io/pypi/v/zenml?style=for-the-badge
[downloads-shield]: https://img.shields.io/pypi/dm/zenml?style=for-the-badge
[downloads-url]: https://pypi.org/project/zenml/
[codecov-shield]: https://img.shields.io/codecov/c/gh/zenml-io/zenml?style=for-the-badge
[codecov-url]: https://codecov.io/gh/zenml-io/zenml
[contributors-shield]: https://img.shields.io/github/contributors/zenml-io/zenml?style=for-the-badge
[contributors-url]: https://github.com/othneildrew/Best-README-Template/graphs/contributors
[license-shield]: https://img.shields.io/github/license/zenml-io/zenml?style=for-the-badge
[license-url]: https://github.com/zenml-io/zenml/blob/main/LICENSE
[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://www.linkedin.com/company/zenml/
[twitter-shield]: https://img.shields.io/twitter/follow/zenml_io?style=for-the-badge
[twitter-url]: https://twitter.com/zenml_io
[slack-shield]: https://img.shields.io/badge/-Slack-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[slack-url]: https://zenml.io/slack-invite
[build-shield]: https://img.shields.io/github/workflow/status/zenml-io/zenml/Build,%20Lint,%20Unit%20&%20Integration%20Test/develop?logo=github&style=for-the-badge
[build-url]: https://github.com/zenml-io/zenml/actions/workflows/ci.yml
<!-- PROJECT LOGO -->
<br />
<div align="center">
<a href="https://zenml.io">
<img alt="ZenML Logo" src="https://user-images.githubusercontent.com/3348134/223112746-345126ff-a0e8-479f-8ac0-670d78f71712.png" alt="Logo" width="400">
</a>
<h3 align="center">Build portable, production-ready MLOps pipelines.</h3>
<p align="center">
A simple yet powerful open-source framework that integrates all your ML tools.
<br />
<a href="https://docs.zenml.io/"><strong>Explore the docs »</strong></a>
<br />
<div align="center">
Join our <a href="https://zenml.io/slack-invite" target="_blank">
<img width="25" src="https://cdn3.iconfinder.com/data/icons/logos-and-brands-adobe/512/306_Slack-512.png" alt="Slack"/>
<b>Slack Community</b> </a> and be part of the ZenML family.
</div>
<br />
<a href="https://zenml.io/features">Features</a>
·
<a href="https://zenml.io/roadmap">Roadmap</a>
·
<a href="https://github.com/zenml-io/zenml/issues">Report Bug</a>
·
<a href="https://zenml.io/discussion">Vote New Features</a>
·
<a href="https://blog.zenml.io/">Read Blog</a>
·
<a href="#-meet-the-team">Meet the Team</a>
<br />
<b>ZenML Cloud</b> is now available in beta. <a href="https://cloud.zenml.io">Sign up</a> to see it in action.
<br />
🎉 Version 0.44.1 is out. Check out the release notes
<a href="https://github.com/zenml-io/zenml/releases">here</a>.
<br />
<a href="https://www.linkedin.com/company/zenml/">
<img src="https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555" alt="Logo">
</a>
<a href="https://twitter.com/zenml_io">
<img src="https://img.shields.io/badge/-Twitter-black.svg?style=for-the-badge&logo=twitter&colorB=555" alt="Logo">
</a>
<a href="https://www.youtube.com/c/ZenML">
<img src="https://img.shields.io/badge/-YouTube-black.svg?style=for-the-badge&logo=youtube&colorB=555" alt="Logo">
</a>
</p>
</div>
<!-- TABLE OF CONTENTS -->
<details>
<summary>🏁 Table of Contents</summary>
<ol>
<li><a href="#-introduction">Introduction</a></li>
<li><a href="#-quickstart">Quickstart</a></li>
<li>
<a href="#-create-your-own-mlops-platform">Create your own MLOps Platform</a>
<ul>
<li><a href="##-1-deploy-zenml">Deploy ZenML</a></li>
<li><a href="#-2-deploy-stack-components">Deploy Stack Components</a></li>
<li><a href="#-3-create-a-pipeline">Create a Pipeline</a></li>
<li><a href="#-4-start-the-dashboard">Start the Dashboard</a></li>
</ul>
</li>
<li><a href="#-roadmap">Roadmap</a></li>
<li><a href="#-contributing-and-community">Contributing and Community</a></li>
<li><a href="#-getting-help">Getting Help</a></li>
<li><a href="#-license">License</a></li>
</ol>
</details>
<br />
# 🤖 Introduction
🤹 ZenML is an extensible, open-source MLOps framework for creating portable,
production-ready machine learning pipelines. By decoupling infrastructure from
code, ZenML enables developers across your organization to collaborate more
effectively as they develop to production.
- 💼 ZenML gives data scientists the freedom to fully focus on modeling and
experimentation while writing code that is production-ready from the get-go.
- 👨💻 ZenML empowers ML engineers to take ownership of the entire ML lifecycle
end-to-end. Adopting ZenML means fewer handover points and more visibility on
what is happening in your organization.
- 🛫 ZenML enables MLOps infrastructure experts to define, deploy, and manage
sophisticated production environments that are easy to use for colleagues.

ZenML provides a user-friendly syntax designed for ML workflows, compatible with
any cloud or tool. It enables centralized pipeline management, enabling
developers to write code once and effortlessly deploy it to various
infrastructures.
<div align="center">
<img src="docs/book/.gitbook/assets/stack.gif">
</div>
# 🤸 Quickstart
[Install ZenML](https://docs.zenml.io/getting-started/installation) via
[PyPI](https://pypi.org/project/zenml/). Python 3.8 - 3.11 is required:
```bash
pip install "zenml[server]"
```
Take a tour with the guided quickstart by running:
```bash
zenml go
```
# 🖼️ Create your own MLOps Platform
ZenML allows you to create and manage your own MLOps platform using
best-in-class open-source and cloud-based technologies. Here is an example of
how you could set this up for your team:
## 🔋 1. Deploy ZenML
For full functionality ZenML should be deployed on the cloud to
enable collaborative features as the central MLOps interface for teams.

Currently, there are two main options to deploy ZenML:
- **ZenML Cloud**: With [ZenML Cloud](https://docs.zenml.io/deploying-zenml/zenml-cloud),
you can utilize a control plane to create ZenML servers, also known as tenants.
These tenants are managed and maintained by ZenML's dedicated team, alleviating
the burden of server management from your end.
- **Self-hosted deployment**: Alternatively, you have the flexibility to [deploy
ZenML on your own self-hosted environment](https://docs.zenml.io/deploying-zenml/zenml-self-hosted).
This can be achieved through various methods, including using our CLI, Docker,
Helm, or HuggingFace Spaces.
## 👨🍳 2. Deploy Stack Components
ZenML boasts a ton of [integrations](https://zenml.io/integrations) into
popular MLOps tools. The [ZenML Stack](https://docs.zenml.io/user-guide/starter-guide/understand-stacks)
concept ensures that these tools work nicely together, therefore bringing
structure and standardization into the MLOps workflow.
Deploying and configuring this is super easy with ZenML. For **AWS**, this might
look a bit like this
```bash
# Deploy and register an orchestrator and an artifact store
zenml orchestrator deploy kubernetes_orchestrator --flavor kubernetes --cloud aws
zenml artifact-store deploy s3_artifact_store --flavor s3
# Register this combination of components as a stack
zenml stack register production_stack --orchestrator kubernetes_orchestrator --artifact-store s3_artifact_store --set # Register your production environment
```
When you run a pipeline with this stack set, it will be running on your deployed
Kubernetes cluster.
You can also [deploy your own tooling manually](https://docs.zenml.io/stacks-and-components/stack-deployment).
## 🏇 3. Create a Pipeline
Here's an example of a hello world ZenML pipeline in code:
```python
# run.py
from zenml import pipeline, step
@step
def step_1() -> str:
"""Returns the `world` substring."""
return "world"
@step
def step_2(input_one: str, input_two: str) -> None:
"""Combines the two strings at its input and prints them."""
combined_str = input_one + ' ' + input_two
print(combined_str)
@pipeline
def my_pipeline():
output_step_one = step_1()
step_2(input_one="hello", input_two=output_step_one)
if __name__ == "__main__":
my_pipeline()
```
```bash
python run.py
```
## 👭 4. Start the Dashboard
Open up the ZenML dashboard using this command.
```bash
zenml show
```

# 🗺 Roadmap
ZenML is being built in public. The [roadmap](https://zenml.io/roadmap) is a
regularly updated source of truth for the ZenML community to understand where
the product is going in the short, medium, and long term.
ZenML is managed by a [core team](https://zenml.io/company#CompanyTeam) of
developers that are responsible for making key decisions and incorporating
feedback from the community. The team oversees feedback via various channels,
and you can directly influence the roadmap as follows:
- Vote on your most wanted feature on our [Discussion
board](https://zenml.io/discussion).
- Start a thread in our [Slack channel](https://zenml.io/slack-invite).
- [Create an issue](https://github.com/zenml-io/zenml/issues/new/choose) on our
GitHub repo.
# 🙌 Contributing and Community
We would love to develop ZenML together with our community! Best way to get
started is to select any issue from the [`good-first-issue`
label](https://github.com/zenml-io/zenml/labels/good%20first%20issue). If you
would like to contribute, please review our [Contributing
Guide](CONTRIBUTING.md) for all relevant details.
# 🆘 Getting Help
The first point of call should
be [our Slack group](https://zenml.io/slack-invite/).
Ask your questions about bugs or specific use cases, and someone from
the [core team](https://zenml.io/company#CompanyTeam) will respond.
Or, if you
prefer, [open an issue](https://github.com/zenml-io/zenml/issues/new/choose) on
our GitHub repo.
# 📜 License
ZenML is distributed under the terms of the Apache License Version 2.0.
A complete version of the license is available in the [LICENSE](LICENSE) file in
this repository. Any contribution made to this project will be licensed under
the Apache License Version 2.0.
| zenml | /zenml-0.44.1.tar.gz/zenml-0.44.1/README.md | README.md |
# 🧑💻 Contributing to ZenML
A big welcome and thank you for considering contributing to ZenML! It’s people
like you that make it a reality for users
in our community.
Reading and following these guidelines will help us make the contribution
process easy and effective for everyone involved. It also communicates that
you agree to respect the time of the developers managing and developing these
open-source projects. In return, we will reciprocate that respect by reading
your issue, assessing changes, and helping you finalize your pull requests.
## ⚡️ Quicklinks
- [🧑💻 Contributing to ZenML](#-contributing-to-zenml)
- [⚡️ Quicklinks](#-quicklinks)
- [🧑⚖️ Code of Conduct](#-code-of-conduct)
- [🛫 Getting Started](#-getting-started)
- [⁉️ Issues](#-issues)
- [🏷 Pull Requests: When to make one](#-pull-requests-when-to-make-one)
- [💯 Pull Requests: Workflow to Contribute](#-pull-requests-workflow-to-contribute)
- [🧐 Linting, formatting, and tests](#-linting-formatting-and-tests)
- [🚨 Reporting a Vulnerability](#-reporting-a-vulnerability)
- [Coding Conventions](#coding-conventions)
- [👷 Creating a new Integration](#-creating-a-new-integration)
- [🆘 Getting Help](#-getting-help)
## 🧑⚖️ Code of Conduct
We take our open-source community seriously and hold ourselves and other
contributors to high standards of communication.
By participating and contributing to this project, you agree to uphold
our [Code of Conduct](https://github.com/zenml-io/zenml/blob/master/CODE-OF-CONDUCT.md).
## 🛫 Getting Started
Contributions are made to this repo via Issues and Pull Requests (PRs). A few
general guidelines that cover both:
- To report security vulnerabilities, please get in touch at
  [[email protected]](mailto:[email protected]), which is monitored by
  our security team.
- Search for existing Issues and PRs before creating your own.
- We work hard to make sure issues are handled on time, but it could take a
while to investigate the root cause depending on the impact.
A friendly ping in the comment thread to the submitter or a contributor can help
draw attention if your issue is blocking.
The best way to start is to check the
[`good-first-issue`](https://github.com/zenml-io/zenml/labels/good%20first%20issue)
label on the issue board. The core team creates these issues as smaller,
well-scoped tasks that you can work on to get deeper into ZenML internals. These
should generally require relatively simple changes, probably affecting just one
or two files, which we think makes them ideal for people new to ZenML.
The next step after that would be to look at the
[`good-second-issue`](https://github.com/zenml-io/zenml/labels/good%20second%20issue)
label on the issue board. These are a bit more complex, might involve more
files, but should still be well-defined and achievable for people relatively new
to ZenML.
### ⁉️ Issues
Issues should be used to report problems with the library, request a new
feature, or to discuss potential changes before
a PR is created. When you create a new Issue, a template will be loaded that
will guide you through collecting and
providing the information we need to investigate.
If you find an Issue that addresses your problem, please add your own
reproduction information to the
existing issue rather than creating a new one. Adding
a [reaction](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)
can also help by
indicating to our maintainers that a particular issue is affecting more than
just the reporter.
### 🏷 Pull Requests: When to make one
Pull Requests (PRs) to ZenML are always welcome and can be a quick way to get your fix or
improvement slated for the next release. In
general, PRs should:
- Only fix/add the functionality in question **OR** address widespread
whitespace/style issues, not both.
- Add unit or integration tests for fixed or changed functionality (if a test
suite already exists).
- Address a single concern in as few changed lines as possible.
- Include documentation in the repo or in your Pull Request.
- Be accompanied by a filled-out Pull Request template (loaded automatically when
a PR is created).
For changes that address core functionality or would require breaking changes (e.g. a major release), it's best to open
an Issue to discuss your proposal first. This is not required but can save time
creating and reviewing changes.
### 💯 Pull Requests: Workflow to Contribute
<p class="callout warning">Please note that development in ZenML happens off of the <b>develop</b> branch, <b>not main</b>,
which is the default branch on GitHub. Therefore, please pay particular attention to step 5 and step 9 below. </p>
In general, we follow
the ["fork-and-pull" Git workflow](https://github.com/susam/gitpr)
1. Review and sign
the [Contributor License Agreement](https://cla-assistant.io/zenml-io/zenml) (
CLA).
2. Fork the repository to your own Github account.
3. Clone the project to your machine.
4. Checkout the **develop** branch <- `git checkout develop`.
5. Create a branch (again, off of the develop branch) locally with a succinct but descriptive name.
6. Commit changes to the branch
7. Follow the `Linting, formatting, and tests` guide to make sure your code adheres to the ZenML coding style (see below).
8. Push changes to your fork.
9. Open a PR in our repository (to the `develop` branch, **NOT** `main`) and
follow the PR template so that we can efficiently review the changes.
### 🧐 Linting, formatting, and tests
To install ZenML from your local checked out files including all core dev-dependencies, run:
```
pip install -e ".[server,dev]"
```
Optionally, you might want to run the following commands to ensure you have all
integrations for `mypy` checks:
```
zenml integration install -y -i feast
pip install click~=8.0.3
mypy --install-types
```
Warning: This might take a while (~15 minutes each, depending on your machine).
However, if you have the time, please run these commands, as they will make the
subsequent checks error-free.
You can now run the following scripts to automatically format your
code and to check whether the code formatting, linting, docstrings, and
spelling is in order:
```
bash scripts/format.sh
bash scripts/run-ci-checks.sh
```
Tests can be run as follows:
```
bash scripts/test-coverage-xml.sh
```
Please note that it is good practice to run the above commands before submitting
any Pull Request: the CI GitHub Action will run them anyway, so you might as
well catch the errors locally!
### 🚨 Reporting a Vulnerability
If you think you have found a vulnerability, and even if you are not sure about it,
please report it right away by sending an
email to: [email protected]. Please try to be as explicit as possible,
describing all the steps and example code to
reproduce the security issue.
We will review it thoroughly and get back to you.
Please refrain from publicly discussing a potential security vulnerability as
this could potentially put our users at
risk! It's better to discuss privately and give us a chance to find a solution
first, to limit the potential impact
as much as possible.
## Coding Conventions
The code within the repository is structured in the following way;
the most relevant places for contributors are highlighted with a `<-` arrow:
```
├── .github -- Definition of the GH action workflows
├── docker -- Dockerfiles used to build ZenML docker images
├── docs <- The ZenML docs, CLI docs and API docs live here
│ ├── book <- In case you make user facing changes, update docs here
│ └── mkdocs -- Some configurations for the API/CLI docs
├── examples <- When adding an integration, add an example here
├── scripts -- Scripts used by Github Actions or for local linting/testing
├── src/zenml <- The heart of ZenML
│ ├── <stack_component> <- Each stack component has its own directory
│ ├── cli <- Change and improve the CLI here
│ ├── config -- The ZenML config methods live here
│ ├── integrations <- Add new integrations here
│ ├── io -- File operation implementations
│ ├── materializers <- Materializers responsible for reading/writing artifacts
│ ├── pipelines <- The base pipeline and its decorator
│ ├── services -- Code responsible for managing services
│ ├── stack <- Stack, Stack Components and the flavor registry
│ ├── steps <- Steps and their decorators are defined here
│ ├── utils <- Collection of useful utils
│ ├── zen_server -- Code for running the Zen Server
│ └── zen_stores -- Code for storing stacks in multiple settings
└── test <- Don't forget to write unit tests for your code
```
## 👷 Creating a new Integration
In case you want to create an entirely new integration that you would like to
see supported by ZenML, there are a few steps that you should follow:
1. Create the actual integration. Check out the
[Integrations README](src/zenml/integrations/README.md)
for detailed step-by-step instructions.
2. Create an example of how to use the integration. Check out the
[Examples README](examples/README.md)
to find out what to do.
3. All integrations deserve to be documented. Make sure to pay a visit to the
[Component Guide](https://docs.zenml.io/stacks-and-components/component-guide)
in the docs and add your implementations.
## 🆘 Getting Help
Join us in the [ZenML Slack Community](https://zenml.io/slack-invite/) to
interact directly with the core team and community at large. This is a good
place to ideate, discuss concepts or ask for help.
| zenml | /zenml-0.44.1.tar.gz/zenml-0.44.1/CONTRIBUTING.md | CONTRIBUTING.md |
# Fiduciary License Agreement 2.0
based on the
## Individual Contributor Exclusive License Agreement
(including the Traditional Patent License OPTION)
Thank you for your interest in contributing to ZenML by ZenML GmbH ("We" or "Us").
The purpose of this contributor agreement ("Agreement") is to clarify and document the rights granted by contributors to Us. To make this document effective, please follow the instructions at https://zenml.io/cla/.
### 0. Preamble
Software is deeply embedded in all aspects of our lives and it is important that it empower, rather than restrict us. Free Software gives everybody the rights to use, understand, adapt and share software. These rights help support other fundamental freedoms like freedom of speech, press and privacy.
Development of Free Software can follow many patterns. In some cases whole development is handled by a sole programmer or a small group of people. But usually, the creation and maintenance of software is a complex process that requires the contribution of many individuals. This also affects who owns the rights to the software. In the latter case, rights in software are owned jointly by a great number of individuals.
To tackle this issue some projects require a full copyright assignment to be signed by all contributors. The problem with such assignments is that they often lack checks and balances that would protect the contributors from potential abuse of power from the new copyright holder.
FSFE’s Fiduciary License Agreement (FLA) was created by the Free Software Foundation Europe e.V. with just that in mind – to concentrate all deciding power within one entity and prevent fragmentation of rights on one hand, while on the other preventing that single entity from abusing its power. The main aim is to ensure that the software covered under the FLA will forever remain Free Software.
This process only serves for the transfer of economic rights. So-called moral rights (e.g. authors right to be identified as author) remain with the original author(s) and are inalienable.
#### How to use this FLA
If You are an employee and have created the Contribution as part of your employment, You need to have Your employer approve this Agreement or sign the Entity version of this document. If You do not own the Copyright in the entire work of authorship, any other author of the Contribution should also sign this – in any event, please contact Us at [email protected]
### 1. Definitions
"You" means the individual Copyright owner who Submits a Contribution to Us.
"Contribution" means any original work of authorship, including any original modifications or additions to an existing work of authorship, Submitted by You to Us, in which You own the Copyright.
"Copyright" means all rights protecting works of authorship, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence.
"Material" means the software or documentation made available by Us to third parties. When this Agreement covers more than one software project, the Material means the software or documentation to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material.
"Submit" means any act by which a Contribution is transferred to Us by You by means of tangible or intangible media, including but not limited to electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Us, but excluding any transfer that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
"Documentation" means any non-software portion of a Contribution.
### 2. License grant
#### 2.1 Copyright license to Us
Subject to the terms and conditions of this Agreement, You hereby grant to Us a worldwide, royalty-free, exclusive, perpetual and irrevocable (except as stated in Section 8.2) license, with the right to transfer an unlimited number of non-exclusive licenses or to grant sublicenses to third parties, under the Copyright covering the Contribution to use the Contribution by all means, including, but not limited to:
publish the Contribution,
modify the Contribution,
prepare derivative works based upon or containing the Contribution and/or to combine the Contribution with other Materials,
reproduce the Contribution in original or modified form,
distribute, to make the Contribution available to the public, display and publicly perform the Contribution in original or modified form.
#### 2.2 Moral rights
Moral Rights remain unaffected to the extent they are recognized and not waivable by applicable law. Notwithstanding, You may add your name to the attribution mechanism customarily used in the Materials you Contribute to, such as the header of the source code files of Your Contribution, and We will respect this attribution when using Your Contribution.
#### 2.3 Copyright license back to You
Upon such grant of rights to Us, We immediately grant to You a worldwide, royalty-free, non-exclusive, perpetual and irrevocable license, with the right to transfer an unlimited number of non-exclusive licenses or to grant sublicenses to third parties, under the Copyright covering the Contribution to use the Contribution by all means, including, but not limited to:
publish the Contribution,
modify the Contribution,
prepare derivative works based upon or containing the Contribution and/or to combine the Contribution with other Materials,
reproduce the Contribution in original or modified form,
distribute, to make the Contribution available to the public, display and publicly perform the Contribution in original or modified form.
This license back is limited to the Contribution and does not provide any rights to the Material.
### 3. Patents
#### 3.1 Patent license
Subject to the terms and conditions of this Agreement You hereby grant to Us and to recipients of Materials distributed by Us a worldwide, royalty-free, non-exclusive, perpetual and irrevocable (except as stated in Section 3.2) patent license, with the right to transfer an unlimited number of non-exclusive licenses or to grant sublicenses to third parties, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with any Material (and portions of such combination). This license applies to all patents owned or controlled by You, whether already acquired or hereafter acquired, that would be infringed by making, having made, using, selling, offering for sale, importing or otherwise transferring of Your Contribution(s) alone or by combination of Your Contribution(s) with any Material.
#### 3.2 Revocation of patent license
You reserve the right to revoke the patent license stated in section 3.1 if We make any infringement claim that is targeted at your Contribution and not asserted for a Defensive Purpose. An assertion of claims of the Patents shall be considered for a "Defensive Purpose" if the claims are asserted against an entity that has filed, maintained, threatened, or voluntarily participated in a patent infringement lawsuit against Us or any of Our licensees.
### 4. License obligations by Us
We agree to (sub)license the Contribution or any Materials containing, based on or derived from your Contribution under the terms of any licenses the Free Software Foundation classifies as Free Software License and which are approved by the Open Source Initiative as Open Source licenses.
More specifically and in strict accordance with the above paragraph, we agree to (sub)license the Contribution or any Materials containing, based on or derived from the Contribution only under the terms of the following license(s) Apache-2.0 (including any right to adopt any future version of a license if permitted).
We agree to license patents owned or controlled by You only to the extent necessary to (sub)license Your Contribution(s) and the combination of Your Contribution(s) with the Material under the terms of any licenses the Free Software Foundation classifies as Free Software licenses and which are approved by the Open Source Initiative as Open Source licenses.
### 5. Disclaimer
THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION AND EXTENT TO THE MINIMUM PERIOD AND EXTENT PERMITTED BY LAW.
### 6. Consequential damage waiver
TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR WE BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED.
### 7. Approximation of disclaimer and damage waiver
IF THE DISCLAIMER AND DAMAGE WAIVER MENTIONED IN SECTION 5. AND SECTION 6. CANNOT BE GIVEN LEGAL EFFECT UNDER APPLICABLE LOCAL LAW, REVIEWING COURTS SHALL APPLY LOCAL LAW THAT MOST CLOSELY APPROXIMATES AN ABSOLUTE WAIVER OF ALL CIVIL OR CONTRACTUAL LIABILITY IN CONNECTION WITH THE CONTRIBUTION.
### 8. Term
#### 8.1 This Agreement shall come into effect upon Your acceptance of the terms and conditions.
#### 8.2 This Agreement shall apply for the term of the copyright and patents licensed here. However, You shall have the right to terminate the Agreement if We do not fulfill the obligations as set forth in Section 4. Such termination must be made in writing.
#### 8.3 In the event of a termination of this Agreement Sections 5., 6., 7., 8., and 9. shall survive such termination and shall remain in full force thereafter. For the avoidance of doubt, Free and Open Source Software (sub)licenses that have already been granted for Contributions at the date of the termination shall remain in full force after the termination of this Agreement.
### 9. Miscellaneous
#### 9.1 This Agreement and all disputes, claims, actions, suits or other proceedings arising out of this agreement or relating in any way to it shall be governed by the laws of Germany excluding its private international law provisions.
#### 9.2 This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings.
#### 9.3 In case of Your death, this agreement shall continue with Your heirs. In case of more than one heir, all heirs must exercise their rights through a commonly authorized person.
#### 9.4 If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and that is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.
#### 9.5 You agree to notify Us of any facts or circumstances of which you become aware that would make this Agreement inaccurate in any respect.
**You**
Date:_______________________________
Name:_______________________________
Title:______________________________
Address:____________________________
**Us**
Date:_______________________________
Name:_______________________________
Title:_______________________________
Address:_______________________________
| zenml | /zenml-0.44.1.tar.gz/zenml-0.44.1/CLA.md | CLA.md |
# 📜 ZenNews: Generate summarized news on a schedule
In today's information age, we are bombarded with a constant stream of news
and media from a variety of sources. Summarization tools, particularly when it
comes to news sources, can be powerful aids for the efficient consumption of
information. They distill complex or lengthy content into easily
digestible chunks that can be scanned and absorbed quickly, allowing us to
keep up with the news without being overwhelmed. They can also help us separate
the signal from the noise, highlighting the most important details and helping
us identify what's worth further investigation.
This is where **ZenNews** comes into play. It offers a tool that can
automate the summarization process and save users time and effort while
providing them with the information they need. This can be particularly valuable
for busy professionals or anyone who wants to keep up with the news but doesn't
have the time to read every article in full.
# 🎯 The goal of the project
The definition of the concrete use case aside, this project aims to showcase
some of the advantages that ZenML brings to the table. Some major points we
would like to highlight include:
- **The ease of use**: ZenML features [a simple and clean Python
SDK](https://docs.zenml.io/starter-guide/pipelines). As you can
observe in this example, it is not only used to define your steps and
pipelines but also to access/manage the resources and artifacts that you
interact with along the way. This makes it significantly easier for you
to build your applications around ZenML.
- **The extensibility**: ZenML is an extensible framework. ML projects often
require custom-tailored solutions and what you get out of the box may not be
what you need. This is why ZenML uses base abstractions that allow you
to create your own solutions without reinventing the wheel. You can find
great examples of this feature by taking a look at the custom materializer
([ArticleMaterializer](src/zennews/materializers/article_materializer.py))
and the custom stack component
([DiscordAlerter](src/zennews/alerter/discord_alerter.py))
implemented within the context of this project.
- **Stack vs Code**: One of the main features of ZenML is rooted within the
concept of our stacks. As you follow this example, you will see that there is a
complete separation between the code and the infrastructure that the pipeline
is running on. In fact, by utilizing just one flag, it is possible to switch
from a local default stack to a remote deployment with scheduled pipelines.
- **The scalability**: This is a small PoC-like example that aims to prove
that ZenML can help you streamline your workflows and accelerate your
development process. However, it barely scratches the surface of how you can
improve it even further. For more information, check
[this section](#-limitations-future-improvements-and-upcoming-changes).
# 🐍 Base installation
The **ZenNews** project is designed as a
[PyPI package](https://pypi.org/project/zennews/)
that you can install through `pip`:
```bash
pip install zennews
```
The package comes equipped with the following set of **key** pieces:
- **The pipeline**: The [`zen_news_pipeline`](src/zennews/pipelines/zen_news_pipeline.py)
is the main pipeline in this workflow. In total, it features three steps,
namely `collect`, `summarize` and `report`. The first step is responsible
for collecting articles, the second step summarizes them, and the last step
creates a report and posts it.
- **The steps**: There is a concrete implementation for each step defined above.
- For the `collect` step, we have the [`bbc_news_source`](src/zennews/steps/sources/bbc.py)
which (by default) collects the top stories from the BBC news feed and
prepares [`Article`](src/zennews/models/article.py) objects.
- For the `summarize` step, we have implemented the [`bart_large_cnn_samsum`](src/zennews/steps/summarize/bart_large_cnn_samsum.py)
step. As the name suggests, it uses the BART model to generate summaries.
- Ultimately, for the `report` step, we have implemented the
[`post_summaries`](src/zennews/steps/report/report.py)
step. It showcases how a generalized step can function within a ZenML
pipeline and uses an alerter to share the results.
- **The materializer**: As mentioned above, the steps within our pipeline are
using the concept of `Article`s to define their input and output space. Using
the [`ArticleMaterializer`](src/zennews/materializers/article_materializer.py),
we can show how to handle the materialization of these artifacts
when it comes to a data type that is not built-in.
- **The custom stack component**: The ultimate goal of `ZenNews` is to
serve the user the direct outcomes of the pipeline. That is why we have used it
as a chance to show the extensibility of ZenML in terms of the stack components
and implemented a [`DiscordAlerter`](src/zennews/alerter/discord_alerter.py).
- **The CLI application**: The example also includes a
[Click](https://click.palletsprojects.com/en/8.1.x/) CLI application.
It demonstrates how easily you can use our Python SDK to build your application
around your ZenML workflows. In order to see it in action simply execute:
```bash
zennews --help
```
# 🕹 Test it locally right away
After installing the `zennews` package, you are ready to test it out locally
right away. The following command will get the top five articles from the BBC
news feed, summarize them and present the results to you.
> Warning: This will temporarily switch your active ZenML stack to the
> **default** stack and when the pipeline runs, you will download the model
> to your local machine.
```bash
zennews bbc
```
You can also parameterize this process. In order to see the possible
parameters, please use:
```bash
zennews bbc --help
```
# 🚀 Switching to scheduled pipelines with Vertex
The potential of an application like `zennews` can only be unlocked by
scheduling summarization pipelines instead of manually triggering them
yourself. In order to showcase it, we will set up a fully remote GCP stack
and use the `VertexOrchestrator` to schedule the pipeline.
## Deploy ZenML on GCP
Before you start building the stack, you need to deploy ZenML on GCP. For more
information on how you can do that, please check
[the corresponding docs page](https://docs.zenml.io/getting-started/deploying-zenml).
## ZenNews Stack
Once ZenML is deployed, we can start to build up our stack. Our stack will
consist of the following components:
- [GCP Secrets Manager](https://docs.zenml.io/component-gallery/secrets-managers/gcp)
- [GCP Container Registry](https://docs.zenml.io/component-gallery/container-registries/gcloud)
- [GCS Artifact Store](https://docs.zenml.io/component-gallery/artifact-stores/gcloud-gcs)
- [Vertex Orchestrator](https://docs.zenml.io/component-gallery/orchestrators/gcloud-vertexai)
- [Discord Alerter (part of the `zennews` package)](src/zennews/alerter/discord_alerter.py)
Let's start by installing the `gcp` integration:
```bash
zenml integration install gcp
```
### Secrets Manager
The first component to register is a
[GCP secrets manager](https://docs.zenml.io/component-gallery/secrets-managers/gcp).
The command is quite straightforward. You just have to give it a name and
provide the ID of your project on GCP.
```bash
zenml secrets-manager register <SECRETS_MANAGER_NAME> \
--flavor=gcp \
--project_id=<PROJECT_ID>
```
### Container Registry
The second component is a
[GCP container registry](https://docs.zenml.io/component-gallery/container-registries/gcloud).
Similar to the previous component, you just need to provide a name and the
URI to your container registry on GCP.
```bash
zenml container-registry register <CONTAINER_REGISTRY_NAME> \
--flavor=gcp \
--uri=<REGISTRY_URI>
```
### Artifact Store
The next component on the list is a
[GCS artifact store](https://docs.zenml.io/component-gallery/artifact-stores/gcloud-gcs).
In order to register it, all you have to do is to provide the path to your GCS
bucket:
```bash
zenml artifact-store register <ARTIFACT_STORE_NAME> \
--flavor=gcp \
--path=<PATH_TO_BUCKET>
```
### Orchestrator
Following the artifact store, we will register a
[Vertex AI orchestrator.](https://docs.zenml.io/component-gallery/orchestrators/gcloud-vertexai)
```bash
zenml orchestrator register <ORCHESTRATOR_NAME> \
--flavor=vertex \
--project=<PROJECT_ID> \
--location=<GCP_LOCATION> \
--workload_service_account=<EMAIL_OF_YOUR_SERVICE_ACCOUNT> \
--service_account_path=<PATH_TO_YOUR_SERVICE_ACCOUNT_KEY>
```
You simply need to provide the ID of your project, the name of your GCP
region, and the service account you would like to use.
> Warning: In this version, you have to provide both the email of the service
> account and the path to a key.json file. This interaction will be improved
> in the upcoming releases.
Make sure that the service account has the proper roles for the following
services: Cloud Functions, Cloud Scheduler, Secret Manager, Service Account,
Storage, and Vertex AI.
### GCP Stack
With these four components, we are ready to establish and activate the base
version of our GCP stack.
```bash
zenml stack register <STACK_NAME> \
-x <SECRETS_MANAGER_NAME> \
-c <CONTAINER_REGISTRY_NAME> \
-a <ARTIFACT_STORE_NAME> \
-o <ORCHESTRATOR_NAME> \
--set
```
### Alerter
The last component in our stack is a special case. As mentioned before,
the `zennews` package already comes equipped with a custom stack component
implementation, namely the `DiscordAlerter`. In a nutshell, it uses the
[**discord.py**](https://discordpy.readthedocs.io/en/stable/index.html) package
to send messages via a webhook to a Discord text channel. You can find the
implementation right [here](src/zennews/alerter/discord_alerter.py).
The following sections show how we can register `DiscordAlerter` as a custom
flavor, create an instance of it, and update our stack.
#### Registering the custom flavor
All you have to do to register such a custom flavor is to provide the
corresponding source path to the flavor class.
```bash
zenml alerter flavor register zennews.alerter.discord_alerter_flavor.DiscordAlerterFlavor
```
ZenML will import and add that to the list of available alerter flavors.
```bash
zenml alerter flavor list
```
#### Registering the alerter
Now that the flavor is registered, you can create an alerter with the flavor
`discord-webhook`. Through this example, you will also see how you can use
secret references to handle sensitive information during the registration of
stack components.
Let's start by registering the secret:
```bash
zenml secrets-manager secret register <SECRET_NAME> \
--webhook_url=<ACTUAL_URL_OF_THE_WEBHOOK>
```
This will use the secrets manager in our active GCP stack. Once the secret
registration is complete, you can register your alerter as follows:
```bash
zenml alerter register <ALERTER_NAME> \
--flavor discord-webhook \
--webhook_url=<SECRET_REFERENCE> # formatted as {{SECRET_NAME:WEBHOOK_URL}}
```
#### Updating the stack
The last step is to update our stack with our new alerter:
```bash
zenml stack update <STACK_NAME> -al <ALERTER_NAME>
```
## Scheduling pipelines through the `zennews` CLI
Now that the stack is set up, you can use the `--schedule` option when you run your
`zennews` pipeline. There are three possible values that you can use for the
`schedule` option: `hourly`, `daily` (every day at 9 AM), or `weekly` (every
Monday at 9 AM).
```bash
zennews bbc --schedule daily
```
This will use your active stack (the GCP stack) and schedule your ZenNews
pipeline.
# 📙 Limitations, future improvements and upcoming changes
Before we end this project, it is also important to talk about the limitations
we faced, the possible future improvements, and changes that are already in
motion:
- The first limitation of ZenNews is the number of supported news sources.
As this project was initially designed as a PoC, the only supported news
source is BBC. However, thanks to our design, it is really easy to expand this
list by adding additional steps, which consume data and create `Article`
objects.
- The ability to schedule pipelines through ZenML played a critical role
within the context of this project. However, this feature has its own
limitations. While you can create scheduled pipelines, once the pipeline and
its schedule are created, you cannot cancel or modify the behavior of the
scheduled pipeline through ZenML. This means that if you want to cancel it, you
have to do it via the orchestrator UI or interface yourself.
- The other limitation regarding the schedules is the format. As of now, the
CLI application takes the user input and converts it into a cron expression
(a minimal sketch of this mapping is shown right after this list). Any
orchestrator which does not support cron expressions is therefore not supported.
- As the ZenML team, we have been looking for ways to improve the interface
of our base alerters. You might see some changes in upcoming releases.
- Similar to the alerters, we are working on improving the management of our
secrets.
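For illustration, the schedule-to-cron translation mentioned in the list above
boils down to a small lookup table. The following is a minimal sketch of that
idea; the helper name and the exact cron strings are hypothetical and not the
actual `zennews` internals:

```python
# Minimal sketch of mapping the CLI schedule options to cron expressions.
# Hypothetical helper, shown for illustration only.
CRON_SCHEDULES = {
    "hourly": "0 * * * *",  # at minute 0 of every hour
    "daily": "0 9 * * *",   # every day at 9 AM
    "weekly": "0 9 * * 1",  # every Monday at 9 AM
}


def to_cron(schedule: str) -> str:
    try:
        return CRON_SCHEDULES[schedule]
    except KeyError:
        raise ValueError(f"Unsupported schedule: {schedule!r}")
```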
Tune in to [our slack](https://zenml.io/slack-invite/) to stay updated about
the upcoming changes and ask any questions you might have.
| zennews | /zennews-0.1.8.tar.gz/zennews-0.1.8/README.md | README.md |
<h3 align="center">
<img src="images/crp_logo.png" width="400"/>
An open-source library for neural network interpretability built on [zennit](https://github.com/chr5tphr/zennit)
with Relevance and Activation Maximization.
</h3>
[](https://badge.fury.io/py/zennit-crp)

## **Concept Relevance Propagation (CRP)**
computes conditional attributions for concepts defined in latent space, which allow you to
- localize concepts in input space
- compute their relative importance to the final classification output
- and hierarchically decompose higher-level concepts into low-level concepts
In addition, this repository ships with
## **Relevance Maximization**
an explaining-by-example strategy for concepts that illustrates the most <em>useful</em> pattern for prediction, unlike _Activation Maximization_, which reveals patterns that lead to _strong activation_.
## **Activation Maximization**
is also supplied as a reference sampling approach, together with class-wide statistics, for comparison.
Curious? Then take a look at our [preprint](https://arxiv.org/abs/2206.03208):
```
@article{achtibat2022from,
title = {From "Where" to "What": Towards Human-Understandable Explanations through Concept Relevance Propagation},
author = {Achtibat, Reduan and
Dreyer, Maximilian and
Eisenbraun, Ilona and
Bosse, Sebastian and
Wiegand, Thomas and
Samek, Wojciech and
Lapuschkin, Sebastian},
journal={arXiv},
year = {2022},
volume={abs/2206.03208},
doi = {10.48550/ARXIV.2206.03208},
url = {https://arxiv.org/abs/2206.03208},
}
```
## Why Concept Relevance Propagation?
For a detailed discussion, feel free to check out the paper, but here we will give a summary of the most exciting features:
<details>
<summary>Overview</summary>
CRP applied on three age range predictions given different input samples from the Adience dataset
for age and gender estimation.

**(Left):** Traditional heatmaps are rather
uninformative despite being class-specific.
Here, heatmaps only hint at the locations of relevant body parts, but what feature(s) in particular the model has recognized in those regions remains open for interpretation by the stakeholder, which, depending on the domain, may prove to be highly ambiguous. In this case, they indicate that the model seems to focus on the eye region during inference in all cases.
**(Rightmost):** Intermediate features encoded by the model in general
can be investigated using global XAI (Activation or Relevance Maximization). Choosing a particular layer, individual channels can be
assigned concepts. However, global XAI alone does not inform which features are recognized, used and combined by the model during per-sample inference.
**(Center):** By combining local and global XAI, _glocal_ XAI is able to assign (relevance) attribution scores to individual neuron(-group)s. This tells which concepts have been involved in a particular prediction. Further, concept-conditional heatmaps can be computed, indicating where a recognized concept identified as relevant has its origin in a sample’s input space. Vice versa, choosing a specific region of the local relevance attribution in input space, the responsible concepts can be traced
back. Lastly, peripheral information can be masked out of the shown reference examples using
conditionally computed heatmap attributions for further focusing the feature visualization on the
concept’s defining parts, which increases interpretability and clarity:
Concentrating on the eye region, we immediately recognize that the topmost sample has been predicted into age group (3-7)
due to the sample’s large irides and round eyes, while the middle sample is predicted as (25-32), as
more of the sclera is visible and eyebrows are more ostensible. For the bottom sample the model
has predicted class (60+) based on its recognition of heavy wrinkles around the eyes and on the
eyelids, and pronounced tear sacs next to a large knobby nose.
</details>
<details>
<summary>Disentangling Explanations</summary>
<img src="images/disentangling.png"/>
Target concept “dog” is described by a combination of lower-level concepts such as “snout”, “eye” and “fur”. CRP heatmaps regarding individual concepts, and their contribution to the prediction of “dog”, can be generated by applying masks to filter-channels in the backward pass. Global (in the context of an input sample) relevance of a concept wrt. to the explained prediction can thus not only be measured in latent space, but also precisely visualized, localized and measured in input space. The concept-conditional computation reveals the relatively high importance of the spatially distributed “fur” feature for the prediction of “dog”, compared to the feature “eye”.
</details>
<details>
<summary>Localization of Concepts</summary>
<img src="images/local.png" width=300/>
CRP applied in combination with local aggregation of relevance scores over regions $I_1$ and $I_2$ in order to locally assess conceptual importance and localize concepts involved in inference.
</details>
<details>
<summary>Relevance vs. Activation Maximization</summary>
Activation- and relevance-based sample selection.

**a)** Activation scores only measure
the stimulation of a latent filter without considering its role and impact during inference. Relevance scores are contextual to distinct model outputs and describe how features
are utilized in a DNN’s prediction of a specific class.
**b)** As a result, samples selected
based on Activation Maximization only represent maximized latent neuron activation, while
samples based on Relevance Maximization represent features which are actually useful and
representative for solving a prediction task.
_(A) ActMax identifies a concept that encodes for white strokes. RelMax, however, shows that this concept is not simply used to find white strokes, but white characters!_
**c)** Assume we wish to find representative examples for features $x_1$
and $x_2$. Even though a sample leads to a high activation score in a given layer and neuron (group) — here $x_1$ and $x_2$ — it does not necessarily result in high relevance or contribution to inference: The feature transformation $w$ of a linear layer with inputs $x_1$ and $x_2$, which is followed by a ReLU non-linearity, is shown. Here, samples from the blue cluster of feature activations lead to high activation values for both features $x_1$ and $x_2$, and would be selected by ActMax, but receive zero relevance, as they lead to an inactive neuron output after the ReLU, and are thus of no value to following layers. That is, even though
the given samples activate features $x_1$ and $x_2$ maximally strong, they can not contribute
meaningfully to the prediction process through the context determined by $w$. Thus, samples
selected as representative via activation might not be representative to the overall decision
process of the model. Representative examples selected based on relevance, however, are
guaranteed to play an important role in the model’s decision process.
**d):** Correlation analyses are shown for an intermediate ResNet layer’s channel and neuron. Neurons that are on average highly activated are not, in general, also highly relevant, as a correlation coefficient of $c = 0.111$ shows, since a specific combination of activation magnitudes is important for neurons to be representative in a larger model context.
</details>
<details>
<summary>Hierarchical Concept Decomposition through Attribution Graphs</summary>
Decomposing a high-level concept into its lower-level concepts.
<img src="images/hierarchical.png" width=400/>
Given an interesting concept encoded by channel j in layer l, relevance quantities computed during a CRP backward pass can then be utilized to identify how its relevance distributes across lower layer channels (here shown side-by-side in an exploded view).
</details>
## Project status
The project is under active development but should be stable. Please expect interfaces to change in future releases.
## Installation
To install directly from PyPI using pip, write:
```shell
$ pip install zennit-crp[fast_img]
```
Alternatively, install from a manually cloned repository to try out the tutorials:
```shell
$ git clone https://github.com/rachtibat/zennit-crp
$ pip install ./zennit-crp
```
## Documentation
Still under development, but you can refer to the tutorials below.
Docstrings are also missing in some places.
## Tutorials
Check out the [jupyter notebook tutorials.](https://github.com/rachtibat/zennit-crp/tree/master/tutorials) Please start with attribution and then feature_visualization.
## Quickstart
### Conditional Attributions
```python
from crp.attribution import CondAttribution
from crp.concepts import ChannelConcept
from crp.helper import get_layer_names
from zennit.composites import EpsilonPlusFlat
from zennit.canonizers import SequentialMergeBatchNorm
# define LRP rules and canonizers in zennit
composite = EpsilonPlusFlat([SequentialMergeBatchNorm()])
# load CRP toolbox
attribution = CondAttribution(model)
# here, each channel is defined as a concept
# or define your own notion!
cc = ChannelConcept()
# get layer names of Conv2D and MLP layers
layer_names = get_layer_names(model, [nn.Conv2d, nn.Linear])
# get a conditional attribution for channel 50 in layer features.27 wrt. output 1
conditions = [{'features.27': [50], 'y': [1]}]
attr = attribution(data, conditions, composite, record_layer=layer_names)
# heatmap and prediction
attr.heatmap, attr.prediction
# activations and relevances for each layer name
attr.activations, attr.relevances
# relative importance of each concept for final prediction
rel_c = cc.attribute(attr.relevances['features.40'])
# most relevant channels in features.40
concept_ids = torch.argsort(rel_c, descending=True)
```
### Feature Visualization
```python
from crp.visualization import FeatureVisualization
from crp.image import plot_grid
# define which concept is used in each layer
layer_map = {name: cc for name in layer_names}
# compute visualization (for VGG16 and the ImageNet test set, this takes ~30 min on a Titan RTX)
fv = FeatureVisualization(attribution, dataset, layer_map)
fv.run(composite, 0, len(dataset))
# visualize MaxRelevance reference images for top-5 concepts
ref_c = fv.get_max_reference(concept_ids[:5], 'features.40', 'relevance', composite=composite)
plot_grid(ref_c)
```
## Roadmap
Coming soon...
- [ ] Distributed HPC-Cluster support
- [ ] Complete MaskHook Tutorial
- [ ] Visualization for the Attribution Graph
- [ ] Documentation
## Contributing
### Code Style
We use [PEP8](https://www.python.org/dev/peps/pep-0008) with a line-width of 120 characters. For
docstrings we use [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html).
We use [`pylint`](https://pypi.org/project/pylint/) for style checks.
Basic tests are implemented with [`pytest`](https://docs.pytest.org/).
We are open to any improvements (:
## License
BSD 3-Clause Clear License
| zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/README.md | README.md |
import torch
import numpy as np
from typing import List, Dict
class Concept:
"""
    Abstract class that implements the core functionality for the attribution computation of concepts.
"""
def mask(self, batch_id, concept_ids, layer_name):
raise NotImplementedError("'Concept'class must be implemented!")
def mask_rf(self, neuron_ids, layer_name):
raise NotImplementedError("'Concept'class must be implemented!")
def reference_sampling(self, relevance, layer_name: str = None, max_target: str = "sum", abs_norm=True):
raise NotImplementedError("'Concept'class must be implemented!")
def get_rf_indices(self, output_shape, layer_name):
raise NotImplementedError("'Concept'class must be implemented!")
def attribute(self, relevance, mask=None, layer_name: str = None, abs_norm=True):
raise NotImplementedError("'Concept'class must be implemented!")
class ChannelConcept(Concept):
"""
    Concept class for torch.nn.Conv2d and torch.nn.Linear layers.
"""
@staticmethod
def mask(batch_id: int, concept_ids: List, layer_name=None):
"""
        Wrapper that generates a function that modifies the gradient (which zennit replaces with attributions).
Parameters:
----------
batch_id: int
Specifies the batch dimension in the torch.Tensor.
concept_ids: list of integer values
integer lists corresponding to channel indices.
Returns:
--------
callable function that modifies the gradient
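
        Example
        --------
        Illustrative usage only; 'latent_output' stands for a hypothetical
        tensor for which gradients are computed:

        >>> mask_fn = ChannelConcept.mask(batch_id=0, concept_ids=[5, 10])
        >>> handle = latent_output.register_hook(mask_fn)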
"""
def mask_fct(grad):
mask = torch.zeros_like(grad[batch_id])
mask[concept_ids] = 1
grad[batch_id] = grad[batch_id] * mask
return grad
return mask_fct
@staticmethod
def mask_rf(batch_id: int, c_n_map: Dict[int, List], layer_name=None):
"""
        Wrapper that generates a function that modifies the gradient (which zennit replaces with attributions) for single neurons.
Parameters:
----------
batch_id: int
Specifies the batch dimension in the torch.Tensor.
        c_n_map: dict with int keys and list values
            Keys correspond to channel indices and values to neuron indices.
            Neuron indices are counted as if each 2D channel were flattened to 1D, i.e. a channel dimension of [3, 20, 20] becomes [3, 400],
            so that neuron indices range between 0 and 399.
Returns:
--------
callable function that modifies the gradient
"""
def mask_fct(grad):
grad_shape = grad.shape
grad = grad.view(*grad_shape[:2], -1)
mask = torch.zeros_like(grad[batch_id])
for channel in c_n_map:
mask[channel, c_n_map[channel]] = 1
grad[batch_id] = grad[batch_id] * mask
return grad.view(grad_shape)
return mask_fct
def get_rf_indices(self, output_shape, layer_name=None):
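        """
        Returns the indices of all neurons inside the flattened spatial
        dimensions of 'output_shape'. For 1-D outputs (e.g. torch.nn.Linear),
        a single index [0] is returned.
        """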
if len(output_shape) == 1:
return [0]
else:
end = np.prod(output_shape[1:])
return np.arange(0, end)
def attribute(self, relevance, mask=None, layer_name: str = None, abs_norm=True):
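        """
        Computes the relative relevance of each channel by summing 'relevance'
        over its spatial dimensions, optionally restricted by 'mask' and, if
        'abs_norm' is True, normalized by the per-sample sum of absolute values.
        """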
if isinstance(mask, torch.Tensor):
relevance = relevance * mask
rel_l = torch.sum(relevance.view(*relevance.shape[:2], -1), dim=-1)
if abs_norm:
rel_l = rel_l / (torch.abs(rel_l).sum(-1).view(-1, 1) + 1e-10)
return rel_l
def reference_sampling(self, relevance, layer_name: str = None, max_target: str = "sum", abs_norm=True):
"""
Parameters:
max_target: str. Either 'sum' or 'max'.
"""
# position of receptive field neuron
rel_l = relevance.view(*relevance.shape[:2], -1)
rf_neuron = torch.argmax(rel_l, dim=-1)
# channel maximization target
if max_target == "sum":
rel_l = torch.sum(relevance.view(*relevance.shape[:2], -1), dim=-1)
elif max_target == "max":
rel_l = torch.gather(rel_l, -1, rf_neuron.unsqueeze(-1)).squeeze(-1)
else:
raise ValueError("'max_target' supports only 'max' or 'sum'.")
if abs_norm:
rel_l = rel_l / (torch.abs(rel_l).sum(-1).view(-1, 1) + 1e-10)
d_ch_sorted = torch.argsort(rel_l, dim=0, descending=True)
rel_ch_sorted = torch.gather(rel_l, 0, d_ch_sorted)
rf_ch_sorted = torch.gather(rf_neuron, 0, d_ch_sorted)
return d_ch_sorted, rel_ch_sorted, rf_ch_sorted | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/concepts.py | concepts.py |
import torch
import numpy as np
from typing import List
import os
from pathlib import Path
def get_layer_names(model: torch.nn.Module, types: List):
"""
Retrieves the layer names of all layers that belong to a torch.nn.Module type defined
in 'types'.
Parameters
----------
model: torch.nn.Module
types: list of torch.nn.Module
Layer types i.e. torch.nn.Conv2D
Returns
-------
layer_names: list of strings
"""
layer_names = []
for name, layer in model.named_modules():
for layer_definition in types:
if isinstance(layer, layer_definition) or issubclass(layer.__class__, layer_definition):
if name not in layer_names:
layer_names.append(name)
return layer_names
def abs_norm(rel: torch.Tensor, stabilize=1e-10):
"""
Parameter:
rel: 1-D array
"""
abs_sum = torch.sum(torch.abs(rel))
return rel / (abs_sum + stabilize)
def max_norm(rel, stabilize=1e-10):
return rel / (rel.max() + stabilize)
def get_output_shapes(model, single_sample: torch.tensor, record_layers: List[str]):
"""
    Calculates the output shape (excluding the batch dimension) of each layer in 'record_layers' using a single forward pass.
"""
output_shapes = {}
def generate_hook(name):
def shape_hook(module, input, output):
output_shapes[name] = output.shape[1:]
return shape_hook
hooks = []
for name, layer in model.named_modules():
if name in record_layers:
shape_hook = generate_hook(name)
hooks.append(layer.register_forward_hook(shape_hook))
_ = model(single_sample)
[h.remove() for h in hooks]
return output_shapes
def load_maximization(path_folder, layer_name):
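    """
    Loads the memory-mapped maximization results saved for 'layer_name':
    sorted sample indices, maximization values and receptive field neuron indices.
    """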
filename = f"{layer_name}_"
d_c_sorted = np.load(Path(path_folder) / Path(filename + "data.npy"), mmap_mode="r")
rel_c_sorted = np.load(Path(path_folder) / Path(filename + "rel.npy"), mmap_mode="r")
rf_c_sorted = np.load(Path(path_folder) / Path(filename + "rf.npy"), mmap_mode="r")
return d_c_sorted, rel_c_sorted, rf_c_sorted
def load_stat_targets(path_folder):
    targets = np.load(Path(path_folder) / Path("targets.npy")).astype(int)  # the 'np.int' alias was removed in NumPy >= 1.24
return targets
def load_statistics(path_folder, layer_name, target):
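    """
    Loads the memory-mapped per-class statistics saved for class 'target' in layer 'layer_name'.
    """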
filename = f"{target}_"
d_c_sorted = np.load(Path(path_folder) / Path(layer_name) / Path(filename + "data.npy"), mmap_mode="r")
rel_c_sorted = np.load(Path(path_folder) / Path(layer_name) / Path(filename + "rel.npy"), mmap_mode="r")
rf_c_sorted = np.load(Path(path_folder) / Path(layer_name) / Path(filename + "rf.npy"), mmap_mode="r")
return d_c_sorted, rel_c_sorted, rf_c_sorted
def load_receptive_field(path_folder, layer_name):
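    """
    Loads the memory-mapped receptive field array saved for 'layer_name'.
    """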
filename = f"{layer_name}.npy"
rf_array = np.load(Path(path_folder) / Path(filename), mmap_mode="r")
return rf_array
def find_files(path=None):
"""
    Parameters
    ----------
    path: str or None
        Path to the folder containing the analysis results. Defaults to the
        current working directory.
"""
if path is None:
path = os.getcwd()
folders = os.listdir(path)
r_max, a_max, r_stats, a_stats, rf = [], [], [], [], []
for name in folders:
found_path = str(Path(path) / Path(name))
if "RelMax" in name:
r_max.append(found_path)
elif "ActMax" in name:
a_max.append(found_path)
elif "RelStats" in name:
r_stats.append(found_path)
elif "ActStats" in name:
a_stats.append(found_path)
elif "ReField" in name:
rf.append(found_path)
return r_max, a_max, r_stats, a_stats, rf | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/helper.py | helper.py |
from pathlib import Path
from PIL import Image
from typing import Any, Tuple, Dict, List, Union
class Cache:
"""
Abstract class that imlplements the core functionality for caching reference images.
"""
def __init__(self, path="cache"):
self.path = Path(path)
def save(self, ref_c, layer_name, mode, r_range, composite, rf, f_name, plot_name, **kwargs) -> None:
raise NotImplementedError("'Cache' class must be implemented!")
def load(self, concept_ids, layer_name, mode, r_range, composite, rf, f_name, plot_name, **kwargs) -> Tuple[Dict[int, Any],
Dict[int, Tuple[int, int]]]:
raise NotImplementedError("'Cache' class must be implemented!")
def extend_dict(self, ref_original, rf_addition):
raise NotImplementedError("'Cache' class must be implemented!")
class ImageCache(Cache):
"""
    Cache that saves lists, or tuples of lists, of PIL.Image objects stored as the values of a dictionary.
Parameters:
----------
path: str or pathlib.Path
        folder where the images are saved
"""
def _create_path(self, layer_name, mode, composite, rf, func_name, plot_name):
folder_name = mode + "_" + composite.__class__.__name__
if rf:
folder_name += "_rf"
path = self.path / Path(func_name, plot_name, folder_name, layer_name)
path.mkdir(parents=True, exist_ok=True)
return path
def _save_img_list(self, img_list, id, tuple_index, r_range, path):
for img, r in zip(img_list, range(*r_range)):
if not isinstance(img, Image.Image):
raise TypeError(f"'ImageCache' can only save PIL.Image objects. \
But you tried to save a {type(img)} object.")
img.save(path / Path(f"{id}_{tuple_index}_{r}.png"), optimize=True)
def save(self, ref_dict: Dict[Any, Union[Image.Image, List]],
layer_name, mode, r_range: Tuple[int, int],
composite, rf, func_name, plot_name) -> None:
"""
Saves PIL.Images inside 'ref_dict' in the path defined by the remaining arguments.
Parameters:
----------
        ref_dict: dict of PIL.Image objects
layer_name: str
mode: str, 'relevance' or 'activation'
r_range: tuple (int, int)
composite: zennit.composites object
rf: boolean
func_name: str, 'get_max_reference' or 'get_stats_reference'
plot_name: str
name of plot_fn
"""
path = self._create_path(layer_name, mode, composite, rf, func_name, plot_name)
for id in ref_dict:
value = ref_dict[id]
if isinstance(value, Tuple):
self._save_img_list(value[0], id, 0, r_range, path)
self._save_img_list(value[1], id, 1, r_range, path)
elif isinstance(value[0], Image.Image):
self._save_img_list(value, id, 0, r_range, path)
def _load_img_list(self, id, tuple_index, r_range, path):
imgs, not_found = [], None
for r in range(*r_range):
try:
img = Image.open(path / Path(f"{id}_{tuple_index}_{r}.png"))
imgs.append(img)
except FileNotFoundError:
not_found = (r, r_range[-1])
break
return imgs, not_found
def load(self, indices: List,
layer_name, mode, r_range, composite, rf, func_name, plot_name) -> Tuple[Dict[Any, Any],
Dict[int, Tuple[int, int]]]:
"""
Loads PIL.Images with concept index 'indices' and layer 'layer_name' from the path defined by the remaining arguments.
Parameters:
----------
indices: list of int or str
layer_name: str
mode: str, 'relevance' or 'activation'
r_range: tuple (int, int)
composite: zennit.composites object
rf: boolean
func_name: str, 'get_max_reference' or 'get_stats_reference'
"""
path = self._create_path(layer_name, mode, composite, rf, func_name, plot_name)
ref_c, not_found = {}, {}
for id in indices:
imgs_0, not_found_0 = self._load_img_list(id, 0, r_range, path)
imgs_1, _ = self._load_img_list(id, 1, r_range, path)
if imgs_0:
if imgs_1:
# tuple per sample exists
ref_c[id] = (imgs_0, imgs_1)
else:
ref_c[id] = imgs_0
if not_found_0:
not_found[id] = not_found_0
return ref_c, not_found
def extend_dict(self, ref_original, rf_addition):
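        """
        Merges 'rf_addition' into 'ref_original' in place by extending the list,
        or tuple of lists, stored under each key. Keys not yet present are added.
        """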
for key, value in rf_addition.items():
if key in ref_original:
if isinstance(value, Tuple):
ref_original[key][0].extend(value[0])
ref_original[key][1].extend(value[1])
elif isinstance(value, List):
ref_original[key].extend(value)
else:
raise TypeError("'ref_original' must contain tuples or a list")
else:
ref_original[key] = value
return ref_original | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/cache.py | cache.py |
from typing import List, Union, Dict, Tuple, Callable
import warnings
import torch
import numpy as np
import math
from collections.abc import Iterable
import concurrent.futures
import functools
import inspect
from tqdm import tqdm
from zennit.composites import NameMapComposite, Composite
from crp.attribution import CondAttribution
from crp.maximization import Maximization
from crp.concepts import ChannelConcept, Concept
from crp.statistics import Statistics
from crp.hooks import FeatVisHook
from crp.helper import load_maximization, load_statistics, load_stat_targets
from crp.image import vis_img_heatmap, vis_opaque_img
from crp.cache import Cache
class FeatureVisualization:
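    """
    Runs Relevance Maximization and Activation Maximization over a dataset and
    writes the results (most representative samples and per-class statistics)
    to disk. Reference images can optionally be cached via a 'Cache' instance.
    """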
def __init__(
            self, attribution: CondAttribution, dataset, layer_map: Dict[str, Concept], preprocess_fn: Callable = None,
            max_target="sum", abs_norm=True, path="FeatureVisualization", device=None, cache: Cache = None):
self.dataset = dataset
self.layer_map = layer_map
self.preprocess_fn = preprocess_fn
self.attribution = attribution
self.device = attribution.device if device is None else device
self.RelMax = Maximization("relevance", max_target, abs_norm, path)
self.ActMax = Maximization("activation", max_target, abs_norm, path)
self.RelStats = Statistics("relevance", max_target, abs_norm, path)
self.ActStats = Statistics("activation", max_target, abs_norm, path)
self.Cache = cache
def preprocess_data(self, data):
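        """
        Applies 'preprocess_fn' to 'data' if one was supplied; otherwise returns 'data' unchanged.
        """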
if callable(self.preprocess_fn):
return self.preprocess_fn(data)
else:
return data
def get_data_sample(self, index, preprocessing=True) -> Tuple[torch.Tensor, int]:
"""
returns a data sample from dataset at index.
Parameter:
index: integer
preprocessing: boolean.
If True, return the sample after preprocessing. If False, return the sample for plotting.
"""
data, target = self.dataset[index]
data = data.to(self.device).unsqueeze(0)
if preprocessing:
data = self.preprocess_data(data)
data.requires_grad = True
return data, target
def multitarget_to_single(self, multi_target):
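        """
        Optional hook for multi-label datasets: overwrite this method so that it
        returns an iterable of single targets for a multi-target label. If it
        raises NotImplementedError (the default), each sample is attributed once
        using its original target. (This contract is inferred from the way the
        method is called in 'run_distributed'.)
        """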
raise NotImplementedError
def run(self, composite: Composite, data_start, data_end, batch_size=32, checkpoint=500, on_device=None):
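        """
        Convenience wrapper that first runs the checkpointed analysis
        ('run_distributed') and then merges the checkpoint files into the final
        result files ('collect_results').
        """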
print("Running Analysis...")
saved_checkpoints = self.run_distributed(composite, data_start, data_end, batch_size, checkpoint, on_device)
print("Collecting results...")
saved_files = self.collect_results(saved_checkpoints)
return saved_files
def run_distributed(self, composite: Composite, data_start, data_end, batch_size=16, checkpoint=500, on_device=None):
"""
max batch_size = max(multi_targets) * data_batch
data_end: exclusively counted
"""
self.saved_checkpoints = {"r_max": [], "a_max": [], "r_stats": [], "a_stats": []}
last_checkpoint = 0
n_samples = data_end - data_start
samples = np.arange(start=data_start, stop=data_end)
if n_samples > batch_size:
batches = math.ceil(n_samples / batch_size)
else:
batches = 1
batch_size = n_samples
# feature visualization is performed inside forward and backward hook of layers
name_map, dict_inputs = [], {}
for l_name, concept in self.layer_map.items():
hook = FeatVisHook(self, concept, l_name, dict_inputs, on_device)
name_map.append(([l_name], hook))
fv_composite = NameMapComposite(name_map)
if composite:
composite.register(self.attribution.model)
fv_composite.register(self.attribution.model)
pbar = tqdm(total=batches, dynamic_ncols=True)
for b in range(batches):
pbar.update(1)
samples_batch = samples[b * batch_size: (b + 1) * batch_size]
data_batch, targets_samples = self.get_data_concurrently(samples_batch, preprocessing=True)
targets_samples = np.array(targets_samples) # numpy operation needed
# convert multi target to single target if user defined the method
data_broadcast, targets, sample_indices = [], [], []
try:
for i_t, target in enumerate(targets_samples):
single_targets = self.multitarget_to_single(target)
for st in single_targets:
targets.append(st)
data_broadcast.append(data_batch[i_t])
sample_indices.append(samples_batch[i_t])
if len(data_broadcast) == 0:
continue
# TODO: test stack
data_broadcast = torch.stack(data_broadcast, dim=0)
sample_indices = np.array(sample_indices)
targets = np.array(targets)
except NotImplementedError:
data_broadcast, targets, sample_indices = data_batch, targets_samples, samples_batch
conditions = [{self.attribution.MODEL_OUTPUT_NAME: [t]} for t in targets]
# dict_inputs is linked to FeatHooks
dict_inputs["sample_indices"] = sample_indices
dict_inputs["targets"] = targets
# composites are already registered before
self.attribution(data_broadcast, conditions, None, exclude_parallel=False)
if b % checkpoint == checkpoint - 1:
self._save_results((last_checkpoint, sample_indices[-1] + 1))
last_checkpoint = sample_indices[-1] + 1
# TODO: what happens if result arrays are empty?
self._save_results((last_checkpoint, sample_indices[-1] + 1))
if composite:
composite.remove()
fv_composite.remove()
pbar.close()
return self.saved_checkpoints
@torch.no_grad()
def analyze_relevance(self, rel, layer_name, concept, data_indices, targets):
"""
Collects the input samples that are most relevant for each concept (channel/neuron) in a layer and updates the relevance statistics.
"""
d_c_sorted, rel_c_sorted, rf_c_sorted, t_c_sorted = self.RelMax.analyze_layer(
rel, concept, layer_name, data_indices, targets)
self.RelStats.analyze_layer(d_c_sorted, rel_c_sorted, rf_c_sorted, t_c_sorted, layer_name)
@torch.no_grad()
def analyze_activation(self, act, layer_name, concept, data_indices, targets):
"""
Collects the input samples that maximally activate each concept (channel/neuron) in a layer and updates the activation statistics.
"""
# activation analysis once per sample if multi target dataset
unique_indices = np.unique(data_indices, return_index=True)[1]
data_indices = data_indices[unique_indices]
act = act[unique_indices]
targets = targets[unique_indices]
d_c_sorted, act_c_sorted, rf_c_sorted, t_c_sorted = self.ActMax.analyze_layer(
act, concept, layer_name, data_indices, targets)
self.ActStats.analyze_layer(d_c_sorted, act_c_sorted, rf_c_sorted, t_c_sorted, layer_name)
def _save_results(self, d_index=None):
self.saved_checkpoints["r_max"].extend(self.RelMax._save_results(d_index))
self.saved_checkpoints["a_max"].extend(self.ActMax._save_results(d_index))
self.saved_checkpoints["r_stats"].extend(self.RelStats._save_results(d_index))
self.saved_checkpoints["a_stats"].extend(self.ActStats._save_results(d_index))
def collect_results(self, checkpoints: Dict[str, List[str]], d_index: Tuple[int, int] = None):
saved_files = {}
saved_files["r_max"] = self.RelMax.collect_results(checkpoints["r_max"], d_index)
saved_files["a_max"] = self.ActMax.collect_results(checkpoints["a_max"], d_index)
saved_files["r_stats"] = self.RelStats.collect_results(checkpoints["r_stats"], d_index)
saved_files["a_stats"] = self.ActStats.collect_results(checkpoints["a_stats"], d_index)
return saved_files
def get_data_concurrently(self, indices: Union[List, np.ndarray, torch.Tensor], preprocessing=False):
if len(indices) == 1:
data, label = self.get_data_sample(indices[0], preprocessing)
return data, label
threads = []
data_returned = []
labels_returned = []
with concurrent.futures.ThreadPoolExecutor() as executor:
for index in indices:
future = executor.submit(self.get_data_sample, index, preprocessing)
threads.append(future)
for t in threads:
single_data = t.result()[0]
single_label = t.result()[1]
data_returned.append(single_data)
labels_returned.append(single_label)
data_returned = torch.cat(data_returned, dim=0)
return data_returned, labels_returned
def cache_reference(func):
"""
Decorator for get_max_reference and get_stats_reference. If a crp.cache object is supplied to the FeatureVisualization object,
reference samples are cached i.e. saved after computing a visualization with a 'plot_fn' (argument of get_max_reference) or
loaded from the disk if available.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
"""
Parameters:
-----------
overwrite: boolean
If set to True, already computed reference samples are computed again (overwritten).
"""
overwrite = kwargs.pop("overwrite", False)
args_f = inspect.getcallargs(func, self, *args, **kwargs)
plot_fn = args_f["plot_fn"]
if self.Cache is None or plot_fn is None:
return func(**args_f)
r_range, mode, l_name, rf, composite = args_f["r_range"], args_f["mode"], args_f["layer_name"], args_f["rf"], args_f["composite"]
f_name, plot_name = func.__name__, plot_fn.__name__
if f_name == "get_max_reference":
indices = args_f["concept_ids"]
else:
indices = [f'{args_f["concept_id"]}:{i}' for i in args_f["targets"]]
if overwrite:
not_found = {id: r_range for id in indices}
ref_c = {}
else:
ref_c, not_found = self.Cache.load(indices, l_name, mode, r_range, composite, rf, f_name, plot_name)
if len(not_found):
for id in not_found:
args_f["r_range"] = not_found[id]
if f_name == "get_max_reference":
args_f["concept_ids"] = id
ref_c_left = func(**args_f)
elif f_name == "get_stats_reference":
args_f["targets"] = int(id.split(":")[-1])
ref_c_left = func(**args_f)
else:
raise ValueError("Only the methods 'get_max_reference' and 'get_stats_reference' can be decorated.")
self.Cache.save(ref_c_left, l_name, mode, not_found[id], composite, rf, f_name, plot_name)
ref_c = self.Cache.extend_dict(ref_c, ref_c_left)
return ref_c
return wrapper
@cache_reference
def get_max_reference(
self, concept_ids: Union[int,list], layer_name: str, mode="relevance", r_range: Tuple[int, int] = (0, 8), composite: Composite=None,
rf=False, plot_fn=vis_img_heatmap, batch_size=32)-> Dict:
"""
Retrieve reference samples for a list of concepts in a layer. Relevance and Activation Maximization
are available if FeatureVisualization was computed for the respective mode. In addition, conditional heatmaps can be computed on reference samples.
If the crp.concept class (supplied to the FeatureVisualization layer_map) implements masking for a single neuron in the 'mask_rf' method,
the reference samples and heatmaps can be cropped using the receptive field of the most relevant or active neuron.
Parameters:
----------
concept_ids: int or list
layer_name: str
mode: "relevance" or "activation"
Relevance or Activation Maximization
r_range: Tuple(int, int)
Range of Top-N reference samples as a half-open interval. For example, (3, 7) selects the samples ranked 3 to 6 (zero-based).
The second element of the tuple must be greater than the first.
composite: zennit.composites or None
If set, compute conditional heatmaps on reference samples. `composite` is used for the CondAttribution object.
rf: boolean
If True, compute the CRP heatmap for the most relevant/most activating neuron only, restricting the conditional heatmap
to the receptive field.
plot_fn: callable function with signature (samples: torch.Tensor, heatmaps: torch.Tensor, rf: boolean) or None
Draws reference images. The function receives as input the samples used for computing heatmaps before preprocessing
with self.preprocess_data and the final heatmaps after computation. In addition, the boolean flag 'rf' is passed to it.
The return value of the function should correspond to the Cache supplied to the FeatureVisualization object (if available).
If None, the raw tensors are returned.
batch_size: int
If heatmap is True, describes maximal batch size of samples to compute for conditional heatmaps.
Returns:
-------
ref_c: dictionary.
Key values correspond to channel index and values are reference samples. The values depend on the implementation of
the 'plot_fn'.
"""
ref_c = {}
if not isinstance(concept_ids, Iterable):
concept_ids = [concept_ids]
if mode == "relevance":
d_c_sorted, _, rf_c_sorted = load_maximization(self.RelMax.PATH, layer_name)
elif mode == "activation":
d_c_sorted, _, rf_c_sorted = load_maximization(self.ActMax.PATH, layer_name)
else:
raise ValueError("`mode` must be `relevance` or `activation`")
if rf and not composite:
warnings.warn("The receptive field is only computed, if you fill the 'composite' argument with a zennit Composite.")
for c_id in concept_ids:
d_indices = d_c_sorted[r_range[0]:r_range[1], c_id]
n_indices = rf_c_sorted[r_range[0]:r_range[1], c_id]
ref_c[c_id] = self._load_ref_and_attribution(d_indices, c_id, n_indices, layer_name, composite, rf, plot_fn, batch_size)
return ref_c
@cache_reference
def get_stats_reference(self, concept_id: int, layer_name: str, targets: Union[int, list], mode="relevance", r_range: Tuple[int, int] = (0, 8),
composite=None, rf=False, plot_fn=vis_img_heatmap, batch_size=32):
"""
Retrieve reference samples for a single concept in a layer w.r.t. different explanation targets, i.e. returns the reference samples
that are computed by self.compute_stats. Relevance and Activation are available if FeatureVisualization was computed for the statistics mode.
In addition, conditional heatmaps can be computed on reference samples. If the crp.concept class (supplied to the FeatureVisualization layer_map)
implements masking for a single neuron in the 'mask_rf' method, the reference samples and heatmaps can be cropped using the receptive field of
the most relevant or active neuron.
Parameters:
----------
concept_id: int
layer_name: str
targets: int or list
Explanation target(s), i.e. class indices, for which reference samples are retrieved.
mode: "relevance" or "activation"
Relevance or Activation Maximization
r_range: Tuple(int, int)
Range of Top-N reference samples as a half-open interval. For example, (3, 7) selects the samples ranked 3 to 6 (zero-based).
The second element of the tuple must be greater than the first.
composite: zennit.composites or None
If set, compute conditional heatmaps on reference samples. `composite` is used for the CondAttribution object.
rf: boolean
If True, compute the CRP heatmap for the most relevant/most activating neuron only, restricting the conditional heatmap
to the receptive field.
plot_fn: callable function with signature (samples: torch.Tensor, heatmaps: torch.Tensor, rf: boolean)
Draws reference images. The function receives as input the samples used for computing heatmaps before preprocessing
with self.preprocess_data and the final heatmaps after computation. In addition, the boolean flag 'rf' is passed to it.
The return value of the function should correspond to the Cache supplied to the FeatureVisualization object (if available).
If None, the raw tensors are returned.
batch_size: int
If heatmap is True, describes maximal batch size of samples to compute for conditional heatmaps.
Returns:
-------
ref_t: dictionary.
Key values correspond to target indices and values are reference samples. The values depend on the implementation of
the 'plot_fn'.
"""
ref_t = {}
if not isinstance(targets, Iterable):
targets = [targets]
if mode == "relevance":
path = self.RelStats.PATH
elif mode == "activation":
path = self.ActStats.PATH
else:
raise ValueError("`mode` must be `relevance` or `activation`")
if rf and not composite:
warnings.warn("The receptive field is only computed, if you fill the 'composite' argument with a zennit Composite.")
for t in targets:
d_c_sorted, _, rf_c_sorted = load_statistics(path, layer_name, t)
d_indices = d_c_sorted[r_range[0]:r_range[1], concept_id]
n_indices = rf_c_sorted[r_range[0]:r_range[1], concept_id]
ref_t[f"{concept_id}:{t}"] = self._load_ref_and_attribution(d_indices, concept_id, n_indices, layer_name, composite, rf, plot_fn, batch_size)
return ref_t
def _load_ref_and_attribution(self, d_indices, c_id, n_indices, layer_name, composite, rf, plot_fn, batch_size):
data_batch, _ = self.get_data_concurrently(d_indices, preprocessing=False)
if composite:
data_p = self.preprocess_data(data_batch)
heatmaps = self._attribution_on_reference(data_p, c_id, layer_name, composite, rf, n_indices, batch_size)
if callable(plot_fn):
return plot_fn(data_batch.detach(), heatmaps.detach(), rf)
else:
return data_batch.detach().cpu(), heatmaps.detach().cpu()
else:
return data_batch.detach().cpu()
def _attribution_on_reference(self, data, concept_id: int, layer_name: str, composite, rf=False, neuron_ids: list=[], batch_size=32):
n_samples = len(data)
if n_samples > batch_size:
batches = math.ceil(n_samples / batch_size)
else:
batches = 1
batch_size = n_samples
if rf and (len(neuron_ids) != n_samples):
raise ValueError("length of 'neuron_ids' must be equal to the length of 'data'")
heatmaps = []
for b in range(batches):
data_batch = data[b * batch_size: (b + 1) * batch_size].detach().requires_grad_()
if rf:
# slice into a new variable so that 'neuron_ids' is not shrunk across batches
neuron_ids_batch = neuron_ids[b * batch_size: (b + 1) * batch_size]
conditions = [{layer_name: {concept_id: n_index}} for n_index in neuron_ids_batch]
attr = self.attribution(data_batch, conditions, composite, mask_map=ChannelConcept.mask_rf, start_layer=layer_name, on_device=self.device,
exclude_parallel=False)
else:
conditions = [{layer_name: [concept_id]}]
# initialize relevance with activation before non-linearity (could be changed in a future release)
attr = self.attribution(data_batch, conditions, composite, start_layer=layer_name, on_device=self.device, exclude_parallel=False)
heatmaps.append(attr.heatmap)
return torch.cat(heatmaps, dim=0)
def compute_stats(self, concept_id, layer_name: str, mode="relevance", top_N=5, mean_N=10, norm=False) -> Tuple[list, list]:
"""
Computes statistics over the targets, i.e. classes, that are most relevant for or most activated by the concept with index 'concept_id'
in layer 'layer_name'. The statistics must have been computed (see self.run) before utilizing this method.
Parameters:
-----------
concept_id: int
Index of concept
layer_name: str
mode: str, 'relevance' or 'activation'
top_N: int
Returns the 'top_N' classes that most activate or are most relevant for the concept.
mean_N: int
Computes the importance of each target using the 'mean_N' top reference images for each target.
norm: boolean
If True, the mean relevance values are normalized by dividing with the largest value.
Returns:
--------
sorted_t, sorted_val as tuple
sorted_t: list of most relevant targets
sorted_val: list of respective mean relevance/activation values for each target
"""
if mode == "relevance":
path = self.RelStats.PATH
elif mode == "activation":
path = self.ActStats.PATH
else:
raise ValueError("`mode` must be `relevance` or `activation`")
targets = load_stat_targets(path)
rel_target = torch.zeros(len(targets))
for i, t in enumerate(targets):
_, rel_c_sorted, _ = load_statistics(path, layer_name, t)
rel_target[i] = float(rel_c_sorted[:mean_N, concept_id].mean())
args = torch.argsort(rel_target, descending=True)[:top_N]
sorted_t = targets[args]
sorted_val = rel_target[args]
if norm:
sorted_val = sorted_val / sorted_val[0]
return sorted_t, sorted_val
def _save_precomputed(self, s_tensor, h_tensor, index, plot_list, layer_name, mode, r_range, composite, rf, f_name):
for plot_fn in plot_list:
ref = {index: plot_fn(s_tensor, h_tensor, rf)}
self.Cache.save(ref, layer_name, mode, r_range, composite, rf, f_name, plot_fn.__name__)
def precompute_ref(self, layer_c_ind:Dict[str, List], composite: Composite, rf=True, stats=False, top_N=4, mean_N=10, mode="relevance", r_range: Tuple[int, int] = (0, 8), plot_list=[vis_opaque_img], batch_size=32):
"""
Precomputes and saves all reference samples resulting from 'self.get_max_reference' and 'self.get_stats_reference' for concepts supplied in 'layer_c_ind'.
Parameters:
-----------
layer_c_ind: dict with str keys and list values
Keys correspond to layer names and values to a list of all concept indices
stats: boolean
If True, precomputes reference samples of 'self.get_stats_reference'. Otherwise, only samples of 'self.get_max_reference' are computed.
plot_list: list of callable functions
Functions to plot and save the images. The signature should correspond to the 'plot_fn' of 'get_max_reference'.
REMAINING PARAMETERS: correspond to 'self.get_max_reference' and 'self.get_stats_reference'
"""
if self.Cache is None:
raise ValueError("You must supply a crp.Cache object to the 'FeatureVisualization' class to precompute reference images!")
if composite is None:
raise ValueError("You must supply a zennit.Composite object to precompute reference images!")
for l_name in layer_c_ind:
c_indices = layer_c_ind[l_name]
print("Layer:", l_name)
pbar = tqdm(total=len(c_indices), dynamic_ncols=True)
for c_id in c_indices:
s_tensor, h_tensor = self.get_max_reference(c_id, l_name, mode, r_range, composite, rf, None, batch_size)[c_id]
self._save_precomputed(s_tensor, h_tensor, c_id, plot_list, l_name, mode, r_range, composite, rf, "get_max_reference")
if stats:
targets, _ = self.compute_stats(c_id, l_name, mode, top_N, mean_N)
for t in targets:
stat_index = f"{c_id}:{t}"
s_tensor, h_tensor = self.get_stats_reference(c_id, l_name, t, mode, r_range, composite, rf, None, batch_size)[stat_index]
self._save_precomputed(s_tensor, h_tensor, stat_index, plot_list, l_name, mode, r_range, composite, rf, "get_stats_reference")
pbar.update(1)
pbar.close() | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/visualization.py | visualization.py |
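# Illustrative end-to-end sketch (hypothetical model, dataset and layer name; the
# calls match the methods defined above):
#
#     from crp.attribution import CondAttribution
#     from crp.concepts import ChannelConcept
#     from zennit.composites import EpsilonPlusFlat
#
#     attribution = CondAttribution(model)
#     layer_map = {"features.40": ChannelConcept()}
#     fv = FeatureVisualization(attribution, dataset, layer_map, preprocess_fn=preprocess)
#
#     fv.run(EpsilonPlusFlat(), 0, len(dataset), batch_size=32)  # analysis pass over the dataset
#     ref_c = fv.get_max_reference([71, 72], "features.40", "relevance", (0, 8), composite=EpsilonPlusFlat())
#     # ref_c[71] contains the Top-8 reference visualizations for concept 71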
from zennit.composites import NameMapComposite
from zennit.core import Composite
from crp.hooks import MaskHook
from crp.concepts import Concept, ChannelConcept
from crp.graph import ModelGraph
from typing import Callable, List, Dict, Union, Tuple
import torch
import warnings
import numpy as np
import math
from tqdm import tqdm
from collections import namedtuple
attrResult = namedtuple("AttributionResults", "heatmap, activations, relevances, prediction")
attrGraphResult = namedtuple("AttributionGraphResults", "nodes, connections")
class CondAttribution:
def __init__(self, model: torch.nn.Module, device: torch.device = None, overwrite_data_grad=True, no_param_grad=True) -> None:
"""
This class contains the functionality to compute conditional attributions.
Parameters:
----------
model: torch.nn.Module
device: torch.device
specifies where the model and subsequent computation takes place.
overwrite_data_grad: boolean
If True, the .grad attribute of the 'data' argument is set to None before each __call__.
no_param_grad: boolean
If True, sets the requires_grad attribute of all model parameters to False to reduce the GPU memory footprint.
"""
self.MODEL_OUTPUT_NAME = "y"
self.device = next(model.parameters()).device if device is None else device
self.model = model
self.overwrite_data_grad = overwrite_data_grad
if no_param_grad:
self.model.requires_grad_(False)
def backward(self, pred, grad_mask, partial_backward, layer_names, layer_out, generate=False):
if partial_backward and len(layer_names) > 0:
wrt_tensor, grad_tensors = pred, grad_mask.to(pred)
for l_name in layer_names:
inputs = layer_out[l_name]
try:
grad = torch.autograd.grad(wrt_tensor, inputs=inputs, grad_outputs=grad_tensors, retain_graph=True)
except RuntimeError as e:
if "allow_unused=True" not in str(e):
raise e
else:
raise RuntimeError(
"The layer names must be ordered according to their succession in the model if 'exclude_parallel'=True."
" Please make sure to start with the last and end with the first layer in each condition dict. In addition,"
" parallel layers can not be used in one condition.")
# TODO: necessary?
if grad is None:
raise RuntimeError(
"The layer names must be ordered according to their succession in the model if 'exclude_parallel'=True."
" Please make sure to start with the last and end with the first layer in each condition dict. In addition,"
" parallel layers can not be used in one condition.")
wrt_tensor, grad_tensors = layer_out[l_name], grad
torch.autograd.backward(wrt_tensor, grad_tensors, retain_graph=generate)
else:
torch.autograd.backward(pred, grad_mask.to(pred), retain_graph=generate)
def relevance_init(self, prediction, target_list, init_rel):
"""
Parameters:
-----------
prediction: torch.Tensor
output of model forward pass
target_list: list/numpy.ndarray or None
list of all 'y' values of condition dictionaries. Indices are used to set the
initial relevance to prediction values. If target_list is None and init_rel is None,
relevance is initialized at all indices with prediction values. If start_layer is
used, target_list is set to None.
init_rel: torch.Tensor or None
used to initialize relevance instead of prediction. If None, target_list is used.
Please make sure to choose the right shape.
"""
if callable(init_rel):
output_selection = init_rel(prediction)
elif isinstance(init_rel, torch.Tensor):
output_selection = init_rel
elif isinstance(init_rel, (int, np.integer)):
output_selection = torch.full(prediction.shape, init_rel, dtype=prediction.dtype, device=prediction.device)
else:
output_selection = prediction
if target_list:
mask = torch.zeros_like(output_selection)
for i, targets in enumerate(target_list):
mask[i, targets] = output_selection[i, targets]
output_selection = mask
return output_selection
def heatmap_modifier(self, data, on_device=None):
heatmap = data.grad.detach()
heatmap = heatmap.to(on_device) if on_device else heatmap
return torch.sum(heatmap, dim=1)
def broadcast(self, data, conditions) -> Tuple[torch.Tensor, Dict]:
len_data, len_cond = len(data), len(conditions)
if len_data == len_cond:
data.retain_grad()
return data, conditions
if len_cond > 1:
data = torch.repeat_interleave(data, len_cond, dim=0)
if len_data > 1:
conditions = conditions * len_data
data.retain_grad()
return data, conditions
def _check_arguments(self, data, conditions, start_layer, exclude_parallel, init_rel):
if not data.requires_grad:
raise ValueError(
"requires_grad attribute of 'data' must be True.")
if self.overwrite_data_grad:
data.grad = None
elif data.grad is not None:
warnings.warn("'data' already has a filled .grad attribute. Set to None if not intended or set 'overwrite_grad' to True.")
distinct_cond = set()
for cond in conditions:
if self.MODEL_OUTPUT_NAME not in cond and start_layer is None and init_rel is None:
raise ValueError(
f"Either {self.MODEL_OUTPUT_NAME} in 'conditions' or 'start_layer' or 'init_rel' must be defined.")
if self.MODEL_OUTPUT_NAME in cond and start_layer is not None:
warnings.warn(
f"You defined a condition for {self.MODEL_OUTPUT_NAME} that has no effect, since the 'start_layer' {start_layer}"
" is provided where the backward pass begins. If this behavior is not wished, remove 'start_layer'.")
if exclude_parallel:
if len(distinct_cond) == 0:
distinct_cond.update(cond.keys())
elif distinct_cond ^ set(cond.keys()):
raise ValueError("If the 'exclude_parallel' flag is set to True, each condition dict must contain the"
" same layer names. (This limitation does not apply to the __call__ method)")
def _register_mask_fn(self, hook, mask_map, b_index, c_indices, l_name):
if callable(mask_map):
mask_fn = mask_map(b_index, c_indices, l_name)
elif isinstance(mask_map, Dict):
mask_fn = mask_map[l_name](b_index, c_indices, l_name)
else:
raise ValueError("<mask_map> must be a dictionary or callable function.")
hook.fn_list.append(mask_fn)
def __call__(
self, data: torch.Tensor, conditions: List[Dict[str, List]],
composite: Composite = None, record_layer: List[str] = [],
mask_map: Union[Callable, Dict[str, Callable]] = ChannelConcept.mask, start_layer: str = None, init_rel=None,
on_device: str = None, exclude_parallel=True) -> attrResult:
"""
Computes conditional attributions by masking the gradient flow of PyTorch (that is replaced by zennit with relevance values).
The relevance distribution rules (as for LRP e.g.) are described in the zennit 'composite'. Relevance can be initialized at
the model output or 'start_layer' with the 'init_rel' argument.
How the relevances are masked is determined by the 'conditions' as well as the 'mask_map'. In addition, 'exclude_parallel'=True,
restricts the PyTorch gradient flow so that it does not enter into parallel layers (shortcut connections) of the layers mentioned
in the 'conditions' dictionary.
The name of the model output is designated with self.MODEL_OUTPUT_NAME ('y' per default) and can be used inside 'conditions'.
Parameters:
-----------
data: torch.Tensor
Input sample for which a conditional heatmap is computed
conditions: list of dict
The key of a dict are string layer names and their value is a list of integers describing the concept (channel, neuron) index.
In general, the values are passed to the 'mask_map' function as 'concept_ids' argument.
composite: zennit Composite
Object that describes how relevance is distributed. Should contain a suitable zennit Canonizer.
mask_map: dict of callable or callable
The keys of the dict are string layer names and the values functions that implement gradient masking. If no dict is used,
all layers are masked according to the same function.
The 'conditions' values are passed into the function as 'concept_ids' argument.
start_layer: (optional) str
Layer name where to start the backward pass instead of starting at the model output.
If set, 'init_rel' modifies the tensor at 'start_layer' instead and a condition containing self.MODEL_OUTPUT_NAME is ignored.
init_rel: (optional) torch.Tensor, int or callable
Initializes the relevance distribution process, as described e.g. in the LRP algorithm. The callable must have the signature
callable(activations).
By default, relevance is initialized with the logit activation before a non-linearity.
on_device: (optional) str
On which device (cpu, cuda) to save the heatmap, intermediate activations and relevances.
By default, everything is kept on the same device as the model parameters.
exclude_parallel: boolean
If set, the PyTorch gradient flow is restricted so that it does not enter into parallel layers (shortcut connections)
of the layers mentioned in the 'conditions' dictionary. Useful to get the sole contribution of a specific concept.
Returns:
--------
attrResult: namedtuple object
Contains the attributes 'heatmap', 'activations', 'relevances' and 'prediction'.
'heatmap': torch.Tensor
Output of the self.heatmap_modifier method that defines how 'data'.grad is processed.
'activations': dict of str and torch.Tensor
The keys are the layer names and values are the activations
'relevances': dict of str and torch.Tensor
The keys are the layer names and values are the relevances
'prediction': torch.Tensor
The model prediction output. If 'start_layer' is set, 'prediction' is the layer activation.
"""
if exclude_parallel:
return self._conditions_wrapper(data, conditions, composite, record_layer, mask_map, start_layer, init_rel, on_device, True)
else:
return self._attribute(data, conditions, composite, record_layer, mask_map, start_layer, init_rel, on_device, False)
def _conditions_wrapper(self, *args):
"""
Since 'exclude_parallel'=True requires that all condition dicts contain the same layer names,
the conditions list is split into groups whose dicts share an identical set of layer names, which are then attributed separately.
"""
data, conditions = args[:2]
relevances, activations = {}, {}
heatmap, prediction = None, None
dist_conds = self._separate_conditions(conditions)
for dist_layer in dist_conds:
attr = self._attribute(data, dist_conds[dist_layer], *args[2:])
for l_name in attr.relevances:
if l_name not in relevances:
relevances[l_name] = attr.relevances[l_name]
activations[l_name] = attr.activations[l_name]
else:
relevances[l_name] = torch.cat([relevances[l_name], attr.relevances[l_name]], dim=0)
activations[l_name] = torch.cat([activations[l_name], attr.activations[l_name]], dim=0)
if heatmap is None:
heatmap = attr.heatmap
prediction = attr.prediction
else:
heatmap = torch.cat([heatmap, attr.heatmap], dim=0)
prediction = torch.cat([prediction, attr.prediction], dim=0)
return attrResult(heatmap, activations, relevances, prediction)
def _separate_conditions(self, conditions):
"""
Finds identical subsets of layer names inside 'conditions'
"""
distinct_cond = dict()
for cond in conditions:
cond_set = frozenset(cond.keys())
if cond_set in distinct_cond:
distinct_cond[cond_set].append(cond)
else:
distinct_cond[cond_set] = [cond]
return distinct_cond
def _attribute(
self, data: torch.Tensor, conditions: List[Dict[str, List]],
composite: Composite = None, record_layer: List[str] = [],
mask_map: Union[Callable, Dict[str, Callable]] = ChannelConcept.mask, start_layer: str = None, init_rel=None,
on_device: str = None, exclude_parallel=True) -> attrResult:
"""
Computes the actual attributions as described in __call__ method docstring.
exclude_parallel: boolean
If set, all layer names in 'conditions' must be identical. This limitation does not apply to the __call__ method.
"""
data, conditions = self.broadcast(data, conditions)
self._check_arguments(data, conditions, start_layer, exclude_parallel, init_rel)
hook_map, y_targets, cond_l_names = {}, [], []
for i, cond in enumerate(conditions):
for l_name, indices in cond.items():
if l_name == self.MODEL_OUTPUT_NAME:
y_targets.append(indices)
else:
if l_name not in hook_map:
hook_map[l_name] = MaskHook([])
self._register_mask_fn(hook_map[l_name], mask_map, i, indices, l_name)
if l_name not in cond_l_names:
cond_l_names.append(l_name)
handles, layer_out = self._append_recording_layer_hooks(record_layer, start_layer, cond_l_names)
name_map = [([name], hook) for name, hook in hook_map.items()]
mask_composite = NameMapComposite(name_map)
if composite is None:
composite = Composite()
with mask_composite.context(self.model), composite.context(self.model) as modified:
if start_layer:
_ = modified(data)
pred = layer_out[start_layer]
grad_mask = self.relevance_init(pred.detach().clone(), None, init_rel)
if start_layer in cond_l_names:
cond_l_names.remove(start_layer)
self.backward(pred, grad_mask, exclude_parallel, cond_l_names, layer_out)
else:
pred = modified(data)
grad_mask = self.relevance_init(pred.detach().clone(), y_targets, init_rel)
self.backward(pred, grad_mask, exclude_parallel, cond_l_names, layer_out)
attribution = self.heatmap_modifier(data, on_device)
activations, relevances = {}, {}
if len(layer_out) > 0:
activations, relevances = self._collect_hook_activation_relevance(layer_out, on_device)
[h.remove() for h in handles]
return attrResult(attribution, activations, relevances, pred)
def generate(
self, data: torch.Tensor, conditions: List[Dict[str, List]],
composite: Composite = None, record_layer: List[str] = [],
mask_map: Union[Callable, Dict[str, Callable]] = ChannelConcept.mask, start_layer: str = None, init_rel=None,
batch_size=10, on_device=None, exclude_parallel=True, verbose=True) -> attrResult:
"""
Computes several conditional attributions for a single data point by broadcasting 'data' to length 'batch_size' and
iterating through the 'conditions' list with step size 'batch_size'. The model forward pass is performed only once and
the backward graph is kept in memory to double the performance.
Please refer to the docstring of the __call__ method.
batch_size: int
batch size of each forward and backward pass
exclude_parallel: boolean
If set, all layer names in 'conditions' must be identical. This limitation does not apply to the __call__ method.
verbose: boolean
If set, a progressbar is displayed.
"""
self._check_arguments(data, conditions, start_layer, exclude_parallel, init_rel)
# register on all layers in layer_map an empty hook
hook_map, cond_l_names = {}, []
for cond in conditions:
for l_name in cond.keys():
if l_name not in hook_map:
hook_map[l_name] = MaskHook([])
if l_name != self.MODEL_OUTPUT_NAME and l_name not in cond_l_names:
cond_l_names.append(l_name)
handles, layer_out = self._append_recording_layer_hooks(record_layer, start_layer, cond_l_names)
name_map = [([name], hook) for name, hook in hook_map.items()]
mask_composite = NameMapComposite(name_map)
if composite is None:
composite = Composite()
cond_length = len(conditions)
if cond_length > batch_size:
batches = math.ceil(cond_length / batch_size)
else:
batches = 1
batch_size = cond_length
data_batch = torch.repeat_interleave(data, batch_size, dim=0)
data_batch.grad = None
data_batch.retain_grad()
retain_graph = True
with mask_composite.context(self.model), composite.context(self.model) as modified:
if start_layer:
_ = modified(data_batch)
pred = layer_out[start_layer]
if start_layer in cond_l_names:
cond_l_names.remove(start_layer)
else:
pred = modified(data_batch)
if verbose:
pbar = tqdm(total=batches, dynamic_ncols=True)
for b in range(batches):
if verbose:
pbar.update(1)
cond_batch = conditions[b * batch_size: (b + 1) * batch_size]
y_targets = []
for i, cond in enumerate(cond_batch):
for l_name, indices in cond.items():
if l_name == self.MODEL_OUTPUT_NAME:
y_targets.append(indices)
else:
self._register_mask_fn(hook_map[l_name], mask_map, i, indices, l_name)
if b == batches-1:
# last batch may have len(y_targets) != batch_size. Padded part is ignored later.
# and backward graph is freed with retain_graph=False
if not start_layer:
y_targets.extend([y_targets[0] for i in range(batch_size-len(y_targets))])
batch_size = len(cond_batch)
retain_graph = False
grad_mask = self.relevance_init(pred.detach().clone(), y_targets, init_rel)
self.backward(pred, grad_mask, exclude_parallel, cond_l_names, layer_out, retain_graph)
heatmap = self.heatmap_modifier(data_batch)
activations, relevances = {}, {}
if len(layer_out) > 0:
activations, relevances = self._collect_hook_activation_relevance(
layer_out, on_device, batch_size)
yield attrResult(heatmap[:batch_size], activations, relevances, pred[:batch_size])
self._reset_gradients(data_batch)
[hook.fn_list.clear() for hook in hook_map.values()]
[h.remove() for h in handles]
if verbose:
pbar.close()
@staticmethod
def _generate_hook(layer_name, layer_out):
def get_tensor_hook(module, input, output):
layer_out[layer_name] = output
output.retain_grad()
return get_tensor_hook
def _append_recording_layer_hooks(self, record_l_names: list, start_layer, cond_l_names):
"""
applies a forward hook to all layers in record_l_names, start_layer and cond_l_names to record
the activations and relevances
"""
handles = []
layer_out = {}
record_l_names = record_l_names.copy()
for l_name in cond_l_names:
if l_name not in record_l_names:
record_l_names.append(l_name)
if start_layer is not None and start_layer not in record_l_names:
record_l_names.append(start_layer)
for name, layer in self.model.named_modules():
if name == self.MODEL_OUTPUT_NAME:
raise ValueError(
"No layer name should match the constant for the identifier of the model output."
"Please change the layer name or the OUTPUT_NAME constant of the object."
"Note, that the condition set then references to the output with OUTPUT_NAME and no longer 'y'.")
if name in record_l_names:
h = layer.register_forward_hook(self._generate_hook(name, layer_out))
handles.append(h)
record_l_names.remove(name)
if start_layer in record_l_names:
raise KeyError(f"<start_layer> {start_layer} not found in model.")
if len(record_l_names) > 0:
warnings.warn(
f"Some layer names not found in model: {record_l_names}.")
return handles, layer_out
def _collect_hook_activation_relevance(self, layer_out, on_device=None, length=None):
"""
Parameters:
----------
layer_out: dict
contains the intermediate layer outputs
on_device: str
copy layer_out on cpu or cuda device
length: int
copy only first length elements of layer_out. Used for uneven batch sizes.
"""
relevances = {}
activations = {}
for name in layer_out:
act = layer_out[name].detach()[:length]
activations[name] = act.to(on_device) if on_device else act
activations[name].requires_grad = False
if layer_out[name].grad is None:
rel = torch.zeros_like(activations[name], requires_grad=False)[:length]
relevances[name] = rel.to(on_device) if on_device else rel
else:
rel = layer_out[name].grad.detach()[:length]
relevances[name] = rel.to(on_device) if on_device else rel
relevances[name].requires_grad = False
layer_out[name].grad = None
return activations, relevances
def _reset_gradients(self, data):
"""
custom zero_grad() function
"""
for p in self.model.parameters():
p.grad = None
data.grad = None
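# Illustrative sketch of a conditional attribution call (hypothetical layer name
# "features.40", concept index 71 and target class 3; the argument layout matches
# the __call__ method above):
#
#     from zennit.composites import EpsilonPlusFlat
#
#     attribution = CondAttribution(model)
#     data.requires_grad = True
#     conditions = [{"features.40": [71], "y": [3]}]
#     attr = attribution(data, conditions, EpsilonPlusFlat(), record_layer=["features.40"])
#     attr.heatmap                      # conditional heatmap of concept 71 w.r.t. class 3
#     attr.relevances["features.40"]    # recorded intermediate relevance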
class AttributionGraph:
def __init__(self, attribution: CondAttribution, graph: ModelGraph, layer_map: Dict[str, Concept]):
self.attribution = attribution
self.graph = graph
self.set_layer_map(layer_map)
def set_layer_map(self, layer_map):
"""
set layer map of attribution graph
"""
self.layer_map = layer_map
self.mask_map = {l_name: c.mask for l_name, c in layer_map.items()}
def __call__(
self, sample, composite, concept_id: int, layer_name, target=None, width: List[int] = [4, 2],
parent_c_id: int = None, parent_layer: str = None, abs_norm=True, batch_size=16, verbose=True):
"""
Decomposes a higher-level concept into its lower-level concepts taking advantage of the
relevance flow of a specific prediction.
Parameters:
-----------
sample: torch.Tensor
composite: zennit.composites.Composite
concept_id: int
index of higher-level concept that is decomposed
layer_name: str
name of layer where the higher-level concept is located
target: None or int
if defined, decomposes the higher-level concept w.r.t. target prediction
width: list of integers
describes how many lower-level concepts per layer are returned. The length
of the list specifies the number of lower-level layers that are successively decomposed
following the higher level layer `layer_name`.
parent_c_id: int
if the higher-level concept `concept_id` is decomposed in context of another higher concept,
then this parameter denotes the original higher-level concept.
parent_layer: str
layer name of concept with index `parent_c_id`
abs_norm: boolean
if True, normalizes the relevance by dividing with the sum of absolute value
batch_size: int
maximal batch size
Returns:
--------
nodes: list of tuples
All concept indices with their layer names present in the attribution graph.
The first element is the layer name and the second the index.
connections: dict, keys are str and values are tuples with a length of three
Describes the connection between two nodes in the graph.
The key is the source and the value the target. The first element is the layer name,
the second the index and the third the relevance value.
"""
nodes = [(layer_name, concept_id)]
connections = {}
if target is not None:
start_layer = None
elif parent_layer:
start_layer = parent_layer
else:
start_layer = layer_name
parent_cond = {}
if parent_c_id is not None and parent_layer:
parent_cond[parent_layer] = [parent_c_id]
else:
parent_cond[layer_name] = [concept_id]
if target is not None:
parent_cond[self.attribution.MODEL_OUTPUT_NAME] = [target]
cond_tuples = [(layer_name, concept_id)]
for w in width:
conditions, input_layers = [], []
for l_name, c_id in cond_tuples:
cond = {l_name: [c_id]}
cond.update(parent_cond)
conditions.append(cond)
in_layers = self.graph.find_input_layers(l_name)
for name in in_layers:
if name not in input_layers:
input_layers.append(name)
b, next_cond_tuples = 0, []
for attr in self.attribution.generate(
sample, conditions, composite, record_layer=input_layers,
mask_map=self.mask_map, start_layer=start_layer, batch_size=batch_size, verbose=verbose, exclude_parallel=False):
self._attribute_lower_level(
cond_tuples[b * batch_size: (b + 1) * batch_size],
attr.relevances, w, nodes, connections, next_cond_tuples, abs_norm)
b += 1
cond_tuples = next_cond_tuples
return attrGraphResult(nodes, connections)
def _attribute_lower_level(self, cond_tuples, relevances, w, nodes, connections, next_cond_tuples, abs_norm):
for i, (l_name, c_id) in enumerate(cond_tuples):
input_layers = self.graph.find_input_layers(l_name)
for inp_l in input_layers:
rel = relevances[inp_l][[i]]
rel_c = self.layer_map[inp_l].attribute(rel, abs_norm=abs_norm)[0]
c_ids = torch.argsort(rel_c, descending=True)[:w].tolist()
nodes.extend([(inp_l, id) for id in c_ids])
next_cond_tuples.extend([(inp_l, id) for id in c_ids])
if (l_name, c_id) not in connections:
connections[(l_name, c_id)] = []
connections[(l_name, c_id)].extend([(inp_l, id, rel_c[id].item()) for id in c_ids])
return None | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/attribution.py | attribution.py |
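# Illustrative sketch of decomposing a concept into its lower-level concepts
# (hypothetical names; 'graph' is built with crp.graph.trace_model_graph):
#
#     from crp.graph import trace_model_graph
#
#     graph = trace_model_graph(model, sample, layer_names)
#     attgraph = AttributionGraph(attribution, graph, layer_map)
#     nodes, connections = attgraph(sample, composite, concept_id=71,
#                                   layer_name="features.40", width=[4, 2])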
import weakref
import functools
import torch
from zennit.core import RemovableHandle, RemovableHandleList
class MaskHook:
'''Mask hooks for adaptive gradient masking or simple modification.'''
def __init__(self, fn_list):
self.fn_list = fn_list
def post_forward(self, module, input, output):
'''Register a backward-hook to the resulting tensor right after the forward.'''
hook_ref = weakref.ref(self)
@functools.wraps(self.backward)
def wrapper(grad):
return hook_ref().backward(module, grad)
if not isinstance(output, tuple):
output = (output,)
if output[0].grad_fn is not None:
# only if gradient required
output[0].register_hook(wrapper)
return output[0] if len(output) == 1 else output
def backward(self, module, grad):
'''Hook applied during backward-pass'''
for mask_fn in self.fn_list:
grad = mask_fn(grad)
return grad
def copy(self):
'''Return a copy of this hook.
This is used to describe hooks of different modules by a single hook instance.
Copies retain the same fn_list list.
'''
return self.__class__(fn_list=self.fn_list)
def remove(self):
'''When removing hooks, remove all stored mask_fn.'''
self.fn_list.clear()
def register(self, module):
'''Register this instance by registering the necessary forward hook to the supplied module.'''
return RemovableHandleList([
RemovableHandle(self),
module.register_forward_hook(self.post_forward),
])
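# Illustrative sketch of how MaskHook is used by CondAttribution (hypothetical
# names): one hook per conditioned layer is wrapped in a zennit NameMapComposite,
# and masking functions appended to 'fn_list' modify the gradient (relevance)
# during the backward pass:
#
#     from zennit.composites import NameMapComposite
#
#     hook = MaskHook([])
#     mask_composite = NameMapComposite([(["features.40"], hook)])
#     hook.fn_list.append(lambda grad: grad * mask)  # 'mask' zeroes out unwanted concepts
#     with mask_composite.context(model) as modified:
#         out = modified(data)
#         out.backward(torch.ones_like(out))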
class FeatVisHook:
'''Feature Visualization hooks for reference sampling inside forward and backward passes.'''
def __init__(self, FV, concept, layer_name, dict_inputs, on_device):
"""
Parameters:
dict_inputs: contains sample_indices and targets inputs to FV.analyze_activation and FV.analyze_relevance
"""
self.FV = FV
self.concept = concept
self.layer_name = layer_name
self.dict_inputs = dict_inputs
self.on_device = on_device
def post_forward(self, module, input, output):
'''Register a backward-hook to the resulting tensor right after the forward.'''
s_indices, targets = self.dict_inputs["sample_indices"], self.dict_inputs["targets"]
activation = output.detach().to(self.on_device) if self.on_device else output.detach()
self.FV.analyze_activation(activation, self.layer_name, self.concept, s_indices, targets)
hook_ref = weakref.ref(self)
@functools.wraps(self.backward)
def wrapper(grad):
return hook_ref().backward(module, grad)
if not isinstance(output, tuple):
output = (output,)
if output[0].grad_fn is not None:
# only if gradient required
output[0].register_hook(wrapper)
return output[0] if len(output) == 1 else output
def backward(self, module, grad):
'''Hook applied during backward-pass'''
s_indices, targets = self.dict_inputs["sample_indices"], self.dict_inputs["targets"]
relevance = grad.detach().to(self.on_device) if self.on_device else grad.detach()
self.FV.analyze_relevance(relevance, self.layer_name, self.concept, s_indices, targets)
return grad
def copy(self):
'''Return a copy of this hook.
This is used to describe hooks of different modules by a single hook instance.
Copies retain references to the same FeatureVisualization, concept and dict_inputs objects.
'''
return self.__class__(self.FV, self.concept, self.layer_name, self.dict_inputs, self.on_device)
def remove(self):
pass
def register(self, module):
'''Register this instance by registering the necessary forward hook to the supplied module.'''
return RemovableHandleList([
RemovableHandle(self),
module.register_forward_hook(self.post_forward),
]) | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/hooks.py | hooks.py |
import torch
from typing import List
class GraphNode:
"""
Contains meta information about a node in a PyTorch jit graph
"""
def __init__(self, node):
self.id = node.__repr__()
self.scopeName = node.scopeName()
if len(self.scopeName) == 0:
self.scopeName = node.kind()
self.output_nodes = []
self.input_nodes = []
self.is_layer = False
self.input_layers = []
if "aten" in node.kind():
self.layer_name = self.scopeName.split("__module.")[-1]
self.is_layer = True
class ModelGraph:
"""
This class contains meta information about layer connections inside a PyTorch model.
Use the `find_input_layers` method to get the input layers of a specific nn.Module.
"""
def __init__(self, input_nodes):
self.id_node_map = {}
self.layer_node_map = {}
for node in input_nodes:
self._add_node(node)
def _add_node(self, node):
"""
Add a node to the internal dictionaries
"""
node_id = node.__repr__()
if node_id not in self.id_node_map:
node_obj = GraphNode(node)
self.id_node_map[node_id] = node_obj
if node_obj.is_layer:
self.layer_node_map[node_obj.layer_name] = node_obj
return self.id_node_map[node_id]
def _add_connection(self, in_node, out_node):
"""
Add an entry for the connection between 'in_node' and 'out_node'
"""
layer_node_in = self._add_node(in_node)
layer_node_out = self._add_node(out_node)
new_connection = False
if layer_node_in not in layer_node_out.input_nodes:
layer_node_out.input_nodes.append(layer_node_in)
new_connection = True
if layer_node_out not in layer_node_in.output_nodes:
layer_node_in.output_nodes.append(layer_node_out)
new_connection = True
return new_connection
def set_layer_names(self, layer_names: list):
self.layer_names = layer_names
for name in layer_names:
# cache results
self.find_input_layers(name)
def find_input_layers(self, layer_name: str) -> List:
"""
Returns all layer names that are connected to the input of `layer_name`.
The method returns only layers that were supplied as the `layer_names` argument
to the `trace_model_graph` function. If you wish to change the available layers,
please modify the self.layer_names attribute.
Parameters:
----------
layer_name: str
name of torch.nn.Module
"""
if layer_name not in self.layer_node_map:
raise KeyError(f"{layer_name} does not exist")
root_node = self.layer_node_map[layer_name]
if len(root_node.input_layers) == 0:
# cache results
layers = self._recursive_search(root_node)
root_node.input_layers = layers
return layers
else:
return root_node.input_layers
def _recursive_search(self, node_obj):
found_layers = []
for g_node in node_obj.input_nodes:
if g_node.is_layer and g_node.layer_name in self.layer_names:
found_layers.append(g_node.layer_name)
else:
found_layers.extend(self._recursive_search(g_node))
return found_layers
def __str__(self):
model_string = ""
for layer in self.id_node_map.values():
model_string += layer.scopeName + " -> "
for next_l in layer.output_nodes:
model_string += next_l.scopeName + ", "
if len(layer.output_nodes) == 0:
model_string += " end"
else:
model_string += "\n"
return model_string
def trace_model_graph(model, sample: torch.Tensor, layer_names: List[str], debug=False) -> ModelGraph:
""""
As pytorch does not trace the model structure like tensorflow, we need to do it ourselves.
Thus, this function generates a model graph - a summary - how all nn.Module are connected with each other.
Parameters:
----------
model: torch.nn.Module
sample: torch.Tensor
An exemplary input, used to trace the model with torch.jit.trace
layer_names: list of strings
List of all layer names that should be accessible in the model graph summary
debug: boolean
If True, prints the traced inlined graph of torch.jit
Returns:
-------
ModelGraph: obj
Object that contains meta information about the connection of modules.
Use the `find_input_layers` method to get the input layers of a specific nn.Module.
"""
# we use torch.jit to record the connections of all tensors
traced = torch.jit.trace(model, (sample,), check_trace=False)
# inlined_graph returns a suitable presentation of the traced model
graph = traced.inlined_graph
if debug is True:
dump_pytorch_graph(graph)
"""
We search for all input nodes from which a recursive traversal through the network graph can start.
First, we collect all input and output tensor ids for each node, as they are spread out in the original
torch.jit representation. Then we search for nodes whose input tensors are not connected to the output
of any other node; such a node is an input node by definition.
"""
node_inputs, node_outputs = _collect_node_inputs_and_outputs(graph)
input_nodes = _get_input_nodes(graph, node_inputs, node_outputs)
# initialize a model representation where we save the results
MG = ModelGraph(input_nodes)
# start recursive decoding of torch.jit graph
for node in input_nodes:
_build_graph_recursive(MG, graph, node)
MG.set_layer_names(layer_names)
# explicitly free gpu ram
del traced, graph
return MG
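# Illustrative usage sketch (the VGG-16 layer names here are assumptions for this example):
#
#     import torchvision
#     model = torchvision.models.vgg16()
#     sample = torch.randn(1, 3, 224, 224)
#     names = [n for n, m in model.named_modules() if isinstance(m, torch.nn.Conv2d)]
#     MG = trace_model_graph(model, sample, names)
#     MG.find_input_layers("features.2")  # e.g. -> ["features.0"]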
def _build_graph_recursive(MG: ModelGraph, graph, in_node):
"""
Recursive function traverses the graph constructed by torch.jit.trace
and records the graph structure inside our ModelGraph class
"""
node_outputs = [i.unique() for i in in_node.outputs()]
next_nodes = _find_next_nodes(graph, node_outputs)
if len(next_nodes) == 0:
return
for node in next_nodes:
new_connection = MG._add_connection(in_node, node)
if new_connection:
_build_graph_recursive(MG, graph, node)
else:
return
def _find_next_nodes(graph, node_outputs):
"""
Helper function for build_graph_recursive.
"""
next_nodes = []
for node in graph.nodes():
node_inputs = [i.unique() for i in node.inputs()]
if set(node_inputs) & set(node_outputs):
next_nodes.append(node)
return next_nodes
def _collect_node_inputs_and_outputs(graph):
"""
Helper function to get all tensor ids of the input and output of each node.
Used to retrieve the input layers of the model.
"""
layer_inputs = {}
layer_outputs = {}
for node in graph.nodes():
# "aten" nodes are torch.nn.Modules
if "aten" in node.kind():
name = node.scopeName()
if name not in layer_inputs:
layer_inputs[name] = []
layer_outputs[name] = []
[layer_inputs[name].append(i.unique()) for i in node.inputs()]
[layer_outputs[name].append(i.unique()) for i in node.outputs()]
return layer_inputs, layer_outputs
def _get_input_nodes(graph, layer_inputs: dict, layer_outputs: dict):
"""
Returns input nodes of jit graph.
Used to retrieve the input layers of the model.
"""
input_nodes = []
for node in graph.nodes():
# "aten" describes all real layers
if "aten" in node.kind():
name = node.scopeName()
node_inputs = layer_inputs[name]
# if its inputs are not outputs of other modules -> an input node
if not _find_overlap_with_output(node_inputs, layer_outputs):
input_nodes.append(node)
return input_nodes
def _find_overlap_with_output(node_inputs: list, layer_outputs: dict):
"""
More efficient helper than _find_next_nodes: checks whether any tensor id in 'node_inputs' appears among the outputs of another module.
"""
for name in layer_outputs:
node_outputs = layer_outputs[name]
if set(node_inputs) & set(node_outputs):
# if overlap, no input node
return True
return False
def dump_pytorch_graph(graph):
"""
List all the nodes in a PyTorch jit graph.
Source: https://github.com/waleedka/hiddenlayer/blob/master/hiddenlayer/pytorch_builder.py
"""
f = "{:25} {:40} {} -> {}"
print(f.format("kind", "scopeName", "inputs", "outputs"))
for node in graph.nodes():
print(f.format(node.kind(), node.scopeName(),
[i.unique() for i in node.inputs()],
[i.unique() for i in node.outputs()]
)) | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/graph.py | graph.py |
from typing import Dict, List, Union, Any, Tuple, Iterable
from PIL import Image
import torch
from torchvision.transforms.functional import gaussian_blur
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import zennit.image as zimage
from crp.helper import max_norm
def get_crop_range(heatmap, crop_th):
"""
Returns indices to crop the supplied heatmap to the region where relevance is greater than 'crop_th'.
Parameters:
----------
heatmap: torch.Tensor
output heatmap tensor of the CondAttribution call
crop_th: between [0 and 1)
Cropping threshold: the heatmap is cropped to the region where the (normalized) relevance is greater than 'crop_th'.
"""
crop_mask = heatmap > crop_th
rows, columns = torch.where(crop_mask)
if len(rows) == 0 or len(columns) == 0:
# rf is empty
return 0, -1, 0, -1
row1, row2 = rows.min(), rows.max()
col1, col2 = columns.min(), columns.max()
if (row1 >= row2) and (col1 >= col2):
# rf is empty
return 0, -1, 0, -1
return row1, row2, col1, col2
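# Minimal sketch (illustrative values): a normalized heatmap with a hot patch at
# rows 2-3 and columns 5-6 yields the enclosing crop indices:
#
#     heat = torch.zeros(8, 8)
#     heat[2:4, 5:7] = 1.0
#     get_crop_range(heat, crop_th=0.1)  # -> row/col bounds 2, 3, 5, 6 (as 0-dim tensors)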
@torch.no_grad()
def vis_opaque_img(data_batch, heatmaps, rf=False, alpha=0.3, vis_th=0.2, crop_th=0.1, kernel_size=19) -> Image.Image:
"""
Draws reference images. The function lowers the opacity in regions with relevance lower than max(relevance)*vis_th.
In addition, the reference image can be cropped where relevance is less than max(relevance)*crop_th by setting 'rf' to True.
Parameters:
----------
data_batch: torch.Tensor
original images from dataset without FeatureVisualization.preprocess() applied to it
heatmaps: torch.Tensor
output heatmap tensor of the CondAttribution call
rf: boolean
Computes the CRP heatmap for a single neuron and hence restricts the heatmap to the receptive field.
The amount of cropping is further specified by the 'crop_th' argument.
alpha: between [0 and 1]
Regulates the transparency in low relevance regions.
vis_th: between [0 and 1)
Visualization Threshold: Increases transparency in regions where relevance is smaller than max(relevance)*vis_th.
crop_th: between [0 and 1)
Cropping Threshold: Crops the image in regions where relevance is smaller than max(relevance)*crop_th.
Cropping is only applied if the receptive field 'rf' is set to True.
kernel_size: scalar
Parameter of the torchvision.transforms.functional.gaussian_blur function used to smooth the CRP heatmap.
Returns:
--------
image: list of PIL.Image objects
If 'rf' is True, reference images have different shapes.
"""
if alpha > 1 or alpha < 0:
raise ValueError("'alpha' must be between [0, 1]")
if vis_th >= 1 or vis_th < 0:
raise ValueError("'vis_th' must be between [0, 1)")
if crop_th >= 1 or crop_th < 0:
raise ValueError("'crop_th' must be between [0, 1)")
imgs = []
for i in range(len(data_batch)):
img = data_batch[i]
filtered_heat = max_norm(gaussian_blur(heatmaps[i].unsqueeze(0), kernel_size=kernel_size)[0])
vis_mask = filtered_heat > vis_th
if rf:
row1, row2, col1, col2 = get_crop_range(filtered_heat, crop_th)
img_t = img[..., row1:row2, col1:col2]
vis_mask_t = vis_mask[row1:row2, col1:col2]
if img_t.sum() != 0 and vis_mask_t.sum() != 0:
# check whether img_t or vis_mask_t is not empty
img = img_t
vis_mask = vis_mask_t
inv_mask = ~vis_mask
img = img * vis_mask + img * inv_mask * alpha
img = zimage.imgify(img.detach().cpu())
imgs.append(img)
return imgs
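# Illustrative sketch (hypothetical tensors): render reference images with
# low-relevance regions dimmed, e.g. as 'plot_fn' of get_max_reference:
#
#     imgs = vis_opaque_img(data_batch, heatmaps, rf=True, alpha=0.2)
#     imgs[0].save("concept_71_ref_0.png")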
@torch.no_grad()
def vis_img_heatmap(data_batch, heatmaps, rf=False, crop_th=0.1, kernel_size=19, cmap="bwr", vmin=None, vmax=None, symmetric=True) -> Tuple[Image.Image, Image.Image]:
"""
Draws reference images and their conditional heatmaps. The function illustrates images using zennit.imgify and applies the supplied 'cmap' to heatmaps.
In addition, the reference images and heatmaps can be cropped where relevance is less than max(relevance)*crop_th by setting 'rf' to True.
Parameters:
----------
data_batch: torch.Tensor
original images from dataset without FeatureVisualization.preprocess() applied to it
heatmaps: torch.Tensor
output heatmap tensor of the CondAttribution call
rf: boolean
Computes the CRP heatmap for a single neuron and hence restricts the heatmap to the receptive field.
The amount of cropping is further specified by the 'crop_th' argument.
crop_th: between [0 and 1)
Cropping Threshold: Crops the image in regions where relevance is smaller than max(relevance)*crop_th.
Cropping is only applied if the receptive field 'rf' is set to True.
kernel_size: scalar
Parameter of the torchvision.transforms.functional.gaussian_blur function used to smooth the CRP heatmap.
REMAINING PARAMETERS: correspond to zennit.image.imgify
Returns:
--------
image: list of PIL.Image objects
If 'rf' is True, reference images have different shapes.
"""
img_list, heat_list = [], []
for i in range(len(data_batch)):
img = data_batch[i]
heat = heatmaps[i]
if rf:
filtered_heat = max_norm(gaussian_blur(heat.unsqueeze(0), kernel_size=kernel_size)[0])
row1, row2, col1, col2 = get_crop_range(filtered_heat, crop_th)
img_t = img[..., row1:row2, col1:col2]
heat_t = heat[row1:row2, col1:col2]
if img_t.sum() != 0 and heat_t.sum() != 0:
# check whether img_t or heat_t is not empty
img = img_t
heat = heat_t
heat = imgify(heat, cmap=cmap, vmin=vmin, vmax=vmax, symmetric=symmetric)
img = imgify(img)
img_list.append(img)
heat_list.append(heat)
return img_list, heat_list
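# Hypothetical usage sketch (not in the original source): the two returned
# lists are aligned index-wise, so image/heatmap pairs can be saved together:
#
#   img_list, heat_list = vis_img_heatmap(data_batch, heatmaps, rf=True,
#                                         crop_th=0.1, cmap="bwr", symmetric=True)
#   for i, (img, heat) in enumerate(zip(img_list, heat_list)):
#       img.save(f"ref_{i}.png")
#       heat.save(f"heat_{i}.png")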
def imgify(image: Union[Image.Image, torch.Tensor, np.ndarray], cmap: str = "bwr", vmin=None, vmax=None, symmetric=False, level=1.0, grid=False, gridfill=None, resize: int = None,
padding=False) -> Image.Image:
"""
Convenient wrapper around zennit.image.imgify supporting tensors, numpy arrays and PIL Images. Allows resizing while keeping the aspect
ratio intact and padding to a square shape.
Parameters:
----------
image: torch.Tensor, np.ndarray or PIL Image
A 2-dimensional array is treated as greyscale; a 3-dimensional array must have 1 or 3 (color) channels in its first or last dimension.
resize: None or int
If None, no resizing is applied. If int, the longer side of the image is resized to this value in pixels while the aspect ratio is preserved.
padding: boolean
If True, pads the image into a square shape by setting the alpha channel to zero outside the image.
vmin: float or obj:numpy.ndarray
Manual minimum value of the array. Overrides the used norm's minimum value.
vmax: float or obj:numpy.ndarray
Manual maximum value of the array. Overrides the used norm's maximum value.
cmap: str or ColorMap
String to specify a built-in color map, code used to create a new color map, or a ColorMap instance, which will be used to create a palette. The color map will only be applied for arrays with only a single color channel. The color will be specified as a palette in the PIL Image.
Returns:
--------
image: PIL.Image object
"""
if isinstance(image, torch.Tensor):
img = zimage.imgify(image.detach().cpu(), cmap=cmap, vmin=vmin, vmax=vmax, symmetric=symmetric, level=level, grid=grid, gridfill=gridfill)
elif isinstance(image, np.ndarray):
img = zimage.imgify(image, cmap=cmap, vmin=vmin, vmax=vmax, symmetric=symmetric, level=level, grid=grid, gridfill=gridfill)
elif isinstance(image, Image.Image):
img = image
else:
raise TypeError("Only PIL.Image, torch.Tensor or np.ndarray types are supported!")
if resize:
ratio = resize/max(img.size)
new_size = tuple([int(x*ratio) for x in img.size])
img = img.resize(new_size, Image.NEAREST)
if padding:
max_size = resize if resize else max(img.size)
new_im = Image.new("RGBA", (max_size, max_size))
new_im.putalpha(0)
new_im.paste(img, ((max_size-img.size[0])//2, (max_size-img.size[1])//2))
img = new_im
return img
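# Minimal sketch (not in the original source) of the resize and padding
# behavior: the longer side of a random 32x64 map is scaled to 224 px and the
# result is centered on a transparent 224x224 RGBA canvas:
#
#   heat = torch.randn(32, 64)
#   img = imgify(heat, cmap="bwr", symmetric=True, resize=224, padding=True)
#   assert img.size == (224, 224)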
def plot_grid(ref_c: Dict[int, Any], cmap_dim=1, cmap="bwr", vmin=None, vmax=None, symmetric=True, resize=None, padding=True, figsize=(6, 6)):
"""
Plots a dictionary of reference images as returned by the 'get_max_reference' method. crp.imgify is applied to every element in each list with its respective argument values.
Parameters:
----------
ref_c: dict mapping integer concept ids to one or several lists of torch.Tensor, np.ndarray or PIL Image
To every element in the list crp.imgify is applied.
resize: None or int
If None, no resizing is applied. If int, the longer side of the image is resized to this value in pixels while the aspect ratio is preserved.
padding: boolean
If True, pads the image into a square shape by setting the alpha channel to zero outside the image.
figsize: tuple or None
Size of plt.figure
cmap_dim: int, 0 or 1
If the dict values are tuples of lists, applies the cmap-related parameters to the first (0) or second (1) list, i.e. renders that sub-row as a heatmap.
REMAINING PARAMETERS: correspond to zennit.imgify
Returns:
--------
shows matplotlib.pyplot plot
"""
keys = list(ref_c.keys())
nrows = len(keys)
value = next(iter(ref_c.values()))
if cmap_dim not in (0, 1):
raise ValueError("'cmap_dim' must be 0 or 1.")
if isinstance(value, Tuple) and isinstance(value[0], Iterable):
nsubrows = len(value)
ncols = len(value[0])
elif isinstance(value, Iterable):
nsubrows = 1
ncols = len(value)
else:
raise ValueError("'ref_c' dictionary must contain an iterable of torch.Tensor, np.ndarray or PIL Image or a tuple of thereof.")
fig = plt.figure(figsize=figsize)
outer = gridspec.GridSpec(nrows, 1, wspace=0, hspace=0.2)
for i in range(nrows):
inner = gridspec.GridSpecFromSubplotSpec(nsubrows, ncols, subplot_spec=outer[i], wspace=0, hspace=0.1)
for sr in range(nsubrows):
if nsubrows > 1:
img_list = ref_c[keys[i]][sr]
else:
img_list = ref_c[keys[i]]
for c in range(ncols):
ax = plt.Subplot(fig, inner[sr, c])
if sr == cmap_dim:
img = imgify(img_list[c], cmap=cmap, vmin=vmin, vmax=vmax, symmetric=symmetric, resize=resize, padding=padding)
else:
img = imgify(img_list[c], resize=resize, padding=padding)
ax.imshow(img)
ax.set_xticks([])
ax.set_yticks([])
if sr == 0 and c == 0:
ax.set_ylabel(keys[i])
fig.add_subplot(ax)
outer.tight_layout(fig)
fig.show() | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/image.py | image.py |
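# Hypothetical usage sketch (not in the original source): 'ref_c' as returned
# by FeatureVisualization.get_max_reference with 'vis_img_heatmap' as plot_fn,
# i.e. {concept_id: (list of images, list of heatmaps)}; the concept ids and
# layer name below are placeholders:
#
#   ref_c = fv.get_max_reference([10, 20], "features.28", "relevance",
#                                plot_fn=vis_img_heatmap)
#   plot_grid(ref_c, cmap_dim=1, cmap="bwr", symmetric=True, resize=100)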
import torch
import numpy as np
import gc
import os
from pathlib import Path
from typing import List, Tuple
from tqdm import tqdm
class Statistics:
def __init__(self, mode="relevance", max_target="sum", abs_norm=False, path=None):
self.d_c_sorted, self.rel_c_sorted, self.rf_c_sorted = {}, {}, {}
self.SAMPLE_SIZE = 40
# generate path string for filenames
if abs_norm:
norm_str = "normed"
else:
norm_str = "unnormed"
if mode == "relevance":
self.sub_folder = Path(f"RelStats_{max_target}_{norm_str}/")
elif mode == "activation":
self.sub_folder = Path(f"ActStats_{max_target}_{norm_str}/")
else:
raise ValueError("<mode> must be 'relevance' or 'activation'.")
self.PATH = Path(path) / self.sub_folder if path else self.sub_folder
self.PATH.mkdir(parents=True, exist_ok=True)
# TODO: what happens if rf_c_sorted is empty? In sort and save method
# TODO: activation in save path instead of relevance!
def analyze_layer(self, d_c_sorted, rel_c_sorted, rf_c_sorted, t_c_sorted, layer_name):
t_unique = torch.unique(t_c_sorted)
for t in t_unique:
# gather d_c, rel_c and rf_c for each target separately
t_indices = t_c_sorted.t() == t
# - each column of t_c_sorted is a permutation of the same batch targets,
#   so every column contains the same number of entries equal to t
# - boolean indexing flattens its result in row-major (C-style) order
# - we transpose first, so that reshaping the flattened result of the
#   [t_indices] selection keeps the elements grouped per concept
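# Worked mini-example (illustration only): for n_concepts = 2 and
#   t_c_sorted = [[0, 0],
#                 [0, 1],
#                 [1, 0]]
#   d_c_sorted = [[a, x],
#                 [b, y],
#                 [c, z]]
# selecting t = 0 flattens d_c_sorted.t()[t_indices] concept-by-concept to
# [a, b, x, z]; .view(2, -1).t() then restores [[a, x], [b, z]], so every
# column still holds entries of its original concept.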
n_concepts = t_c_sorted.shape[1]
d_c_t = d_c_sorted.t()[t_indices].view(n_concepts, -1).t()
rel_c_t = rel_c_sorted.t()[t_indices].view(n_concepts, -1).t()
rf_c_t = rf_c_sorted.t()[t_indices].view(n_concepts, -1).t()
self.concatenate_with_results(layer_name, t.item(), d_c_t, rel_c_t, rf_c_t)
self.sort_result_array(layer_name, t.item())
def delete_result_arrays(self):
self.d_c_sorted, self.rel_c_sorted, self.rf_c_sorted = {}, {}, {}
gc.collect()
def concatenate_with_results(self, layer_name, target, d_c_sorted, rel_c_sorted, rf_c_sorted):
if target not in self.d_c_sorted:
self.d_c_sorted[target] = {}
self.rel_c_sorted[target] = {}
self.rf_c_sorted[target] = {}
if layer_name not in self.d_c_sorted[target]:
self.d_c_sorted[target][layer_name] = d_c_sorted
self.rel_c_sorted[target][layer_name] = rel_c_sorted
self.rf_c_sorted[target][layer_name] = rf_c_sorted
else:
self.d_c_sorted[target][layer_name] = torch.cat([d_c_sorted, self.d_c_sorted[target][layer_name]])
self.rel_c_sorted[target][layer_name] = torch.cat([rel_c_sorted, self.rel_c_sorted[target][layer_name]])
self.rf_c_sorted[target][layer_name] = torch.cat([rf_c_sorted, self.rf_c_sorted[target][layer_name]])
def sort_result_array(self, layer_name, target):
d_c_args = torch.argsort(self.rel_c_sorted[target][layer_name], dim=0, descending=True)
d_c_args = d_c_args[:self.SAMPLE_SIZE, :]
self.rel_c_sorted[target][layer_name] = torch.gather(self.rel_c_sorted[target][layer_name], 0, d_c_args)
self.rf_c_sorted[target][layer_name] = torch.gather(self.rf_c_sorted[target][layer_name], 0, d_c_args)
self.d_c_sorted[target][layer_name] = torch.gather(self.d_c_sorted[target][layer_name], 0, d_c_args)
def _save_results(self, d_index: Tuple[int, int] = None):
saved_files = []
for target in self.d_c_sorted:
for layer_name in self.d_c_sorted[target]:
if d_index:
filename = f"{target}_{d_index[0]}_{d_index[1]}_"
else:
filename = f"{target}_"
p_path = self.PATH / Path(layer_name)
p_path.mkdir(parents=True, exist_ok=True)
np.save(p_path / Path(filename + "data.npy"), self.d_c_sorted[target][layer_name].cpu().numpy())
np.save(p_path / Path(filename + "rf.npy"), self.rf_c_sorted[target][layer_name].cpu().numpy())
np.save(p_path / Path(filename + "rel.npy"), self.rel_c_sorted[target][layer_name].cpu().numpy())
saved_files.append(str(p_path / Path(filename)))
if d_index is None:
# if final collection, then save targets
np.save(self.PATH / Path("targets.npy"), np.array(list(self.d_c_sorted.keys())))
self.delete_result_arrays()
return saved_files
def collect_results(self, path_list: List[str], d_index: Tuple[int, int] = None):
self.delete_result_arrays()
pbar = tqdm(total=len(path_list), dynamic_ncols=True)
for path in path_list:
l_name, filename = Path(path).parts[-2:]
target = filename.split("_")[0]
d_c_sorted = np.load(path + "data.npy")
rf_c_sorted = np.load(path + "rf.npy")
rel_c_sorted = np.load(path + "rel.npy")
d_c_sorted, rf_c_sorted, rel_c_sorted = map(torch.from_numpy, [d_c_sorted, rf_c_sorted, rel_c_sorted])
self.concatenate_with_results(l_name, target, d_c_sorted, rel_c_sorted, rf_c_sorted)
self.sort_result_array(l_name, target)
pbar.update(1)
for path in path_list:
for suffix in ["data.npy", "rf.npy", "rel.npy"]:
os.remove(path + suffix)
pbar.close()
return self._save_results(d_index) | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/statistics.py | statistics.py |
import torch
import numpy as np
import gc
import os
from pathlib import Path
from typing import List, Tuple
from tqdm import tqdm
import re
from crp.concepts import Concept
class Maximization:
def __init__(self, mode="relevance", max_target="sum", abs_norm=False, path=None):
self.d_c_sorted, self.rel_c_sorted, self.rf_c_sorted = {}, {}, {}
self.SAMPLE_SIZE = 40
self.max_target = max_target
self.abs_norm = abs_norm
# generate path string for filenames
if abs_norm:
norm_str = "normed"
else:
norm_str = "unnormed"
if mode == "relevance":
self.sub_folder = Path(f"RelMax_{max_target}_{norm_str}/")
elif mode == "activation":
self.sub_folder = Path(f"ActMax_{max_target}_{norm_str}/")
else:
raise ValueError("<mode> must be 'relevance' or 'activation'.")
self.PATH = Path(path) / self.sub_folder if path else self.sub_folder
self.PATH.mkdir(parents=True, exist_ok=True)
# TODO: what happens if rf_c_sorted is empty? In sort and save
# TODO: activation in save path instead of relevance!
# TODO: for statistics in other class: make dummy variable for extra dataset instead of SDS
def analyze_layer(self, rel, concept: Concept, layer_name: str, data_indices, targets):
b_c_sorted, rel_c_sorted, rf_c_sorted = concept.reference_sampling(
rel, layer_name, self.max_target, self.abs_norm)
# convert batch index to dataset wide index
data_indices = torch.from_numpy(data_indices).to(b_c_sorted)
d_c_sorted = torch.take(data_indices, b_c_sorted)
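# e.g. (illustration only): data_indices = [120, 121, 122] maps a b_c_sorted
# row [2, 0] to the dataset-wide row [122, 120]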
# sort targets
targets = torch.Tensor(targets).to(b_c_sorted)
t_c_sorted = torch.take(targets, b_c_sorted)
SZ = self.SAMPLE_SIZE
self.concatenate_with_results(layer_name, d_c_sorted[:SZ], rel_c_sorted[:SZ], rf_c_sorted[:SZ])
self.sort_result_array(layer_name)
return d_c_sorted, rel_c_sorted, rf_c_sorted, t_c_sorted
def delete_result_arrays(self):
self.d_c_sorted, self.rel_c_sorted, self.rf_c_sorted = {}, {}, {}
gc.collect()
def concatenate_with_results(self, layer_name, d_c_sorted, rel_c_sorted, rf_c_sorted):
if layer_name not in self.d_c_sorted:
self.d_c_sorted[layer_name] = d_c_sorted
self.rel_c_sorted[layer_name] = rel_c_sorted
self.rf_c_sorted[layer_name] = rf_c_sorted
else:
self.d_c_sorted[layer_name] = torch.cat([d_c_sorted, self.d_c_sorted[layer_name]])
self.rel_c_sorted[layer_name] = torch.cat([rel_c_sorted, self.rel_c_sorted[layer_name]])
self.rf_c_sorted[layer_name] = torch.cat([rf_c_sorted, self.rf_c_sorted[layer_name]])
def sort_result_array(self, layer_name):
d_c_args = torch.flip(torch.argsort(self.rel_c_sorted[layer_name], dim=0), dims=(0,))
d_c_args = d_c_args[:self.SAMPLE_SIZE, :]
self.rel_c_sorted[layer_name] = torch.gather(self.rel_c_sorted[layer_name], 0, d_c_args)
self.rf_c_sorted[layer_name] = torch.gather(self.rf_c_sorted[layer_name], 0, d_c_args)
self.d_c_sorted[layer_name] = torch.gather(self.d_c_sorted[layer_name], 0, d_c_args)
def _save_results(self, d_index: Tuple[int, int] = None):
saved_files = []
for layer_name in self.d_c_sorted:
if d_index:
filename = f"{layer_name}_{d_index[0]}_{d_index[1]}_"
else:
filename = f"{layer_name}_"
np.save(self.PATH / Path(filename + "data.npy"), self.d_c_sorted[layer_name].cpu().numpy())
np.save(self.PATH / Path(filename + "rf.npy"), self.rf_c_sorted[layer_name].cpu().numpy())
np.save(self.PATH / Path(filename + "rel.npy"), self.rel_c_sorted[layer_name].cpu().numpy())
saved_files.append(str(self.PATH / Path(filename)))
self.delete_result_arrays()
return saved_files
def collect_results(self, path_list: List[str], d_index: Tuple[int, int] = None):
self.delete_result_arrays()
pbar = tqdm(total=len(path_list), dynamic_ncols=True)
for path in path_list:
filename = Path(path).name
l_name = re.split(r"_[0-9]+_[0-9]+_\b", filename)[0]
d_c_sorted = np.load(path + "data.npy")
rf_c_sorted = np.load(path + "rf.npy")
rel_c_sorted = np.load(path + "rel.npy")
d_c_sorted, rf_c_sorted, rel_c_sorted = map(torch.from_numpy, [d_c_sorted, rf_c_sorted, rel_c_sorted])
self.concatenate_with_results(l_name, d_c_sorted, rel_c_sorted, rf_c_sorted)
self.sort_result_array(l_name)
pbar.update(1)
for path in path_list:
for suffix in ["data.npy", "rf.npy", "rel.npy"]:
os.remove(path + suffix)
pbar.close()
return self._save_results(d_index) | zennit-crp | /zennit-crp-0.6.0.tar.gz/zennit-crp-0.6.0/crp/maximization.py | maximization.py |
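# Hypothetical usage sketch (not in the original source): this class is
# normally driven by crp's FeatureVisualization; the two-phase pattern is
# roughly (names and shapes below are placeholders):
#
#   maxi = Maximization(mode="relevance", max_target="sum", path="cache/")
#   for data_indices, (samples, targets) in batches:      # analysis phase
#       rel = ...  # per-concept relevance of shape (batch, n_concepts)
#       maxi.analyze_layer(rel, concept, "features.28", data_indices, targets)
#   files = maxi._save_results(d_index=(0, 1000))         # per-chunk dump
#   maxi.collect_results(files)                           # merge phase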