import boto.exception
from boto.compat import json
import requests
import boto
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection
class SearchServiceException(Exception):
pass
class CommitMismatchError(Exception):
# Carry the individual error messages from CloudSearch so callers can
# inspect and handle them on their own.
errors = None
class EncodingError(Exception):
"""
Content sent for Cloud Search indexing was incorrectly encoded.
This usually happens when a document is marked as unicode but non-unicode
characters are present.
"""
pass
class ContentTooLongError(Exception):
"""
Content sent for Cloud Search indexing was too long
This will usually happen when documents queued for indexing add up to more
than the limit allowed per upload batch (5MB)
"""
pass
class DocumentServiceConnection(object):
"""
A CloudSearch document service.
The DocumentServiceConnection is used to add, remove and update documents in
CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
Format).
To generate an appropriate SDF, use :func:`add` to add or update documents,
as well as :func:`delete` to remove documents.
Once the set of documents is ready to be indexed, use :func:`commit` to send
the commands to CloudSearch.
If there are a lot of documents to index, it may be preferable to split the
generation of SDF data and the actual uploading into CloudSearch. Retrieve
the current SDF with :func:`get_sdf`. If this data is then uploaded to S3,
it can be retrieved later and uploaded into CloudSearch using
:func:`add_sdf_from_s3`.
The SDF is not cleared after a :func:`commit`. If you wish to continue
using the DocumentServiceConnection for another batch upload of commands,
you will need to :func:`clear_sdf` first to stop the previous batch of
commands from being uploaded again.
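Example usage (a minimal sketch; assumes ``domain`` is an existing
:class:`boto.cloudsearch2.domain.Domain` and the document IDs and
field names shown are hypothetical)::
doc_service = domain.get_document_service()
doc_service.add('doc-1', {'title': 'Hello', 'tags': ['a', 'b']})
doc_service.delete('doc-2')
response = doc_service.commit()  # returns a CommitResponse
doc_service.clear_sdf()  # reset before building the next batch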
"""
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not self.endpoint:
self.endpoint = domain.doc_service_endpoint
self.documents_batch = []
self._sdf = None
# Copy proxy settings from connection and check if request should be signed
self.proxy = {}
self.sign_request = False
if self.domain and self.domain.layer1:
if self.domain.layer1.use_proxy:
self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()}
self.sign_request = getattr(self.domain.layer1, 'sign_request', False)
if self.sign_request:
# Create a domain connection to send signed requests
layer1 = self.domain.layer1
self.domain_connection = CloudSearchDomainConnection(
host=self.endpoint,
aws_access_key_id=layer1.aws_access_key_id,
aws_secret_access_key=layer1.aws_secret_access_key,
region=layer1.region,
provider=layer1.provider
)
def add(self, _id, fields):
"""
Add a document to be processed by the DocumentService
The document will not actually be added until :func:`commit` is called
:type _id: string
:param _id: A unique ID used to refer to this document.
:type fields: dict
:param fields: A dictionary of key-value pairs to be uploaded.
"""
d = {'type': 'add', 'id': _id, 'fields': fields}
self.documents_batch.append(d)
def delete(self, _id):
"""
Schedule a document to be removed from the CloudSearch service
The document will not actually be scheduled for removal until
:func:`commit` is called
:type _id: string
:param _id: The unique ID of this document.
"""
d = {'type': 'delete', 'id': _id}
self.documents_batch.append(d)
def get_sdf(self):
"""
Generate the working set of documents in Search Data Format (SDF)
:rtype: string
:returns: JSON-formatted string of the documents in SDF
"""
return self._sdf if self._sdf else json.dumps(self.documents_batch)
def clear_sdf(self):
"""
Clear the working documents from this DocumentServiceConnection
This should be used after :func:`commit` if the connection will be
reused for another set of documents.
"""
self._sdf = None
self.documents_batch = []
def add_sdf_from_s3(self, key_obj):
"""
Load an SDF from S3
Using this method will result in documents added through
:func:`add` and :func:`delete` being ignored.
:type key_obj: :class:`boto.s3.key.Key`
:param key_obj: An S3 key which contains an SDF
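Example (a sketch; the bucket and key names are hypothetical and a
working S3 connection is assumed)::
bucket = boto.connect_s3().get_bucket('my-sdf-bucket')
doc_service.add_sdf_from_s3(bucket.get_key('batch.sdf'))
doc_service.commit()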
"""
# @todo: (lucas) would be nice if this could just take an s3:// URI...
self._sdf = key_obj.get_contents_as_string()
def _commit_with_auth(self, sdf, api_version):
return self.domain_connection.upload_documents(sdf, 'application/json')
def _commit_without_auth(self, sdf, api_version):
url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)
# Keep-alive is automatic in a post-1.0 requests world.
session = requests.Session()
session.proxies = self.proxy
adapter = requests.adapters.HTTPAdapter(
pool_connections=20,
pool_maxsize=50,
max_retries=5
)
session.mount('http://', adapter)
session.mount('https://', adapter)
resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
return resp
def commit(self):
"""
Actually send an SDF to CloudSearch for processing
If an SDF file has been explicitly loaded it will be used. Otherwise,
documents added through :func:`add` and :func:`delete` will be used.
:rtype: :class:`CommitResponse`
:returns: A summary of documents added and deleted
"""
sdf = self.get_sdf()
if ': null' in sdf:
boto.log.error('null value in sdf detected. This will probably '
'raise a 500 error.')
index = sdf.index(': null')
boto.log.error(sdf[index - 100:index + 100])
api_version = '2013-01-01'
if self.domain and self.domain.layer1:
api_version = self.domain.layer1.APIVersion
if self.sign_request:
r = self._commit_with_auth(sdf, api_version)
else:
r = self._commit_without_auth(sdf, api_version)
return CommitResponse(r, self, sdf, signed_request=self.sign_request)
class CommitResponse(object):
"""Wrapper for response to Cloudsearch document batch commit.
:type response: :class:`requests.models.Response`
:param response: Response from the CloudSearch /documents/batch API
:type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
:param doc_service: Object containing the documents posted and methods to
retry
:raises: :class:`boto.exception.BotoServerError`
:raises: :class:`boto.cloudsearch2.document.SearchServiceException`
:raises: :class:`boto.cloudsearch2.document.EncodingError`
:raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
"""
def __init__(self, response, doc_service, sdf, signed_request=False):
self.response = response
self.doc_service = doc_service
self.sdf = sdf
self.signed_request = signed_request
if self.signed_request:
self.content = response
else:
_body = response.content.decode('utf-8')
try:
self.content = json.loads(_body)
except:
boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
'\n\nSDF:\n{1}'.format(_body, self.sdf))
raise boto.exception.BotoServerError(self.response.status_code, '',
body=_body)
self.status = self.content['status']
if self.status == 'error':
self.errors = [e.get('message') for e in self.content.get('errors',
[])]
for e in self.errors:
if "Illegal Unicode character" in e:
raise EncodingError("Illegal Unicode character in document")
elif e == "The Content-Length is too long":
raise ContentTooLongError("Content was too long")
else:
self.errors = []
self.adds = self.content['adds']
self.deletes = self.content['deletes']
self._check_num_ops('add', self.adds)
self._check_num_ops('delete', self.deletes)
def _check_num_ops(self, type_, response_num):
"""Raise exception if number of ops in response doesn't match commit
:type type_: str
:param type_: Type of commit operation: 'add' or 'delete'
:type response_num: int
:param response_num: Number of adds or deletes in the response.
:raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
"""
commit_num = len([d for d in self.doc_service.documents_batch
if d['type'] == type_])
if response_num != commit_num:
if self.signed_request:
boto.log.debug(self.response)
else:
boto.log.debug(self.response.content)
# There will always be a commit mismatch error if there are any
# errors on the CloudSearch side. self.errors gets lost when this
# CommitMismatchError is raised. Whoever is using boto has no idea
# why their commit failed. They can't even notify the user of the
# cause by parsing the error messages from Amazon. So let's
# attach self.errors to the exception if we already spent the
# time and effort collecting them out of the response.
exc = CommitMismatchError(
'Incorrect number of {0}s returned. Commit: {1} Response: {2}'
.format(type_, commit_num, response_num)
)
exc.errors = self.errors
raise exc
# ---- End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch2/document.py ----
from boto.cloudsearch2.optionstatus import IndexFieldStatus
from boto.cloudsearch2.optionstatus import ServicePoliciesStatus
from boto.cloudsearch2.optionstatus import ExpressionStatus
from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus
from boto.cloudsearch2.optionstatus import ScalingParametersStatus
from boto.cloudsearch2.document import DocumentServiceConnection
from boto.cloudsearch2.search import SearchConnection
def handle_bool(value):
if value in [True, 'true', 'True', 'TRUE', 1]:
return True
return False
class Domain(object):
"""
A Cloudsearch domain.
:ivar name: The name of the domain.
:ivar id: The internally generated unique identifier for the domain.
:ivar created: A boolean which is True if the domain is
created. It can take several minutes to initialize a domain
when CreateDomain is called. Newly created search domains are
returned with a False value for Created until domain creation
is complete.
:ivar deleted: A boolean which is True if the search domain has
been deleted. The system must clean up resources dedicated to
the search domain when delete is called. Newly deleted
search domains are returned from list_domains with a True
value for deleted for several minutes until resource cleanup
is complete.
:ivar processing: True if processing is being done to activate the
current domain configuration.
:ivar num_searchable_docs: The number of documents that have been
submitted to the domain and indexed.
:ivar requires_index_documents: True if index_documents needs to be
called to activate the current domain configuration.
:ivar search_instance_count: The number of search instances that are
available to process search requests.
:ivar search_instance_type: The instance type that is being used to
process search requests.
:ivar search_partition_count: The number of partitions across which
the search index is spread.
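Example (a sketch; assumes AWS credentials are configured and that a
domain named 'my-domain' already exists)::
from boto.cloudsearch2.layer1 import CloudSearchConnection
layer1 = CloudSearchConnection()
data = layer1.describe_domains(['my-domain'])
status = (data['DescribeDomainsResponse']
['DescribeDomainsResult']
['DomainStatusList'][0])
domain = Domain(layer1, status)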
"""
def __init__(self, layer1, data):
"""
Constructor - Create a domain object from a layer1 and data params
:type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object
:param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object
which is used to perform operations on the domain.
"""
self.layer1 = layer1
self.update_from_data(data)
def update_from_data(self, data):
self.created = data['Created']
self.deleted = data['Deleted']
self.processing = data['Processing']
self.requires_index_documents = data['RequiresIndexDocuments']
self.domain_id = data['DomainId']
self.domain_name = data['DomainName']
self.search_instance_count = data['SearchInstanceCount']
self.search_instance_type = data.get('SearchInstanceType', None)
self.search_partition_count = data['SearchPartitionCount']
self._doc_service = data['DocService']
self._service_arn = data['ARN']
self._search_service = data['SearchService']
@property
def service_arn(self):
return self._service_arn
@property
def doc_service_endpoint(self):
return self._doc_service['Endpoint']
@property
def search_service_endpoint(self):
return self._search_service['Endpoint']
@property
def created(self):
return self._created
@created.setter
def created(self, value):
self._created = handle_bool(value)
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, value):
self._deleted = handle_bool(value)
@property
def processing(self):
return self._processing
@processing.setter
def processing(self, value):
self._processing = handle_bool(value)
@property
def requires_index_documents(self):
return self._requires_index_documents
@requires_index_documents.setter
def requires_index_documents(self, value):
self._requires_index_documents = handle_bool(value)
@property
def search_partition_count(self):
return self._search_partition_count
@search_partition_count.setter
def search_partition_count(self, value):
self._search_partition_count = int(value)
@property
def search_instance_count(self):
return self._search_instance_count
@search_instance_count.setter
def search_instance_count(self, value):
self._search_instance_count = int(value)
@property
def name(self):
return self.domain_name
@property
def id(self):
return self.domain_id
def delete(self):
"""
Delete this domain and all index data associated with it.
"""
return self.layer1.delete_domain(self.name)
def get_analysis_schemes(self):
"""
Return a list of Analysis Scheme objects.
"""
return self.layer1.describe_analysis_schemes(self.name)
def get_availability_options(self):
"""
Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
object representing the currently defined availability options for
the domain.
:return: OptionsStatus object
:rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
object
"""
return AvailabilityOptionsStatus(
self, refresh_fn=self.layer1.describe_availability_options,
refresh_key=['DescribeAvailabilityOptionsResponse',
'DescribeAvailabilityOptionsResult',
'AvailabilityOptions'],
save_fn=self.layer1.update_availability_options)
def get_scaling_options(self):
"""
Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus`
object representing the currently defined scaling options for the
domain.
:return: ScalingParametersStatus object
:rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus`
object
"""
return ScalingParametersStatus(
self, refresh_fn=self.layer1.describe_scaling_parameters,
refresh_key=['DescribeScalingParametersResponse',
'DescribeScalingParametersResult',
'ScalingParameters'],
save_fn=self.layer1.update_scaling_parameters)
def get_access_policies(self):
"""
Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus`
object representing the currently defined access policies for the
domain.
:return: ServicePoliciesStatus object
:rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object
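Example (a sketch; the CIDR block shown is hypothetical)::
policies = domain.get_access_policies()
policies.allow_search_ip('192.0.2.0/24')
policies.allow_doc_ip('192.0.2.0/24')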
"""
return ServicePoliciesStatus(
self, refresh_fn=self.layer1.describe_service_access_policies,
refresh_key=['DescribeServiceAccessPoliciesResponse',
'DescribeServiceAccessPoliciesResult',
'AccessPolicies'],
save_fn=self.layer1.update_service_access_policies)
def index_documents(self):
"""
Tells the search domain to start indexing its documents using
the latest text processing options and IndexFields. This
operation must be invoked to make options whose OptionStatus
has OptionState of RequiresIndexDocuments visible in search
results.
"""
self.layer1.index_documents(self.name)
def get_index_fields(self, field_names=None):
"""
Return a list of index fields defined for this domain.
:return: list of IndexFieldStatus objects
:rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus`
object
"""
data = self.layer1.describe_index_fields(self.name, field_names)
data = (data['DescribeIndexFieldsResponse']
['DescribeIndexFieldsResult']
['IndexFields'])
return [IndexFieldStatus(self, d) for d in data]
def create_index_field(self, field_name, field_type,
default='', facet=False, returnable=False,
searchable=False, sortable=False,
highlight=False, source_field=None,
analysis_scheme=None):
"""
Defines an ``IndexField``, either replacing an existing
definition or creating a new one.
:type field_name: string
:param field_name: The name of a field in the search index.
:type field_type: string
:param field_type: The type of field. Valid values are
int | double | literal | text | date | latlon |
int-array | double-array | literal-array | text-array | date-array
:type default: string or int
:param default: The default value for the field. If the
field is of type ``int`` this should be an integer value.
Otherwise, it's a string.
:type facet: bool
:param facet: A boolean to indicate whether facets
are enabled for this field or not. Does not apply to
fields of type ``int, int-array, text, text-array``.
:type returnable: bool
:param returnable: A boolean to indicate whether values
of this field can be returned in search results or
used in ranking.
:type searchable: bool
:param searchable: A boolean to indicate whether search
is enabled for this field or not.
:type sortable: bool
:param sortable: A boolean to indicate whether sorting
is enabled for this field or not. Does not apply to
fields of array types.
:type highlight: bool
:param highlight: A boolean to indicate whether highlighting
is enabled for this field or not. Does not apply to
fields of type ``double, int, date, latlon``
:type source_field: list of strings or string
:param source_field: For array types, this is the list of fields
to treat as the source. For singular types, pass a string only.
:type analysis_scheme: string
:param analysis_scheme: The analysis scheme to use for this field.
Only applies to ``text | text-array`` field types
:return: IndexFieldStatus objects
:rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
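Example (a sketch; the field names are hypothetical)::
domain.create_index_field('title', 'text', returnable=True,
highlight=True)
domain.create_index_field('year', 'int', facet=True,
sortable=True, returnable=True)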
"""
index = {
'IndexFieldName': field_name,
'IndexFieldType': field_type
}
if field_type == 'literal':
index['LiteralOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['LiteralOptions']['DefaultValue'] = default
if source_field:
index['LiteralOptions']['SourceField'] = source_field
elif field_type == 'literal-array':
index['LiteralArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['LiteralArrayOptions']['DefaultValue'] = default
if source_field:
index['LiteralArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'int':
index['IntOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['IntOptions']['DefaultValue'] = default
if source_field:
index['IntOptions']['SourceField'] = source_field
elif field_type == 'int-array':
index['IntArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['IntArrayOptions']['DefaultValue'] = default
if source_field:
index['IntArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'date':
index['DateOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['DateOptions']['DefaultValue'] = default
if source_field:
index['DateOptions']['SourceField'] = source_field
elif field_type == 'date-array':
index['DateArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['DateArrayOptions']['DefaultValue'] = default
if source_field:
index['DateArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'double':
index['DoubleOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['DoubleOptions']['DefaultValue'] = default
if source_field:
index['DoubleOptions']['SourceField'] = source_field
elif field_type == 'double-array':
index['DoubleArrayOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable
}
if default:
index['DoubleArrayOptions']['DefaultValue'] = default
if source_field:
index['DoubleArrayOptions']['SourceFields'] = \
','.join(source_field)
elif field_type == 'text':
index['TextOptions'] = {
'ReturnEnabled': returnable,
'HighlightEnabled': highlight,
'SortEnabled': sortable
}
if default:
index['TextOptions']['DefaultValue'] = default
if source_field:
index['TextOptions']['SourceField'] = source_field
if analysis_scheme:
index['TextOptions']['AnalysisScheme'] = analysis_scheme
elif field_type == 'text-array':
index['TextArrayOptions'] = {
'ReturnEnabled': returnable,
'HighlightEnabled': highlight
}
if default:
index['TextArrayOptions']['DefaultValue'] = default
if source_field:
index['TextArrayOptions']['SourceFields'] = \
','.join(source_field)
if analysis_scheme:
index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme
elif field_type == 'latlon':
index['LatLonOptions'] = {
'FacetEnabled': facet,
'ReturnEnabled': returnable,
'SearchEnabled': searchable,
'SortEnabled': sortable
}
if default:
index['LatLonOptions']['DefaultValue'] = default
if source_field:
index['LatLonOptions']['SourceField'] = source_field
data = self.layer1.define_index_field(self.name, index)
data = (data['DefineIndexFieldResponse']
['DefineIndexFieldResult']
['IndexField'])
return IndexFieldStatus(self, data,
self.layer1.describe_index_fields)
def get_expressions(self, names=None):
"""
Return a list of rank expressions defined for this domain.
:return: list of ExpressionStatus objects
:rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus`
object
"""
fn = self.layer1.describe_expressions
data = fn(self.name, names)
data = (data['DescribeExpressionsResponse']
['DescribeExpressionsResult']
['Expressions'])
return [ExpressionStatus(self, d, fn) for d in data]
def create_expression(self, name, value):
"""
Create a new expression.
:type name: string
:param name: The name of an expression for processing
during a search request.
:type value: string
:param value: The expression to evaluate for ranking
or thresholding while processing a search request. The
Expression syntax is based on JavaScript expressions
and supports:
* Single value, sort enabled numeric fields (int, double, date)
* Other expressions
* The _score variable, which references a document's relevance
score
* The _time variable, which references the current epoch time
* Integer, floating point, hex, and octal literals
* Arithmetic operators: + - * / %
* Bitwise operators: | & ^ ~ << >> >>>
* Boolean operators (including the ternary operator): && || ! ?:
* Comparison operators: < <= == >= >
* Mathematical functions: abs ceil exp floor ln log2 log10 logn
max min pow sqrt
* Trigonometric functions: acos acosh asin asinh atan atan2 atanh
cos cosh sin sinh tanh tan
* The haversin distance function
Expressions always return an integer value from 0 to the maximum
64-bit signed integer value (2^63 - 1). Intermediate results are
calculated as double-precision floating point values and the return
value is rounded to the nearest integer. If the expression is
invalid or evaluates to a negative value, it returns 0. If the
expression evaluates to a value greater than the maximum, it
returns the maximum value.
The source data for an Expression can be the name of an
IndexField of type int or double, another Expression or the
reserved name _score. The _score source is
defined to return as a double from 0 to 10.0 (inclusive) to
indicate how relevant a document is to the search request,
taking into account repetition of search terms in the
document and proximity of search terms to each other in
each matching IndexField in the document.
For more information about using rank expressions to
customize ranking, see the Amazon CloudSearch Developer
Guide.
:return: ExpressionStatus object
:rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object
:raises: BaseException, InternalException, LimitExceededException,
InvalidTypeException, ResourceNotFoundException
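Example (a sketch; assumes a sort-enabled int field named 'rating'
is defined on the domain)::
expr = domain.create_expression('popularity',
'(0.3 * rating) + (0.7 * _score)')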
"""
data = self.layer1.define_expression(self.name, name, value)
data = (data['DefineExpressionResponse']
['DefineExpressionResult']
['Expression'])
return ExpressionStatus(self, data,
self.layer1.describe_expressions)
def get_document_service(self):
return DocumentServiceConnection(domain=self)
def get_search_service(self):
return SearchConnection(domain=self)
def __repr__(self):
return '<Domain: %s>' % self.domain_name
# ---- End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch2/domain.py ----
from boto.compat import json
class OptionStatus(dict):
"""
Presents a combination of status fields (defined below), which are
accessed as attributes, and option values, which are stored in the
native Python dictionary. In this class, the option values are
merged from a JSON object that is stored as the Option part of
the object.
:ivar domain: The domain this option is associated with.
:ivar creation_date: A timestamp for when this option was created.
:ivar status: The state of processing a change to an option.
Possible values:
* RequiresIndexDocuments: the option's latest value will not
be visible in searches until IndexDocuments has been called
and indexing is complete.
* Processing: the option's latest value is not yet visible in
all searches but is in the process of being activated.
* Active: the option's latest value is completely visible.
:ivar update_date: A timestamp for when this option was updated.
:ivar update_version: A unique integer that indicates when this
option was last updated.
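Example (a sketch; uses the access-policy subclass returned by
:func:`boto.cloudsearch2.domain.Domain.get_access_policies`)::
policies = domain.get_access_policies()
print(policies.status)  # e.g. 'Active'
print(policies.to_json())  # the option values as a JSON string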
"""
def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None,
save_fn=None):
self.domain = domain
self.refresh_fn = refresh_fn
self.refresh_key = refresh_key
self.save_fn = save_fn
self.refresh(data)
def _update_status(self, status):
self.creation_date = status['CreationDate']
self.status = status['State']
self.update_date = status['UpdateDate']
self.update_version = int(status['UpdateVersion'])
def _update_options(self, options):
if options:
self.update(options)
def refresh(self, data=None):
"""
Refresh the local state of the object. You can either pass
new state data in as the parameter ``data`` or, if that parameter
is omitted, the state data will be retrieved from CloudSearch.
"""
if not data:
if self.refresh_fn:
data = self.refresh_fn(self.domain.name)
if data and self.refresh_key:
# Attempt to pull out the right nested bag of data
for key in self.refresh_key:
data = data[key]
if data:
self._update_status(data['Status'])
self._update_options(data['Options'])
def to_json(self):
"""
Return the JSON representation of the options as a string.
"""
return json.dumps(self)
def save(self):
"""
Write the current state of the local object back to the
CloudSearch service.
"""
if self.save_fn:
data = self.save_fn(self.domain.name, self.to_json())
self.refresh(data)
class IndexFieldStatus(OptionStatus):
def save(self):
pass
class AvailabilityOptionsStatus(OptionStatus):
def save(self):
pass
class ScalingParametersStatus(IndexFieldStatus):
pass
class ExpressionStatus(IndexFieldStatus):
pass
class ServicePoliciesStatus(OptionStatus):
def new_statement(self, arn, ip):
"""
Returns a new policy statement that will allow
access to the service described by ``arn`` by the
ip specified in ``ip``.
:type arn: string
:param arn: The Amazon Resource Name (ARN) of the
service you wish to provide access to. This would be
either the search service or the document service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
return {
"Effect": "Allow",
"Action": "*", # Docs say use GET, but denies unless *
"Resource": arn,
"Condition": {
"IpAddress": {
"aws:SourceIp": [ip]
}
}
}
def _allow_ip(self, arn, ip):
if 'Statement' not in self:
s = self.new_statement(arn, ip)
self['Statement'] = [s]
self.save()
else:
add_statement = True
for statement in self['Statement']:
if statement['Resource'] == arn:
for condition_name in statement['Condition']:
if condition_name == 'IpAddress':
add_statement = False
condition = statement['Condition'][condition_name]
if ip not in condition['aws:SourceIp']:
condition['aws:SourceIp'].append(ip)
if add_statement:
s = self.new_statement(arn, ip)
self['Statement'].append(s)
self.save()
def allow_search_ip(self, ip):
"""
Add the provided IP address or CIDR block to the list of
allowable addresses for the search service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
arn = self.domain.service_arn
self._allow_ip(arn, ip)
def allow_doc_ip(self, ip):
"""
Add the provided IP address or CIDR block to the list of
allowable addresses for the document service.
:type ip: string
:param ip: An IP address or CIDR block you wish to grant access
to.
"""
arn = self.domain.service_arn
self._allow_ip(arn, ip)
def _disallow_ip(self, arn, ip):
if 'Statement' not in self:
return
need_update = False
for statement in self['Statement']:
if statement['Resource'] == arn:
for condition_name in statement['Condition']:
if condition_name == 'IpAddress':
condition = statement['Condition'][condition_name]
if ip in condition['aws:SourceIp']:
condition['aws:SourceIp'].remove(ip)
need_update = True
if need_update:
self.save()
def disallow_search_ip(self, ip):
"""
Remove the provided IP address or CIDR block from the list of
allowable addresses for the search service.
:type ip: string
:param ip: An IP address or CIDR block you wish to revoke access
for.
"""
arn = self.domain.service_arn
self._disallow_ip(arn, ip)
def disallow_doc_ip(self, ip):
"""
Remove the provided IP address or CIDR block from the list of
allowable addresses for the document service.
:type ip: string
:param ip: An IP address or CIDR block you wish to revoke access
for.
"""
arn = self.domain.service_arn
self._disallow_ip(arn, ip)
# ---- End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch2/optionstatus.py ----
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudsearch2 import exceptions
class CloudSearchConnection(AWSQueryConnection):
"""
Amazon CloudSearch Configuration Service
You use the Amazon CloudSearch configuration service to create,
configure, and manage search domains. Configuration service
requests are submitted using the AWS Query protocol. AWS Query
requests are HTTP or HTTPS requests submitted via HTTP GET or POST
with a query parameter named Action.
The endpoint for configuration service requests is region-
specific: cloudsearch.<region>.amazonaws.com. For example,
cloudsearch.us-east-1.amazonaws.com. For a current list of
supported regions and endpoints, see `Regions and Endpoints`_.
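Example (a sketch; assumes AWS credentials are available in the
environment and the domain name is hypothetical)::
conn = CloudSearchConnection()
conn.create_domain('my-domain')
conn.list_domain_names()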
"""
APIVersion = "2013-01-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidTypeException": exceptions.InvalidTypeException,
"LimitExceededException": exceptions.LimitExceededException,
"InternalException": exceptions.InternalException,
"DisabledOperationException": exceptions.DisabledOperationException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"BaseException": exceptions.BaseException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
sign_request = kwargs.pop('sign_request', False)
self.sign_request = sign_request
super(CloudSearchConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def build_suggesters(self, domain_name):
"""
Indexes the search suggestions.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='BuildSuggesters',
verb='POST',
path='/', params=params)
def create_domain(self, domain_name):
"""
Creates a new search domain. For more information, see
`Creating a Search Domain`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A name for the domain you are creating. Allowed
characters are a-z (lower-case letters), 0-9, and hyphen (-).
Domain names must start with a letter or number and be at least 3
and no more than 28 characters long.
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='CreateDomain',
verb='POST',
path='/', params=params)
def define_analysis_scheme(self, domain_name, analysis_scheme):
"""
Configures an analysis scheme that can be applied to a `text`
or `text-array` field to define language-specific text
processing options. For more information, see `Configuring
Analysis Schemes`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type analysis_scheme: dict
:param analysis_scheme: Configuration information for an analysis
scheme. Each analysis scheme has a unique name and specifies the
language of the text to be processed. The following options can be
configured for an analysis scheme: `Synonyms`, `Stopwords`,
`StemmingDictionary`, and `AlgorithmicStemming`.
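Example (a sketch; the member names follow the 2013-01-01
AnalysisScheme structure and the scheme name is hypothetical)::
conn.define_analysis_scheme('my-domain', {
'AnalysisSchemeName': 'my_english',
'AnalysisSchemeLanguage': 'en'})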
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'AnalysisScheme',
analysis_scheme)
return self._make_request(
action='DefineAnalysisScheme',
verb='POST',
path='/', params=params)
def define_expression(self, domain_name, expression):
"""
Configures an `Expression` for the search domain. Used to
create new expressions and modify existing ones. If the
expression exists, the new configuration replaces the old one.
For more information, see `Configuring Expressions`_ in the
Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type expression: dict
:param expression: A named expression that can be evaluated at search
time. Can be used to sort the search results, define other
expressions, or return computed information in the search results.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'Expression',
expression)
return self._make_request(
action='DefineExpression',
verb='POST',
path='/', params=params)
def define_index_field(self, domain_name, index_field):
"""
Configures an `IndexField` for the search domain. Used to
create new fields and modify existing ones. You must specify
the name of the domain you are configuring and an index field
configuration. The index field configuration specifies a
unique name, the index field type, and the options you want to
configure for the field. The options you can specify depend on
the `IndexFieldType`. If the field exists, the new
configuration replaces the old one. For more information, see
`Configuring Index Fields`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type index_field: dict
:param index_field: The index field and field options you want to
configure.
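Example (a sketch; the domain and field names are hypothetical)::
conn.define_index_field('my-domain', {
'IndexFieldName': 'title',
'IndexFieldType': 'text',
'TextOptions': {'ReturnEnabled': True}})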
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'IndexField',
index_field)
return self._make_request(
action='DefineIndexField',
verb='POST',
path='/', params=params)
def define_suggester(self, domain_name, suggester):
"""
Configures a suggester for a domain. A suggester enables you
to display possible matches before users finish typing their
queries. When you configure a suggester, you must specify the
name of the text field you want to search for possible matches
and a unique name for the suggester. For more information, see
`Getting Search Suggestions`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type suggester: dict
:param suggester: Configuration information for a search suggester.
Each suggester has a unique name and specifies the text field you
want to use for suggestions. The following options can be
configured for a suggester: `FuzzyMatching`, `SortExpression`.
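Example (a sketch; the member names follow the 2013-01-01 Suggester
structure and the names shown are hypothetical)::
conn.define_suggester('my-domain', {
'SuggesterName': 'title_suggester',
'DocumentSuggesterOptions': {
'SourceField': 'title',
'FuzzyMatching': 'low'}})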
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'Suggester',
suggester)
return self._make_request(
action='DefineSuggester',
verb='POST',
path='/', params=params)
def delete_analysis_scheme(self, domain_name, analysis_scheme_name):
"""
Deletes an analysis scheme. For more information, see
`Configuring Analysis Schemes`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type analysis_scheme_name: string
:param analysis_scheme_name: The name of the analysis scheme you want
to delete.
"""
params = {
'DomainName': domain_name,
'AnalysisSchemeName': analysis_scheme_name,
}
return self._make_request(
action='DeleteAnalysisScheme',
verb='POST',
path='/', params=params)
def delete_domain(self, domain_name):
"""
Permanently deletes a search domain and all of its data. Once
a domain has been deleted, it cannot be recovered. For more
information, see `Deleting a Search Domain`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to permanently
delete.
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='DeleteDomain',
verb='POST',
path='/', params=params)
def delete_expression(self, domain_name, expression_name):
"""
Removes an `Expression` from the search domain. For more
information, see `Configuring Expressions`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type expression_name: string
:param expression_name: The name of the `Expression` to delete.
"""
params = {
'DomainName': domain_name,
'ExpressionName': expression_name,
}
return self._make_request(
action='DeleteExpression',
verb='POST',
path='/', params=params)
def delete_index_field(self, domain_name, index_field_name):
"""
Removes an `IndexField` from the search domain. For more
information, see `Configuring Index Fields`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type index_field_name: string
:param index_field_name: The name of the index field you want to
remove from the domain's indexing options.
"""
params = {
'DomainName': domain_name,
'IndexFieldName': index_field_name,
}
return self._make_request(
action='DeleteIndexField',
verb='POST',
path='/', params=params)
def delete_suggester(self, domain_name, suggester_name):
"""
Deletes a suggester. For more information, see `Getting Search
Suggestions`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type suggester_name: string
:param suggester_name: Specifies the name of the suggester you want to
delete.
"""
params = {
'DomainName': domain_name,
'SuggesterName': suggester_name,
}
return self._make_request(
action='DeleteSuggester',
verb='POST',
path='/', params=params)
def describe_analysis_schemes(self, domain_name,
analysis_scheme_names=None, deployed=None):
"""
Gets the analysis schemes configured for a domain. An analysis
scheme defines language-specific text processing options for a
`text` field. Can be limited to specific analysis schemes by
name. By default, shows all analysis schemes and includes any
pending changes to the configuration. Set the `Deployed`
option to `True` to show the active configuration and exclude
pending changes. For more information, see `Configuring
Analysis Schemes`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type analysis_scheme_names: list
:param analysis_scheme_names: The analysis schemes you want to
describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if analysis_scheme_names is not None:
self.build_list_params(params,
analysis_scheme_names,
'AnalysisSchemeNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeAnalysisSchemes',
verb='POST',
path='/', params=params)
def describe_availability_options(self, domain_name, deployed=None):
"""
Gets the availability options configured for a domain. By
default, shows the configuration with any pending changes. Set
the `Deployed` option to `True` to show the active
configuration and exclude pending changes. For more
information, see `Configuring Availability Options`_ in the
Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeAvailabilityOptions',
verb='POST',
path='/', params=params)
def describe_domains(self, domain_names=None):
"""
Gets information about the search domains owned by this
account. Can be limited to specific domains. Shows all domains
by default. To get the number of searchable documents in a
domain, use the console or submit a `matchall` request to your
domain's search endpoint:
`q=matchall&q.parser=structured&size=0`. For more information,
see `Getting Information about a Search Domain`_ in the Amazon
CloudSearch Developer Guide .
:type domain_names: list
:param domain_names: The names of the domains you want to include in
the response.
"""
params = {}
if domain_names is not None:
self.build_list_params(params,
domain_names,
'DomainNames.member')
return self._make_request(
action='DescribeDomains',
verb='POST',
path='/', params=params)
def describe_expressions(self, domain_name, expression_names=None,
deployed=None):
"""
Gets the expressions configured for the search domain. Can be
limited to specific expressions by name. By default, shows all
expressions and includes any pending changes to the
configuration. Set the `Deployed` option to `True` to show the
active configuration and exclude pending changes. For more
information, see `Configuring Expressions`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type expression_names: list
:param expression_names: Limits the `DescribeExpressions` response to
the specified expressions. If not specified, all expressions are
shown.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if expression_names is not None:
self.build_list_params(params,
expression_names,
'ExpressionNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeExpressions',
verb='POST',
path='/', params=params)
def describe_index_fields(self, domain_name, field_names=None,
deployed=None):
"""
Gets information about the index fields configured for the
search domain. Can be limited to specific fields by name. By
default, shows all fields and includes any pending changes to
the configuration. Set the `Deployed` option to `True` to show
the active configuration and exclude pending changes. For more
information, see `Getting Domain Information`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type field_names: list
:param field_names: A list of the index fields you want to describe. If
not specified, information is returned for all configured index
fields.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if field_names is not None:
self.build_list_params(params,
field_names,
'FieldNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeIndexFields',
verb='POST',
path='/', params=params)
def describe_scaling_parameters(self, domain_name):
"""
Gets the scaling parameters configured for a domain. A
domain's scaling parameters specify the desired search
instance type and replication count. For more information, see
`Configuring Scaling Options`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='DescribeScalingParameters',
verb='POST',
path='/', params=params)
def describe_service_access_policies(self, domain_name, deployed=None):
"""
Gets information about the access policies that control access
to the domain's document and search endpoints. By default,
shows the configuration with any pending changes. Set the
`Deployed` option to `True` to show the active configuration
and exclude pending changes. For more information, see
`Configuring Access for a Search Domain`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeServiceAccessPolicies',
verb='POST',
path='/', params=params)
def describe_suggesters(self, domain_name, suggester_names=None,
deployed=None):
"""
Gets the suggesters configured for a domain. A suggester
enables you to display possible matches before users finish
typing their queries. Can be limited to specific suggesters by
name. By default, shows all suggesters and includes any
pending changes to the configuration. Set the `Deployed`
option to `True` to show the active configuration and exclude
pending changes. For more information, see `Getting Search
Suggestions`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type suggester_names: list
:param suggester_names: The suggesters you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration
(`True`) or include any pending changes (`False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if suggester_names is not None:
self.build_list_params(params,
suggester_names,
'SuggesterNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeSuggesters',
verb='POST',
path='/', params=params)
def index_documents(self, domain_name):
"""
Tells the search domain to start indexing its documents using
the latest indexing options. This operation must be invoked to
activate options whose OptionStatus is
`RequiresIndexDocuments`.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='IndexDocuments',
verb='POST',
path='/', params=params)
def list_domain_names(self):
"""
Lists all search domains owned by an account.
"""
params = {}
return self._make_request(
action='ListDomainNames',
verb='POST',
path='/', params=params)
def update_availability_options(self, domain_name, multi_az):
"""
Configures the availability options for a domain. Enabling the
Multi-AZ option expands an Amazon CloudSearch domain to an
additional Availability Zone in the same Region to increase
fault tolerance in the event of a service disruption. Changes
to the Multi-AZ option can take about half an hour to become
active. For more information, see `Configuring Availability
Options`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type multi_az: boolean
:param multi_az: You expand an existing search domain to a second
Availability Zone by setting the Multi-AZ option to true.
Similarly, you can turn off the Multi-AZ option to downgrade the
domain to a single Availability Zone by setting the Multi-AZ option
to `False`.
"""
params = {'DomainName': domain_name, 'MultiAZ': multi_az, }
return self._make_request(
action='UpdateAvailabilityOptions',
verb='POST',
path='/', params=params)
def update_scaling_parameters(self, domain_name, scaling_parameters):
"""
Configures scaling parameters for a domain. A domain's scaling
parameters specify the desired search instance type and
replication count. Amazon CloudSearch will still automatically
scale your domain based on the volume of data and traffic, but
not below the desired instance type and replication count. If
the Multi-AZ option is enabled, these values control the
resources used per Availability Zone. For more information,
see `Configuring Scaling Options`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type scaling_parameters: dict
:param scaling_parameters: The desired instance type and desired number
of replicas of each index partition.
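Example (a sketch; the instance type and count are illustrative)::
conn.update_scaling_parameters('my-domain', {
'DesiredInstanceType': 'search.m1.small',
'DesiredReplicationCount': 1})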
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'ScalingParameters',
scaling_parameters)
return self._make_request(
action='UpdateScalingParameters',
verb='POST',
path='/', params=params)
def update_service_access_policies(self, domain_name, access_policies):
"""
Configures the access rules that control access to the
domain's document and search endpoints. For more information,
see `Configuring Access for an Amazon CloudSearch Domain`_.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type access_policies: string
:param access_policies: The access rules you want to configure. These
rules replace any existing rules.
"""
params = {
'DomainName': domain_name,
'AccessPolicies': access_policies,
}
return self._make_request(
action='UpdateServiceAccessPolicies',
verb='POST',
path='/', params=params)
def build_complex_param(self, params, label, value):
"""Serialize a structure.
For example::
param_type = 'structure'
label = 'IndexField'
value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}}
would result in the params dict being updated with these params::
IndexField.IndexFieldName = a
IndexField.IntOptions.DefaultValue = 5
:type params: dict
:param params: The params dict. The flattened structure params
will be added to this dict.
:type label: str
:param label: String label for param key
:type value: any
:param value: The value to serialize
"""
for k, v in value.items():
if isinstance(v, dict):
# Recurse so nested structures are flattened with dotted labels
self.build_complex_param(params, label + '.' + k, v)
elif isinstance(v, bool):
params['%s.%s' % (label, k)] = v and 'true' or 'false'
else:
params['%s.%s' % (label, k)] = v
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
path=path, params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
# ---- End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudsearch2/layer1.py ----
from boto.compat import six
class Step(object):
"""
Jobflow Step base class
"""
def jar(self):
"""
:rtype: str
:return: URI to the jar
"""
raise NotImplementedError()
def args(self):
"""
:rtype: list(str)
:return: List of arguments for the step
"""
raise NotImplementedError()
def main_class(self):
"""
:rtype: str
:return: The main class name
"""
raise NotImplementedError()
class JarStep(Step):
"""
Custom jar step
"""
def __init__(self, name, jar, main_class=None,
action_on_failure='TERMINATE_JOB_FLOW', step_args=None):
"""
An Elastic MapReduce step that executes a jar
:type name: str
:param name: The name of the step
:type jar: str
:param jar: S3 URI to the Jar file
:type main_class: str
:param main_class: The class to execute in the jar
:type action_on_failure: str
:param action_on_failure: An action, defined in the EMR docs to
take on failure.
:type step_args: list(str)
:param step_args: A list of arguments to pass to the step
"""
self.name = name
self._jar = jar
self._main_class = main_class
self.action_on_failure = action_on_failure
if isinstance(step_args, six.string_types):
step_args = [step_args]
self.step_args = step_args
def jar(self):
return self._jar
def args(self):
args = []
if self.step_args:
args.extend(self.step_args)
return args
def main_class(self):
return self._main_class
class StreamingStep(Step):
"""
Hadoop streaming step
"""
def __init__(self, name, mapper, reducer=None, combiner=None,
action_on_failure='TERMINATE_JOB_FLOW',
cache_files=None, cache_archives=None,
step_args=None, input=None, output=None,
jar='/home/hadoop/contrib/streaming/hadoop-streaming.jar'):
"""
A Hadoop streaming Elastic MapReduce step
:type name: str
:param name: The name of the step
:type mapper: str
:param mapper: The mapper URI
:type reducer: str
:param reducer: The reducer URI
:type combiner: str
:param combiner: The combiner URI. Only works for Hadoop 0.20
and later!
:type action_on_failure: str
:param action_on_failure: An action, defined in the EMR docs to
take on failure.
:type cache_files: list(str)
:param cache_files: A list of cache files to be bundled with the job
:type cache_archives: list(str)
:param cache_archives: A list of jar archives to be bundled with
the job
:type step_args: list(str)
:param step_args: A list of arguments to pass to the step
:type input: str or a list of str
:param input: The input uri
:type output: str
:param output: The output uri
:type jar: str
:param jar: The hadoop streaming jar. This can be either a local
path on the master node, or an s3:// URI.
"""
self.name = name
self.mapper = mapper
self.reducer = reducer
self.combiner = combiner
self.action_on_failure = action_on_failure
self.cache_files = cache_files
self.cache_archives = cache_archives
self.input = input
self.output = output
self._jar = jar
if isinstance(step_args, six.string_types):
step_args = [step_args]
self.step_args = step_args
def jar(self):
return self._jar
def main_class(self):
return None
def args(self):
args = []
# put extra args BEFORE -mapper and -reducer so that e.g. -libjar
# will work
if self.step_args:
args.extend(self.step_args)
args.extend(['-mapper', self.mapper])
if self.combiner:
args.extend(['-combiner', self.combiner])
if self.reducer:
args.extend(['-reducer', self.reducer])
else:
args.extend(['-jobconf', 'mapred.reduce.tasks=0'])
if self.input:
if isinstance(self.input, list):
for input in self.input:
args.extend(('-input', input))
else:
args.extend(('-input', self.input))
if self.output:
args.extend(('-output', self.output))
if self.cache_files:
for cache_file in self.cache_files:
args.extend(('-cacheFile', cache_file))
if self.cache_archives:
for cache_archive in self.cache_archives:
args.extend(('-cacheArchive', cache_archive))
return args
def __repr__(self):
return '%s.%s(name=%r, mapper=%r, reducer=%r, action_on_failure=%r, cache_files=%r, cache_archives=%r, step_args=%r, input=%r, output=%r, jar=%r)' % (
self.__class__.__module__, self.__class__.__name__,
self.name, self.mapper, self.reducer, self.action_on_failure,
self.cache_files, self.cache_archives, self.step_args,
self.input, self.output, self._jar)
class ScriptRunnerStep(JarStep):
ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
def __init__(self, name, **kw):
super(ScriptRunnerStep, self).__init__(name, self.ScriptRunnerJar, **kw)
class PigBase(ScriptRunnerStep):
BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/pig/pig-script',
'--base-path', 's3n://us-east-1.elasticmapreduce/libs/pig/']
class InstallPigStep(PigBase):
"""
Install pig on emr step
"""
InstallPigName = 'Install Pig'
def __init__(self, pig_versions='latest'):
step_args = []
step_args.extend(self.BaseArgs)
step_args.extend(['--install-pig'])
step_args.extend(['--pig-versions', pig_versions])
super(InstallPigStep, self).__init__(self.InstallPigName, step_args=step_args)
class PigStep(PigBase):
"""
Pig script step
"""
    def __init__(self, name, pig_file, pig_versions='latest', pig_args=None):
        # Avoid a mutable default argument; treat None as "no extra args".
        if pig_args is None:
            pig_args = []
        step_args = []
        step_args.extend(self.BaseArgs)
        step_args.extend(['--pig-versions', pig_versions])
        step_args.extend(['--run-pig-script', '--args', '-f', pig_file])
        step_args.extend(pig_args)
super(PigStep, self).__init__(name, step_args=step_args)
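# Hedged sketch (not part of boto): the jar and argument list a PigStep
# assembles for script-runner.  The S3 path below is hypothetical.
def _example_pig_step_args():
    step = PigStep('Run analysis', 's3://example-bucket/scripts/analysis.pig')
    # jar() is the shared script-runner jar; args() carries the pig-script
    # bootstrap arguments plus '--run-pig-script --args -f <pig_file>'.
    return step.jar(), step.args()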
class HiveBase(ScriptRunnerStep):
BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/hive/hive-script',
'--base-path', 's3n://us-east-1.elasticmapreduce/libs/hive/']
class InstallHiveStep(HiveBase):
"""
Install Hive on EMR step
"""
InstallHiveName = 'Install Hive'
def __init__(self, hive_versions='latest', hive_site=None):
step_args = []
step_args.extend(self.BaseArgs)
step_args.extend(['--install-hive'])
step_args.extend(['--hive-versions', hive_versions])
if hive_site is not None:
step_args.extend(['--hive-site=%s' % hive_site])
super(InstallHiveStep, self).__init__(self.InstallHiveName,
step_args=step_args)
class HiveStep(HiveBase):
"""
Hive script step
"""
def __init__(self, name, hive_file, hive_versions='latest',
hive_args=None):
step_args = []
step_args.extend(self.BaseArgs)
step_args.extend(['--hive-versions', hive_versions])
step_args.extend(['--run-hive-script', '--args', '-f', hive_file])
if hive_args is not None:
step_args.extend(hive_args)
super(HiveStep, self).__init__(name, step_args=step_args)
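# Hedged usage sketch (not part of boto): a streaming step and the Hadoop
# arguments it produces.  Bucket names and script URIs are hypothetical; a
# real job flow would pass steps like this to EmrConnection.run_jobflow.
if __name__ == '__main__':
    word_count = StreamingStep(
        name='Word count',
        mapper='s3n://example-bucket/wordSplitter.py',
        reducer='aggregate',
        input='s3n://example-bucket/input/',
        output='s3n://example-bucket/output/')
    print(word_count.jar())
    # ['-mapper', ..., '-reducer', 'aggregate', '-input', ..., '-output', ...]
    print(word_count.args())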
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/emr/step.py
| 0.747708 | 0.246732 |
step.py
|
pypi
|
class Layer1Decisions(object):
"""
Use this object to build a list of decisions for a decision response.
    Each method call appends a new decision. Retrieve the list
of decisions from the _data attribute.
"""
def __init__(self):
self._data = []
def schedule_activity_task(self,
activity_id,
activity_type_name,
activity_type_version,
task_list=None,
control=None,
heartbeat_timeout=None,
schedule_to_close_timeout=None,
schedule_to_start_timeout=None,
start_to_close_timeout=None,
input=None):
"""
Schedules an activity task.
:type activity_id: string
        :param activity_id: The activityId of the activity task being
            scheduled.
:type activity_type_name: string
:param activity_type_name: The name of the type of the activity
being scheduled.
:type activity_type_version: string
:param activity_type_version: The version of the type of the
activity being scheduled.
:type task_list: string
:param task_list: If set, specifies the name of the task list in
which to schedule the activity task. If not specified, the
defaultTaskList registered with the activity type will be used.
Note: a task list for this activity task must be specified either
as a default for the activity type or through this field. If
neither this field is set nor a default task list was specified
at registration time then a fault will be returned.
"""
o = {}
o['decisionType'] = 'ScheduleActivityTask'
attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
attrs['activityType'] = {
'name': activity_type_name,
'version': activity_type_version,
}
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if control is not None:
attrs['control'] = control
if heartbeat_timeout is not None:
attrs['heartbeatTimeout'] = heartbeat_timeout
if schedule_to_close_timeout is not None:
attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
if schedule_to_start_timeout is not None:
attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_activity_task(self, activity_id):
"""
Attempts to cancel a previously scheduled activity task. If
the activity task was scheduled but has not been assigned to a
worker, then it will be canceled. If the activity task was
already assigned to a worker, then the worker will be informed
that cancellation has been requested in the response to
RecordActivityTaskHeartbeat.
"""
o = {}
o['decisionType'] = 'RequestCancelActivityTask'
attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
self._data.append(o)
def record_marker(self, marker_name, details=None):
"""
Records a MarkerRecorded event in the history. Markers can be
        used for adding custom information in the history, for instance
to let deciders know that they do not need to look at the
history beyond the marker event.
"""
o = {}
o['decisionType'] = 'RecordMarker'
attrs = o['recordMarkerDecisionAttributes'] = {}
attrs['markerName'] = marker_name
if details is not None:
attrs['details'] = details
self._data.append(o)
def complete_workflow_execution(self, result=None):
"""
Closes the workflow execution and records a WorkflowExecutionCompleted
event in the history
"""
o = {}
o['decisionType'] = 'CompleteWorkflowExecution'
attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
if result is not None:
attrs['result'] = result
self._data.append(o)
def fail_workflow_execution(self, reason=None, details=None):
"""
Closes the workflow execution and records a
WorkflowExecutionFailed event in the history.
"""
o = {}
o['decisionType'] = 'FailWorkflowExecution'
attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
if reason is not None:
attrs['reason'] = reason
if details is not None:
attrs['details'] = details
self._data.append(o)
def cancel_workflow_executions(self, details=None):
"""
Closes the workflow execution and records a WorkflowExecutionCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelWorkflowExecution'
        attrs = o['cancelWorkflowExecutionDecisionAttributes'] = {}
if details is not None:
attrs['details'] = details
self._data.append(o)
def continue_as_new_workflow_execution(self,
child_policy=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
start_to_close_timeout=None,
workflow_type_version=None):
"""
Closes the workflow execution and starts a new workflow execution of
the same type using the same workflow id and a unique run Id. A
WorkflowExecutionContinuedAsNew event is recorded in the history.
"""
o = {}
o['decisionType'] = 'ContinueAsNewWorkflowExecution'
attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
if child_policy is not None:
attrs['childPolicy'] = child_policy
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = start_to_close_timeout
if workflow_type_version is not None:
attrs['workflowTypeVersion'] = workflow_type_version
self._data.append(o)
def start_timer(self,
start_to_fire_timeout,
timer_id,
control=None):
"""
Starts a timer for this workflow execution and records a TimerStarted
event in the history. This timer will fire after the specified delay
and record a TimerFired event.
"""
o = {}
o['decisionType'] = 'StartTimer'
attrs = o['startTimerDecisionAttributes'] = {}
attrs['startToFireTimeout'] = start_to_fire_timeout
attrs['timerId'] = timer_id
if control is not None:
attrs['control'] = control
self._data.append(o)
def cancel_timer(self, timer_id):
"""
Cancels a previously started timer and records a TimerCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelTimer'
attrs = o['cancelTimerDecisionAttributes'] = {}
attrs['timerId'] = timer_id
self._data.append(o)
def signal_external_workflow_execution(self,
workflow_id,
signal_name,
run_id=None,
control=None,
input=None):
"""
Requests a signal to be delivered to the specified external workflow
execution and records a SignalExternalWorkflowExecutionInitiated
event in the history.
"""
o = {}
o['decisionType'] = 'SignalExternalWorkflowExecution'
attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
attrs['signalName'] = signal_name
if run_id is not None:
attrs['runId'] = run_id
if control is not None:
attrs['control'] = control
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_external_workflow_execution(self,
workflow_id,
control=None,
run_id=None):
"""
Requests that a request be made to cancel the specified
external workflow execution and records a
RequestCancelExternalWorkflowExecutionInitiated event in the
history.
"""
o = {}
o['decisionType'] = 'RequestCancelExternalWorkflowExecution'
attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
if control is not None:
attrs['control'] = control
if run_id is not None:
attrs['runId'] = run_id
self._data.append(o)
def start_child_workflow_execution(self,
workflow_type_name,
workflow_type_version,
workflow_id,
child_policy=None,
control=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
task_start_to_close_timeout=None):
"""
Requests that a child workflow execution be started and
records a StartChildWorkflowExecutionInitiated event in the
history. The child workflow execution is a separate workflow
execution with its own history.
"""
o = {}
o['decisionType'] = 'StartChildWorkflowExecution'
attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowType'] = {
'name': workflow_type_name,
'version': workflow_type_version,
}
attrs['workflowId'] = workflow_id
if child_policy is not None:
attrs['childPolicy'] = child_policy
if control is not None:
attrs['control'] = control
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if task_start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout
self._data.append(o)
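# Hedged usage sketch (not part of boto): a decider builds up decisions and
# hands the ``_data`` list back to SWF (typically via
# boto.swf.layer1.Layer1.respond_decision_task_completed).  Identifiers below
# are hypothetical.
if __name__ == '__main__':
    decisions = Layer1Decisions()
    decisions.schedule_activity_task(
        activity_id='verify-order-1',
        activity_type_name='VerifyOrder',
        activity_type_version='1.0',
        task_list='default',
        input='{"order_id": 42}')
    decisions.start_timer(start_to_fire_timeout='60', timer_id='retry-timer')
    print(decisions._data)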
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/swf/layer1_decisions.py
| 0.653569 | 0.269819 |
layer1_decisions.py
|
pypi
|
import os
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.awslambda import exceptions
class AWSLambdaConnection(AWSAuthConnection):
"""
AWS Lambda
**Overview**
This is the AWS Lambda API Reference. The AWS Lambda Developer
Guide provides additional information. For the service overview,
go to `What is AWS Lambda`_, and for information about how the
    service works, go to `AWS Lambda: How it Works`_ in the AWS Lambda
Developer Guide.
"""
APIVersion = "2014-11-11"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "lambda.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidRequestContentException": exceptions.InvalidRequestContentException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidParameterValueException": exceptions.InvalidParameterValueException,
"ServiceException": exceptions.ServiceException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(AWSLambdaConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_event_source(self, event_source, function_name, role,
batch_size=None, parameters=None):
"""
Identifies an Amazon Kinesis stream as the event source for an
AWS Lambda function. AWS Lambda invokes the specified function
when records are posted to the stream.
This is the pull model, where AWS Lambda invokes the function.
        For more information, go to `AWS Lambda: How it Works`_ in the
AWS Lambda Developer Guide.
This association between an Amazon Kinesis stream and an AWS
Lambda function is called the event source mapping. You
provide the configuration information (for example, which
stream to read from and which AWS Lambda function to invoke)
for the event source mapping in the request body.
This operation requires permission for the `iam:PassRole`
action for the IAM role. It also requires permission for the
`lambda:AddEventSource` action.
:type event_source: string
:param event_source: The Amazon Resource Name (ARN) of the Amazon
Kinesis stream that is the event source. Any record added to this
stream causes AWS Lambda to invoke your Lambda function. AWS Lambda
POSTs the Amazon Kinesis event, containing records, to your Lambda
function as JSON.
:type function_name: string
:param function_name: The Lambda function to invoke when AWS Lambda
detects an event on the stream.
:type role: string
:param role: The ARN of the IAM role (invocation role) that AWS Lambda
can assume to read from the stream and invoke the function.
:type batch_size: integer
:param batch_size: The largest number of records that AWS Lambda will
give to your function in a single event. The default is 100
records.
:type parameters: map
:param parameters: A map (key-value pairs) defining the configuration
for AWS Lambda to use when reading the event source. Currently, AWS
Lambda supports only the `InitialPositionInStream` key. The valid
values are: "TRIM_HORIZON" and "LATEST". The default value is
"TRIM_HORIZON". For more information, go to `ShardIteratorType`_ in
the Amazon Kinesis Service API Reference.
"""
uri = '/2014-11-13/event-source-mappings/'
params = {
'EventSource': event_source,
'FunctionName': function_name,
'Role': role,
}
headers = {}
query_params = {}
if batch_size is not None:
params['BatchSize'] = batch_size
if parameters is not None:
params['Parameters'] = parameters
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def delete_function(self, function_name):
"""
Deletes the specified Lambda function code and configuration.
This operation requires permission for the
`lambda:DeleteFunction` action.
:type function_name: string
:param function_name: The Lambda function to delete.
"""
uri = '/2014-11-13/functions/{0}'.format(function_name)
return self.make_request('DELETE', uri, expected_status=204)
def get_event_source(self, uuid):
"""
Returns configuration information for the specified event
source mapping (see AddEventSource).
This operation requires permission for the
`lambda:GetEventSource` action.
:type uuid: string
:param uuid: The AWS Lambda assigned ID of the event source mapping.
"""
uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid)
return self.make_request('GET', uri, expected_status=200)
def get_function(self, function_name):
"""
Returns the configuration information of the Lambda function
and a presigned URL link to the .zip file you uploaded with
UploadFunction so you can download the .zip file. Note that
the URL is valid for up to 10 minutes. The configuration
information is the same information you provided as parameters
when uploading the function.
This operation requires permission for the
`lambda:GetFunction` action.
:type function_name: string
:param function_name: The Lambda function name.
"""
uri = '/2014-11-13/functions/{0}'.format(function_name)
return self.make_request('GET', uri, expected_status=200)
def get_function_configuration(self, function_name):
"""
Returns the configuration information of the Lambda function.
        This is the same information you provided as parameters when
uploading the function by using UploadFunction.
This operation requires permission for the
`lambda:GetFunctionConfiguration` operation.
:type function_name: string
:param function_name: The name of the Lambda function for which you
want to retrieve the configuration information.
"""
uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
return self.make_request('GET', uri, expected_status=200)
def invoke_async(self, function_name, invoke_args):
"""
Submits an invocation request to AWS Lambda. Upon receiving
the request, Lambda executes the specified function
asynchronously. To see the logs generated by the Lambda
function execution, see the CloudWatch logs console.
This operation requires permission for the
`lambda:InvokeAsync` action.
:type function_name: string
:param function_name: The Lambda function name.
:type invoke_args: blob
:param invoke_args: JSON that you want to provide to your Lambda
function as input.
"""
uri = '/2014-11-13/functions/{0}/invoke-async/'.format(function_name)
headers = {}
query_params = {}
try:
content_length = str(len(invoke_args))
except (TypeError, AttributeError):
# If a file like object is provided and seekable, try to retrieve
# the file size via fstat.
try:
invoke_args.tell()
except (AttributeError, OSError, IOError):
raise TypeError(
"File-like object passed to parameter "
"``invoke_args`` must be seekable."
)
content_length = str(os.fstat(invoke_args.fileno()).st_size)
headers['Content-Length'] = content_length
return self.make_request('POST', uri, expected_status=202,
data=invoke_args, headers=headers,
params=query_params)
def list_event_sources(self, event_source_arn=None, function_name=None,
marker=None, max_items=None):
"""
Returns a list of event source mappings. For each mapping, the
API returns configuration information (see AddEventSource).
You can optionally specify filters to retrieve specific event
source mappings.
This operation requires permission for the
`lambda:ListEventSources` action.
:type event_source_arn: string
:param event_source_arn: The Amazon Resource Name (ARN) of the Amazon
Kinesis stream.
:type function_name: string
:param function_name: The name of the AWS Lambda function.
:type marker: string
:param marker: Optional string. An opaque pagination token returned
            from a previous `ListEventSources` operation. If present, the
            listing continues from where the previous call left off.
:type max_items: integer
:param max_items: Optional integer. Specifies the maximum number of
event sources to return in response. This value must be greater
than 0.
"""
uri = '/2014-11-13/event-source-mappings/'
params = {}
headers = {}
query_params = {}
if event_source_arn is not None:
query_params['EventSource'] = event_source_arn
if function_name is not None:
query_params['FunctionName'] = function_name
if marker is not None:
query_params['Marker'] = marker
if max_items is not None:
query_params['MaxItems'] = max_items
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def list_functions(self, marker=None, max_items=None):
"""
Returns a list of your Lambda functions. For each function,
the response includes the function configuration information.
You must use GetFunction to retrieve the code for your
function.
This operation requires permission for the
`lambda:ListFunctions` action.
:type marker: string
:param marker: Optional string. An opaque pagination token returned
from a previous `ListFunctions` operation. If present, indicates
where to continue the listing.
:type max_items: integer
:param max_items: Optional integer. Specifies the maximum number of AWS
Lambda functions to return in response. This parameter value must
be greater than 0.
"""
uri = '/2014-11-13/functions/'
params = {}
headers = {}
query_params = {}
if marker is not None:
query_params['Marker'] = marker
if max_items is not None:
query_params['MaxItems'] = max_items
return self.make_request('GET', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def remove_event_source(self, uuid):
"""
Removes an event source mapping. This means AWS Lambda will no
longer invoke the function for events in the associated
source.
This operation requires permission for the
`lambda:RemoveEventSource` action.
:type uuid: string
:param uuid: The event source mapping ID.
"""
uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid)
return self.make_request('DELETE', uri, expected_status=204)
def update_function_configuration(self, function_name, role=None,
handler=None, description=None,
timeout=None, memory_size=None):
"""
Updates the configuration parameters for the specified Lambda
function by using the values provided in the request. You
provide only the parameters you want to change. This operation
must only be used on an existing Lambda function and cannot be
used to update the function's code.
This operation requires permission for the
`lambda:UpdateFunctionConfiguration` action.
:type function_name: string
:param function_name: The name of the Lambda function.
:type role: string
:param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
will assume when it executes your function.
:type handler: string
:param handler: The function that Lambda calls to begin executing your
function. For Node.js, it is the module-name.export value in your
function.
:type description: string
:param description: A short user-defined function description. Lambda
does not use this value. Assign a meaningful description as you see
fit.
:type timeout: integer
:param timeout: The function execution time at which Lambda should
terminate the function. Because the execution time has cost
implications, we recommend you set this value based on your
expected execution time. The default is 3 seconds.
:type memory_size: integer
:param memory_size: The amount of memory, in MB, your Lambda function
is given. Lambda uses this memory size to infer the amount of CPU
allocated to your function. Your function use-case determines your
CPU and memory requirements. For example, a database operation
might need less memory compared to an image processing function.
The default value is 128 MB. The value must be a multiple of 64 MB.
"""
uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
params = {}
headers = {}
query_params = {}
if role is not None:
query_params['Role'] = role
if handler is not None:
query_params['Handler'] = handler
if description is not None:
query_params['Description'] = description
if timeout is not None:
query_params['Timeout'] = timeout
if memory_size is not None:
query_params['MemorySize'] = memory_size
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params), headers=headers,
params=query_params)
def upload_function(self, function_name, function_zip, runtime, role,
handler, mode, description=None, timeout=None,
memory_size=None):
"""
Creates a new Lambda function or updates an existing function.
The function metadata is created from the request parameters,
and the code for the function is provided by a .zip file in
the request body. If the function name already exists, the
existing Lambda function is updated with the new code and
metadata.
This operation requires permission for the
`lambda:UploadFunction` action.
:type function_name: string
:param function_name: The name you want to assign to the function you
are uploading. The function names appear in the console and are
returned in the ListFunctions API. Function names are used to
specify functions to other AWS Lambda APIs, such as InvokeAsync.
:type function_zip: blob
:param function_zip: A .zip file containing your packaged source code.
            For more information about creating a .zip file, go to `AWS Lambda:
How it Works`_ in the AWS Lambda Developer Guide.
:type runtime: string
:param runtime: The runtime environment for the Lambda function you are
uploading. Currently, Lambda supports only "nodejs" as the runtime.
:type role: string
:param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
assumes when it executes your function to access any other Amazon
Web Services (AWS) resources.
:type handler: string
:param handler: The function that Lambda calls to begin execution. For
            Node.js, it is the module-name.export value in your function.
:type mode: string
:param mode: How the Lambda function will be invoked. Lambda supports
only the "event" mode.
:type description: string
:param description: A short, user-defined function description. Lambda
does not use this value. Assign a meaningful description as you see
fit.
:type timeout: integer
:param timeout: The function execution time at which Lambda should
terminate the function. Because the execution time has cost
implications, we recommend you set this value based on your
expected execution time. The default is 3 seconds.
:type memory_size: integer
:param memory_size: The amount of memory, in MB, your Lambda function
is given. Lambda uses this memory size to infer the amount of CPU
allocated to your function. Your function use-case determines your
            CPU and memory requirements. For example, a database operation might
            need less memory compared to an image processing function. The default
value is 128 MB. The value must be a multiple of 64 MB.
"""
uri = '/2014-11-13/functions/{0}'.format(function_name)
headers = {}
query_params = {}
if runtime is not None:
query_params['Runtime'] = runtime
if role is not None:
query_params['Role'] = role
if handler is not None:
query_params['Handler'] = handler
if mode is not None:
query_params['Mode'] = mode
if description is not None:
query_params['Description'] = description
if timeout is not None:
query_params['Timeout'] = timeout
if memory_size is not None:
query_params['MemorySize'] = memory_size
try:
content_length = str(len(function_zip))
except (TypeError, AttributeError):
# If a file like object is provided and seekable, try to retrieve
# the file size via fstat.
try:
function_zip.tell()
except (AttributeError, OSError, IOError):
raise TypeError(
"File-like object passed to parameter "
"``function_zip`` must be seekable."
)
content_length = str(os.fstat(function_zip.fileno()).st_size)
headers['Content-Length'] = content_length
return self.make_request('PUT', uri, expected_status=201,
data=function_zip, headers=headers,
params=query_params)
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data, params=params)
body = response.read().decode('utf-8')
if body:
body = json.loads(body)
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
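# Hedged usage sketch (not part of boto): listing functions and invoking one
# asynchronously.  Running this needs valid AWS credentials and network
# access; the function name is hypothetical.
if __name__ == '__main__':
    conn = AWSLambdaConnection()
    print(conn.list_functions(max_items=10))
    payload = json.dumps({'order_id': 42})
    conn.invoke_async('processOrder', payload)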
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/awslambda/layer1.py
| 0.718693 | 0.287518 |
layer1.py
|
pypi
|
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.logs import exceptions
from boto.compat import json
class CloudWatchLogsConnection(AWSQueryConnection):
"""
Amazon CloudWatch Logs Service API Reference
This is the Amazon CloudWatch Logs API Reference . Amazon
CloudWatch Logs is a managed service for real time monitoring and
archival of application logs. This guide provides detailed
information about Amazon CloudWatch Logs actions, data types,
parameters, and errors. For detailed information about Amazon
CloudWatch Logs features and their associated API calls, go to the
`Amazon CloudWatch Logs Developer Guide`_.
Use the following links to get started using the Amazon CloudWatch
API Reference :
+ `Actions`_: An alphabetical list of all Amazon CloudWatch Logs
actions.
+ `Data Types`_: An alphabetical list of all Amazon CloudWatch
Logs data types.
+ `Common Parameters`_: Parameters that all Query actions can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
+ `Regions and Endpoints`_: Itemized regions and endpoints for all
AWS products.
In addition to using the Amazon CloudWatch Logs API, you can also
use the following SDKs and third-party libraries to access Amazon
CloudWatch Logs programmatically.
+ `AWS SDK for Java Documentation`_
+ `AWS SDK for .NET Documentation`_
+ `AWS SDK for PHP Documentation`_
+ `AWS SDK for Ruby Documentation`_
Developers in the AWS developer community also provide their own
libraries, which you can find at the following AWS developer
centers:
+ `AWS Java Developer Center`_
+ `AWS PHP Developer Center`_
+ `AWS Python Developer Center`_
+ `AWS Ruby Developer Center`_
+ `AWS Windows and .NET Developer Center`_
"""
APIVersion = "2014-03-28"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com"
ServiceName = "CloudWatchLogs"
TargetPrefix = "Logs_20140328"
ResponseError = JSONResponseError
_faults = {
"LimitExceededException": exceptions.LimitExceededException,
"DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ServiceUnavailableException": exceptions.ServiceUnavailableException,
"InvalidParameterException": exceptions.InvalidParameterException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException,
"OperationAbortedException": exceptions.OperationAbortedException,
"InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudWatchLogsConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_log_group(self, log_group_name):
"""
Creates a new log group with the specified name. The name of
the log group must be unique within a region for an AWS
account. You can create up to 100 log groups per account.
You must use the following guidelines when naming a log group:
+ Log group names can be between 1 and 512 characters long.
        + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
        (hyphen), '/' (forward slash), and '.' (period).
        Log groups are created with a default retention of 14 days.
        The retention attribute allows you to configure the number of
        days you want to retain log events in the specified log group.
        See the `SetRetention` operation for how to modify the
        retention of your log groups.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='CreateLogGroup',
body=json.dumps(params))
def create_log_stream(self, log_group_name, log_stream_name):
"""
Creates a new log stream in the specified log group. The name
of the log stream must be unique within the log group. There
is no limit on the number of log streams that can exist in a
log group.
You must use the following guidelines when naming a log
stream:
+ Log stream names can be between 1 and 512 characters long.
+ The ':' colon character is not allowed.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='CreateLogStream',
body=json.dumps(params))
def delete_log_group(self, log_group_name):
"""
Deletes the log group with the specified name. Amazon
CloudWatch Logs will delete a log group only if there are no
log streams and no metric filters associated with the log
group. If this condition is not satisfied, the request will
fail and the log group will not be deleted.
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteLogGroup',
body=json.dumps(params))
def delete_log_stream(self, log_group_name, log_stream_name):
"""
Deletes a log stream and permanently deletes all the archived
log events associated with it.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
return self.make_request(action='DeleteLogStream',
body=json.dumps(params))
def delete_metric_filter(self, log_group_name, filter_name):
"""
Deletes a metric filter associated with the specified log
group.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
}
return self.make_request(action='DeleteMetricFilter',
body=json.dumps(params))
def delete_retention_policy(self, log_group_name):
"""
:type log_group_name: string
:param log_group_name:
"""
params = {'logGroupName': log_group_name, }
return self.make_request(action='DeleteRetentionPolicy',
body=json.dumps(params))
def describe_log_groups(self, log_group_name_prefix=None,
next_token=None, limit=None):
"""
Returns all the log groups that are associated with the AWS
account making the request. The list returned in the response
is ASCII-sorted by log group name.
By default, this operation returns up to 50 log groups. If
there are more log groups to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log groups returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name_prefix: string
:param log_group_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogGroups` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {}
if log_group_name_prefix is not None:
params['logGroupNamePrefix'] = log_group_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogGroups',
body=json.dumps(params))
def describe_log_streams(self, log_group_name,
log_stream_name_prefix=None, next_token=None,
limit=None):
"""
Returns all the log streams that are associated with the
specified log group. The list returned in the response is
ASCII-sorted by log stream name.
By default, this operation returns up to 50 log streams. If
there are more log streams to list, the response would contain
a `nextToken` value in the response body. You can also limit
the number of log streams returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name_prefix: string
:param log_stream_name_prefix:
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeLogStreams` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if log_stream_name_prefix is not None:
params['logStreamNamePrefix'] = log_stream_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeLogStreams',
body=json.dumps(params))
def describe_metric_filters(self, log_group_name,
filter_name_prefix=None, next_token=None,
limit=None):
"""
Returns all the metrics filters associated with the specified
log group. The list returned in the response is ASCII-sorted
by filter name.
By default, this operation returns up to 50 metric filters. If
there are more metric filters to list, the response would
contain a `nextToken` value in the response body. You can also
limit the number of metric filters returned in the response by
specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type filter_name_prefix: string
:param filter_name_prefix: The name of the metric filter.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
response of the previous `DescribeMetricFilters` request.
:type limit: integer
:param limit: The maximum number of items returned in the response. If
you don't specify a value, the request would return up to 50 items.
"""
params = {'logGroupName': log_group_name, }
if filter_name_prefix is not None:
params['filterNamePrefix'] = filter_name_prefix
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
return self.make_request(action='DescribeMetricFilters',
body=json.dumps(params))
def get_log_events(self, log_group_name, log_stream_name,
start_time=None, end_time=None, next_token=None,
limit=None, start_from_head=None):
"""
Retrieves log events from the specified log stream. You can
provide an optional time range to filter the results on the
event `timestamp`.
        By default, this operation returns as many log events as can
fit in a response size of 1MB, up to 10,000 log events. The
response will always include a `nextForwardToken` and a
`nextBackwardToken` in the response body. You can use any of
these tokens in subsequent `GetLogEvents` requests to paginate
through events in either forward or backward direction. You
can also limit the number of log events returned in the
response by specifying the `limit` parameter in the request.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type start_time: long
        :param start_time: A point in time expressed as the number of milliseconds
since Jan 1, 1970 00:00:00 UTC.
:type end_time: long
        :param end_time: A point in time expressed as the number of milliseconds
since Jan 1, 1970 00:00:00 UTC.
:type next_token: string
:param next_token: A string token used for pagination that points to
the next page of results. It must be a value obtained from the
`nextForwardToken` or `nextBackwardToken` fields in the response of
the previous `GetLogEvents` request.
:type limit: integer
:param limit: The maximum number of log events returned in the
            response. If you don't specify a value, the request would return as
            many log events as can fit in a response size of 1MB, up to 10,000
log events.
:type start_from_head: boolean
:param start_from_head:
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
}
if start_time is not None:
params['startTime'] = start_time
if end_time is not None:
params['endTime'] = end_time
if next_token is not None:
params['nextToken'] = next_token
if limit is not None:
params['limit'] = limit
if start_from_head is not None:
params['startFromHead'] = start_from_head
return self.make_request(action='GetLogEvents',
body=json.dumps(params))
def put_log_events(self, log_group_name, log_stream_name, log_events,
sequence_token=None):
"""
Uploads a batch of log events to the specified log stream.
Every PutLogEvents request must include the `sequenceToken`
obtained from the response of the previous request. An upload
in a newly created log stream does not require a
`sequenceToken`.
The batch of events must satisfy the following constraints:
+ The maximum batch size is 32,768 bytes, and this size is
calculated as the sum of all event messages in UTF-8, plus 26
bytes for each log event.
+ None of the log events in the batch can be more than 2 hours
in the future.
+ None of the log events in the batch can be older than 14
days or the retention period of the log group.
        + The log events in the batch must be in chronological order
        by their `timestamp`.
+ The maximum number of log events in a batch is 1,000.
:type log_group_name: string
:param log_group_name:
:type log_stream_name: string
:param log_stream_name:
:type log_events: list
:param log_events: A list of events belonging to a log stream.
:type sequence_token: string
:param sequence_token: A string token that must be obtained from the
response of the previous `PutLogEvents` request.
"""
params = {
'logGroupName': log_group_name,
'logStreamName': log_stream_name,
'logEvents': log_events,
}
if sequence_token is not None:
params['sequenceToken'] = sequence_token
return self.make_request(action='PutLogEvents',
body=json.dumps(params))
def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
metric_transformations):
"""
Creates or updates a metric filter and associates it with the
specified log group. Metric filters allow you to configure
rules to extract metric data from log events ingested through
`PutLogEvents` requests.
:type log_group_name: string
:param log_group_name:
:type filter_name: string
:param filter_name: The name of the metric filter.
:type filter_pattern: string
:param filter_pattern:
:type metric_transformations: list
:param metric_transformations:
"""
params = {
'logGroupName': log_group_name,
'filterName': filter_name,
'filterPattern': filter_pattern,
'metricTransformations': metric_transformations,
}
return self.make_request(action='PutMetricFilter',
body=json.dumps(params))
def put_retention_policy(self, log_group_name, retention_in_days):
"""
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='PutRetentionPolicy',
body=json.dumps(params))
def set_retention(self, log_group_name, retention_in_days):
"""
Sets the retention of the specified log group. Log groups are
created with a default retention of 14 days. The retention
        attribute allows you to configure the number of days you want
to retain log events in the specified log group.
:type log_group_name: string
:param log_group_name:
:type retention_in_days: integer
:param retention_in_days: Specifies the number of days you want to
retain log events in the specified log group. Possible values are:
1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
"""
params = {
'logGroupName': log_group_name,
'retentionInDays': retention_in_days,
}
return self.make_request(action='SetRetention',
body=json.dumps(params))
def test_metric_filter(self, filter_pattern, log_event_messages):
"""
Tests the filter pattern of a metric filter against a sample
of log event messages. You can use this operation to validate
the correctness of a metric filter pattern.
:type filter_pattern: string
:param filter_pattern:
:type log_event_messages: list
:param log_event_messages:
"""
params = {
'filterPattern': filter_pattern,
'logEventMessages': log_event_messages,
}
return self.make_request(action='TestMetricFilter',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
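# Hedged usage sketch (not part of boto): creating a log group/stream and
# pushing one event.  Requires valid AWS credentials and network access;
# the group and stream names are hypothetical.
if __name__ == '__main__':
    import time
    conn = CloudWatchLogsConnection()
    conn.create_log_group('example-group')
    conn.create_log_stream('example-group', 'example-stream')
    event = {'timestamp': int(time.time() * 1000), 'message': 'hello world'}
    resp = conn.put_log_events('example-group', 'example-stream', [event])
    # Later uploads to the same stream must pass the returned
    # 'nextSequenceToken' as sequence_token.
    print(resp)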
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/logs/layer1.py
| 0.730097 | 0.339431 |
layer1.py
|
pypi
|
from copy import deepcopy
class NEWVALUE(object):
# A marker for new data added.
pass
class Item(object):
"""
An object representing the item data within a DynamoDB table.
An item is largely schema-free, meaning it can contain any data. The only
limitation is that it must have data for the fields in the ``Table``'s
schema.
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
Empty items, or items that have no data, are considered falsey.
"""
def __init__(self, table, data=None, loaded=False):
"""
Constructs an (unsaved) ``Item`` instance.
To persist the data in DynamoDB, you'll need to call the ``Item.save``
(or ``Item.partial_save``) on the instance.
Requires a ``table`` parameter, which should be a ``Table`` instance.
        This is required, as DynamoDB's API is focused on all operations
        being table-level. It is also used to persist the schema across many objects.
Optionally accepts a ``data`` parameter, which should be a dictionary
of the fields & values of the item. Alternatively, an ``Item`` instance
may be provided from which to extract the data.
Optionally accepts a ``loaded`` parameter, which should be a boolean.
``True`` if it was preexisting data loaded from DynamoDB, ``False`` if
it's new data from the user. Default is ``False``.
Example::
>>> users = Table('users')
>>> user = Item(users, data={
... 'username': 'johndoe',
... 'first_name': 'John',
            ...     'date_joined': 1248061592,
... })
# Change existing data.
>>> user['first_name'] = 'Johann'
# Add more data.
>>> user['last_name'] = 'Doe'
# Delete data.
>>> del user['date_joined']
# Iterate over all the data.
>>> for field, val in user.items():
... print "%s: %s" % (field, val)
username: johndoe
first_name: John
            date_joined: 1248061592
"""
self.table = table
self._loaded = loaded
self._orig_data = {}
self._data = data
self._dynamizer = table._dynamizer
if isinstance(self._data, Item):
self._data = self._data._data
if self._data is None:
self._data = {}
if self._loaded:
self._orig_data = deepcopy(self._data)
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
if not key in self._data:
return
del self._data[key]
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def get(self, key, default=None):
return self._data.get(key, default)
def __iter__(self):
for key in self._data:
yield self._data[key]
def __contains__(self, key):
return key in self._data
def __bool__(self):
return bool(self._data)
__nonzero__ = __bool__
def _determine_alterations(self):
"""
        Checks the ``_orig_data`` against the ``_data`` to determine what
changes to the data are present.
Returns a dictionary containing the keys ``adds``, ``changes`` &
``deletes``, containing the updated data.
"""
alterations = {
'adds': {},
'changes': {},
'deletes': [],
}
orig_keys = set(self._orig_data.keys())
data_keys = set(self._data.keys())
# Run through keys we know are in both for changes.
for key in orig_keys.intersection(data_keys):
if self._data[key] != self._orig_data[key]:
if self._is_storable(self._data[key]):
alterations['changes'][key] = self._data[key]
else:
alterations['deletes'].append(key)
# Run through additions.
for key in data_keys.difference(orig_keys):
if self._is_storable(self._data[key]):
alterations['adds'][key] = self._data[key]
# Run through deletions.
for key in orig_keys.difference(data_keys):
alterations['deletes'].append(key)
return alterations
def needs_save(self, data=None):
"""
Returns whether or not the data has changed on the ``Item``.
Optionally accepts a ``data`` argument, which accepts the output from
``self._determine_alterations()`` if you've already called it. Typically
unnecessary to do. Default is ``None``.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
"""
if data is None:
data = self._determine_alterations()
needs_save = False
for kind in ['adds', 'changes', 'deletes']:
if len(data[kind]):
needs_save = True
break
return needs_save
def mark_clean(self):
"""
Marks an ``Item`` instance as no longer needing to be saved.
Example:
>>> user.needs_save()
False
>>> user['first_name'] = 'Johann'
>>> user.needs_save()
True
>>> user.mark_clean()
>>> user.needs_save()
False
"""
self._orig_data = deepcopy(self._data)
def mark_dirty(self):
"""
DEPRECATED: Marks an ``Item`` instance as needing to be saved.
This method is no longer necessary, as the state tracking on ``Item``
has been improved to automatically detect proper state.
"""
return
def load(self, data):
"""
This is only useful when being handed raw data from DynamoDB directly.
If you have a Python datastructure already, use the ``__init__`` or
manually set the data instead.
Largely internal, unless you know what you're doing or are trying to
mix the low-level & high-level APIs.
"""
self._data = {}
for field_name, field_value in data.get('Item', {}).items():
self[field_name] = self._dynamizer.decode(field_value)
self._loaded = True
self._orig_data = deepcopy(self._data)
def get_keys(self):
"""
Returns a Python-style dict of the keys/values.
Largely internal.
"""
key_fields = self.table.get_key_fields()
key_data = {}
for key in key_fields:
key_data[key] = self[key]
return key_data
def get_raw_keys(self):
"""
Returns a DynamoDB-style dict of the keys/values.
Largely internal.
"""
raw_key_data = {}
for key, value in self.get_keys().items():
raw_key_data[key] = self._dynamizer.encode(value)
return raw_key_data
def build_expects(self, fields=None):
"""
        Builds up a list of expectations to hand off to DynamoDB on save.
Largely internal.
"""
expects = {}
if fields is None:
fields = list(self._data.keys()) + list(self._orig_data.keys())
# Only uniques.
fields = set(fields)
for key in fields:
expects[key] = {
'Exists': True,
}
value = None
# Check for invalid keys.
if not key in self._orig_data and not key in self._data:
raise ValueError("Unknown key %s provided." % key)
# States:
# * New field (only in _data)
# * Unchanged field (in both _data & _orig_data, same data)
# * Modified field (in both _data & _orig_data, different data)
# * Deleted field (only in _orig_data)
orig_value = self._orig_data.get(key, NEWVALUE)
current_value = self._data.get(key, NEWVALUE)
if orig_value == current_value:
# Existing field unchanged.
value = current_value
else:
if key in self._data:
if not key in self._orig_data:
# New field.
expects[key]['Exists'] = False
else:
# Existing field modified.
value = orig_value
else:
# Existing field deleted.
value = orig_value
if value is not None:
expects[key]['Value'] = self._dynamizer.encode(value)
return expects
def _is_storable(self, value):
# We need to prevent ``None``, empty string & empty set from
        # heading to DDB, but allow false-y values like 0 & False to make it.
if not value:
if not value in (0, 0.0, False):
return False
return True
def prepare_full(self):
"""
Runs through all fields & encodes them to be handed off to DynamoDB
        as part of a ``save`` (``put_item``) call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
for key, value in self._data.items():
if not self._is_storable(value):
continue
final_data[key] = self._dynamizer.encode(value)
return final_data
def prepare_partial(self):
"""
Runs through **ONLY** the changed/deleted fields & encodes them to be
        handed off to DynamoDB as part of a ``partial_save`` (``update_item``)
call.
Largely internal.
"""
# This doesn't save on its own. Rather, we prepare the datastructure
# and hand-off to the table to handle creation/update.
final_data = {}
fields = set()
alterations = self._determine_alterations()
for key, value in alterations['adds'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key, value in alterations['changes'].items():
final_data[key] = {
'Action': 'PUT',
'Value': self._dynamizer.encode(self._data[key])
}
fields.add(key)
for key in alterations['deletes']:
final_data[key] = {
'Action': 'DELETE',
}
fields.add(key)
return final_data, fields
def partial_save(self):
"""
Saves only the changed data to DynamoDB.
Extremely useful for high-volume/high-write data sets, this allows
you to update only a handful of fields rather than having to push
entire items. This prevents many accidental overwrite situations as
well as saves on the amount of data to transfer over the wire.
Returns ``True`` on success, ``False`` if no save was performed or
the write failed.
Example::
>>> user['last_name'] = 'Doh!'
# Only the last name field will be sent to DynamoDB.
>>> user.partial_save()
"""
key = self.get_keys()
# Build a new dict of only the data we're changing.
final_data, fields = self.prepare_partial()
if not final_data:
return False
# Remove the key(s) from the ``final_data`` if present.
# They should only be present if this is a new item, in which
        # case we shouldn't send them as part of the data to update.
for fieldname, value in key.items():
if fieldname in final_data:
del final_data[fieldname]
try:
# It's likely also in ``fields``, so remove it there too.
fields.remove(fieldname)
except KeyError:
pass
# Build expectations of only the fields we're planning to update.
expects = self.build_expects(fields=fields)
returned = self.table._update_item(key, final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def save(self, overwrite=False):
"""
Saves all data to DynamoDB.
By default, this attempts to ensure that none of the underlying
data has changed. If any fields have changed in between when the
``Item`` was constructed & when it is saved, this call will fail so
as not to cause any data loss.
If you're sure possibly overwriting data is acceptable, you can pass
an ``overwrite=True``. If that's not acceptable, you may be able to use
``Item.partial_save`` to only write the changed field data.
Optionally accepts an ``overwrite`` parameter, which should be a
boolean. If you provide ``True``, the item will be forcibly overwritten
within DynamoDB, even if another process changed the data in the
meantime. (Default: ``False``)
Returns ``True`` on success, ``False`` if no save was performed.
Example::
>>> user['last_name'] = 'Doh!'
# All data on the Item is sent to DynamoDB.
>>> user.save()
# If it fails, you can overwrite.
>>> user.save(overwrite=True)
"""
if not self.needs_save() and not overwrite:
return False
final_data = self.prepare_full()
expects = None
if overwrite is False:
# Build expectations about *all* of the data.
expects = self.build_expects()
returned = self.table._put_item(final_data, expects=expects)
# Mark the object as clean.
self.mark_clean()
return returned
def delete(self):
"""
Deletes the item's data from DynamoDB.
Returns ``True`` on success.
Example::
# Buh-bye now.
>>> user.delete()
"""
key_data = self.get_keys()
return self.table.delete_item(**key_data)
# End of boto/dynamodb2/items.py
from binascii import crc32
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
Amazon DynamoDB
**Overview**
This is the Amazon DynamoDB API Reference. This guide provides
descriptions and samples of the low-level DynamoDB API. For
information about DynamoDB application development, go to the
`Amazon DynamoDB Developer Guide`_.
Instead of making the requests to the low-level DynamoDB API
directly from your application, we recommend that you use the AWS
Software Development Kits (SDKs). The easy-to-use libraries in the
AWS SDKs make it unnecessary to call the low-level DynamoDB API
directly from your application. The libraries take care of request
authentication, serialization, and connection management. For more
information, go to `Using the AWS SDKs with DynamoDB`_ in the
Amazon DynamoDB Developer Guide .
If you decide to code against the low-level DynamoDB API directly,
you will need to write the necessary code to authenticate your
requests. For more information on signing your requests, go to
`Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide .
The following are short descriptions of each low-level API action,
organized by function.
**Managing Tables**
+ CreateTable - Creates a table with user-specified provisioned
throughput settings. You must designate one attribute as the hash
primary key for the table; you can optionally designate a second
attribute as the range primary key. DynamoDB creates indexes on
these key attributes for fast data access. Optionally, you can
create one or more secondary indexes, which provide fast data
access using non-key attributes.
+ DescribeTable - Returns metadata for a table, such as table
size, status, and index information.
+ UpdateTable - Modifies the provisioned throughput settings for a
table. Optionally, you can modify the provisioned throughput
settings for global secondary indexes on the table.
+ ListTables - Returns a list of all tables associated with the
current AWS account and endpoint.
+ DeleteTable - Deletes a table and all of its indexes.
For conceptual information about managing tables, go to `Working
with Tables`_ in the Amazon DynamoDB Developer Guide .
**Reading Data**
+ GetItem - Returns a set of attributes for the item that has a
given primary key. By default, GetItem performs an eventually
consistent read; however, applications can specify a strongly
consistent read instead.
+ BatchGetItem - Performs multiple GetItem requests for data items
using their primary keys, from one table or multiple tables. The
response from BatchGetItem has a size limit of 16 MB and returns a
maximum of 100 items. Both eventually consistent and strongly
consistent reads can be used.
+ Query - Returns one or more items from a table or a secondary
index. You must provide a specific hash key value. You can narrow
the scope of the query using comparison operators against a range
key value, or on the index key. Query supports either eventual or
strong consistency. A single response has a size limit of 1 MB.
+ Scan - Reads every item in a table; the result set is eventually
consistent. You can limit the number of items returned by
filtering the data attributes, using conditional expressions. Scan
can be used to enable ad-hoc querying of a table against non-key
attributes; however, since this is a full table scan without using
an index, Scan should not be used for any application query use
case that requires predictable performance.
For conceptual information about reading data, go to `Working with
Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB
Developer Guide .
**Modifying Data**
+ PutItem - Creates a new item, or replaces an existing item with
a new item (including all the attributes). By default, if an item
in the table already exists with the same primary key, the new
item completely replaces the existing item. You can use
conditional operators to replace an item only if its attribute
values match certain conditions, or to insert a new item only if
that item doesn't already exist.
+ UpdateItem - Modifies the attributes of an existing item. You
can also use conditional operators to perform an update only if
the item's attribute values match certain conditions.
+ DeleteItem - Deletes an item in a table by primary key. You can
use conditional operators to delete an item only if the
item's attribute values match certain conditions.
+ BatchWriteItem - Performs multiple PutItem and DeleteItem
requests across multiple tables in a single request. A failure of
any request(s) in the batch will not cause the entire
BatchWriteItem operation to fail. Supports batches of up to 25
items to put or delete, with a maximum total request size of 16
MB.
For conceptual information about modifying data, go to `Working
with Items`_ and `Query and Scan Operations`_ in the Amazon
DynamoDB Developer Guide .
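**Connecting**
The following is an illustrative sketch only; the credentials and the
region name shown are placeholders, not values defined by this module::
>>> from boto.dynamodb2.layer1 import DynamoDBConnection
>>> conn = DynamoDBConnection(
...     aws_access_key_id='<access key>',
...     aws_secret_access_key='<secret key>')
>>> # Or resolve the endpoint from a region name:
>>> import boto.dynamodb2
>>> conn = boto.dynamodb2.connect_to_region('us-east-1')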
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com"
ServiceName = "DynamoDB"
TargetPrefix = "DynamoDB_20120810"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServerError": exceptions.InternalServerError,
"ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException,
}
NumberRetries = 10
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
validate_checksums = kwargs.pop('validate_checksums', True)
if not region:
region_name = boto.config.get('DynamoDB', 'region',
self.DefaultRegionName)
for reg in boto.dynamodb2.regions():
if reg.name == region_name:
region = reg
break
# Only set host if it isn't manually overwritten
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DynamoDBConnection, self).__init__(**kwargs)
self.region = region
self._validate_checksums = boto.config.getbool(
'DynamoDB', 'validate_checksums', validate_checksums)
self.throughput_exceeded_events = 0
def _required_auth_capability(self):
return ['hmac-v4']
def batch_get_item(self, request_items, return_consumed_capacity=None):
"""
The BatchGetItem operation returns the attributes of one or
more items from one or more tables. You identify requested
items by primary key.
A single operation can retrieve up to 16 MB of data, which can
contain as many as 100 items. BatchGetItem will return a
partial result if the response size limit is exceeded, the
table's provisioned throughput is exceeded, or an internal
processing failure occurs. If a partial result is returned,
the operation returns a value for UnprocessedKeys . You can
use this value to retry the operation starting with the next
item to get.
For example, if you ask to retrieve 100 items, but each
individual item is 300 KB in size, the system returns 52 items
(so as not to exceed the 16 MB limit). It also returns an
appropriate UnprocessedKeys value so you can get the next page
of results. If desired, your application can include its own
logic to assemble the pages of results into one data set.
If none of the items can be processed due to insufficient
provisioned throughput on all of the tables in the request,
then BatchGetItem will return a
ProvisionedThroughputExceededException . If at least one of
the items is successfully processed, then BatchGetItem
completes successfully, while returning the keys of the unread
items in UnprocessedKeys .
If DynamoDB returns any unprocessed items, you should retry
the batch operation on those items. However, we strongly
recommend that you use an exponential backoff algorithm . If
you retry the batch operation immediately, the underlying read
or write requests can still fail due to throttling on the
individual tables. If you delay the batch operation using
exponential backoff, the individual requests in the batch are
much more likely to succeed.
For more information, go to `Batch Operations and Error
Handling`_ in the Amazon DynamoDB Developer Guide .
By default, BatchGetItem performs eventually consistent reads
on every table in the request. If you want strongly consistent
reads instead, you can set ConsistentRead to `True` for any or
all tables.
In order to minimize response latency, BatchGetItem retrieves
items in parallel.
When designing your application, keep in mind that DynamoDB
does not return attributes in any particular order. To help
parse the response by item, include the primary key values for
the items in your request in the AttributesToGet parameter.
If a requested item does not exist, it is not returned in the
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
DynamoDB Developer Guide .
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, the corresponding
primary keys for the items to retrieve. Each table name can be
invoked only once.
Each element in the map consists of the following:
+ Keys - An array of primary key attribute values that define specific
items in the table. For each primary key, you must provide all of
the key attributes. For example, with a hash type primary key, you
only need to specify the hash attribute. For a hash-and-range type
primary key, you must specify both the hash attribute and the range
attribute.
+ AttributesToGet - One or more attributes to be retrieved from the
table. By default, all attributes are returned. If a specified
attribute is not found, it does not appear in the result. Note that
AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
+ ConsistentRead - If `True`, a strongly consistent read is used; if
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
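Example of the RequestItems shape described above (an illustrative
sketch; assumes a connection ``conn`` and a table named 'users' whose
hash key is a string attribute 'username')::
>>> result = conn.batch_get_item(request_items={
...     'users': {
...         'Keys': [
...             {'username': {'S': 'johndoe'}},
...             {'username': {'S': 'janedoe'}},
...         ],
...         'ConsistentRead': True,
...     },
... })
>>> items = result['Responses']['users']
>>> # Keys DynamoDB could not read come back here; retry with backoff.
>>> unprocessed = result['UnprocessedKeys']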
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
return self.make_request(action='BatchGetItem',
body=json.dumps(params))
def batch_write_item(self, request_items, return_consumed_capacity=None,
return_item_collection_metrics=None):
"""
The BatchWriteItem operation puts or deletes multiple items in
one or more tables. A single call to BatchWriteItem can write
up to 16 MB of data, which can comprise as many as 25 put or
delete requests. Individual items to be written can be as
large as 400 KB.
BatchWriteItem cannot update items. To update items, use the
UpdateItem API.
The individual PutItem and DeleteItem operations specified in
BatchWriteItem are atomic; however, BatchWriteItem as a whole
is not. If any requested operations fail because the table's
provisioned throughput is exceeded or an internal processing
failure occurs, the failed operations are returned in the
UnprocessedItems response parameter. You can investigate and
optionally resend the requests. Typically, you would call
BatchWriteItem in a loop. Each iteration would check for
unprocessed items and submit a new BatchWriteItem request with
those unprocessed items until all items have been processed.
Note that if none of the items can be processed due to
insufficient provisioned throughput on all of the tables in
the request, then BatchWriteItem will return a
ProvisionedThroughputExceededException .
If DynamoDB returns any unprocessed items, you should retry
the batch operation on those items. However, we strongly
recommend that you use an exponential backoff algorithm . If
you retry the batch operation immediately, the underlying read
or write requests can still fail due to throttling on the
individual tables. If you delay the batch operation using
exponential backoff, the individual requests in the batch are
much more likely to succeed.
For more information, go to `Batch Operations and Error
Handling`_ in the Amazon DynamoDB Developer Guide .
With BatchWriteItem , you can efficiently write or delete
large amounts of data, such as from Amazon Elastic MapReduce
(EMR), or copy data from another database into DynamoDB. In
order to improve performance with these large-scale
operations, BatchWriteItem does not behave in the same way as
individual PutItem and DeleteItem calls would. For example, you
cannot specify conditions on individual put and delete
requests, and BatchWriteItem does not return deleted items in
the response.
If you use a programming language that supports concurrency,
such as Java, you can use threads to write items in parallel.
Your application must include the necessary logic to manage
the threads. With languages that don't support threading, such
as PHP, you must update or delete the specified items one at a
time. In both situations, BatchWriteItem provides an
alternative where the API performs the specified put and
delete operations in parallel, giving you the power of the
thread pool approach without having to introduce complexity
into your application.
Parallel processing reduces latency, but each specified put
and delete request consumes the same number of write capacity
units whether it is processed in parallel or not. Delete
operations on nonexistent items consume one write capacity
unit.
If one or more of the following is true, DynamoDB rejects the
entire batch write operation:
+ One or more tables specified in the BatchWriteItem request
does not exist.
+ Primary key attributes specified on an item in the request
do not match those in the corresponding table's primary key
schema.
+ You try to perform multiple operations on the same item in
the same BatchWriteItem request. For example, you cannot put
and delete the same item in the same BatchWriteItem request.
+ There are more than 25 requests in the batch.
+ Any individual item in a batch exceeds 400 KB.
+ The total request size exceeds 16 MB.
:type request_items: map
:param request_items:
A map of one or more table names and, for each table, a list of
operations to be performed ( DeleteRequest or PutRequest ). Each
element in the map consists of the following:
+ DeleteRequest - Perform a DeleteItem operation on the specified item.
The item to be deleted is identified by a Key subelement:
+ Key - A map of primary key attribute values that uniquely identify
the item. Each entry in this map consists of an attribute name
and an attribute value. For each primary key, you must provide all
of the key attributes. For example, with a hash type primary key,
you only need to specify the hash attribute. For a hash-and-range
type primary key, you must specify both the hash attribute and the
range attribute.
+ PutRequest - Perform a PutItem operation on the specified item. The
item to be put is identified by an Item subelement:
+ Item - A map of attributes and their values. Each entry in this map
consists of an attribute name and an attribute value. Attribute
values must not be null; string and binary type attributes must
have lengths greater than zero; and set type attributes must not be
empty. Requests that contain empty values will be rejected with a
ValidationException exception. If you specify any attributes that
are part of an index key, then the data types for those attributes
must match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, the response
includes statistics about item collections, if any, that were
modified during the operation. If set to `NONE` (the default), no
statistics are returned.
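Example mixing a put and a delete in one call (an illustrative sketch;
assumes a connection ``conn`` and a table named 'users' whose hash key
is a string attribute 'username')::
>>> result = conn.batch_write_item(request_items={
...     'users': [
...         {'PutRequest': {'Item': {
...             'username': {'S': 'johndoe'},
...             'last_name': {'S': 'Doe'},
...         }}},
...         {'DeleteRequest': {'Key': {
...             'username': {'S': 'janedoe'},
...         }}},
...     ],
... })
>>> # Resubmit anything DynamoDB could not process, with exponential backoff.
>>> leftovers = result['UnprocessedItems']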
"""
params = {'RequestItems': request_items, }
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
return self.make_request(action='BatchWriteItem',
body=json.dumps(params))
def create_table(self, attribute_definitions, table_name, key_schema,
provisioned_throughput, local_secondary_indexes=None,
global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
That is, you can have two tables with the same name if you create
the tables in different regions.
CreateTable is an asynchronous operation. Upon receiving a
CreateTable request, DynamoDB immediately returns a response
with a TableStatus of `CREATING`. After the table is created,
DynamoDB sets the TableStatus to `ACTIVE`. You can perform
read and write operations only on an `ACTIVE` table.
You can optionally define secondary indexes on the new table,
as part of the CreateTable operation. If you want to create
multiple tables with secondary indexes on them, you must
create the tables sequentially. Only one table with secondary
indexes can be in the `CREATING` state at any given time.
You can use the DescribeTable API to check the table status.
:type attribute_definitions: list
:param attribute_definitions: An array of attributes that describe the
key schema for the table and indexes.
:type table_name: string
:param table_name: The name of the table to create.
:type key_schema: list
:param key_schema: Specifies the attributes that make up the primary
key for a table or an index. The attributes in KeySchema must also
be defined in the AttributeDefinitions array. For more information,
see `Data Model`_ in the Amazon DynamoDB Developer Guide .
Each KeySchemaElement in the array is composed of:
+ AttributeName - The name of this key attribute.
+ KeyType - Determines whether the key attribute is `HASH` or `RANGE`.
For a primary key that consists of a hash attribute, you must specify
exactly one element with a KeyType of `HASH`.
For a primary key that consists of hash and range attributes, you must
specify exactly two elements, in this order: The first element must
have a KeyType of `HASH`, and the second element must have a
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
DynamoDB Developer Guide .
:type local_secondary_indexes: list
:param local_secondary_indexes:
One or more local secondary indexes (the maximum is five) to be created
on the table. Each index is scoped to a given hash key value. There
is a 10 GB size limit per hash key; otherwise, the size of a local
secondary index is unconstrained.
Each local secondary index in the array includes the following:
+ IndexName - The name of the local secondary index. Must be unique
only for this table.
+ KeySchema - Specifies the key schema for the local secondary index.
The key schema must begin with the same hash key attribute as the
table.
+ Projection - Specifies attributes that are copied (projected) from
the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of:
+ ProjectionType - One of the following:
+ `KEYS_ONLY` - Only the index and primary keys are projected into the
index.
+ `INCLUDE` - Only the specified table attributes are projected into
the index. The list of projected attributes are in NonKeyAttributes
.
+ `ALL` - All of the table attributes are projected into the index.
+ NonKeyAttributes - A list of one or more non-key attribute names that
are projected into the secondary index. The total count of
attributes specified in NonKeyAttributes , summed across all of the
secondary indexes, must not exceed 20. If you project the same
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
:type global_secondary_indexes: list
:param global_secondary_indexes:
One or more global secondary indexes (the maximum is five) to be
created on the table. Each global secondary index in the array
includes the following:
+ IndexName - The name of the global secondary index. Must be unique
only for this table.
+ KeySchema - Specifies the key schema for the global secondary index.
+ Projection - Specifies attributes that are copied (projected) from
the table into the index. These are in addition to the primary key
attributes and index key attributes, which are automatically
projected. Each attribute specification is composed of:
+ ProjectionType - One of the following:
+ `KEYS_ONLY` - Only the index and primary keys are projected into the
index.
+ `INCLUDE` - Only the specified table attributes are projected into
the index. The list of projected attributes are in NonKeyAttributes
.
+ `ALL` - All of the table attributes are projected into the index.
+ NonKeyAttributes - A list of one or more non-key attribute names that
are projected into the secondary index. The total count of
attributes specified in NonKeyAttributes , summed across all of the
secondary indexes, must not exceed 20. If you project the same
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
+ ProvisionedThroughput - The provisioned throughput settings for the
global secondary index, consisting of read and write capacity
units.
:type provisioned_throughput: dict
:param provisioned_throughput: Represents the provisioned throughput
settings for a specified table or index. The settings can be
modified using the UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide .
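Example creating a hash-and-range table (an illustrative sketch;
assumes a connection ``conn``; the table and attribute names are
placeholders)::
>>> conn.create_table(
...     attribute_definitions=[
...         {'AttributeName': 'username', 'AttributeType': 'S'},
...         {'AttributeName': 'date_joined', 'AttributeType': 'N'},
...     ],
...     table_name='users',
...     key_schema=[
...         {'AttributeName': 'username', 'KeyType': 'HASH'},
...         {'AttributeName': 'date_joined', 'KeyType': 'RANGE'},
...     ],
...     provisioned_throughput={
...         'ReadCapacityUnits': 5,
...         'WriteCapacityUnits': 5,
...     })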
"""
params = {
'AttributeDefinitions': attribute_definitions,
'TableName': table_name,
'KeySchema': key_schema,
'ProvisionedThroughput': provisioned_throughput,
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
if global_secondary_indexes is not None:
params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
def delete_item(self, table_name, key, expected=None,
conditional_operator=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Deletes a single item in a table by primary key. You can
perform a conditional delete operation that deletes the item
if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the
item's attribute values in the same operation, using the
ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent
operation; running it multiple times on the same item or
attribute does not result in an error response.
Conditional deletes are useful for deleting items only if
specific conditions are met. If those conditions are met,
DynamoDB performs the delete. Otherwise, the item is not
deleted.
:type table_name: string
:param table_name: The name of the table from which to delete the item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to delete.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the DeleteItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluate to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were deleted. For DeleteItem , the valid
values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, the response
includes statistics about item collections, if any, that were
modified during the operation. If set to `NONE` (the default), no
statistics are returned.
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional DeleteItem to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
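Example of a conditional delete using the expression parameters above
(an illustrative sketch; assumes a connection ``conn`` and a table
named 'users' whose hash key is a string attribute 'username')::
>>> conn.delete_item(
...     table_name='users',
...     key={'username': {'S': 'johndoe'}},
...     condition_expression='attribute_exists(username)',
...     return_values='ALL_OLD')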
"""
params = {'TableName': table_name, 'Key': key, }
if expected is not None:
params['Expected'] = expected
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='DeleteItem',
body=json.dumps(params))
def delete_table(self, table_name):
"""
The DeleteTable operation deletes a table and all of its
items. After a DeleteTable request, the specified table is in
the `DELETING` state until DynamoDB completes the deletion. If
the table is in the `ACTIVE` state, you can delete it. If a
table is in `CREATING` or `UPDATING` states, then DynamoDB
returns a ResourceInUseException . If the specified table does
not exist, DynamoDB returns a ResourceNotFoundException . If
the table is already in the `DELETING` state, no error is
returned.
DynamoDB might continue to accept data read and write
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
When you delete a table, any indexes on that table are also
deleted.
Use the DescribeTable API to check the status of the table.
:type table_name: string
:param table_name: The name of the table to delete.
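Example (an illustrative sketch; assumes a connection ``conn`` and
uses 'users' as a placeholder table name)::
>>> conn.delete_table('users')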
"""
params = {'TableName': table_name, }
return self.make_request(action='DeleteTable',
body=json.dumps(params))
def describe_table(self, table_name):
"""
Returns information about the table, including the current
status of the table, when it was created, the primary key
schema, and any indexes on the table.
If you issue a DescribeTable request immediately after a
CreateTable request, DynamoDB might return a
ResourceNotFoundException. This is because DescribeTable uses
an eventually consistent query, and the metadata for your
table might not be available at that moment. Wait for a few
seconds, and then try the DescribeTable request again.
:type table_name: string
:param table_name: The name of the table to describe.
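Example (an illustrative sketch; assumes a connection ``conn`` and
uses 'users' as a placeholder table name)::
>>> description = conn.describe_table('users')
>>> description['Table']['TableStatus']  # e.g. 'ACTIVE' once created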
"""
params = {'TableName': table_name, }
return self.make_request(action='DescribeTable',
body=json.dumps(params))
def get_item(self, table_name, key, attributes_to_get=None,
consistent_read=None, return_consumed_capacity=None,
projection_expression=None, expression_attribute_names=None):
"""
The GetItem operation returns a set of attributes for the item
with the given primary key. If there is no matching item,
GetItem does not return any data.
GetItem provides an eventually consistent read by default. If
your application requires a strongly consistent read, set
ConsistentRead to `True`. Although a strongly consistent read
might take more time than an eventually consistent read, it
always returns the last updated value.
:type table_name: string
:param table_name: The name of the table containing the requested item.
:type key: map
:param key: A map of attribute names to AttributeValue objects,
representing the primary key of the item to retrieve.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
:type consistent_read: boolean
:param consistent_read: A value that if set to `True`, then the
operation uses strongly consistent reads; otherwise, eventually
consistent reads are used.
:type return_consumed_capacity: string
:param return_consumed_capacity: A value that if set to `TOTAL`, the
response includes ConsumedCapacity data for tables and indexes. If
set to `INDEXES`, the response includes ConsumedCapacity for
indexes. If set to `NONE` (the default), ConsumedCapacity is not
included in the response.
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
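Example of a strongly consistent read (an illustrative sketch; assumes
a connection ``conn`` and a table named 'users' whose hash key is a
string attribute 'username')::
>>> result = conn.get_item(
...     table_name='users',
...     key={'username': {'S': 'johndoe'}},
...     consistent_read=True)
>>> result.get('Item', {})  # empty if no matching item exists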
"""
params = {'TableName': table_name, 'Key': key, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
return self.make_request(action='GetItem',
body=json.dumps(params))
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Returns an array of table names associated with the current
account and endpoint. The output from ListTables is paginated,
with each page returning a maximum of 100 table names.
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
LastEvaluatedTableName in a previous operation, so that you can
obtain the next page of results.
:type limit: integer
:param limit: A maximum number of table names to return. If this
parameter is not specified, the limit is 100.
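Example of paging through every table name with
LastEvaluatedTableName (an illustrative sketch; assumes a connection
``conn``)::
>>> names = []
>>> kwargs = {}
>>> while True:
...     page = conn.list_tables(limit=100, **kwargs)
...     names.extend(page['TableNames'])
...     last = page.get('LastEvaluatedTableName')
...     if not last:
...         break
...     kwargs['exclusive_start_table_name'] = last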
"""
params = {}
if exclusive_start_table_name is not None:
params['ExclusiveStartTableName'] = exclusive_start_table_name
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTables',
body=json.dumps(params))
def put_item(self, table_name, item, expected=None, return_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
conditional_operator=None, condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Creates a new item, or replaces an old item with a new item.
If an item that has the same primary key as the new item
already exists in the specified table, the new item completely
replaces the existing item. You can perform a conditional put
operation (add a new item if one with the specified primary
key doesn't exist), or replace an existing item if it has
certain attribute values.
In addition to putting an item, you can also return the item's
attribute values in the same operation, using the ReturnValues
parameter.
When you add an item, the primary key attribute(s) are the
only required attributes. Attribute values cannot be null.
String and Binary type attributes must have lengths greater
than zero. Set type attributes cannot be empty. Requests with
empty values will be rejected with a ValidationException
exception.
You can request that PutItem return either a copy of the
original item (before the update) or a copy of the updated
item (after the update). For more information, see the
ReturnValues description below.
To prevent a new item from replacing an existing item, use a
conditional put operation with ComparisonOperator set to
`NULL` for the primary key attribute, or attributes.
For more information about using this API, see `Working with
Items`_ in the Amazon DynamoDB Developer Guide .
:type table_name: string
:param table_name: The name of the table to contain the item.
:type item: map
:param item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
Amazon DynamoDB Developer Guide .
Each element in the Item map is an AttributeValue object.
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the PutItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
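As an illustrative sketch only (the attribute name and value below are
hypothetical, not part of this API), an Expected map that requires the
item's ProductStatus to be "Available" before the write proceeds could
be built like this::

    expected = {
        'ProductStatus': {
            'ComparisonOperator': 'EQ',
            'AttributeValueList': [{'S': 'Available'}],
        },
    }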
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared before they were updated with the PutItem request. For
PutItem , the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - If PutItem overwrote an attribute name-value pair, then
the content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, the response
includes ConsumedCapacity data for tables and indexes. If set to
`INDEXES`, the response includes ConsumedCapacity for indexes. If
set to `NONE` (the default), ConsumedCapacity is not included in
the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, the response
includes statistics about item collections, if any, that were
modified during the operation. If set to `NONE` (the default), no
statistics are returned.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional PutItem operation to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: ` = | <> | < | > | <=
| >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
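A minimal usage sketch tying these parameters together (the connection
setup, table name, and attribute values are illustrative only)::

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    # Write the item only if no item with this hash key exists yet.
    conn.put_item(
        table_name='product-catalog',
        item={'Id': {'N': '6'}, 'ProductStatus': {'S': 'Available'}},
        condition_expression='attribute_not_exists(Id)',
        return_consumed_capacity='TOTAL')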
"""
params = {'TableName': table_name, 'Item': item, }
if expected is not None:
params['Expected'] = expected
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='PutItem',
body=json.dumps(params))
def query(self, table_name, key_conditions, index_name=None, select=None,
attributes_to_get=None, limit=None, consistent_read=None,
query_filter=None, conditional_operator=None,
scan_index_forward=None, exclusive_start_key=None,
return_consumed_capacity=None, projection_expression=None,
filter_expression=None, expression_attribute_names=None,
expression_attribute_values=None):
"""
A Query operation directly accesses items from a table using
the table primary key, or from an index using the index key.
You must provide a specific hash key value. You can narrow the
scope of the query by using comparison operators on the range
key value, or on the index key. You can use the
ScanIndexForward parameter to get results in forward or
reverse order, by range key or by index key.
Queries that do not return results consume the minimum number
of read capacity units for that type of read operation.
If the total number of items meeting the query criteria
exceeds the result set size limit of 1 MB, the query stops and
results are returned to the user with LastEvaluatedKey to
continue the query in a subsequent operation. Unlike a Scan
operation, a Query operation never returns both an empty
result set and a LastEvaluatedKey . The LastEvaluatedKey is
only provided if the results exceed 1 MB, or if you have used
Limit .
You can query a table, a local secondary index, or a global
secondary index. For a query on a table or on a local
secondary index, you can set ConsistentRead to true and obtain
a strongly consistent result. Global secondary indexes support
eventually consistent reads only, so do not specify
ConsistentRead when querying a global secondary index.
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type index_name: string
:param index_name: The name of an index to query. This index can be any
local secondary index or global secondary index on the table.
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index.
+ `ALL_ATTRIBUTES` - Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch the
entire item from the parent table. If the index is configured to
project all item attributes, then all of the data can be obtained
from the local secondary index, and no fetching is required.
+ `ALL_PROJECTED_ATTRIBUTES` - Allowed only when querying an index.
Retrieves all attributes that have been projected into the index.
If the index is configured to project all attributes, this return
value is equivalent to specifying `ALL_ATTRIBUTES`.
+ `COUNT` - Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in
AttributesToGet . This return value is equivalent to specifying
AttributesToGet without specifying any value for Select . If you
query a local secondary index and request only attributes that are
projected into that index, the operation will read only the index
and not the table. If any of the requested attributes are not
projected into the local secondary index, DynamoDB will fetch each
of these attributes from the parent table. This extra fetching
incurs additional throughput cost and latency. If you query a
global secondary index, you can only request attributes that are
projected into the index. Global secondary index queries cannot
fetch attributes from the parent table.
If neither Select nor AttributesToGet are specified, DynamoDB defaults
to `ALL_ATTRIBUTES` when accessing a table, and
`ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
both Select and AttributesToGet together in a single request,
unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
is equivalent to specifying AttributesToGet without any value for
Select .)
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
You cannot use both AttributesToGet and Select together in a Query
request, unless the value for Select is `SPECIFIC_ATTRIBUTES`.
(This usage is equivalent to specifying AttributesToGet without any
value for Select .)
If you query a local secondary index and request only attributes that
are projected into that index, the operation will read only the
index and not the table. If any of the requested attributes are not
projected into the local secondary index, DynamoDB will fetch each
of these attributes from the parent table. This extra fetching
incurs additional throughput cost and latency.
If you query a global secondary index, you can only request attributes
that are projected into the index. Global secondary index queries
cannot fetch attributes from the parent table.
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If DynamoDB processes the number of
items up to the limit while processing the results, it stops the
operation and returns the matching values up to that point, and a
key in LastEvaluatedKey to apply in a subsequent operation, so that
you can pick up where you left off. Also, if the processed data set
size exceeds 1 MB before DynamoDB reaches this limit, it stops the
operation and returns the matching values up to the limit, and a
key in LastEvaluatedKey to apply in a subsequent operation to
continue the operation. For more information, see `Query and Scan`_
in the Amazon DynamoDB Developer Guide .
:type consistent_read: boolean
:param consistent_read: If set to `True`, the operation uses strongly
consistent reads; otherwise, eventually consistent reads are used.
Strongly consistent reads are not supported on global secondary
indexes. If you query a global secondary index with ConsistentRead
set to `True`, you will receive an error message.
:type key_conditions: map
:param key_conditions: The selection criteria for the query. For a
query on a table, you can have conditions only on the table primary
key attributes. You must specify the hash key attribute name and
value as an `EQ` condition. You can optionally specify a second
condition, referring to the range key attribute. If you do not
specify a range key condition, all items under the hash key will be
fetched and processed. Any filters will be applied after this.
For a query on an index, you can have conditions only on the index key
attributes. You must specify the index hash attribute name and
value as an EQ condition. You can optionally specify a second
condition, referring to the index key range attribute.
Each KeyConditions element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes, for
example, equals, greater than, less than, and so on. For
KeyConditions , only the following comparison operators are
supported: `EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN` The
following are descriptions of these comparison operators.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
of type String, Number, or Binary (not a set type). If an item
contains an AttributeValue element of a different type than the one
specified in the request, the value does not match. For example,
`{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
equal `{"NS":["6", "2", "1"]}`.
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
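For example, a KeyConditions map that matches one hash key value and a
range of range key values could look like this (the attribute names
and values are illustrative only)::

    key_conditions = {
        'ForumName': {
            'ComparisonOperator': 'EQ',
            'AttributeValueList': [{'S': 'Amazon DynamoDB'}],
        },
        'LastPostDateTime': {
            'ComparisonOperator': 'BETWEEN',
            'AttributeValueList': [{'S': '2015-01-01'},
                                   {'S': '2015-12-31'}],
        },
    }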
:type query_filter: map
:param query_filter:
There is a newer parameter available. Use FilterExpression instead.
Note that if you use QueryFilter and FilterExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A condition that evaluates the query results after the items are read
and returns only the desired values.
Query filters are applied after the items are read, so they do not
limit the capacity used.
If you specify more than one condition in the QueryFilter map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
QueryFilter does not allow key attributes. You cannot define a filter
condition on a hash key or range key.
Each QueryFilter element consists of an attribute name to compare,
along with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions. For information on specifying data
types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
Developer Guide .
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. The following
comparison operators are available: `EQ | NE | LE | LT | GE | GT |
NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
BETWEEN` For complete descriptions of all comparison operators, see
`API_Condition.html`_.
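For example, a QueryFilter that keeps only items whose non-key Replies
attribute is greater than 10 could look like this (the attribute name
is illustrative only)::

    query_filter = {
        'Replies': {
            'ComparisonOperator': 'GT',
            'AttributeValueList': [{'N': '10'}],
        },
    }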
:type conditional_operator: string
:param conditional_operator:
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the QueryFilter map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type scan_index_forward: boolean
:param scan_index_forward: A value that specifies ascending (true) or
descending (false) traversal of the index. DynamoDB returns results
reflecting the requested order determined by the range key. If the
data type is Number, the results are returned in numeric order. For
type String, the results are returned in order of ASCII character
code values. For type Binary, DynamoDB treats each byte of the
binary data as unsigned when it compares binary values.
If ScanIndexForward is not specified, the results are returned in
ascending order.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, the response
includes ConsumedCapacity data for tables and indexes. If set to
`INDEXES`, the response includes ConsumedCapacity for indexes. If
set to `NONE` (the default), ConsumedCapacity is not included in
the response.
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type filter_expression: string
:param filter_expression: A condition that evaluates the query results
after the items are read and returns only the desired values.
The condition you specify is applied to the items queried; any items
that do not match the expression are not returned.
Filter expressions are applied after the items are read, so they do not
limit the capacity used.
A FilterExpression has the same syntax as a ConditionExpression . For
more information on expression syntax, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
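A minimal usage sketch (the connection setup, table name, and
attribute names are illustrative only)::

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    result = conn.query(
        table_name='forum-threads',
        key_conditions={
            'ForumName': {
                'ComparisonOperator': 'EQ',
                'AttributeValueList': [{'S': 'Amazon DynamoDB'}],
            },
        },
        scan_index_forward=False,
        limit=25,
        return_consumed_capacity='TOTAL')
    items = result.get('Items', [])
    last_key = result.get('LastEvaluatedKey')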
"""
params = {
'TableName': table_name,
'KeyConditions': key_conditions,
}
if index_name is not None:
params['IndexName'] = index_name
if select is not None:
params['Select'] = select
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if consistent_read is not None:
params['ConsistentRead'] = consistent_read
if query_filter is not None:
params['QueryFilter'] = query_filter
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if scan_index_forward is not None:
params['ScanIndexForward'] = scan_index_forward
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if filter_expression is not None:
params['FilterExpression'] = filter_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='Query',
body=json.dumps(params))
def scan(self, table_name, attributes_to_get=None, limit=None,
select=None, scan_filter=None, conditional_operator=None,
exclusive_start_key=None, return_consumed_capacity=None,
total_segments=None, segment=None, projection_expression=None,
filter_expression=None, expression_attribute_names=None,
expression_attribute_values=None):
"""
The Scan operation returns one or more items and item
attributes by accessing every item in the table. To have
DynamoDB return fewer items, you can provide a ScanFilter
operation.
If the total number of scanned items exceeds the maximum data
set size limit of 1 MB, the scan stops and results are
returned to the user with a LastEvaluatedKey value to continue
the scan in a subsequent operation. The results also include
the number of items exceeding the limit. A scan can result in
no table data meeting the filter criteria.
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
faster performance on large tables, applications can request a
parallel Scan operation by specifying the Segment and
TotalSegments parameters. For more information, see `Parallel
Scan`_ in the Amazon DynamoDB Developer Guide .
:type table_name: string
:param table_name: The name of the table containing the requested
items.
:type attributes_to_get: list
:param attributes_to_get:
There is a newer parameter available. Use ProjectionExpression instead.
Note that if you use AttributesToGet and ProjectionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter allows you to retrieve lists or maps; however, it cannot
retrieve individual list or map elements.
The names of one or more attributes to retrieve. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
Note that AttributesToGet has no effect on provisioned throughput
consumption. DynamoDB determines capacity units consumed based on
item size, not on the amount of data that is returned to an
application.
:type limit: integer
:param limit: The maximum number of items to evaluate (not necessarily
the number of matching items). If DynamoDB processes the number of
items up to the limit while processing the results, it stops the
operation and returns the matching values up to that point, and a
key in LastEvaluatedKey to apply in a subsequent operation, so that
you can pick up where you left off. Also, if the processed data set
size exceeds 1 MB before DynamoDB reaches this limit, it stops the
operation and returns the matching values up to the limit, and a
key in LastEvaluatedKey to apply in a subsequent operation to
continue the operation. For more information, see `Query and Scan`_
in the Amazon DynamoDB Developer Guide .
:type select: string
:param select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, or the
count of matching items.
+ `ALL_ATTRIBUTES` - Returns all of the item attributes.
+ `COUNT` - Returns the number of matching items, rather than the
matching items themselves.
+ `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in
AttributesToGet . This return value is equivalent to specifying
AttributesToGet without specifying any value for Select .
If neither Select nor AttributesToGet are specified, DynamoDB defaults
to `ALL_ATTRIBUTES`. You cannot use both AttributesToGet and Select
together in a single request, unless the value for Select is
`SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying
AttributesToGet without any value for Select .)
:type scan_filter: map
:param scan_filter:
There is a newer parameter available. Use FilterExpression instead.
Note that if you use ScanFilter and FilterExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A condition that evaluates the scan results and returns only the
desired values.
If you specify more than one condition in the ScanFilter map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
Each ScanFilter element consists of an attribute name to compare, along
with the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
operator specified in ComparisonOperator . For type Number, value
comparisons are numeric. String value comparisons for greater than,
equals, or less than are based on ASCII character code values. For
example, `a` is greater than `A`, and `a` is greater than `B`. For
a list of code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions. For information on specifying data
types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
Developer Guide .
+ ComparisonOperator - A comparator for evaluating attributes. For
example, equals, greater than, less than, etc. The following
comparison operators are available: `EQ | NE | LE | LT | GE | GT |
NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
BETWEEN` For complete descriptions of all comparison operators, see
`Condition`_.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the ScanFilter map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
In a parallel scan, a Scan request that includes ExclusiveStartKey must
specify the same segment whose previous Scan returned the
corresponding value of LastEvaluatedKey .
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, the response
includes ConsumedCapacity data for tables and indexes. If set to
`INDEXES`, the response includes ConsumedCapacity for indexes. If
set to `NONE` (the default), ConsumedCapacity is not included in
the response.
:type total_segments: integer
:param total_segments: For a parallel Scan request, TotalSegments
represents the total number of segments into which the Scan
operation will be divided. The value of TotalSegments corresponds
to the number of application workers that will perform the parallel
scan. For example, if you want to scan a table using four
application threads, specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 1000000. If you specify a TotalSegments value
of 1, the Scan operation will be sequential rather than parallel.
If you specify TotalSegments , you must also specify Segment .
:type segment: integer
:param segment: For a parallel Scan request, Segment identifies an
individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread specifies a Segment value of 0, the
second thread specifies 1, and so on.
The value of LastEvaluatedKey returned from a parallel Scan request
must be used as ExclusiveStartKey with the same segment ID in a
subsequent Scan operation.
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments .
If you specify Segment , you must also specify TotalSegments .
:type projection_expression: string
:param projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas.
If no attribute names are specified, then all attributes will be
returned. If any of the requested attributes are not found, they
will not appear in the result.
For more information on projection expressions, go to `Accessing Item
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type filter_expression: string
:param filter_expression: A condition that evaluates the scan results
and returns only the desired values.
The condition you specify is applied to the items scanned; any items
that do not match the expression are not returned.
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
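A minimal sketch of one worker in a parallel scan (the connection
setup, table name, and the `process` callback are illustrative only;
each worker would pass its own segment number)::

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()
    last_key = None
    while True:
        kwargs = {'total_segments': 4, 'segment': 0, 'limit': 100}
        if last_key is not None:
            kwargs['exclusive_start_key'] = last_key
        page = conn.scan('my-table', **kwargs)
        process(page.get('Items', []))  # hypothetical callback
        last_key = page.get('LastEvaluatedKey')
        if last_key is None:
            break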
"""
params = {'TableName': table_name, }
if attributes_to_get is not None:
params['AttributesToGet'] = attributes_to_get
if limit is not None:
params['Limit'] = limit
if select is not None:
params['Select'] = select
if scan_filter is not None:
params['ScanFilter'] = scan_filter
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if exclusive_start_key is not None:
params['ExclusiveStartKey'] = exclusive_start_key
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if total_segments is not None:
params['TotalSegments'] = total_segments
if segment is not None:
params['Segment'] = segment
if projection_expression is not None:
params['ProjectionExpression'] = projection_expression
if filter_expression is not None:
params['FilterExpression'] = filter_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='Scan',
body=json.dumps(params))
def update_item(self, table_name, key, attribute_updates=None,
expected=None, conditional_operator=None,
return_values=None, return_consumed_capacity=None,
return_item_collection_metrics=None,
update_expression=None, condition_expression=None,
expression_attribute_names=None,
expression_attribute_values=None):
"""
Edits an existing item's attributes, or adds a new item to the
table if it does not already exist. You can put, delete, or
add attribute values. You can also perform a conditional
update (insert a new attribute name-value pair if it doesn't
exist, or replace an existing name-value pair if it has
certain expected attribute values).
You can also return the item's attribute values in the same
UpdateItem operation using the ReturnValues parameter.
:type table_name: string
:param table_name: The name of the table containing the item to update.
:type key: map
:param key: The primary key of the item to be updated. Each element
consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For
example, with a hash type primary key, you only need to specify the
hash attribute. For a hash-and-range type primary key, you must
specify both the hash attribute and the range attribute.
:type attribute_updates: map
:param attribute_updates:
There is a newer parameter available. Use UpdateExpression instead.
Note that if you use AttributeUpdates and UpdateExpression at the
same time, DynamoDB will return a ValidationException exception.
This parameter can be used for modifying top-level attributes; however,
it does not support individual list or map elements.
The names of attributes to be modified, the action to perform on each,
and the new value for each. If you are updating an attribute that
is an index key attribute for any indexes on that table, the
attribute type must match the index key type defined in the
AttributesDefinition of the table description. You can use
UpdateItem to update any nonkey attributes.
Attribute values cannot be null. String and Binary type attributes must
have lengths greater than zero. Set type attributes must not be
empty. Requests with empty values will be rejected with a
ValidationException exception.
Each AttributeUpdates element consists of an attribute name to modify,
along with the following:
+ Value - The new value, if applicable, for this attribute.
+ Action - A value that specifies how to perform the update. This
action is only valid for an existing attribute whose data type is
Number or is a set; do not use `ADD` for other data types. If an
item with the specified primary key is found in the table, the
following values perform the following actions:
+ `PUT` - Adds the specified attribute to the item. If the attribute
already exists, it is replaced by the new value.
+ `DELETE` - Removes the attribute and its value, if no value is
specified for `DELETE`. The data type of the specified value must
match the existing value's data type. If a set of values is
specified, then those values are subtracted from the old set. For
example, if the attribute value was the set `[a,b,c]` and the
`DELETE` action specifies `[a,c]`, then the final attribute value
is `[b]`. Specifying an empty set is an error.
+ `ADD` - Adds the specified value to the item, if the attribute does
not already exist. If the attribute does exist, then the behavior
of `ADD` depends on the data type of the attribute:
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
attribute. If you use `ADD` to increment or decrement a number
value for an item that doesn't exist before the update, DynamoDB
uses 0 as the initial value. Similarly, if you use `ADD` for an
existing item to increment or decrement an attribute value that
doesn't exist before the update, DynamoDB uses `0` as the initial
value. For example, suppose that the item you want to update
doesn't have an attribute named itemcount , but you decide to `ADD`
the number `3` to this attribute anyway. DynamoDB will create the
itemcount attribute, set its initial value to `0`, and finally add
`3` to it. The result will be a new itemcount attribute, with a
value of `3`.
+ If the existing data type is a set, and if Value is also a set, then
Value is appended to the existing set. For example, if the
attribute value is the set `[1,2]`, and the `ADD` action specified
`[3]`, then the final attribute value is `[1,2,3]`. An error occurs
if an `ADD` action is specified for a set attribute and the
attribute type specified does not match the existing set type. Both
sets must have the same primitive data type. For example, if the
existing data type is a set of strings, Value must also be a set of
strings.
If no item with the specified key is found in the table, the following
values perform the following actions:
+ `PUT` - Causes DynamoDB to create a new item with the specified
primary key, and then adds the attribute.
+ `DELETE` - Nothing happens, because attributes cannot be deleted from
a nonexistent item. The operation succeeds, but DynamoDB does not
create a new item.
+ `ADD` - Causes DynamoDB to create an item with the supplied primary
key and number (or set of numbers) for the attribute value. The
only data types allowed are Number and Number Set.
If you specify any attributes that are part of an index key, then the
data types for those attributes must match those of the schema in
the table's attribute definition.
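For example, an AttributeUpdates map that increments a numeric counter
and replaces a status attribute could look like this (the attribute
names and values are illustrative only)::

    attribute_updates = {
        'itemcount': {'Action': 'ADD', 'Value': {'N': '3'}},
        'ProductStatus': {'Action': 'PUT', 'Value': {'S': 'Backordered'}},
    }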
:type expected: map
:param expected:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use Expected and ConditionExpression at the same
time, DynamoDB will return a ValidationException exception.
This parameter does not support lists or maps.
A map of attribute/condition pairs. Expected provides a conditional
block for the UpdateItem operation.
Each element of Expected consists of an attribute name, a comparison
operator, and one or more values. DynamoDB compares the attribute
with the value(s) you supplied, using the comparison operator. For
each Expected element, the result of the evaluation is either true
or false.
If you specify more than one element in the Expected map, then by
default all of the conditions must evaluate to true. In other
words, the conditions are ANDed together. (You can use the
ConditionalOperator parameter to OR the conditions instead. If you
do this, then at least one of the conditions must evaluate to true,
rather than all of them.)
If the Expected map evaluates to true, then the conditional operation
succeeds; otherwise, it fails.
Expected contains the following:
+ AttributeValueList - One or more values to evaluate against the
supplied attribute. The number of values in the list depends on the
ComparisonOperator being used. For type Number, value comparisons
are numeric. String value comparisons for greater than, equals, or
less than are based on ASCII character code values. For example,
`a` is greater than `A`, and `a` is greater than `B`. For a list of
code values, see
`http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
For type Binary, DynamoDB treats each byte of the binary data as
unsigned when it compares binary values, for example when
evaluating query expressions.
+ ComparisonOperator - A comparator for evaluating attributes in the
AttributeValueList . When performing the comparison, DynamoDB uses
strongly consistent reads. The following comparison operators are
available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
are descriptions of each comparison operator.
+ `EQ` : Equal. `EQ` is supported for all datatypes, including lists
and maps. AttributeValueList can contain only one AttributeValue
element of type String, Number, Binary, String Set, Number Set, or
Binary Set. If an item contains an AttributeValue element of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `NE` : Not equal. `NE` is supported for all datatypes, including
lists and maps. AttributeValueList can contain only one
AttributeValue of type String, Number, Binary, String Set, Number
Set, or Binary Set. If an item contains an AttributeValue of a
different type than the one specified in the request, the value
does not match. For example, `{"S":"6"}` does not equal
`{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
"1"]}`. > <li>
+ `LE` : Less than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `LT` : Less than. AttributeValueList can contain only one
AttributeValue of type String, Number, or Binary (not a set type).
If an item contains an AttributeValue element of a different type
than the one specified in the request, the value does not match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GE` : Greater than or equal. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `GT` : Greater than. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If an item contains an AttributeValue element of a different
type than the one specified in the request, the value does not
match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
`{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. > <li>
+ `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
existence of an attribute, not its data type. If the data type of
attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
result is a Boolean true . This result is because the attribute "
`a`" exists; its data type is not relevant to the `NOT_NULL`
comparison operator.
+ `NULL` : The attribute does not exist. `NULL` is supported for all
datatypes, including lists and maps. This operator tests for the
nonexistence of an attribute, not its data type. If the data type
of attribute " `a`" is null, and you evaluate it using `NULL`, the
result is a Boolean false . This is because the attribute " `a`"
exists; its data type is not relevant to the `NULL` comparison
operator.
+ `CONTAINS` : Checks for a subsequence, or value in a set.
AttributeValueList can contain only one AttributeValue element of
type String, Number, or Binary (not a set type). If the target
attribute of the comparison is of type String, then the operator
checks for a substring match. If the target attribute of the
comparison is of type Binary, then the operator looks for a
subsequence of the target that matches the input. If the target
attribute of the comparison is a set (" `SS`", " `NS`", or "
`BS`"), then the operator evaluates to true if it finds an exact
match with any member of the set. CONTAINS is supported for lists:
When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
`b`" cannot be a set, a map, or a list.
+ `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
value in a set. AttributeValueList can contain only one
AttributeValue element of type String, Number, or Binary (not a set
type). If the target attribute of the comparison is a String, then
the operator checks for the absence of a substring match. If the
target attribute of the comparison is Binary, then the operator
checks for the absence of a subsequence of the target that matches
the input. If the target attribute of the comparison is a set ("
`SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating " `a NOT
CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
set, a map, or a list.
+ `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
only one AttributeValue of type String or Binary (not a Number or a
set type). The target attribute of the comparison must be of type
String or Binary (not a Number or a set type).
+ `IN` : Checks for matching elements within two sets.
AttributeValueList can contain one or more AttributeValue elements
of type String, Number, or Binary (not a set type). These
attributes are compared against an existing set type attribute of
an item. If any elements of the input set are present in the item
attribute, the expression evaluates to true.
+ `BETWEEN` : Greater than or equal to the first value, and less than
or equal to the second value. AttributeValueList must contain two
AttributeValue elements of the same type, either String, Number, or
Binary (not a set type). A target attribute matches if the target
value is greater than, or equal to, the first element and less
than, or equal to, the second element. If an item contains an
AttributeValue element of a different type than the one specified
in the request, the value does not match. For example, `{"S":"6"}`
does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
to `{"NS":["6", "2", "1"]}`
For usage examples of AttributeValueList and ComparisonOperator , see
`Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
Guide .
For backward compatibility with previous DynamoDB releases, the
following parameters can be used instead of AttributeValueList and
ComparisonOperator :
+ Value - A value for DynamoDB to compare with an attribute.
+ Exists - A Boolean value that causes DynamoDB to evaluate the value
before attempting the conditional operation:
+ If Exists is `True`, DynamoDB will check to see if that attribute
value already exists in the table. If it is found, then the
condition evaluates to true; otherwise the condition evaluates to
false.
+ If Exists is `False`, DynamoDB assumes that the attribute value does
not exist in the table. If in fact the value does not exist, then
the assumption is valid and the condition evaluates to true. If the
value is found, despite the assumption that it does not exist, the
condition evaluates to false.
Note that the default value for Exists is `True`.
The Value and Exists parameters are incompatible with
AttributeValueList and ComparisonOperator . Note that if you use
both sets of parameters at once, DynamoDB will return a
ValidationException exception.
:type conditional_operator: string
:param conditional_operator:
There is a newer parameter available. Use ConditionExpression instead.
Note that if you use ConditionalOperator and ConditionExpression at
the same time, DynamoDB will return a ValidationException
exception.
This parameter does not support lists or maps.
A logical operator to apply to the conditions in the Expected map:
+ `AND` - If all of the conditions evaluate to true, then the entire
map evaluates to true.
+ `OR` - If at least one of the conditions evaluates to true, then the
entire map evaluates to true.
If you omit ConditionalOperator , then `AND` is the default.
The operation will succeed only if the entire map evaluates to true.
:type return_values: string
:param return_values:
Use ReturnValues if you want to get the item attributes as they
appeared either before or after they were updated. For UpdateItem ,
the valid values are:
+ `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
then nothing is returned. (This setting is the default for
ReturnValues .)
+ `ALL_OLD` - If UpdateItem overwrote an attribute name-value pair,
then the content of the old item is returned.
+ `UPDATED_OLD` - The old versions of only the updated attributes are
returned.
+ `ALL_NEW` - All of the attributes of the new version of the item are
returned.
+ `UPDATED_NEW` - The new versions of only the updated attributes are
returned.
:type return_consumed_capacity: string
:param return_consumed_capacity: If set to `TOTAL`, the response
includes ConsumedCapacity data for tables and indexes. If set to
`INDEXES`, the response includes ConsumedCapacity for indexes. If
set to `NONE` (the default), ConsumedCapacity is not included in
the response.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, the response
includes statistics about item collections, if any, that were
modified during the operation. If set to `NONE` (the default), no
statistics are returned.
:type update_expression: string
:param update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
The following action values are available for UpdateExpression .
+ `SET` - Adds one or more attributes and values to an item. If any of
these attributes already exist, they are replaced by the new values.
You can also use `SET` to add or subtract from an attribute that is
of type Number. `SET` supports the following functions:
+ `if_not_exists (path, operand)` - if the item does not contain an
attribute at the specified path, then `if_not_exists` evaluates to
operand; otherwise, it evaluates to path. You can use this function
to avoid overwriting an attribute that may already be present in
the item.
+ `list_append (operand, operand)` - evaluates to a list with a new
element added to it. You can append the new element to the start or
the end of the list by reversing the order of the operands.
These function names are case-sensitive.
+ `REMOVE` - Removes one or more attributes from an item.
+ `ADD` - Adds the specified value to the item, if the attribute does
not already exist. If the attribute does exist, then the behavior
of `ADD` depends on the data type of the attribute:
+ If the existing attribute is a number, and if Value is also a number,
then Value is mathematically added to the existing attribute. If
Value is a negative number, then it is subtracted from the existing
attribute. If you use `ADD` to increment or decrement a number
value for an item that doesn't exist before the update, DynamoDB
uses `0` as the initial value. Similarly, if you use `ADD` for an
existing item to increment or decrement an attribute value that
doesn't exist before the update, DynamoDB uses `0` as the initial
value. For example, suppose that the item you want to update
doesn't have an attribute named itemcount , but you decide to `ADD`
the number `3` to this attribute anyway. DynamoDB will create the
itemcount attribute, set its initial value to `0`, and finally add
`3` to it. The result will be a new itemcount attribute in the
item, with a value of `3`.
+ If the existing data type is a set and if Value is also a set, then
Value is added to the existing set. For example, if the attribute
value is the set `[1,2]`, and the `ADD` action specified `[3]`,
then the final attribute value is `[1,2,3]`. An error occurs if an
`ADD` action is specified for a set attribute and the attribute
type specified does not match the existing set type. Both sets must
have the same primitive data type. For example, if the existing
data type is a set of strings, the Value must also be a set of
strings.
The `ADD` action only supports Number and set data types. In addition,
`ADD` can only be used on top-level attributes, not nested
attributes.
+ `DELETE` - Deletes an element from a set. If a set of values is
specified, then those values are subtracted from the old set. For
example, if the attribute value was the set `[a,b,c]` and the
`DELETE` action specifies `[a,c]`, then the final attribute value
is `[b]`. Specifying an empty set is an error. The `DELETE` action
only supports Number and set data types. In addition, `DELETE` can
only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the
following: `SET a=:value1, b=:value2 DELETE :value3, :value4,
:value5`
For more information on update expressions, go to `Modifying Items and
Attributes`_ in the Amazon DynamoDB Developer Guide .
:type condition_expression: string
:param condition_expression: A condition that must be satisfied in
order for a conditional update to succeed.
An expression can contain any of the following:
+ Boolean functions: `attribute_exists | attribute_not_exists |
contains | begins_with` These function names are case-sensitive.
+ Comparison operators: `= | <> | < | > | <= | >= | BETWEEN | IN`
+ Logical operators: `AND | OR | NOT`
For more information on condition expressions, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_names: map
:param expression_attribute_names: One or more substitution tokens for
simplifying complex expressions. The following are some use cases
for using ExpressionAttributeNames :
+ To shorten an attribute name that is very long or unwieldy in an
expression.
+ To create a placeholder for repeating occurrences of an attribute
name in an expression.
+ To prevent special characters in an attribute name from being
misinterpreted in an expression.
Use the **#** character in an expression to dereference an attribute
name. For example, consider the following expression:
+ `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
= "Jones"`
Now suppose that you specified the following for
ExpressionAttributeNames :
+ `{"#name":"order.customerInfo.LastName"}`
The expression can now be simplified as follows:
+ `#name = "Smith" OR #name = "Jones"`
For more information on expression attribute names, go to `Accessing
Item Attributes`_ in the Amazon DynamoDB Developer Guide .
:type expression_attribute_values: map
:param expression_attribute_values: One or more values that can be
substituted in an expression.
Use the **:** (colon) character in an expression to dereference an
attribute value. For example, suppose that you wanted to check
whether the value of the ProductStatus attribute was one of the
following:
`Available | Backordered | Discontinued`
You would first need to specify ExpressionAttributeValues as follows:
`{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
":disc":{"S":"Discontinued"} }`
You could then use these values in an expression, such as this:
`ProductStatus IN (:avail, :back, :disc)`
For more information on expression attribute values, go to `Specifying
Conditions`_ in the Amazon DynamoDB Developer Guide .
"""
params = {'TableName': table_name, 'Key': key, }
if attribute_updates is not None:
params['AttributeUpdates'] = attribute_updates
if expected is not None:
params['Expected'] = expected
if conditional_operator is not None:
params['ConditionalOperator'] = conditional_operator
if return_values is not None:
params['ReturnValues'] = return_values
if return_consumed_capacity is not None:
params['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics is not None:
params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
if update_expression is not None:
params['UpdateExpression'] = update_expression
if condition_expression is not None:
params['ConditionExpression'] = condition_expression
if expression_attribute_names is not None:
params['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values is not None:
params['ExpressionAttributeValues'] = expression_attribute_values
return self.make_request(action='UpdateItem',
body=json.dumps(params))
def update_table(self, table_name, provisioned_throughput=None,
global_secondary_index_updates=None,
attribute_definitions=None):
"""
Updates the provisioned throughput for the given table, or
manages the global secondary indexes on the table.
You can increase or decrease the table's provisioned
throughput values within the maximums and minimums listed in
the `Limits`_ section in the Amazon DynamoDB Developer Guide .
In addition, you can use UpdateTable to add, modify or delete
global secondary indexes on the table. For more information,
see `Managing Global Secondary Indexes`_ in the Amazon
DynamoDB Developer Guide .
The table must be in the `ACTIVE` state for UpdateTable to
succeed. UpdateTable is an asynchronous operation; while
executing the operation, the table is in the `UPDATING` state.
While the table is in the `UPDATING` state, the table still
has the provisioned throughput from before the call. The
table's new provisioned throughput settings go into effect
when the table returns to the `ACTIVE` state; at that point,
the UpdateTable operation is complete.
:type attribute_definitions: list
:param attribute_definitions: An array of attributes that describe the
key schema for the table and indexes. If you are adding a new
global secondary index to the table, AttributeDefinitions must
include the key element(s) of the new index.
:type table_name: string
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
:param provisioned_throughput: Represents the provisioned throughput
settings for a specified table or index. The settings can be
modified using the UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide .
:type global_secondary_index_updates: list
:param global_secondary_index_updates:
An array of one or more global secondary indexes for the table. For
each index in the array, you can specify one action:
+ Create - add a new global secondary index to the table.
+ Update - modify the provisioned throughput settings of an existing
global secondary index.
+ Delete - remove a global secondary index from the table.
"""
params = {'TableName': table_name, }
if attribute_definitions is not None:
params['AttributeDefinitions'] = attribute_definitions
if provisioned_throughput is not None:
params['ProvisionedThroughput'] = provisioned_throughput
if global_secondary_index_updates is not None:
params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.host,
'Content-Type': 'application/x-amz-json-1.0',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body, host=self.host)
response = self._mexe(http_request, sender=None,
override_num_retries=self.NumberRetries,
retry_handler=self._retry_handler)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
data = json.loads(response_body)
if 'ProvisionedThroughputExceededException' in data.get('__type'):
self.throughput_exceeded_events += 1
msg = "%s, retry attempt %s" % (
'ProvisionedThroughputExceededException',
i
)
next_sleep = self._truncated_exponential_time(i)
i += 1
status = (msg, i, next_sleep)
if i == self.NumberRetries:
# If this was our last retry attempt, raise
# a specific error saying that the throughput
# was exceeded.
raise exceptions.ProvisionedThroughputExceededException(
response.status, response.reason, data)
elif 'ConditionalCheckFailedException' in data.get('__type'):
raise exceptions.ConditionalCheckFailedException(
response.status, response.reason, data)
elif 'ValidationException' in data.get('__type'):
raise exceptions.ValidationException(
response.status, response.reason, data)
else:
raise self.ResponseError(response.status, response.reason,
data)
expected_crc32 = response.getheader('x-amz-crc32')
if self._validate_checksums and expected_crc32 is not None:
boto.log.debug('Validating crc32 checksum for body: %s',
response.read())
actual_crc32 = crc32(response.read()) & 0xffffffff
expected_crc32 = int(expected_crc32)
if actual_crc32 != expected_crc32:
msg = ("The calculated checksum %s did not match the expected "
"checksum %s" % (actual_crc32, expected_crc32))
status = (msg, i + 1, self._truncated_exponential_time(i))
return status
def _truncated_exponential_time(self, i):
if i == 0:
next_sleep = 0
else:
next_sleep = min(0.05 * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
return next_sleep
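# --- Usage sketch (editor's addition; not part of the vendored boto source). ---
# A minimal, hedged example of the UpdateItem call defined above, using an
# update expression, a condition expression, and substitution tokens. It
# assumes the surrounding class is boto.dynamodb2.layer1.DynamoDBConnection
# (as in upstream boto), that AWS credentials are available from the boto
# config or environment, and that a table named 'users' with a string hash
# key 'username' already exists -- the table and attribute names below are
# placeholders. Note that throttled requests are retried with truncated
# exponential backoff, sleeping min(0.05 * 2**i, max_retry_delay) seconds on
# attempt i (see _truncated_exponential_time above).
if __name__ == '__main__':
    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()  # region/credentials resolved from boto config
    result = conn.update_item(
        table_name='users',                        # placeholder table name
        key={'username': {'S': 'johndoe'}},
        # SET one attribute and ADD to a numeric counter in a single expression.
        update_expression='SET #st = :avail ADD logins :one',
        # Only apply the update if the item already exists.
        condition_expression='attribute_exists(username)',
        expression_attribute_names={'#st': 'account_status'},
        expression_attribute_values={
            ':avail': {'S': 'Available'},
            ':one': {'N': '1'},
        },
        return_values='UPDATED_NEW',
    )
    print(result)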
| /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb2/layer1.py | 0.785309 | 0.434281 | layer1.py | pypi |
class ResultSet(object):
"""
A class used to lazily handle page-to-page navigation through a set of
results.
It presents a transparent iterator interface, so that all the user has
to do is use it in a typical ``for`` loop (or list comprehension, etc.)
to fetch results, even if they weren't present in the current page of
results.
This is used by the ``Table.query`` & ``Table.scan`` methods.
Example::
>>> users = Table('users')
>>> results = ResultSet()
>>> results.to_call(users.query, username__gte='johndoe')
# Now iterate. When it runs out of results, it'll fetch the next page.
>>> for res in results:
... print res['username']
"""
def __init__(self, max_page_size=None):
super(ResultSet, self).__init__()
self.the_callable = None
self.call_args = []
self.call_kwargs = {}
self._results = []
self._offset = -1
self._results_left = True
self._last_key_seen = None
self._fetches = 0
self._max_page_size = max_page_size
self._limit = None
@property
def first_key(self):
return 'exclusive_start_key'
def _reset(self):
"""
Resets the internal state of the ``ResultSet``.
This prevents results from being cached long-term & consuming
excess memory.
Largely internal.
"""
self._results = []
self._offset = 0
def __iter__(self):
return self
def __next__(self):
self._offset += 1
if self._offset >= len(self._results):
if self._results_left is False:
raise StopIteration()
self.fetch_more()
# It's possible that previous call to ``fetch_more`` may not return
# anything useful but there may be more results. Loop until we get
# something back, making sure we guard for no results left.
while not len(self._results) and self._results_left:
self.fetch_more()
if self._offset < len(self._results):
if self._limit is not None:
self._limit -= 1
if self._limit < 0:
raise StopIteration()
return self._results[self._offset]
else:
raise StopIteration()
next = __next__
def to_call(self, the_callable, *args, **kwargs):
"""
Sets up the callable & any arguments to run it with.
This is stored for subsequent calls so that those queries can be
run without requiring user intervention.
Example::
# Just an example callable.
>>> def squares_to(y):
... for x in range(1, y):
... yield x**2
>>> rs = ResultSet()
# Set up what to call & arguments.
>>> rs.to_call(squares_to, y=3)
"""
if not callable(the_callable):
raise ValueError(
'You must supply an object or function to be called.'
)
# We pop the ``limit``, if present, to track how many we should return
# to the user. This isn't the same as the ``limit`` that the low-level
# DDB api calls use (which limit page size, not the overall result set).
self._limit = kwargs.pop('limit', None)
if self._limit is not None and self._limit < 0:
self._limit = None
self.the_callable = the_callable
self.call_args = args
self.call_kwargs = kwargs
def fetch_more(self):
"""
When the iterator runs out of results, this method is run to re-execute
the callable (& arguments) to fetch the next page.
Largely internal.
"""
self._reset()
args = self.call_args[:]
kwargs = self.call_kwargs.copy()
if self._last_key_seen is not None:
kwargs[self.first_key] = self._last_key_seen
# If the page size is greater than limit set them
# to the same value
if self._limit and self._max_page_size and self._max_page_size > self._limit:
self._max_page_size = self._limit
# Put in the max page size.
if self._max_page_size is not None:
kwargs['limit'] = self._max_page_size
elif self._limit is not None:
# If max_page_size is not set and limit is available
# use it as the page size
kwargs['limit'] = self._limit
results = self.the_callable(*args, **kwargs)
self._fetches += 1
new_results = results.get('results', [])
self._last_key_seen = results.get('last_key', None)
if len(new_results):
self._results.extend(results['results'])
# Check the limit, if it's present.
if self._limit is not None and self._limit >= 0:
limit = self._limit
limit -= len(results['results'])
# If we've exceeded the limit, we don't have any more
# results to look for.
if limit <= 0:
self._results_left = False
if self._last_key_seen is None:
self._results_left = False
class BatchGetResultSet(ResultSet):
def __init__(self, *args, **kwargs):
self._keys_left = kwargs.pop('keys', [])
self._max_batch_get = kwargs.pop('max_batch_get', 100)
super(BatchGetResultSet, self).__init__(*args, **kwargs)
def fetch_more(self):
self._reset()
args = self.call_args[:]
kwargs = self.call_kwargs.copy()
# Slice off the max we can fetch.
kwargs['keys'] = self._keys_left[:self._max_batch_get]
self._keys_left = self._keys_left[self._max_batch_get:]
if len(self._keys_left) <= 0:
self._results_left = False
results = self.the_callable(*args, **kwargs)
if not len(results.get('results', [])):
return
self._results.extend(results['results'])
for offset, key_data in enumerate(results.get('unprocessed_keys', [])):
# We've got an unprocessed key. Reinsert it into the list.
# DynamoDB only returns valid keys, so there should be no risk of
# missing keys ever making it here.
self._keys_left.insert(offset, key_data)
if len(self._keys_left) > 0:
self._results_left = True
# Decrease the limit, if it's present.
if self.call_kwargs.get('limit'):
self.call_kwargs['limit'] -= len(results['results'])
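# --- Usage sketch (editor's addition; not part of the vendored boto source). ---
# A self-contained illustration of the paging contract ResultSet expects from
# its callable: accept ``exclusive_start_key`` and ``limit`` keyword arguments
# and return a dict with ``results`` plus an optional ``last_key``. The
# ``page_of_squares`` callable below is a stand-in for Table.query/Table.scan.
if __name__ == '__main__':
    def page_of_squares(exclusive_start_key=None, limit=2):
        start = 0 if exclusive_start_key is None else exclusive_start_key
        page = {'results': [x ** 2 for x in range(start, start + limit)]}
        if start + limit < 10:
            page['last_key'] = start + limit   # more pages remain
        return page

    rs = ResultSet(max_page_size=2)
    rs.to_call(page_of_squares, limit=5)   # overall cap of 5 results
    print(list(rs))   # pages are fetched lazily -> [0, 1, 4, 9, 16]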
| /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb2/results.py | 0.798108 | 0.331444 | results.py | pypi |
from boto.dynamodb2.types import STRING
class BaseSchemaField(object):
"""
An abstract class for defining schema fields.
Contains most of the core functionality for the field. Subclasses must
define an ``attr_type`` to pass to DynamoDB.
"""
attr_type = None
def __init__(self, name, data_type=STRING):
"""
Creates a Python schema field, to represent the data to pass to
DynamoDB.
Requires a ``name`` parameter, which should be a string name of the
field.
Optionally accepts a ``data_type`` parameter, which should be a
constant from ``boto.dynamodb2.types``. (Default: ``STRING``)
"""
self.name = name
self.data_type = data_type
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> field.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
return {
'AttributeName': self.name,
'AttributeType': self.data_type,
}
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> field.schema()
{
'AttributeName': 'username',
'KeyType': 'HASH',
}
"""
return {
'AttributeName': self.name,
'KeyType': self.attr_type,
}
class HashKey(BaseSchemaField):
"""
A field representing a hash key.
Example::
>>> from boto.dynamodb2.types import NUMBER
>>> HashKey('username')
>>> HashKey('date_joined', data_type=NUMBER)
"""
attr_type = 'HASH'
class RangeKey(BaseSchemaField):
"""
A field representing a range key.
Example::
>>> from boto.dynamodb2.types import NUMBER
>>> RangeKey('username')
>>> RangeKey('date_joined', data_type=NUMBER)
"""
attr_type = 'RANGE'
class BaseIndexField(object):
"""
An abstract class for defining schema indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
def __init__(self, name, parts):
self.name = name
self.parts = parts
def definition(self):
"""
Returns the attribute definition structure DynamoDB expects.
Example::
>>> index.definition()
{
'AttributeName': 'username',
'AttributeType': 'S',
}
"""
definition = []
for part in self.parts:
definition.append({
'AttributeName': part.name,
'AttributeType': part.data_type,
})
return definition
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
"""
key_schema = []
for part in self.parts:
key_schema.append(part.schema())
return {
'IndexName': self.name,
'KeySchema': key_schema,
'Projection': {
'ProjectionType': self.projection_type,
}
}
class AllIndex(BaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> AllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'ALL'
class KeysOnlyIndex(BaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> KeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ])
"""
projection_type = 'KEYS_ONLY'
class IncludeIndex(BaseIndexField):
"""
An index signifying only certain fields should be in the index.
Example::
>>> IncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ], includes=['gender'])
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
self.includes_fields = kwargs.pop('includes', [])
super(IncludeIndex, self).__init__(*args, **kwargs)
def schema(self):
schema_data = super(IncludeIndex, self).schema()
schema_data['Projection']['NonKeyAttributes'] = self.includes_fields
return schema_data
class GlobalBaseIndexField(BaseIndexField):
"""
An abstract class for defining global indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
throughput = {
'read': 5,
'write': 5,
}
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
if throughput is not None:
self.throughput = throughput
super(GlobalBaseIndexField, self).__init__(*args, **kwargs)
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
"""
schema_data = super(GlobalBaseIndexField, self).schema()
schema_data['ProvisionedThroughput'] = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
}
return schema_data
class GlobalAllIndex(GlobalBaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> GlobalAllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'ALL'
class GlobalKeysOnlyIndex(GlobalBaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'KEYS_ONLY'
class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
"""
An index signifying only certain fields should be in the index.
Example::
>>> GlobalIncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... includes=['gender'],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
IncludeIndex.__init__(self, *args, **kwargs)
if throughput:
kwargs['throughput'] = throughput
GlobalBaseIndexField.__init__(self, *args, **kwargs)
def schema(self):
# Pick up the includes.
schema_data = IncludeIndex.schema(self)
# Also the throughput.
schema_data.update(GlobalBaseIndexField.schema(self))
return schema_data
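# --- Usage sketch (editor's addition; not part of the vendored boto source). ---
# Shows the wire structures the schema classes above produce. The attribute
# and index names ('username', 'date_joined', 'MostRecentlyJoined') are
# made-up examples; 'N' is the DynamoDB number type, normally imported as
# boto.dynamodb2.types.NUMBER.
if __name__ == '__main__':
    parts = [HashKey('username'), RangeKey('date_joined', data_type='N')]
    index = GlobalAllIndex('MostRecentlyJoined', parts=parts,
                           throughput={'read': 2, 'write': 1})
    print([part.schema() for part in parts])
    # [{'AttributeName': 'username', 'KeyType': 'HASH'},
    #  {'AttributeName': 'date_joined', 'KeyType': 'RANGE'}]
    print(index.schema()['ProvisionedThroughput'])
    # {'ReadCapacityUnits': 2, 'WriteCapacityUnits': 1}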
| /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/dynamodb2/fields.py | 0.858229 | 0.52342 | fields.py | pypi |
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.elastictranscoder import exceptions
class ElasticTranscoderConnection(AWSAuthConnection):
"""
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
APIVersion = "2012-09-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"IncompatibleVersionException": exceptions.IncompatibleVersionException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceInUseException": exceptions.ResourceInUseException,
"AccessDeniedException": exceptions.AccessDeniedException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServiceException": exceptions.InternalServiceException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(ElasticTranscoderConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def cancel_job(self, id=None):
"""
The CancelJob operation cancels an unfinished job.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
:param id: The identifier of the job that you want to cancel.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
If you have specified more than one output for your jobs (for
example, one output for the Kindle Fire and another output for
the Apple iPhone 4s), you currently must use the Elastic
Transcoder API to list the jobs (as opposed to the AWS
Console).
:type pipeline_id: string
:param pipeline_id: The `Id` of the pipeline that you want Elastic
Transcoder to use for transcoding. The pipeline determines several
settings, including the Amazon S3 bucket from which Elastic
Transcoder gets the files to transcode and the bucket into which
Elastic Transcoder puts the transcoded files.
:type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being transcoded.
:type output: dict
:param output: The `CreateJobOutput` structure.
:type outputs: list
:param outputs: A section of the request body that provides information
about the transcoded (target) files. We recommend that you use the
`Outputs` syntax instead of the `Output` syntax.
:type output_key_prefix: string
:param output_key_prefix: The value, if any, that you want Elastic
Transcoder to prepend to the names of all files that this job
creates, including output files, thumbnails, and playlists.
:type playlists: list
:param playlists: If you specify a preset in `PresetId` for which the
value of `Container` is ts (MPEG-TS), Playlists contains
information about the master playlists that you want Elastic
Transcoder to create.
We recommend that you create only one master playlist. The maximum
number of master playlists in a job is 30.
"""
uri = '/2012-09-25/jobs'
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if input_name is not None:
params['Input'] = input_name
if output is not None:
params['Output'] = output
if outputs is not None:
params['Outputs'] = outputs
if output_key_prefix is not None:
params['OutputKeyPrefix'] = output_key_prefix
if playlists is not None:
params['Playlists'] = playlists
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_pipeline(self, name=None, input_bucket=None,
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
The CreatePipeline operation creates a pipeline with settings
that you specify.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket in which you want Elastic
Transcoder to save the transcoded files. (Use this, or use
ContentConfig:Bucket plus ThumbnailConfig:Bucket.)
Specify this value when all of the following are true:
+ You want to save transcoded files, thumbnails (if any), and playlists
(if any) together in one bucket.
+ You do not want to specify the users or groups who have access to the
transcoded files, thumbnails, and playlists.
+ You do not want to specify the permissions that Elastic Transcoder
grants to the files. When Elastic Transcoder saves files in
`OutputBucket`, it grants full control over the files only to the
AWS account that owns the role that is specified by `Role`.
+ You want to associate the transcoded files and thumbnails with the
Amazon S3 Standard storage class.
If you want to save transcoded files and playlists in one bucket and
thumbnails in another bucket, specify which users can access the
transcoded files or the permissions the users have, or change the
Amazon S3 storage class, omit `OutputBucket` and specify values for
`ContentConfig` and `ThumbnailConfig` instead.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to create the pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic that you want
to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process a job in this pipeline. This is
the ARN that Amazon SNS returned when you created the topic. For
more information, see Create a Topic in the Amazon Simple
Notification Service Developer Guide.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job in
this pipeline. This is the ARN that Amazon SNS returned when you
created the topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines'
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
The CreatePreset operation creates a preset with settings that
you specify.
Elastic Transcoder checks the CreatePreset settings to ensure
that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
`ValidationException`) and does not create the preset. If the
settings are valid for Elastic Transcoder but aren't strictly
compliant with the H.264 standard, Elastic Transcoder creates
the preset and returns a warning message in the response. This
helps you determine whether your settings comply with the
H.264 standard while giving you greater flexibility with
respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format.
For more information, see the International Telecommunication
Union publication Recommendation ITU-T H.264: Advanced video
coding for generic audiovisual services .
:type name: string
:param name: The name of the preset. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
:type description: string
:param description: A description of the preset.
:type container: string
:param container: The container type for the output file. Valid values
include `mp3`, `mp4`, `ogg`, `ts`, and `webm`.
:type video: dict
:param video: A section of the request body that specifies the video
parameters.
:type audio: dict
:param audio: A section of the request body that specifies the audio
parameters.
:type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
"""
uri = '/2012-09-25/presets'
params = {}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if container is not None:
params['Container'] = container
if video is not None:
params['Video'] = video
if audio is not None:
params['Audio'] = audio
if thumbnails is not None:
params['Thumbnails'] = thumbnails
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def delete_pipeline(self, id=None):
"""
The DeletePipeline operation removes a pipeline.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
jobs). If the pipeline is currently in use, `DeletePipeline`
returns an error.
:type id: string
:param id: The identifier of the pipeline that you want to delete.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def delete_preset(self, id=None):
"""
The DeletePreset operation removes a preset that you've added
in an AWS region.
You can't delete the default presets that are included with
Elastic Transcoder.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
The ListJobsByPipeline operation gets a list of the jobs
currently in a pipeline.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
each job that satisfies the search criteria.
:type pipeline_id: string
:param pipeline_id: The ID of the pipeline for which you want to get
job information.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id)
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
The ListJobsByStatus operation gets a list of jobs that have a
specified status. The response body contains one element for
each job that satisfies the search criteria.
:type status: string
:param status: To get information about all of the jobs associated with
the current AWS account that have a given status, specify the
following status: `Submitted`, `Progressing`, `Complete`,
`Canceled`, or `Error`.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
params = {}
if status is not None:
params['Status'] = status
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_pipelines(self, ascending=None, page_token=None):
"""
The ListPipelines operation gets a list of the pipelines
associated with the current AWS account.
:type ascending: string
:param ascending: To list pipelines in chronological order by the date
and time that they were created, enter `True`. To list pipelines in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/pipelines'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_presets(self, ascending=None, page_token=None):
"""
The ListPresets operation gets a list of the default presets
included with Elastic Transcoder and the presets that you've
added in an AWS region.
:type ascending: string
:param ascending: To list presets in chronological order by the date
and time that they were created, enter `True`. To list presets in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/presets'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def read_job(self, id=None):
"""
The ReadJob operation returns detailed information about a
job.
:type id: string
:param id: The identifier of the job for which you want to get detailed
information.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_pipeline(self, id=None):
"""
The ReadPipeline operation gets detailed information about a
pipeline.
:type id: string
:param id: The identifier of the pipeline to read.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_preset(self, id=None):
"""
The ReadPreset operation gets detailed information about a
preset.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
The TestRole operation tests the IAM role used to create the
pipeline.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
Transcoder perform tasks associated with the transcoding
process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries
to send a test notification to Amazon SNS topics that you
specify.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to test.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket that contains media files to
be transcoded. The action attempts to read from this bucket.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
write transcoded media files to. The action attempts to read from
this bucket.
:type topics: list
:param topics: The ARNs of one or more Amazon Simple Notification
Service (Amazon SNS) topics that you want the action to send a test
notification to.
"""
uri = '/2012-09-25/roleTests'
params = {}
if role is not None:
params['Role'] = role
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if topics is not None:
params['Topics'] = topics
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline(self, id, name=None, input_bucket=None, role=None,
notifications=None, content_config=None,
thumbnail_config=None):
"""
Use the `UpdatePipeline` operation to update settings for a
pipeline. When you change pipeline settings, your changes take
effect immediately. Jobs that you have already submitted and
that Elastic Transcoder has not started to process are
affected in addition to jobs that you submit after you change
settings.
:type id: string
:param id: The ID of the pipeline that you want to update.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode and the graphics that you want to
use as watermarks.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to transcode jobs for this pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic or topics to
notify in order to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_notifications(self, id=None, notifications=None):
"""
With the UpdatePipelineNotifications operation, you can update
Amazon Simple Notification Service (Amazon SNS) notifications
for a pipeline.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
request.
:type id: string
:param id: The identifier of the pipeline for which you want to change
notification settings.
:type notifications: dict
:param notifications:
The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the
topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
"""
uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
params = {}
if id is not None:
params['Id'] = id
if notifications is not None:
params['Notifications'] = notifications
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_status(self, id=None, status=None):
"""
The UpdatePipelineStatus operation pauses or reactivates a
pipeline, so that the pipeline stops or restarts the
processing of jobs.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
Transcoder has started processing them; if you pause the
pipeline to which you submitted the jobs, you have more time
to get the job IDs for the jobs that you want to cancel, and
to send a CancelJob request.
:type id: string
:param id: The identifier of the pipeline to update.
:type status: string
:param status:
The desired status of the pipeline:
+ `Active`: The pipeline is processing jobs.
+ `Paused`: The pipeline is not currently processing jobs.
"""
uri = '/2012-09-25/pipelines/{0}/status'.format(id)
params = {}
if id is not None:
params['Id'] = id
if status is not None:
params['Status'] = status
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = super(ElasticTranscoderConnection, self).make_request(
verb, resource, headers=headers, data=data, params=params)
body = json.loads(response.read().decode('utf-8'))
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
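# A minimal usage sketch, assuming boto can find AWS credentials; 'pipeline_id'
# and 'topic_arn' are hypothetical identifiers for an existing pipeline and an
# existing SNS topic.
def _example_pause_pipeline(pipeline_id, topic_arn):
    conn = ElasticTranscoderConnection()
    # Pause the pipeline so that queued jobs can still be cancelled.
    conn.update_pipeline_status(id=pipeline_id, status='Paused')
    # Send error notifications for this pipeline to an SNS topic; the other
    # notification types are left disabled (empty string) in this sketch.
    return conn.update_pipeline_notifications(
        id=pipeline_id,
        notifications={'Progressing': '', 'Completed': '',
                       'Warning': '', 'Error': topic_arn})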
# File: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/elastictranscoder/layer1.py
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudtrail import exceptions
from boto.compat import json
class CloudTrailConnection(AWSQueryConnection):
"""
AWS CloudTrail
This is the CloudTrail API Reference. It provides descriptions of
actions, data types, common parameters, and common errors for
CloudTrail.
CloudTrail is a web service that records AWS API calls for your
AWS account and delivers log files to an Amazon S3 bucket. The
recorded information includes the identity of the user, the start
time of the AWS API call, the source IP address, the request
parameters, and the response elements returned by the service.
As an alternative to using the API, you can use one of the AWS
SDKs, which consist of libraries and sample code for various
programming languages and platforms (Java, Ruby, .NET, iOS,
Android, etc.). The SDKs provide a convenient way to create
programmatic access to AWSCloudTrail. For example, the SDKs take
care of cryptographically signing requests, managing errors, and
retrying requests automatically. For information about the AWS
SDKs, including how to download and install them, see the `Tools
for Amazon Web Services page`_.
See the CloudTrail User Guide for information about the data that
is included with each AWS API call listed in the log files.
"""
APIVersion = "2013-11-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com"
ServiceName = "CloudTrail"
TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101"
ResponseError = JSONResponseError
_faults = {
"InvalidMaxResultsException": exceptions.InvalidMaxResultsException,
"InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException,
"InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException,
"TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException,
"InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
"InvalidCloudWatchLogsLogGroupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException,
"InvalidCloudWatchLogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException,
"InvalidTrailNameException": exceptions.InvalidTrailNameException,
"CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException,
"TrailNotFoundException": exceptions.TrailNotFoundException,
"S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
"MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
"InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudTrailConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_trail(self, name, s3_bucket_name, s3_key_prefix=None,
sns_topic_name=None, include_global_service_events=None,
cloud_watch_logs_log_group_arn=None,
cloud_watch_logs_role_arn=None):
"""
From the command line, use `create-subscription`.
Creates a trail that specifies the settings for delivery of
log data to an Amazon S3 bucket.
:type name: string
:param name: Specifies the name of the trail.
:type s3_bucket_name: string
:param s3_bucket_name: Specifies the name of the Amazon S3 bucket
designated for publishing log files.
:type s3_key_prefix: string
:param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
the name of the bucket you have designated for log file delivery.
:type sns_topic_name: string
:param sns_topic_name: Specifies the name of the Amazon SNS topic
defined for notification of log file delivery.
:type include_global_service_events: boolean
:param include_global_service_events: Specifies whether the trail is
publishing events from global services such as IAM to the log
files.
:type cloud_watch_logs_log_group_arn: string
:param cloud_watch_logs_log_group_arn: Specifies a log group name using
an Amazon Resource Name (ARN), a unique identifier that represents
the log group to which CloudTrail logs will be delivered. Not
required unless you specify CloudWatchLogsRoleArn.
:type cloud_watch_logs_role_arn: string
:param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
            Logs endpoint to assume to write to a user's log group.
"""
params = {'Name': name, 'S3BucketName': s3_bucket_name, }
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
if cloud_watch_logs_log_group_arn is not None:
params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
if cloud_watch_logs_role_arn is not None:
params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
return self.make_request(action='CreateTrail',
body=json.dumps(params))
def delete_trail(self, name):
"""
Deletes a trail.
:type name: string
:param name: The name of a trail to be deleted.
"""
params = {'Name': name, }
return self.make_request(action='DeleteTrail',
body=json.dumps(params))
def describe_trails(self, trail_name_list=None):
"""
Retrieves settings for the trail associated with the current
region for your account.
:type trail_name_list: list
        :param trail_name_list: A list of trail names identifying the trails
            whose settings you want returned. If not specified, settings for
            the current region's trail are returned.
"""
params = {}
if trail_name_list is not None:
params['trailNameList'] = trail_name_list
return self.make_request(action='DescribeTrails',
body=json.dumps(params))
def get_trail_status(self, name):
"""
Returns a JSON-formatted list of information about the
specified trail. Fields include information on delivery
errors, Amazon SNS and Amazon S3 errors, and start and stop
logging times for each trail.
:type name: string
:param name: The name of the trail for which you are requesting the
current status.
"""
params = {'Name': name, }
return self.make_request(action='GetTrailStatus',
body=json.dumps(params))
def lookup_events(self, lookup_attributes=None, start_time=None,
end_time=None, max_results=None, next_token=None):
"""
Looks up API activity events captured by CloudTrail that
create, update, or delete resources in your account. Events
for a region can be looked up for the times in which you had
CloudTrail turned on in that region during the last seven
days. Lookup supports five different attributes: time range
(defined by a start time and end time), user name, event name,
resource type, and resource name. All attributes are optional.
        A single lookup request can specify the time range plus at most one
        other attribute. The
default number of results returned is 10, with a maximum of 50
possible. The response includes a token that you can use to
get the next page of results.
The rate of lookup requests is limited to one per second per
account. If this limit is exceeded, a throttling error occurs.
Events that occurred during the selected time range will not
be available for lookup if CloudTrail logging was not enabled
when the events occurred.
:type lookup_attributes: list
:param lookup_attributes: Contains a list of lookup attributes.
Currently the list can contain only one item.
:type start_time: timestamp
:param start_time: Specifies that only events that occur after or at
the specified time are returned. If the specified start time is
after the specified end time, an error is returned.
:type end_time: timestamp
:param end_time: Specifies that only events that occur before or at the
specified time are returned. If the specified end time is before
the specified start time, an error is returned.
:type max_results: integer
:param max_results: The number of events to return. Possible values are
1 through 50. The default is 10.
:type next_token: string
:param next_token: The token to use to get the next page of results
after a previous API call. This token must be passed in with the
            same parameters that were specified in the original call. For
example, if the original call specified an AttributeKey of
'Username' with a value of 'root', the call with NextToken should
include those same parameters.
"""
params = {}
if lookup_attributes is not None:
params['LookupAttributes'] = lookup_attributes
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if max_results is not None:
params['MaxResults'] = max_results
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='LookupEvents',
body=json.dumps(params))
def start_logging(self, name):
"""
Starts the recording of AWS API calls and log file delivery
for a trail.
:type name: string
:param name: The name of the trail for which CloudTrail logs AWS API
calls.
"""
params = {'Name': name, }
return self.make_request(action='StartLogging',
body=json.dumps(params))
def stop_logging(self, name):
"""
Suspends the recording of AWS API calls and log file delivery
for the specified trail. Under most circumstances, there is no
need to use this action. You can update a trail without
stopping it first. This action is the only way to stop
recording.
:type name: string
:param name: Communicates to CloudTrail the name of the trail for which
to stop logging AWS API calls.
"""
params = {'Name': name, }
return self.make_request(action='StopLogging',
body=json.dumps(params))
def update_trail(self, name, s3_bucket_name=None, s3_key_prefix=None,
sns_topic_name=None, include_global_service_events=None,
cloud_watch_logs_log_group_arn=None,
cloud_watch_logs_role_arn=None):
"""
From the command line, use `update-subscription`.
Updates the settings that specify delivery of log files.
Changes to a trail do not require stopping the CloudTrail
service. Use this action to designate an existing bucket for
log delivery. If the existing bucket has previously been a
target for CloudTrail log files, an IAM policy exists for the
bucket.
:type name: string
:param name: Specifies the name of the trail.
:type s3_bucket_name: string
:param s3_bucket_name: Specifies the name of the Amazon S3 bucket
designated for publishing log files.
:type s3_key_prefix: string
:param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
the name of the bucket you have designated for log file delivery.
:type sns_topic_name: string
:param sns_topic_name: Specifies the name of the Amazon SNS topic
defined for notification of log file delivery.
:type include_global_service_events: boolean
:param include_global_service_events: Specifies whether the trail is
publishing events from global services such as IAM to the log
files.
:type cloud_watch_logs_log_group_arn: string
:param cloud_watch_logs_log_group_arn: Specifies a log group name using
an Amazon Resource Name (ARN), a unique identifier that represents
the log group to which CloudTrail logs will be delivered. Not
required unless you specify CloudWatchLogsRoleArn.
:type cloud_watch_logs_role_arn: string
:param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch
            Logs endpoint to assume to write to a user's log group.
"""
params = {'Name': name, }
if s3_bucket_name is not None:
params['S3BucketName'] = s3_bucket_name
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
if cloud_watch_logs_log_group_arn is not None:
params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn
if cloud_watch_logs_role_arn is not None:
params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn
return self.make_request(action='UpdateTrail',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
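# A minimal usage sketch, assuming AWS credentials are configured for boto;
# the trail and bucket names are hypothetical, and the bucket is assumed to
# already exist with a policy that allows CloudTrail to write to it.
def _example_create_and_query_trail():
    conn = CloudTrailConnection()
    # Create the trail, then start logging; a trail records nothing until
    # StartLogging is called.
    conn.create_trail(name='example-trail', s3_bucket_name='example-logs-bucket')
    conn.start_logging(name='example-trail')
    # Look up recent events for a single attribute (here, a user name).
    return conn.lookup_events(
        lookup_attributes=[{'AttributeKey': 'Username',
                            'AttributeValue': 'alice'}],
        max_results=10)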
# File: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudtrail/layer1.py
from boto.exception import BotoServerError
class InvalidSnsTopicNameException(BotoServerError):
"""
Raised when an invalid SNS topic name is passed to Cloudtrail.
"""
pass
class InvalidS3BucketNameException(BotoServerError):
"""
Raised when an invalid S3 bucket name is passed to Cloudtrail.
"""
pass
class TrailAlreadyExistsException(BotoServerError):
"""
Raised when the given trail name already exists.
"""
pass
class InsufficientSnsTopicPolicyException(BotoServerError):
"""
Raised when the SNS topic does not allow Cloudtrail to post
messages.
"""
pass
class InvalidTrailNameException(BotoServerError):
"""
Raised when the trail name is invalid.
"""
pass
class InternalErrorException(BotoServerError):
"""
Raised when there was an internal Cloudtrail error.
"""
pass
class TrailNotFoundException(BotoServerError):
"""
Raised when the given trail name is not found.
"""
pass
class S3BucketDoesNotExistException(BotoServerError):
"""
Raised when the given S3 bucket does not exist.
"""
pass
class TrailNotProvidedException(BotoServerError):
"""
Raised when no trail name was provided.
"""
pass
class InvalidS3PrefixException(BotoServerError):
"""
Raised when an invalid key prefix is given.
"""
pass
class MaximumNumberOfTrailsExceededException(BotoServerError):
"""
Raised when no more trails can be created.
"""
pass
class InsufficientS3BucketPolicyException(BotoServerError):
"""
Raised when the S3 bucket does not allow Cloudtrail to
write files into the prefix.
"""
pass
class InvalidMaxResultsException(BotoServerError):
pass
class InvalidTimeRangeException(BotoServerError):
pass
class InvalidLookupAttributesException(BotoServerError):
pass
class InvalidCloudWatchLogsLogGroupArnException(BotoServerError):
pass
class InvalidCloudWatchLogsRoleArnException(BotoServerError):
pass
class CloudWatchLogsDeliveryUnavailableException(BotoServerError):
pass
class InvalidNextTokenException(BotoServerError):
pass
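# A brief sketch of how these classes surface in practice: the '__type' fault
# name returned by the service is mapped to one of the classes above via
# CloudTrailConnection._faults, so callers can catch specific errors. The
# trail name here is hypothetical and credentials are assumed to be configured.
def _example_handle_missing_trail(trail_name='no-such-trail'):
    from boto.cloudtrail.layer1 import CloudTrailConnection
    conn = CloudTrailConnection()
    try:
        return conn.get_trail_status(name=trail_name)
    except TrailNotFoundException:
        return None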
# File: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudtrail/exceptions.py
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
"""
Amazon Simple Notification Service
Amazon Simple Notification Service (Amazon SNS) is a web service
that enables you to build distributed web-enabled applications.
Applications can use Amazon SNS to easily push real-time
notification messages to interested subscribers over multiple
delivery protocols. For more information about this product see
`http://aws.amazon.com/sns`_. For detailed information about
Amazon SNS features and their associated API calls, see the
`Amazon SNS Developer Guide`_.
We also provide SDKs that enable you to access Amazon SNS from
your preferred programming language. The SDKs contain
functionality that automatically takes care of tasks such as:
cryptographically signing your service requests, retrying
requests, and handling error responses. For a list of available
SDKs, go to `Tools for Amazon Web Services`_.
"""
DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint',
'sns.us-east-1.amazonaws.com')
APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True,
profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint,
connection_cls=SNSConnection)
self.region = region
super(SNSConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _build_dict_as_list_params(self, params, dictionary, name):
"""
        Serialize a parameter 'name' whose value is a 'dictionary' into a list of parameters.
See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
For example::
dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
name = 'Attributes'
would result in params dict being populated with:
Attributes.entry.1.key = PlatformPrincipal
Attributes.entry.1.value = foo
Attributes.entry.2.key = PlatformCredential
Attributes.entry.2.value = bar
:param params: the resulting parameters will be added to this dict
:param dictionary: dict - value of the serialized parameter
:param name: name of the serialized parameter
"""
        items = sorted(dictionary.items(), key=lambda x: x[0])
        for index, (key, value) in enumerate(items, start=1):
prefix = '%s.entry.%s' % (name, index)
params['%s.key' % prefix] = key
params['%s.value' % prefix] = value
def _required_auth_capability(self):
return ['hmac-v4']
def get_all_topics(self, next_token=None):
"""
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListTopics', params)
def get_topic_attributes(self, topic):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
"""
params = {'TopicArn': topic}
return self._make_request('GetTopicAttributes', params)
def set_topic_attributes(self, topic, attr_name, attr_value):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
:type attr_name: string
:param attr_name: The name of the attribute you want to set.
Only a subset of the topic's attributes are mutable.
Valid values: Policy | DisplayName
:type attr_value: string
:param attr_value: The new value for the attribute.
"""
params = {'TopicArn': topic,
'AttributeName': attr_name,
'AttributeValue': attr_value}
return self._make_request('SetTopicAttributes', params)
def add_permission(self, topic, label, account_ids, actions):
"""
Adds a statement to a topic's access control policy, granting
access for the specified AWS accounts to the specified actions.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the new policy statement.
:type account_ids: list of strings
:param account_ids: The AWS account ids of the users who will be
            given access to the specified actions.
:type actions: list of strings
:param actions: The actions you want to allow for each of the
specified principal(s).
"""
params = {'TopicArn': topic,
'Label': label}
self.build_list_params(params, account_ids, 'AWSAccountId.member')
self.build_list_params(params, actions, 'ActionName.member')
return self._make_request('AddPermission', params)
def remove_permission(self, topic, label):
"""
Removes a statement from a topic's access control policy.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the policy statement
to be removed.
"""
params = {'TopicArn': topic,
'Label': label}
return self._make_request('RemovePermission', params)
def create_topic(self, topic):
"""
Create a new Topic.
:type topic: string
:param topic: The name of the new topic.
"""
params = {'Name': topic}
return self._make_request('CreateTopic', params)
def delete_topic(self, topic):
"""
Delete an existing topic
:type topic: string
:param topic: The ARN of the topic
"""
params = {'TopicArn': topic}
return self._make_request('DeleteTopic', params, '/', 'GET')
def publish(self, topic=None, message=None, subject=None, target_arn=None,
message_structure=None, message_attributes=None):
"""
Sends a message to all of a topic's subscribed endpoints
:type topic: string
:param topic: The topic you want to publish to.
:type message: string
:param message: The message you want to send to the topic.
Messages must be UTF-8 encoded strings and
be at most 4KB in size.
:type message_structure: string
:param message_structure: Optional parameter. If left as ``None``,
plain text will be sent. If set to ``json``,
your message should be a JSON string that
matches the structure described at
http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol
:type message_attributes: dict
:param message_attributes: Message attributes to set. Should be
of the form:
.. code-block:: python
{
"name1": {
"data_type": "Number",
"string_value": "42"
},
"name2": {
"data_type": "String",
"string_value": "Bob"
}
}
:type subject: string
:param subject: Optional parameter to be used as the "Subject"
line of the email notifications.
:type target_arn: string
:param target_arn: Optional parameter for either TopicArn or
EndpointArn, but not both.
"""
if message is None:
# To be backwards compatible when message did not have
# a default value and topic and message were required
# args.
raise TypeError("'message' is a required parameter")
params = {'Message': message}
if subject is not None:
params['Subject'] = subject
if topic is not None:
params['TopicArn'] = topic
if target_arn is not None:
params['TargetArn'] = target_arn
if message_structure is not None:
params['MessageStructure'] = message_structure
if message_attributes is not None:
keys = sorted(message_attributes.keys())
for i, name in enumerate(keys, start=1):
attribute = message_attributes[name]
params['MessageAttributes.entry.{0}.Name'.format(i)] = name
if 'data_type' in attribute:
params['MessageAttributes.entry.{0}.Value.DataType'.format(i)] = \
attribute['data_type']
if 'string_value' in attribute:
params['MessageAttributes.entry.{0}.Value.StringValue'.format(i)] = \
attribute['string_value']
if 'binary_value' in attribute:
params['MessageAttributes.entry.{0}.Value.BinaryValue'.format(i)] = \
attribute['binary_value']
return self._make_request('Publish', params, '/', 'POST')
def subscribe(self, topic, protocol, endpoint):
"""
Subscribe to a Topic.
:type topic: string
:param topic: The ARN of the new topic.
:type protocol: string
:param protocol: The protocol used to communicate with
the subscriber. Current choices are:
email|email-json|http|https|sqs|sms|application
:type endpoint: string
:param endpoint: The location of the endpoint for
the subscriber.
* For email, this would be a valid email address
* For email-json, this would be a valid email address
* For http, this would be a URL beginning with http
* For https, this would be a URL beginning with https
* For sqs, this would be the ARN of an SQS Queue
* For sms, this would be a phone number of an
SMS-enabled device
* For application, the endpoint is the EndpointArn
of a mobile app and device.
"""
params = {'TopicArn': topic,
'Protocol': protocol,
'Endpoint': endpoint}
return self._make_request('Subscribe', params)
def subscribe_sqs_queue(self, topic, queue):
"""
Subscribe an SQS queue to a topic.
        This is a convenience method that handles most of the complexity involved
in using an SQS queue as an endpoint for an SNS topic. To achieve this
the following operations are performed:
* The correct ARN is constructed for the SQS queue and that ARN is
then subscribed to the topic.
        * A JSON policy document is constructed that grants permission to
the SNS topic to send messages to the SQS queue.
* This JSON policy is then associated with the SQS queue using
the queue's set_attribute method. If the queue already has
a policy associated with it, this process will add a Statement to
that policy. If no policy exists, a new policy will be created.
:type topic: string
:param topic: The ARN of the new topic.
:type queue: A boto Queue object
:param queue: The queue you wish to subscribe to the SNS Topic.
"""
t = queue.id.split('/')
q_arn = queue.arn
sid = hashlib.md5((topic + q_arn).encode('utf-8')).hexdigest()
sid_exists = False
resp = self.subscribe(topic, 'sqs', q_arn)
attr = queue.get_attributes('Policy')
if 'Policy' in attr:
policy = json.loads(attr['Policy'])
else:
policy = {}
if 'Version' not in policy:
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
# See if a Statement with the Sid exists already.
for s in policy['Statement']:
if s['Sid'] == sid:
sid_exists = True
if not sid_exists:
statement = {'Action': 'SQS:SendMessage',
'Effect': 'Allow',
'Principal': {'AWS': '*'},
'Resource': q_arn,
'Sid': sid,
'Condition': {'StringLike': {'aws:SourceArn': topic}}}
policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
        :param token: Short-lived token sent to an endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'TopicArn': topic, 'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
return self._make_request('ConfirmSubscription', params)
def unsubscribe(self, subscription):
"""
Allows endpoint owner to delete subscription.
Confirmation message will be delivered.
:type subscription: string
:param subscription: The ARN of the subscription to be deleted.
"""
params = {'SubscriptionArn': subscription}
return self._make_request('Unsubscribe', params)
def get_all_subscriptions(self, next_token=None):
"""
Get list of all subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptions', params)
def get_all_subscriptions_by_topic(self, topic, next_token=None):
"""
Get list of all subscriptions to a specific topic.
:type topic: string
:param topic: The ARN of the topic for which you wish to
find subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {'TopicArn': topic}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptionsByTopic', params)
def create_platform_application(self, name=None, platform=None,
attributes=None):
"""
The `CreatePlatformApplication` action creates a platform
application object for one of the supported push notification
services, such as APNS and GCM, to which devices and mobile
apps may register. You must specify PlatformPrincipal and
PlatformCredential attributes when using the
`CreatePlatformApplication` action. The PlatformPrincipal is
received from the notification service. For APNS/APNS_SANDBOX,
PlatformPrincipal is "SSL certificate". For GCM,
PlatformPrincipal is not applicable. For ADM,
PlatformPrincipal is "client id". The PlatformCredential is
also received from the notification service. For
APNS/APNS_SANDBOX, PlatformCredential is "private key". For
GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret". The
PlatformApplicationArn that is returned when using
`CreatePlatformApplication` is then used as an attribute for
the `CreatePlatformEndpoint` action. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type name: string
:param name: Application names must be made up of only uppercase and
lowercase ASCII letters, numbers, underscores, hyphens, and
periods, and must be between 1 and 256 characters long.
:type platform: string
:param platform: The following platforms are supported: ADM (Amazon
Device Messaging), APNS (Apple Push Notification Service),
APNS_SANDBOX, and GCM (Google Cloud Messaging).
:type attributes: map
:param attributes: For a list of attributes, see
`SetPlatformApplicationAttributes`_
"""
params = {}
if name is not None:
params['Name'] = name
if platform is not None:
params['Platform'] = platform
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformApplication',
params=params)
def set_platform_application_attributes(self,
platform_application_arn=None,
attributes=None):
"""
The `SetPlatformApplicationAttributes` action sets the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
SetPlatformApplicationAttributes action.
:type attributes: map
:param attributes:
A map of the platform application attributes. Attributes in this map
include the following:
+ `PlatformCredential` -- The credential received from the notification
service. For APNS/APNS_SANDBOX, PlatformCredential is "private
key". For GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret".
+ `PlatformPrincipal` -- The principal received from the notification
service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL
certificate". For GCM, PlatformPrincipal is not applicable. For
ADM, PlatformPrincipal is "client id".
+ `EventEndpointCreated` -- Topic ARN to which EndpointCreated event
notifications should be sent.
+ `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event
notifications should be sent.
+ `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event
notifications should be sent.
+ `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event
notifications should be sent upon Direct Publish delivery failure
(permanent) to one of the application's endpoints.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetPlatformApplicationAttributes',
params=params)
def get_platform_application_attributes(self,
platform_application_arn=None):
"""
The `GetPlatformApplicationAttributes` action retrieves the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
GetPlatformApplicationAttributesInput.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='GetPlatformApplicationAttributes',
params=params)
def list_platform_applications(self, next_token=None):
"""
The `ListPlatformApplications` action lists the platform
application objects for the supported push notification
services, such as APNS and GCM. The results for
`ListPlatformApplications` are paginated and return a limited
list of applications, up to 100. If additional records are
available after the first page results, then a NextToken
string will be returned. To receive the next page, you call
`ListPlatformApplications` using the NextToken string received
from the previous call. When there are no more records to
return, NextToken will be null. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type next_token: string
:param next_token: NextToken string is used when calling
ListPlatformApplications action to retrieve additional records that
are available after the first page results.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListPlatformApplications',
params=params)
def list_endpoints_by_platform_application(self,
platform_application_arn=None,
next_token=None):
"""
The `ListEndpointsByPlatformApplication` action lists the
endpoints and endpoint attributes for devices in a supported
push notification service, such as GCM and APNS. The results
for `ListEndpointsByPlatformApplication` are paginated and
return a limited list of endpoints, up to 100. If additional
records are available after the first page results, then a
NextToken string will be returned. To receive the next page,
you call `ListEndpointsByPlatformApplication` again using the
NextToken string received from the previous call. When there
are no more records to return, NextToken will be null. For
more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
ListEndpointsByPlatformApplicationInput action.
:type next_token: string
:param next_token: NextToken string is used when calling
ListEndpointsByPlatformApplication action to retrieve additional
records that are available after the first page results.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListEndpointsByPlatformApplication',
params=params)
def delete_platform_application(self, platform_application_arn=None):
"""
The `DeletePlatformApplication` action deletes a platform
application object for one of the supported push notification
services, such as APNS and GCM. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn of platform
application object to delete.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='DeletePlatformApplication',
params=params)
def create_platform_endpoint(self, platform_application_arn=None,
token=None, custom_user_data=None,
attributes=None):
"""
The `CreatePlatformEndpoint` creates an endpoint for a device
and mobile app on one of the supported push notification
services, such as GCM and APNS. `CreatePlatformEndpoint`
requires the PlatformApplicationArn that is returned from
`CreatePlatformApplication`. The EndpointArn that is returned
when using `CreatePlatformEndpoint` can then be used by the
`Publish` action to send a message to a mobile app or by the
`Subscribe` action for subscription to a topic. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn returned from
            CreatePlatformApplication is used to create an endpoint.
:type token: string
:param token: Unique identifier created by the notification service for
an app on a device. The specific name for Token will vary,
depending on which notification service is being used. For example,
when using APNS as the notification service, you need the device
token. Alternatively, when using GCM or ADM, the device token
equivalent is called the registration ID.
:type custom_user_data: string
:param custom_user_data: Arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
:type attributes: map
:param attributes: For a list of attributes, see
`SetEndpointAttributes`_.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if token is not None:
params['Token'] = token
if custom_user_data is not None:
params['CustomUserData'] = custom_user_data
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformEndpoint',
params=params)
def delete_endpoint(self, endpoint_arn=None):
"""
The `DeleteEndpoint` action, which is idempotent, deletes the
endpoint from SNS. For more information, see `Using Amazon SNS
Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn of endpoint to delete.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='DeleteEndpoint', params=params)
def set_endpoint_attributes(self, endpoint_arn=None, attributes=None):
"""
The `SetEndpointAttributes` action sets the attributes for an
endpoint for a device on one of the supported push
notification services, such as GCM and APNS. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn used for SetEndpointAttributes action.
:type attributes: map
:param attributes:
A map of the endpoint attributes. Attributes in this map include the
following:
+ `CustomUserData` -- arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
+ `Enabled` -- flag that enables/disables delivery to the endpoint.
Message Processor will set this to false when a notification
service indicates to SNS that the endpoint is invalid. Users can
set it back to true, typically after updating Token.
+ `Token` -- device token, also referred to as a registration id, for
an app and mobile device. This is returned from the notification
service when an app and mobile device are registered with the
notification service.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetEndpointAttributes',
params=params)
def get_endpoint_attributes(self, endpoint_arn=None):
"""
The `GetEndpointAttributes` retrieves the endpoint attributes
for a device on one of the supported push notification
services, such as GCM and APNS. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn for GetEndpointAttributes input.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='GetEndpointAttributes',
params=params)
def _make_request(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
path=path, params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
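# A minimal usage sketch, assuming AWS credentials are configured for boto;
# the topic name and email address are hypothetical. Responses come back as
# the parsed JSON documents produced by _make_request above.
def _example_publish_with_attributes():
    conn = SNSConnection()
    resp = conn.create_topic('example-topic')
    topic_arn = resp['CreateTopicResponse']['CreateTopicResult']['TopicArn']
    # The email recipient must confirm the subscription before delivery starts.
    conn.subscribe(topic_arn, 'email', 'user@example.com')
    # Publish with typed message attributes (see the publish() docstring above).
    return conn.publish(
        topic=topic_arn,
        message='deployment finished',
        subject='status',
        message_attributes={
            'environment': {'data_type': 'String', 'string_value': 'staging'},
        })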
# File: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/sns/connection.py
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudkms'
class AuditConfig(_messages.Message):
"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditConfig are exempted. Example Policy with
  multiple AuditConfigs:
      {
        "audit_configs": [
          {
            "service": "allServices",
            "audit_log_configs": [
              {"log_type": "DATA_READ",
               "exempted_members": ["user:[email protected]"]},
              {"log_type": "DATA_WRITE"},
              {"log_type": "ADMIN_READ"}
            ]
          },
          {
            "service": "fooservice.googleapis.com",
            "audit_log_configs": [
              {"log_type": "DATA_READ"},
              {"log_type": "DATA_WRITE",
               "exempted_members": ["user:[email protected]"]}
            ]
          }
        ]
      }
  For fooservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts [email protected] from DATA_READ logging,
and [email protected] from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
Next ID: 4
exemptedMembers: A string attribute.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
exemptedMembers = _messages.StringField(2, repeated=True)
service = _messages.StringField(3)
class AuditLogConfig(_messages.Message):
"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:[email protected]" ]
}, { "log_type": "DATA_WRITE", } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
[email protected] from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Binding(_messages.Message):
"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: an
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently. This field is GOOGLE_INTERNAL.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `[email protected]`
or `[email protected]`. * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`.
* `domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`. Required
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CloudkmsProjectsLocationsGetRequest(_messages.Message):
"""A CloudkmsProjectsLocationsGetRequest object.
Fields:
name: Resource name for the location.
"""
name = _messages.StringField(1, required=True)
class CloudkmsProjectsLocationsKeyRingsCreateRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCreateRequest object.
Fields:
keyRing: A KeyRing resource to be passed as the request body.
keyRingId: Required. It must be unique within a location and match the
regular expression `[a-zA-Z0-9_-]{1,63}`
parent: Required. The resource name of the location associated with the
KeyRings, in the format `projects/*/locations/*`.
"""
keyRing = _messages.MessageField('KeyRing', 1)
keyRingId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCreateRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysCreateRequest object.
Fields:
cryptoKey: A CryptoKey resource to be passed as the request body.
cryptoKeyId: Required. It must be unique within a KeyRing and match the
regular expression `[a-zA-Z0-9_-]{1,63}`
parent: Required. The name of the KeyRing associated with the CryptoKeys.
"""
cryptoKey = _messages.MessageField('CryptoKey', 1)
cryptoKeyId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateRequest(_messages.Message):
"""A
CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateRequest
object.
Fields:
cryptoKeyVersion: A CryptoKeyVersion resource to be passed as the request
body.
parent: Required. The name of the CryptoKey associated with the
CryptoKeyVersions.
"""
cryptoKeyVersion = _messages.MessageField('CryptoKeyVersion', 1)
parent = _messages.StringField(2, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyRequest(_messages.Message):
"""A
CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyRequest
object.
Fields:
destroyCryptoKeyVersionRequest: A DestroyCryptoKeyVersionRequest resource
to be passed as the request body.
name: The resource name of the CryptoKeyVersion to destroy.
"""
destroyCryptoKeyVersionRequest = _messages.MessageField('DestroyCryptoKeyVersionRequest', 1)
name = _messages.StringField(2, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetRequest
object.
Fields:
name: The name of the CryptoKeyVersion to get.
"""
name = _messages.StringField(1, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest
object.
Fields:
pageSize: Optional limit on the number of CryptoKeyVersions to include in
the response. Further CryptoKeyVersions can subsequently be obtained by
including the ListCryptoKeyVersionsResponse.next_page_token in a
subsequent request. If unspecified, the server will pick an appropriate
default.
pageToken: Optional pagination token, returned earlier via
ListCryptoKeyVersionsResponse.next_page_token.
parent: Required. The resource name of the CryptoKey to list, in the
format `projects/*/locations/*/keyRings/*/cryptoKeys/*`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchRequest(_messages.Message):
"""A
CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchRequest
object.
Fields:
cryptoKeyVersion: A CryptoKeyVersion resource to be passed as the request
body.
name: Output only. The resource name for this CryptoKeyVersion in the
format
`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.
updateMask: Required list of fields to be updated in this request.
"""
cryptoKeyVersion = _messages.MessageField('CryptoKeyVersion', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreRequest(_messages.Message):
"""A
CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreRequest
object.
Fields:
name: The resource name of the CryptoKeyVersion to restore.
restoreCryptoKeyVersionRequest: A RestoreCryptoKeyVersionRequest resource
to be passed as the request body.
"""
name = _messages.StringField(1, required=True)
restoreCryptoKeyVersionRequest = _messages.MessageField('RestoreCryptoKeyVersionRequest', 2)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysDecryptRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysDecryptRequest object.
Fields:
decryptRequest: A DecryptRequest resource to be passed as the request
body.
name: Required. The resource name of the CryptoKey to use for decryption.
The server will choose the appropriate version.
"""
decryptRequest = _messages.MessageField('DecryptRequest', 1)
name = _messages.StringField(2, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysEncryptRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysEncryptRequest object.
Fields:
encryptRequest: A EncryptRequest resource to be passed as the request
body.
name: Required. The resource name of the CryptoKey or CryptoKeyVersion to
use for encryption. If a CryptoKey is specified, the server will use
its primary version.
"""
encryptRequest = _messages.MessageField('EncryptRequest', 1)
name = _messages.StringField(2, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysGetIamPolicyRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysGetRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysGetRequest object.
Fields:
name: The name of the CryptoKey to get.
"""
name = _messages.StringField(1, required=True)
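# A brief sketch (hypothetical project, key ring, and key names) of the fully
# qualified resource path these request messages expect in their 'name' field.
_example_get_request = CloudkmsProjectsLocationsKeyRingsCryptoKeysGetRequest(
    name='projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key')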
class CloudkmsProjectsLocationsKeyRingsCryptoKeysListRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysListRequest object.
Fields:
pageSize: Optional limit on the number of CryptoKeys to include in the
response. Further CryptoKeys can subsequently be obtained by including
the ListCryptoKeysResponse.next_page_token in a subsequent request. If
unspecified, the server will pick an appropriate default.
pageToken: Optional pagination token, returned earlier via
ListCryptoKeysResponse.next_page_token.
parent: Required. The resource name of the KeyRing to list, in the format
`projects/*/locations/*/keyRings/*`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysPatchRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysPatchRequest object.
Fields:
cryptoKey: A CryptoKey resource to be passed as the request body.
name: Output only. The resource name for this CryptoKey in the format
`projects/*/locations/*/keyRings/*/cryptoKeys/*`.
updateMask: Required list of fields to be updated in this request.
"""
cryptoKey = _messages.MessageField('CryptoKey', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysSetIamPolicyRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class CloudkmsProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionRequest
object.
Fields:
name: The resource name of the CryptoKey to update.
updateCryptoKeyPrimaryVersionRequest: A
UpdateCryptoKeyPrimaryVersionRequest resource to be passed as the
request body.
"""
name = _messages.StringField(1, required=True)
updateCryptoKeyPrimaryVersionRequest = _messages.MessageField('UpdateCryptoKeyPrimaryVersionRequest', 2)
class CloudkmsProjectsLocationsKeyRingsGetIamPolicyRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class CloudkmsProjectsLocationsKeyRingsGetRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsGetRequest object.
Fields:
name: The name of the KeyRing to get.
"""
name = _messages.StringField(1, required=True)
class CloudkmsProjectsLocationsKeyRingsListRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsListRequest object.
Fields:
pageSize: Optional limit on the number of KeyRings to include in the
response. Further KeyRings can subsequently be obtained by including
the ListKeyRingsResponse.next_page_token in a subsequent request. If
unspecified, the server will pick an appropriate default.
pageToken: Optional pagination token, returned earlier via
ListKeyRingsResponse.next_page_token.
parent: Required. The resource name of the location associated with the
KeyRings, in the format `projects/*/locations/*`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class CloudkmsProjectsLocationsKeyRingsSetIamPolicyRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class CloudkmsProjectsLocationsKeyRingsTestIamPermissionsRequest(_messages.Message):
"""A CloudkmsProjectsLocationsKeyRingsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class CloudkmsProjectsLocationsListRequest(_messages.Message):
"""A CloudkmsProjectsLocationsListRequest object.
Fields:
filter: The standard list filter.
name: The resource that owns the locations collection, if applicable.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class CryptoKey(_messages.Message):
"""A CryptoKey represents a logical key that can be used for cryptographic
operations. A CryptoKey is made up of one or more versions, which represent
the actual key material used in cryptographic operations.
Enums:
PurposeValueValuesEnum: The immutable purpose of this CryptoKey.
Currently, the only acceptable purpose is ENCRYPT_DECRYPT.
Messages:
LabelsValue: Labels with user defined metadata.
Fields:
createTime: Output only. The time at which this CryptoKey was created.
labels: Labels with user defined metadata.
name: Output only. The resource name for this CryptoKey in the format
`projects/*/locations/*/keyRings/*/cryptoKeys/*`.
nextRotationTime: At next_rotation_time, the Key Management Service will
automatically: 1. Create a new version of this CryptoKey. 2. Mark the
new version as primary. Key rotations performed manually via
CreateCryptoKeyVersion and UpdateCryptoKeyPrimaryVersion do not affect
next_rotation_time.
primary: Output only. A copy of the "primary" CryptoKeyVersion that will
be used by Encrypt when this CryptoKey is given in EncryptRequest.name.
The CryptoKey's primary version can be updated via
UpdateCryptoKeyPrimaryVersion.
purpose: The immutable purpose of this CryptoKey. Currently, the only
acceptable purpose is ENCRYPT_DECRYPT.
rotationPeriod: next_rotation_time will be advanced by this period when
the service automatically rotates a key. Must be at least one day. If
rotation_period is set, next_rotation_time must also be set.
"""
class PurposeValueValuesEnum(_messages.Enum):
"""The immutable purpose of this CryptoKey. Currently, the only acceptable
purpose is ENCRYPT_DECRYPT.
Values:
CRYPTO_KEY_PURPOSE_UNSPECIFIED: Not specified.
ENCRYPT_DECRYPT: CryptoKeys with this purpose may be used with Encrypt
and Decrypt.
"""
CRYPTO_KEY_PURPOSE_UNSPECIFIED = 0
ENCRYPT_DECRYPT = 1
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Labels with user defined metadata.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
name = _messages.StringField(3)
nextRotationTime = _messages.StringField(4)
primary = _messages.MessageField('CryptoKeyVersion', 5)
purpose = _messages.EnumField('PurposeValueValuesEnum', 6)
rotationPeriod = _messages.StringField(7)
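# Illustrative sketch (not part of the generated API): a CryptoKey configured
# for automatic rotation. As described above, if rotation_period is set then
# next_rotation_time must also be set. All field values here are hypothetical;
# the '2592000s' Duration string and RFC 3339 timestamp forms are assumptions.
def _example_crypto_key_with_rotation():
  return CryptoKey(
      purpose=CryptoKey.PurposeValueValuesEnum.ENCRYPT_DECRYPT,
      rotationPeriod='2592000s',  # 30 days; must be at least one day
      nextRotationTime='2030-01-01T00:00:00Z',
      labels=CryptoKey.LabelsValue(additionalProperties=[
          CryptoKey.LabelsValue.AdditionalProperty(key='team', value='alpha'),
      ]))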
class CryptoKeyVersion(_messages.Message):
"""A CryptoKeyVersion represents an individual cryptographic key, and the
associated key material. It can be used for cryptographic operations either
directly, or via its parent CryptoKey, in which case the server will choose
the appropriate version for the operation. For security reasons, the raw
cryptographic key material represented by a CryptoKeyVersion can never be
viewed or exported. It can only be used to encrypt or decrypt data when an
authorized user or application invokes Cloud KMS.
Enums:
StateValueValuesEnum: The current state of the CryptoKeyVersion.
Fields:
createTime: Output only. The time at which this CryptoKeyVersion was
created.
destroyEventTime: Output only. The time this CryptoKeyVersion's key
material was destroyed. Only present if state is DESTROYED.
destroyTime: Output only. The time this CryptoKeyVersion's key material is
scheduled for destruction. Only present if state is DESTROY_SCHEDULED.
name: Output only. The resource name for this CryptoKeyVersion in the
format
`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.
state: The current state of the CryptoKeyVersion.
"""
class StateValueValuesEnum(_messages.Enum):
"""The current state of the CryptoKeyVersion.
Values:
CRYPTO_KEY_VERSION_STATE_UNSPECIFIED: Not specified.
ENABLED: This version may be used in Encrypt and Decrypt requests.
DISABLED: This version may not be used, but the key material is still
available, and the version can be placed back into the ENABLED state.
DESTROYED: This version is destroyed, and the key material is no longer
stored. A version may not leave this state once entered.
DESTROY_SCHEDULED: This version is scheduled for destruction, and will
be destroyed soon. Call RestoreCryptoKeyVersion to put it back into
the DISABLED state.
"""
CRYPTO_KEY_VERSION_STATE_UNSPECIFIED = 0
ENABLED = 1
DISABLED = 2
DESTROYED = 3
DESTROY_SCHEDULED = 4
createTime = _messages.StringField(1)
destroyEventTime = _messages.StringField(2)
destroyTime = _messages.StringField(3)
name = _messages.StringField(4)
state = _messages.EnumField('StateValueValuesEnum', 5)
class DecryptRequest(_messages.Message):
"""Request message for KeyManagementService.Decrypt.
Fields:
additionalAuthenticatedData: Optional data that must match the data
originally supplied in EncryptRequest.additional_authenticated_data.
ciphertext: Required. The encrypted data originally returned in
EncryptResponse.ciphertext.
"""
additionalAuthenticatedData = _messages.BytesField(1)
ciphertext = _messages.BytesField(2)
class DecryptResponse(_messages.Message):
"""Response message for KeyManagementService.Decrypt.
Fields:
plaintext: The decrypted data originally supplied in
EncryptRequest.plaintext.
"""
plaintext = _messages.BytesField(1)
class DestroyCryptoKeyVersionRequest(_messages.Message):
"""Request message for KeyManagementService.DestroyCryptoKeyVersion."""
class EncryptRequest(_messages.Message):
"""Request message for KeyManagementService.Encrypt.
Fields:
additionalAuthenticatedData: Optional data that, if specified, must also
be provided during decryption through
DecryptRequest.additional_authenticated_data. Must be no larger than
64KiB.
plaintext: Required. The data to encrypt. Must be no larger than 64KiB.
"""
additionalAuthenticatedData = _messages.BytesField(1)
plaintext = _messages.BytesField(2)
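# Illustrative sketch (not part of the generated API): the additional
# authenticated data (AAD) passed to Encrypt must be repeated verbatim in the
# matching Decrypt call, as the docstrings above describe. Values are
# hypothetical, and the ciphertext placeholder stands in for the bytes
# returned in EncryptResponse.ciphertext.
def _example_encrypt_decrypt_pair():
  aad = b'billing-department'
  encrypt_req = EncryptRequest(
      plaintext=b'account number: 12345',
      additionalAuthenticatedData=aad)
  decrypt_req = DecryptRequest(
      ciphertext=b'<ciphertext returned by Encrypt>',
      additionalAuthenticatedData=aad)
  return encrypt_req, decrypt_req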
class EncryptResponse(_messages.Message):
"""Response message for KeyManagementService.Encrypt.
Fields:
ciphertext: The encrypted data.
name: The resource name of the CryptoKeyVersion used in encryption.
"""
ciphertext = _messages.BytesField(1)
name = _messages.StringField(2)
class Expr(_messages.Message):
"""Represents an expression text. Example: title: "User account
presence" description: "Determines whether the request has a user
account" expression: "size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used, e.g., in UIs which allow the
user to enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class KeyRing(_messages.Message):
"""A KeyRing is a toplevel logical grouping of CryptoKeys.
Fields:
createTime: Output only. The time at which this KeyRing was created.
name: Output only. The resource name for the KeyRing in the format
`projects/*/locations/*/keyRings/*`.
"""
createTime = _messages.StringField(1)
name = _messages.StringField(2)
class ListCryptoKeyVersionsResponse(_messages.Message):
"""Response message for KeyManagementService.ListCryptoKeyVersions.
Fields:
cryptoKeyVersions: The list of CryptoKeyVersions.
nextPageToken: A token to retrieve next page of results. Pass this value
in ListCryptoKeyVersionsRequest.page_token to retrieve the next page of
results.
totalSize: The total number of CryptoKeyVersions that matched the query.
"""
cryptoKeyVersions = _messages.MessageField('CryptoKeyVersion', 1, repeated=True)
nextPageToken = _messages.StringField(2)
totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class ListCryptoKeysResponse(_messages.Message):
"""Response message for KeyManagementService.ListCryptoKeys.
Fields:
cryptoKeys: The list of CryptoKeys.
nextPageToken: A token to retrieve next page of results. Pass this value
in ListCryptoKeysRequest.page_token to retrieve the next page of
results.
totalSize: The total number of CryptoKeys that matched the query.
"""
cryptoKeys = _messages.MessageField('CryptoKey', 1, repeated=True)
nextPageToken = _messages.StringField(2)
totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class ListKeyRingsResponse(_messages.Message):
"""Response message for KeyManagementService.ListKeyRings.
Fields:
keyRings: The list of KeyRings.
nextPageToken: A token to retrieve next page of results. Pass this value
in ListKeyRingsRequest.page_token to retrieve the next page of results.
totalSize: The total number of KeyRings that matched the query.
"""
keyRings = _messages.MessageField('KeyRing', 1, repeated=True)
nextPageToken = _messages.StringField(2)
totalSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class ListLocationsResponse(_messages.Message):
"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Location(_messages.Message):
"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: An extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
labels = _messages.MessageField('LabelsValue', 1)
locationId = _messages.StringField(2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
class Policy(_messages.Message):
"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example**
    {
      "bindings": [
        {"role": "roles/owner",
         "members": ["user:[email protected]", "group:[email protected]",
                     "domain:google.com",
                     "serviceAccount:[email protected]"]},
        {"role": "roles/viewer", "members": ["user:[email protected]"]}
      ]
    }
For a description of IAM and its features, see the [IAM developer's
guide](https://cloud.google.com/iam).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
iamOwned: A boolean attribute.
version: Version of the `Policy`. The default version is 0.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
iamOwned = _messages.BooleanField(4)
version = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class RestoreCryptoKeyVersionRequest(_messages.Message):
"""Request message for KeyManagementService.RestoreCryptoKeyVersion."""
class SetIamPolicyRequest(_messages.Message):
"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
modify. Only the fields in the mask will be modified. If no mask is
provided, the following default mask is used: paths: "bindings, etag"
This field is only used by Cloud IAM.
"""
policy = _messages.MessageField('Policy', 1)
updateMask = _messages.StringField(2)
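# Illustrative sketch (not part of the generated API): the etag-based
# read-modify-write cycle described in the Policy docstring. The policy passed
# in is assumed to have been returned by a getIamPolicy call; reusing its etag
# lets the server reject the update if the policy changed in the meantime.
def _example_policy_update(existing_policy):
  updated = Policy(
      bindings=existing_policy.bindings,  # modified bindings would go here
      etag=existing_policy.etag,          # reuse etag for concurrency control
      version=existing_policy.version)
  return SetIamPolicyRequest(policy=updated)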
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class TestIamPermissionsRequest(_messages.Message):
"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class UpdateCryptoKeyPrimaryVersionRequest(_messages.Message):
"""Request message for KeyManagementService.UpdateCryptoKeyPrimaryVersion.
Fields:
cryptoKeyVersionId: The id of the child CryptoKeyVersion to use as
primary.
"""
cryptoKeyVersionId = _messages.StringField(1)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
# ==== End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/third_party/kms_apitools/cloudkms_v1_messages.py (source: pypi) ====
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'iamcredentials'
class GenerateAccessTokenRequest(_messages.Message):
r"""A GenerateAccessTokenRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must
have the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
lifetime: The desired lifetime duration of the access token in seconds.
Must be set to a value less than or equal to 3600 (1 hour). If a value
is not specified, the token's lifetime will be set to a default value of
one hour.
scope: Code to identify the scopes to be included in the OAuth 2.0 access
token. See https://developers.google.com/identity/protocols/googlescopes
for more information. At least one value required.
"""
delegates = _messages.StringField(1, repeated=True)
lifetime = _messages.StringField(2)
scope = _messages.StringField(3, repeated=True)
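# Illustrative sketch (not part of the generated API): a token request routed
# through a single delegate, using the delegate name format and the
# 3600-second lifetime cap described above. The account email is hypothetical,
# and the '3600s' Duration string form is an assumption.
def _example_generate_access_token_request():
  return GenerateAccessTokenRequest(
      delegates=[
          'projects/-/serviceAccounts/[email protected]',
      ],
      lifetime='3600s',
      scope=['https://www.googleapis.com/auth/cloud-platform'])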
class GenerateAccessTokenResponse(_messages.Message):
r"""A GenerateAccessTokenResponse object.
Fields:
accessToken: The OAuth 2.0 access token.
expireTime: Token expiration time. The expiration time is always set.
"""
accessToken = _messages.StringField(1)
expireTime = _messages.StringField(2)
class GenerateIdTokenRequest(_messages.Message):
r"""A GenerateIdTokenRequest object.
Fields:
audience: The audience for the token, such as the API or account that this
token grants access to.
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must
have the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
includeEmail: Include the service account email in the token. If set to
`true`, the token will contain `email` and `email_verified` claims.
"""
audience = _messages.StringField(1)
delegates = _messages.StringField(2, repeated=True)
includeEmail = _messages.BooleanField(3)
class GenerateIdTokenResponse(_messages.Message):
r"""A GenerateIdTokenResponse object.
Fields:
token: The OpenId Connect ID token.
"""
token = _messages.StringField(1)
class IamcredentialsProjectsServiceAccountsGenerateAccessTokenRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsGenerateAccessTokenRequest
object.
Fields:
generateAccessTokenRequest: A GenerateAccessTokenRequest resource to be
passed as the request body.
name: The resource name of the service account for which the credentials
are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
"""
generateAccessTokenRequest = _messages.MessageField('GenerateAccessTokenRequest', 1)
name = _messages.StringField(2, required=True)
class IamcredentialsProjectsServiceAccountsGenerateIdTokenRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsGenerateIdTokenRequest object.
Fields:
generateIdTokenRequest: A GenerateIdTokenRequest resource to be passed as
the request body.
name: The resource name of the service account for which the credentials
are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
"""
generateIdTokenRequest = _messages.MessageField('GenerateIdTokenRequest', 1)
name = _messages.StringField(2, required=True)
class IamcredentialsProjectsServiceAccountsSignBlobRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsSignBlobRequest object.
Fields:
name: The resource name of the service account for which the credentials
are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
signBlobRequest: A SignBlobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signBlobRequest = _messages.MessageField('SignBlobRequest', 2)
class IamcredentialsProjectsServiceAccountsSignJwtRequest(_messages.Message):
r"""A IamcredentialsProjectsServiceAccountsSignJwtRequest object.
Fields:
name: The resource name of the service account for which the credentials
are requested, in the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
signJwtRequest: A SignJwtRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signJwtRequest = _messages.MessageField('SignJwtRequest', 2)
class SignBlobRequest(_messages.Message):
r"""A SignBlobRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must
have the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
payload: The bytes to sign.
"""
delegates = _messages.StringField(1, repeated=True)
payload = _messages.BytesField(2)
class SignBlobResponse(_messages.Message):
r"""A SignBlobResponse object.
Fields:
keyId: The ID of the key used to sign the blob.
signedBlob: The signed blob.
"""
keyId = _messages.StringField(1)
signedBlob = _messages.BytesField(2)
class SignJwtRequest(_messages.Message):
r"""A SignJwtRequest object.
Fields:
delegates: The sequence of service accounts in a delegation chain. Each
service account must be granted the
`roles/iam.serviceAccountTokenCreator` role on its next service account
in the chain. The last service account in the chain must be granted the
`roles/iam.serviceAccountTokenCreator` role on the service account that
is specified in the `name` field of the request. The delegates must
have the following format:
`projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
wildcard character is required; replacing it with a project ID is
invalid.
payload: The JWT payload to sign: a JSON object that contains a JWT Claims
Set.
"""
delegates = _messages.StringField(1, repeated=True)
payload = _messages.StringField(2)
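# Illustrative sketch (not part of the generated API): the payload of a
# SignJwtRequest is a JSON-serialized JWT Claims Set, as described above.
# The claim values are hypothetical.
def _example_sign_jwt_request():
  import json  # standard library; used only to serialize the claims set
  claims = {
      'iss': '[email protected]',
      'sub': '[email protected]',
      'aud': 'https://example.com/api',
      'exp': 1893456000,
  }
  return SignJwtRequest(payload=json.dumps(claims))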
class SignJwtResponse(_messages.Message):
r"""A SignJwtResponse object.
Fields:
keyId: The ID of the key used to sign the JWT.
signedJwt: The signed JWT.
"""
keyId = _messages.StringField(1)
signedJwt = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
# ==== End of file: /sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/third_party/iamcredentials_apitools/iamcredentials_v1_messages.py (source: pypi) ====
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'pubsub'
class AcknowledgeRequest(_messages.Message):
"""Request for the Acknowledge method.
Fields:
ackIds: The acknowledgment IDs for the messages being acknowledged, as
returned by the Pub/Sub system in the `Pull` response. Must not be
empty.
"""
ackIds = _messages.StringField(1, repeated=True)
class Binding(_messages.Message):
"""Associates `members` with a `role`.
Fields:
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `[email protected]`
or `[email protected]`. * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
[email protected]`. * `group:{emailid}`: An email address
that represents a Google group. For example, `[email protected]`.
* `domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`. Required
"""
members = _messages.StringField(1, repeated=True)
role = _messages.StringField(2)
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class ListSubscriptionsResponse(_messages.Message):
"""Response for the `ListSubscriptions` method.
Fields:
nextPageToken: If not empty, indicates that there may be more
subscriptions that match the request; this value should be passed in a
new `ListSubscriptionsRequest` to get more subscriptions.
subscriptions: The subscriptions that match the request.
"""
nextPageToken = _messages.StringField(1)
subscriptions = _messages.MessageField('Subscription', 2, repeated=True)
class ListTopicSubscriptionsResponse(_messages.Message):
"""Response for the `ListTopicSubscriptions` method.
Fields:
nextPageToken: If not empty, indicates that there may be more
subscriptions that match the request; this value should be passed in a
new `ListTopicSubscriptionsRequest` to get more subscriptions.
subscriptions: The names of the subscriptions that match the request.
"""
nextPageToken = _messages.StringField(1)
subscriptions = _messages.StringField(2, repeated=True)
class ListTopicsResponse(_messages.Message):
"""Response for the `ListTopics` method.
Fields:
nextPageToken: If not empty, indicates that there may be more topics that
match the request; this value should be passed in a new
`ListTopicsRequest`.
topics: The resulting topics.
"""
nextPageToken = _messages.StringField(1)
topics = _messages.MessageField('Topic', 2, repeated=True)
class ModifyAckDeadlineRequest(_messages.Message):
"""Request for the ModifyAckDeadline method.
Fields:
ackDeadlineSeconds: The new ack deadline with respect to the time this
request was sent to the Pub/Sub system. For example, if the value is 10,
the new ack deadline will expire 10 seconds after the
`ModifyAckDeadline` call was made. Specifying zero may immediately make
the message available for another pull request. The minimum deadline you
can specify is 0 seconds. The maximum deadline you can specify is 600
seconds (10 minutes).
ackIds: List of acknowledgment IDs.
"""
ackDeadlineSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
ackIds = _messages.StringField(2, repeated=True)
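# Illustrative sketch (not part of the generated API): extending the ack
# deadline to the 600-second maximum for two messages, within the limits
# described above. The ack IDs are hypothetical.
def _example_modify_ack_deadline_request():
  return ModifyAckDeadlineRequest(
      ackDeadlineSeconds=600,
      ackIds=['ack-id-1', 'ack-id-2'])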
class ModifyPushConfigRequest(_messages.Message):
"""Request for the ModifyPushConfig method.
Fields:
pushConfig: The push configuration for future deliveries. An empty
`pushConfig` indicates that the Pub/Sub system should stop pushing
messages from the given subscription and allow messages to be pulled and
acknowledged - effectively pausing the subscription if `Pull` or
`StreamingPull` is not called.
"""
pushConfig = _messages.MessageField('PushConfig', 1)
class Policy(_messages.Message):
"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example**
    {
      "bindings": [
        {"role": "roles/owner",
         "members": ["user:[email protected]", "group:[email protected]",
                     "domain:google.com",
                     "serviceAccount:[email protected]"]},
        {"role": "roles/viewer", "members": ["user:[email protected]"]}
      ]
    }
For a description of IAM and its features, see the [IAM developer's
guide](https://cloud.google.com/iam).
Fields:
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
version: Version of the `Policy`. The default version is 0.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class PublishRequest(_messages.Message):
"""Request for the Publish method.
Fields:
messages: The messages to publish.
"""
messages = _messages.MessageField('PubsubMessage', 1, repeated=True)
class PublishResponse(_messages.Message):
"""Response for the `Publish` method.
Fields:
messageIds: The server-assigned ID of each published message, in the same
order as the messages in the request. IDs are guaranteed to be unique
within the topic.
"""
messageIds = _messages.StringField(1, repeated=True)
class PubsubMessage(_messages.Message):
"""A message data and its attributes. The message payload must not be empty;
it must contain either a non-empty data field, or at least one attribute.
Messages:
AttributesValue: Optional attributes for this message.
Fields:
attributes: Optional attributes for this message.
data: The message payload.
messageId: ID of this message, assigned by the server when the message is
published. Guaranteed to be unique within the topic. This value may be
read by a subscriber that receives a `PubsubMessage` via a `Pull` call
or a push delivery. It must not be populated by the publisher in a
`Publish` call.
publishTime: The time at which the message was published, populated by the
server when it receives the `Publish` call. It must not be populated by
the publisher in a `Publish` call.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class AttributesValue(_messages.Message):
"""Optional attributes for this message.
Messages:
AdditionalProperty: An additional property for an AttributesValue object.
Fields:
additionalProperties: Additional properties of type AttributesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a AttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
attributes = _messages.MessageField('AttributesValue', 1)
data = _messages.BytesField(2)
messageId = _messages.StringField(3)
publishTime = _messages.StringField(4)
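# Illustrative sketch (not part of the generated API): a message carrying both
# a data payload and one attribute, which satisfies the "non-empty data or at
# least one attribute" rule above. Values are hypothetical; messageId and
# publishTime are assigned by the server and therefore left unset.
def _example_pubsub_message():
  return PubsubMessage(
      data=b'order #42 shipped',
      attributes=PubsubMessage.AttributesValue(additionalProperties=[
          PubsubMessage.AttributesValue.AdditionalProperty(
              key='origin', value='fulfillment-service'),
      ]))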
class PubsubProjectsSnapshotsGetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsSnapshotsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class PubsubProjectsSnapshotsSetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsSnapshotsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class PubsubProjectsSnapshotsTestIamPermissionsRequest(_messages.Message):
"""A PubsubProjectsSnapshotsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class PubsubProjectsSubscriptionsAcknowledgeRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsAcknowledgeRequest object.
Fields:
acknowledgeRequest: A AcknowledgeRequest resource to be passed as the
request body.
subscription: The subscription whose message is being acknowledged. Format
is `projects/{project}/subscriptions/{sub}`.
"""
acknowledgeRequest = _messages.MessageField('AcknowledgeRequest', 1)
subscription = _messages.StringField(2, required=True)
class PubsubProjectsSubscriptionsDeleteRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsDeleteRequest object.
Fields:
subscription: The subscription to delete. Format is
`projects/{project}/subscriptions/{sub}`.
"""
subscription = _messages.StringField(1, required=True)
class PubsubProjectsSubscriptionsGetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class PubsubProjectsSubscriptionsGetRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsGetRequest object.
Fields:
subscription: The name of the subscription to get. Format is
`projects/{project}/subscriptions/{sub}`.
"""
subscription = _messages.StringField(1, required=True)
class PubsubProjectsSubscriptionsListRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsListRequest object.
Fields:
pageSize: Maximum number of subscriptions to return.
pageToken: The value returned by the last `ListSubscriptionsResponse`;
indicates that this is a continuation of a prior `ListSubscriptions`
call, and that the system should return the next page of data.
project: The name of the cloud project that subscriptions belong to.
Format is `projects/{project}`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
project = _messages.StringField(3, required=True)
class PubsubProjectsSubscriptionsModifyAckDeadlineRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsModifyAckDeadlineRequest object.
Fields:
modifyAckDeadlineRequest: A ModifyAckDeadlineRequest resource to be passed
as the request body.
subscription: The name of the subscription. Format is
`projects/{project}/subscriptions/{sub}`.
"""
modifyAckDeadlineRequest = _messages.MessageField('ModifyAckDeadlineRequest', 1)
subscription = _messages.StringField(2, required=True)
class PubsubProjectsSubscriptionsModifyPushConfigRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsModifyPushConfigRequest object.
Fields:
modifyPushConfigRequest: A ModifyPushConfigRequest resource to be passed
as the request body.
subscription: The name of the subscription. Format is
`projects/{project}/subscriptions/{sub}`.
"""
modifyPushConfigRequest = _messages.MessageField('ModifyPushConfigRequest', 1)
subscription = _messages.StringField(2, required=True)
class PubsubProjectsSubscriptionsPullRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsPullRequest object.
Fields:
pullRequest: A PullRequest resource to be passed as the request body.
subscription: The subscription from which messages should be pulled.
Format is `projects/{project}/subscriptions/{sub}`.
"""
pullRequest = _messages.MessageField('PullRequest', 1)
subscription = _messages.StringField(2, required=True)
class PubsubProjectsSubscriptionsSetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class PubsubProjectsSubscriptionsTestIamPermissionsRequest(_messages.Message):
"""A PubsubProjectsSubscriptionsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class PubsubProjectsTopicsDeleteRequest(_messages.Message):
"""A PubsubProjectsTopicsDeleteRequest object.
Fields:
topic: Name of the topic to delete. Format is
`projects/{project}/topics/{topic}`.
"""
topic = _messages.StringField(1, required=True)
class PubsubProjectsTopicsGetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsTopicsGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class PubsubProjectsTopicsGetRequest(_messages.Message):
"""A PubsubProjectsTopicsGetRequest object.
Fields:
topic: The name of the topic to get. Format is
`projects/{project}/topics/{topic}`.
"""
topic = _messages.StringField(1, required=True)
class PubsubProjectsTopicsListRequest(_messages.Message):
"""A PubsubProjectsTopicsListRequest object.
Fields:
pageSize: Maximum number of topics to return.
pageToken: The value returned by the last `ListTopicsResponse`; indicates
that this is a continuation of a prior `ListTopics` call, and that the
system should return the next page of data.
project: The name of the cloud project that topics belong to. Format is
`projects/{project}`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
project = _messages.StringField(3, required=True)
class PubsubProjectsTopicsPublishRequest(_messages.Message):
"""A PubsubProjectsTopicsPublishRequest object.
Fields:
publishRequest: A PublishRequest resource to be passed as the request
body.
topic: The messages in the request will be published on this topic. Format
is `projects/{project}/topics/{topic}`.
"""
publishRequest = _messages.MessageField('PublishRequest', 1)
topic = _messages.StringField(2, required=True)
class PubsubProjectsTopicsSetIamPolicyRequest(_messages.Message):
"""A PubsubProjectsTopicsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class PubsubProjectsTopicsSubscriptionsListRequest(_messages.Message):
"""A PubsubProjectsTopicsSubscriptionsListRequest object.
Fields:
pageSize: Maximum number of subscription names to return.
pageToken: The value returned by the last
`ListTopicSubscriptionsResponse`; indicates that this is a continuation
of a prior `ListTopicSubscriptions` call, and that the system should
return the next page of data.
topic: The name of the topic that subscriptions are attached to. Format is
`projects/{project}/topics/{topic}`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
topic = _messages.StringField(3, required=True)
class PubsubProjectsTopicsTestIamPermissionsRequest(_messages.Message):
"""A PubsubProjectsTopicsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class PullRequest(_messages.Message):
"""Request for the `Pull` method.
Fields:
maxMessages: The maximum number of messages returned for this request. The
Pub/Sub system may return fewer than the number specified.
returnImmediately: If this field is set to true, the system will respond
immediately even if there are no messages available to return in the
`Pull` response. Otherwise, the system may wait (for a bounded amount of
time) until at least one message is available, rather than returning no
messages. The client may cancel the request if it does not wish to wait
any longer for the response.
"""
maxMessages = _messages.IntegerField(1, variant=_messages.Variant.INT32)
returnImmediately = _messages.BooleanField(2)
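# Illustrative sketch (not part of the generated API): a bounded pull that
# returns immediately instead of waiting for messages, wrapped in the request
# message that names the subscription. The project and subscription names are
# hypothetical.
def _example_pull_request():
  return PubsubProjectsSubscriptionsPullRequest(
      pullRequest=PullRequest(maxMessages=10, returnImmediately=True),
      subscription='projects/example-project/subscriptions/example-sub')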
class PullResponse(_messages.Message):
"""Response for the `Pull` method.
Fields:
receivedMessages: Received Pub/Sub messages. The Pub/Sub system will
return zero messages if there are no more available in the backlog. The
Pub/Sub system may return fewer than the `maxMessages` requested even if
there are more messages available in the backlog.
"""
receivedMessages = _messages.MessageField('ReceivedMessage', 1, repeated=True)
class PushConfig(_messages.Message):
"""Configuration for a push delivery endpoint.
Messages:
AttributesValue: Endpoint configuration attributes. Every endpoint has a
set of API supported attributes that can be used to control different
aspects of the message delivery. The currently supported attribute is
`x-goog-version`, which you can use to change the format of the pushed
message. This attribute indicates the version of the data expected by
the endpoint. This controls the shape of the pushed message (i.e., its
fields and metadata). The endpoint version is based on the version of
the Pub/Sub API. If not present during the `CreateSubscription` call,
it will default to the version of the API used to make such call. If not
present during a `ModifyPushConfig` call, its value will not be changed.
`GetSubscription` calls will always return a valid version, even if the
subscription was created without this attribute. The possible values
for this attribute are: * `v1beta1`: uses the push format defined in
the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format
defined in the v1 Pub/Sub API.
Fields:
attributes: Endpoint configuration attributes. Every endpoint has a set
of API supported attributes that can be used to control different
aspects of the message delivery. The currently supported attribute is
`x-goog-version`, which you can use to change the format of the pushed
message. This attribute indicates the version of the data expected by
the endpoint. This controls the shape of the pushed message (i.e., its
fields and metadata). The endpoint version is based on the version of
the Pub/Sub API. If not present during the `CreateSubscription` call,
it will default to the version of the API used to make such call. If not
present during a `ModifyPushConfig` call, its value will not be changed.
`GetSubscription` calls will always return a valid version, even if the
subscription was created without this attribute. The possible values
for this attribute are: * `v1beta1`: uses the push format defined in
the v1beta1 Pub/Sub API. * `v1` or `v1beta2`: uses the push format
defined in the v1 Pub/Sub API.
pushEndpoint: A URL locating the endpoint to which messages should be
pushed. For example, a Webhook endpoint might use
"https://example.com/push".
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class AttributesValue(_messages.Message):
"""Endpoint configuration attributes. Every endpoint has a set of API
supported attributes that can be used to control different aspects of the
message delivery. The currently supported attribute is `x-goog-version`,
which you can use to change the format of the pushed message. This
attribute indicates the version of the data expected by the endpoint. This
controls the shape of the pushed message (i.e., its fields and metadata).
The endpoint version is based on the version of the Pub/Sub API. If not
present during the `CreateSubscription` call, it will default to the
version of the API used to make such call. If not present during a
`ModifyPushConfig` call, its value will not be changed. `GetSubscription`
calls will always return a valid version, even if the subscription was
created without this attribute. The possible values for this attribute
are: * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub
API. * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub
API.
Messages:
AdditionalProperty: An additional property for an AttributesValue object.
Fields:
additionalProperties: Additional properties of type AttributesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a AttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
attributes = _messages.MessageField('AttributesValue', 1)
pushEndpoint = _messages.StringField(2)
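# Illustrative sketch (not part of the generated API): a push endpoint pinned
# to the v1 push format via the `x-goog-version` attribute described above.
# The endpoint URL is hypothetical.
def _example_push_config():
  return PushConfig(
      pushEndpoint='https://example.com/push',
      attributes=PushConfig.AttributesValue(additionalProperties=[
          PushConfig.AttributesValue.AdditionalProperty(
              key='x-goog-version', value='v1'),
      ]))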
class ReceivedMessage(_messages.Message):
"""A message and its corresponding acknowledgment ID.
Fields:
ackId: This ID can be used to acknowledge the received message.
message: The message.
"""
ackId = _messages.StringField(1)
message = _messages.MessageField('PubsubMessage', 2)
class SetIamPolicyRequest(_messages.Message):
"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class Subscription(_messages.Message):
"""A subscription resource.
Fields:
ackDeadlineSeconds: This value is the maximum time after a subscriber
receives a message before the subscriber should acknowledge the message.
After message delivery but before the ack deadline expires and before
the message is acknowledged, it is an outstanding message and will not
be delivered again during that time (on a best-effort basis). For pull
subscriptions, this value is used as the initial value for the ack
deadline. To override this value for a given message, call
`ModifyAckDeadline` with the corresponding `ack_id` if using non-
streaming pull or send the `ack_id` in a
`StreamingModifyAckDeadlineRequest` if using streaming pull. The minimum
custom deadline you can specify is 10 seconds. The maximum custom
deadline you can specify is 600 seconds (10 minutes). If this parameter
is 0, a default value of 10 seconds is used. For push delivery, this
value is also used to set the request timeout for the call to the push
endpoint. If the subscriber never acknowledges the message, the Pub/Sub
system will eventually redeliver the message.
name: The name of the subscription. It must have the format
`"projects/{project}/subscriptions/{subscription}"`. `{subscription}`
must start with a letter, and contain only letters (`[A-Za-z]`), numbers
(`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),
plus (`+`) or percent signs (`%`). It must be between 3 and 255
characters in length, and it must not start with `"goog"`.
pushConfig: If push delivery is used with this subscription, this field is
used to configure it. An empty `pushConfig` signifies that the
subscriber will pull and ack messages using API methods.
topic: The name of the topic from which this subscription is receiving
messages. Format is `projects/{project}/topics/{topic}`. The value of
this field will be `_deleted-topic_` if the topic has been deleted.
"""
ackDeadlineSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
name = _messages.StringField(2)
pushConfig = _messages.MessageField('PushConfig', 3)
topic = _messages.StringField(4)
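# Illustrative sketch (not part of the generated API): a pull subscription
# (pushConfig left empty) with a custom ack deadline inside the 10-600 second
# range described above. The resource names are hypothetical.
def _example_pull_subscription():
  return Subscription(
      name='projects/example-project/subscriptions/example-sub',
      topic='projects/example-project/topics/example-topic',
      ackDeadlineSeconds=60)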
class TestIamPermissionsRequest(_messages.Message):
"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class Topic(_messages.Message):
"""A topic resource.
Fields:
name: The name of the topic. It must have the format
`"projects/{project}/topics/{topic}"`. `{topic}` must start with a
letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes
(`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or
percent signs (`%`). It must be between 3 and 255 characters in length,
and it must not start with `"goog"`.
"""
name = _messages.StringField(1)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
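# Illustrative usage sketch (not part of the generated module): apitools
# message classes accept their fields as keyword arguments, so a Subscription
# can be constructed directly. The project, topic, and subscription names
# below are placeholders.
def _example_build_subscription():
  """Builds a sample Subscription message with placeholder names."""
  return Subscription(
      name='projects/my-project/subscriptions/my-subscription',
      topic='projects/my-project/topics/my-topic',
      ackDeadlineSeconds=30)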
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/third_party/pubsub_apitools/pubsub_v1_messages.py
| 0.811825 | 0.327144 |
pubsub_v1_messages.py
|
pypi
|
"""Helper functions for Split Trust Encryption Tool integration."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
from gslib import storage_url
from gslib.utils import execution_util
from gslib.utils import temporary_file_util
from boto import config
class StetSubcommandName(object):
"""Enum class for available STET subcommands."""
ENCRYPT = 'encrypt'
DECRYPT = 'decrypt'
def _get_stet_binary_from_path():
"""Retrieves STET binary from path if available. Python 2 compatible."""
for path_directory in os.getenv('PATH').split(os.path.pathsep):
binary_path = os.path.join(path_directory, 'stet')
if os.path.exists(binary_path):
return binary_path
def _stet_transform(subcommand, blob_id, in_file_path, out_file_path, logger):
"""Runs a STET transform on a file.
Encrypts for uploads. Decrypts for downloads. Automatically populates
flags for the STET binary.
Args:
subcommand (StetSubcommandName): Subcommand to call on STET binary.
blob_id (str): Cloud URL that binary uses for validation.
    in_file_path (str): Source file to be transformed.
    out_file_path (str): Where to write the result of the transform.
logger (logging.Logger): For logging STET binary output.
Raises:
KeyError: STET binary or config could not be found.
"""
binary_path = config.get('GSUtil', 'stet_binary_path',
_get_stet_binary_from_path())
if not binary_path:
raise KeyError('Could not find STET binary in boto config or PATH.')
command_args = [os.path.expanduser(binary_path), subcommand]
config_path = config.get('GSUtil', 'stet_config_path', None)
if config_path:
command_args.append('--config-file=' + os.path.expanduser(config_path))
command_args.extend(['--blob-id=' + blob_id, in_file_path, out_file_path])
_, stderr = execution_util.ExecuteExternalCommand(command_args)
logger.debug(stderr)
def encrypt_upload(source_url, destination_url, logger):
"""Encrypts a file with STET binary before upload.
Args:
source_url (StorageUrl): Copy source.
destination_url (StorageUrl): Copy destination.
logger (logging.Logger): For logging STET binary output.
Returns:
stet_temporary_file_url (StorageUrl): Path to STET-encrypted file.
"""
in_file = source_url.object_name
out_file = temporary_file_util.GetStetTempFileName(source_url)
blob_id = destination_url.url_string
_stet_transform(StetSubcommandName.ENCRYPT, blob_id, in_file, out_file,
logger)
return storage_url.StorageUrlFromString(out_file)
def decrypt_download(source_url, destination_url, temporary_file_name, logger):
"""STET-decrypts downloaded file.
Args:
source_url (StorageUrl): Copy source.
destination_url (StorageUrl): Copy destination.
temporary_file_name (str): Path to temporary file used for download.
logger (logging.Logger): For logging STET binary output.
"""
in_file = temporary_file_name
out_file = temporary_file_util.GetStetTempFileName(destination_url)
blob_id = source_url.url_string
_stet_transform(StetSubcommandName.DECRYPT, blob_id, in_file, out_file,
logger)
shutil.move(out_file, in_file)
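# Illustrative sketch (not part of the original module): the lookup in
# _stet_transform above prefers the boto config value 'stet_binary_path' and
# falls back to a PATH scan for a binary named 'stet'. The helper below
# mirrors that resolution order using only the standard library so it can be
# exercised without a boto config; its name and argument are hypothetical.
def _example_resolve_stet_binary(configured_path=None):
  """Returns a configured STET binary path if given, else searches PATH."""
  if configured_path:
    return os.path.expanduser(configured_path)
  for path_directory in os.getenv('PATH', '').split(os.path.pathsep):
    candidate = os.path.join(path_directory, 'stet')
    if os.path.exists(candidate):
      return candidate
  return None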
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/stet_util.py
| 0.888831 | 0.184565 |
stet_util.py
|
pypi
|
"""Shared utility methods that calculate, convert, and simplify units."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import math
import re
import six
if six.PY3:
long = int
# Binary exponentiation strings.
_EXP_STRINGS = [
(0, 'B', 'bit'),
(10, 'KiB', 'Kibit', 'K'),
(20, 'MiB', 'Mibit', 'M'),
(30, 'GiB', 'Gibit', 'G'),
(40, 'TiB', 'Tibit', 'T'),
(50, 'PiB', 'Pibit', 'P'),
(60, 'EiB', 'Eibit', 'E'),
]
_EXP_TEN_STRING = [
(3, 'k'),
(6, 'm'),
(9, 'b'),
(12, 't'),
(15, 'q'),
]
# Define this method before constants below, as some call it to init themselves.
def _GenerateSuffixRegex():
"""Creates a suffix regex for human-readable byte counts."""
human_bytes_re = r'(?P<num>\d*\.\d+|\d+)\s*(?P<suffix>%s)?'
suffixes = []
suffix_to_si = {}
for i, si in enumerate(_EXP_STRINGS):
si_suffixes = [s.lower() for s in list(si)[1:]]
for suffix in si_suffixes:
suffix_to_si[suffix] = i
suffixes.extend(si_suffixes)
human_bytes_re %= '|'.join(suffixes)
matcher = re.compile(human_bytes_re)
return suffix_to_si, matcher
# TODO: These should include the unit in the name, e.g. BYTES_PER_KIB, or
# BYTES_IN_ONE_KIB.
ONE_KIB = 1024
ONE_MIB = 1024 * ONE_KIB
ONE_GIB = 1024 * ONE_MIB
# TODO: Remove 2, 8, 10 MIB vars.
TWO_MIB = 2 * ONE_MIB
EIGHT_MIB = 8 * ONE_MIB
TEN_MIB = 10 * ONE_MIB
SECONDS_PER_DAY = long(60 * 60 * 24)
SUFFIX_TO_SI, MATCH_HUMAN_BYTES = _GenerateSuffixRegex()
def _RoundToNearestExponent(num):
i = 0
while i + 1 < len(_EXP_STRINGS) and num >= (2**_EXP_STRINGS[i + 1][0]):
i += 1
return i, round(float(num) / 2.0**_EXP_STRINGS[i][0], 2)
def CalculateThroughput(total_bytes_transferred, total_elapsed_time):
"""Calculates throughput and checks for a small total_elapsed_time.
Args:
total_bytes_transferred: Total bytes transferred in a period of time.
total_elapsed_time: The amount of time elapsed in seconds.
Returns:
The throughput as a float.
"""
if total_elapsed_time < 0.01:
total_elapsed_time = 0.01
return float(total_bytes_transferred) / float(total_elapsed_time)
def DecimalShort(num):
"""Creates a shorter string version for a given number of objects.
Args:
num: The number of objects to be shortened.
Returns:
    A shortened string version of the number: the number is divided by the
    largest scale (thousand, million, billion, ...) it reaches, and the
    quotient is formatted with one decimal place plus the matching suffix.
    The result is at most 6 characters long, assuming num < 10^18.
    Example: 123456789 => 123.5m
"""
for divisor_exp, suffix in reversed(_EXP_TEN_STRING):
if num >= 10**divisor_exp:
quotient = '%.1lf' % (float(num) / 10**divisor_exp)
return quotient + suffix
return str(num)
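# A few worked examples for DecimalShort (illustrative only, not part of the
# original module): values below the smallest scale are returned unchanged,
# larger values are divided by the largest matching power of ten and suffixed.
def _example_decimal_short():
  assert DecimalShort(999) == '999'
  assert DecimalShort(123456789) == '123.5m'
  assert DecimalShort(2500000000) == '2.5b'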
def DivideAndCeil(dividend, divisor):
"""Returns ceil(dividend / divisor).
Takes care to avoid the pitfalls of floating point arithmetic that could
otherwise yield the wrong result for large numbers.
Args:
dividend: Dividend for the operation.
divisor: Divisor for the operation.
Returns:
Quotient.
"""
quotient = dividend // divisor
if (dividend % divisor) != 0:
quotient += 1
return quotient
def HumanReadableToBytes(human_string):
"""Tries to convert a human-readable string to a number of bytes.
Args:
human_string: A string supplied by user, e.g. '1M', '3 GiB'.
Returns:
An integer containing the number of bytes.
Raises:
ValueError: on an invalid string.
"""
human_string = human_string.lower()
m = MATCH_HUMAN_BYTES.match(human_string)
if m:
num = float(m.group('num'))
if m.group('suffix'):
power = _EXP_STRINGS[SUFFIX_TO_SI[m.group('suffix')]][0]
num *= (2.0**power)
num = int(round(num))
return num
raise ValueError('Invalid byte string specified: %s' % human_string)
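# Quick sanity examples (illustrative only, not part of the original module)
# showing the suffix handling in HumanReadableToBytes and how it round-trips
# with MakeHumanReadable, which is defined later in this module.
def _example_human_readable_bytes():
  assert HumanReadableToBytes('1M') == 1048576
  assert HumanReadableToBytes('2.5 GiB') == int(round(2.5 * ONE_GIB))
  assert MakeHumanReadable(1048576) == '1 MiB'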
def HumanReadableWithDecimalPlaces(number, decimal_places=1):
"""Creates a human readable format for bytes with fixed decimal places.
Args:
number: The number of bytes.
decimal_places: The number of decimal places.
Returns:
String representing a readable format for number with decimal_places
decimal places.
"""
number_format = MakeHumanReadable(number).split()
num = str(int(round(10**decimal_places * float(number_format[0]))))
if num == '0':
number_format[0] = ('0' +
(('.' +
('0' * decimal_places)) if decimal_places else ''))
else:
num_length = len(num)
if decimal_places:
num = (num[:num_length - decimal_places] + '.' +
num[num_length - decimal_places:])
number_format[0] = num
return ' '.join(number_format)
def MakeBitsHumanReadable(num):
"""Generates human readable string for a number of bits.
Args:
num: The number, in bits.
Returns:
A string form of the number using bit size abbreviations (kbit, Mbit, etc.)
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%g %s' % (rounded_val, _EXP_STRINGS[i][2])
def MakeHumanReadable(num):
"""Generates human readable string for a number of bytes.
Args:
num: The number, in bytes.
Returns:
A string form of the number using size abbreviations (KiB, MiB, etc.).
"""
i, rounded_val = _RoundToNearestExponent(num)
return '%g %s' % (rounded_val, _EXP_STRINGS[i][1])
def Percentile(values, percent, key=lambda x: x):
"""Find the percentile of a list of values.
Taken from: http://code.activestate.com/recipes/511478/
Args:
values: a list of numeric values. Note that the values MUST BE already
sorted.
percent: a float value from 0.0 to 1.0.
key: optional key function to compute value from each element of the list
of values.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(values[int(k)])
d0 = key(values[int(f)]) * (c - k)
d1 = key(values[int(c)]) * (k - f)
return d0 + d1
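# Small worked example for Percentile (illustrative only, not part of the
# original module): with an odd-length sorted list, the 0.5 percentile is the
# middle element, and fractional ranks interpolate between neighbors.
def _example_percentile():
  values = [1, 2, 3, 4, 5]
  assert Percentile(values, 0.5) == 3
  assert Percentile(values, 0.25) == 2
  assert Percentile([], 0.5) is None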
def PrettyTime(remaining_time):
"""Creates a standard version for a given remaining time in seconds.
Created over using strftime because strftime seems to be
more suitable for a datetime object, rather than just a number of
seconds remaining.
Args:
remaining_time: The number of seconds remaining as a float, or a
string/None value indicating time was not correctly calculated.
Returns:
if remaining_time is a valid float, %H:%M:%D time remaining format with
the nearest integer from remaining_time (%H might be higher than 23).
Else, it returns the same message it received.
"""
remaining_time = int(round(remaining_time))
hours = remaining_time // 3600
if hours >= 100:
# Too large to display with precision of minutes and seconds.
# If over 1000, saying 999+ hours should be enough.
return '%d+ hrs' % min(hours, 999)
remaining_time -= (3600 * hours)
minutes = remaining_time // 60
remaining_time -= (60 * minutes)
seconds = remaining_time
return (str('%02d' % hours) + ':' + str('%02d' % minutes) + ':' +
str('%02d' % seconds))
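# Worked examples for PrettyTime (illustrative only, not part of the original
# module): small values format as HH:MM:SS, and anything at or above 100
# hours collapses to an "N+ hrs" form.
def _example_pretty_time():
  assert PrettyTime(0) == '00:00:00'
  assert PrettyTime(3661) == '01:01:01'
  assert PrettyTime(100 * 3600) == '100+ hrs'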
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/unit_util.py
| 0.797517 | 0.368491 |
unit_util.py
|
pypi
|
"""Shared utility structures and methods for manipulating text."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import binascii
import codecs
import os
import sys
import io
import re
import locale
import collections
import random
import six
import string
from six.moves import urllib
from six.moves import range
from gslib.exception import CommandException
from gslib.lazy_wrapper import LazyWrapper
from gslib.utils.constants import UTF8
from gslib.utils.constants import WINDOWS_1252
from gslib.utils.system_util import IS_CP1252
if six.PY3:
long = int
STORAGE_CLASS_SHORTHAND_TO_FULL_NAME = {
# Values should remain uppercase, as required by non-gs providers.
'CL': 'COLDLINE',
'DRA': 'DURABLE_REDUCED_AVAILABILITY',
'NL': 'NEARLINE',
'S': 'STANDARD',
'STD': 'STANDARD',
'A': 'ARCHIVE',
}
VERSION_MATCHER = LazyWrapper(
lambda: re.compile(r'^(?P<maj>\d+)(\.(?P<min>\d+)(?P<suffix>.*))?'))
def AddQueryParamToUrl(url_str, param_name, param_value):
"""Adds a query parameter to a URL string.
Appends a query parameter to the query string portion of a url. If a parameter
with the given name was already present, it is not removed; the new name/value
pair will be appended to the end of the query string. It is assumed that all
arguments will be of type `str` (either ASCII or UTF-8 encoded) or `unicode`.
Note that this method performs no URL-encoding. It is the caller's
responsibility to ensure proper URL encoding of the entire URL; i.e. if the
URL is already URL-encoded, you should pass in URL-encoded values for
param_name and param_value. If the URL is not URL-encoded, you should not pass
in URL-encoded parameters; instead, you could perform URL-encoding using the
URL string returned from this function.
Args:
url_str: (str or unicode) String representing the URL.
param_name: (str or unicode) String key of the query parameter.
param_value: (str or unicode) String value of the query parameter.
Returns:
(str or unicode) A string representing the modified url, of type `unicode`
if the url_str argument was a `unicode`, otherwise a `str` encoded in UTF-8.
"""
scheme, netloc, path, query_str, fragment = urllib.parse.urlsplit(url_str)
query_params = urllib.parse.parse_qsl(query_str, keep_blank_values=True)
query_params.append((param_name, param_value))
new_query_str = '&'.join(['%s=%s' % (k, v) for (k, v) in query_params])
new_url = urllib.parse.urlunsplit(
(scheme, netloc, path, new_query_str, fragment))
return new_url
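# Illustrative example (not part of the original module): an existing query
# string is preserved and the new parameter is appended to it. The URL below
# is a placeholder.
def _example_add_query_param():
  url = AddQueryParamToUrl('https://example.com/path?foo=1',
                           'userProject', 'my-project')
  assert url == 'https://example.com/path?foo=1&userProject=my-project'
  return url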
def CompareVersions(first, second):
"""Compares the first and second gsutil version strings.
For example, 3.33 > 3.7, and 4.1 is a greater major version than 3.33.
Does not handle multiple periods (e.g. 3.3.4) or complicated suffixes
(e.g., 3.3RC4 vs. 3.3RC5). A version string with a suffix is treated as
less than its non-suffix counterpart (e.g. 3.32 > 3.32pre).
Args:
first: First gsutil version string.
second: Second gsutil version string.
Returns:
(g, m):
g is True if first known to be greater than second, else False.
m is True if first known to be greater by at least 1 major version,
else False.
"""
m1 = VERSION_MATCHER().match(str(first))
m2 = VERSION_MATCHER().match(str(second))
# If passed strings we don't know how to handle, be conservative.
if not m1 or not m2:
return (False, False)
major_ver1 = int(m1.group('maj'))
minor_ver1 = int(m1.group('min')) if m1.group('min') else 0
suffix_ver1 = m1.group('suffix')
major_ver2 = int(m2.group('maj'))
minor_ver2 = int(m2.group('min')) if m2.group('min') else 0
suffix_ver2 = m2.group('suffix')
if major_ver1 > major_ver2:
return (True, True)
elif major_ver1 == major_ver2:
if minor_ver1 > minor_ver2:
return (True, False)
elif minor_ver1 == minor_ver2:
return (bool(suffix_ver2) and not suffix_ver1, False)
return (False, False)
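# Worked examples for CompareVersions (illustrative only, not part of the
# original module), mirroring the cases described in the docstring above.
def _example_compare_versions():
  assert CompareVersions('4.1', '3.33') == (True, True)
  assert CompareVersions('3.33', '3.7') == (True, False)
  assert CompareVersions('3.32', '3.32pre') == (True, False)
  assert CompareVersions('3.32pre', '3.32') == (False, False)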
def ConvertRecursiveToFlatWildcard(url_strs):
"""A generator that adds '**' to each url string in url_strs."""
for url_str in url_strs:
yield '%s**' % url_str
def DecodeLongAsString(long_to_convert):
"""Decodes an encoded python long into an ASCII string.
This is used for modeling S3 version_id's as apitools generation.
Args:
    long_to_convert: Long to convert to an ASCII string.
Returns:
String decoded from the input long.
"""
unhexed = binascii.unhexlify(hex(long_to_convert)[2:].rstrip('L'))
return six.ensure_str(unhexed)
def EncodeStringAsLong(string_to_convert):
"""Encodes an ASCII string as a python long.
This is used for modeling S3 version_id's as apitools generation. Because
python longs can be arbitrarily large, this works.
Args:
string_to_convert: ASCII string to convert to a long.
Returns:
Long that represents the input string.
"""
hex_bytestr = codecs.encode(six.ensure_binary(string_to_convert), 'hex_codec')
# Note that `long`/`int` accepts either `bytes` or `unicode` as the
# first arg in both py2 and py3:
return long(hex_bytestr, 16)
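# Round-trip example (illustrative only, not part of the original module):
# EncodeStringAsLong and DecodeLongAsString, both defined above, are inverses
# for ASCII input. The sample value is arbitrary.
def _example_version_id_round_trip():
  encoded = EncodeStringAsLong('abc123')
  assert DecodeLongAsString(encoded) == 'abc123'
  return encoded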
def FixWindowsEncodingIfNeeded(input_str):
"""Attempts to detect Windows CP1252 encoding and convert to UTF8.
Windows doesn't provide a way to set UTF-8 for string encodings; you can set
the system locale (see
http://windows.microsoft.com/en-us/windows/change-system-locale#1TC=windows-7)
  but that takes you to a "Change system locale" dropdown that just lists
  languages (e.g., "English (United States)"). Instead, we're forced to check
  whether encoding as UTF-8 raises an exception and, if so, try converting from
  CP1252 to Unicode.
Args:
input_str: (str or bytes) The input string.
Returns:
(unicode) The converted string or the original, if conversion wasn't needed.
"""
if IS_CP1252:
return six.ensure_text(input_str, WINDOWS_1252)
else:
return six.ensure_text(input_str, UTF8)
def GetPrintableExceptionString(exc):
"""Returns a short Unicode string describing the exception."""
return six.text_type(exc).encode(UTF8) or six.text_type(exc.__class__)
def InsistAscii(string, message):
"""Ensures that the string passed in consists of only ASCII values.
Args:
string: Union[str, unicode, bytes] Text that will be checked for
ASCII values.
message: Union[str, unicode, bytes] Error message, passed into the
exception, in the event that the check on `string` fails.
Returns:
None
Raises:
CommandException
"""
if not all(ord(c) < 128 for c in string):
raise CommandException(message)
def InsistAsciiHeader(header):
"""Checks for ASCII-only characters in `header`.
Also constructs an error message using `header` if the check fails.
Args:
header: Union[str, binary, unicode] Text being checked for ASCII values.
Returns:
None
"""
InsistAscii(header, 'Invalid non-ASCII header (%s).' % header)
def InsistAsciiHeaderValue(header, value):
"""Checks for ASCII-only characters in `value`.
Also constructs an error message using `header` and `value` if the check
fails.
Args:
header: Header name, only used in error message in case of an exception.
value: Union[str, binary, unicode] Text being checked for ASCII values.
Returns:
None
"""
InsistAscii(
value,
'Invalid non-ASCII value (%s) was provided for header %s.\nOnly ASCII '
'characters are allowed in headers other than x-goog-meta- and '
'x-amz-meta- headers' % (repr(value), header))
def InsistOnOrOff(value, message):
"""Ensures that the value passed in consists of only "on" or "off"
Args:
value: (unicode) Unicode string that will be checked for correct text.
message: Union[str, unicode, bytes] Error message passed into the exception
in the event that the check on value fails.
Returns:
None
Raises:
CommandException
"""
if value != 'on' and value != 'off':
raise CommandException(message)
def NormalizeStorageClass(sc):
"""Returns a normalized form of the given storage class name.
Converts the given string to uppercase and expands valid abbreviations to
full storage class names (e.g 'std' would return 'STANDARD'). Note that this
method does not check if the given storage class is valid.
Args:
sc: (str) String representing the storage class's full name or abbreviation.
Returns:
(str) A string representing the full name of the given storage class.
"""
# Use uppercase; storage class argument for the S3 API must be uppercase,
# and it's case-insensitive for GS APIs.
sc = sc.upper()
if sc in STORAGE_CLASS_SHORTHAND_TO_FULL_NAME:
sc = STORAGE_CLASS_SHORTHAND_TO_FULL_NAME[sc]
return sc
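# Illustrative examples (not part of the original module): abbreviations
# expand to full storage class names and everything is uppercased;
# unrecognized values pass through unchanged apart from the case change.
def _example_normalize_storage_class():
  assert NormalizeStorageClass('std') == 'STANDARD'
  assert NormalizeStorageClass('nearline') == 'NEARLINE'
  assert NormalizeStorageClass('dra') == 'DURABLE_REDUCED_AVAILABILITY'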
def PrintableStr(input_val):
"""Return an UTF8-encoded string type, or None if `input_val` is None.
Args:
input_val: (unicode, str, or None) A string-like object or None. This method
simply calls encode() on `input_val` if it is not None; if `input_val`
is not of type "unicode", this will implicitly call decode() with the
default encoding for strings (for Python 2, this is ASCII), then call
encode().
Returns:
(str) A UTF-8 encoded string, or None.
"""
return input_val
def print_to_fd(*objects, **kwargs):
"""A Python 2/3 compatible analogue to the print function.
This function writes text to a file descriptor as the
builtin print function would, favoring unicode encoding.
  Arguments and return values are the same as documented for
  the Python 2 print function.
"""
def _get_args(**kwargs):
"""Validates keyword arguments that would be used in Print
Valid keyword arguments, mirroring print(), are 'sep',
'end', and 'file'. These must be of types string, string,
and file / file interface respectively.
Returns the above kwargs of the above types.
"""
expected_keywords = collections.OrderedDict([('sep', ' '), ('end', '\n'),
('file', sys.stdout)])
for key, value in kwargs.items():
if key not in expected_keywords:
error_msg = ('{} is not a valid keyword argument. '
'Please use one of: {}')
raise KeyError(error_msg.format(key,
' '.join(expected_keywords.keys())))
else:
expected_keywords[key] = value
return expected_keywords.values()
def _get_byte_strings(*objects):
"""Gets a `bytes` string for each item in a list of printable objects."""
byte_objects = []
for item in objects:
if not isinstance(item, (six.binary_type, six.text_type)):
# If the item wasn't bytes or unicode, its __str__ method
# should return one of those types.
item = str(item)
if isinstance(item, six.binary_type):
byte_objects.append(item)
else:
# The item should be unicode. If it's not, ensure_binary()
# will throw a TypeError.
byte_objects.append(six.ensure_binary(item))
return byte_objects
sep, end, file = _get_args(**kwargs)
sep = six.ensure_binary(sep)
end = six.ensure_binary(end)
data = _get_byte_strings(*objects)
data = sep.join(data)
data += end
write_to_fd(file, data)
def write_to_fd(fd, data):
"""Write given data to given file descriptor, doing any conversions needed"""
if six.PY2:
fd.write(data)
return
# PY3 logic:
if isinstance(data, bytes):
if (hasattr(fd, 'mode') and 'b' in fd.mode) or isinstance(fd, io.BytesIO):
fd.write(data)
elif hasattr(fd, 'buffer'):
fd.buffer.write(data)
else:
fd.write(six.ensure_text(data))
elif 'b' in fd.mode:
fd.write(six.ensure_binary(data))
else:
fd.write(data)
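# Illustrative example (not part of the original module): print_to_fd accepts
# the print-style sep/end/file keyword arguments and, as write_to_fd above
# shows, writes bytes when given a binary file-like object such as io.BytesIO.
def _example_print_to_fd():
  out = io.BytesIO()
  print_to_fd('status:', 42, file=out)
  assert out.getvalue() == b'status: 42\n'
  return out.getvalue()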
def RemoveCRLFFromString(input_str):
r"""Returns the input string with all \n and \r removed."""
return re.sub(r'[\r\n]', '', input_str)
def get_random_ascii_chars(size, seed=0):
"""Generates binary string representation of a list of ASCII characters.
Args:
size: Integer quantity of characters to generate.
seed: A seed may be specified for deterministic behavior.
Int 0 is used as the default value.
Returns:
Binary encoded string representation of a list of characters of length
equal to size argument.
"""
random.seed(seed)
contents = str([random.choice(string.ascii_letters) for _ in range(size)])
contents = six.ensure_binary(contents)
random.seed() # Reset the seed for any other tests.
return contents
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/text_util.py
| 0.811527 | 0.236648 |
text_util.py
|
pypi
|
"""Helper functions for dealing with encryption keys used with cloud APIs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import base64
import binascii
from hashlib import sha256
import re
import sys
import six
from gslib.exception import CommandException
from gslib.lazy_wrapper import LazyWrapper
MAX_DECRYPTION_KEYS = 100
VALID_CMEK_RE = LazyWrapper(
lambda: re.compile('projects/([^/]+)/'
'locations/([a-zA-Z0-9_-]{1,63})/'
'keyRings/([a-zA-Z0-9_-]{1,63})/'
'cryptoKeys/([a-zA-Z0-9_-]{1,63})$'))
class CryptoKeyType(object):
"""Enum of valid types of encryption keys used with cloud API requests."""
CSEK = 'CSEK'
CMEK = 'CMEK'
class CryptoKeyWrapper(object):
"""Class describing a crypto key used with cloud API requests.
This class should be instantiated via the `CryptoKeyWrapperFromKey` method.
"""
def __init__(self, crypto_key):
"""Initialize the CryptoKeyWrapper.
Args:
crypto_key: Base64-encoded string of a CSEK, or the name of a Cloud KMS
CMEK.
Raises:
CommandException: The specified crypto key was neither a CMEK key name nor
a valid base64-encoded string of a CSEK.
"""
self.crypto_key = crypto_key
# Base64-encoded CSEKs always have a length of 44 characters, whereas
# fully-qualified CMEK names are guaranteed to be longer than 45 characters.
if len(crypto_key) == 44:
self.crypto_type = CryptoKeyType.CSEK
self.crypto_alg = 'AES256' # Only currently supported algorithm for CSEK.
try:
self.crypto_key_sha256 = Base64Sha256FromBase64EncryptionKey(crypto_key)
except:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CSEK, '
'but it was not a valid 44-character base64 string. Please '
'double-check your configuration and ensure the key is correct.')
else: # CMEK
try:
ValidateCMEK(crypto_key)
except CommandException as e:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CMEK, '
'but the key failed validation:\n%s' % e.reason)
self.crypto_type = CryptoKeyType.CMEK
self.crypto_alg = None
self.crypto_key_sha256 = None
def CryptoKeyWrapperFromKey(crypto_key):
"""Returns a CryptoKeyWrapper for crypto_key, or None for no key."""
return CryptoKeyWrapper(crypto_key) if crypto_key else None
def FindMatchingCSEKInBotoConfig(key_sha256, boto_config):
"""Searches boto_config for a CSEK with the given base64-encoded SHA256 hash.
Args:
key_sha256: (str) Base64-encoded SHA256 hash of the AES256 encryption key.
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string if a match is found, None
otherwise.
"""
if six.PY3:
if not isinstance(key_sha256, bytes):
key_sha256 = key_sha256.encode('ascii')
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'encryption_key', None))
if (keywrapper is not None and
keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
for i in range(MAX_DECRYPTION_KEYS):
key_number = i + 1
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'decryption_key%s' % str(key_number), None))
if keywrapper is None:
# Reading 100 config values can take ~1ms in testing. To avoid adding
# this tax, stop reading keys as soon as we encounter a non-existent
# entry (in lexicographic order).
break
elif (keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
def GetEncryptionKeyWrapper(boto_config):
"""Returns a CryptoKeyWrapper for the configured encryption key.
Reads in the value of the "encryption_key" attribute in boto_config, and if
present, verifies it is a valid base64-encoded string and returns a
CryptoKeyWrapper for it.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
CryptoKeyWrapper for the specified encryption key, or None if no encryption
key was specified in boto_config.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
return CryptoKeyWrapper(encryption_key) if encryption_key else None
def Base64Sha256FromBase64EncryptionKey(csek_encryption_key):
if six.PY3:
if not isinstance(csek_encryption_key, bytes):
csek_encryption_key = csek_encryption_key.encode('ascii')
decoded_bytes = base64.b64decode(csek_encryption_key)
key_sha256 = _CalculateSha256FromString(decoded_bytes)
sha256_bytes = binascii.unhexlify(key_sha256)
sha256_base64 = base64.b64encode(sha256_bytes)
return sha256_base64.replace(b'\n', b'')
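# Illustrative sketch (not part of the original module): a CSEK is 32 bytes of
# key material, base64-encoded to the 44-character form that CryptoKeyWrapper
# recognizes above. This helper only shows the shape of the values involved;
# a real key should come from your key management process.
def _example_generate_csek():
  import os
  raw_key = os.urandom(32)  # AES-256 key material.
  csek = base64.b64encode(raw_key).decode('ascii')
  assert len(csek) == 44
  wrapper = CryptoKeyWrapperFromKey(csek)
  assert wrapper.crypto_type == CryptoKeyType.CSEK
  assert wrapper.crypto_key_sha256 == Base64Sha256FromBase64EncryptionKey(csek)
  return csek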
def ValidateCMEK(key):
if not key:
raise CommandException('KMS key is empty.')
if key.startswith('/'):
raise CommandException(
'KMS key should not start with leading slash (/): "%s"' % key)
if not VALID_CMEK_RE().match(key):
raise CommandException(
'Invalid KMS key name: "%s".\nKMS keys should follow the format '
'"projects/<project-id>/locations/<location>/keyRings/<keyring>/'
'cryptoKeys/<key-name>"' % key)
def _CalculateSha256FromString(input_string):
sha256_hash = sha256()
sha256_hash.update(input_string)
return sha256_hash.hexdigest()
def _GetAndVerifyBase64EncryptionKey(boto_config):
"""Reads the encryption key from boto_config and ensures it is base64-encoded.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string, or None if no encryption key
exists in configuration.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
if encryption_key:
# Ensure the key has a valid encoding.
try:
base64.b64decode(encryption_key)
except:
raise CommandException(
'Configured encryption_key is not a valid base64 string. Please '
'double-check your configuration and ensure the key is valid and in '
'base64 format.')
return encryption_key
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/encryption_helper.py
| 0.78469 | 0.203965 |
encryption_helper.py
|
pypi
|
"""Helper functions for Cloud API implementations."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import json
import re
import six
from gslib.cloud_api import ArgumentException
from gslib.utils.text_util import AddQueryParamToUrl
def GetCloudApiInstance(cls, thread_state=None):
"""Gets a gsutil Cloud API instance.
Since Cloud API implementations are not guaranteed to be thread-safe, each
thread needs its own instance. These instances are passed to each thread
via the thread pool logic in command.
Args:
cls: Command class to be used for single-threaded case.
thread_state: Per thread state from this thread containing a gsutil
Cloud API instance.
Returns:
gsutil Cloud API instance.
"""
return thread_state or cls.gsutil_api
def GetDownloadSerializationData(src_obj_metadata,
progress=0,
user_project=None):
"""Returns download serialization data.
There are five entries:
auto_transfer: JSON-specific field, always False.
progress: How much of the download has already been completed.
total_size: Total object size.
url: Implementation-specific field used for saving a metadata get call.
      For JSON, this is the download URL of the object.
For XML, this is a pickled boto key.
user_project: Project to be billed to, added as query param.
Args:
src_obj_metadata: Object to be downloaded.
progress: See above.
user_project: User project to add to query string.
Returns:
Serialization data for use with Cloud API GetObjectMedia.
"""
url = src_obj_metadata.mediaLink
if user_project:
url = AddQueryParamToUrl(url, 'userProject', user_project)
if six.PY3:
if isinstance(url, bytes):
url = url.decode('ascii')
serialization_dict = {
'auto_transfer': 'False',
'progress': progress,
'total_size': src_obj_metadata.size,
'url': url
}
return json.dumps(serialization_dict)
def ListToGetFields(list_fields=None):
"""Removes 'items/' from the input fields and converts it to a set.
Args:
list_fields: Iterable fields usable in ListBuckets/ListObjects calls.
Returns:
Set of fields usable in GetBucket/GetObjectMetadata calls (None implies
all fields should be returned).
"""
if list_fields:
get_fields = set()
for field in list_fields:
if field in ('kind', 'nextPageToken', 'prefixes'):
# These are not actually object / bucket metadata fields.
# They are fields specific to listing, so we don't consider them.
continue
get_fields.add(re.sub(r'items/', '', field))
return get_fields
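# Worked example (illustrative only, not part of the original module):
# list-specific fields are dropped and the 'items/' prefix is stripped from
# the rest; a falsy input yields None (meaning "all fields").
def _example_list_to_get_fields():
  fields = ListToGetFields(['items/name', 'items/size', 'nextPageToken'])
  assert fields == set(['name', 'size'])
  assert ListToGetFields(None) is None
  return fields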
def ValidateDstObjectMetadata(dst_obj_metadata):
"""Ensures dst_obj_metadata supplies the needed fields for copy and insert.
Args:
dst_obj_metadata: Metadata to validate.
Raises:
ArgumentException if metadata is invalid.
"""
if not dst_obj_metadata:
raise ArgumentException(
'No object metadata supplied for destination object.')
if not dst_obj_metadata.name:
raise ArgumentException(
'Object metadata supplied for destination object had no object name.')
if not dst_obj_metadata.bucket:
raise ArgumentException(
'Object metadata supplied for destination object had no bucket name.')
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/cloud_api_helper.py
| 0.949095 | 0.261072 |
cloud_api_helper.py
|
pypi
|
"""Helper functions for hashing functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import base64
import binascii
import hashlib
import os
import six
from boto import config
import crcmod
from gslib.exception import CommandException
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.constants import DEFAULT_FILE_BUFFER_SIZE
from gslib.utils.constants import MIN_SIZE_COMPUTE_LOGGING
from gslib.utils.constants import TRANSFER_BUFFER_SIZE
from gslib.utils.constants import UTF8
SLOW_CRCMOD_WARNING = """
WARNING: You have requested checksumming but your crcmod installation isn't
using the module's C extension, so checksumming will run very slowly. For help
installing the extension, please see "gsutil help crcmod".
"""
SLOW_CRCMOD_RSYNC_WARNING = """
WARNING: gsutil rsync uses hashes when modification time is not available at
both the source and destination. Your crcmod installation isn't using the
module's C extension, so checksumming will run very slowly. If this is your
first rsync since updating gsutil, this rsync can take significantly longer than
usual. For help installing the extension, please see "gsutil help crcmod".
"""
_SLOW_CRCMOD_DOWNLOAD_WARNING = """
WARNING: Downloading this composite object requires integrity checking with
CRC32c, but your crcmod installation isn't using the module's C extension,
so the hash computation will likely throttle download performance. For help
installing the extension, please see "gsutil help crcmod".
To disable slow integrity checking, see the "check_hashes" option in your
boto config file.
"""
_SLOW_CRC_EXCEPTION_TEXT = """
Downloading this composite object requires integrity checking with CRC32c,
but your crcmod installation isn't using the module's C extension, so the
hash computation will likely throttle download performance. For help
installing the extension, please see "gsutil help crcmod".
To download regardless of crcmod performance or to skip slow integrity
checks, see the "check_hashes" option in your boto config file.
NOTE: It is strongly recommended that you not disable integrity checks. Doing so
could allow data corruption to go undetected during uploading/downloading."""
_NO_HASH_CHECK_WARNING = """
WARNING: This download will not be validated since your crcmod installation
doesn't use the module's C extension, so the hash computation would likely
throttle download performance. For help in installing the extension, please
see "gsutil help crcmod".
To force integrity checking, see the "check_hashes" option in your boto config
file.
"""
# Configuration values for hashing.
CHECK_HASH_IF_FAST_ELSE_FAIL = 'if_fast_else_fail'
CHECK_HASH_IF_FAST_ELSE_SKIP = 'if_fast_else_skip'
CHECK_HASH_ALWAYS = 'always'
CHECK_HASH_NEVER = 'never'
# Table storing polynomial values of x^(2^k) mod CASTAGNOLI_POLY for all k < 31,
# where x^(2^k) and CASTAGNOLI_POLY are both considered polynomials. This is
# sufficient since x^(2^31) mod CASTAGNOLI_POLY = x.
X_POW_2K_TABLE = [
2, 4, 16, 256, 65536, 517762881, 984302966, 408362264, 1503875210,
2862076957, 3884826397, 1324787473, 621200174, 1758783527, 1416537776,
1180494764, 648569364, 2521473789, 994858823, 1728245375, 3498467999,
4059169852, 3345064394, 2828422810, 2429203150, 3336788029, 860151998,
2102628683, 1033187991, 4243778976, 1123580069
]
# Castagnoli polynomial and its degree.
CASTAGNOLI_POLY = 4812730177
DEGREE = 32
def ConcatCrc32c(crc_a, crc_b, num_bytes_in_b):
"""Computes CRC32C for concat(A, B) given crc(A), crc(B) and len(B).
An explanation of the algorithm can be found at
crcutil.googlecode.com/files/crc-doc.1.0.pdf.
Args:
crc_a: A 32-bit integer representing crc(A) with least-significant
coefficient first.
crc_b: Same as crc_a.
num_bytes_in_b: Length of B in bytes.
Returns:
CRC32C for concat(A, B)
"""
if not num_bytes_in_b:
return crc_a
return _ExtendByZeros(crc_a, 8 * num_bytes_in_b) ^ crc_b
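# A quick self-check of the concatenation identity (illustrative only, not
# part of the original module). This assumes the inputs are final CRC32C
# values as produced by crcmod's predefined 'crc-32c' checksum, which is how
# this module computes them elsewhere.
def _example_concat_crc32c():
  def _crc32c(data):
    crc = crcmod.predefined.Crc('crc-32c')
    crc.update(data)
    return crc.crcValue
  part_a, part_b = b'hello ', b'world'
  combined = ConcatCrc32c(_crc32c(part_a), _crc32c(part_b), len(part_b))
  assert combined == _crc32c(part_a + part_b)
  return combined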
def _CrcMultiply(p, q):
"""Multiplies two polynomials together modulo CASTAGNOLI_POLY.
Args:
p: The first polynomial.
q: The second polynomial.
Returns:
Result of the multiplication.
"""
result = 0
top_bit = 1 << DEGREE
for _ in range(DEGREE):
if p & 1:
result ^= q
q <<= 1
if q & top_bit:
q ^= CASTAGNOLI_POLY
p >>= 1
return result
def _ExtendByZeros(crc, num_bits):
"""Given crc representing polynomial P(x), compute P(x)*x^num_bits.
Args:
    crc: CRC representing polynomial P(x).
    num_bits: Number of zero bits to extend the CRC by.
Returns:
P(x)*x^num_bits
"""
def _ReverseBits32(crc):
    return int('{0:032b}'.format(crc)[::-1], 2)
crc = _ReverseBits32(crc)
i = 0
while num_bits != 0:
if num_bits & 1:
crc = _CrcMultiply(crc, X_POW_2K_TABLE[i % len(X_POW_2K_TABLE)])
i += 1
num_bits >>= 1
crc = _ReverseBits32(crc)
return crc
def _CalculateHashFromContents(fp, hash_alg):
"""Calculates a base64 digest of the contents of a seekable stream.
This function resets the file pointer to position 0.
Args:
fp: An already-open file object.
hash_alg: Instance of hashing class initialized to start state.
Returns:
Hash of the stream in hex string format.
"""
hash_dict = {'placeholder': hash_alg}
fp.seek(0)
CalculateHashesFromContents(fp, hash_dict)
fp.seek(0)
return hash_dict['placeholder'].hexdigest()
def CalculateHashesFromContents(fp, hash_dict, callback_processor=None):
"""Calculates hashes of the contents of a file.
Args:
fp: An already-open file object (stream will be consumed).
hash_dict: Dict of (string alg_name: initialized hashing class)
Hashing class will be populated with digests upon return.
callback_processor: Optional callback processing class that implements
Progress(integer amount of bytes processed).
"""
while True:
data = fp.read(DEFAULT_FILE_BUFFER_SIZE)
if not data:
break
if six.PY3:
if isinstance(data, str):
data = data.encode(UTF8)
for hash_alg in six.itervalues(hash_dict):
hash_alg.update(data)
if callback_processor:
callback_processor.Progress(len(data))
def CalculateB64EncodedCrc32cFromContents(fp):
"""Calculates a base64 CRC32c checksum of the contents of a seekable stream.
This function sets the stream position 0 before and after calculation.
Args:
fp: An already-open file object.
Returns:
CRC32c checksum of the file in base64 format.
"""
return _CalculateB64EncodedHashFromContents(fp,
crcmod.predefined.Crc('crc-32c'))
def CalculateB64EncodedMd5FromContents(fp):
"""Calculates a base64 MD5 digest of the contents of a seekable stream.
This function sets the stream position 0 before and after calculation.
Args:
fp: An already-open file object.
Returns:
MD5 digest of the file in base64 format.
"""
return _CalculateB64EncodedHashFromContents(fp, GetMd5())
def CalculateMd5FromContents(fp):
"""Calculates a base64 MD5 digest of the contents of a seekable stream.
This function sets the stream position 0 before and after calculation.
Args:
fp: An already-open file object.
Returns:
MD5 digest of the file in hex format.
"""
return _CalculateHashFromContents(fp, GetMd5())
def Base64EncodeHash(digest_value):
"""Returns the base64-encoded version of the input hex digest value."""
encoded_bytes = base64.b64encode(binascii.unhexlify(digest_value))
return encoded_bytes.rstrip(b'\n').decode(UTF8)
def Base64ToHexHash(base64_hash):
"""Returns the hex digest value of the input base64-encoded hash.
Args:
base64_hash: Base64-encoded hash, which may contain newlines and single or
double quotes.
Returns:
Hex digest of the input argument.
"""
decoded_bytes = base64.b64decode(base64_hash.strip('\n"\'').encode(UTF8))
return binascii.hexlify(decoded_bytes)
def _CalculateB64EncodedHashFromContents(fp, hash_alg):
"""Calculates a base64 digest of the contents of a seekable stream.
This function sets the stream position 0 before and after calculation.
Args:
fp: An already-open file object.
hash_alg: Instance of hashing class initialized to start state.
Returns:
Hash of the stream in base64 format.
"""
return Base64EncodeHash(_CalculateHashFromContents(fp, hash_alg))
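# Illustrative example (not part of the original module) tying the helpers
# above together with an in-memory stream: the hex and base64 MD5 forms
# describe the same digest and can be converted with Base64ToHexHash.
def _example_md5_digest_forms():
  import io
  fp = io.BytesIO(b'hello world')
  hex_digest = CalculateMd5FromContents(fp)
  b64_digest = CalculateB64EncodedMd5FromContents(fp)
  assert Base64ToHexHash(b64_digest) == hex_digest.encode('ascii')
  return hex_digest, b64_digest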
def GetUploadHashAlgs():
"""Returns a dict of hash algorithms for validating an uploaded object.
This is for use only with single object uploads, not compose operations
such as those used by parallel composite uploads (though it can be used to
validate the individual components).
Returns:
dict of (algorithm_name: hash_algorithm)
"""
check_hashes_config = config.get('GSUtil', 'check_hashes',
CHECK_HASH_IF_FAST_ELSE_FAIL)
if check_hashes_config == 'never':
return {}
return {'md5': GetMd5}
def GetDownloadHashAlgs(logger, consider_md5=False, consider_crc32c=False):
"""Returns a dict of hash algorithms for validating an object.
Args:
logger: logging.Logger for outputting log messages.
consider_md5: If True, consider using a md5 hash.
consider_crc32c: If True, consider using a crc32c hash.
Returns:
Dict of (string, hash algorithm).
Raises:
CommandException if hash algorithms satisfying the boto config file
cannot be returned.
"""
check_hashes_config = config.get('GSUtil', 'check_hashes',
CHECK_HASH_IF_FAST_ELSE_FAIL)
if check_hashes_config == CHECK_HASH_NEVER:
return {}
hash_algs = {}
if consider_md5:
hash_algs['md5'] = GetMd5
elif consider_crc32c:
# If the cloud provider supplies a CRC, we'll compute a checksum to
# validate if we're using a native crcmod installation and MD5 isn't
# offered as an alternative.
if UsingCrcmodExtension():
hash_algs['crc32c'] = lambda: crcmod.predefined.Crc('crc-32c')
elif not hash_algs:
if check_hashes_config == CHECK_HASH_IF_FAST_ELSE_FAIL:
raise CommandException(_SLOW_CRC_EXCEPTION_TEXT)
elif check_hashes_config == CHECK_HASH_IF_FAST_ELSE_SKIP:
logger.warn(_NO_HASH_CHECK_WARNING)
elif check_hashes_config == CHECK_HASH_ALWAYS:
logger.warn(_SLOW_CRCMOD_DOWNLOAD_WARNING)
hash_algs['crc32c'] = lambda: crcmod.predefined.Crc('crc-32c')
else:
raise CommandException(
'Your boto config \'check_hashes\' option is misconfigured.')
return hash_algs
def GetMd5(byte_string=b''):
"""Returns md5 object, avoiding incorrect FIPS error on Red Hat systems.
Examples: GetMd5(b'abc')
GetMd5(bytes('abc', encoding='utf-8'))
Args:
byte_string (bytes): String in bytes form to hash. Don't include for empty
hash object, since md5(b'').digest() == md5().digest().
Returns:
md5 hash object.
"""
try:
return hashlib.md5(byte_string)
except ValueError:
# On Red Hat-based platforms, may catch a FIPS error.
# "usedforsecurity" flag only available on Red Hat systems or Python 3.9+.
# pylint:disable=unexpected-keyword-arg
return hashlib.md5(byte_string, usedforsecurity=False)
# pylint:enable=unexpected-keyword-arg
class HashingFileUploadWrapper(object):
"""Wraps an input stream in a hash digester and exposes a stream interface.
This class provides integrity checking during file uploads via the
following properties:
Calls to read will appropriately update digesters with all bytes read.
Calls to seek (assuming it is supported by the wrapped stream) using
os.SEEK_SET will catch up / reset the digesters to the specified
position. If seek is called with a different os.SEEK mode, the caller
must return to the original position using os.SEEK_SET before further
reads.
Calls to seek are fast if the desired position is equal to the position at
the beginning of the last read call (we only need to re-hash bytes
from that point on).
"""
def __init__(self, stream, digesters, hash_algs, src_url, logger):
"""Initializes the wrapper.
Args:
stream: Input stream.
digesters: dict of {string: hash digester} containing digesters, where
string is the name of the hash algorithm.
hash_algs: dict of {string: hash algorithm} for resetting and
recalculating digesters. String is the name of the hash algorithm.
src_url: Source FileUrl that is being copied.
logger: For outputting log messages.
"""
if not digesters:
raise CommandException('HashingFileUploadWrapper used with no digesters.')
elif not hash_algs:
raise CommandException('HashingFileUploadWrapper used with no hash_algs.')
self._orig_fp = stream
self._digesters = digesters
self._src_url = src_url
self._logger = logger
self._seek_away = None
self._digesters_previous = {}
for alg in self._digesters:
self._digesters_previous[alg] = self._digesters[alg].copy()
self._digesters_previous_mark = 0
self._digesters_current_mark = 0
self._hash_algs = hash_algs
@property
def mode(self):
"""Returns the mode of the underlying file descriptor, or None."""
return getattr(self._orig_fp, 'mode', None)
def read(self, size=-1): # pylint: disable=invalid-name
""""Reads from the wrapped file pointer and calculates hash digests.
Args:
size: The amount of bytes to read. If ommited or negative, the entire
contents of the file will be read, hashed, and returned.
Returns:
Bytes from the wrapped stream.
Raises:
CommandException if the position of the wrapped stream is unknown.
"""
if self._seek_away is not None:
raise CommandException('Read called on hashing file pointer in an '
'unknown position; cannot correctly compute '
'digest.')
data = self._orig_fp.read(size)
if isinstance(data, six.text_type):
data = data.encode(UTF8)
self._digesters_previous_mark = self._digesters_current_mark
for alg in self._digesters:
self._digesters_previous[alg] = self._digesters[alg].copy()
self._digesters[alg].update(data)
self._digesters_current_mark += len(data)
return data
def tell(self): # pylint: disable=invalid-name
"""Returns the current stream position."""
return self._orig_fp.tell()
def seekable(self): # pylint: disable=invalid-name
"""Returns true if the stream is seekable."""
return self._orig_fp.seekable()
def seek(self, offset, whence=os.SEEK_SET): # pylint: disable=invalid-name
"""Seeks in the wrapped file pointer and catches up hash digests.
Args:
offset: The offset to seek to.
      whence: One of os.SEEK_SET, os.SEEK_CUR, or os.SEEK_END.
Returns:
Return value from the wrapped stream's seek call.
"""
if whence != os.SEEK_SET:
# We do not catch up hashes for non-absolute seeks, and rely on the
# caller to seek to an absolute position before reading.
self._seek_away = self._orig_fp.tell()
else:
# Hashes will be correct and it's safe to call read().
self._seek_away = None
if offset < self._digesters_previous_mark:
# This is earlier than our earliest saved digest, so we need to
# reset the digesters and scan from the beginning.
for alg in self._digesters:
self._digesters[alg] = self._hash_algs[alg]()
self._digesters_current_mark = 0
self._orig_fp.seek(0)
self._CatchUp(offset)
elif offset == self._digesters_previous_mark:
# Just load the saved digests.
self._digesters_current_mark = self._digesters_previous_mark
for alg in self._digesters:
self._digesters[alg] = self._digesters_previous[alg]
elif offset < self._digesters_current_mark:
# Reset the position to our previous digest and scan forward.
self._digesters_current_mark = self._digesters_previous_mark
for alg in self._digesters:
self._digesters[alg] = self._digesters_previous[alg]
self._orig_fp.seek(self._digesters_previous_mark)
self._CatchUp(offset - self._digesters_previous_mark)
else:
# Scan forward from our current digest and position.
self._orig_fp.seek(self._digesters_current_mark)
self._CatchUp(offset - self._digesters_current_mark)
return self._orig_fp.seek(offset, whence)
def _CatchUp(self, bytes_to_read):
"""Catches up hashes, but does not return data and uses little memory.
Before calling this function, digesters_current_mark should be updated
to the current location of the original stream and the self._digesters
should be current to that point (but no further).
Args:
bytes_to_read: Number of bytes to catch up from the original stream.
"""
if self._orig_fp.tell() != self._digesters_current_mark:
raise CommandException(
'Invalid mark when catching up hashes. Stream position %s, hash '
'position %s' % (self._orig_fp.tell(), self._digesters_current_mark))
for alg in self._digesters:
if bytes_to_read >= MIN_SIZE_COMPUTE_LOGGING:
self._logger.debug('Catching up %s for %s...', alg,
self._src_url.url_string)
self._digesters_previous[alg] = self._digesters[alg].copy()
self._digesters_previous_mark = self._digesters_current_mark
bytes_remaining = bytes_to_read
bytes_this_round = min(bytes_remaining, TRANSFER_BUFFER_SIZE)
while bytes_this_round:
data = self._orig_fp.read(bytes_this_round)
if isinstance(data, six.text_type):
data = data.encode(UTF8)
bytes_remaining -= bytes_this_round
for alg in self._digesters:
self._digesters[alg].update(data)
bytes_this_round = min(bytes_remaining, TRANSFER_BUFFER_SIZE)
self._digesters_current_mark += bytes_to_read
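# Minimal usage sketch (illustrative only, not part of the original module):
# wrap an in-memory stream, read part of it, seek back to the start, and read
# it all; the digester ends up covering the full contents. The stand-in URL
# object only needs a url_string attribute, which is all the wrapper uses.
def _example_hashing_upload_wrapper():
  import io
  import logging
  contents = b'0123456789' * 10
  digesters = {'md5': GetMd5()}
  hash_algs = {'md5': GetMd5}
  fake_url = type('FakeUrl', (object,), {'url_string': 'file://example'})()
  wrapper = HashingFileUploadWrapper(io.BytesIO(contents), digesters,
                                     hash_algs, fake_url,
                                     logging.getLogger(__name__))
  wrapper.read(40)
  wrapper.seek(0)
  wrapper.read()
  assert digesters['md5'].hexdigest() == GetMd5(contents).hexdigest()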
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/hashing_helper.py
| 0.885977 | 0.278508 |
hashing_helper.py
|
pypi
|
"""Contains helper objects for changing and deleting ACLs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
from gslib.exception import CommandException
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
class ChangeType(object):
USER = 'User'
GROUP = 'Group'
PROJECT = 'Project'
class AclChange(object):
"""Represents a logical change to an access control list."""
public_scopes = ['AllAuthenticatedUsers', 'AllUsers']
id_scopes = ['UserById', 'GroupById']
email_scopes = ['UserByEmail', 'GroupByEmail']
domain_scopes = ['GroupByDomain']
project_scopes = ['Project']
scope_types = (public_scopes + id_scopes + email_scopes + domain_scopes +
project_scopes)
public_entity_all_users = 'allUsers'
public_entity_all_auth_users = 'allAuthenticatedUsers'
public_entity_types = (public_entity_all_users, public_entity_all_auth_users)
project_entity_prefixes = ('project-editors-', 'project-owners-',
'project-viewers-')
group_entity_prefix = 'group-'
user_entity_prefix = 'user-'
domain_entity_prefix = 'domain-'
project_entity_prefix = 'project-'
permission_shorthand_mapping = {
'R': 'READER',
'W': 'WRITER',
'FC': 'OWNER',
'O': 'OWNER',
'READ': 'READER',
'WRITE': 'WRITER',
'FULL_CONTROL': 'OWNER',
}
def __init__(self, acl_change_descriptor, scope_type):
"""Creates an AclChange object.
Args:
acl_change_descriptor: An acl change as described in the "ch" section of
the "acl" command's help.
scope_type: Either ChangeType.USER or ChangeType.GROUP or
ChangeType.PROJECT, specifying the extent of the scope.
"""
self.identifier = ''
self.raw_descriptor = acl_change_descriptor
self._Parse(acl_change_descriptor, scope_type)
self._Validate()
def __str__(self):
return 'AclChange<{0}|{1}|{2}>'.format(self.scope_type, self.perm,
self.identifier)
def _Parse(self, change_descriptor, scope_type):
"""Parses an ACL Change descriptor."""
def _ClassifyScopeIdentifier(text):
re_map = {
'AllAuthenticatedUsers': r'^(AllAuthenticatedUsers|AllAuth)$',
'AllUsers': '^(AllUsers|All)$',
'Email': r'^.+@.+\..+$',
'Id': r'^[0-9A-Fa-f]{64}$',
'Domain': r'^[^@]+\.[^@]+$',
'Project': r'(owners|editors|viewers)\-.+$',
}
for type_string, regex in re_map.items():
if re.match(regex, text, re.IGNORECASE):
return type_string
if change_descriptor.count(':') != 1:
raise CommandException(
'{0} is an invalid change description.'.format(change_descriptor))
scope_string, perm_token = change_descriptor.split(':')
perm_token = perm_token.upper()
if perm_token in self.permission_shorthand_mapping:
self.perm = self.permission_shorthand_mapping[perm_token]
else:
self.perm = perm_token
scope_class = _ClassifyScopeIdentifier(scope_string)
if scope_class == 'Domain':
# This may produce an invalid UserByDomain scope,
# which is good because then validate can complain.
self.scope_type = '{0}ByDomain'.format(scope_type)
self.identifier = scope_string
elif scope_class in ('Email', 'Id'):
self.scope_type = '{0}By{1}'.format(scope_type, scope_class)
self.identifier = scope_string
elif scope_class == 'AllAuthenticatedUsers':
self.scope_type = 'AllAuthenticatedUsers'
elif scope_class == 'AllUsers':
self.scope_type = 'AllUsers'
elif scope_class == 'Project':
self.scope_type = 'Project'
self.identifier = scope_string
else:
# This is just a fallback, so we set it to something
# and the validate step has something to go on.
self.scope_type = scope_string
def _Validate(self):
"""Validates a parsed AclChange object."""
def _ThrowError(msg):
raise CommandException('{0} is not a valid ACL change\n{1}'.format(
self.raw_descriptor, msg))
if self.scope_type not in self.scope_types:
_ThrowError('{0} is not a valid scope type'.format(self.scope_type))
if self.scope_type in self.public_scopes and self.identifier:
_ThrowError('{0} requires no arguments'.format(self.scope_type))
if self.scope_type in self.id_scopes and not self.identifier:
_ThrowError('{0} requires an id'.format(self.scope_type))
if self.scope_type in self.email_scopes and not self.identifier:
_ThrowError('{0} requires an email address'.format(self.scope_type))
if self.scope_type in self.domain_scopes and not self.identifier:
_ThrowError('{0} requires domain'.format(self.scope_type))
if self.perm not in self.permission_shorthand_mapping.values():
perms = ', '.join(set(self.permission_shorthand_mapping.values()))
_ThrowError('Allowed permissions are {0}'.format(perms))
def _YieldMatchingEntries(self, current_acl):
"""Generator that yields entries that match the change descriptor.
Args:
current_acl: A list of apitools_messages.BucketAccessControls or
ObjectAccessControls which will be searched for matching
entries.
Yields:
An apitools_messages.BucketAccessControl or ObjectAccessControl.
"""
for entry in current_acl:
if (self.scope_type in ('UserById', 'GroupById') and entry.entityId and
self.identifier == entry.entityId):
yield entry
elif (self.scope_type in ('UserByEmail', 'GroupByEmail') and
entry.email and self.identifier == entry.email):
yield entry
elif (self.scope_type == 'GroupByDomain' and entry.domain and
self.identifier == entry.domain):
yield entry
elif (self.scope_type == 'Project' and entry.projectTeam and
self.identifier == '%s-%s' %
(entry.projectTeam.team, entry.projectTeam.projectNumber)):
yield entry
elif (self.scope_type == 'AllUsers' and
entry.entity.lower() == self.public_entity_all_users.lower()):
yield entry
elif (self.scope_type == 'AllAuthenticatedUsers' and
entry.entity.lower() == self.public_entity_all_auth_users.lower()):
yield entry
def _AddEntry(self, current_acl, entry_class):
"""Adds an entry to current_acl."""
if self.scope_type == 'UserById':
entry = entry_class(entityId=self.identifier,
role=self.perm,
entity=self.user_entity_prefix + self.identifier)
elif self.scope_type == 'GroupById':
entry = entry_class(entityId=self.identifier,
role=self.perm,
entity=self.group_entity_prefix + self.identifier)
elif self.scope_type == 'Project':
entry = entry_class(entityId=self.identifier,
role=self.perm,
entity=self.project_entity_prefix + self.identifier)
elif self.scope_type == 'UserByEmail':
entry = entry_class(email=self.identifier,
role=self.perm,
entity=self.user_entity_prefix + self.identifier)
elif self.scope_type == 'GroupByEmail':
entry = entry_class(email=self.identifier,
role=self.perm,
entity=self.group_entity_prefix + self.identifier)
elif self.scope_type == 'GroupByDomain':
entry = entry_class(domain=self.identifier,
role=self.perm,
entity=self.domain_entity_prefix + self.identifier)
elif self.scope_type == 'AllAuthenticatedUsers':
entry = entry_class(entity=self.public_entity_all_auth_users,
role=self.perm)
elif self.scope_type == 'AllUsers':
entry = entry_class(entity=self.public_entity_all_users, role=self.perm)
else:
raise CommandException('Add entry to ACL got unexpected scope type %s.' %
self.scope_type)
current_acl.append(entry)
def _GetEntriesClass(self, current_acl):
# Entries will share the same class, so just return the first one.
for acl_entry in current_acl:
return acl_entry.__class__
# It's possible that a default object ACL is empty, so if we have
# an empty list, assume it is an object ACL.
return apitools_messages.ObjectAccessControl().__class__
def Execute(self, storage_url, current_acl, command_name, logger):
"""Executes the described change on an ACL.
Args:
storage_url: StorageUrl representing the object to change.
current_acl: A list of ObjectAccessControls or
BucketAccessControls to permute.
      command_name: String name of command being run (e.g., 'acl').
logger: An instance of logging.Logger.
Returns:
The number of changes that were made.
"""
logger.debug('Executing %s %s on %s', command_name, self.raw_descriptor,
storage_url)
if self.perm == 'WRITER':
if command_name == 'acl' and storage_url.IsObject():
logger.warning('Skipping %s on %s, as WRITER does not apply to objects',
self.raw_descriptor, storage_url)
return 0
elif command_name == 'defacl':
raise CommandException('WRITER cannot be set as a default object ACL '
'because WRITER does not apply to objects')
entry_class = self._GetEntriesClass(current_acl)
matching_entries = list(self._YieldMatchingEntries(current_acl))
change_count = 0
if matching_entries:
for entry in matching_entries:
if entry.role != self.perm:
entry.role = self.perm
change_count += 1
else:
self._AddEntry(current_acl, entry_class)
change_count = 1
logger.debug('New Acl:\n%s', str(current_acl))
return change_count
class AclDel(object):
"""Represents a logical change from an access control list."""
scope_regexes = {
r'All(Users)?$': 'AllUsers',
r'AllAuth(enticatedUsers)?$': 'AllAuthenticatedUsers',
}
def __init__(self, identifier):
self.raw_descriptor = '-d {0}'.format(identifier)
self.identifier = identifier
for regex, scope in self.scope_regexes.items():
if re.match(regex, self.identifier, re.IGNORECASE):
self.identifier = scope
self.scope_type = 'Any'
self.perm = 'NONE'
def _YieldMatchingEntries(self, current_acl):
"""Generator that yields entries that match the change descriptor.
Args:
current_acl: An instance of apitools_messages.BucketAccessControls or
ObjectAccessControls which will be searched for matching
entries.
Yields:
An apitools_messages.BucketAccessControl or ObjectAccessControl.
"""
for entry in current_acl:
if entry.entityId and self.identifier.lower() == entry.entityId.lower():
yield entry
elif entry.email and self.identifier.lower() == entry.email.lower():
yield entry
elif entry.domain and self.identifier.lower() == entry.domain.lower():
yield entry
      elif (entry.projectTeam and self.identifier.lower() ==
            ('%s-%s' % (entry.projectTeam.team,
                        entry.projectTeam.projectNumber)).lower()):
yield entry
elif entry.entity.lower() == 'allusers' and self.identifier == 'AllUsers':
yield entry
elif (entry.entity.lower() == 'allauthenticatedusers' and
self.identifier == 'AllAuthenticatedUsers'):
yield entry
def Execute(self, storage_url, current_acl, command_name, logger):
logger.debug('Executing %s %s on %s', command_name, self.raw_descriptor,
storage_url)
matching_entries = list(self._YieldMatchingEntries(current_acl))
for entry in matching_entries:
current_acl.remove(entry)
logger.debug('New Acl:\n%s', str(current_acl))
return len(matching_entries)
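# Editor-added illustrative sketch, not part of gsutil: exercises AclDel above
# against a small in-memory object ACL. The bucket URL is a plain string
# stand-in for a StorageUrl (Execute only logs it), and the entity values are
# hypothetical placeholders.
def _sketch_acl_del():
  import logging
  acl = [
      apitools_messages.ObjectAccessControl(
          entity='user-owner@example.com', email='owner@example.com',
          role='OWNER'),
      apitools_messages.ObjectAccessControl(entity='allUsers', role='READER'),
  ]
  change = AclDel('AllUsers')  # mirrors "acl ch -d AllUsers"
  removed = change.Execute('gs://my-bucket/obj', acl, 'acl',
                           logging.getLogger('acl_sketch'))
  return removed == 1 and len(acl) == 1  # the public-read entry was dropped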
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/acl_helper.py
| 0.77223 | 0.160135 |
acl_helper.py
|
pypi
|
"""Helper module for the IAM command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from collections import defaultdict
from collections import namedtuple
import six
from apitools.base.protorpclite import protojson
from gslib.exception import CommandException
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
TYPES = set([
'user', 'deleted:user', 'serviceAccount', 'deleted:serviceAccount', 'group',
'deleted:group', 'domain', 'principal', 'principalSet', 'principalHierarchy'
])
DISCOURAGED_TYPES = set([
'projectOwner',
'projectEditor',
'projectViewer',
])
DISCOURAGED_TYPES_MSG = (
'Assigning roles (e.g. objectCreator, legacyBucketOwner) for project '
'convenience groups is not supported by gsutil, as it goes against the '
'principle of least privilege. Consider creating and using more granular '
'groups with which to assign permissions. See '
'https://cloud.google.com/iam/docs/using-iam-securely for more '
'information. Assigning a role to a project group can be achieved by '
'setting the IAM policy directly (see gsutil help iam for specifics).')
PUBLIC_MEMBERS = set([
'allUsers',
'allAuthenticatedUsers',
])
# This is a convenience class to handle returned results from
# BindingStringToTuple. is_grant is a boolean specifying if the
# bindings are to be granted or removed from a bucket / object,
# and bindings is a list of BindingsValueListEntry instances.
BindingsTuple = namedtuple('BindingsTuple', ['is_grant', 'bindings'])
# This is a special role value assigned to a specific member when all roles
# assigned to the member should be dropped in the policy. A member:DROP_ALL
# binding will be passed from BindingStringToTuple into PatchBindings.
# This role will only ever appear on client-side (i.e. user-generated). It
# will never be returned as a real role from an IAM get request. All roles
# returned by PatchBindings are guaranteed to be "real" roles, i.e. not a
# DROP_ALL role.
DROP_ALL = ''
def SerializeBindingsTuple(bindings_tuple):
"""Serializes the BindingsValueListEntry instances in a BindingsTuple.
This is necessary when passing instances of BindingsTuple through
Command.Apply, as apitools_messages classes are not by default pickleable.
Args:
bindings_tuple: A BindingsTuple instance to be serialized.
Returns:
A serialized BindingsTuple object.
"""
return (bindings_tuple.is_grant,
[protojson.encode_message(t) for t in bindings_tuple.bindings])
def DeserializeBindingsTuple(serialized_bindings_tuple):
(is_grant, bindings) = serialized_bindings_tuple
return BindingsTuple(is_grant=is_grant,
bindings=[
protojson.decode_message(
apitools_messages.Policy.BindingsValueListEntry,
t) for t in bindings
])
def BindingsToDict(bindings):
"""Converts a list of BindingsValueListEntry to a dictionary.
Args:
bindings: A list of BindingsValueListEntry instances.
Returns:
A {role: set(members)} dictionary.
"""
tmp_bindings = defaultdict(set)
for binding in bindings:
tmp_bindings[binding.role].update(binding.members)
return tmp_bindings
def IsEqualBindings(a, b):
(granted, removed) = DiffBindings(a, b)
return not granted.bindings and not removed.bindings
def DiffBindings(old, new):
"""Computes the difference between two BindingsValueListEntry lists.
Args:
    old: The original list of BindingsValueListEntry instances
    new: The updated list of BindingsValueListEntry instances
Returns:
A pair of BindingsTuple instances, one for roles granted between old and
new, and one for roles removed between old and new.
"""
tmp_old = BindingsToDict(old)
tmp_new = BindingsToDict(new)
granted = BindingsToDict([])
removed = BindingsToDict([])
for (role, members) in six.iteritems(tmp_old):
removed[role].update(members.difference(tmp_new[role]))
for (role, members) in six.iteritems(tmp_new):
granted[role].update(members.difference(tmp_old[role]))
granted = [
apitools_messages.Policy.BindingsValueListEntry(role=r, members=list(m))
for (r, m) in six.iteritems(granted)
if m
]
removed = [
apitools_messages.Policy.BindingsValueListEntry(role=r, members=list(m))
for (r, m) in six.iteritems(removed)
if m
]
return (BindingsTuple(True, granted), BindingsTuple(False, removed))
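# Editor-added illustrative sketch, not part of gsutil: shows what DiffBindings
# above reports when one member gains a role. Member and role strings are
# hypothetical placeholders.
def _sketch_diff_bindings():
  entry = apitools_messages.Policy.BindingsValueListEntry
  old = [entry(role='roles/storage.objectViewer',
               members=['user:alice@example.com'])]
  new = [entry(role='roles/storage.objectViewer',
               members=['user:alice@example.com', 'user:bob@example.com'])]
  granted, removed = DiffBindings(old, new)
  # granted.bindings contains only the newly added member for the role;
  # removed.bindings is empty because nothing was taken away.
  return granted.is_grant and not removed.bindings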
def PatchBindings(base, diff):
"""Patches a diff list of BindingsValueListEntry to the base.
Will remove duplicate members for any given role on a grant operation.
Args:
base: A list of BindingsValueListEntry instances.
diff: A BindingsTuple instance of diff to be applied.
Returns:
    The patched bindings, as a list of
    apitools_messages.Policy.BindingsValueListEntry instances.
"""
# Convert the list of bindings into an {r: [m]} dictionary object.
tmp_base = BindingsToDict(base)
tmp_diff = BindingsToDict(diff.bindings)
# Patch the diff into base
if diff.is_grant:
for (role, members) in six.iteritems(tmp_diff):
if not role:
raise CommandException('Role must be specified for a grant request.')
tmp_base[role].update(members)
else:
for role in tmp_base:
tmp_base[role].difference_update(tmp_diff[role])
      # Drop all members with the DROP_ALL role specified in the input.
tmp_base[role].difference_update(tmp_diff[DROP_ALL])
# Construct the BindingsValueListEntry list
bindings = [
apitools_messages.Policy.BindingsValueListEntry(role=r, members=list(m))
for (r, m) in six.iteritems(tmp_base)
if m
]
return bindings
def BindingStringToTuple(is_grant, input_str):
"""Parses an iam ch bind string to a list of binding tuples.
Args:
is_grant: If true, binding is to be appended to IAM policy; else, delete
this binding from the policy.
input_str: A string representing a member-role binding.
               e.g. user:alice@example.com:objectAdmin
                    user:alice@example.com:objectAdmin,objectViewer
                    user:alice@example.com
                    allUsers
                    deleted:user:alice@example.com?uid=123:objectAdmin,objectViewer
                    deleted:serviceAccount:sa@my-project.iam.gserviceaccount.com?uid=123
Raises:
CommandException in the case of invalid input.
Returns:
A BindingsTuple instance.
"""
if not input_str.count(':'):
input_str += ':'
# Allows user specified PUBLIC_MEMBERS, DISCOURAGED_TYPES, and TYPES to be
# case insensitive.
tokens = input_str.split(":")
public_members = {s.lower(): s for s in PUBLIC_MEMBERS}
types = {s.lower(): s for s in TYPES}
discouraged_types = {s.lower(): s for s in DISCOURAGED_TYPES}
possible_public_member_or_type = tokens[0].lower()
possible_type = '%s:%s' % (tokens[0].lower(), tokens[1].lower())
if possible_public_member_or_type in public_members:
tokens[0] = public_members[possible_public_member_or_type]
elif possible_public_member_or_type in types:
tokens[0] = types[possible_public_member_or_type]
elif possible_public_member_or_type in discouraged_types:
tokens[0] = discouraged_types[possible_public_member_or_type]
elif possible_type in types:
(tokens[0], tokens[1]) = types[possible_type].split(':')
input_str = ":".join(tokens)
# We can remove project convenience members, but not add them.
removing_discouraged_type = not is_grant and tokens[0] in DISCOURAGED_TYPES
if input_str.count(':') == 1:
if '%s:%s' % (tokens[0], tokens[1]) in TYPES:
raise CommandException('Incorrect public member type for binding %s' %
input_str)
elif tokens[0] in PUBLIC_MEMBERS:
(member, roles) = tokens
elif tokens[0] in TYPES or removing_discouraged_type:
member = input_str
roles = DROP_ALL
else:
raise CommandException('Incorrect public member type for binding %s' %
input_str)
elif input_str.count(':') == 2:
if '%s:%s' % (tokens[0], tokens[1]) in TYPES:
# case "deleted:user:[email protected]?uid=1234"
member = input_str
roles = DROP_ALL
elif removing_discouraged_type:
(member_type, project_id, roles) = tokens
member = '%s:%s' % (member_type, project_id)
else:
(member_type, member_id, roles) = tokens
_check_member_type(member_type, input_str)
member = '%s:%s' % (member_type, member_id)
elif input_str.count(':') == 3:
# case "deleted:user:[email protected]?uid=1234:objectAdmin,objectViewer"
(member_type_p1, member_type_p2, member_id, roles) = input_str.split(':')
member_type = '%s:%s' % (member_type_p1, member_type_p2)
_check_member_type(member_type, input_str)
member = '%s:%s' % (member_type, member_id)
else:
raise CommandException('Invalid ch format %s' % input_str)
if is_grant and not roles:
raise CommandException('Must specify a role to grant.')
roles = [ResolveRole(r) for r in roles.split(',')]
bindings = [
apitools_messages.Policy.BindingsValueListEntry(members=[member], role=r)
for r in set(roles)
]
return BindingsTuple(is_grant=is_grant, bindings=bindings)
def _check_member_type(member_type, input_str):
if member_type in DISCOURAGED_TYPES:
raise CommandException(DISCOURAGED_TYPES_MSG)
elif member_type not in TYPES:
raise CommandException('Incorrect member type for binding %s' % input_str)
def ResolveRole(role):
if not role:
return DROP_ALL
if 'roles/' in role:
return role
return 'roles/storage.%s' % role
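# Editor-added illustrative sketch, not part of gsutil: parses an "iam ch"-style
# grant with BindingStringToTuple and applies it to an empty policy with
# PatchBindings. The member address is a hypothetical placeholder.
def _sketch_grant_and_patch():
  grant = BindingStringToTuple(True, 'user:alice@example.com:objectAdmin')
  # ResolveRole expands the short role name to 'roles/storage.objectAdmin'.
  patched = PatchBindings([], grant)
  granted, removed = DiffBindings([], patched)
  return IsEqualBindings(patched, granted.bindings) and not removed.bindings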
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/iam_helper.py
| 0.882839 | 0.216074 |
iam_helper.py
|
pypi
|
"""Helper for cat and cp streaming download."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import io
import sys
from boto import config
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.storage_url import StorageUrlFromString
from gslib.utils.encryption_helper import CryptoKeyWrapperFromKey
from gslib.utils.encryption_helper import FindMatchingCSEKInBotoConfig
from gslib.utils.metadata_util import ObjectIsGzipEncoded
from gslib.utils import text_util
_CAT_BUCKET_LISTING_FIELDS = [
'bucket',
'contentEncoding',
'crc32c',
'customerEncryption',
'generation',
'md5Hash',
'name',
'size',
]
class CatHelper(object):
"""Provides methods for the "cat" command and associated functionality."""
def __init__(self, command_obj):
"""Initializes the helper object.
Args:
command_obj: gsutil command instance of calling command.
"""
self.command_obj = command_obj
def _WriteBytesBufferedFileToFile(self, src_fd, dst_fd):
"""Copies contents of the source to the destination via buffered IO.
Buffered reads are necessary in the case where you're reading from a
source that produces more data than can fit into memory all at once. This
method does not close either file when finished.
Args:
src_fd: The already-open source file to read from.
dst_fd: The already-open destination file to write to.
"""
while True:
buf = src_fd.read(io.DEFAULT_BUFFER_SIZE)
if not buf:
break
text_util.write_to_fd(dst_fd, buf)
def CatUrlStrings(self,
url_strings,
show_header=False,
start_byte=0,
end_byte=None,
cat_out_fd=None):
"""Prints each of the url strings to stdout.
Args:
url_strings: String iterable.
show_header: If true, print a header per file.
start_byte: Starting byte of the file to print, used for constructing
range requests.
end_byte: Ending byte of the file to print; used for constructing range
requests. If this is negative, the start_byte is ignored and
                an end range is sent over HTTP (such as range: bytes -9).
cat_out_fd: File descriptor to which output should be written. Defaults to
stdout if no file descriptor is supplied.
Returns:
0 on success.
Raises:
CommandException if no URLs can be found.
"""
printed_one = False
    # This should refer to whatever sys.stdout refers to when this method is
    # run, not when this method is defined, so we do the initialization here
    # rather than define sys.stdout as the cat_out_fd parameter's default value.
if cat_out_fd is None:
cat_out_fd = sys.stdout
    # We manipulate stdout so that all data other than the object contents
    # goes to stderr.
old_stdout = sys.stdout
sys.stdout = sys.stderr
try:
if url_strings and url_strings[0] in ('-', 'file://-'):
self._WriteBytesBufferedFileToFile(sys.stdin, cat_out_fd)
else:
for url_str in url_strings:
did_some_work = False
# TODO: Get only the needed fields here.
for blr in self.command_obj.WildcardIterator(url_str).IterObjects(
bucket_listing_fields=_CAT_BUCKET_LISTING_FIELDS):
decryption_keywrapper = None
if (blr.root_object and blr.root_object.customerEncryption and
blr.root_object.customerEncryption.keySha256):
decryption_key = FindMatchingCSEKInBotoConfig(
blr.root_object.customerEncryption.keySha256, config)
if not decryption_key:
raise EncryptionException(
'Missing decryption key with SHA256 hash %s. No decryption '
'key matches object %s' %
(blr.root_object.customerEncryption.keySha256,
blr.url_string))
decryption_keywrapper = CryptoKeyWrapperFromKey(decryption_key)
did_some_work = True
if show_header:
if printed_one:
print()
print('==> %s <==' % blr)
printed_one = True
cat_object = blr.root_object
storage_url = StorageUrlFromString(blr.url_string)
if storage_url.IsCloudUrl():
compressed_encoding = ObjectIsGzipEncoded(cat_object)
self.command_obj.gsutil_api.GetObjectMedia(
cat_object.bucket,
cat_object.name,
cat_out_fd,
compressed_encoding=compressed_encoding,
start_byte=start_byte,
end_byte=end_byte,
object_size=cat_object.size,
generation=storage_url.generation,
decryption_tuple=decryption_keywrapper,
provider=storage_url.scheme)
else:
with open(storage_url.object_name, 'rb') as f:
self._WriteBytesBufferedFileToFile(f, cat_out_fd)
if not did_some_work:
raise CommandException(NO_URLS_MATCHED_TARGET % url_str)
finally:
sys.stdout = old_stdout
return 0
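# Editor-added illustrative sketch, not part of gsutil: runs the buffered copy
# helper above over ordinary local binary files. It assumes text_util.write_to_fd
# passes bytes straight through to a binary-mode destination, which is how this
# helper is used when streaming downloads to stdout.
def _sketch_buffered_copy():
  import tempfile
  payload = b'x' * (2 * io.DEFAULT_BUFFER_SIZE + 1)  # forces several read() calls
  with tempfile.TemporaryFile() as src, tempfile.TemporaryFile() as dst:
    src.write(payload)
    src.seek(0)
    CatHelper(command_obj=None)._WriteBytesBufferedFileToFile(src, dst)
    dst.seek(0)
    return dst.read() == payload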
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/cat_helper.py
| 0.621656 | 0.177205 |
cat_helper.py
|
pypi
|
"""Utility functions for signurl command."""
import base64
from datetime import datetime
import hashlib
from gslib.utils.constants import UTF8
import six
from six.moves import urllib
_CANONICAL_REQUEST_FORMAT = ('{method}\n{resource}\n{query_string}\n{headers}'
'\n{signed_headers}\n{hashed_payload}')
_SIGNING_ALGO = 'GOOG4-RSA-SHA256'
_STRING_TO_SIGN_FORMAT = ('{signing_algo}\n{request_time}\n{credential_scope}'
'\n{hashed_request}')
_SIGNED_URL_FORMAT = ('https://{host}/{path}?x-goog-signature={sig}&'
'{query_string}')
_UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
def _NowUTC():
return datetime.utcnow()
def CreatePayload(client_id,
method,
duration,
path,
generation,
logger,
region,
signed_headers,
billing_project=None,
string_to_sign_debug=False):
"""Create a string that needs to be signed.
Args:
client_id: Client ID signing this URL.
method: The HTTP method to be used with the signed URL.
duration: timedelta for which the constructed signed URL should be valid.
    path: String path to the bucket or object for signing, in the form
'bucket' or 'bucket/object'.
generation: If not None, specifies a version of an object for signing.
logger: logging.Logger for warning and debug output.
region: Geographic region in which the requested resource resides.
signed_headers: Dict containing the header info like host
content-type etc.
billing_project: Specify a user project to be billed for the request.
string_to_sign_debug: If true AND logger is enabled for debug level,
print string to sign to debug. Used to differentiate user's
signed URL from the probing permissions-check signed URL.
Returns:
A tuple where the 1st element is the string to sign.
The second element is the query string.
"""
signing_time = _NowUTC()
canonical_day = signing_time.strftime('%Y%m%d')
canonical_time = signing_time.strftime('%Y%m%dT%H%M%SZ')
canonical_scope = '{date}/{region}/storage/goog4_request'.format(
date=canonical_day, region=region)
signed_query_params = {
'x-goog-algorithm': _SIGNING_ALGO,
'x-goog-credential': client_id + '/' + canonical_scope,
'x-goog-date': canonical_time,
'x-goog-signedheaders': ';'.join(sorted(signed_headers.keys())),
'x-goog-expires': '%d' % duration.total_seconds()
}
if (billing_project is not None):
signed_query_params['userProject'] = billing_project
if generation is not None:
signed_query_params['generation'] = generation
canonical_resource = '/{}'.format(path)
canonical_query_string = '&'.join([
'{}={}'.format(param, urllib.parse.quote_plus(signed_query_params[param]))
for param in sorted(signed_query_params.keys())
])
canonical_headers = '\n'.join([
'{}:{}'.format(header.lower(), signed_headers[header])
for header in sorted(signed_headers.keys())
]) + '\n'
canonical_signed_headers = ';'.join(sorted(signed_headers.keys()))
canonical_request = _CANONICAL_REQUEST_FORMAT.format(
method=method,
resource=canonical_resource,
query_string=canonical_query_string,
headers=canonical_headers,
signed_headers=canonical_signed_headers,
hashed_payload=_UNSIGNED_PAYLOAD)
if six.PY3:
canonical_request = canonical_request.encode(UTF8)
canonical_request_hasher = hashlib.sha256()
canonical_request_hasher.update(canonical_request)
hashed_canonical_request = base64.b16encode(
canonical_request_hasher.digest()).lower().decode(UTF8)
string_to_sign = _STRING_TO_SIGN_FORMAT.format(
signing_algo=_SIGNING_ALGO,
request_time=canonical_time,
credential_scope=canonical_scope,
hashed_request=hashed_canonical_request)
if string_to_sign_debug and logger:
logger.debug(
'Canonical request (ignore opening/closing brackets): [[[%s]]]' %
canonical_request)
logger.debug('String to sign (ignore opening/closing brackets): [[[%s]]]' %
string_to_sign)
return string_to_sign, canonical_query_string
def GetFinalUrl(raw_signature, host, path, canonical_query_string):
"""Get the final signed url."""
signature = base64.b16encode(raw_signature).lower().decode()
return _SIGNED_URL_FORMAT.format(host=host,
path=path,
sig=signature,
query_string=canonical_query_string)
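# Editor-added illustrative sketch, not part of gsutil: builds the V4
# string-to-sign for a hypothetical one-hour GET URL with CreatePayload above.
# The client_id, path, and region are placeholders, and the RSA signing step
# that would produce raw_signature for GetFinalUrl is deliberately elided.
def _sketch_create_payload():
  from datetime import timedelta
  string_to_sign, canonical_query_string = CreatePayload(
      client_id='signer@my-project.iam.gserviceaccount.com',
      method='GET',
      duration=timedelta(hours=1),
      path='my-bucket/my-object.txt',
      generation=None,
      logger=None,
      region='us-central1',
      signed_headers={'host': 'storage.googleapis.com'})
  # Once raw_signature is computed from string_to_sign with the service
  # account's private key, GetFinalUrl(raw_signature, 'storage.googleapis.com',
  # 'my-bucket/my-object.txt', canonical_query_string) assembles the URL.
  return string_to_sign.startswith(_SIGNING_ALGO), canonical_query_string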
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/signurl_helper.py
| 0.813164 | 0.177847 |
signurl_helper.py
|
pypi
|
"""Implementation of Retention Policy configuration command for buckets."""
from __future__ import absolute_import
from six.moves import input
from decimal import Decimal
import re
from gslib.exception import CommandException
from gslib.lazy_wrapper import LazyWrapper
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
SECONDS_IN_DAY = 24 * 60 * 60
SECONDS_IN_MONTH = 31 * SECONDS_IN_DAY
SECONDS_IN_YEAR = int(365.25 * SECONDS_IN_DAY)
_LOCK_PROMPT = (
'This will PERMANENTLY set the Retention Policy on gs://{} to:\n\n'
'{}\n\nThis setting cannot be reverted! Continue?')
# Regex to match retention period in years.
_RETENTION_IN_YEARS = LazyWrapper(lambda: re.compile(r'(?P<number>\d+)y$'))
# Regex to match retention period in months.
_RETENTION_IN_MONTHS = LazyWrapper(lambda: re.compile(r'(?P<number>\d+)m$'))
# Regex to match retention period in days.
_RETENTION_IN_DAYS = LazyWrapper(lambda: re.compile(r'(?P<number>\d+)d$'))
# Regex to match retention period in seconds.
_RETENTION_IN_SECONDS = LazyWrapper(lambda: re.compile(r'(?P<number>\d+)s$'))
def _ConfirmWithUserPrompt(question, default_response):
"""Prompts user to confirm an action with yes or no response.
Args:
question: Yes/No question to be used for the prompt.
default_response: Default response to the question: True, False
Returns:
    A boolean corresponding to the user's response: True for yes, False for
    no. An empty response returns default_response.
"""
prompt = ''
if default_response:
prompt = '%s [%s|%s]: ' % (question, 'Y', 'n')
else:
prompt = '%s [%s|%s]: ' % (question, 'y', 'N')
while True:
response = input(prompt).lower()
if not response:
return default_response
if response not in ['y', 'yes', 'n', 'no']:
print('\tPlease respond with \'yes\'/\'y\' or \'no\'/\'n\'.')
continue
if response == 'yes' or response == 'y':
return True
if response == 'no' or response == 'n':
return False
def _RetentionPeriodToString(retention_period):
"""Converts Retention Period to Human readable format.
Args:
retention_period: Retention duration in seconds (integer value).
Returns:
Returns a string representing retention duration in human readable format.
"""
# TODO: add link to public documentation regarding conversion rates.
period = Decimal(retention_period)
duration_str = None
if period // SECONDS_IN_YEAR == period / SECONDS_IN_YEAR:
duration_str = '{} Year(s)'.format(period // SECONDS_IN_YEAR)
elif period // SECONDS_IN_MONTH == period / SECONDS_IN_MONTH:
duration_str = '{} Month(s)'.format(period // SECONDS_IN_MONTH)
elif period // SECONDS_IN_DAY == period / SECONDS_IN_DAY:
duration_str = '{} Day(s)'.format(period // SECONDS_IN_DAY)
elif period > SECONDS_IN_DAY:
duration_str = '{} Seconds (~{} Day(s))'.format(retention_period,
period // SECONDS_IN_DAY)
else:
duration_str = '{} Second(s)'.format(retention_period)
return (' Duration: {}').format(duration_str)
def RetentionPolicyToString(retention_policy, bucket_url):
"""Converts Retention Policy to Human readable format."""
retention_policy_str = ''
if retention_policy and retention_policy.retentionPeriod:
locked_string = '(LOCKED)' if retention_policy.isLocked else '(UNLOCKED)'
retention_period = _RetentionPeriodToString(
retention_policy.retentionPeriod)
retention_effective_time = ' Effective Time: {}'.format(
retention_policy.effectiveTime.strftime('%a, %d %b %Y %H:%M:%S GMT'))
retention_policy_str = ' Retention Policy {}:\n{}\n{}'.format(
locked_string, retention_period, retention_effective_time)
else:
retention_policy_str = '{} has no Retention Policy.'.format(bucket_url)
return retention_policy_str
def ConfirmLockRequest(bucket_url, retention_policy):
retention_policy = RetentionPolicyToString(retention_policy, bucket_url)
lock_prompt = _LOCK_PROMPT.format(bucket_url, retention_policy)
return _ConfirmWithUserPrompt(lock_prompt, False)
def UpdateObjectMetadataExceptionHandler(cls, e):
"""Exception handler that maintains state about post-completion status."""
cls.logger.error(e)
cls.everything_set_okay = False
def SetTempHoldFuncWrapper(cls, name_expansion_result, thread_state=None):
log_template = 'Setting Temporary Hold on %s...'
object_metadata_update = apitools_messages.Object(temporaryHold=True)
cls.ObjectUpdateMetadataFunc(object_metadata_update,
log_template,
name_expansion_result,
thread_state=thread_state)
def ReleaseTempHoldFuncWrapper(cls, name_expansion_result, thread_state=None):
log_template = 'Releasing Temporary Hold on %s...'
object_metadata_update = apitools_messages.Object(temporaryHold=False)
cls.ObjectUpdateMetadataFunc(object_metadata_update,
log_template,
name_expansion_result,
thread_state=thread_state)
def SetEventHoldFuncWrapper(cls, name_expansion_result, thread_state=None):
log_template = 'Setting Event-Based Hold on %s...'
object_metadata_update = apitools_messages.Object(eventBasedHold=True)
cls.ObjectUpdateMetadataFunc(object_metadata_update,
log_template,
name_expansion_result,
thread_state=thread_state)
def ReleaseEventHoldFuncWrapper(cls, name_expansion_result, thread_state=None):
log_template = 'Releasing Event-Based Hold on %s...'
object_metadata_update = apitools_messages.Object(eventBasedHold=False)
cls.ObjectUpdateMetadataFunc(object_metadata_update,
log_template,
name_expansion_result,
thread_state=thread_state)
def DaysToSeconds(days):
"""Converts duration specified in days to equivalent seconds.
Args:
days: Retention duration in number of days.
Returns:
Returns the equivalent duration in seconds.
"""
return days * SECONDS_IN_DAY
def MonthsToSeconds(months):
"""Converts duration specified in months to equivalent seconds.
GCS bucket lock API uses following duration equivalencies to convert
durations specified in terms of months or years to seconds:
- A month is considered to be 31 days or 2,678,400 seconds.
- A year is considered to be 365.25 days or 31,557,600 seconds.
Args:
months: Retention duration in number of months.
Returns:
Returns the rough equivalent duration in seconds.
"""
return months * SECONDS_IN_MONTH
def YearsToSeconds(years):
"""Converts duration specified in years to equivalent seconds.
GCS bucket lock API uses following duration equivalencies to convert
durations specified in terms of months or years to seconds:
- A month is considered to be 31 days or 2,678,400 seconds.
- A year is considered to be 365.25 days or 31,557,600 seconds.
Args:
years: Retention duration in number of years.
Returns:
Returns the rough equivalent duration in seconds.
"""
return years * SECONDS_IN_YEAR
def RetentionInYearsMatch(years):
"""Test whether the string matches retention in years pattern.
Args:
years: string to match for retention specified in years format.
Returns:
Returns a match object if the string matches the retention in years
pattern. The match object will contain a 'number' group for the duration
in number of years. Otherwise, None is returned.
"""
return _RETENTION_IN_YEARS().match(years)
def RetentionInMonthsMatch(months):
"""Test whether the string matches retention in months pattern.
Args:
months: string to match for retention specified in months format.
Returns:
Returns a match object if the string matches the retention in months
pattern. The match object will contain a 'number' group for the duration
in number of months. Otherwise, None is returned.
"""
return _RETENTION_IN_MONTHS().match(months)
def RetentionInDaysMatch(days):
"""Test whether the string matches retention in days pattern.
Args:
days: string to match for retention specified in days format.
Returns:
Returns a match object if the string matches the retention in days
pattern. The match object will contain a 'number' group for the duration
in number of days. Otherwise, None is returned.
"""
return _RETENTION_IN_DAYS().match(days)
def RetentionInSecondsMatch(seconds):
"""Test whether the string matches retention in seconds pattern.
Args:
seconds: string to match for retention specified in seconds format.
Returns:
Returns a match object if the string matches the retention in seconds
pattern. The match object will contain a 'number' group for the duration
in number of seconds. Otherwise, None is returned.
"""
return _RETENTION_IN_SECONDS().match(seconds)
def RetentionInSeconds(pattern):
"""Converts a retention period string pattern to equivalent seconds.
Args:
pattern: a string pattern that represents a retention period.
Returns:
    Returns the retention period in seconds. If the pattern does not match
    any of the supported formats, a CommandException is raised.
"""
seconds = None
year_match = RetentionInYearsMatch(pattern)
month_match = RetentionInMonthsMatch(pattern)
day_match = RetentionInDaysMatch(pattern)
second_match = RetentionInSecondsMatch(pattern)
if year_match:
seconds = YearsToSeconds(int(year_match.group('number')))
elif month_match:
seconds = MonthsToSeconds(int(month_match.group('number')))
elif day_match:
seconds = DaysToSeconds(int(day_match.group('number')))
elif second_match:
seconds = int(second_match.group('number'))
else:
raise CommandException('Incorrect retention period specified. '
'Please use one of the following formats '
'to specify the retention period : '
'<number>y, <number>m, <number>d, <number>s.')
return seconds
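# Editor-added illustrative sketch, not part of gsutil: the retention-period
# formats accepted by RetentionInSeconds above, using the documented 31-day
# month and 365.25-day year equivalencies.
def _sketch_retention_parsing():
  assert RetentionInSeconds('1y') == SECONDS_IN_YEAR       # 31,557,600 seconds
  assert RetentionInSeconds('2m') == 2 * SECONDS_IN_MONTH  # 5,356,800 seconds
  assert RetentionInSeconds('30d') == 30 * SECONDS_IN_DAY  # 2,592,000 seconds
  assert RetentionInSeconds('600s') == 600
  return True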
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/utils/retention_util.py
| 0.835819 | 0.239072 |
retention_util.py
|
pypi
|
"""Additional help about contributing code to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(https://cla.developers.google.com/about/google-individual).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(https://cla.developers.google.com/about/google-corporate)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it
has already been reported by another user. From there you can also
subscribe to updates to the issue.
3. If a GitHub issue doesn't already exist, create one about your idea before
sending actual code. Often we can discuss the idea and help propose things
that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from the GitHub repository:
https://github.com/GoogleCloudPlatform/gsutil
To clone a read-only copy of the repository:
git clone git://github.com/GoogleCloudPlatform/gsutil.git
To push your own changes to GitHub, click the Fork button on the
repository page and clone the repository from your own fork.
7. The gsutil git repository uses git submodules to pull in external modules.
After checking out the repository, make sure to also pull the submodules
by entering into the gsutil top-level directory and run:
git submodule update --init --recursive
8. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made changes to boto, please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules. Change directories into the boto root
directory at third_party/boto and run:
pip install -r requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run the install command again.
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto's
tests directory and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
9. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
10. Please run the yapf linter with the config files in the root of the GitHub
repository.
yapf -irp .
11. When it's time to send us code, please submit a PR to the `gsutil GitHub
repository <https://github.com/GoogleCloudPlatform/gsutil>`_. For help on
making GitHub PRs, please refer to this
`GitHub help document <https://help.github.com/en/articles/about-pull-requests>`_.
""")
class CommandOptions(HelpProvider):
"""Additional help about contributing code to gsutil."""
# TODO: gsutil-beta: Add lint .rc file and linting instructions.
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='dev',
help_name_aliases=[
'development',
'developer',
'code',
'mods',
'software',
],
help_type='additional_help',
help_one_line_summary='Contributing Code to gsutil',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/dev.py
| 0.700792 | 0.388328 |
dev.py
|
pypi
|
"""Additional help about types of credentials and authentication."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
This help section provides details about various precautions taken by gsutil
to protect data security, as well as recommendations for how customers should
safeguard security.
<B>TRANSPORT LAYER SECURITY</B>
gsutil performs all operations using transport-layer encryption (HTTPS), to
protect against data leakage over shared network links. This is also important
because gsutil uses "bearer tokens" for authentication (OAuth2) as well as for
resumable upload identifiers, and such tokens must be protected from being
eavesdropped and reused.
gsutil also supports the older HMAC style of authentication via the XML API
(see `gsutil endpoints
<https://cloud.google.com/storage/docs/request-endpoints#gsutil>`_). While
HMAC authentication does not use bearer tokens (and thus is not subject to
eavesdropping/replay attacks), it's still important to encrypt data traffic.
Prior to gsutil release 4.0 it was possible to use HTTP instead of HTTPS by
setting the "is_secure" configuration parameter in the [Boto] section of the
boto configuration file to False. However, starting with gsutil version 4.0
setting is_secure to False is disallowed. For more details about different
credential options, see "gsutil help creds".
To add an extra layer of security, gsutil supports mutual TLS (mTLS) for
the Cloud Storage JSON API. With mTLS, the client verifies the server
certificate, and the server also verifies the client.
To find out more about how to enable mTLS, see the `install docs
<https://cloud.google.com/storage/docs/gsutil_install>`_.
<B>LOCAL FILE STORAGE SECURITY</B>
gsutil takes a number of precautions to protect against security exploits in
the files it stores locally:
- When the gsutil config (or gcloud init for Cloud SDK installs) command runs
it sets file protection mode 600 ("-rw-------") on the .boto
configuration file it generates, so only the user (or superuser) can read
it. This is important because these files contain security-sensitive
information, including credentials and proxy configuration.
- The gsutil config (or gcloud init for Cloud SDK installs) command also uses
file protection mode 600 for the private key file stored locally when you
create service account credentials.
- The default level of logging output from gsutil commands does not include
security-sensitive information, such as OAuth2 tokens and proxy
configuration information. (See the "RECOMMENDED USER PRECAUTIONS" section
below if you increase the level of debug output, using the gsutil -D
option.)
Note that protection modes are not supported on Windows, so if you
use gsutil on Windows we recommend using an encrypted file system and strong
account passwords.
<B>SECURITY-SENSITIVE FILES WRITTEN TEMPORARILY TO DISK BY GSUTIL</B>
gsutil buffers data in temporary files in several situations:
- While compressing data being uploaded via gsutil cp -z/-Z, gsutil
buffers the data in temporary files with protection 600, which it
deletes after the upload is complete (similarly for downloading files
that were uploaded with gsutil cp -z/-Z or some other process that sets the
Content-Encoding to "gzip"). However, if you kill the gsutil process
while the upload is under way the partially written file will be left
in place. See the "CHANGING TEMP DIRECTORIES" section in
"gsutil help cp" for details of where the temporary files are written
and how to change the temp directory location.
- When performing a resumable upload gsutil stores the upload ID (which,
as noted above, is a bearer token and thus should be safe-guarded) in a
file under ~/.gsutil/tracker-files with protection 600, and deletes this
file after the upload is complete. However, if the upload doesn't
complete successfully the tracker file is left in place so the resumable
upload can be re-attempted later. Over time it's possible to accumulate
these tracker files from aborted upload attempts, though resumable
upload IDs are only valid for 1 week, so the security risk only exists
for files less than that age. If you consider the risk of leaving
aborted upload IDs in the tracker directory too high you could modify
your upload scripts to delete the tracker files; or you could create a
cron job to clear the tracker directory periodically.
- The gsutil rsync command stores temporary files (with protection 600)
containing the names, sizes, and checksums of source and destination
directories/buckets, which it deletes after the rsync is complete.
However, if you kill the gsutil process while the rsync is under way the
listing files will be left in place.
Note that gsutil deletes temporary files using the standard OS unlink system
call, which does not perform `data wiping
<https://en.wikipedia.org/wiki/Data_erasure>`_. Thus, the content of such
temporary files can be recovered by a determined adversary.
<B>ACCESS CONTROL LISTS</B>
Unless you specify a different ACL (e.g., via the gsutil cp -a option), by
default objects written to a bucket use the default object ACL on that bucket.
Unless you modify that ACL (e.g., via the gsutil defacl command), by default
it will allow all project editors write access to the object and read/write
access to the object's metadata and will allow all project viewers read
access to the object.
The Google Cloud Storage access control system includes the ability to
specify that objects are publicly readable. Make sure you intend for any
objects you write with this permission to be public. Once "published", data
on the Internet can be copied to many places, so it's effectively impossible
to regain read control over an object written with this permission.
The Google Cloud Storage access control system includes the ability to
specify that buckets are publicly writable. While configuring a bucket this
way can be convenient for various purposes, we recommend against using this
permission - it can be abused for distributing illegal content, viruses, and
other malware, and the bucket owner is legally and financially responsible
for the content stored in their buckets. If you need to make content
available to customers who don't have Google accounts consider instead using
signed URLs (see "gsutil help signurl").
<B>SOFTWARE INTEGRITY AND UPDATES</B>
gsutil is distributed as a standalone bundle via tar and zip files stored in
the gs://pub bucket, as a PyPi module, and as part of the bundled Cloud
SDK release. Each of these distribution methods takes a variety of security
precautions to protect the integrity of the software. We strongly recommend
against getting a copy of gsutil from any other sources (such as mirror
sites).
<B>PROXY USAGE</B>
gsutil supports access via proxies, such as Squid and a number of commercial
products. A full description of their capabilities is beyond the scope of this
documentation, but proxies can be configured to support many security-related
functions, including virus scanning, Data Leakage Prevention, control over
which certificates/CA's are trusted, content type filtering, and many more
capabilities. Some of these features can slow or block legitimate gsutil
behavior. For example, virus scanning depends on decrypting file content,
which in turn requires that the proxy terminate the gsutil connection and
establish a new connection - and in some cases proxies will rewrite content in
ways that result in checksum validation errors and other problems.
For details on configuring proxies see the proxy help text in your .boto
configuration file (generated by the gsutil config or gcloud init command).
<B>ENCRYPTION AT REST</B>
All Google Cloud Storage data are automatically stored in an encrypted state,
but you can also provide your own encryption keys. For more information, see
`Cloud Storage Encryption
<https://cloud.google.com/storage/docs/encryption>`_.
<B>DATA PRIVACY</B>
Google will never ask you to share your credentials, password, or other
security-sensitive information. Beware of potential phishing scams where
someone attempts to impersonate Google and asks for such information.
<B>MEASUREMENT DATA</B>
The gsutil perfdiag command collects a variety of performance-related
measurements and details about your local system and network environment, for
use in troubleshooting performance problems. None of this information will be
sent to Google unless you choose to send it.
<B>RECOMMENDED USER PRECAUTIONS</B>
The first and foremost precaution is: Never share your credentials. Each user
should have distinct credentials.
If you run gsutil -D (to generate debugging output) it will include OAuth2
refresh and access tokens in the output. Make sure to redact this information
before sending this debug output to anyone during troubleshooting/tech support
interactions.
If you run gsutil --trace-token (to send a trace directly to Google),
sensitive information like OAuth2 tokens and the contents of any files
accessed during the trace may be included in the content of the trace.
Customer-supplied encryption key information in the .boto configuration is
security sensitive.
The proxy configuration information in the .boto configuration is
security-sensitive, especially if your proxy setup requires user and
password information. Even if your proxy setup doesn't require user and
password, the host and port number for your proxy is often considered
security-sensitive. Protect access to your .boto configuration file.
If you are using gsutil from a production environment (e.g., via a cron job
running on a host in your data center), use service account credentials rather
than individual user account credentials. These credentials were designed for
such use and, for example, protect you from losing access when an employee
leaves your company.
""")
class CommandOptions(HelpProvider):
"""Additional help about security and privacy considerations using gsutil."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='security',
help_name_aliases=['protection', 'privacy', 'proxies', 'proxy'],
help_type='additional_help',
help_one_line_summary='Security and Privacy Considerations',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/security.py
| 0.872225 | 0.552962 |
security.py
|
pypi
|
"""Additional help about gsutil command-level options."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>DESCRIPTION</B>
gsutil supports separate options for the top-level gsutil command and
the individual sub-commands (like cp, rm, etc.) The top-level options
control behavior of gsutil that apply across commands. For example, in
the command:
gsutil -m cp -p file gs://bucket/obj
the -m option applies to gsutil, while the -p option applies to the cp
sub-command.
<B>OPTIONS</B>
-D Shows HTTP requests/headers and additional debug info needed
when posting support requests, including exception stack traces.
CAUTION: The output from using this flag includes authentication
credentials. Before including this flag in your command, be sure
you understand how the command's output is used, and, if
necessary, remove or redact sensitive information.
-DD Same as -D, plus HTTP upstream payload.
-h Allows you to specify certain HTTP headers, for example:
gsutil -h "Cache-Control:public,max-age=3600" \\
-h "Content-Type:text/html" cp ...
Note that you need to quote the headers/values that
contain spaces (such as "Content-Disposition: attachment;
filename=filename.ext"), to avoid having the shell split them
into separate arguments.
The following headers are stored as object metadata and used
in future requests on the object:
Cache-Control
Content-Disposition
Content-Encoding
Content-Language
Content-Type
The following headers are used to check data integrity:
Content-MD5
gsutil also supports custom metadata headers with a matching
Cloud Storage Provider prefix, such as:
x-goog-meta-
Note that for gs:// URLs, the Cache Control header is specific to
the API being used. The XML API accepts any cache control headers
and returns them during object downloads. The JSON API respects
only the public, private, no-cache, max-age, and no-transform
cache control headers.
See "gsutil help setmeta" for the ability to set metadata
fields on objects after they have been uploaded.
-i Allows you to use the configured credentials to impersonate a
service account, for example:
                gsutil -i "service-account@my-project.iam.gserviceaccount.com" ls gs://pub
Note that this setting will be ignored by the XML API and S3. See
'gsutil help creds' for more information on impersonating service
accounts.
-m Causes supported operations (acl ch, acl set, cp, mv, rm, rsync,
and setmeta) to run in parallel. This can significantly improve
performance if you are performing operations on a large number of
files over a reasonably fast network connection.
gsutil performs the specified operation using a combination of
multi-threading and multi-processing. The number of threads
and processors are determined by ``parallel_thread_count`` and
``parallel_process_count``, respectively. These values are set in
the .boto configuration file or specified in individual requests
with the ``-o`` top-level flag. Because gsutil has no built-in
support for throttling requests, you should experiment with these
values. The optimal values can vary based on a number of factors,
including network speed, number of CPUs, and available memory.
Using the -m option can consume a significant amount of network
bandwidth and cause problems or make your performance worse if
you use a slower network. For example, if you start a large rsync
operation over a network link that's also used by a number of
other important jobs, there could be degraded performance in
those jobs. Similarly, the -m option can make your performance
worse, especially for cases that perform all operations locally,
because it can "thrash" your local disk.
To prevent such issues, reduce the values for
``parallel_thread_count`` and ``parallel_process_count``, or stop
using the -m option entirely. One tool that you can use to limit
how much I/O capacity gsutil consumes and prevent it from
monopolizing your local disk is `ionice
<http://www.tutorialspoint.com/unix_commands/ionice.htm>`_
(built in to many Linux systems). For example, the following
command reduces the I/O priority of gsutil so it doesn't
monopolize your local disk:
                ionice -c 2 -n 7 gsutil -m rsync -r ./dir gs://some-bucket
If a download or upload operation using parallel transfer fails
before the entire transfer is complete (e.g. failing after 300 of
1000 files have been transferred), you must restart the entire
transfer.
Also, although most commands normally fail upon encountering an
error when the -m flag is disabled, all commands continue to try
all operations when -m is enabled with multiple threads or
processes, and the number of failed operations (if any) are
reported as an exception at the end of the command's execution.
-o Set/override values in the boto configuration value, in the format
<section>:<name>=<value>, e.g. gsutil -o "Boto:proxy=host" ...
This will not pass the option to gsutil integration tests, which
run in a separate process.
-q Causes gsutil to perform operations quietly, i.e., without
reporting progress indicators of files being copied or removed,
etc. Errors are still reported. This option can be useful for
running gsutil from a cron job that logs its output to a file, for
which the only information desired in the log is failures.
-u Allows you to specify the ID or number of a user project to be
billed for the request. For example:
gsutil -u "bill-this-project" cp ...
""")
class CommandOptions(HelpProvider):
"""Additional help about gsutil command-level options."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='options',
help_name_aliases=['arg', 'args', 'cli', 'opt', 'opts'],
help_type='additional_help',
help_one_line_summary='Top-Level Command-Line Options',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/command_opts.py
| 0.86891 | 0.290776 |
command_opts.py
|
pypi
|
"""Additional help about using gsutil for production tasks."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
from gslib.utils.constants import RESUMABLE_THRESHOLD_MIB
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
If you use gsutil in large production tasks (such as uploading or
downloading many GiBs of data each night), there are a number of things
you can do to help ensure success. Specifically, this section discusses
how to script large production tasks around gsutil's resumable transfer
mechanism.
<B>BACKGROUND ON RESUMABLE TRANSFERS</B>
First, it's helpful to understand gsutil's resumable transfer mechanism,
and how your script needs to be implemented around this mechanism to work
reliably. gsutil uses resumable transfer support when you attempt to download
a file of any size or to upload a file larger than a configurable threshold
(by default, this threshold is %d MiB). If a transfer fails partway through
(e.g., because of an intermittent network problem), gsutil uses a
`truncated randomized binary exponential backoff-and-retry strategy
<https://cloud.google.com/storage/docs/retry-strategy#tools>`_ that by
default retries transfers up to 23 times over a 10 minute period of time. If
the transfer fails each of these attempts with no intervening progress,
gsutil gives up on the transfer, but keeps a "tracker" file for it in a
configurable location (the default location is ~/.gsutil/, in a file named
by a combination of the SHA1 hash of the name of the bucket and object being
transferred and the last 16 characters of the file name). When transfers
fail in this fashion, you can rerun gsutil at some later time (e.g., after
the networking problem has been resolved), and the resumable transfer picks
up where it left off.
<B>SCRIPTING DATA TRANSFER TASKS</B>
To script large production data transfer tasks around this mechanism,
you can implement a script that runs periodically, determines which file
transfers have not yet succeeded, and runs gsutil to copy them. Below,
we offer a number of suggestions about how this type of scripting should
be implemented:
1. When resumable transfers fail without any progress 23 times in a row
over the course of up to 10 minutes, it probably won't work to simply
retry the transfer immediately. A more successful strategy would be to
have a cron job that runs every 30 minutes, determines which transfers
need to be run, and runs them. If the network experiences intermittent
problems, the script picks up where it left off and will eventually
succeed (once the network problem has been resolved).
2. If your business depends on timely data transfer, you should consider
implementing some network monitoring. For example, you can implement
a task that attempts a small download every few minutes and raises an
alert if the attempt fails for several attempts in a row (or more or less
frequently depending on your requirements), so that your IT staff can
investigate problems promptly. As usual with monitoring implementations,
you should experiment with the alerting thresholds, to avoid false
positive alerts that cause your staff to begin ignoring the alerts.
3. There are a variety of ways you can determine what files remain to be
transferred. We recommend that you avoid attempting to get a complete
listing of a bucket containing many objects (e.g., tens of thousands
or more). One strategy is to structure your object names in a way that
represents your transfer process, and use gsutil prefix wildcards to
request partial bucket listings. For example, if your periodic process
involves downloading the current day's objects, you could name objects
using a year-month-day-object-ID format and then find today's objects by
using a command like gsutil ls "gs://bucket/2011-09-27-*". Note that it
is more efficient to have a non-wildcard prefix like this than to use
something like gsutil ls "gs://bucket/*-2011-09-27". The latter command
actually requests a complete bucket listing and then filters in gsutil,
while the former asks Google Storage to return the subset of objects
whose names start with everything up to the "*".
For data uploads, another technique would be to move local files from a "to
be processed" area to a "done" area as your script successfully copies
files to the cloud. You can do this in parallel batches by using a command
like:
gsutil -m cp -r to_upload/subdir_$i gs://bucket/subdir_$i
where i is a shell loop variable. Make sure to check the shell $status
variable is 0 after each gsutil cp command, to detect if some of the copies
failed, and rerun the affected copies.
With this strategy, the file system keeps track of all remaining work to
be done.
4. If you have really large numbers of objects in a single bucket
(say hundreds of thousands or more), you should consider tracking your
objects in a database instead of using bucket listings to enumerate
the objects. For example this database could track the state of your
downloads, so you can determine what objects need to be downloaded by
your periodic download script by querying the database locally instead
of performing a bucket listing.
5. Make sure you don't delete partially downloaded temporary files after a
transfer fails: gsutil picks up where it left off (and performs a hash
of the final downloaded content to ensure data integrity), so deleting
partially transferred files will cause you to lose progress and make
more wasteful use of your network.
6. If you have a fast network connection, you can speed up the transfer of
large numbers of files by using the gsutil -m (multi-threading /
multi-processing) option. Be aware, however, that gsutil doesn't attempt to
keep track of which files were downloaded successfully in cases where some
files failed to download. For example, if you use multi-threaded transfers
to download 100 files and 3 failed to download, it is up to your scripting
process to determine which transfers didn't succeed, and retry them. A
periodic check-and-run approach like outlined earlier would handle this
case.
If you use parallel transfers (gsutil -m) you might want to experiment with
the number of threads being used (via the parallel_thread_count setting
in the .boto config file). By default, gsutil uses 10 threads for Linux
and 24 threads for other operating systems. Depending on your network
speed, available memory, CPU load, and other conditions, this may or may
not be optimal. Experiment with higher or lower thread counts to find the
best setting for your environment.
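The following minimal sketch pulls several of these recommendations together:
it probes connectivity with a small test download, asks a local database
which objects still need downloading, and retries them, letting gsutil resume
any partial transfers. The bucket, object, database schema, and local paths
are hypothetical; a production script would add logging, locking, and
alerting appropriate to your environment.
  # periodic_download.py - sketch of the periodic check-and-run approach.
  import sqlite3
  import subprocess
  import sys

  def gsutil(*args):
    # Run a gsutil command and return its exit status.
    return subprocess.call(("gsutil",) + args)

  # Small test download used as a connectivity probe (see item 2 above).
  if gsutil("cp", "gs://bucket/health-check", "/tmp/health-check") != 0:
    print("test download failed; raise a monitoring alert", file=sys.stderr)
    sys.exit(1)

  # Ask a local database which objects still need downloading (item 4 above),
  # instead of listing a very large bucket.
  db = sqlite3.connect("transfer_state.db")
  pending = [row[0] for row in
             db.execute("SELECT url FROM objects WHERE downloaded = 0")]

  # Retry each pending object; gsutil resumes partial downloads on its own.
  for url in pending:
    if gsutil("cp", url, "/data/downloads/") == 0:
      db.execute("UPDATE objects SET downloaded = 1 WHERE url = ?", (url,))
      db.commit()
  db.close()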
""" % RESUMABLE_THRESHOLD_MIB)
class CommandOptions(HelpProvider):
"""Additional help about using gsutil for production tasks."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='prod',
help_name_aliases=[
'production',
'resumable',
'resumable upload',
'resumable transfer',
'resumable download',
'scripts',
'scripting',
],
help_type='additional_help',
help_one_line_summary='Scripting Production Transfers',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/prod.py
| 0.881175 | 0.377799 |
prod.py
|
pypi
|
"""Additional help about object versioning."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
Versioning-enabled buckets maintain noncurrent versions of objects, providing
a way to un-delete data that you accidentally deleted, or to retrieve older
versions of your data. Noncurrent objects are ignored by gsutil commands
unless you indicate that they should be included, either by setting a
relevant command flag or by including a specific generation number in your
command. For example,
wildcards like ``*`` and ``**`` do not, by themselves, act on noncurrent
object versions.
When using gsutil cp, you cannot specify a version-specific URL as the
destination, because writes to Cloud Storage always create a new version.
Trying to specify a version-specific URL as the destination of ``gsutil cp``
results in an error. When you specify a noncurrent object as a source in a
copy command, you always create a new object version and retain the original
(even when using the command to restore a live version). You can use the
``gsutil mv`` command to simultaneously restore an object version and remove
the noncurrent copy that was used as the source.
You can turn versioning on or off for a bucket at any time. Turning
versioning off leaves existing object versions in place and simply causes
the bucket to delete the existing live version of the object whenever a new
version is uploaded.
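For example, you can check or change a bucket's versioning state with the
gsutil versioning command (the bucket name here is hypothetical):
  gsutil versioning get gs://bucket
  gsutil versioning set on gs://bucket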
Regardless of whether you have enabled versioning on a bucket, every object
has two associated positive integer fields:
- the generation, which is updated when a new object replaces an existing
object with the same name. Note that there is no guarantee that generation
numbers increase for successive versions, only that each new version has a
unique generation number.
- the metageneration, which identifies the metadata generation. It starts
at 1; is updated every time the metadata (e.g., ACL or Content-Type) for a
given content generation is updated; and gets reset when the generation
number changes.
Of these two integers, only the generation is used when working with versioned
data. Both generation and metageneration can be used with concurrency control.
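For example, to list the generation numbers of all versions of an object and
then restore a specific noncurrent generation by copying it over the live
version (the names and generation number here are hypothetical):
  gsutil ls -a gs://bucket/object
  gsutil cp gs://bucket/object#1360887759327000 gs://bucket/object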
To learn more about versioning and concurrency, see the following documentation:
- `Overview of Object Versioning
<https://cloud.google.com/storage/docs/object-versioning>`_
- `Guide for using Object Versioning
<https://cloud.google.com/storage/docs/using-object-versioning>`_
- The `reference page for the gsutil versioning command
<https://cloud.google.com/storage/docs/gsutil/commands/versioning>`_
- `Overview of generation numbers and preconditions
<https://cloud.google.com/storage/docs/generations-preconditions>`_
""")
class CommandOptions(HelpProvider):
"""Additional help about object versioning."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='versions',
help_name_aliases=['concurrency', 'concurrency control'],
help_type='additional_help',
help_one_line_summary='Object Versioning and Concurrency Control',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/versions.py
| 0.902559 | 0.255466 |
versions.py
|
pypi
|
"""Additional help about gsutil object and bucket naming."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>BUCKET NAME REQUIREMENTS</B>
Google Cloud Storage has a single namespace, so you will not be allowed
to create a bucket with a name already in use by another user. You can,
however, carve out parts of the bucket name space corresponding to your
company's domain name (see "DOMAIN NAMED BUCKETS").
Bucket names must conform to standard DNS naming conventions. This is
because a bucket name can appear in a DNS record as part of a CNAME
redirect. In addition to meeting DNS naming requirements, Google Cloud
Storage imposes other requirements on bucket naming. At a minimum, your
bucket names must meet the following requirements:
- Bucket names must contain only lowercase letters, numbers, dashes (-), and
dots (.).
- Bucket names must start and end with a number or letter.
- Bucket names must contain 3 to 63 characters. Names containing dots can
contain up to 222 characters, but each dot-separated component can be
no longer than 63 characters.
- Bucket names cannot be represented as an IPv4 address in dotted-decimal
notation (for example, 192.168.5.4).
- Bucket names cannot begin with the "goog" prefix.
- For DNS compliance, you should not have a period adjacent to another
period or dash. For example, ".." or "-." or ".-" are not acceptable.
<B>OBJECT NAME REQUIREMENTS</B>
Object names can contain any sequence of Unicode characters, of length 1-1024
bytes when UTF-8 encoded. Object names must not contain CarriageReturn,
CarriageReturnLineFeed, or the XML-disallowed surrogate blocks (xFFFE
or xFFFF).
We strongly recommend that you abide by the following object naming
conventions:
- Avoid using control characters that are illegal in XML 1.0 in your object
names (#x7F-#x84 and #x86-#x9F). These characters will cause XML listing
issues when you try to list your objects.
- Avoid using "#" in your object names. gsutil interprets object names ending
with #<numeric string> as version identifiers, so including "#" in object
names can make it difficult or impossible to perform various operations on
such objects using gsutil (see 'gsutil help versions').
- Avoid using "[", "]", "*", or "?" in your object names. gsutil interprets
these characters as wildcards, so including any of these characters in
object names can make it difficult or impossible to perform various wildcard
operations using gsutil (see 'gsutil help wildcards').
See also 'gsutil help encoding' about file/object name encoding requirements
and potential interoperability concerns.
<B>DOMAIN NAMED BUCKETS</B>
You can carve out parts of the Google Cloud Storage bucket name space
by creating buckets with domain names (like "example.com").
If you want to create a bucket whose name contains one or more '.'
characters, the following rules apply:
- If the name is a syntactically valid DNS name ending with a
currently-recognized top-level domain (such as .com), you will be required
to verify domain ownership.
- Otherwise you will be disallowed from creating the bucket.
If your project needs to use a domain-named bucket, you need to have
a team member both verify the domain and create the bucket. This is
because Google Cloud Storage checks for domain ownership against the
user who creates the bucket, so the user who creates the bucket must
also be verified as an owner or manager of the domain.
To verify as the owner or manager of a domain, use the Google Webmaster
Tools verification process. The Webmaster Tools verification process
provides three methods for verifying an owner or manager of a domain:
1. Adding a special Meta tag to a site's homepage.
2. Uploading a special HTML file to a site.
3. Adding a DNS TXT record to a domain's DNS configuration.
Meta tag verification and HTML file verification are easier to perform and
are probably adequate for most situations. DNS TXT record verification is
a domain-based verification method that is useful in situations where a
site wants to tightly control who can create domain-named buckets. Once
a site creates a DNS TXT record to verify ownership of a domain, it takes
precedence over meta tag and HTML file verification. For example, you might
have two IT staff members who are responsible for managing your site, called
"example.com." If they complete the DNS TXT record verification, only they
would be able to create buckets called "example.com", "reports.example.com",
"downloads.example.com", and other domain-named buckets.
Site-Based Verification
-----------------------
If you have administrative control over the HTML files that make up a site,
you can use one of the site-based verification methods to verify that you
control or own a site. When you do this, Google Cloud Storage lets you
create buckets representing the verified site and any sub-sites - provided
nobody has used the DNS TXT record method to verify domain ownership of a
parent of the site.
As an example, assume that nobody has used the DNS TXT record method to verify
ownership of the following domains: abc.def.example.com, def.example.com,
and example.com. In this case, Google Cloud Storage lets you create a bucket
named abc.def.example.com if you verify that you own or control any of the
following sites:
http://abc.def.example.com
http://def.example.com
http://example.com
Domain-Based Verification
-------------------------
If you have administrative control over a domain's DNS configuration, you can
use the DNS TXT record verification method to verify that you own or control a
domain. When you use the domain-based verification method to verify that you
own or control a domain, Google Cloud Storage lets you create buckets that
represent any subdomain under the verified domain. Furthermore, Google Cloud
Storage prevents anybody else from creating buckets under that domain unless
you add their name to the list of verified domain owners or they have verified
their domain ownership by using the DNS TXT record verification method.
For example, if you use the DNS TXT record verification method to verify your
ownership of the domain example.com, Google Cloud Storage will let you create
bucket names that represent any subdomain under the example.com domain, such
as abc.def.example.com, music.example.com, or abc.music.example.com.
Using the DNS TXT record method to verify domain ownership supersedes
verification by site-based verification methods. For example, if you
use the Meta tag method or HTML file method to verify domain ownership
of http://example.com, but someone else uses the DNS TXT record method
to verify ownership of the example.com domain, Google Cloud Storage will
not allow you to create a bucket named example.com. To create the bucket
example.com, the domain owner who used the DNS TXT method to verify domain
ownership must add you to the list of verified domain owners for example.com.
The DNS TXT record verification method is particularly useful if you manage
a domain for a large organization that has numerous subdomains because it
lets you control who can create buckets representing those domain names.
Note: If you use the DNS TXT record verification method to verify ownership of
a domain, you cannot create a CNAME record for that domain. RFC 1034 disallows
inclusion of any other resource records if there is a CNAME resource record
present. If you want to create a CNAME resource record for a domain, you must
use the Meta tag verification method or the HTML file verification method.
""")
class CommandOptions(HelpProvider):
"""Additional help about gsutil object and bucket naming."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='naming',
help_name_aliases=['domain', 'limits', 'name', 'names'],
help_type='additional_help',
help_one_line_summary='Object and Bucket Naming',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/naming.py
| 0.921869 | 0.412589 |
naming.py
|
pypi
|
"""Additional help about CRC32C and installing crcmod."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
To reduce the chance for `filename encoding interoperability problems
<https://en.wikipedia.org/wiki/Filename#Encoding_indication_interoperability>`_
gsutil uses `UTF-8 <https://en.wikipedia.org/wiki/UTF-8>`_ character encoding
when uploading and downloading files. Because UTF-8 is in widespread (and
growing) use, for most users nothing needs to be done to use UTF-8. Users with
files stored in other encodings (such as
`Latin 1 <https://en.wikipedia.org/wiki/ISO/IEC_8859-1>`_) must convert those
filenames to UTF-8 before attempting to upload the files.
The most common place where users who have filenames that use some other
encoding encounter a gsutil error is while uploading files using the recursive
(-R) option on the gsutil cp, mv, or rsync commands. When this happens you'll
get an error like this:
CommandException: Invalid Unicode path encountered
('dir1/dir2/file_name_with_\\xf6n_bad_chars').
gsutil cannot proceed with such files present.
Please remove or rename this file and try again.
Note that the invalid Unicode characters have been hex-encoded in this error
message because otherwise trying to print them would result in another
error.
If you encounter such an error you can either remove the problematic file(s)
or try to rename them and re-run the command. If you have a modest number of
such files the simplest thing to do is to think of a different name for the
file and manually rename the file (using local filesystem tools). If you have
too many files for that to be practical, you can use a bulk rename tool or
script.
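As an illustration, the following sketch renames files whose names are not
valid UTF-8, assuming the original names are Latin-1 encoded (the directory
layout is hypothetical; test it on a copy of your data first):
  # rename_to_utf8.py - re-encode Latin-1 filenames as UTF-8 (sketch).
  import os

  for dirpath, dirnames, filenames in os.walk(b"."):
    for name in filenames:
      try:
        name.decode("utf-8")  # already valid UTF-8, nothing to do
      except UnicodeDecodeError:
        fixed = name.decode("latin-1").encode("utf-8")
        os.rename(os.path.join(dirpath, name), os.path.join(dirpath, fixed))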
Unicode errors for valid Unicode filepaths can be caused by lack of Python
locale configuration on Linux and Mac OSes. If your file paths are Unicode
and you get encoding errors, ensure the LANG environment variable is set
correctly. Typically, the LANG variable should be set to something like
"en_US.UTF-8" or "de_DE.UTF-8".
Note also that there's no restriction on the character encoding used in file
content - it can be UTF-8, a different encoding, or non-character
data (like audio or video content). The gsutil UTF-8 character encoding
requirement applies only to filenames.
<B>USING UNICODE FILENAMES ON WINDOWS</B>
Windows support for Unicode in the command shell (cmd.exe or powershell) is
somewhat painful, because Windows uses a Windows-specific character encoding
called `cp1252 <https://en.wikipedia.org/wiki/Windows-1252>`_. To use Unicode
characters you need to run this command in the command shell before the first
time you use gsutil in that shell:
chcp 65001
If you neglect to do this before using gsutil, the progress messages while
uploading files with Unicode names or listing buckets with Unicode object
names will look garbled (i.e., with different glyphs than you expect in the
output). If you simply run the chcp command and re-run the gsutil command, the
output should no longer look garbled.
gsutil attempts to translate between cp1252 encoding and UTF-8 in the main
places that Unicode encoding/decoding problems have been encountered to date
(traversing the local file system while uploading files, and printing Unicode
names while listing buckets). However, because gsutil must perform
translation, it is likely there are other erroneous edge cases when using
Windows with Unicode. If you encounter problems, you might consider instead
using cygwin (on Windows) or Linux or macOS - all of which support Unicode.
<B>USING UNICODE FILENAMES ON MACOS</B>
macOS stores filenames in decomposed form (also known as
`NFD normalization <https://en.wikipedia.org/wiki/Unicode_equivalence>`_).
For example, if a filename contains an accented "e" character, that character
will be converted to an "e" followed by an accent before being saved to the
filesystem. As a consequence, it's possible to have different name strings
for files uploaded from an operating system that doesn't enforce decomposed
form (like Ubuntu) from one that does (like macOS).
The following example shows how this behavior could lead to unexpected
results. Say you create a file with non-ASCII characters on Ubuntu. Ubuntu
stores that filename in its composed form. When you upload the file to the
cloud, it is stored as named. But if you use gsutil rsync to bring the file to
a macOS machine and edit the file, then when you use gsutil rsync to bring
this version back to the cloud, you end up with two different objects, instead
of replacing the original. This is because macOS converted the filename to
a decomposed form, and Cloud Storage sees this as a different object name.
<B>CROSS-PLATFORM ENCODING PROBLEMS OF WHICH TO BE AWARE</B>
Using UTF-8 for all object names and filenames will ensure that gsutil doesn't
encounter character encoding errors while operating on the files.
Unfortunately, it's still possible that files uploaded / downloaded this way
can have interoperability problems, for a number of reasons unrelated to
gsutil. For example:
- Windows filenames are case-insensitive, while Google Cloud Storage, Linux,
and macOS are not. Thus, for example, if you have two filenames on Linux
differing only in case and upload both to Google Cloud Storage and then
subsequently download them to Windows, you will end up with just one file
whose contents came from the last of these files to be written to the
filesystem.
- macOS performs character encoding decomposition based on tables stored in
the OS, and the tables change between Unicode versions. Thus the encoding
used by an external library may not match that performed by the OS. It is
possible that two object names may translate to a single local filename.
- Windows console support for Unicode is difficult to use correctly.
For a more thorough list of such issues see `this presentation
<http://www.i18nguy.com/unicode/filename-issues-iuc33.pdf>`_
These problems mostly arise when sharing data across platforms (e.g.,
uploading data from a Windows machine to Google Cloud Storage, and then
downloading from Google Cloud Storage to a machine running macOS).
Unfortunately these problems are a consequence of the lack of a filename
encoding standard, and users need to be aware of the kinds of problems that
can arise when copying filenames across platforms.
There is one precaution users can exercise to prevent some of these problems:
When using the Windows console specify wildcards or folders (using the -R
option) rather than explicitly named individual files.
<B>CONVERTING FILENAMES TO UNICODE</B>
Open-source tools are available to convert filenames for non-Unicode files.
For example, to convert from latin1 (a common Windows encoding) to Unicode,
you can use
`Windows iconv <http://gnuwin32.sourceforge.net/packages/libiconv.htm>`_.
For Unix-based systems, you can use
`libiconv <https://www.gnu.org/software/libiconv/>`_.
""")
class CommandOptions(HelpProvider):
"""Additional help about filename encoding and interoperability problems."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='encoding',
help_name_aliases=[
'encodings',
'utf8',
'utf-8',
'latin1',
'unicode',
'interoperability',
],
help_type='additional_help',
help_one_line_summary='Filename encoding and interoperability problems',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/encoding.py
| 0.872062 | 0.389547 |
encoding.py
|
pypi
|
"""Additional help about types of credentials and authentication."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
gsutil currently supports several types of credentials/authentication, as
well as the ability to `access public data anonymously
<https://cloud.google.com/storage/docs/access-public-data>`_. Each of these
type of credentials is discussed in more detail below, along with
information about configuring and using credentials via either the Cloud SDK
or standalone installations of gsutil.
<B>Configuring/Using Credentials via Cloud SDK Distribution of gsutil</B>
When gsutil is installed/used via the Cloud SDK ("gcloud"), credentials are
stored by Cloud SDK in a non-user-editable file located under
~/.config/gcloud (any manipulation of credentials should be done via the
gcloud auth command). If you need to set up multiple credentials (e.g., one
for an individual user account and a second for a service account), the
gcloud auth command manages the credentials for you, and you switch between
credentials using the gcloud auth command as well (for more details see
https://cloud.google.com/sdk/gcloud/reference/auth).
Once credentials have been configured via gcloud auth, those credentials will
be used regardless of whether the user has any boto configuration files (which
are located at ~/.boto unless a different path is specified in the BOTO_CONFIG
environment variable). However, gsutil will still look for credentials in the
boto config file if a type of non-GCS credential is needed that's not stored
in the gcloud credential store (e.g., an HMAC credential for an S3 account).
<B>Configuring/Using Credentials via Standalone gsutil Distribution</B>
If you installed a standalone distribution of gsutil (downloaded from
https://pub.storage.googleapis.com/gsutil.tar.gz,
https://pub.storage.googleapis.com/gsutil.zip, or PyPi), credentials are
configured using the gsutil config command, and are stored in the
user-editable boto config file (located at ~/.boto unless a different path is
specified in the BOTO_CONFIG environment variable). In this case, if you want to set up
multiple credentials (e.g., one for an individual user account and a second
for a service account), you run gsutil config once for each credential, and
save each of the generated boto config files (e.g., renaming one to
~/.boto_user_account and the second to ~/.boto_service_account), and you
switch between the credentials using the BOTO_CONFIG environment variable
(e.g., by running BOTO_CONFIG=~/.boto_user_account gsutil ls).
Note that when using the standalone version of gsutil with the JSON API you
can configure at most one of the following types of Google Cloud Storage
credentials in a single boto config file: OAuth2 User Account, OAuth2 Service
Account. In addition to these, you may also have S3 HMAC credentials
(necessary for using s3:// URLs) and Google Compute Engine Internal Service
Account credentials. Google Compute Engine Internal Service Account
credentials are used only when OAuth2 credentials are not present.
<B>SUPPORTED CREDENTIAL TYPES</B>
gsutil supports several types of credentials (the specific subset depends on
which distribution of gsutil you are using; see above discussion).
OAuth2 User Account:
This is the preferred type of credentials for authenticating requests on
behalf of a specific user (which is probably the most common use of gsutil).
This is the default type of credential that will be created when you run
"gsutil config" (or "gcloud init" for Cloud SDK installs).
For more details about OAuth2 authentication, see:
https://developers.google.com/accounts/docs/OAuth2#scenarios
HMAC:
This type of credential can be used by programs that are implemented using
HMAC authentication, which is an authentication mechanism supported by
certain other cloud storage service providers. This type of credential can
also be used for interactive use when moving data to/from service providers
that support HMAC credentials. This is the type of credential that will be
created when you run "gsutil config -a".
Note that it's possible to set up HMAC credentials for both Google Cloud
Storage and another service provider; or to set up OAuth2 user account
credentials for Google Cloud Storage and HMAC credentials for another
service provider. To do so, after you run the "gsutil config" command (or
"gcloud init" for Cloud SDK installs), you can edit the generated ~/.boto
config file and look for comments for where other credentials can be added.
For more details about HMAC authentication, see:
https://developers.google.com/storage/docs/reference/v1/getting-startedv1#keys
OAuth2 Service Account:
This is the preferred type of credential to use when authenticating on
behalf of a service or application (as opposed to a user). For example, if
you will run gsutil out of a nightly cron job to upload/download data,
using a service account allows the cron job not to depend on credentials of
an individual employee at your company. This is the type of credential that
will be configured when you run "gsutil config -e". To configure service
account credentials when installed via the Cloud SDK, run "gcloud auth
activate-service-account".
It is important to note that a service account is considered an Editor by
default for the purposes of API access, rather than an Owner. In particular,
the fact that Editors have OWNER access in the default object and
bucket ACLs, but the canned ACL options remove OWNER access from
Editors, can lead to unexpected results. The solution to this problem is to
use "gsutil acl ch" instead of "gsutil acl set <canned-ACL>" to change
permissions on a bucket.
To set up a service account for use with "gsutil config -e" or "gcloud auth
activate-service-account", see:
https://cloud.google.com/storage/docs/authentication#generating-a-private-key
For more details about OAuth2 service accounts, see:
https://developers.google.com/accounts/docs/OAuth2ServiceAccount
For further information about account roles, see:
https://developers.google.com/console/help/#DifferentRoles
Google Compute Engine Internal Service Account:
This is the type of service account used for accounts hosted by App Engine
or Google Compute Engine. Such credentials are created automatically for
you on Google Compute Engine when you run the gcloud compute instances
create command, and the credentials can be controlled with the --scopes
flag.
For more details about Google Compute Engine service accounts, see:
https://developers.google.com/compute/docs/authentication
For more details about App Engine service accounts, see:
https://developers.google.com/appengine/docs/python/appidentity/overview
Service Account Impersonation:
Impersonating a service account is useful in scenarios where you need to
grant short-term access to specific resources. For example, if you have a
bucket of sensitive data that is typically read-only and want to
temporarily grant write access through a trusted service account.
You can specify which service account to use for impersonation by running
"gsutil -i", "gsutil config" and editing the boto configuration file, or
"gcloud config set auth/impersonate_service_account".
In order to impersonate, your original credentials need to be granted
roles/iam.serviceAccountTokenCreator on the target service account.
For more information see:
https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials
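For example, assuming your credentials have the required role on a
(hypothetical) target service account, you could run a single command with
impersonated credentials:
  gsutil -i [email protected] ls gs://bucket
or make impersonation the default for subsequent commands:
  gcloud config set auth/impersonate_service_account [email protected]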
""")
class CommandOptions(HelpProvider):
"""Additional help about types of credentials and authentication."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='creds',
help_name_aliases=['credentials', 'authentication', 'auth', 'gcloud'],
help_type='additional_help',
help_one_line_summary='Credential Types Supporting Various Use Cases',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/creds.py
| 0.89973 | 0.495239 |
creds.py
|
pypi
|
"""Additional help about technical and billing support."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>TECHNICAL SUPPORT</B>
If you have any questions or encounter any problems with Google Cloud Storage,
please first read the `FAQ <https://cloud.google.com/storage/docs/faq>`_.
If you still have questions please use one of the following methods as
appropriate, providing the details noted below:
A) For API, tool usage, or other software development-related questions,
please search for and post questions on Stack Overflow, using the official
`google-cloud-storage tag
<http://stackoverflow.com/questions/tagged/google-cloud-storage>`_. Our
support team actively monitors questions posted with this tag and we'll do our best to
respond.
B) For gsutil bugs or feature requests, please check if there is already an
`existing GitHub issue <https://github.com/GoogleCloudPlatform/gsutil/issues>`_
that covers your request. If not, create a
`new GitHub issue <https://github.com/GoogleCloudPlatform/gsutil/issues/new>`_.
To help us diagnose any issues you encounter, when creating a new issue
please provide these details in addition to the description of your problem:
- The resource you are attempting to access (bucket name, object name),
assuming they are not sensitive.
- The operation you attempted (GET, PUT, etc.)
- The time and date (including timezone) at which you encountered the problem
- If you can use gsutil to reproduce your issue, specify the -D option to
display your request's HTTP details, and provide these details in the
issue.
Warning: The gsutil -d, -D, and -DD options will also print the authentication
header with authentication credentials for your Google Cloud Storage account.
Make sure to remove any "Authorization:" headers before you post HTTP details
to the issue. Note also that if you upload files large enough to use resumable
uploads, the resumable upload IDs are security-sensitive while an upload
is not yet complete, so should not be posted on public forums.
If you make any local modifications to gsutil, please make sure to use
a released copy of gsutil (instead of your locally modified copy) when
providing the gsutil -D output noted above. We cannot support versions
of gsutil that include local modifications. (However, we're open to user
contributions; see "gsutil help dev".)
<B>BILLING AND ACCOUNT QUESTIONS</B>
A) For billing documentation, please visit
https://cloud.google.com/storage/pricing.
If you want to cancel billing, follow the instructions at
`Cloud Storage FAQ <https://cloud.google.com/storage/docs/faq#disablebilling>`_.
Caution: When you disable billing, you also disable the Google Cloud Storage
service. Make sure you want to disable the Google Cloud Storage service
before you disable billing.
B) For support regarding billing, please see
`billing support <https://support.google.com/cloud/contact/cloud_platform_billing>`_.
For other questions regarding your account, Terms Of Service, Google
Cloud Console, or other administration-related questions please see
`Google Cloud Platform support <https://support.google.com/cloud/answer/6282346#gcp>`_.
""")
class CommandOptions(HelpProvider):
"""Additional help about technical and billing support."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='support',
help_name_aliases=[
'techsupport',
'tech support',
'technical support',
'billing',
'faq',
'questions',
],
help_type='additional_help',
help_one_line_summary='Google Cloud Storage Support',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/support.py
| 0.7413 | 0.287254 |
support.py
|
pypi
|
"""Additional help about object metadata."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW OF METADATA</B>
Objects can have associated metadata, which control aspects of how
GET requests are handled, including ``Content-Type``, ``Cache-Control``,
``Content-Disposition``, and ``Content-Encoding``. In addition, you can
set custom ``key:value`` metadata for use by your applications. For a
discussion of specific metadata properties, see the `metadata concept
page <https://cloud.google.com/storage/docs/metadata>`_.
There are two ways to set metadata on objects:
- At upload time you can specify one or more metadata properties to
associate with objects, using the ``gsutil -h`` option. For example,
the following command would cause gsutil to set the ``Content-Type`` and
``Cache-Control`` for each of the files being uploaded from a local
directory named ``images``:
gsutil -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" cp -r images \\
gs://bucket/images
Note that -h is an option on the gsutil command, not the cp sub-command.
- You can set or remove metadata fields from already uploaded objects using
the ``gsutil setmeta`` command. See "gsutil help setmeta".
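For example, the following command sets the Content-Type and removes a
custom metadata field on an existing object (the bucket, object, and field
names are hypothetical); providing an empty value for a field removes it:
  gsutil setmeta -h "Content-Type:text/html" -h "x-goog-meta-reviewer:" gs://bucket/object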
<B>SETTABLE FIELDS; FIELD VALUES</B>
You can't set some metadata fields, such as ETag and Content-Length. The
fields you can set are:
- ``Cache-Control``
- ``Content-Disposition``
- ``Content-Encoding``
- ``Content-Language``
- ``Content-Type``
- ``Custom-Time``
- Custom metadata
Field names are case-insensitive.
All fields and their values must consist only of ASCII characters, with the
exception of values for ``x-goog-meta-`` fields, which may contain arbitrary
Unicode values. Note that when setting metadata using the XML API, which sends
custom metadata as HTTP headers, Unicode characters are encoded using
UTF-8, then url-encoded to ASCII. For example:
gsutil setmeta -h "x-goog-meta-foo: ã" gs://bucket/object
stores the custom metadata key-value pair of ``foo`` and ``%C3%A3``.
Subsequently, running ``ls -L`` using the JSON API to list the object's
metadata prints ``%C3%A3``, while ``ls -L`` using the XML API
url-decodes this value automatically, printing the character ``ã``.
<B>VIEWING CURRENTLY SET METADATA</B>
You can see what metadata is currently set on an object by using:
gsutil ls -L gs://the_bucket/the_object
""")
class CommandOptions(HelpProvider):
"""Additional help about object metadata."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='metadata',
help_name_aliases=[
'cache-control',
'caching',
'content type',
'mime type',
'mime',
'type',
],
help_type='additional_help',
help_one_line_summary='Working With Object Metadata',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/metadata.py
| 0.922705 | 0.263985 |
metadata.py
|
pypi
|
"""Additional help about Access Control Lists."""
# TODO(iam-beta): Revise this to include new IAM syntax.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
Access Control Lists (ACLs) allow you to control who can read and write
your data, and who can read and write the ACLs themselves.
If not specified at the time an object is uploaded (e.g., via the gsutil cp
-a option), objects will be created with a default object ACL set on the
bucket (see "gsutil help defacl"). You can replace the ACL on an object
or bucket using the "gsutil acl set" command, or
modify the existing ACL using the "gsutil acl ch" command (see "gsutil help
acl").
<B>BUCKET VS OBJECT ACLS</B>
In Google Cloud Storage, the bucket ACL works as follows:
- Users granted READ access are allowed to list the bucket contents and read
bucket metadata other than its ACL.
- Users granted WRITE access are allowed READ access and also are allowed to
write and delete objects in that bucket, including overwriting previously
written objects.
- Users granted OWNER access are allowed WRITE access and also are allowed to
read and write the bucket's ACL.
The object ACL works as follows:
- Users granted READ access are allowed to read the object's data and
metadata.
- Users granted OWNER access are allowed READ access and also are allowed to
read and write the object's ACL.
A couple of points that sometimes surprise users are worth noting:
1. There is no WRITE access for objects; attempting to set an ACL with WRITE
permission for an object will result in an error.
2. The bucket ACL plays no role in determining who can read objects; only the
object ACL matters for that purpose. This is different from how things
work in Linux file systems, where both the file and directory permission
control file read access. It also means, for example, that someone with
OWNER over the bucket may not have read access to objects in the bucket.
This is by design, and supports useful cases. For example, you might want
to set up bucket ownership so that a small group of administrators have
OWNER on the bucket (with the ability to delete data to control storage
costs), but not grant those users read access to the object data (which
might be sensitive data that should only be accessed by a different
specific group of users).
<B>ACCESSING PUBLIC OBJECTS</B>
Objects with public READ access can be accessed anonymously by gsutil, via
a browser, or via Cloud Storage APIs. For more details on accessing public
objects, see:
https://cloud.google.com/storage/docs/access-public-data
<B>CANNED ACLS</B>
The simplest way to set an ACL on a bucket or object is using a "canned
ACL". The available canned ACLs are:
project-private
Gives permission to the project team based on their roles. Anyone who is
part of the team has READ permission, and project owners and project editors
have OWNER permission. This is the default ACL for newly created
buckets. This is also the default ACL for newly created objects unless the
default object ACL for that bucket has been changed. For more details see
"gsutil help projects".
private
Gives the requester (and only the requester) OWNER permission for a
bucket or object.
public-read
Gives all users (whether logged in or anonymous) READ permission. When
you apply this to an object, anyone on the Internet can read the object
without authenticating.
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see 'gsutil help setmeta'.
NOTE: Setting a bucket ACL to public-read will remove all OWNER and WRITE
permissions from everyone except the project owner group. Setting an object
ACL to public-read will remove all OWNER and WRITE permissions from
everyone except the object owner. For this reason, we recommend using
the "acl ch" command to make these changes; see "gsutil help acl ch" for
details.
public-read-write
Gives all users READ and WRITE permission. This ACL applies only to buckets.
NOTE: Setting a bucket to public-read-write will allow anyone on the
Internet to upload anything to your bucket. You will be responsible for this
content.
NOTE: Setting a bucket ACL to public-read-write will remove all OWNER
permissions from everyone except the project owner group. Setting an object
ACL to public-read-write will remove all OWNER permissions from
everyone except the object owner. For this reason, we recommend using
the "acl ch" command to make these changes; see "gsutil help acl ch" for
details.
authenticated-read
Gives the requester OWNER permission and gives all authenticated
Google account holders READ permission.
bucket-owner-read
Gives the requester OWNER permission and gives the bucket owner READ
permission. This is used only with objects.
bucket-owner-full-control
Gives the requester OWNER permission and gives the bucket owner
OWNER permission. This is used only with objects.
<B>ACL JSON</B>
When you use a canned ACL, it is translated into a JSON representation
that can later be retrieved and edited to specify more fine-grained
detail about who can read and write buckets and objects. By running
the "gsutil acl get" command you can retrieve the ACL JSON, and edit it to
customize the permissions.
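For example, a typical edit cycle looks like this (the bucket and object
names are hypothetical):
  gsutil acl get gs://bucket/object > acl.json
  # ... edit acl.json in your preferred editor ...
  gsutil acl set acl.json gs://bucket/object
Alternatively, to grant a single user READ access without replacing the rest
of the ACL:
  gsutil acl ch -u [email protected]:R gs://bucket/object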
As an example, if you create an object in a bucket that has no default
object ACL set and then retrieve the ACL on the object, it will look
something like this:
[
{
"entity": "group-00b4903a9740e42c29800f53bd5a9a62a2f96eb3f64a4313a115df3f3a776bf7",
"entityId": "00b4903a9740e42c29800f53bd5a9a62a2f96eb3f64a4313a115df3f3a776bf7",
"role": "OWNER"
},
{
"entity": "group-00b4903a977fd817e9da167bc81306489181a110456bb635f466d71cf90a0d51",
"entityId": "00b4903a977fd817e9da167bc81306489181a110456bb635f466d71cf90a0d51",
"role": "OWNER"
},
{
"entity": "00b4903a974898cc8fc309f2f2835308ba3d3df1b889d3fc7e33e187d52d8e71",
"entityId": "00b4903a974898cc8fc309f2f2835308ba3d3df1b889d3fc7e33e187d52d8e71",
"role": "READER"
}
]
The ACL consists of a collection of elements, each of which specifies an Entity
and a Role. Entities are the way you specify an individual or group of
individuals, and Roles specify what access they're permitted.
This particular ACL grants OWNER to two groups (which means members
of those groups are allowed to read the object and read and write the ACL),
and READ permission to a third group. The project groups are (in order)
the project owners group, editors group, and viewers group.
The 64 digit hex identifiers (following any prefixes like "group-") used in
this ACL are called canonical IDs. They are used to identify predefined
groups associated with the project that owns the bucket: the Project Owners,
Project Editors, and All Project Team Members groups. For more information
about the permissions and roles of these project groups, see "gsutil help projects".
Here's an example of an ACL specified using the group-by-email and
group-by-domain entities:
[
{
"entity": "[email protected]",
"email": "[email protected]",
"role": "OWNER"
},
{
"domain": "example.com",
"entity": "domain-example.com",
"role": "READER"
}
]
This ACL grants members of an email group OWNER, and grants READ
access to any user in a domain (which must be a Google Apps for Business
domain). By applying email group grants to a collection of objects
you can edit access control for large numbers of objects at once via
http://groups.google.com. That way, for example, you can easily and quickly
change access to a group of company objects when employees join and leave
your company (i.e., without having to individually change ACLs across
potentially millions of objects).
<B>SHARING SCENARIOS</B>
For more detailed examples of how to achieve various useful sharing use
cases, see https://cloud.google.com/storage/docs/collaboration
""")
class CommandOptions(HelpProvider):
"""Additional help about Access Control Lists."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='acls',
help_name_aliases=[
'ACL',
'access control',
'access control list',
'authorization',
'canned',
'canned acl',
],
help_type='additional_help',
help_one_line_summary='Working With Access Control Lists',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/acls.py
| 0.417984 | 0.459501 |
acls.py
|
pypi
|
"""Additional help about wildcards."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>DESCRIPTION</B>
gsutil supports URI wildcards for files, buckets, and objects. For
example, the command:
gsutil cp gs://bucket/data/abc* .
copies all objects that start with gs://bucket/data/abc followed by any
number of characters within that subdirectory.
<B>WILDCARD CHARACTERS</B>
gsutil uses the following wildcards:
*
Match any number of characters within the current directory level. For
example, ``gs://my-bucket/abc/d*`` matches the object ``abc/def.txt``
but not the object ``abc/def/g.txt``.
**
Match any number of characters across directory boundaries. When used
as part of a local file path, the ``**`` wildcard should always be
immediately preceded by a directory delimiter. For example,
``my-directory/**.txt`` is valid, but ``my-directory/abc**`` is not.
NOTE: Some command shells expand wildcard matches prior to running the
gsutil command; however, most shells do not support recursive
wildcards (``**``). You can skip command shell wildcard expansion and
instead use gsutil's wildcarding support in such shells by
single-quoting (on Linux) or double-quoting (on Windows) the argument.
For example: ``gsutil cp 'data/**' gs://bucket``
?
Match a single character. For example ``gs://bucket/??.txt``
only matches objects with two characters followed by .txt.
[chars]
Match any of the specified characters. For example
``gs://bucket/[aeiou].txt`` matches objects that contain a single
vowel character followed by ``.txt``.
[char range]
Match any of the range of characters. For example
``gs://bucket/[a-m].txt`` matches objects that contain letters
a, b, c, ... or m, and end with ``.txt``.
You can combine wildcards to provide more powerful matches, for example:
gs://*/[a-m]??.j*g
Note that unless your command includes a flag to return `noncurrent
object versions
<https://cloud.google.com/storage/docs/object-versioning>`_ in the
results, these wildcards only match live object versions.
gsutil supports the same wildcards for both object and file names. Thus,
for example:
gsutil cp data/abc* gs://bucket
matches all files that start with ``abc`` in the ``data`` directory of
the local file system.
<B>POTENTIALLY SURPRISING BEHAVIOR WHEN USING WILDCARDS</B>
There are a couple of ways that using wildcards can result in surprising
behavior:
1. When using wildcards in bucket names, matches are limited to buckets in
the `project <https://cloud.google.com/storage/docs/projects>`_
specified in the ``-p`` flag. Some commands, such as ``gsutil rm``, do
not support the ``-p`` flag. If the ``-p`` flag is not or cannot be used
in a command, matches are limited to buckets in the default project.
2. Shells (like bash and zsh) can attempt to expand wildcards before passing
the arguments to gsutil. If the wildcard was supposed to refer to a cloud
object, this can result in surprising "Not found" errors (e.g., if the
shell tries to expand the wildcard ``gs://my-bucket/*`` on the local
machine, matching no local files, and failing the command).
Note that some shells include additional characters in their wildcard
character sets. For example, if you use zsh with the extendedglob option
enabled it treats ``#`` as a special character, which conflicts with
that character's use in referencing versioned objects (see
`Restore noncurrent object versions
<https://cloud.google.com/storage/docs/using-versioned-objects#restore>`_
for an example).
To avoid these problems, surround the wildcarded expression with single
quotes (on Linux) or double quotes (on Windows).
3. Attempting to specify a filename that contains wildcard characters won't
work, because gsutil tries to expand the wildcard characters rather
than using them as literal characters. For example, running the command:
gsutil cp './file[1]' gs://my-bucket
causes gsutil to try to match the ``[1]`` part as a wildcard.
There's an open issue to support a "raw" mode for gsutil to provide a
way to work with file names that contain wildcard characters, but until /
unless that support is implemented there's no really good way to use
gsutil with such file names. You could use a wildcard to name such files,
for example replacing the above command with:
gsutil cp './file*1*' gs://my-bucket
but that approach may be difficult to use in general.
<B>DIFFERENT BEHAVIOR FOR "DOT" FILES IN LOCAL FILE SYSTEM</B>
Per standard Unix behavior, the wildcard ``*`` only matches files that
don't start with a ``.`` character (to avoid confusion with the ``.`` and
``..`` directories present in all Unix directories). gsutil provides this
same behavior when using wildcards over a file system URI, but does not
provide this behavior over cloud URIs. For example, the following command
copies all objects from gs://bucket1 to gs://bucket2:
gsutil cp gs://bucket1/* gs://bucket2
but the following command copies only files that don't start with a ``.``
from the directory ``dir`` to gs://bucket1:
gsutil cp dir/* gs://bucket1
<B>EFFICIENCY CONSIDERATION: USING WILDCARDS OVER MANY OBJECTS</B>
It is more efficient, faster, and less network traffic-intensive
to use wildcards that have a non-wildcard object-name prefix, like:
gs://bucket/abc*.txt
than it is to use wildcards as the first part of the object name, like:
gs://bucket/*abc.txt
This is because the request for ``gs://bucket/abc*.txt`` asks the server to
send back the subset of results whose object names start with ``abc`` at the
bucket root, and then gsutil filters the result list for objects whose name
ends with ``.txt``. In contrast, ``gs://bucket/*abc.txt`` asks the server for
the complete list of objects in the bucket root, and then filters for those
objects whose name ends with ``abc.txt``. This efficiency consideration
becomes increasingly noticeable when you use buckets containing thousands or
more objects. It is sometimes possible to set up the names of your objects to
fit with expected wildcard matching patterns, to take advantage of the
efficiency of doing server-side prefix requests. See, for example
"gsutil help prod" for a concrete use case example.
<B>EFFICIENCY CONSIDERATION: USING MID-PATH WILDCARDS</B>
Suppose you have a bucket with these objects:
gs://bucket/obj1
gs://bucket/obj2
gs://bucket/obj3
gs://bucket/obj4
gs://bucket/dir1/obj5
gs://bucket/dir2/obj6
If you run the command:
gsutil ls gs://bucket/*/obj5
gsutil performs a /-delimited top-level bucket listing and then one bucket
listing for each subdirectory, for a total of 3 bucket listings:
GET /bucket/?delimiter=/
GET /bucket/?prefix=dir1/obj5&delimiter=/
GET /bucket/?prefix=dir2/obj5&delimiter=/
The more bucket listings your wildcard requires, the slower and more expensive
it becomes. The number of bucket listings required grows as:
- the number of wildcard components (e.g., "gs://bucket/a??b/c*/*/d"
has 3 wildcard components);
- the number of subdirectories that match each component; and
- the number of results (pagination is implemented using one GET
request per 1000 results, specifying markers for each).
If you want to use a mid-path wildcard, you might try instead using a
recursive wildcard, for example:
gsutil ls gs://bucket/**/obj5
This matches more objects than ``gs://bucket/*/obj5`` (since it spans
directories), but is implemented using a delimiter-less bucket listing
request (which means fewer bucket requests, though it lists the entire
bucket and filters locally, so that could require a non-trivial amount
of network traffic).
""")
class CommandOptions(HelpProvider):
"""Additional help about wildcards."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='wildcards',
help_name_aliases=['wildcard', '*', '**'],
help_type='additional_help',
help_one_line_summary='Wildcard Names',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/wildcards.py
| 0.926491 | 0.370852 |
wildcards.py
|
pypi
|
"""Additional help about CRC32C and installing crcmod."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
Google Cloud Storage provides a cyclic redundancy check (CRC) header that
allows clients to verify the integrity of object contents. For non-composite
objects Google Cloud Storage also provides an MD5 header to allow clients to
verify object integrity, but for composite objects only the CRC is available.
gsutil automatically performs integrity checks on all uploads and downloads.
Additionally, you can use the ``gsutil hash`` command to calculate a CRC for
any local file.
The CRC variant used by Google Cloud Storage is called CRC32C (Castagnoli),
which is not available in the standard Python distribution. The implementation
of CRC32C used by gsutil is provided by a third-party Python module called
`crcmod <https://pypi.python.org/pypi/crcmod>`_.
The crcmod module contains a pure-Python implementation of CRC32C, but using
it results in very poor performance. A Python C extension is also provided by
crcmod, which requires compiling into a binary module for use. gsutil ships
with a precompiled crcmod C extension for macOS; for other platforms, see
the installation instructions below.
At the end of each copy operation, the ``gsutil cp`` and ``gsutil rsync``
commands validate that the checksum of the source file/object matches the
checksum of the destination file/object. If the checksums do not match,
gsutil will delete the invalid copy and print a warning message. This very
rarely happens, but if it does, please contact [email protected].
<B>CONFIGURATION</B>
To determine if the compiled version of crcmod is available in your Python
environment, you can inspect the output of the ``gsutil version`` command for
the "compiled crcmod" entry:
$ gsutil version -l
...
compiled crcmod: True
...
If your crcmod library is compiled to a native binary, this value will be
True. If using the pure-Python version, the value will be False.
To control gsutil's behavior in response to crcmod's status, you can set the
"check_hashes" configuration variable. For details on this variable, see the
surrounding comments in your boto configuration file. If "check_hashes"
is not present in your configuration file, rerun ``gsutil config`` to
regenerate the file.
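For example, to make gsutil fail rather than fall back to the much slower
pure-Python implementation, you could set the following (one of several
documented values for this variable) in the [GSUtil] section of your boto
configuration file:
  check_hashes = if_fast_else_fail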
<B>INSTALLATION</B>
These installation instructions assume that:
- You have ``pip`` installed. Consult the `pip installation instructions
<https://pip.pypa.io/en/stable/installing/>`_ for details on how
to install ``pip``.
- Your installation of ``pip`` can be found in your ``PATH`` environment
variable. If it cannot, you may need to replace ``pip3`` in the commands
below with the full path to the executable.
- You are installing the crcmod package for use with your system installation
of Python, and thus use the ``sudo`` command. If installing crcmod for a
different Python environment (e.g. in a virtualenv), you should omit
``sudo`` from the commands below.
- You are using a Python 3 version with gsutil. You can determine which
Python version gsutil is using by running ``gsutil version -l`` and looking
for the ``python version: 2.x.x`` or ``python version: 3.x.x`` line.
CentOS, RHEL, and Fedora
------------------------
To compile and install crcmod:
sudo yum install gcc python3-devel python3-setuptools redhat-rpm-config
sudo pip3 uninstall crcmod
sudo pip3 install --no-cache-dir -U crcmod
Debian and Ubuntu
-----------------
To compile and install crcmod:
sudo apt-get install gcc python3-dev python3-setuptools
sudo pip3 uninstall crcmod
sudo pip3 install --no-cache-dir -U crcmod
Enterprise SUSE
-----------------
To compile and install crcmod when using Enterprise SUSE for SAP 12:
sudo zypper install gcc python-devel
sudo pip uninstall crcmod
sudo pip install --no-cache-dir -U crcmod
To compile and install crcmod when using Enterprise SUSE for SAP 15:
sudo zypper install gcc python3-devel
sudo pip uninstall crcmod
sudo pip install --no-cache-dir -U crcmod
macOS
-----
gsutil distributes a pre-compiled version of crcmod for macOS, so you shouldn't
need to compile and install it yourself. If for some reason the pre-compiled
version is not being detected, please let the Google Cloud Storage team know
(see ``gsutil help support``).
To compile manually on macOS, you will first need to install
`XCode <https://developer.apple.com/xcode/>`_ and then run:
pip3 install -U crcmod
Windows
-------
An installer is available for the compiled version of crcmod from the Python
Package Index (PyPI) at the following URL:
https://pypi.python.org/pypi/crcmod/1.7
NOTE: If you have installed crcmod and gsutil hasn't detected it, it may have
been installed to the wrong directory. It should be located at
<python_dir>\\files\\Lib\\site-packages\\crcmod\\
In some cases the installer will incorrectly install to
<python_dir>\\Lib\\site-packages\\crcmod\\
Manually copying the crcmod directory to the correct location should resolve
the issue.
""")
class CommandOptions(HelpProvider):
"""Additional help about CRC32C and installing crcmod."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='crc32c',
help_name_aliases=['crc32', 'crc', 'crcmod'],
help_type='additional_help',
help_one_line_summary='CRC32C and Installing crcmod',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
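# --- Illustrative sketch (not part of gsutil) --------------------------------
# A minimal, hedged check of a local crcmod installation. It relies only on the
# public crcmod.predefined API; 0xE3069283 is the standard CRC-32C check value
# for b'123456789'. The compiled-extension probe reads the internal
# crcmod.crcmod._usingExtension flag, which is an implementation detail of
# crcmod and may not exist in every version.
def _sketch_check_crcmod():
  import crcmod.predefined

  crc32c = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
  if crc32c(b'123456789') != 0xE3069283:
    raise AssertionError('crcmod returned an unexpected CRC-32C value')
  try:
    import crcmod.crcmod
    compiled = bool(getattr(crcmod.crcmod, '_usingExtension', False))
  except ImportError:
    compiled = False
  # Mirrors the "compiled crcmod: True/False" line printed by `gsutil version -l`.
  print('compiled crcmod: %s' % compiled)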
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/addlhelp/crc32c.py
| 0.876317 | 0.3512 |
crc32c.py
|
pypi
|
"""Implementation of Unix-like mv command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.commands.cp import CP_SUB_ARGS
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.storage_url import StorageUrlFromString
from gslib.utils.constants import NO_MAX
_SYNOPSIS = """
gsutil mv [-p] src_url dst_url
gsutil mv [-p] src_url... dst_url
gsutil mv [-p] -I dst_url
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The ``gsutil mv`` command allows you to move data between your local file
system and the cloud, move data within the cloud, and move data between
cloud storage providers. For example, to move all objects from a
bucket to a local directory you could use:
gsutil mv gs://my_bucket/* dir
Similarly, to move all objects from a local directory to a bucket you could
use:
gsutil mv ./dir gs://my_bucket
<B>RENAMING GROUPS OF OBJECTS</B>
You can use the ``gsutil mv`` command to rename all objects with a given
prefix to have a new prefix. For example, the following command renames all
objects under gs://my_bucket/oldprefix to be under gs://my_bucket/newprefix,
otherwise preserving the naming structure:
gsutil mv gs://my_bucket/oldprefix gs://my_bucket/newprefix
Note that when using ``mv`` to rename groups of objects with a common
prefix, you cannot specify the source URL using wildcards; you must spell
out the complete name.
If you do a rename as specified above and you want to preserve ACLs, you
should use the ``-p`` option (see OPTIONS).
If you have a large number of files to move you might want to use the
``gsutil -m`` option, to perform a multi-threaded/multi-processing move:
gsutil -m mv gs://my_bucket/oldprefix gs://my_bucket/newprefix
<B>NON-ATOMIC OPERATION</B>
Unlike the case with many file systems, the gsutil mv command does not
perform a single atomic operation. Rather, it performs a copy from source
to destination followed by removing the source for each object.
A consequence of this is that, in addition to normal network and operation
charges, if you move a Nearline Storage, Coldline Storage, or Archive Storage
object, deletion and data retrieval charges apply. See the `documentation
<https://cloud.google.com/storage/pricing>`_ for pricing details.
<B>OPTIONS</B>
All options that are available for the gsutil cp command are also available
for the gsutil mv command (except for the -R flag, which is implied by the
``gsutil mv`` command). Please see the OPTIONS sections of "gsutil help cp"
for more information.
""")
class MvCommand(Command):
"""Implementation of gsutil mv command.
Note that there is no atomic rename operation - this command is simply
a shorthand for 'cp' followed by 'rm'.
"""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'mv',
command_name_aliases=['move', 'ren', 'rename'],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=NO_MAX,
# Flags for mv are passed through to cp.
supported_sub_args=CP_SUB_ARGS,
file_url_ok=True,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()
],
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='mv',
help_name_aliases=['move', 'rename'],
help_type='command_help',
help_one_line_summary='Move/rename objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the mv command."""
# Check each source arg, refusing to delete a bucket src URL (force users
# to explicitly do that as a separate operation).
for arg_to_check in self.args[0:-1]:
url = StorageUrlFromString(arg_to_check)
if url.IsCloudUrl() and (url.IsBucket() or url.IsProvider()):
raise CommandException('You cannot move a source bucket using the mv '
'command. If you meant to move\nall objects in '
'the bucket, you can use a command like:\n'
'\tgsutil mv %s/* %s' %
(arg_to_check, self.args[-1]))
# Insert command-line opts in front of args so they'll be picked up by cp
# and rm commands (e.g., for -p option). Use undocumented (internal
# use-only) cp -M option, which causes each original object to be deleted
# after successfully copying to its destination, and also causes naming
# behavior consistent with Unix mv naming behavior (see comments in
# ConstructDstUrl).
unparsed_args = ['-M']
if self.recursion_requested:
unparsed_args.append('-R')
unparsed_args.extend(self.unparsed_args)
self.command_runner.RunNamedCommand(
'cp',
args=unparsed_args,
headers=self.headers,
debug=self.debug,
trace_token=self.trace_token,
user_project=self.user_project,
parallel_operations=self.parallel_operations)
return 0
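# --- Illustrative sketch (not part of gsutil) --------------------------------
# The NON-ATOMIC OPERATION section above describes mv as a copy followed by a
# delete of the source. The standard-library sketch below mirrors that two-step
# behavior for local files only; it is an analogy for the semantics, not how
# gsutil talks to cloud storage.
def _sketch_copy_then_delete(src_path, dst_path):
  import os
  import shutil

  # Step 1: copy source to destination (the "cp" half).
  shutil.copy2(src_path, dst_path)
  # Step 2: remove the source only after the copy succeeded (the "rm" half).
  # If the process dies between the two steps, both copies exist, which is
  # exactly why the operation is not atomic.
  os.remove(src_path)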
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/mv.py
| 0.904983 | 0.309721 |
mv.py
|
pypi
|
"""This module provides the pap command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
_SET_SYNOPSIS = """
gsutil pap set (enforced|unspecified) gs://<bucket_name>...
"""
_GET_SYNOPSIS = """
gsutil pap get gs://<bucket_name>...
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The ``pap set`` command configures public access prevention
for Cloud Storage buckets. If you set a bucket to be
``unspecified``, it uses public access prevention only if
the bucket is subject to the `public access prevention
<https://cloud.google.com/storage/docs/org-policy-constraints#public-access-prevention>`_
organization policy.
<B>SET EXAMPLES</B>
Configure ``redbucket`` and ``bluebucket`` to use public
access prevention:
gsutil pap set enforced gs://redbucket gs://bluebucket
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``pap get`` command returns public access prevention
values for the specified Cloud Storage buckets.
<B>GET EXAMPLES</B>
Check if ``redbucket`` and ``bluebucket`` are using public
access prevention:
gsutil pap get gs://redbucket gs://bluebucket
"""
_DESCRIPTION = """
The ``pap`` command is used to retrieve or configure the
`public access prevention
<https://cloud.google.com/storage/docs/public-access-prevention>`_ setting of
Cloud Storage buckets. This command has two sub-commands: ``get`` and ``set``.
""" + _GET_DESCRIPTION + _SET_DESCRIPTION
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
# Aliases to make these more likely to fit on one line.
IamConfigurationValue = apitools_messages.Bucket.IamConfigurationValue
class PapCommand(Command):
"""Implements the gsutil pap command."""
command_spec = Command.CreateCommandSpec(
'pap',
command_name_aliases=['publicaccessprevention'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=2,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'get': [CommandArgument.MakeNCloudURLsArgument(1),],
'set': [
CommandArgument('mode', choices=['enforced', 'unspecified']),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='pap',
help_name_aliases=['publicaccessprevention'],
help_type='command_help',
help_one_line_summary='Configure public access prevention',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _ValidateBucketListingRefAndReturnBucketName(self, blr):
if blr.storage_url.scheme != 'gs':
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
def _GetPublicAccessPrevention(self, blr):
"""Gets the public access prevention setting for a bucket."""
bucket_url = blr.storage_url
bucket_metadata = self.gsutil_api.GetBucket(bucket_url.bucket_name,
fields=['iamConfiguration'],
provider=bucket_url.scheme)
iam_config = bucket_metadata.iamConfiguration
public_access_prevention = iam_config.publicAccessPrevention or 'unspecified'
bucket = str(bucket_url).rstrip('/')
print('%s: %s' % (bucket, public_access_prevention))
def _SetPublicAccessPrevention(self, blr, setting_arg):
"""Sets the Public Access Prevention setting for a bucket enforced or unspecified."""
bucket_url = blr.storage_url
iam_config = IamConfigurationValue()
iam_config.publicAccessPrevention = setting_arg
bucket_metadata = apitools_messages.Bucket(iamConfiguration=iam_config)
print('Setting Public Access Prevention %s for %s' %
(setting_arg, str(bucket_url).rstrip('/')))
self.gsutil_api.PatchBucket(bucket_url.bucket_name,
bucket_metadata,
fields=['iamConfiguration'],
provider=bucket_url.scheme)
return 0
def _Pap(self):
"""Handles pap command on Cloud Storage buckets."""
subcommand = self.args.pop(0)
if subcommand not in ('get', 'set'):
raise CommandException('pap only supports get|set')
subcommand_func = None
subcommand_args = []
setting_arg = None
if subcommand == 'get':
subcommand_func = self._GetPublicAccessPrevention
elif subcommand == 'set':
subcommand_func = self._SetPublicAccessPrevention
setting_arg = self.args.pop(0)
subcommand_args.append(setting_arg)
if self.gsutil_api.GetApiSelector('gs') != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used with the Cloud Storage '
'JSON API.') % self.command_name)))
# Iterate over bucket args, performing the specified subsubcommand.
some_matched = False
url_args = self.args
if not url_args:
self.RaiseWrongNumberOfArgumentsException()
for url_str in url_args:
# Throws a CommandException if the argument is not a bucket.
bucket_iter = self.GetBucketUrlIterFromArg(url_str)
for bucket_listing_ref in bucket_iter:
if self.gsutil_api.GetApiSelector(
bucket_listing_ref.storage_url.scheme) != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used for GCS '
'Buckets.') % self.command_name)))
some_matched = True
subcommand_func(bucket_listing_ref, *subcommand_args)
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def RunCommand(self):
"""Command entry point for the pap command."""
action_subcommand = self.args[0]
self.ParseSubOpts(check_args=True)
if action_subcommand == 'get' or action_subcommand == 'set':
metrics.LogCommandParams(sub_opts=self.sub_opts)
metrics.LogCommandParams(subcommands=[action_subcommand])
self._Pap()
else:
raise CommandException('Invalid subcommand "%s", use get|set instead.' %
action_subcommand)
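# --- Illustrative sketch (not part of gsutil) --------------------------------
# A simplified, standalone version of the dispatch pattern used by _Pap above:
# pop the subcommand, validate it, and route the remaining arguments to a
# handler. The handlers here are hypothetical placeholders that only print.
def _sketch_pap_dispatch(args):
  handlers = {
      'get': lambda buckets: print('get', buckets),
      'set': lambda mode, buckets: print('set', mode, buckets),
  }
  subcommand = args.pop(0)
  if subcommand not in handlers:
    raise ValueError('pap only supports get|set')
  if subcommand == 'set':
    mode = args.pop(0)  # 'enforced' or 'unspecified'
    return handlers['set'](mode, args)
  return handlers['get'](args)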
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/pap.py
| 0.892639 | 0.183265 |
pap.py
|
pypi
|
"""Implementation of logging configuration command for buckets."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
from apitools.base.py import encoding
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
from gslib.utils import text_util
_SET_SYNOPSIS = """
gsutil logging set on -b <logging_bucket_name> [-o <log_object_prefix>] gs://<bucket_name>...
gsutil logging set off gs://<bucket_name>...
"""
_GET_SYNOPSIS = """
gsutil logging get gs://<bucket_name>
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') + '\n'
_SET_DESCRIPTION = """
<B>SET</B>
The set sub-command has two sub-commands:
<B>ON</B>
The "gsutil logging set on" command will enable usage logging of the
buckets named by the specified URLs, outputting log files in the specified
logging_bucket. Cloud Storage doesn't validate the existence of logging_bucket
so users should ensure it already exists, and all URLs must name buckets
(e.g., gs://bucket). The required bucket parameter specifies the
bucket to which the logs are written, and the optional log_object_prefix
parameter specifies the prefix for log object names. The default prefix
is the bucket name. For example, the command:
gsutil logging set on -b gs://my_logging_bucket -o UsageLog \\
gs://my_bucket1 gs://my_bucket2
will cause all read and write activity to objects in gs://my_bucket1 and
gs://my_bucket2 to be logged to objects prefixed with the name "UsageLog",
with those log objects written to the bucket gs://my_logging_bucket.
In addition to enabling logging on your bucket(s), you will also need to grant
[email protected] write access to the log bucket, using this
command:
gsutil acl ch -g [email protected]:W gs://my_logging_bucket
Note that log data may contain sensitive information, so you should make
sure to set an appropriate default bucket ACL to protect that data. (See
"gsutil help defacl".)
<B>OFF</B>
This command will disable usage logging of the buckets named by the
specified URLs. All URLs must name buckets (e.g., gs://bucket).
No logging data is removed from the log buckets when you disable logging,
but Google Cloud Storage will stop delivering new logs once you have
run this command.
"""
_GET_DESCRIPTION = """
<B>GET</B>
If logging is enabled for the specified bucket url, the server responds
with a JSON document that looks something like this:
{
"logBucket": "my_logging_bucket",
"logObjectPrefix": "UsageLog"
}
You can download log data from your log bucket using the gsutil cp command.
"""
_DESCRIPTION = """
Google Cloud Storage offers usage logs and storage logs in the form of CSV
files that you can download and view. Usage logs provide information for all
of the requests made on a specified bucket and are created hourly. Storage
logs provide information about the storage consumption of that bucket for
the last day and are created daily.
Once set up, usage logs and storage logs are automatically created as new
objects in a bucket that you specify. Usage logs and storage logs are
subject to the same pricing as other objects stored in Cloud Storage.
The logging command has two sub-commands:
""" + _SET_DESCRIPTION + _GET_DESCRIPTION + """
<B>USAGE LOG AND STORAGE DATA FIELDS</B>
For a complete list of usage log fields and storage data fields, see:
https://cloud.google.com/storage/docs/access-logs#format
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
class LoggingCommand(Command):
"""Implementation of gsutil logging command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'logging',
command_name_aliases=['disablelogging', 'enablelogging', 'getlogging'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='b:o:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument('mode', choices=['on', 'off']),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='logging',
help_name_aliases=[
'loggingconfig',
'logs',
'log',
'getlogging',
'enablelogging',
'disablelogging',
],
help_type='command_help',
help_one_line_summary='Configure or retrieve logging on buckets',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _Get(self):
"""Gets logging configuration for a bucket."""
bucket_url, bucket_metadata = self.GetSingleBucketUrlFromArg(
self.args[0], bucket_fields=['logging'])
if bucket_url.scheme == 's3':
text_util.print_to_fd(self.gsutil_api.XmlPassThroughGetLogging(
bucket_url, provider=bucket_url.scheme),
end='')
else:
if (bucket_metadata.logging and bucket_metadata.logging.logBucket and
bucket_metadata.logging.logObjectPrefix):
text_util.print_to_fd(
str(encoding.MessageToJson(bucket_metadata.logging)))
else:
text_util.print_to_fd('%s has no logging configuration.' % bucket_url)
return 0
def _Enable(self):
"""Enables logging configuration for a bucket."""
# Disallow multi-provider 'logging set on' calls, because the schemas
# differ.
if not UrlsAreForSingleProvider(self.args):
raise CommandException('"logging set on" command spanning providers not '
'allowed.')
target_bucket_url = None
target_prefix = None
for opt, opt_arg in self.sub_opts:
if opt == '-b':
target_bucket_url = StorageUrlFromString(opt_arg)
if opt == '-o':
target_prefix = opt_arg
if not target_bucket_url:
raise CommandException('"logging set on" requires \'-b <log_bucket>\' '
'option')
if not target_bucket_url.IsBucket():
raise CommandException('-b option must specify a bucket URL.')
# Iterate over URLs, expanding wildcards and setting logging on each.
some_matched = False
for url_str in self.args:
bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
for blr in bucket_iter:
url = blr.storage_url
some_matched = True
self.logger.info('Enabling logging on %s...', blr)
logging = apitools_messages.Bucket.LoggingValue(
logBucket=target_bucket_url.bucket_name,
logObjectPrefix=target_prefix or url.bucket_name)
bucket_metadata = apitools_messages.Bucket(logging=logging)
self.gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
provider=url.scheme,
fields=['id'])
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(self.args))
return 0
def _Disable(self):
"""Disables logging configuration for a bucket."""
# Iterate over URLs, expanding wildcards, and disabling logging on each.
some_matched = False
for url_str in self.args:
bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
for blr in bucket_iter:
url = blr.storage_url
some_matched = True
self.logger.info('Disabling logging on %s...', blr)
logging = apitools_messages.Bucket.LoggingValue()
bucket_metadata = apitools_messages.Bucket(logging=logging)
self.gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
provider=url.scheme,
fields=['id'])
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(self.args))
return 0
def RunCommand(self):
"""Command entry point for the logging command."""
# Parse the subcommand and alias for the new logging command.
action_subcommand = self.args.pop(0)
if action_subcommand == 'get':
func = self._Get
metrics.LogCommandParams(subcommands=[action_subcommand])
elif action_subcommand == 'set':
state_subcommand = self.args.pop(0)
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if state_subcommand == 'on':
func = self._Enable
metrics.LogCommandParams(
subcommands=[action_subcommand, state_subcommand])
elif state_subcommand == 'off':
func = self._Disable
metrics.LogCommandParams(
subcommands=[action_subcommand, state_subcommand])
else:
raise CommandException(
('Invalid subcommand "%s" for the "%s %s" command.\n'
'See "gsutil help logging".') %
(state_subcommand, self.command_name, action_subcommand))
else:
raise CommandException(('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help logging".') %
(action_subcommand, self.command_name))
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
func()
return 0
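# --- Illustrative sketch (not part of gsutil) --------------------------------
# "logging get" prints the bucket's logging configuration as JSON (see the GET
# section above). This sketch builds the same message type used by _Enable and
# serializes it with the apitools encoder already imported in this module, to
# show the expected {"logBucket": ..., "logObjectPrefix": ...} shape. The
# bucket name and prefix are hypothetical.
def _sketch_logging_json():
  logging_value = apitools_messages.Bucket.LoggingValue(
      logBucket='my_logging_bucket', logObjectPrefix='UsageLog')
  return str(encoding.MessageToJson(logging_value))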
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/logging.py
| 0.762159 | 0.175114 |
logging.py
|
pypi
|
"""Implementation of Unix-like du command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import locale
import sys
import six
from gslib.bucket_listing_ref import BucketListingObject
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.utils import ls_helper
from gslib.utils.constants import NO_MAX
from gslib.utils.constants import S3_DELETE_MARKER_GUID
from gslib.utils.constants import UTF8
from gslib.utils.text_util import print_to_fd
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils import text_util
_SYNOPSIS = """
gsutil du url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The du command displays the amount of space in bytes used up by the
objects in a bucket, subdirectory, or project. The syntax emulates
the Linux ``du -b`` command, which reports the disk usage of files and subdirectories.
For example, the following command reports the total space used by all objects and
subdirectories under gs://your-bucket/dir:
gsutil du -s gs://your-bucket/dir
<B>OPTIONS</B>
-0 Ends each output line with a 0 byte rather than a newline. You
can use this to make the output machine-readable.
-a Includes noncurrent object versions for a bucket with Object
Versioning enabled. Also prints the
generation and metageneration number for each listed object.
-c Includes a total size at the end of the output.
-e Exclude a pattern from the report. Example: -e "*.o"
excludes any object that ends in ".o". Can be specified multiple
times.
-h Prints object sizes in human-readable format. For example, ``1 KiB``,
``234 MiB``, or ``2 GiB``.
-s Displays only the total size for each argument.
-X Similar to ``-e``, but excludes patterns from the given file. The
patterns to exclude should be listed one per line.
<B>EXAMPLES</B>
To list the size of each object in a bucket:
gsutil du gs://bucketname
To list the size of each object in the ``prefix`` subdirectory:
gsutil du gs://bucketname/prefix/*
To print the total number of bytes in a bucket in human-readable form:
gsutil du -ch gs://bucketname
To see a summary of the total number of bytes in two given buckets:
gsutil du -s gs://bucket1 gs://bucket2
To list the size of each object in a bucket with Object Versioning
enabled, including noncurrent objects:
gsutil du -a gs://bucketname
To list the size of each object in a bucket, except objects that end in ".bak",
with each object printed ending in a null byte:
gsutil du -e "*.bak" -0 gs://bucketname
To list the size of each bucket in a project and the total size of the
project:
gsutil -o GSUtil:default_project_id=project-name du -shc
""")
class DuCommand(Command):
"""Implementation of gsutil du command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'du',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='0ace:hsX:',
file_url_ok=False,
provider_url_ok=True,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument(),
],
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='du',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Display object size usage',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _PrintSummaryLine(self, num_bytes, name):
size_string = (MakeHumanReadable(num_bytes)
if self.human_readable else six.text_type(num_bytes))
text_util.print_to_fd('{size:<11} {name}'.format(
size=size_string, name=six.ensure_text(name)),
end=self.line_ending)
def _PrintInfoAboutBucketListingRef(self, bucket_listing_ref):
"""Print listing info for given bucket_listing_ref.
Args:
bucket_listing_ref: BucketListing being listed.
Returns:
Tuple (number of objects, object size)
Raises:
Exception: if calling bug encountered.
"""
obj = bucket_listing_ref.root_object
url_str = bucket_listing_ref.url_string
if (obj.metadata and
S3_DELETE_MARKER_GUID in obj.metadata.additionalProperties):
size_string = '0'
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
size_string = (MakeHumanReadable(obj.size)
if self.human_readable else str(obj.size))
num_bytes = obj.size
num_objs = 1
if not self.summary_only:
url_detail = '{size:<11} {url}{ending}'.format(
size=size_string,
url=six.ensure_text(url_str),
ending=six.ensure_text(self.line_ending))
print_to_fd(url_detail, file=sys.stdout, end='')
return (num_objs, num_bytes)
def RunCommand(self):
"""Command entry point for the du command."""
self.line_ending = '\n'
self.all_versions = False
self.produce_total = False
self.human_readable = False
self.summary_only = False
self.exclude_patterns = []
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-0':
self.line_ending = '\0'
elif o == '-a':
self.all_versions = True
elif o == '-c':
self.produce_total = True
elif o == '-e':
self.exclude_patterns.append(a)
elif o == '-h':
self.human_readable = True
elif o == '-s':
self.summary_only = True
elif o == '-X':
if a == '-':
f = sys.stdin
f_close = False
else:
f = open(a, 'r') if six.PY2 else open(a, 'r', encoding=UTF8)
f_close = True
self.exclude_patterns = [six.ensure_text(line.strip()) for line in f]
if f_close:
f.close()
if not self.args:
# Default to listing all gs buckets.
self.args = ['gs://']
total_bytes = 0
got_nomatch_errors = False
def _PrintObjectLong(blr):
return self._PrintInfoAboutBucketListingRef(blr)
def _PrintNothing(unused_blr=None):
pass
def _PrintDirectory(num_bytes, blr):
if not self.summary_only:
self._PrintSummaryLine(num_bytes, blr.url_string.encode(UTF8))
for url_arg in self.args:
top_level_storage_url = StorageUrlFromString(url_arg)
if top_level_storage_url.IsFileUrl():
raise CommandException('Only cloud URLs are supported for %s' %
self.command_name)
bucket_listing_fields = ['size']
listing_helper = ls_helper.LsHelper(
self.WildcardIterator,
self.logger,
print_object_func=_PrintObjectLong,
print_dir_func=_PrintNothing,
print_dir_header_func=_PrintNothing,
print_dir_summary_func=_PrintDirectory,
print_newline_func=_PrintNothing,
all_versions=self.all_versions,
should_recurse=True,
exclude_patterns=self.exclude_patterns,
fields=bucket_listing_fields)
# LsHelper expands to objects and prefixes, so perform a top-level
# expansion first.
if top_level_storage_url.IsProvider():
# Provider URL: use bucket wildcard to iterate over all buckets.
top_level_iter = self.WildcardIterator(
'%s://*' %
top_level_storage_url.scheme).IterBuckets(bucket_fields=['id'])
elif top_level_storage_url.IsBucket():
top_level_iter = self.WildcardIterator(
'%s://%s' % (top_level_storage_url.scheme,
top_level_storage_url.bucket_name)).IterBuckets(
bucket_fields=['id'])
else:
top_level_iter = [BucketListingObject(top_level_storage_url)]
for blr in top_level_iter:
storage_url = blr.storage_url
if storage_url.IsBucket() and self.summary_only:
storage_url = StorageUrlFromString(
storage_url.CreatePrefixUrl(wildcard_suffix='**'))
_, exp_objs, exp_bytes = listing_helper.ExpandUrlAndPrint(storage_url)
if (storage_url.IsObject() and exp_objs == 0 and
ContainsWildcard(url_arg) and not self.exclude_patterns):
got_nomatch_errors = True
total_bytes += exp_bytes
if self.summary_only:
self._PrintSummaryLine(exp_bytes,
blr.url_string.rstrip('/').encode(UTF8))
if self.produce_total:
self._PrintSummaryLine(total_bytes, 'total')
if got_nomatch_errors:
raise CommandException('One or more URLs matched no objects.')
return 0
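# --- Illustrative sketch (not part of gsutil) --------------------------------
# The -h option formats byte counts with binary units (KiB, MiB, ...). This is
# a standalone reimplementation of that idea for illustration only; it is not
# gslib.utils.unit_util.MakeHumanReadable used above and may round differently.
def _sketch_human_readable(num_bytes):
  units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
  size = float(num_bytes)
  for unit in units:
    if size < 1024 or unit == units[-1]:
      return '%.2f %s' % (size, unit)
    size /= 1024.0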
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/du.py
| 0.728362 | 0.336195 |
du.py
|
pypi
|
"""This module provides the autoclass command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils import text_util
from gslib.utils.constants import NO_MAX
_SET_SYNOPSIS = """
gsutil autoclass set (on|off) gs://<bucket_name>...
"""
_GET_SYNOPSIS = """
gsutil autoclass get gs://<bucket_name>...
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The "set" sub-command requires an additional sub-command, either "on" or
"off", which will enable or disable autoclass for the specified bucket(s).
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "get" sub-command gets the current autoclass configuration for a
bucket. The returned configuration will have the following fields:
enabled: a boolean field indicating whether the feature is on or off.
toggleTime: a timestamp indicating when the enabled field was set.
"""
_DESCRIPTION = """
The Autoclass feature automatically selects the best storage class for
objects based on access patterns. This command has two sub-commands:
``get`` and ``set``.
""" + _GET_DESCRIPTION + _SET_DESCRIPTION
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
class AutoclassCommand(Command):
"""Implements the gsutil autoclass command."""
command_spec = Command.CreateCommandSpec(
'autoclass',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=2,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'get': [CommandArgument.MakeNCloudURLsArgument(1),],
'set': [
CommandArgument('mode', choices=['on', 'off']),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='autoclass',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Configure autoclass feature',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _get_autoclass(self, blr):
"""Gets the autoclass setting for a bucket."""
bucket_url = blr.storage_url
bucket_metadata = self.gsutil_api.GetBucket(bucket_url.bucket_name,
fields=['autoclass'],
provider=bucket_url.scheme)
bucket = str(bucket_url).rstrip('/')
if bucket_metadata.autoclass:
enabled = getattr(bucket_metadata.autoclass, 'enabled', False)
toggle_time = getattr(bucket_metadata.autoclass, 'toggleTime', None)
else:
enabled = False
toggle_time = None
print('{}:\n'
' Enabled: {}\n'
' Toggle Time: {}'.format(bucket, enabled, toggle_time))
def _set_autoclass(self, blr, setting_arg):
"""Turns autoclass on or off for a bucket."""
bucket_url = blr.storage_url
autoclass_config = apitools_messages.Bucket.AutoclassValue()
autoclass_config.enabled = (setting_arg == 'on')
bucket_metadata = apitools_messages.Bucket(autoclass=autoclass_config)
print('Setting Autoclass %s for %s' %
(setting_arg, str(bucket_url).rstrip('/')))
self.gsutil_api.PatchBucket(bucket_url.bucket_name,
bucket_metadata,
fields=['autoclass'],
provider=bucket_url.scheme)
return 0
def _autoclass(self):
"""Handles autoclass command on Cloud Storage buckets."""
subcommand = self.args.pop(0)
if subcommand not in ('get', 'set'):
raise CommandException('autoclass only supports get|set')
subcommand_func = None
subcommand_args = []
setting_arg = None
if subcommand == 'get':
subcommand_func = self._get_autoclass
elif subcommand == 'set':
subcommand_func = self._set_autoclass
setting_arg = self.args.pop(0)
text_util.InsistOnOrOff(setting_arg,
'Only on and off values allowed for set option')
subcommand_args.append(setting_arg)
if self.gsutil_api.GetApiSelector('gs') != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used with the Cloud Storage '
'JSON API.') % self.command_name)))
# Iterate over bucket args, performing the specified subsubcommand.
some_matched = False
url_args = self.args
if not url_args:
self.RaiseWrongNumberOfArgumentsException()
for url_str in url_args:
# Throws a CommandException if the argument is not a bucket.
bucket_iter = self.GetBucketUrlIterFromArg(url_str)
for bucket_listing_ref in bucket_iter:
if self.gsutil_api.GetApiSelector(
bucket_listing_ref.storage_url.scheme) != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used for GCS '
'Buckets.') % self.command_name)))
some_matched = True
subcommand_func(bucket_listing_ref, *subcommand_args)
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def RunCommand(self):
"""Command entry point for the autoclass command."""
action_subcommand = self.args[0]
self.ParseSubOpts(check_args=True)
if action_subcommand == 'get' or action_subcommand == 'set':
metrics.LogCommandParams(sub_opts=self.sub_opts)
metrics.LogCommandParams(subcommands=[action_subcommand])
return self._autoclass()
else:
raise CommandException('Invalid subcommand "%s", use get|set instead.' %
action_subcommand)
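# --- Illustrative sketch (not part of gsutil) --------------------------------
# The "set" sub-command accepts only "on" or "off". This standalone helper
# shows that validation and the resulting boolean used for
# AutoclassValue.enabled; it is an illustrative stand-in, not the real
# gslib.utils.text_util.InsistOnOrOff.
def _sketch_parse_on_off(setting_arg):
  if setting_arg not in ('on', 'off'):
    raise ValueError('Only on and off values allowed for set option')
  return setting_arg == 'on'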
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/autoclass.py
| 0.879878 | 0.161056 |
autoclass.py
|
pypi
|
"""Implementation of acl command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import encoding
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import SetAclExceptionHandler
from gslib.command import SetAclFuncWrapper
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils import acl_helper
from gslib.utils.constants import NO_MAX
from gslib.utils.retry_util import Retry
_SET_SYNOPSIS = """
gsutil acl set [-f] [-r] [-a] <file-or-canned_acl_name> url...
"""
_GET_SYNOPSIS = """
gsutil acl get url
"""
_CH_SYNOPSIS = """
gsutil acl ch [-f] [-r] <grant>... url...
where each <grant> is one of the following forms:
-u <id>|<email>:<permission>
-g <id>|<email>|<domain>|All|AllAuth:<permission>
-p (viewers|editors|owners)-<project number>:<permission>
-d <id>|<email>|<domain>|All|AllAuth|(viewers|editors|owners)-<project number>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL text for a bucket or object, which you can
save and edit for the acl set command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The file-or-canned_acl_name parameter names either
a canned ACL or the path to a file that contains ACL text. The simplest way
to use the "acl set" command is to specify one of the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
If you want to make an object or bucket publicly readable or writable, it is
recommended to use "acl ch", to avoid accidentally removing OWNER permissions.
See the "acl ch" section for details.
See `Predefined ACLs
<https://cloud.google.com/storage/docs/access-control/lists#predefined-acl>`_
for a list of canned ACLs.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
gsutil acl set acl.txt gs://bucket/file.txt
Note that you can set an ACL on multiple buckets or objects at once. For
example, to set ACLs on all .jpg files found in a bucket:
gsutil acl set acl.txt gs://bucket/**.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/**.jpg
Note that multi-threading/multi-processing is only done when the named URLs
refer to objects, which happens either if you name specific objects or
if you enumerate objects by using an object wildcard or specifying
the acl -r flag.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URL.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. If some of the ACLs
couldn't be set, gsutil's exit status will be non-zero even if
this flag is set. This option is implicitly set when running
"gsutil -m acl...".
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, O (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant anyone on the internet READ access to the object example-object:
gsutil acl ch -u AllUsers:R gs://example-bucket/example-object
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see "gsutil help setmeta".
Grant anyone on the internet WRITE access to the bucket example-bucket:
WARNING: this is not recommended, as you will be responsible for the content.
gsutil acl ch -u AllUsers:W gs://example-bucket
Grant the user [email protected] WRITE access to the bucket
example-bucket:
gsutil acl ch -u [email protected]:WRITE gs://example-bucket
Grant the group [email protected] OWNER access to all jpg files in
example-bucket:
gsutil acl ch -g [email protected]:O gs://example-bucket/**.jpg
Grant the owners of project example-project WRITE access to the bucket
example-bucket:
gsutil acl ch -p owners-example-project:W gs://example-bucket
NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
to a project's viewers/editors respectively.
Remove access to the bucket example-bucket for the viewers of project number
12345:
gsutil acl ch -d viewers-12345 gs://example-bucket
NOTE: You cannot remove the project owners group from ACLs of gs:// buckets in
the given project. Attempts to do so will appear to succeed, but the service
will add the project owners group into the new set of ACLs before applying it.
Note that removing a project requires you to reference the project by
its number (which you can see with the acl get command) as opposed to its
project ID string.
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -r \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant the service account [email protected] WRITE access to
the bucket example-bucket:
gsutil acl ch -u [email protected]:W gs://example-bucket
Grant all users from the `G Suite
<https://www.google.com/work/apps/business/>`_ domain my-domain.org READ
access to the bucket gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by [email protected] from the bucket
example-bucket:
gsutil acl ch -d [email protected] gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds OWNER for [email protected] using
multi-threading:
gsutil -m acl ch -r -u [email protected]:O gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant OWNER to [email protected], for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -r -g my-domain.org:R -g AllAuth:R \\
-u [email protected]:O gs://my-bucket/ gs://my-other-bucket
<B>CH ROLES</B>
You may specify the following roles with either their shorthand or
their full name:
R: READ
W: WRITE
O: OWNER
For more information on these roles and the access they grant, see the
permissions section of the `Access Control Lists page
<https://cloud.google.com/storage/docs/access-control/lists#permissions>`_.
<B>CH ENTITIES</B>
There are four different entity types: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u [email protected]:r". Note: Service Accounts are considered to be users.
Groups are like users, but specified with the -g flag, as in
"-g [email protected]:O". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:O". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing roles is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many entities' roles can be specified on the same command line, allowing
bundled changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-d Remove all roles associated with the matching entity.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
changed.
-g Add or modify a group entity's role.
-p Add or modify a project viewers/editors/owners role.
-R, -r Performs acl ch request recursively, to all objects under the
specified URL.
-u Add or modify a user entity's role.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _ApplyExceptionHandler(cls, exception):
cls.logger.error('Encountered a problem: %s', exception)
cls.everything_set_okay = False
def _ApplyAclChangesWrapper(cls, url_or_expansion_result, thread_state=None):
cls.ApplyAclChanges(url_or_expansion_result, thread_state=thread_state)
class AclCommand(Command):
"""Implementation of gsutil acl command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'acl',
command_name_aliases=['getacl', 'setacl', 'chacl'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrg:u:d:p:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeFileURLOrCannedACLArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'get': [CommandArgument.MakeNCloudURLsArgument(1)],
'ch': [CommandArgument.MakeZeroOrMoreCloudURLsArgument()],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='acl',
help_name_aliases=['getacl', 'setacl', 'chmod', 'chacl'],
help_type='command_help',
help_one_line_summary='Get, set, or change bucket and/or object ACLs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
'ch': _ch_help_text
},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
return 1
else:
return 0
def _SetAcl(self):
"""Parses options and sets ACLs on the specified buckets/objects."""
self.continue_on_error = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
try:
self.SetAclCommandHelper(SetAclFuncWrapper, SetAclExceptionHandler)
except AccessDeniedException as unused_e:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _ChAcl(self):
"""Parses options and changes ACLs on the specified buckets/objects."""
self.parse_versions = True
self.changes = []
self.continue_on_error = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-g':
if 'gserviceaccount.com' in a:
raise CommandException(
'Service accounts are considered users, not groups; please use '
'"gsutil acl ch -u" instead of "gsutil acl ch -g"')
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.GROUP))
elif o == '-p':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.PROJECT))
elif o == '-u':
self.changes.append(
acl_helper.AclChange(a, scope_type=acl_helper.ChangeType.USER))
elif o == '-d':
self.changes.append(acl_helper.AclDel(a))
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
if not self.changes:
raise CommandException('Please specify at least one access change '
'with the -g, -u, or -d flags')
if (not UrlsAreForSingleProvider(self.args) or
StorageUrlFromString(self.args[0]).scheme != 'gs'):
raise CommandException(
'The "{0}" command can only be used with gs:// URLs'.format(
self.command_name))
self.everything_set_okay = True
self.ApplyAclFunc(_ApplyAclChangesWrapper,
_ApplyExceptionHandler,
self.args,
object_fields=['acl', 'generation', 'metageneration'])
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _RaiseForAccessDenied(self, url):
self._WarnServiceAccounts()
raise CommandException('Failed to set acl for %s. Please ensure you have '
'OWNER-role access to this resource.' % url)
@Retry(ServiceException, tries=3, timeout_secs=1)
def ApplyAclChanges(self, name_expansion_result, thread_state=None):
"""Applies the changes in self.changes to the provided URL.
Args:
name_expansion_result: NameExpansionResult describing the target object.
thread_state: If present, gsutil Cloud API instance to apply the changes.
"""
if thread_state:
gsutil_api = thread_state
else:
gsutil_api = self.gsutil_api
url = name_expansion_result.expanded_storage_url
if url.IsBucket():
bucket = gsutil_api.GetBucket(url.bucket_name,
provider=url.scheme,
fields=['acl', 'metageneration'])
current_acl = bucket.acl
elif url.IsObject():
gcs_object = encoding.JsonToMessage(apitools_messages.Object,
name_expansion_result.expanded_result)
current_acl = gcs_object.acl
if not current_acl:
self._RaiseForAccessDenied(url)
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
try:
if url.IsBucket():
preconditions = Preconditions(meta_gen_match=bucket.metageneration)
bucket_metadata = apitools_messages.Bucket(acl=current_acl)
gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
preconditions=preconditions,
provider=url.scheme,
fields=['id'])
else: # Object
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
object_metadata = apitools_messages.Object(acl=current_acl)
try:
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=url.generation,
fields=['id'])
except PreconditionException as e:
# Special retry case where we want to do an additional step, the read
# of the read-modify-write cycle, to fetch the correct object
# metadata before reattempting ACL changes.
self._RefetchObjectMetadataAndApplyAclChanges(url, gsutil_api)
self.logger.info('Updated ACL on %s', url)
except BadRequestException as e:
# Don't retry on bad requests, e.g. invalid email address.
raise CommandException('Received bad request from server: %s' % str(e))
except AccessDeniedException:
self._RaiseForAccessDenied(url)
except PreconditionException as e:
# For objects, retry attempts should have already been handled.
if url.IsObject():
raise CommandException(str(e))
# For buckets, raise PreconditionException and continue to next retry.
raise e
@Retry(PreconditionException, tries=3, timeout_secs=1)
def _RefetchObjectMetadataAndApplyAclChanges(self, url, gsutil_api):
"""Reattempts object ACL changes after a PreconditionException."""
gcs_object = gsutil_api.GetObjectMetadata(
url.bucket_name,
url.object_name,
provider=url.scheme,
fields=['acl', 'generation', 'metageneration'])
current_acl = gcs_object.acl
if self._ApplyAclChangesAndReturnChangeCount(url, current_acl) == 0:
self.logger.info('No changes to %s', url)
return
object_metadata = apitools_messages.Object(acl=current_acl)
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
gsutil_api.PatchObjectMetadata(url.bucket_name,
url.object_name,
object_metadata,
preconditions=preconditions,
provider=url.scheme,
generation=gcs_object.generation,
fields=['id'])
def _ApplyAclChangesAndReturnChangeCount(self, storage_url, acl_message):
modification_count = 0
for change in self.changes:
modification_count += change.Execute(storage_url, acl_message, 'acl',
self.logger)
return modification_count
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
self.def_acl = False
if action_subcommand == 'get':
metrics.LogCommandParams(subcommands=[action_subcommand])
self.GetAndPrintAcl(self.args[0])
elif action_subcommand == 'set':
metrics.LogCommandParams(subcommands=[action_subcommand])
self._SetAcl()
elif action_subcommand in ('ch', 'change'):
metrics.LogCommandParams(subcommands=[action_subcommand])
self._ChAcl()
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help acl".') % (action_subcommand, self.command_name))
return 0
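# --- Illustrative sketch (not part of gsutil) --------------------------------
# "acl ch" grants take the form <entity>:<permission>, where the permission may
# be the shorthand R/W/O or the full READ/WRITE/OWNER name (see CH ROLES
# above). This standalone parser illustrates that mapping only; it is not the
# real gslib.utils.acl_helper.AclChange and skips entity-type handling
# (-u/-g/-p).
def _sketch_parse_grant(grant):
  shorthand = {'R': 'READ', 'W': 'WRITE', 'O': 'OWNER'}
  entity, _, permission = grant.rpartition(':')
  if not entity:
    raise ValueError('grant must look like <entity>:<permission>')
  permission = shorthand.get(permission.upper(), permission.upper())
  if permission not in ('READ', 'WRITE', 'OWNER'):
    raise ValueError('unknown permission: %s' % permission)
  return entity, permission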
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/acl.py
| 0.84241 | 0.188119 |
acl.py
|
pypi
|
"""This module provides the command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
import getopt
import textwrap
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
from gslib.utils.text_util import InsistOnOrOff
_SET_SYNOPSIS = """
gsutil bucketpolicyonly set (on|off) gs://<bucket_name>...
"""
_GET_SYNOPSIS = """
gsutil bucketpolicyonly get gs://<bucket_name>...
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The ``bucketpolicyonly set`` command enables or disables the uniform bucket-level
access feature on Google Cloud Storage buckets.
<B>SET EXAMPLES</B>
Configure your buckets to use uniform bucket-level access:
gsutil bucketpolicyonly set on gs://redbucket gs://bluebucket
Configure your buckets to NOT use uniform bucket-level access:
gsutil bucketpolicyonly set off gs://redbucket gs://bluebucket
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``bucketpolicyonly get`` command shows whether uniform bucket-level
access is enabled for the specified Cloud Storage bucket.
<B>GET EXAMPLES</B>
Check if your buckets are using uniform bucket-level access:
gsutil bucketpolicyonly get gs://redbucket gs://bluebucket
"""
_DESCRIPTION = """
The Bucket Policy Only feature is now known as `uniform bucket-level access
<https://cloud.google.com/storage/docs/uniform-bucket-level-access>`_.
The ``bucketpolicyonly`` command is still supported, but we recommend using
the equivalent ``ubla`` `command
<https://cloud.google.com/storage/docs/gsutil/commands/ubla>`_.
The ``bucketpolicyonly`` command is used to retrieve or configure the
uniform bucket-level access setting of Cloud Storage buckets. This command has
two sub-commands, ``get`` and ``set``.
""" + _GET_DESCRIPTION + _SET_DESCRIPTION
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
# Aliases to make these more likely to fit on one line.
IamConfigurationValue = apitools_messages.Bucket.IamConfigurationValue
BucketPolicyOnlyValue = IamConfigurationValue.BucketPolicyOnlyValue
class BucketPolicyOnlyCommand(Command):
"""Implements the gsutil bucketpolicyonly command."""
command_spec = Command.CreateCommandSpec(
'bucketpolicyonly',
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=2,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'get': [CommandArgument.MakeNCloudURLsArgument(1),],
'set': [
CommandArgument('mode', choices=['on', 'off']),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='bucketpolicyonly',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Configure uniform bucket-level access',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _ValidateBucketListingRefAndReturnBucketName(self, blr):
if blr.storage_url.scheme != 'gs':
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
def _GetBucketPolicyOnly(self, blr):
"""Gets the Bucket Policy Only setting for a bucket."""
self._ValidateBucketListingRefAndReturnBucketName(blr)
bucket_url = blr.storage_url
bucket_metadata = self.gsutil_api.GetBucket(bucket_url.bucket_name,
fields=['iamConfiguration'],
provider=bucket_url.scheme)
iam_config = bucket_metadata.iamConfiguration
bucket_policy_only = iam_config.bucketPolicyOnly
fields = {
'bucket': str(bucket_url).rstrip('/'),
'enabled': bucket_policy_only.enabled
}
locked_time_line = ''
if bucket_policy_only.lockedTime:
fields['locked_time'] = bucket_policy_only.lockedTime
locked_time_line = ' LockedTime: {locked_time}\n'
if bucket_policy_only:
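# Illustrative example (not in the original source) of the output produced by
# the print below for a bucket with the feature enabled and a lock time set;
# the bucket name and timestamp are hypothetical:
#   Bucket Policy Only setting for gs://redbucket:
#    Enabled: True
#    LockedTime: 2020-01-01T00:00:00+00:00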
print(('Bucket Policy Only setting for {bucket}:\n'
' Enabled: {enabled}\n' + locked_time_line).format(**fields))
def _SetBucketPolicyOnly(self, blr, setting_arg):
"""Sets the Bucket Policy Only setting for a bucket on or off."""
self._ValidateBucketListingRefAndReturnBucketName(blr)
bucket_url = blr.storage_url
iam_config = IamConfigurationValue()
iam_config.bucketPolicyOnly = BucketPolicyOnlyValue()
iam_config.bucketPolicyOnly.enabled = (setting_arg == 'on')
bucket_metadata = apitools_messages.Bucket(iamConfiguration=iam_config)
setting_verb = 'Enabling' if setting_arg == 'on' else 'Disabling'
print('%s Bucket Policy Only for %s...' %
(setting_verb, str(bucket_url).rstrip('/')))
self.gsutil_api.PatchBucket(bucket_url.bucket_name,
bucket_metadata,
fields=['iamConfiguration'],
provider=bucket_url.scheme)
return 0
def _BucketPolicyOnly(self):
"""Handles bucketpolicyonly command on a Cloud Storage bucket."""
subcommand = self.args.pop(0)
if subcommand not in ('get', 'set'):
raise CommandException('bucketpolicyonly only supports get|set')
subcommand_func = None
subcommand_args = []
setting_arg = None
if subcommand == 'get':
subcommand_func = self._GetBucketPolicyOnly
elif subcommand == 'set':
subcommand_func = self._SetBucketPolicyOnly
setting_arg = self.args.pop(0)
InsistOnOrOff(setting_arg,
'Only on and off values allowed for set option')
subcommand_args.append(setting_arg)
# Iterate over bucket args, performing the specified subsubcommand.
some_matched = False
url_args = self.args
if not url_args:
self.RaiseWrongNumberOfArgumentsException()
for url_str in url_args:
# Throws a CommandException if the argument is not a bucket.
bucket_iter = self.GetBucketUrlIterFromArg(url_str)
for bucket_listing_ref in bucket_iter:
some_matched = True
subcommand_func(bucket_listing_ref, *subcommand_args)
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def RunCommand(self):
"""Command entry point for the bucketpolicyonly command."""
if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(
'The "%s" command can only be used with the Cloud Storage JSON API.'
% self.command_name)))
action_subcommand = self.args[0]
self.ParseSubOpts(check_args=True)
if action_subcommand == 'get' or action_subcommand == 'set':
metrics.LogCommandParams(sub_opts=self.sub_opts)
metrics.LogCommandParams(subcommands=[action_subcommand])
self._BucketPolicyOnly()
else:
raise CommandException('Invalid subcommand "%s", use get|set instead.' %
action_subcommand)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/bucketpolicyonly.py
| 0.884576 | 0.15511 |
bucketpolicyonly.py
|
pypi
|
"""Implementation of mb command for creating cloud storage buckets."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
import textwrap
from gslib.cloud_api import BadRequestException
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.commands.rpo import VALID_RPO_VALUES
from gslib.commands.rpo import VALID_RPO_VALUES_STRING
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import InvalidUrlError
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
from gslib.utils.retention_util import RetentionInSeconds
from gslib.utils.text_util import InsistAscii
from gslib.utils.text_util import InsistOnOrOff
from gslib.utils.text_util import NormalizeStorageClass
_SYNOPSIS = """
gsutil mb [-b (on|off)] [-c <class>] [-l <location>] [-p <project>]
[--retention <time>] [--pap <setting>]
[--placement <region1>,<region2>]
[--rpo {}] gs://<bucket_name>...
""".format(VALID_RPO_VALUES_STRING)
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
Create one or more new buckets. Google Cloud Storage has a single namespace,
so you are not allowed to create a bucket with a name already in use by
another user. You can, however, carve out parts of the bucket name space
corresponding to your company's domain name (see "gsutil help naming").
If you don't specify a project ID or project number using the -p option, the
buckets are created using the default project ID specified in your `gsutil
configuration file <https://cloud.google.com/storage/docs/boto-gsutil>`_.
The -l option specifies the location for the buckets. Once a bucket is created
in a given location, it cannot be moved to a different location. Instead, you
need to create a new bucket, move the data over, and then delete the original
bucket.
<B>BUCKET STORAGE CLASSES</B>
You can specify one of the `storage classes
<https://cloud.google.com/storage/docs/storage-classes>`_ for a bucket
with the -c option.
Example:
gsutil mb -c nearline gs://some-bucket
See online documentation for
`pricing <https://cloud.google.com/storage/pricing>`_ and
`SLA <https://cloud.google.com/storage/sla>`_ details.
If you don't specify a -c option, the bucket is created with the
default storage class Standard Storage.
<B>BUCKET LOCATIONS</B>
You can specify one of the `available locations
<https://cloud.google.com/storage/docs/locations>`_ for a bucket
with the -l option.
Examples:
gsutil mb -l asia gs://some-bucket
gsutil mb -c standard -l us-east1 gs://some-bucket
If you don't specify a -l option, the bucket is created in the default
location (US).
<B>Retention Policy</B>
You can specify a retention period in one of the following formats:
--retention <number>s
Specifies retention period of <number> seconds for objects in this bucket.
--retention <number>d
Specifies retention period of <number> days for objects in this bucket.
--retention <number>m
Specifies retention period of <number> months for objects in this bucket.
--retention <number>y
Specifies retention period of <number> years for objects in this bucket.
Examples:
gsutil mb --retention 1y gs://some-bucket
gsutil mb --retention 36m gs://some-bucket
If you don't specify a --retention option, the bucket is created with no
retention policy.
<B>OPTIONS</B>
--autoclass Enables the Autoclass feature that automatically
sets object storage classes.
-b <on|off> Specifies the uniform bucket-level access setting.
When "on", ACLs assigned to objects in the bucket are
not evaluated. Consequently, only IAM policies grant
access to objects in these buckets. Default is "off".
-c class Specifies the default storage class.
Default is "Standard".
-l location Can be any supported location. See
https://cloud.google.com/storage/docs/locations
for the list of supported locations. Default is US.
Locations are case insensitive.
-p project Specifies the project ID or project number to create
the bucket under.
-s class Same as -c.
--retention time Specifies the retention policy. Default is no retention
policy. This can only be set on gs:// buckets and
requires using the JSON API. For more details about
retention policy see "gsutil help retention"
--pap setting Specifies the public access prevention setting. Valid
values are "enforced" or "unspecified". When
"enforced", objects in this bucket cannot be made
publicly accessible. Default is "unspecified".
--placement reg1,reg2 Two regions that form the custom dual-region.
Only regions within the same continent are or will ever
be valid. Invalid location pairs (such as
mixed-continent, or with unsupported regions)
will return an error.
--rpo setting Specifies the `replication setting <https://cloud.google.com/storage/docs/turbo-replication>`_.
This flag is not valid for single-region buckets,
and multi-region buckets only accept a value of
DEFAULT. Valid values for dual region buckets
are {rpo_values}. If unspecified, DEFAULT is applied
for dual-region and multi-region buckets.
""".format(rpo_values=VALID_RPO_VALUES_STRING))
# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
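# Illustrative behavior of the two checks above (examples added for clarity,
# not part of the original source):
#   BUCKET_NAME_RE.match('my-bucket')       -> match (valid charset, >= 3 chars)
#   BUCKET_NAME_RE.match('ab')              -> None  (fewer than 3 characters)
#   BUCKET_NAME_RE.match('-bucket')         -> None  (must start alphanumeric)
#   TOO_LONG_DNS_NAME_COMP.search('a' * 64) -> match (DNS label longer than 63)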
IamConfigurationValue = apitools_messages.Bucket.IamConfigurationValue
BucketPolicyOnlyValue = IamConfigurationValue.BucketPolicyOnlyValue
class MbCommand(Command):
"""Implementation of gsutil mb command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'mb',
command_name_aliases=['makebucket', 'createbucket', 'md', 'mkdir'],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=NO_MAX,
supported_sub_args='b:c:l:p:s:',
supported_private_args=[
'autoclass', 'retention=', 'pap=', 'placement=', 'rpo='
],
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()
],
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='mb',
help_name_aliases=[
'createbucket',
'makebucket',
'md',
'mkdir',
'location',
'dra',
'dras',
'reduced_availability',
'durable_reduced_availability',
'rr',
'reduced_redundancy',
'standard',
'storage class',
'nearline',
'nl',
],
help_type='command_help',
help_one_line_summary='Make buckets',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the mb command."""
autoclass = False
bucket_policy_only = None
location = None
storage_class = None
seconds = None
public_access_prevention = None
rpo = None
json_only_flags_in_command = []
placements = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '--autoclass':
autoclass = True
json_only_flags_in_command.append(o)
elif o == '-l':
location = a
elif o == '-p':
# Project IDs are sent as header values when using gs and s3 XML APIs.
InsistAscii(a, 'Invalid non-ASCII character found in project ID')
self.project_id = a
elif o == '-c' or o == '-s':
storage_class = NormalizeStorageClass(a)
elif o == '--retention':
seconds = RetentionInSeconds(a)
elif o == '--rpo':
rpo = a.strip()
if rpo not in VALID_RPO_VALUES:
raise CommandException(
'Invalid value for --rpo. Must be one of: {},'
' provided: {}'.format(VALID_RPO_VALUES_STRING, a))
json_only_flags_in_command.append(o)
elif o == '-b':
InsistOnOrOff(a, 'Only on and off values allowed for -b option')
bucket_policy_only = (a == 'on')
json_only_flags_in_command.append(o)
elif o == '--pap':
public_access_prevention = a
json_only_flags_in_command.append(o)
elif o == '--placement':
placements = a.split(',')
if len(placements) != 2:
raise CommandException(
'Please specify two regions separated by a comma, without spaces.'
' Specified: {}'.format(a))
json_only_flags_in_command.append(o)
bucket_metadata = apitools_messages.Bucket(location=location,
rpo=rpo,
storageClass=storage_class)
if autoclass:
bucket_metadata.autoclass = apitools_messages.Bucket.AutoclassValue(
enabled=autoclass)
if bucket_policy_only or public_access_prevention:
bucket_metadata.iamConfiguration = IamConfigurationValue()
iam_config = bucket_metadata.iamConfiguration
if bucket_policy_only:
iam_config.bucketPolicyOnly = BucketPolicyOnlyValue()
iam_config.bucketPolicyOnly.enabled = bucket_policy_only
if public_access_prevention:
iam_config.publicAccessPrevention = public_access_prevention
if placements:
placement_config = apitools_messages.Bucket.CustomPlacementConfigValue()
placement_config.dataLocations = placements
bucket_metadata.customPlacementConfig = placement_config
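# Illustrative mapping from flags to the metadata built above (hypothetical
# bucket name, added for clarity): "gsutil mb -b on --pap enforced -l us-east1
# gs://my-bucket" produces bucket_metadata with location='us-east1',
# iamConfiguration.bucketPolicyOnly.enabled=True, and
# iamConfiguration.publicAccessPrevention='enforced'.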
for bucket_url_str in self.args:
bucket_url = StorageUrlFromString(bucket_url_str)
if seconds is not None:
if bucket_url.scheme != 'gs':
raise CommandException('Retention policy can only be specified for '
'GCS buckets.')
retention_policy = (apitools_messages.Bucket.RetentionPolicyValue(
retentionPeriod=seconds))
bucket_metadata.retentionPolicy = retention_policy
if json_only_flags_in_command and self.gsutil_api.GetApiSelector(
bucket_url.scheme) != ApiSelector.JSON:
raise CommandException('The {} option(s) can only be used for GCS'
' Buckets with the JSON API'.format(
', '.join(json_only_flags_in_command)))
if not bucket_url.IsBucket():
raise CommandException('The mb command requires a URL that specifies a '
'bucket.\n"%s" is not valid.' % bucket_url)
if (not BUCKET_NAME_RE.match(bucket_url.bucket_name) or
TOO_LONG_DNS_NAME_COMP.search(bucket_url.bucket_name)):
raise InvalidUrlError('Invalid bucket name in URL "%s"' %
bucket_url.bucket_name)
self.logger.info('Creating %s...', bucket_url)
# Pass storage_class param only if this is a GCS bucket. (In S3 the
# storage class is specified on the key object.)
try:
self.gsutil_api.CreateBucket(bucket_url.bucket_name,
project_id=self.project_id,
metadata=bucket_metadata,
provider=bucket_url.scheme)
except BadRequestException as e:
if (e.status == 400 and e.reason == 'DotfulBucketNameNotUnderTld' and
bucket_url.scheme == 'gs'):
bucket_name = bucket_url.bucket_name
final_comp = bucket_name[bucket_name.rfind('.') + 1:]
raise CommandException('\n'.join(
textwrap.wrap(
'Buckets with "." in the name must be valid DNS names. The bucket'
' you are attempting to create (%s) is not a valid DNS name,'
' because the final component (%s) is not currently a valid part'
' of the top-level DNS tree.' % (bucket_name, final_comp))))
else:
raise
return 0
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/mb.py
| 0.907739 | 0.26121 |
mb.py
|
pypi
|
"""Implementation of website configuration command for buckets."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
from apitools.base.py import encoding
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
_SET_SYNOPSIS = """
gsutil web set [-m <main_page_suffix>] [-e <error_page>] gs://<bucket_name>...
"""
_GET_SYNOPSIS = """
gsutil web get gs://<bucket_name>
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The ``gsutil web set`` command allows you to configure or disable the website
configuration on your buckets. The ``set`` sub-command has the following
options (omit both options to unset the configuration):
<B>SET OPTIONS</B>
-m <index.html> Specifies the object name to serve when a bucket
listing is requested via a custom domain.
-e <404.html> Specifies the error page to serve when a request is made
for a non-existent object via a custom domain.
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``gsutil web get`` command retrieves the web semantics configuration for
a bucket and displays a JSON representation of the configuration.
In Google Cloud Storage, this would look like the following:
{
"notFoundPage": "404.html",
"mainPageSuffix": "index.html"
}
"""
_DESCRIPTION = """
Cloud Storage allows you to configure a bucket to behave like a static
website. When you set a configuration, requests made to the bucket via a
`custom domain
<https://cloud.google.com/storage/docs/request-endpoints#custom-domains>`_
work like any other website. For example, if you set a ``main_page_suffix``,
a subsequent GET bucket request through a custom domain serves the specified
"main" page instead of performing the usual bucket listing. Similarly, if
you set an ``error_page``, a subsequent GET object request through a custom
domain for a non-existent object serves the specified error page instead of
the standard Cloud Storage error.
See `Static website examples and tips
<https://cloud.google.com/storage/docs/static-website>`_ for additional
examples and information.
Notes:
1. Because the main page is only served when a bucket listing request is made
via a custom domain endpoint, you can continue to use ``gsutil ls`` as you
normally would for bucket listing.
2. The main_page_suffix applies to each subdirectory of the bucket. For
example, with the main_page_suffix configured to be index.html, a GET
request for ``http://www.example.com`` retrieves
``http://www.example.com/index.html`` but shows ``http://www.example.com`` as
the URL to the requester, and a GET request for
``http://www.example.com/photos`` retrieves
``http://www.example.com/photos/index.html`` but shows ``http://www.example.com/photos``
as the URL to the requester.
3. There is just one 404.html page. For example, a GET request for
``http://www.example.com/photos/missing`` retrieves
``http://www.example.com/404.html``, not
``http://www.example.com/photos/404.html``.
The web command has two sub-commands:
""" + _SET_DESCRIPTION + _GET_DESCRIPTION
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
class WebCommand(Command):
"""Implementation of gsutil web command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'web',
command_name_aliases=['setwebcfg', 'getwebcfg'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='m:e:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument()],
'get': [CommandArgument.MakeNCloudBucketURLsArgument(1)],
},
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='web',
help_name_aliases=['getwebcfg', 'setwebcfg'],
help_type='command_help',
help_one_line_summary=(
'Set a main page and/or error page for one or more buckets'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _GetWeb(self):
"""Gets website configuration for a bucket."""
bucket_url, bucket_metadata = self.GetSingleBucketUrlFromArg(
self.args[0], bucket_fields=['website'])
if bucket_url.scheme == 's3':
sys.stdout.write(
self.gsutil_api.XmlPassThroughGetWebsite(bucket_url,
provider=bucket_url.scheme))
else:
if bucket_metadata.website and (bucket_metadata.website.mainPageSuffix or
bucket_metadata.website.notFoundPage):
sys.stdout.write(
str(encoding.MessageToJson(bucket_metadata.website)) + '\n')
else:
sys.stdout.write('%s has no website configuration.\n' % bucket_url)
return 0
def _SetWeb(self):
"""Sets website configuration for a bucket."""
main_page_suffix = None
error_page = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-m':
main_page_suffix = a
elif o == '-e':
error_page = a
url_args = self.args
website = apitools_messages.Bucket.WebsiteValue(
mainPageSuffix=main_page_suffix, notFoundPage=error_page)
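# As noted in the SET description above, omitting both -m and -e leaves both
# fields None, which unsets any existing website configuration on the target
# buckets.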
# Iterate over URLs, expanding wildcards and setting the website
# configuration on each.
some_matched = False
for url_str in url_args:
bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
for blr in bucket_iter:
url = blr.storage_url
some_matched = True
self.logger.info('Setting website configuration on %s...', blr)
bucket_metadata = apitools_messages.Bucket(website=website)
self.gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
provider=url.scheme,
fields=['id'])
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def RunCommand(self):
"""Command entry point for the web command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
if action_subcommand == 'get':
func = self._GetWeb
elif action_subcommand == 'set':
func = self._SetWeb
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help web".') % (action_subcommand, self.command_name))
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(subcommands=[action_subcommand],
sub_opts=self.sub_opts)
return func()
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/web.py
| 0.774242 | 0.151498 |
web.py
|
pypi
|
"""Implementation of cors configuration command for GCS buckets."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.storage_url import UrlsAreForSingleProvider
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
from gslib.utils.translation_helper import CorsTranslation
from gslib.utils.translation_helper import REMOVE_CORS_CONFIG
_GET_SYNOPSIS = """
gsutil cors get gs://<bucket_name>
"""
_SET_SYNOPSIS = """
gsutil cors set cors-json-file gs://<bucket_name>...
"""
_GET_DESCRIPTION = """
<B>GET</B>
Gets the CORS configuration for a single bucket. The output from
"cors get" can be redirected into a file, edited and then updated using
"cors set".
"""
_SET_DESCRIPTION = """
<B>SET</B>
Sets the CORS configuration for one or more buckets. The
cors-json-file specified on the command line should be a path to a local
file containing a JSON document as described above.
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') + '\n\n'
_DESCRIPTION = ("""
Gets or sets the Cross-Origin Resource Sharing (CORS) configuration on one or
more buckets. This command is supported for buckets only, not objects. An
example CORS JSON document looks like the following:
[
{
"origin": ["http://origin1.example.com"],
"responseHeader": ["Content-Type"],
"method": ["GET"],
"maxAgeSeconds": 3600
}
]
The above JSON document explicitly allows cross-origin GET requests from
http://origin1.example.com and may include the Content-Type response header.
The preflight request may be cached for 1 hour.
Note that requests to the authenticated browser download endpoint ``storage.cloud.google.com``
do not allow CORS requests. For more information about supported endpoints for CORS, see
`Cloud Storage CORS support <https://cloud.google.com/storage/docs/cross-origin#server-side-support>`_.
The following (empty) CORS JSON document removes all CORS configuration for
a bucket:
[]
The cors command has two sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION]) + """
For more info about CORS generally, see https://www.w3.org/TR/cors/.
For more info about CORS in Cloud Storage, see the
`CORS concept page <https://cloud.google.com/storage/docs/configuring-cors>`_.
""")
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
class CorsCommand(Command):
"""Implementation of gsutil cors command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'cors',
command_name_aliases=['getcors', 'setcors'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeNFileURLsArgument(1),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
'get': [CommandArgument.MakeNCloudBucketURLsArgument(1),]
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='cors',
help_name_aliases=[
'getcors',
'setcors',
'cross-origin',
],
help_type='command_help',
help_one_line_summary=(
'Get or set a CORS JSON document for one or more buckets'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if self.args[0].lower() == 'set':
return 2
else:
return 1
def _SetCors(self):
"""Sets CORS configuration on a Google Cloud Storage bucket."""
cors_arg = self.args[0]
url_args = self.args[1:]
# Disallow multi-provider 'cors set' requests.
if not UrlsAreForSingleProvider(url_args):
raise CommandException('"%s" command spanning providers not allowed.' %
self.command_name)
# Open, read and parse file containing JSON document.
cors_file = open(cors_arg, 'r')
cors_txt = cors_file.read()
cors_file.close()
self.api = self.gsutil_api.GetApiSelector(
StorageUrlFromString(url_args[0]).scheme)
# Iterate over URLs, expanding wildcards and setting the CORS on each.
some_matched = False
for url_str in url_args:
bucket_iter = self.GetBucketUrlIterFromArg(url_str, bucket_fields=['id'])
for blr in bucket_iter:
url = blr.storage_url
some_matched = True
self.logger.info('Setting CORS on %s...', blr)
if url.scheme == 's3':
self.gsutil_api.XmlPassThroughSetCors(cors_txt,
url,
provider=url.scheme)
else:
cors = CorsTranslation.JsonCorsToMessageEntries(cors_txt)
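# An empty JSON document ("[]") yields no entries; per the description above,
# it is translated to REMOVE_CORS_CONFIG, which clears any existing CORS
# configuration on the bucket.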
if not cors:
cors = REMOVE_CORS_CONFIG
bucket_metadata = apitools_messages.Bucket(cors=cors)
self.gsutil_api.PatchBucket(url.bucket_name,
bucket_metadata,
provider=url.scheme,
fields=['id'])
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def _GetCors(self):
"""Gets CORS configuration for a Google Cloud Storage bucket."""
bucket_url, bucket_metadata = self.GetSingleBucketUrlFromArg(
self.args[0], bucket_fields=['cors'])
if bucket_url.scheme == 's3':
sys.stdout.write(
self.gsutil_api.XmlPassThroughGetCors(bucket_url,
provider=bucket_url.scheme))
else:
if bucket_metadata.cors:
sys.stdout.write(
CorsTranslation.MessageEntriesToJson(bucket_metadata.cors))
else:
sys.stdout.write('%s has no CORS configuration.\n' % bucket_url)
return 0
def RunCommand(self):
"""Command entry point for the cors command."""
action_subcommand = self.args.pop(0)
if action_subcommand == 'get':
func = self._GetCors
elif action_subcommand == 'set':
func = self._SetCors
else:
raise CommandException(
('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help cors".') % (action_subcommand, self.command_name))
metrics.LogCommandParams(subcommands=[action_subcommand])
return func()
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/cors.py
| 0.730001 | 0.171165 |
cors.py
|
pypi
|
"""Implementation of Unix-like cat command for cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
import six
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.utils import cat_helper
from gslib.utils import constants
if six.PY3:
long = int
_SYNOPSIS = """
gsutil cat [-h] url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The cat command outputs the contents of one or more URLs to stdout.
While the cat command does not compute a checksum, it is otherwise
equivalent to doing:
gsutil cp url... -
(The final '-' causes gsutil to stream the output to stdout.)
WARNING: The gsutil cat command does not compute a checksum of the
downloaded data. Therefore, we recommend that users either perform
their own validation of the output of gsutil cat or use gsutil cp
or rsync (both of which perform integrity checking automatically).
<B>OPTIONS</B>
-h Prints short header for each object. For example:
gsutil cat -h gs://bucket/meeting_notes/2012_Feb/*.txt
This would print a header with the object name before the contents
of each text object that matched the wildcard.
-r range Causes gsutil to output just the specified byte range of the
object. Ranges can be of these forms:
start-end (e.g., -r 256-5939)
start- (e.g., -r 256-)
-numbytes (e.g., -r -5)
where offsets start at 0, start-end means to return bytes start
through end (inclusive), start- means to return bytes start
through the end of the object, and -numbytes means to return the
last numbytes of the object. For example:
gsutil cat -r 256-939 gs://bucket/object
returns bytes 256 through 939, while:
gsutil cat -r -5 gs://bucket/object
returns the final 5 bytes of the object.
""")
class CatCommand(Command):
"""Implementation of gsutil cat command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'cat',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=constants.NO_MAX,
supported_sub_args='hr:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[CommandArgument.MakeZeroOrMoreCloudURLsArgument()])
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='cat',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Concatenate object content to stdout',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
# Command entry point.
def RunCommand(self):
"""Command entry point for the cat command."""
show_header = False
request_range = None
start_byte = 0
end_byte = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-h':
show_header = True
elif o == '-r':
request_range = a.strip()
range_matcher = re.compile(
'^(?P<start>[0-9]+)-(?P<end>[0-9]*)$|^(?P<endslice>-[0-9]+)$')
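# Illustrative mapping from -r values to the named groups above (examples
# added for clarity, not part of the original source):
#   '256-939' -> start='256', end='939'  (bytes 256 through 939)
#   '256-'    -> start='256', end=''     (byte 256 through end of object)
#   '-5'      -> endslice='-5'           (last 5 bytes of the object)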
range_match = range_matcher.match(request_range)
if not range_match:
raise CommandException('Invalid range (%s)' % request_range)
if range_match.group('start'):
start_byte = long(range_match.group('start'))
if range_match.group('end'):
end_byte = long(range_match.group('end'))
if range_match.group('endslice'):
start_byte = long(range_match.group('endslice'))
else:
self.RaiseInvalidArgumentException()
return cat_helper.CatHelper(self).CatUrlStrings(self.args,
show_header=show_header,
start_byte=start_byte,
end_byte=end_byte)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/cat.py
| 0.895283 | 0.260178 |
cat.py
|
pypi
|
"""Implementation of compose command for Google Cloud Storage."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from boto import config
from gslib.bucket_listing_ref import BucketListingObject
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.encryption_helper import GetEncryptionKeyWrapper
from gslib.utils.translation_helper import PreconditionsFromHeaders
MAX_COMPOSE_ARITY = 32
_SYNOPSIS = """
gsutil compose gs://bucket/obj1 [gs://bucket/obj2 ...] gs://bucket/composite
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The compose command creates a new object whose content is the concatenation
of a given sequence of source objects under the same bucket. gsutil uses
the content type of the first source object to determine the destination
object's content type. For more information, please see:
https://cloud.google.com/storage/docs/composite-objects
Note also that the ``gsutil cp`` command can automatically split uploads
for large files into multiple component objects, upload them in parallel,
and compose them into a final object. This will still perform all uploads
from a single machine. For extremely large files and/or very low
per-machine bandwidth, you may want to split the file and upload it from
multiple machines, and later compose these parts of the file manually.
Appending simply entails uploading your new data to a temporary object,
composing it with the growing append-target, and deleting the temporary
object:
$ echo 'new data' | gsutil cp - gs://bucket/data-to-append
$ gsutil compose gs://bucket/append-target gs://bucket/data-to-append \\
gs://bucket/append-target
$ gsutil rm gs://bucket/data-to-append
Note that there is a limit (currently %d) to the number of components that can
be composed in a single operation.
""" % (MAX_COMPOSE_ARITY))
class ComposeCommand(Command):
"""Implementation of gsutil compose command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'compose',
command_name_aliases=['concat'],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=MAX_COMPOSE_ARITY + 1,
supported_sub_args='',
# Not files, just object names without gs:// prefix.
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[CommandArgument.MakeZeroOrMoreCloudURLsArgument()])
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='compose',
help_name_aliases=['concat'],
help_type='command_help',
help_one_line_summary=(
'Concatenate a sequence of objects into a new composite object.'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def CheckProvider(self, url):
if url.scheme != 'gs':
raise CommandException(
'"compose" called on URL with unsupported provider (%s).' % str(url))
# Command entry point.
def RunCommand(self):
"""Command entry point for the compose command."""
target_url_str = self.args[-1]
self.args = self.args[:-1]
target_url = StorageUrlFromString(target_url_str)
self.CheckProvider(target_url)
if target_url.HasGeneration():
raise CommandException('A version-specific URL (%s) cannot be '
'the destination for gsutil compose - abort.' %
target_url)
dst_obj_metadata = apitools_messages.Object(name=target_url.object_name,
bucket=target_url.bucket_name)
components = []
# Remember the first source object so we can get its content type.
first_src_url = None
for src_url_str in self.args:
if ContainsWildcard(src_url_str):
src_url_iter = self.WildcardIterator(src_url_str).IterObjects()
else:
src_url_iter = [BucketListingObject(StorageUrlFromString(src_url_str))]
for blr in src_url_iter:
src_url = blr.storage_url
self.CheckProvider(src_url)
if src_url.bucket_name != target_url.bucket_name:
raise CommandException('GCS does not support inter-bucket composing.')
if not first_src_url:
first_src_url = src_url
src_obj_metadata = (
apitools_messages.ComposeRequest.SourceObjectsValueListEntry(
name=src_url.object_name))
if src_url.HasGeneration():
src_obj_metadata.generation = int(src_url.generation)
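# A version-specific source URL such as gs://bucket/obj#1360383693690000
# (illustrative generation value) pins the compose request to that exact
# object generation.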
components.append(src_obj_metadata)
# Avoid expanding too many components, and sanity check each name
# expansion result.
if len(components) > MAX_COMPOSE_ARITY:
raise CommandException('"compose" called with too many component '
'objects. Limit is %d.' % MAX_COMPOSE_ARITY)
if not components:
raise CommandException('"compose" requires at least 1 component object.')
first_src_obj_metadata = self.gsutil_api.GetObjectMetadata(
first_src_url.bucket_name,
first_src_url.object_name,
provider=first_src_url.scheme,
fields=['contentEncoding', 'contentType'])
dst_obj_metadata.contentType = first_src_obj_metadata.contentType
dst_obj_metadata.contentEncoding = first_src_obj_metadata.contentEncoding
preconditions = PreconditionsFromHeaders(self.headers or {})
self.logger.info('Composing %s from %d component object(s).', target_url,
len(components))
self.gsutil_api.ComposeObject(
components,
dst_obj_metadata,
preconditions=preconditions,
provider=target_url.scheme,
encryption_tuple=GetEncryptionKeyWrapper(config))
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/compose.py
| 0.752013 | 0.191706 |
compose.py
|
pypi
|
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import getopt
import re
import time
import uuid
from datetime import datetime
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PublishPermissionDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.project_id import PopulateProjectId
from gslib.pubsub_api import PubsubApi
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.pubsub_apitools.pubsub_v1_messages import Binding
from gslib.utils import copy_helper
# Cloud Pub/Sub commands
_LIST_SYNOPSIS = """
gsutil notification list gs://<bucket_name>...
"""
_DELETE_SYNOPSIS = """
gsutil notification delete (<notificationConfigName>|gs://<bucket_name>)...
"""
_CREATE_SYNOPSIS = """
gsutil notification create -f (json|none) [-p <prefix>] [-t <topic>] \\
[-m <key>:<value>]... [-e <eventType>]... gs://<bucket_name>
"""
# Object Change Notification commands
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i <id>] [-t <token>] <app_url> gs://<bucket_name>
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel <channel_id> <resource_id>
"""
_SYNOPSIS = (
_CREATE_SYNOPSIS +
_DELETE_SYNOPSIS.lstrip('\n') +
_LIST_SYNOPSIS.lstrip('\n') +
_WATCHBUCKET_SYNOPSIS +
_STOPCHANNEL_SYNOPSIS.lstrip('\n') + '\n') # yapf: disable
_LIST_DESCRIPTION = """
<B>LIST</B>
The list sub-command provides a list of notification configs belonging to a
given bucket. The listed name of each notification config can be used with
the delete sub-command to delete that specific notification config.
For listing Object Change Notifications instead of Cloud Pub/Sub notification
subscription configs, add a -o flag.
<B>LIST EXAMPLES</B>
Fetch the list of notification configs for the bucket example-bucket:
gsutil notification list gs://example-bucket
The same as above, but for Object Change Notifications instead of Cloud
Pub/Sub notification subscription configs:
gsutil notification list -o gs://example-bucket
Fetch the notification configs in all buckets matching a wildcard:
gsutil notification list gs://example-*
Fetch all of the notification configs for buckets in the default project:
gsutil notification list gs://*
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The delete sub-command deletes notification configs from a bucket. If a
notification config name is passed as a parameter, that notification config
alone is deleted. If a bucket name is passed, all notification configs
associated with that bucket are deleted.
Cloud Pub/Sub topics associated with this notification config are not
deleted by this command. Those must be deleted separately, for example with
the gcloud command `gcloud beta pubsub topics delete`.
Object Change Notification subscriptions cannot be deleted with this command.
For that, see the command `gsutil notification stopchannel`.
<B>DELETE EXAMPLES</B>
Delete a single notification config (with ID 3) in the bucket example-bucket:
gsutil notification delete projects/_/buckets/example-bucket/notificationConfigs/3
Delete all notification configs in the bucket example-bucket:
gsutil notification delete gs://example-bucket
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The create sub-command creates a notification config on a bucket, establishing
a flow of event notifications from Cloud Storage to a Cloud Pub/Sub topic. As
part of creating this flow, the create command also verifies that the
destination Cloud Pub/Sub topic exists, creating it if necessary, and verifies
that the Cloud Storage bucket has permission to publish events to that topic,
granting the permission if necessary.
If a destination Cloud Pub/Sub topic is not specified with the -t flag, Cloud
Storage chooses a topic name in the default project whose ID is the same as
the bucket name. For example, if the default project ID specified is
'default-project' and the bucket being configured is gs://example-bucket, the
create command uses the Cloud Pub/Sub topic
"projects/default-project/topics/example-bucket".
In order to enable notifications, your project's `Cloud Storage service agent
<https://cloud.google.com/storage/docs/projects#service-accounts>`_ must have
the IAM permission "pubsub.topics.publish". This command checks to see if the
destination Cloud Pub/Sub topic grants the service agent this permission. If
not, the create command attempts to grant it.
A bucket can have up to 100 total notification configurations and up to 10
notification configurations set to trigger for a specific event.
<B>CREATE EXAMPLES</B>
Begin sending notifications of all changes to the bucket example-bucket
to the Cloud Pub/Sub topic projects/default-project/topics/example-bucket:
gsutil notification create -f json gs://example-bucket
The same as above, but specifies the destination topic ID 'files-to-process'
in the default project:
gsutil notification create -f json \\
-t files-to-process gs://example-bucket
The same as above, but specifies a Cloud Pub/Sub topic belonging to the
specific cloud project 'example-project':
gsutil notification create -f json \\
-t projects/example-project/topics/files-to-process gs://example-bucket
Create a notification config that only sends an event when a new object
has been created:
gsutil notification create -f json -e OBJECT_FINALIZE gs://example-bucket
Create a topic and notification config that only sends an event when
an object beginning with "photos/" is affected:
gsutil notification create -p photos/ gs://example-bucket
List all of the notificationConfigs in bucket example-bucket:
gsutil notification list gs://example-bucket
Delete all notificationConfigs for bucket example-bucket:
gsutil notification delete gs://example-bucket
Delete one specific notificationConfig for bucket example-bucket:
gsutil notification delete \\
projects/_/buckets/example-bucket/notificationConfigs/1
<B>OPTIONS</B>
The create sub-command has the following options
-e Specify an event type filter for this notification config. Cloud
Storage only sends notifications of this type. You may specify this
parameter multiple times to allow multiple event types. If not
specified, Cloud Storage sends notifications for all event types.
The valid types are:
OBJECT_FINALIZE - An object has been created.
OBJECT_METADATA_UPDATE - The metadata of an object has changed.
OBJECT_DELETE - An object has been permanently deleted.
OBJECT_ARCHIVE - A live version of an object has become a
noncurrent version.
-f Specifies the payload format of notification messages. Must be
either "json" for a payload matches the object metadata for the
JSON API, or "none" to specify no payload at all. In either case,
notification details are available in the message attributes.
-m Specifies a key:value attribute that is appended to the set
of attributes sent to Cloud Pub/Sub for all events associated with
this notification config. You may specify this parameter multiple
times to set multiple attributes.
-p Specifies a prefix path filter for this notification config. Cloud
Storage only sends notifications for objects in this bucket whose
names begin with the specified prefix.
-s Skips creation and permission assignment of the Cloud Pub/Sub topic.
This is useful if the caller does not have permission to access
the topic in question, or if the topic already exists and has the
appropriate publish permission assigned.
-t The Cloud Pub/Sub topic to which notifications should be sent. If
not specified, this command chooses a topic whose project is your
default project and whose ID is the same as the Cloud Storage bucket
name.
<B>NEXT STEPS</B>
Once the create command has succeeded, Cloud Storage publishes a message to
the specified Cloud Pub/Sub topic when eligible changes occur. In order to
receive these messages, you must create a Pub/Sub subscription for your
Pub/Sub topic. To learn more about creating Pub/Sub subscriptions, see `the
Pub/Sub Subscriber Overview <https://cloud.google.com/pubsub/docs/subscriber>`_.
You can create a simple Pub/Sub subscription using the ``gcloud`` command-line
tool. For example, to create a new subscription on the topic "myNewTopic" and
attempt to pull messages from it, you could run:
gcloud beta pubsub subscriptions create --topic myNewTopic testSubscription
gcloud beta pubsub subscriptions pull --auto-ack testSubscription
"""
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See `Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string is
generated.
The optional token parameter can be used to validate notification events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
<B>WATCHBUCKET EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that is included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
<B>STOPCHANNEL EXAMPLES</B>
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
"""
_DESCRIPTION = """
You can use the ``notification`` command to configure
`Pub/Sub notifications for Cloud Storage
<https://cloud.google.com/storage/docs/pubsub-notifications>`_
and `Object change notification
<https://cloud.google.com/storage/docs/object-change-notification>`_ channels.
<B>CLOUD PUB/SUB</B>
The "create", "list", and "delete" sub-commands deal with configuring Cloud
Storage integration with Google Cloud Pub/Sub.
""" + _CREATE_DESCRIPTION + _LIST_DESCRIPTION + _DELETE_DESCRIPTION + """
<B>OBJECT CHANGE NOTIFICATIONS</B>
Object change notification is a separate, older feature within Cloud Storage
for generating notifications. This feature sends HTTPS messages to a client
application that you've set up separately. This feature is generally not
recommended, because Pub/Sub notifications are cheaper, easier to use, and
more flexible. For more information, see
`Object change notification
<https://cloud.google.com/storage/docs/object-change-notification>`_.
The "watchbucket" and "stopchannel" sub-commands enable and disable Object
change notifications.
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
gsutil supports `parallel composite uploads
<https://cloud.google.com/storage/docs/uploads-downloads#parallel-composite-uploads>`_.
If enabled, an upload can result in multiple temporary component objects
being uploaded before the actual intended object is created. Any subscriber
to notifications for this bucket then sees a notification for each of these
components being created and deleted. If this is a concern for you, note
that parallel composite uploads can be disabled by setting
"parallel_composite_upload_threshold = 0" in your .boto config file.
Alternately, your subscriber code can filter out gsutil's parallel
composite uploads by ignoring any notification about objects whose names
contain (but do not start with) the following string:
"{composite_namespace}".
""".format(composite_namespace=copy_helper.PARALLEL_UPLOAD_TEMP_NAMESPACE)
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see
`Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_.
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
# yapf: disable
_create_help_text = (
CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION))
_list_help_text = (
CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION))
_delete_help_text = (
CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION))
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
# yapf: enable
PAYLOAD_FORMAT_MAP = {
'none': 'NONE',
'json': 'JSON_API_V1',
}
class NotificationCommand(Command):
"""Implementation of gsutil notification command."""
# Notification names might look like one of these:
# canonical form: projects/_/buckets/bucket/notificationConfigs/3
# JSON API form: b/bucket/notificationConfigs/5
# Either of the above might start with a / if a user is copying & pasting.
def _GetNotificationPathRegex(self):
if not NotificationCommand._notification_path_regex:
NotificationCommand._notification_path_regex = re.compile(
('/?(projects/[^/]+/)?b(uckets)?/(?P<bucket>[^/]+)/'
'notificationConfigs/(?P<notification>[0-9]+)'))
return NotificationCommand._notification_path_regex
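# For illustration (not in the original source): matching the canonical form
# 'projects/_/buckets/example-bucket/notificationConfigs/3' yields
# group('bucket') == 'example-bucket' and group('notification') == '3';
# the JSON API form 'b/example-bucket/notificationConfigs/3' yields the same.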
_notification_path_regex = None
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'notification',
command_name_aliases=[
'notify',
'notifyconfig',
'notifications',
'notif',
],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='i:t:m:t:of:e:p:s',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'watchbucket': [
CommandArgument.MakeFreeTextArgument(),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
'stopchannel': [],
'list': [CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),],
'delete': [
# Takes a list of one of the following:
# notification: projects/_/buckets/bla/notificationConfigs/5,
# bucket: gs://foobar
CommandArgument.MakeZeroOrMoreCloudURLsArgument(),
],
'create': [
CommandArgument.MakeFreeTextArgument(), # Cloud Pub/Sub topic
CommandArgument.MakeNCloudBucketURLsArgument(1),
]
},
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='notification',
help_name_aliases=[
'watchbucket',
'stopchannel',
'notifyconfig',
],
help_type='command_help',
help_one_line_summary='Configure object change notification',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'create': _create_help_text,
'list': _list_help_text,
'delete': _delete_help_text,
'watchbucket': _watchbucket_help_text,
'stopchannel': _stopchannel_help_text,
},
)
def _WatchBucket(self):
"""Creates a watch on a bucket given in self.args."""
self.CheckArguments()
identifier = None
client_token = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-i':
identifier = a
if o == '-t':
client_token = a
identifier = identifier or str(uuid.uuid4())
watch_url = self.args[0]
bucket_arg = self.args[-1]
if not watch_url.lower().startswith('https://'):
raise CommandException('The application URL must be an https:// URL.')
bucket_url = StorageUrlFromString(bucket_arg)
if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
if not bucket_url.IsBucket():
raise CommandException('URL must name a bucket for the %s command.' %
self.command_name)
self.logger.info('Watching bucket %s with application URL %s ...',
bucket_url, watch_url)
try:
channel = self.gsutil_api.WatchBucket(bucket_url.bucket_name,
watch_url,
identifier,
token=client_token,
provider=bucket_url.scheme)
except AccessDeniedException as e:
self.logger.warn(
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(watch_error=str(e),
watch_url=watch_url))
raise
channel_id = channel.id
resource_id = channel.resourceId
client_token = channel.token
self.logger.info('Successfully created watch notification channel.')
self.logger.info('Watch channel identifier: %s', channel_id)
self.logger.info('Canonicalized resource identifier: %s', resource_id)
self.logger.info('Client state token: %s', client_token)
return 0
def _StopChannel(self):
channel_id = self.args[0]
resource_id = self.args[1]
self.logger.info('Removing channel %s with resource identifier %s ...',
channel_id, resource_id)
self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
self.logger.info('Successfully removed channel.')
return 0
def _ListChannels(self, bucket_arg):
"""Lists active channel watches on a bucket given in self.args."""
bucket_url = StorageUrlFromString(bucket_arg)
if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
if not bucket_url.IsBucket():
raise CommandException('URL must name a bucket for the %s command.' %
self.command_name)
channels = self.gsutil_api.ListChannels(bucket_url.bucket_name,
provider='gs').items
self.logger.info(
'Bucket %s has the following active Object Change Notifications:',
bucket_url.bucket_name)
for idx, channel in enumerate(channels):
self.logger.info('\tNotification channel %d:', idx + 1)
self.logger.info('\t\tChannel identifier: %s', channel.channel_id)
self.logger.info('\t\tResource identifier: %s', channel.resource_id)
self.logger.info('\t\tApplication URL: %s', channel.push_url)
self.logger.info('\t\tCreated by: %s', channel.subscriber_email)
self.logger.info(
'\t\tCreation time: %s',
str(datetime.fromtimestamp(channel.creation_time_ms / 1000)))
return 0
def _Create(self):
self.CheckArguments()
# User-specified options
pubsub_topic = None
payload_format = None
custom_attributes = {}
event_types = []
object_name_prefix = None
should_setup_topic = True
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-e':
event_types.append(a)
elif o == '-f':
payload_format = a
elif o == '-m':
if ':' not in a:
raise CommandException(
'Custom attributes specified with -m should be of the form '
'key:value')
key, value = a.split(':', 1)
custom_attributes[key] = value
elif o == '-p':
object_name_prefix = a
elif o == '-s':
should_setup_topic = False
elif o == '-t':
pubsub_topic = a
if payload_format not in PAYLOAD_FORMAT_MAP:
raise CommandException(
"Must provide a payload format with -f of either 'json' or 'none'")
payload_format = PAYLOAD_FORMAT_MAP[payload_format]
bucket_arg = self.args[-1]
bucket_url = StorageUrlFromString(bucket_arg)
if not bucket_url.IsCloudUrl() or not bucket_url.IsBucket():
raise CommandException(
"%s %s requires a GCS bucket name, but got '%s'" %
(self.command_name, self.subcommand_name, bucket_arg))
if bucket_url.scheme != 'gs':
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
bucket_name = bucket_url.bucket_name
self.logger.debug('Creating notification for bucket %s', bucket_url)
# Find the project this bucket belongs to
bucket_metadata = self.gsutil_api.GetBucket(bucket_name,
fields=['projectNumber'],
provider=bucket_url.scheme)
bucket_project_number = bucket_metadata.projectNumber
# If not specified, choose a sensible default for the Cloud Pub/Sub topic
# name.
if not pubsub_topic:
pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
bucket_name)
if not pubsub_topic.startswith('projects/'):
# If a user picks a topic ID (mytopic) but doesn't pass the whole name (
# projects/my-project/topics/mytopic ), pick a default project.
pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
pubsub_topic)
self.logger.debug('Using Cloud Pub/Sub topic %s', pubsub_topic)
just_modified_topic_permissions = False
if should_setup_topic:
# Ask GCS for the email address that represents GCS's permission to
# publish to a Cloud Pub/Sub topic from this project.
service_account = self.gsutil_api.GetProjectServiceAccount(
bucket_project_number, provider=bucket_url.scheme).email_address
self.logger.debug('Service account for project %d: %s',
bucket_project_number, service_account)
just_modified_topic_permissions = self._CreateTopic(
pubsub_topic, service_account)
for attempt_number in range(0, 2):
try:
create_response = self.gsutil_api.CreateNotificationConfig(
bucket_name,
pubsub_topic=pubsub_topic,
payload_format=payload_format,
custom_attributes=custom_attributes,
event_types=event_types if event_types else None,
object_name_prefix=object_name_prefix,
provider=bucket_url.scheme)
break
except PublishPermissionDeniedException:
if attempt_number == 0 and just_modified_topic_permissions:
# If we have just set the IAM policy, it may take up to 10 seconds to
# take effect.
self.logger.info(
'Retrying create notification in 10 seconds '
'(new permissions may take up to 10 seconds to take effect.)')
time.sleep(10)
else:
raise
notification_name = 'projects/_/buckets/%s/notificationConfigs/%s' % (
bucket_name, create_response.id)
self.logger.info('Created notification config %s', notification_name)
return 0
def _CreateTopic(self, pubsub_topic, service_account):
"""Assures that a topic exists, creating it if necessary.
Also adds GCS as a publisher on that topic, if necessary.
Args:
pubsub_topic: name of the Cloud Pub/Sub topic to use/create.
service_account: the GCS service account that needs publish permission.
Returns:
true if we modified IAM permissions, otherwise false.
"""
pubsub_api = PubsubApi(logger=self.logger)
# Verify that the Pub/Sub topic exists. If it does not, create it.
try:
pubsub_api.GetTopic(topic_name=pubsub_topic)
self.logger.debug('Topic %s already exists', pubsub_topic)
except NotFoundException:
self.logger.debug('Creating topic %s', pubsub_topic)
pubsub_api.CreateTopic(topic_name=pubsub_topic)
self.logger.info('Created Cloud Pub/Sub topic %s', pubsub_topic)
# Verify that the service account is in the IAM policy.
policy = pubsub_api.GetTopicIamPolicy(topic_name=pubsub_topic)
binding = Binding(role='roles/pubsub.publisher',
members=['serviceAccount:%s' % service_account])
# This could be more extensive. We could, for instance, check for roles
# that are stronger than pubsub.publisher, like owner. We could also
# recurse up the hierarchy looking to see if there are project-level
# permissions. This can get very complex very quickly, as the caller
# may not necessarily have access to the project-level IAM policy.
# There's no danger in double-granting permission just to make sure it's
# there, though.
if binding not in policy.bindings:
policy.bindings.append(binding)
# transactional safety via etag field.
pubsub_api.SetTopicIamPolicy(topic_name=pubsub_topic, policy=policy)
return True
else:
self.logger.debug('GCS already has publish permission to topic %s.',
pubsub_topic)
return False
def _EnumerateNotificationsFromArgs(self, accept_notification_configs=True):
"""Yields bucket/notification tuples from command-line args.
Given a list of strings that are bucket names (gs://foo) or notification
config IDs, yield tuples of bucket names and their associated notifications.
Args:
accept_notification_configs: whether notification configs are valid args.
Yields:
Tuples of the form (bucket_name, Notification)
"""
path_regex = self._GetNotificationPathRegex()
for list_entry in self.args:
match = path_regex.match(list_entry)
if match:
if not accept_notification_configs:
raise CommandException(
'%s %s accepts only bucket names, but you provided %s' %
(self.command_name, self.subcommand_name, list_entry))
bucket_name = match.group('bucket')
notification_id = match.group('notification')
found = False
for notification in self.gsutil_api.ListNotificationConfigs(
bucket_name, provider='gs'):
if notification.id == notification_id:
yield (bucket_name, notification)
found = True
break
if not found:
raise NotFoundException('Could not find notification %s' % list_entry)
else:
storage_url = StorageUrlFromString(list_entry)
if not storage_url.IsCloudUrl():
raise CommandException(
'The %s command must be used on cloud buckets or notification '
'config names.' % self.command_name)
if storage_url.scheme != 'gs':
raise CommandException('The %s command only works on gs:// buckets.' %
self.command_name)
path = None
if storage_url.IsProvider():
path = 'gs://*'
elif storage_url.IsBucket():
path = list_entry
if not path:
raise CommandException(
'The %s command cannot be used on cloud objects, only buckets' %
self.command_name)
for blr in self.WildcardIterator(path).IterBuckets(
bucket_fields=['id']):
for notification in self.gsutil_api.ListNotificationConfigs(
blr.storage_url.bucket_name, provider='gs'):
yield (blr.storage_url.bucket_name, notification)
def _List(self):
self.CheckArguments()
if self.sub_opts:
if '-o' in dict(self.sub_opts):
for bucket_name in self.args:
self._ListChannels(bucket_name)
else:
for bucket_name, notification in self._EnumerateNotificationsFromArgs(
accept_notification_configs=False):
self._PrintNotificationDetails(bucket_name, notification)
return 0
def _PrintNotificationDetails(self, bucket, notification):
print('projects/_/buckets/{bucket}/notificationConfigs/{notification}\n'
'\tCloud Pub/Sub topic: {topic}'.format(
bucket=bucket,
notification=notification.id,
topic=notification.topic[len('//pubsub.googleapis.com/'):]))
if notification.custom_attributes:
print('\tCustom attributes:')
for attr in notification.custom_attributes.additionalProperties:
print('\t\t%s: %s' % (attr.key, attr.value))
filters = []
if notification.event_types:
filters.append('\t\tEvent Types: %s' %
', '.join(notification.event_types))
if notification.object_name_prefix:
filters.append("\t\tObject name prefix: '%s'" %
notification.object_name_prefix)
if filters:
print('\tFilters:')
for line in filters:
print(line)
self.logger.info('')
def _Delete(self):
for bucket_name, notification in self._EnumerateNotificationsFromArgs():
self._DeleteNotification(bucket_name, notification.id)
return 0
def _DeleteNotification(self, bucket_name, notification_id):
self.gsutil_api.DeleteNotificationConfig(bucket_name,
notification=notification_id,
provider='gs')
return 0
def _RunSubCommand(self, func):
try:
(self.sub_opts,
self.args) = getopt.getopt(self.args,
self.command_spec.supported_sub_args)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
return func(self)
except getopt.GetoptError:
self.RaiseInvalidArgumentException()
SUBCOMMANDS = {
'create': _Create,
'list': _List,
'delete': _Delete,
'watchbucket': _WatchBucket,
'stopchannel': _StopChannel
}
def RunCommand(self):
"""Command entry point for the notification command."""
self.subcommand_name = self.args.pop(0)
if self.subcommand_name in NotificationCommand.SUBCOMMANDS:
metrics.LogCommandParams(subcommands=[self.subcommand_name])
return self._RunSubCommand(
NotificationCommand.SUBCOMMANDS[self.subcommand_name])
else:
raise CommandException('Invalid subcommand "%s" for the %s command.' %
(self.subcommand_name, self.command_name))
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/notification.py
| 0.838051 | 0.167729 |
notification.py
|
pypi
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.metrics import LogCommandParams
from gslib.project_id import PopulateProjectId
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.text_util import InsistAscii
_CREATE_SYNOPSIS = """
gsutil hmac create [-p <project>] <service_account_email>
"""
_DELETE_SYNOPSIS = """
gsutil hmac delete [-p <project>] <access_id>
"""
_GET_SYNOPSIS = """
gsutil hmac get [-p <project>] <access_id>
"""
_LIST_SYNOPSIS = """
gsutil hmac list [-a] [-l] [-p <project>] [-u <service_account_email>]
"""
_UPDATE_SYNOPSIS = """
gsutil hmac update -s (ACTIVE|INACTIVE) [-e <etag>] [-p <project>] <access_id>
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The ``hmac create`` command creates an HMAC key for the specified service
account:
gsutil hmac create test.service.account@test_project.iam.gserviceaccount.com
The secret key material is only available upon creation, so be sure to store
the returned secret along with the access_id.
<B>CREATE OPTIONS</B>
The ``create`` sub-command has the following option
-p <project> Specify the ID or number of the project in which
to create a key.
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The ``hmac delete`` command permanently deletes the specified HMAC key:
gsutil hmac delete GOOG56JBMFZX6PMPTQ62VD2
Note that keys must be updated to be in the ``INACTIVE`` state before they can be
deleted.
<B>DELETE OPTIONS</B>
The "delete" sub-command has the following option
-p <project>. Specify the ID or number of the project from which to
delete a key.
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``hmac get`` command retrieves the specified HMAC key's metadata:
gsutil hmac get GOOG56JBMFZX6PMPTQ62VD2
Note that there is no option to retrieve a key's secret material after it has
been created.
<B>GET OPTIONS</B>
The ``get`` sub-command has the following option
-p <project> Specify the ID or number of the project from which to
get a key.
"""
_LIST_DESCRIPTION = """
<B>LIST</B>
The ``hmac list`` command lists the HMAC key metadata for keys in the
specified project. If no project is specified in the command, the default
project is used.
<B>LIST OPTIONS</B>
The ``list`` sub-command has the following options
-a Show all keys, including recently deleted
keys.
-l Use long listing format. Shows each key's full
metadata excluding the secret.
-p <project> Specify the ID or number of the project from
which to list keys.
-u <service_account_email> Filter keys for a single service account.
"""
_UPDATE_DESCRIPTION = """
<B>UPDATE</B>
The ``hmac update`` command sets the state of the specified key:
gsutil hmac update -s INACTIVE -e M42da= GOOG56JBMFZX6PMPTQ62VD2
Valid state arguments are ``ACTIVE`` and ``INACTIVE``. To set a key to state
``DELETED``, use the ``hmac delete`` command on an ``INACTIVE`` key. If an etag
is set in the command, it will only succeed if the provided etag matches the etag
of the stored key.
<B>UPDATE OPTIONS</B>
The ``update`` sub-command has the following options
-s <ACTIVE|INACTIVE> Sets the state of the specified key to either
``ACTIVE`` or ``INACTIVE``.
-e <etag> If provided, the update will only be performed
if the specified etag matches the etag of the
stored key.
-p <project> Specify the ID or number of the project in
which to update a key.
"""
_SYNOPSIS = (_CREATE_SYNOPSIS + _DELETE_SYNOPSIS.lstrip('\n') +
_GET_SYNOPSIS.lstrip('\n') + _LIST_SYNOPSIS.lstrip('\n') +
_UPDATE_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = """
You can use the ``hmac`` command to interact with service account `HMAC keys
<https://cloud.google.com/storage/docs/authentication/hmackeys>`_.
The ``hmac`` command has five sub-commands:
""" + '\n'.join([
_CREATE_DESCRIPTION,
_DELETE_DESCRIPTION,
_GET_DESCRIPTION,
_LIST_DESCRIPTION,
_UPDATE_DESCRIPTION,
])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_VALID_UPDATE_STATES = ['INACTIVE', 'ACTIVE']
_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
_create_help_text = CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION)
_delete_help_text = CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_list_help_text = CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION)
_update_help_text = CreateHelpText(_UPDATE_SYNOPSIS, _UPDATE_DESCRIPTION)
def _AccessIdException(command_name, subcommand, synopsis):
return CommandException(
'%s %s requires an Access ID to be specified as the last argument.\n%s' %
(command_name, subcommand, synopsis))
def _KeyMetadataOutput(metadata):
"""Format the key metadata for printing to the console."""
def FormatInfo(name, value, new_line=True):
"""Format the metadata name-value pair into two aligned columns."""
width = 22
info_str = '\t%-*s %s' % (width, name + ':', value)
if new_line:
info_str += '\n'
return info_str
message = 'Access ID %s:\n' % metadata.accessId
message += FormatInfo('State', metadata.state)
message += FormatInfo('Service Account', metadata.serviceAccountEmail)
message += FormatInfo('Project', metadata.projectId)
message += FormatInfo('Time Created',
metadata.timeCreated.strftime(_TIME_FORMAT))
message += FormatInfo('Time Last Updated',
metadata.updated.strftime(_TIME_FORMAT))
message += FormatInfo('Etag', metadata.etag, new_line=False)
return message
class HmacCommand(Command):
"""Implementation of gsutil hmac command."""
command_spec = Command.CreateCommandSpec(
'hmac',
min_args=1,
max_args=8,
supported_sub_args='ae:lp:s:u:',
file_url_ok=True,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
usage_synopsis=_SYNOPSIS,
argparse_arguments={
'create': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'delete': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'get': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'list': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
'update': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
},
)
help_spec = Command.HelpSpec(
help_name='hmac',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary=('CRUD operations on service account HMAC keys.'),
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'create': _create_help_text,
'delete': _delete_help_text,
'get': _get_help_text,
'list': _list_help_text,
'update': _update_help_text,
})
def _CreateHmacKey(self, thread_state=None):
"""Creates HMAC key for a service account."""
if self.args:
self.service_account_email = self.args[0]
else:
err_msg = ('%s %s requires a service account to be specified as the '
'last argument.\n%s')
raise CommandException(
err_msg %
(self.command_name, self.action_subcommand, _CREATE_SYNOPSIS))
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.CreateHmacKey(self.project_id,
self.service_account_email,
provider='gs')
print('%-12s %s' % ('Access ID:', response.metadata.accessId))
print('%-12s %s' % ('Secret:', response.secret))
def _DeleteHmacKey(self, thread_state=None):
"""Deletes an HMAC key."""
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_DELETE_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
gsutil_api.DeleteHmacKey(self.project_id, access_id, provider='gs')
def _GetHmacKey(self, thread_state=None):
"""Gets HMAC key from its Access Id."""
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_GET_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.GetHmacKey(self.project_id, access_id, provider='gs')
print(_KeyMetadataOutput(response))
def _ListHmacKeys(self, thread_state=None):
"""Lists HMAC keys for a project or service account."""
if self.args:
raise CommandException(
'%s %s received unexpected arguments.\n%s' %
(self.command_name, self.action_subcommand, _LIST_SYNOPSIS))
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.ListHmacKeys(self.project_id,
self.service_account_email,
self.show_all,
provider='gs')
short_list_format = '%s\t%-12s %s'
if self.long_list:
for item in response:
print(_KeyMetadataOutput(item))
print()
else:
for item in response:
print(short_list_format %
(item.accessId, item.state, item.serviceAccountEmail))
def _UpdateHmacKey(self, thread_state=None):
"""Update an HMAC key's state."""
if not self.state:
raise CommandException(
'A state flag must be supplied for %s %s\n%s' %
(self.command_name, self.action_subcommand, _UPDATE_SYNOPSIS))
elif self.state not in _VALID_UPDATE_STATES:
raise CommandException('The state flag value must be one of %s' %
', '.join(_VALID_UPDATE_STATES))
if self.args:
access_id = self.args[0]
else:
raise _AccessIdException(self.command_name, self.action_subcommand,
_UPDATE_SYNOPSIS)
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
response = gsutil_api.UpdateHmacKey(self.project_id,
access_id,
self.state,
self.etag,
provider='gs')
print(_KeyMetadataOutput(response))
def RunCommand(self):
"""Command entry point for the hmac command."""
if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON:
raise CommandException(
'The "hmac" command can only be used with the GCS JSON API')
self.action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
LogCommandParams(sub_opts=self.sub_opts)
self.service_account_email = None
self.state = None
self.show_all = False
self.long_list = False
self.etag = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-u':
self.service_account_email = a
elif o == '-p':
# Project IDs are sent as header values when using gs and s3 XML APIs.
InsistAscii(a, 'Invalid non-ASCII character found in project ID')
self.project_id = a
elif o == '-s':
self.state = a
elif o == '-a':
self.show_all = True
elif o == '-l':
self.long_list = True
elif o == '-e':
self.etag = a
if not self.project_id:
self.project_id = PopulateProjectId(None)
method_for_arg = {
'create': self._CreateHmacKey,
'delete': self._DeleteHmacKey,
'get': self._GetHmacKey,
'list': self._ListHmacKeys,
'update': self._UpdateHmacKey,
}
if self.action_subcommand not in method_for_arg:
raise CommandException('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help hmac".' %
(self.action_subcommand, self.command_name))
LogCommandParams(subcommands=[self.action_subcommand])
method_for_arg[self.action_subcommand]()
return 0
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/hmac.py
| 0.726523 | 0.188548 |
hmac.py
|
pypi
|
"""This module provides the rpo command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
import textwrap
from gslib import metrics
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.help_provider import CreateHelpText
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import NO_MAX
VALID_RPO_VALUES = ('ASYNC_TURBO', 'DEFAULT')
VALID_RPO_VALUES_STRING = '({})'.format('|'.join(VALID_RPO_VALUES))
_SET_SYNOPSIS = """
gsutil rpo set {} gs://<bucket_name>...
""".format(VALID_RPO_VALUES_STRING)
_GET_SYNOPSIS = """
gsutil rpo get gs://<bucket_name>...
"""
_SYNOPSIS = _SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n')
_SET_DESCRIPTION = """
<B>SET</B>
The ``rpo set`` command configures turbo replication
for dual-region Google Cloud Storage buckets.
<B>SET EXAMPLES</B>
Configure your buckets to use turbo replication:
gsutil rpo set ASYNC_TURBO gs://redbucket gs://bluebucket
Configure your buckets to NOT use turbo replication:
gsutil rpo set DEFAULT gs://redbucket gs://bluebucket
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``rpo get`` command returns the replication setting
for the specified Cloud Storage buckets.
<B>GET EXAMPLES</B>
Check if your buckets are using turbo replication:
gsutil rpo get gs://redbucket gs://bluebucket
"""
_DESCRIPTION = """
The ``rpo`` command is used to retrieve or configure the
`replication setting
<https://cloud.google.com/storage/docs/turbo-replication>`_ of
dual-region Cloud Storage buckets.
This command has two sub-commands: ``get`` and ``set``.
""" + _GET_DESCRIPTION + _SET_DESCRIPTION
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
class RpoCommand(Command):
"""Implements the gsutil rpo command."""
command_spec = Command.CreateCommandSpec(
'rpo',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=2,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'get': [CommandArgument.MakeNCloudURLsArgument(1)],
'set': [
CommandArgument('mode', choices=list(VALID_RPO_VALUES)),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
})
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='rpo',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Configure replication',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text,
'set': _set_help_text,
},
)
def _ValidateBucketListingRefAndReturnBucketName(self, blr):
if blr.storage_url.scheme != 'gs':
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
def _GetRpo(self, blr):
"""Gets the rpo setting for a bucket."""
bucket_url = blr.storage_url
bucket_metadata = self.gsutil_api.GetBucket(bucket_url.bucket_name,
fields=['rpo'],
provider=bucket_url.scheme)
rpo = bucket_metadata.rpo
bucket = str(bucket_url).rstrip('/')
print('%s: %s' % (bucket, rpo))
def _SetRpo(self, blr, rpo_value):
"""Sets the rpo setting for a bucket."""
bucket_url = blr.storage_url
formatted_rpo_value = rpo_value
if formatted_rpo_value not in VALID_RPO_VALUES:
raise CommandException(
'Invalid value for rpo set.'
' Should be one of {}'.format(VALID_RPO_VALUES_STRING))
bucket_metadata = apitools_messages.Bucket(rpo=formatted_rpo_value)
self.logger.info('Setting rpo %s for %s' %
(formatted_rpo_value, str(bucket_url).rstrip('/')))
self.gsutil_api.PatchBucket(bucket_url.bucket_name,
bucket_metadata,
fields=['rpo'],
provider=bucket_url.scheme)
return 0
def _Rpo(self):
"""Handles rpo command on Cloud Storage buckets."""
subcommand = self.args.pop(0)
if subcommand not in ('get', 'set'):
raise CommandException('rpo only supports get|set')
subcommand_func = None
subcommand_args = []
if subcommand == 'get':
subcommand_func = self._GetRpo
elif subcommand == 'set':
subcommand_func = self._SetRpo
setting_arg = self.args.pop(0)
subcommand_args.append(setting_arg)
# TODO: Remove this as rpo should work for XML as well.
if self.gsutil_api.GetApiSelector('gs') != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used with the Cloud Storage '
'JSON API.') % self.command_name)))
# Iterate over bucket args, performing the specified subsubcommand.
some_matched = False
url_args = self.args
if not url_args:
self.RaiseWrongNumberOfArgumentsException()
for url_str in url_args:
# Throws a CommandException if the argument is not a bucket.
bucket_iter = self.GetBucketUrlIterFromArg(url_str)
for bucket_listing_ref in bucket_iter:
# TODO: Make this more general when adding RPO support for the XML API.
if self.gsutil_api.GetApiSelector(
bucket_listing_ref.storage_url.scheme) != ApiSelector.JSON:
raise CommandException('\n'.join(
textwrap.wrap(('The "%s" command can only be used for GCS '
'buckets.') % self.command_name)))
some_matched = True
subcommand_func(bucket_listing_ref, *subcommand_args)
if not some_matched:
raise CommandException(NO_URLS_MATCHED_TARGET % list(url_args))
return 0
def RunCommand(self):
"""Command entry point for the rpo command."""
action_subcommand = self.args[0]
self.ParseSubOpts(check_args=True)
if action_subcommand == 'get' or action_subcommand == 'set':
metrics.LogCommandParams(sub_opts=self.sub_opts)
metrics.LogCommandParams(subcommands=[action_subcommand])
return self._Rpo()
else:
raise CommandException('Invalid subcommand "%s", use get|set instead.' %
action_subcommand)
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/commands/rpo.py
| 0.746693 | 0.18228 |
rpo.py
|
pypi
|
import re
from os import listdir
from os.path import join
from numpy import stack, float32, asarray
from glymur import Jp2k
from ..Util import Channels, Paths
from typing import Union
from datetime import datetime
from typing import Tuple
class FolderMetaData:
def __init__(self, y: int, m: int, d: int, n: int, r: int, tile: str):
self.year = y
self.month = m
self.day = d
self.n = n
self.r = r
self.tile = tile
@staticmethod
def from_folder_name(name: str):
# example name "S2A_MSIL1C_20191007T103021_N0208_R108_T32TMT_20191007T123034.SAFE"
date_format = re.compile(r"^.*MSI.{3}_(\d{4})(\d{2})(\d{2}).*")
img_info_format = re.compile(r"^.*_N(\d+)?.R(\d+)_.*")
matches_date = re.match(date_format, name)
matches_nr = re.match(img_info_format, name)
try:
y, m, d = [int(matches_date.group(i)) for i in [1, 2, 3]]
n, r = [int(matches_nr.group(i)) for i in [1, 2]]
except Exception as e:
print(f"{e}:: couldn't parse {name}")
y, m, d = (0, 0, 0)
n, r = (0, 0)
tile = name.split("_")[-2]
return FolderMetaData(y, m, d, n, r, tile)
class FolderData:
def __init__(self, folder_name: str,
edge_1: Tuple[float, float] = (0.0, 0.0),
edge_2: Tuple[float, float] = (109800.0, 109800.0)):
self.meta = FolderMetaData.from_folder_name(folder_name)
x_low = (min(int(edge_1[0] // 10), int(edge_2[0] // 10)))
x_high = (max(int(edge_1[0] // 10), int(edge_2[0] // 10)))
y_low = (min(int(edge_1[1] // 10), int(edge_2[1] // 10)))
y_high = (max(int(edge_1[1] // 10), int(edge_2[1] // 10)))
self.x_indices = (x_low, x_high)
self.y_indices = (y_low, y_high)
self._folder = folder_name
self._img_path = join(self._folder, f"GRANULE/{listdir(join(self._folder, 'GRANULE'))[0]}/IMG_DATA")
self._img_names = listdir(self._img_path)
data = [self._load_channel(Channels(i)) for i in [0, 1, 2, 3]]
self.data = stack(data, axis=-1)
def _load_channel(self, channel: Channels):
for img_name in self._img_names:
if img_name.split(".")[0].endswith(channel.sentinel_short()):
jp2 = Jp2k(join(self._img_path, img_name))
x = slice(self.x_indices[0], self.x_indices[1])
y = slice(self.y_indices[0], self.y_indices[1])
return asarray(jp2[y, x], dtype=float32) / 4096.0
def folders_time_frame(paths: Paths, time_start: Union[datetime, None], time_end: Union[datetime, None]):
folders = paths.folders
folder_meta = map(lambda name: (name, FolderMetaData.from_folder_name(name)), folders)
result = []
for (name, meta) in folder_meta:
folder_time = datetime(meta.year, meta.month, meta.day)
later_as_start = time_start is None or folder_time >= time_start
before_end = time_end is None or folder_time <= time_end
if later_as_start and before_end:
result.append(name)
return result
|
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/FolderData/Folder.py
| 0.550124 | 0.358662 |
Folder.py
|
pypi
|
# Copyright (c) 2003-2011 Ralph Meijer
# Copyright (c) 2012-2021 Jérôme Poisson
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --
# This program is based on Idavoll (http://idavoll.ik.nu/),
# originally written by Ralph Meijer (http://ralphm.net/blog/)
# It is sublicensed under AGPL v3 (or any later version) as allowed by the original
# license.
# --
# Here is a copy of the original license:
# Copyright (c) 2003-2011 Ralph Meijer
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Interfaces for idavoll.
"""
from zope.interface import Attribute, Interface
class IBackendService(Interface):
""" Interface to a backend service of a pubsub service. """
def __init__(storage):
"""
@param storage: Object providing L{IStorage}.
"""
def supportsPublisherAffiliation():
""" Reports if the backend supports the publisher affiliation.
@rtype: C{bool}
"""
def supportsOutcastAffiliation():
""" Reports if the backend supports the publisher affiliation.
@rtype: C{bool}
"""
def supportsPersistentItems():
""" Reports if the backend supports persistent items.
@rtype: C{bool}
"""
def getNodeType(nodeIdentifier):
""" Return type of a node.
@return: a deferred that returns either 'leaf' or 'collection'
"""
def getNodes():
""" Returns list of all nodes.
@return: a deferred that returns a C{list} of node ids.
"""
def getNodeMetaData(nodeIdentifier):
""" Return meta data for a node.
@return: a deferred that returns a C{list} of C{dict}s with the
metadata.
"""
def createNode(nodeIdentifier, requestor):
""" Create a node.
@return: a deferred that fires when the node has been created.
"""
def registerPreDelete(preDeleteFn):
""" Register a callback that is called just before a node deletion.
The function C{preDeleteFn} is added to a list of functions to be
called just before deletion of a node. The callback C{preDeleteFn} is
called with the C{nodeIdentifier} that is about to be deleted and
should return a deferred that returns a list of deferreds that are to
be fired after deletion. The backend collects the lists from all these
callbacks before actually deleting the node in question. After
deletion all collected deferreds are fired to do post-processing.
The idea is that you want to be able to collect data from the node
before deleting it, for example to get a list of subscribers that have
to be notified after the node has been deleted. To do this,
C{preDeleteFn} fetches the subscriber list and passes this list to a
callback attached to a deferred that it sets up. This deferred is
returned in the list of deferreds.
"""
def deleteNode(nodeIdentifier, requestor):
""" Delete a node.
@return: a deferred that fires when the node has been deleted.
"""
def purgeNode(nodeIdentifier, requestor):
""" Removes all items in node from persistent storage """
def subscribe(nodeIdentifier, subscriber, requestor):
""" Request the subscription of an entity to a pubsub node.
Depending on the node's configuration and possible business rules, the
C{subscriber} is added to the list of subscriptions of the node with id
C{nodeIdentifier}. The C{subscriber} might be different from the
C{requestor}, and if the C{requestor} is not allowed to subscribe this
entity an exception should be raised.
@return: a deferred that returns the subscription state
"""
def unsubscribe(nodeIdentifier, subscriber, requestor):
""" Cancel the subscription of an entity to a pubsub node.
The subscription of C{subscriber} is removed from the list of
subscriptions of the node with id C{nodeIdentifier}. If the
C{requestor} is not allowed to unsubscribe C{subscriber}, an
exception should be raised.
@return: a deferred that fires when unsubscription is complete.
"""
def getSubscribers(nodeIdentifier):
""" Get node subscriber list.
@return: a deferred that fires with the list of subscribers.
"""
def getSubscriptions(entity):
""" Report the list of current subscriptions with this pubsub service.
Report the list of the current subscriptions with all nodes within this
pubsub service, for the C{entity}.
@return: a deferred that returns the list of all current subscriptions
as tuples C{(nodeIdentifier, subscriber, subscription)}.
"""
def getAffiliations(entity):
""" Report the list of current affiliations with this pubsub service.
Report the list of the current affiliations with all nodes within this
pubsub service, for the C{entity}.
@return: a deferred that returns the list of all current affiliations
as tuples C{(nodeIdentifier, affiliation)}.
"""
def publish(nodeIdentifier, items, requestor):
""" Publish items to a pubsub node.
@return: a deferred that fires when the items have been published.
@rtype: L{Deferred<twisted.internet.defer.Deferred>}
"""
def registerNotifier(observerfn, *args, **kwargs):
""" Register callback which is called for notification. """
def getNotifications(nodeIdentifier, items):
"""
Get notification list.
This method is called to discover which entities should receive
notifications for the given items that have just been published to the
given node.
The notification list contains tuples (subscriber, subscriptions,
items) to result in one notification per tuple: the given subscriptions
yielded the given items to be notified to this subscriber. This
structure is needed allow for letting the subscriber know which
subscriptions yielded which notifications, while catering for
collection nodes and content-based subscriptions.
To minimize the number of notifications per entity, implementers
should take care that if all items in C{items} were yielded
by the same set of subscriptions, exactly one tuple for this
subscriber is returned, so that the subscriber would get exactly one
notification. Alternatively, one tuple per subscription combination.
@param nodeIdentifier: The identifier of the node the items were
published to.
@type nodeIdentifier: C{unicode}.
@param items: The list of published items as
L{Element<twisted.words.xish.domish.Element>}s.
@type items: C{list}
@return: The notification list as tuples of
(L{JID<twisted.words.protocols.jabber.jid.JID>},
C{list} of L{Subscription<wokkel.pubsub.Subscription>},
C{list} of L{Element<twisted.words.xish.domish.Element>}.
@rtype: C{list}
"""
def getItems(nodeIdentifier, requestor, maxItems=None, itemIdentifiers=[]):
""" Retrieve items from persistent storage
If C{maxItems} is given, return the C{maxItems} last published
items, else if C{itemIdentifiers} is not empty, return the items
requested. If neither is given, return all items.
@return: a deferred that returns the requested items
"""
def retractItem(nodeIdentifier, itemIdentifier, requestor):
""" Removes item in node from persistent storage """
class IStorage(Interface):
"""
Storage interface.
"""
def getNode(nodeIdentifier):
"""
Get Node.
@param nodeIdentifier: NodeID of the desired node.
@type nodeIdentifier: C{str}
@return: deferred that returns a L{INode} providing object.
"""
def getNodeIds():
"""
Return all NodeIDs.
@return: deferred that returns a list of NodeIDs (C{unicode}).
"""
def createNode(nodeIdentifier, owner, config):
"""
Create new node.
The implementation should make sure the passed owner JID is stripped
of the resource (e.g. using C{owner.userhostJID()}). The passed config
is expected to have values for the fields returned by
L{getDefaultConfiguration}, as well as a value for
C{'pubsub#node_type'}.
@param nodeIdentifier: NodeID of the new node.
@type nodeIdentifier: C{unicode}
@param owner: JID of the new nodes's owner.
@type owner: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param config: Node configuration.
@type config: C{dict}
@return: deferred that fires on creation.
"""
def deleteNode(nodeIdentifier):
"""
Delete a node.
@param nodeIdentifier: NodeID of the new node.
@type nodeIdentifier: C{unicode}
@return: deferred that fires on deletion.
"""
def getAffiliations(entity):
"""
Get all affiliations for entity.
The implementation should make sure the passed entity JID is stripped
of the resource (e.g. using C{entity.userhostJID()}).
@param entity: JID of the entity.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that returns a C{list} of tuples of the form
C{(nodeIdentifier, affiliation)}, where C{nodeIdentifier} is
of the type L{unicode} and C{affiliation} is one of
C{'owner'}, C{'publisher'} and C{'outcast'}.
"""
def getSubscriptions(entity):
"""
Get all subscriptions for an entity.
The implementation should make sure the passed entity JID is stripped
of the resource (e.g. using C{entity.userhostJID()}).
@param entity: JID of the entity.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that returns a C{list} of tuples of the form
C{(nodeIdentifier, subscriber, state)}, where
C{nodeIdentifier} is of the type C{unicode}, C{subscriber} of
the type J{JID<twisted.words.protocols.jabber.jid.JID>}, and
C{state} is C{'subscribed'}, C{'pending'} or
C{'unconfigured'}.
"""
def getDefaultConfiguration(nodeType):
"""
Get the default configuration for the given node type.
@param nodeType: Either C{'leaf'} or C{'collection'}.
@type nodeType: C{str}
@return: The default configuration.
@rtype: C{dict}.
@raises: L{idavoll.error.NoCollections} if collections are not
supported.
"""
class INode(Interface):
"""
Interface to the class of objects that represent nodes.
"""
nodeType = Attribute("""The type of this node. One of {'leaf'},
{'collection'}.""")
nodeIdentifier = Attribute("""The node identifier of this node""")
def getType():
"""
Get node's type.
@return: C{'leaf'} or C{'collection'}.
"""
def getConfiguration():
"""
Get node's configuration.
The configuration must at least have two options:
C{pubsub#persist_items}, and C{pubsub#deliver_payloads}.
@return: C{dict} of configuration options.
"""
def getMetaData():
"""
Get node's meta data.
The meta data must be a superset of the configuration options, and
also at least should have a C{pubsub#node_type} entry.
@return: C{dict} of meta data.
"""
def setConfiguration(options):
"""
Set node's configuration.
The elements of {options} will set the new values for those
configuration items. This means that only changing items have to
be given.
@param options: a dictionary of configuration options.
@returns: a deferred that fires upon success.
"""
def getAffiliation(entity):
"""
Get affiliation of entity with this node.
@param entity: JID of entity.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that returns C{'owner'}, C{'publisher'}, C{'outcast'}
or C{None}.
"""
def getSubscription(subscriber):
"""
Get subscription to this node of subscriber.
@param subscriber: JID of the new subscriptions' entity.
@type subscriber: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that returns the subscription state (C{'subscribed'},
C{'pending'} or C{None}).
"""
def getSubscriptions(state=None):
"""
Get list of subscriptions to this node.
The optional C{state} argument filters the subscriptions to their
state.
@param state: Subscription state filter. One of C{'subscribed'},
C{'pending'}, C{'unconfigured'}.
@type state: C{str}
@return: a deferred that returns a C{list} of
L{wokkel.pubsub.Subscription}s.
"""
def addSubscription(subscriber, state, config):
"""
Add new subscription to this node with given state.
@param subscriber: JID of the new subscriptions' entity.
@type subscriber: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param state: C{'subscribed'} or C{'pending'}
@type state: C{str}
@param config: Subscription configuration.
@param config: C{dict}
@return: deferred that fires on subscription.
"""
def removeSubscription(subscriber):
"""
Remove subscription to this node.
@param subscriber: JID of the subscriptions' entity.
@type subscriber: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that fires on removal.
"""
def isSubscribed(entity):
"""
Returns whether entity has any subscription to this node.
Only returns C{True} when the subscription state (if present) is
C{'subscribed'} for any subscription that matches the bare JID.
@param subscriber: bare JID of the subscriptions' entity.
@type subscriber: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that returns a C{bool}.
"""
def getAffiliations():
"""
Get affiliations of entities with this node.
@return: deferred that returns a C{list} of tuples (jid, affiliation),
where jid is a L(JID<twisted.words.protocols.jabber.jid.JID>)
and affiliation is one of C{'owner'},
C{'publisher'}, C{'outcast'}.
"""
class ILeafNode(Interface):
"""
Interface to the class of objects that represent leaf nodes.
"""
def storeItems(items, publisher):
"""
Store items in persistent storage for later retrieval.
@param items: The list of items to be stored. Each item is the
L{domish} representation of the XML fragment as defined
for C{<item/>} in the
C{http://jabber.org/protocol/pubsub} namespace.
@type items: C{list} of {domish.Element}
@param publisher: JID of the publishing entity.
@type publisher: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that fires upon success.
"""
def removeItems(itemIdentifiers):
"""
Remove items by id.
@param itemIdentifiers: C{list} of item ids.
@return: deferred that fires with a C{list} of ids of the items that
were deleted
"""
def getItems(authorized_groups, unrestricted, maxItems=None):
""" Get all authorised items
If C{maxItems} is not given, all authorised items in the node are returned,
just like C{getItemsById}. Otherwise, C{maxItems} limits
the returned items to a maximum of that number of most recently
published and authorised items.
@param authorized_groups: we want to get items that these groups can access
@param unrestricted: if true, don't check permissions (i.e.: get all items)
@param maxItems: if given, a natural number (>0) that limits the
returned number of items.
@return: deferred that fires a C{list} of (item, access_model, id)
if unrestricted is True, else a C{list} of items.
"""
def countItems(authorized_groups, unrestricted):
""" Count the accessible items.
@param authorized_groups: we want to get items that these groups can access.
@param unrestricted: if true, don't check permissions (i.e.: get all items).
@return: deferred that fires a C{int}.
"""
def getIndex(authorized_groups, unrestricted, item):
""" Retrieve the index of the given item within the accessible window.
@param authorized_groups: we want to get items that these groups can access.
@param unrestricted: if true, don't check permissions (i.e.: get all items).
@param item: item identifier.
@return: deferred that fires a C{int}.
"""
def getItemsById(authorized_groups, unrestricted, itemIdentifiers):
"""
Get items by item id.
Each item in the returned list is a unicode string that
represent the XML of the item as it was published, including the
item wrapper with item id.
@param authorized_groups: we want to get items that these groups can access
@param unrestricted: if true, don't check permissions
@param itemIdentifiers: C{list} of item ids.
@return: deferred that fires a C{list} of (item, access_model, id)
if unrestricted is True, else a C{list} of items.
"""
def purge():
"""
Purge node of all items in persistent storage.
@return: deferred that fires when the node has been purged.
"""
def filterItemsWithPublisher(itemIdentifiers, requestor):
"""
Filter the given items by checking the items publisher against the requestor.
@param itemIdentifiers: C{list} of item ids.
@param requestor: JID of the requestor.
@type requestor: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: deferred that fires with a C{list} of item identifiers.
"""
class IGatewayStorage(Interface):
def addCallback(service, nodeIdentifier, callback):
"""
Register a callback URI.
The registered HTTP callback URI will have an Atom Entry documented
POSTed to it upon receiving a notification for the given pubsub node.
@param service: The XMPP entity that holds the node.
@type service: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nodeIdentifier: The identifier of the publish-subscribe node.
@type nodeIdentifier: C{unicode}.
@param callback: The callback URI to be registered.
@type callback: C{str}.
@rtype: L{Deferred<twisted.internet.defer.Deferred>}
"""
def removeCallback(service, nodeIdentifier, callback):
"""
Remove a registered callback URI.
The returned deferred will fire with a boolean that signals whether or
not this was the last callback unregistered for this node.
@param service: The XMPP entity that holds the node.
@type service: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nodeIdentifier: The identifier of the publish-subscribe node.
@type nodeIdentifier: C{unicode}.
@param callback: The callback URI to be unregistered.
@type callback: C{str}.
@rtype: L{Deferred<twisted.internet.defer.Deferred>}
"""
def getCallbacks(service, nodeIdentifier):
"""
Get the callbacks registered for this node.
Returns a deferred that fires with the set of HTTP callback URIs
registered for this node.
@param service: The XMPP entity that holds the node.
@type service: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nodeIdentifier: The identifier of the publish-subscribe node.
@type nodeIdentifier: C{unicode}.
@rtype: L{Deferred<twisted.internet.defer.Deferred>}
"""
def hasCallbacks(service, nodeIdentifier):
"""
Return whether there are callbacks registered for a node.
@param service: The XMPP entity that holds the node.
@type service: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nodeIdentifier: The identifier of the publish-subscribe node.
@type nodeIdentifier: C{unicode}.
@returns: Deferred that fires with a boolean.
@rtype: L{Deferred<twisted.internet.defer.Deferred>}
"""
|
/sat_pubsub-0.4.0-py3-none-any.whl/sat_pubsub/iidavoll.py
| 0.692434 | 0.189671 |
iidavoll.py
|
pypi
|
# sat-search
[](https://circleci.com/gh/sat-utils/sat-search)
Sat-search is a Python 3 library and a command line tool for discovering and downloading publicly available satellite imagery using a conformant API such as [sat-api](https://github.com/sat-utils/sat-api).
## Installation
Sat-search is a very lightweight application, with the only dependency being [sat-stac](https://github.com/sat-utils/sat-stac), which in turn has two dependencies: `requests` and `python-dateutil`. To install sat-search from PyPi:
```bash
$ pip install sat-search
```
From source repository:
```bash
$ git clone https://github.com/sat-utils/sat-search.git
$ cd sat-search
$ pip install .
```
#### Versions
The latest version of sat-search is 0.2.2, which uses [STAC v0.7.0](https://github.com/radiantearth/stac-spec/tree/v0.7.0). To install other versions of sat-search, specify the version in the call to pip.
```bash
pip install sat-search==0.2.0
```
The table below shows the corresponding versions between sat-search and STAC. Additional information can be found in the [CHANGELOG](CHANGELOG.md)
| sat-search | STAC |
| -------- | ---- |
| 0.1.x | 0.5.x - 0.6.x |
| 0.2.x | 0.5.x - 0.7.x |
| 0.3.x | 0.9.x |
## Using sat-search
With sat-search you can search a STAC compliant API with full querying support (if supported by the API). Search results can be saved as a GeoJSON file then loaded later. Assets can be downloaded by the asset key, or "color" (common_name of the band) if provided.
Sat-search is a Python 3 library that can be incorporated into other applications. A [Jupyter notebook tutorial](tutorial-1.ipynb) is included that covers all the main features of the library.
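As a rough illustration of library use, the sketch below mirrors the CLI options documented later in this README. It assumes the `Search` class described in the tutorial (along with `found()`, `items()` and `Items.save()`); exact names and parameters may differ between versions, so treat the notebook as the authoritative reference.
```python
# Hypothetical sketch of library usage -- see tutorial-1.ipynb for the real API.
from satsearch import Search

# Parameters mirror the CLI options (bbox as min lon, min lat, max lon, max lat;
# datetime as a single value or a slash-separated range).
search = Search(bbox=[-110, 39.5, -105, 40.5],
                datetime='2018-02-01/2018-02-28')

print('%s items found' % search.found())  # like `sat-search search --found`

items = search.items()       # fetch the matching Items
items.save('scenes.json')    # same GeoJSON that `--save` writes, loadable with `sat-search load`
```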
Sat-search also comes with a Command Line Interface (CLI), which is explained more below.
#### The CLI
The sat-search CLI has an extensive online help that can be printed with the `-h` switch.
```
$ sat-search -h
usage: sat-search [-h] {search,load} ...
sat-search (v0.2.0)
positional arguments:
{search,load}
search Perform new search of items
load Load items from previous search
optional arguments:
-h, --help show this help message and exit
```
As can be seen there are two subcommands, `search` and `load`, each of which has its own help.
#### `search`
```
$ sat-search search -h
usage: sat-search search [-h] [--version] [-v VERBOSITY]
[--print-md [PRINTMD [PRINTMD ...]]] [--print-cal]
[--save SAVE] [-c COLLECTION] [--ids [IDS [IDS ...]]]
[--bbox BBOX BBOX BBOX BBOX]
[--intersects INTERSECTS] [--datetime DATETIME]
[--sort [SORT [SORT ...]]] [--found]
[-p [PROPERTY [PROPERTY ...]]] [--url URL]
optional arguments:
-h, --help show this help message and exit
--version Print version and exit
-v VERBOSITY, --verbosity VERBOSITY
0:quiet, 1:error, 2:warning, 3:info, 4:debug (default: 2)
output options:
--print-md [PRINTMD [PRINTMD ...]]
Print specified metadata for matched scenes (default: None)
--print-cal Print calendar showing dates (default: False)
--save SAVE Save results as GeoJSON (default: None)
search options:
-c COLLECTION, --collection COLLECTION
Name of collection (default: None)
--ids [IDS [IDS ...]]
One or more scene IDs from provided collection
(ignores other parameters) (default: None)
--bbox BBOX BBOX BBOX BBOX
Bounding box (min lon, min lat, max lon, max lat)
(default: None)
--intersects INTERSECTS
GeoJSON Feature (file or string) (default: None)
--datetime DATETIME Single date/time or begin and end date/time (e.g.,
2017-01-01/2017-02-15) (default: None)
--sort [SORT [SORT ...]]
Sort by fields (default: None)
--found Only output how many Items found (default: False)
-p [PROPERTY [PROPERTY ...]], --property [PROPERTY [PROPERTY ...]]
Properties of form KEY=VALUE (<, >, <=, >=, =
supported) (default: None)
--url URL URL of the API (default: https://n34f767n91.execute-
api.us-east-1.amazonaws.com/prod)
--headers HEADERS
JSON Request Headers (file or string) (default: None)
```
**Search options**
- **collection** - Search only a specific collection. This is a shortcut; the collection can also be provided as a property (e.g., `-p "collection=landsat-8-l1"`)
- **ids** - Fetch the Item for the provided IDs in the given collection (collection must be provided). All other search options will be ignored.
- **intersects** - Provide a GeoJSON Feature string or the name of a GeoJSON file containing a single Feature that is a Polygon of an AOI to be searched.
- **datetime** - Provide a single partial or full datetime (e.g., 2017, 2017-10, 2017-10-11, 2017-10-11T12:00), or two separated by a slash that defines a range, e.g., 2017-01-01/2017-06-30 will search for scenes acquired in the first 6 months of 2017.
- **property** - Allows searching for any other scene properties by providing the pair as a string (e.g. `-p "landsat:row=42"`, `-p "eo:cloud_cover<10"`). Supported symbols include: =, <, >, >=, and <=
- **sort** - Sort by specific properties in ascending or descending order. A list of properties can be provided, which will be used for sorting in that order of preference. By default a property is sorted in descending order. To specify the order, the property can be preceded with '<' (ascending) or '>' (descending), e.g., `--sort ">datetime" "<eo:cloud_cover"` will sort by descending date, then by ascending cloud cover (see the combined example after this list)
- **found** - This will print out the total number of scenes found, then exit without fetching the actual items.
- **url** - The URL endpoint of a STAC compliant API, this can also be set with the environment variable SATUTILS_API_URL
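For example, several of these options can be combined in a single invocation. The following hypothetical session first counts low-cloud scenes over a bounding box for the first half of 2017, then saves the matching Items (all flags are documented above):
```
$ sat-search search --bbox -110 39.5 -105 40.5 --datetime 2017-01-01/2017-06-30 -p "eo:cloud_cover<10" --found
$ sat-search search --bbox -110 39.5 -105 40.5 --datetime 2017-01-01/2017-06-30 -p "eo:cloud_cover<10" --sort ">datetime" --save scenes.json
```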
**Output options**
These options control what to do with the search results, multiple switches can be provided.
- **print-md** - Prints a list of specific metadata fields for all the scenes. If given without any arguments it will print a list of the dates and scene IDs. Otherwise it will print a list of fields that are provided. (e.g., --print-md date eo:cloud_cover eo:platform will print a list of date, cloud cover, and the satellite platform such as WORLDVIEW03)
- **print-cal** - Prints a text calendar (see the image below) with specific days colored depending on the platform of the scene (e.g. landsat-8), along with a legend.
- **save** - Saves results as a FeatureCollection. The FeatureCollection 'properties' contains all of the arguments used in the search, and the 'features' contain all of the individual scenes, with individual scene metadata merged with collection-level metadata (metadata fields that are the same across all scenes in one collection, such as eo:platform)

#### `load`
Scenes that were previously saved with `sat-search search --save ...` can be loaded with the `load` subcommand.
```
$ sat-search load -h
usage: sat-search load [-h] [--version] [-v VERBOSITY]
[--print-md [PRINTMD [PRINTMD ...]]] [--print-cal]
[--save SAVE] [--datadir DATADIR] [--filename FILENAME]
[--download [DOWNLOAD [DOWNLOAD ...]]]
items
positional arguments:
items GeoJSON file of Items
optional arguments:
-h, --help show this help message and exit
--version Print version and exit
-v VERBOSITY, --verbosity VERBOSITY
0:quiet, 1:error, 2:warning, 3:info, 4:debug (default:
2)
output options:
--print-md [PRINTMD [PRINTMD ...]]
Print specified metadata for matched scenes (default:
None)
--print-cal Print calendar showing dates (default: False)
--save SAVE Save results as GeoJSON (default: None)
download options:
--datadir DATADIR Directory pattern to save assets (default:
./${eo:platform}/${date})
--filename FILENAME Save assets with this filename pattern based on
metadata keys (default: ${id})
--download [DOWNLOAD [DOWNLOAD ...]]
Download assets (default: None)
```
Note that while the search options are gone, output options are still available and can be used with the search results loaded from the file. There is also a new series of options for downloading data.
#### Downloading assets
When loading results from a file, the user now has the option to download assets from the scenes.
**Download options**
These control the downloading of assets. Both datadir and filename can include metadata patterns that will be substituted per scene.
- **datadir** - This specifies where downloaded assets will be saved (see the example after this list). It can also be specified by setting the environment variable SATUTILS_DATADIR.
- **filename** - The name of the file to save. It can also be set by setting the environment variable SATUTILS_FILENAME
- **download** - Provide a list of keys to download these assets. More information on downloading data is provided below.
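As referenced above, these defaults can also be set once through the environment instead of being passed on every invocation. A hypothetical session (the values mirror the documented defaults; single quotes keep the shell from expanding the metadata patterns):
```
$ export SATUTILS_DATADIR='./${eo:platform}/${date}'
$ export SATUTILS_FILENAME='${id}'
$ sat-search load scenes.json --download thumbnail
```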
**Metadata patterns**
Metadata patterns can be used in **datadir** and **filename** in order to have custom paths and filenames based on the Item metadata. For instance, specifying datadir as "./${eo:platform}/${date}" will save assets for each Item under directories of the platform and the date, so a landsat-8 Item from June 20, 2018 will have its assets saved in a directory './landsat-8/2018-06-20'. For filenames these work exactly the same way, except the filename will contain a suffix containing the asset key and the appropriate extension.
```
sat-search load scenes.json --download thumbnail MTL
```
In this case the defaults for `datadir` ("./${eo:platform}/${date}") and `filename` ("${id}") are used, so the downloaded files are saved like this:
```
landsat-8/
└── 2018-10-02
├── LC80090292018275LGN00_MTL.txt
├── LC80090292018275LGN00_thumbnail.jpg
├── LC80090302018275LGN00_MTL.txt
└── LC80090302018275LGN00_thumbnail.jpg
```
A shortcut to download all of the assets is available by providing "ALL" as the key to download. This will download every asset for every item.
```
sat-search load scenes.json --download ALL
```
## Tutorial
This [Jupyter notebook tutorial](tutorial-1.ipynb) covers all the main features of the library.
## About
sat-search was created by [Development Seed](<http://developmentseed.org>) and is part of a collection of tools called [sat-utils](https://github.com/sat-utils).
| /sat-search-0.3.0rc1.tar.gz/sat-search-0.3.0rc1/README.md | 0.525612 | 0.939471 | README.md | pypi |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
XMPP Result Set Management protocol.
This protocol is specified in
U{XEP-0059<http://xmpp.org/extensions/xep-0059.html>}.
"""
from twisted.words.xish import domish
from twisted.words.protocols.jabber import error
from . import pubsub
import copy
NS_RSM = 'http://jabber.org/protocol/rsm'
class RSMError(error.StanzaError):
"""
RSM error.
"""
def __init__(self, text=None):
error.StanzaError.__init__(self, 'bad-request', text=text)
class RSMNotFoundError(Exception):
"""
An expected RSM element has not been found.
"""
class RSMRequest(object):
"""
A Result Set Management request.
@ivar max_: limit on the number of retrieved items.
@itype max_: C{int} or C{unicode}
@ivar index: starting index of the requested page.
@itype index: C{int} or C{unicode} or C{None}
@ivar after: ID of the element immediately preceding the page.
@itype after: C{unicode}
@ivar before: ID of the element immediately following the page.
@itype before: C{unicode}
"""
def __init__(self, max_=10, after=None, before=None, index=None):
self.max = int(max_)
if index is not None:
assert after is None and before is None
index = int(index)
self.index = index
if after is not None:
assert before is None
assert isinstance(after, str)
self.after = after
if before is not None:
assert isinstance(before, str)
self.before = before
def __str__(self):
return "RSM Request: max={0.max} after={0.after} before={0.before} index={0.index}".format(self)
@classmethod
def fromElement(cls, element):
"""Parse the given request element.
@param element: request containing a set element, or set element itself.
@type element: L{domish.Element}
@return: RSMRequest instance.
@rtype: L{RSMRequest}
"""
if element.name == 'set' and element.uri == NS_RSM:
set_elt = element
else:
try:
set_elt = next(element.elements(NS_RSM, 'set'))
except StopIteration:
raise RSMNotFoundError()
try:
before_elt = next(set_elt.elements(NS_RSM, 'before'))
except StopIteration:
before = None
else:
before = str(before_elt)
try:
after_elt = next(set_elt.elements(NS_RSM, 'after'))
except StopIteration:
after = None
else:
after = str(after_elt)
if not after:
raise RSMError("<after/> element can't be empty in RSM request")
try:
max_elt = next(set_elt.elements(NS_RSM, 'max'))
except StopIteration:
# FIXME: even if it doesn't make a lot of sense without it
# <max/> element is not mandatory in XEP-0059
raise RSMError("RSM request is missing its 'max' element")
else:
try:
max_ = int(str(max_elt))
except ValueError:
raise RSMError("bad value for 'max' element")
try:
index_elt = next(set_elt.elements(NS_RSM, 'index'))
except StopIteration:
index = None
else:
try:
index = int(str(index_elt))
except ValueError:
raise RSMError("bad value for 'index' element")
return RSMRequest(max_, after, before, index)
def toElement(self):
"""
Return the DOM representation of this RSM request.
@rtype: L{domish.Element}
"""
set_elt = domish.Element((NS_RSM, 'set'))
set_elt.addElement('max', content=str(self.max))
if self.index is not None:
set_elt.addElement('index', content=str(self.index))
if self.before is not None:
if self.before == '': # request the last page
set_elt.addElement('before')
else:
set_elt.addElement('before', content=self.before)
if self.after is not None:
set_elt.addElement('after', content=self.after)
return set_elt
def render(self, element):
"""Embed the DOM representation of this RSM request in the given element.
@param element: Element to contain the RSM request.
@type element: L{domish.Element}
@return: RSM request element.
@rtype: L{domish.Element}
"""
set_elt = self.toElement()
element.addChild(set_elt)
return set_elt
class RSMResponse(object):
"""
A Result Set Management response.
@ivar first: ID of the first element of the returned page.
@itype first: C{unicode}
@ivar last: ID of the last element of the returned page.
@itype last: C{unicode}
@ivar index: starting index of the returned page.
@itype index: C{int}
@ivar count: total number of items.
@itype count: C{int}
"""
def __init__(self, first=None, last=None, index=None, count=None):
if first is None:
assert last is None and index is None
if last is None:
assert first is None
self.first = first
self.last = last
if count is not None:
self.count = int(count)
else:
self.count = None
if index is not None:
self.index = int(index)
else:
self.index = None
def __str__(self):
return "RSM Request: first={0.first} last={0.last} index={0.index} count={0.count}".format(self)
@classmethod
def fromElement(cls, element):
"""Parse the given response element.
@param element: response element.
@type element: L{domish.Element}
@return: RSMResponse instance.
@rtype: L{RSMResponse}
"""
try:
set_elt = next(element.elements(NS_RSM, 'set'))
except StopIteration:
raise RSMNotFoundError()
try:
first_elt = next(set_elt.elements(NS_RSM, 'first'))
except StopIteration:
first = None
index = None
else:
first = str(first_elt)
try:
index = int(first_elt['index'])
except KeyError:
index = None
except ValueError:
raise RSMError("bad index in RSM response")
try:
last_elt = next(set_elt.elements(NS_RSM, 'last'))
except StopIteration:
if first is not None:
raise RSMError("RSM response is missing its 'last' element")
else:
last = None
else:
if first is None:
raise RSMError("RSM response is missing its 'first' element")
last = str(last_elt)
try:
count_elt = next(set_elt.elements(NS_RSM, 'count'))
except StopIteration:
count = None
else:
try:
count = int(str(count_elt))
except ValueError:
raise RSMError("invalid count in RSM response")
return RSMResponse(first, last, index, count)
def toElement(self):
"""
Return the DOM representation of this RSM response.
@rtype: L{domish.Element}
"""
set_elt = domish.Element((NS_RSM, 'set'))
if self.first is not None:
first_elt = set_elt.addElement('first', content=self.first)
if self.index is not None:
first_elt['index'] = str(self.index)
set_elt.addElement('last', content=self.last)
if self.count is not None:
set_elt.addElement('count', content=str(self.count))
return set_elt
def render(self, element):
"""Embed the DOM representation of this RSM response in the given element.
@param element: Element to contain the RSM response.
@type element: L{domish.Element}
@return: RSM response element.
@rtype: L{domish.Element}
"""
set_elt = self.toElement()
element.addChild(set_elt)
return set_elt
def toDict(self):
"""Return a dict representation of the object.
@return: a dict of strings.
@rtype: C{dict} binding C{unicode} to C{unicode}
"""
result = {}
for attr in ('first', 'last', 'index', 'count'):
value = getattr(self, attr)
if value is not None:
result[attr] = value
return result
class PubSubRequest(pubsub.PubSubRequest):
"""PubSubRequest extension to handle RSM.
@ivar rsm: RSM request instance.
@type rsm: L{RSMRequest}
"""
rsm = None
_parameters = copy.deepcopy(pubsub.PubSubRequest._parameters)
_parameters['items'].append('rsm')
def _parse_rsm(self, verbElement):
try:
self.rsm = RSMRequest.fromElement(verbElement.parent)
except RSMNotFoundError:
self.rsm = None
def _render_rsm(self, verbElement):
if self.rsm:
self.rsm.render(verbElement.parent)
class PubSubClient(pubsub.PubSubClient):
"""PubSubClient extension to handle RSM."""
_request_class = PubSubRequest
def items(self, service, nodeIdentifier, maxItems=None,
subscriptionIdentifier=None, sender=None, itemIdentifiers=None,
orderBy=None, rsm_request=None):
"""
Retrieve previously published items from a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param maxItems: Optional limit on the number of retrieved items.
@type maxItems: C{int}
@param subscriptionIdentifier: Optional subscription identifier. In
case the node has been subscribed to multiple times, this narrows
the results to the specific subscription.
@type subscriptionIdentifier: C{unicode}
@param itemIdentifiers: Identifiers of the items to be retrieved.
@type itemIdentifiers: C{set}
@param orderBy: Keys to order by
@type orderBy: L{list} of L{unicode}
@param rsm_request: RSM request instance.
@type rsm_request: L{RSMRequest}
@return: a Deferred that fires a C{list} of C{tuple} of L{domish.Element}, L{RSMResponse}.
@rtype: L{defer.Deferred}
"""
# XXX: we have to copy initial method instead of calling it,
# as original cb remove all non item elements
request = self._request_class('items')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
if maxItems:
request.maxItems = str(int(maxItems))
request.subscriptionIdentifier = subscriptionIdentifier
request.sender = sender
request.itemIdentifiers = itemIdentifiers
request.orderBy = orderBy
request.rsm = rsm_request
def cb(iq):
items = []
pubsub_elt = iq.pubsub
if pubsub_elt.items:
for element in pubsub_elt.items.elements(pubsub.NS_PUBSUB, 'item'):
items.append(element)
try:
rsm_response = RSMResponse.fromElement(pubsub_elt)
except RSMNotFoundError:
rsm_response = None
return (items, rsm_response)
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
class PubSubService(pubsub.PubSubService):
"""PubSubService extension to handle RSM."""
_request_class = PubSubRequest
def _toResponse_items(self, elts, resource, request):
# default method only manage <item/> elements
# but we need to add RSM set element
rsm_elt = None
for idx, elt in enumerate(reversed(elts)):
if elt.name == "set" and elt.uri == NS_RSM:
rsm_elt = elts.pop(-1-idx)
break
response = pubsub.PubSubService._toResponse_items(self, elts,
resource, request)
if rsm_elt is not None:
response.addChild(rsm_elt)
return response
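# Hedged usage sketch (not part of the original module): build an RSM request
# for a 20-item page starting after a known item id, then inspect a matching
# response. The item ids below are placeholders.
if __name__ == "__main__":
    request = RSMRequest(max_=20, after="item-0042")
    print(request.toElement().toXml())
    response = RSMResponse(first="item-0043", last="item-0062", index=42, count=100)
    print(response.toDict())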
| /sat_tmp-0.8.0b1-py3-none-any.whl/sat_tmp/wokkel/rsm.py | 0.680985 | 0.213623 | rsm.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
import numpy as np
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import *
class IndexSet:
numvars: int
def __init__(self):
self.numvars = 0
self._fieldnames = set()
def add_index_array(self, name: str, shape: Tuple[int, ...]):
length = np.prod(shape, dtype=np.int32)
res = np.arange(self.numvars + 1, self.numvars + 1 + length, dtype=np.int32)
res = res.reshape(shape)
res.flags.writeable = 0
self.numvars += length
self._fieldnames.add(name)
setattr(self, name, res)
def describe_idx_array(self, index_array: np.ndarray):
"""
convenience function to return the underlying array name and unraveled
index for each linear index in `index_array`.
"""
variables = {k: v for k, v in vars(self).items() if isinstance(v, np.ndarray)}
if np.any((index_array < 0) | (index_array >= self.numvars + 1)):
raise IndexError("index out of bounds")
res = [None] * np.prod(index_array.shape, dtype=int)
for i, needle in enumerate(index_array.flatten()):
if needle == self.numvars:
res[i] == "1"
continue
for k, v in variables.items():
start, stop = v.flatten()[[0, -1]]
rng = range(start, stop + 1)
if needle in rng:
idx = np.unravel_index(rng.index(needle), v.shape)
res[i] = k + str(list(idx))
# res[i] = str(f'{idx[1]}{idx[2]}')
break
else:
assert False, f"index {needle} not found?"
return np.array(res, dtype=object).reshape(index_array.shape)
def __repr__(self):
res = f"{self.__class__.__name__}(\n"
for fieldname in self._fieldnames:
field = getattr(self, fieldname)
min_val = field.ravel()[0]
max_val = field.ravel()[-1]
res += f" {fieldname} = np.arange({min_val}, {max_val + 1}).reshape({field.shape!r})\n"
res += ")"
return res
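# Hedged usage sketch (not part of the original module): allocate variable
# indices for two named arrays and map a slice of linear indices back to
# array coordinates. The names and shapes are arbitrary examples.
if __name__ == "__main__":
    idx = IndexSet()
    idx.add_index_array("state", (2, 3))
    idx.add_index_array("key", (4,))
    print(idx)
    print(idx.describe_idx_array(idx.state[:, :2]))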
| /sat-toolkit-0.2.1.tar.gz/sat-toolkit-0.2.1/sat_toolkit/util.py | 0.842798 | 0.26977 | util.py | pypi |
import ssl
from os import getenv
from pathlib import Path
from ldap3 import Connection, Server, Tls
def get_connection():
"""
Provide an insecure client that allows us to read
NCSU LDAP based people attributes
"""
try:
tls = Tls(validate=ssl.CERT_NONE, version=ssl.PROTOCOL_TLSv1_2)
server = Server("ldap.ncsu.edu", use_ssl=True, tls=tls)
return Connection(server, auto_bind=True)
except Exception as ex:
print("This went off like a SpaceX launch.", ex)
def get_secure_connection(**settings):
"""
Provide a secure client that allows us to read LDAP based
people attributes from any LDAP store.
Parameters
----------
name : settings
A dictionary of values that define the ldap environment.
Should only contain the parameters used by the function.
name: server
The ldap host to connect to.
port:
The TCP port the ldap server is listening on.
username:
The ldap user to specify in the bind operation.
password:
The password associated to the bind user specified
in the username parameter.
Usage:
------
get_connection(server='dilbert', port=1000)
or
settings = {"server': 'dilbert', 'port': 1000}
get_connection(**settings)
"""
_server = None
_tls = None
try:
server_name = settings.get("server", getenv("server")) or "localhost"
tcp_port = settings.get("port", getenv("port")) or 389
ldap_user = settings.get("username", getenv("username")) or None
ldap_password = settings.get("password", getenv("password")) or None
if Path("intermediate.pem"):
_tls = Tls(
ciphers="ALL",
local_certificate_file="intermediate.pem",
validate=ssl.CERT_REQUIRED,
version=ssl.PROTOCOL_TLS,
)
_tls = Tls(ciphers="ALL", validate=ssl.CERT_NONE, version=ssl.PROTOCOL_TLS)
_server = Server(host=server_name, port=tcp_port, use_ssl=True, tls=_tls)
if not ldap_user:
return Connection(server=_server)
return Connection(server=_server, user=ldap_user, password=ldap_password, auto_bind=True)
except Exception as ex:
print("We encountered an error saving the universe.\n", ex)
def get_user_attributes(unity_id: str):
"""
Returns a dictionary that contains
the name, unity id, campus id and job title
for the person
Parameters
----------
name : unity_id
The unity id for a campus person
"""
_attributes = ["cn", "uid", "uidNumber", "title"]
try:
with get_connection() as conn:
conn.search("ou=people,dc=ncsu,dc=edu", f"(uid={unity_id})", attributes=_attributes)
person_data = {}
for e in conn.entries:
for attr in str(e).split("\n"):
if "DN" in attr:
continue
if "cn" in attr:
person_data["name"] = attr.split(":")[1].strip()
if "title" in attr:
person_data["title"] = attr.split(":")[1].strip()
if "uid" in attr:
person_data["unity_id"] = attr.split(":")[1].strip()
if "uidNumber" in attr:
person_data["campus_id"] = attr.split(":")[1].strip()
return person_data
except Exception as ex:
print("Hold on while we try that extension\n", ex)
| /sat-utils-1.1.10.tar.gz/sat-utils-1.1.10/sat/ldap.py | 0.584508 | 0.223165 | ldap.py | pypi |
# %% Imports
# Standard Library Imports
import warnings
# Third Party Imports
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from matplotlib.figure import Figure
from numpy import empty
# %% Main Function
def plotSchedule(
availability: list[list[list[tuple[float]]]],
target_labels: list[str],
sensor_labels: list[str],
fig: Figure,
scheduled: list[list[list[tuple]]] = None,
scheduled_targ_labels: list[str] = None,
scheduled_sensor_labels: list[str] = None,
) -> Figure:
"""Generates schedule availability plot for N targets and M sensors.
Args:
availability (`list[list[list[tuple[float]]]]`): [M[N[d]]] 3-deep
nested `list`. The lowest level contains a `tuple` of two `floats`
representing the start time and duration, respectively, of an availability
window. N must be the same for every M (all 1st-level nested lists
must be the same length). d does not need to be the same value for
every N (2nd-level nested lists don't need to be the same length).
target_labels (`list[str]`): N-long list of target names.
sensor_labels (`list[str]`): M-long list of sensor names.
fig (`Figure`): A matplotlib `Figure`.
scheduled (`list`): (Optional) Same format as availability. Defaults
to `None`.
scheduled_targ_labels (`list[str]`): (Optional) Q-long list of
strings of target names. Must be a subset of entries in
`target_labels`. Q must be less than N.
scheduled_sensor_labels (`list[str]`): (Optional) P-long list of
strings of sensor names. Must be a subset of entries in
`sensor_labels`. P must be less than M.
Returns:
`Figure`: Schedule availability plot as a `broken_barh` plot
from matplotlib. Colored blocks are the data from 'availability',
black bars are the data from 'scheduled'.
Notation:
M = number of sensors
N = number of targets
P = number of scheduled sensors
Q = number of scheduled targets
d = number of access windows for a given sensor-target pair
Notes:
If `scheduled` is not `None`, then `scheduled_targ_labels` and
`scheduled_sensor_labels` are required.
Example Usage:
f = plt.figure()
avail = [
[[(2, 1)], [(4, 1)]], # access windows for Sensor A
[[], [(2, 3)]] # access windows for Sensor B
]
target_labels = ['1', '2']
sensor_labels = ['A', 'B']
f = plotSchedule(avail, target_labels, sensor_labels, f)
plt.show()
"""
# Warnings
if availability == []:
warnings.warn("No schedule availability. Plot not generated. ")
return
if type(target_labels) != list:
warnings.warn("target_labels must be a list")
return
if type(sensor_labels) != list:
warnings.warn("sensor_labels must be a list")
return
# Generate schedule availability (colored) bars
fig = genPlot(availability, target_labels, sensor_labels, fig, "wide", None, None)
# Generate scheduled (black) bars
if scheduled is not None:
# pad scheduled list with empty lists to be same dimensions as availability list
padded_sched = padSchedList(
availability,
target_labels,
sensor_labels,
scheduled,
scheduled_targ_labels,
scheduled_sensor_labels,
)
fig = genPlot(
padded_sched,
target_labels,
sensor_labels,
fig,
"thin",
scheduled_targ_labels,
scheduled_sensor_labels,
)
return fig
# %% Supporting Functions
def padSchedList(
super_list: list[list[list[tuple]]],
super_targs: list[str],
super_sens: list[str],
small_list: list[list[list[tuple]]],
small_targs: list[str],
small_sens: list[str],
) -> list[list[list[tuple]]]:
"""Pads `small_list` to be same dimensions as `super_list`.
Args:
super_list (`list[list[list[tuple]]]`): Availability.
super_targs (`list[str]`): Names of all targets in `super_list`.
super_sens (`list[str]`): Names of all sensors in `super_list`.
small_list (`list[list[list[tuple]]]`): Scheduled.
small_targs (`list[str]`): Names of all targets in `small_list`.
small_sens (`list[str]`): Names of all sensors in `small_list`.
Returns:
`list[list[list[tuple]]]`: Same dimensions as `super_list`, but with
contents of `small_list` corresponding to specified targets and
sensors. Entries corresponding to targets/sensors not included
in `small_list` are empty lists.
"""
padded_list = [[] for x in range(len(super_list))]
index_small = 0
for i, (sens, dat) in enumerate(zip(super_sens, super_list)):
# print(i, sens, dat)
if sens in small_sens:
padded_list[i] = small_list[index_small]
index_small += 1
# print(padded_list)
return padded_list
def genPlot(
dat: list[list[list[tuple[float]]]],
target_labels: list[str],
sensor_labels: list[str],
fig: Figure,
flag: str,
sched_targ_labels: list[str],
sched_sensor_labels: list[str],
):
"""Workhorse function for plotSchedule(). Generates broken_barh plots.
Args:
dat (`list[list[list[tuple[float]]]]`): [M[N[d]]] 3-deep
nested `list`. The lowest level contains a `tuple` of two `floats`
representing the start time and duration, respectively, of an availability
window. N must be the same for every M (all 1st-level nested lists
must be the same length). d does not need to be the same value for
every N (2nd-level nested lists don't need to be the same length).
target_labels (`list[str]`): N-long list of target names.
sensor_labels (`list[str]`): M-long list of sensor names.
fig (`Figure`): matplotlib `Figure`.
flag (`str`): 'wide' or 'thin' to set whether to create wide colored
bars or thin black bars.
sched_targ_labels (`list[str]`): Q-long list of
strings of target names. Must be a subset of entries in
`target_labels`. Q must be less than N.
sched_sensor_labels (`list[str]`): P-long list of
strings of sensor names. Must be a subset of entries in
`sensor_labels`. P must be less than M.
Returns:
`Figure`: Matplotlib broken_barh plot of access windows.
"""
# pick any colormap
cm = get_cmap(name="gist_rainbow")
# number of sets (number of sensors)
num_sensors = len(sensor_labels)
# number boxes per set (number of targets)
num_targets = len(target_labels)
# bar widths
w_big = 1
w_small = w_big / num_targets
w_vsmall = w_small / 2
# bar y-offsets
y_big = 1.5 * w_big
y_small = w_big / num_targets
y_vec = empty([num_sensors])
# set values depending on wide/thin flag
if flag == "wide":
# add subfigure to empty figure
ax = fig.add_subplot(1, 1, 1)
# extra offset multiplier
x = 0
# alpha value (transparency for plot)
al = 0.5
# width of a set of bars associated with one sensor
w_set = w_big / num_targets
def chooseCol(i):
return cm(i / num_targets)
elif flag == "thin":
# black bars must be overlaid on colored bars (no stand-alone black
# bars)
ax = plt.gca()
x = 1
al = 1
w_set = 0.5 * (w_big / num_targets)
def chooseCol(i):
return "black"
# loop through y-axis (sensors)
for j, sens in enumerate(sensor_labels):
# set y-offset per grouping of bars
y_offset = y_big * j + x * (w_vsmall / 2)
# save y-offset value for plotting
y_vec[j] = y_offset
# loop through each color bar
for i, targ in enumerate(target_labels):
if flag == "wide":
plt.broken_barh(
dat[j][i],
(y_offset + (i * y_small), w_set),
facecolor=chooseCol(i),
edgecolor="black",
alpha=al,
)
elif (
(flag == "thin") and (sens in sched_sensor_labels) and (targ in sched_targ_labels)
):
plt.broken_barh(
dat[j][i],
(y_offset + (i * y_small), w_set),
facecolor=chooseCol(i),
edgecolor="black",
alpha=al,
)
ax.set_yticks(y_vec + w_big / 2)
ax.set_yticklabels(sensor_labels)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Sensor ID")
ax.set_title("Schedule Availability")
leg1 = plt.legend(target_labels, title="Target ID", loc=1)
ax.add_artist(leg1)
if flag == "thin":
black_patch = mpatches.Patch(color="black", label="Scheduled")
leg2 = plt.legend(handles=[black_patch], loc=4)
ax.add_artist(leg2)
return fig
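# Hedged usage sketch (not part of the original module): reproduces the
# plotSchedule docstring example and overlays one scheduled (black) window.
# The sensor/target names and window times are made up.
if __name__ == "__main__":
    f = plt.figure()
    avail = [
        [[(2, 1)], [(4, 1)]],  # access windows for Sensor A
        [[], [(2, 3)]],        # access windows for Sensor B
    ]
    sched = [[[], [(2, 1)]]]   # Sensor B observes Target 2 for 1 second
    f = plotSchedule(
        avail,
        ["1", "2"],
        ["A", "B"],
        f,
        scheduled=sched,
        scheduled_targ_labels=["2"],
        scheduled_sensor_labels=["B"],
    )
    plt.show()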
| /sat_vis-0.0.3-py3-none-any.whl/sat_vis/schedule_plots.py | 0.867724 | 0.651341 | schedule_plots.py | pypi |
from typing import Tuple
# Third Party Imports
from intervaltree import IntervalTree
# %% Function
def intTree2WindowList(schedule_tree: IntervalTree) -> Tuple[list, list, list]:
"""Converts an `IntervalTree`s to an ingestible for `plotSchedule()`.
Notation:
M = number of sensors
N = number of targets
d = number of availability windows for a given sensor-target pair
Args:
schedule_tree (`IntervalTree`): Intervals of sensor-target pair availability.
The data field in each `Interval` must be a `dict` with the following keys:
{
"target_id": unique_name,
"sensor_id": unique_name,
}
Returns:
windows (`list`): [M x N x d] list of lists of lists of pairs
of tuples, where each nested list is a pair of tuples
representing the time at the start of an availability window, and
the duration of that window. d can be any length for each `m` and `n`,
so long as each entry is a pair of tuples. N must be consistent
for each M.
sensor_ids (`list`): List of IDs of sensors that have availability.
target_ids (`list`): List of IDs of targets that have availability.
Note that all target and sensor names in `schedule_tree` must be unique.
"""
# convert IntervalTree to list of Intervals
list_tree = list(schedule_tree.items())
# get targets/sensors for each Interval
target_vec = [interval.data["target_id"] for interval in list_tree]
sensor_vec = [interval.data["sensor_id"] for interval in list_tree]
# get unique lists of target/sensor names
target_ids = list(set(target_vec))
sensor_ids = list(set(sensor_vec))
# get number of unique targets/sensors
num_targets = len(target_ids)
num_sensors = len(sensor_ids)
# get start times and durations of each interval
start_vec = [interval.begin for interval in list_tree]
dur_vec = [interval.end - interval.begin for interval in list_tree]
# Assemble inputs for schedule plot
# initialize availability windows (list of lists)
windows = [[[] for j in range(num_targets)] for i in range(num_sensors)]
for i, sens_name in enumerate(sensor_ids):
# print(f'i={i}, sens_name={sens_name}')
# get indices of Intervals with the given sensor
indices = [ctr for ctr, x in enumerate(sensor_vec) if x == sens_name]
# print(f'indices={indices}')
# loop through all targets for each sensor
for j, targ_name in enumerate(target_ids):
# print(f'j={j}, targ_name={targ_name}')
# get indices of Intervals with given target
indices_target = [ctr for ctr, x in enumerate(target_vec) if x == targ_name]
# get intersection of target-indices and sensor-indices
intersection = [item for item in indices if item in indices_target]
# print(f'intersection={intersection}')
# Next, need to assign intervals that have sense_name in the
# data field to windows.
list_of_starts = list(map(start_vec.__getitem__, intersection))
list_of_durs = list(map(dur_vec.__getitem__, intersection))
list_of_sets = [[] for x in range(len(list_of_starts))]
for k, (start, dur) in enumerate(zip(list_of_starts, list_of_durs)):
list_of_sets[k] = (start, dur)
windows[i][j] = list_of_sets
return windows, sensor_ids, target_ids
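# Hedged usage sketch (not part of the original module): two availability
# windows for a single sensor/target pair, converted into the nested-list form
# expected by plotSchedule(). The names are placeholders.
if __name__ == "__main__":
    from intervaltree import Interval

    tree = IntervalTree([
        Interval(0.0, 10.0, {"target_id": "T1", "sensor_id": "S1"}),
        Interval(20.0, 25.0, {"target_id": "T1", "sensor_id": "S1"}),
    ])
    windows, sensor_ids, target_ids = intTree2WindowList(tree)
    print(sensor_ids, target_ids)
    print(windows)  # e.g. [[[(0.0, 10.0), (20.0, 25.0)]]] (window order may vary)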
| /sat_vis-0.0.3-py3-none-any.whl/sat_vis/int_tree_converter.py | 0.881194 | 0.632928 | int_tree_converter.py | pypi |
# %% Imports
# Third Party Imports
from intervaltree import IntervalTree
from numpy import ndarray, zeros
# Sat Vis Imports
from sat_vis.visibility_func import visibilityFunc, zeroCrossingFit
# %% getVisibility
def getVisHist(
targets: list[dict],
sensors: list[dict],
x_targets: ndarray,
x_sensors: ndarray,
time: list,
planet_radius: float,
) -> tuple[IntervalTree, ndarray]:
# TODO: Move to SatVis repo
"""Generate visibility function history between sensors and targets.
Args:
targets (`list[dict]`): N-length list of dicts. Each dict must include an
'id' field.
sensors (`list[dict]`): M-length list of dicts. Each dict must include an
'id' field.
x_targets (`ndarray`): [T x 6 x N] State history of targets. The 1st-
dimension of the array is the [6x1] ECI state vector [position,
velocity] in km and km/s, respectively.
x_sensors (`ndarray`): [T x 6 x M] State history of sensors. The 1st-
dimension of the array is the [6x1] ECI state vector [position,
velocity] in km and km/s, respectively.
time (`list`): [T x 1] list of times corresponding to state histories.
planet_radius (`float`): assume spherical, km
Returns:
rise_set_tree (`IntervalTree`): `IntervalTree` instance of class
representing all intervals for which target-sensor pairs
can see each other. If no target-sensor pairs can see each
other during the input time window, the `IntervalTree` is empty.
The data field of each entry (`rise_set_tree[#].data`) is a dict
with the following keys:
{
"target_id": target_id,
"sensor_id": sensor_id
}
vis (`ndarray`): [M x N x T] array of visibility function values for
all target-sensor pairs for all time.
Notation:
N = Number of targets
M = Number of sensors
T = Length of time vector
"""
num_sensors = len(sensors)
num_targets = len(targets)
# preallocate visibility array and list of intervals
vis = zeros([num_sensors, num_targets, len(time)])
rise_set_ints = []
# Calculate visibility function values of all sensor-target pairs
# loop through sensor-platform pairs
counter = 0
for i_sensor in range(num_sensors):
for i_sat in range(num_targets):
# sensor-target pair name for labelling interval tree
pair_name = getPairName(targets[i_sat], sensors[i_sensor])
# loop through time to calc visibility function
for i_time in range(len(time)):
r1 = x_sensors[i_time, :3, i_sensor]
r2 = x_targets[i_time, :3, i_sat]
# calc visibility function (ignore supplemental outputs)
[vis[i_sensor, i_sat, i_time], _, _, _] = visibilityFunc(
r1=r1,
r2=r2,
RE=planet_radius,
hg=0,
)
_, _, new_tree = zeroCrossingFit(vis[i_sensor, i_sat], time, pair_name)
# extend list of Intervals (note Intervals are not same as
# IntervalTree)
rise_set_ints.extend(list(new_tree))
counter += 1
rise_set_tree = IntervalTree(rise_set_ints)
return rise_set_tree, vis
def getPairName(target: dict, sensor: dict) -> dict:
"""Create a target-sensor pair ID dict.
Args:
target (`dict`): Target object
sensor (`dict`): Sensor object
Returns:
pair_name (`dict`): {'target_id': target_id, 'sensor_id': sensor_id}
"""
target_id = target["id"]
sensor_id = sensor["id"]
pair_name = {"target_id": target_id, "sensor_id": sensor_id}
return pair_name
| /sat_vis-0.0.3-py3-none-any.whl/sat_vis/vis_history.py | 0.734405 | 0.71695 | vis_history.py | pypi |
import base64
import logging
import time
from datetime import datetime, timedelta
from importlib import resources
from uuid import uuid1
import xmltodict
from OpenSSL import crypto
from . import templates
from . import utils
import urllib
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
TEMPLATES = {
"Envelope": resources.read_text(templates.common, "Envelope.xml"),
"KeyInfo": resources.read_text(templates.common, "KeyInfo.xml"),
"Signature": resources.read_text(templates.common, "Signature.xml"),
"SignedInfo": resources.read_text(templates.common, "SignedInfo.xml"),
"Timestamp": resources.read_text(templates.login, "Timestamp.xml"),
"LoginEnvelope": resources.read_text(templates.login, "Envelope.xml"),
"SolicitaDescarga": resources.read_text(templates.query, "SolicitaDescarga.xml"),
"VerificaSolicitudDescarga": resources.read_text(
templates.verify, "VerificaSolicitudDescarga.xml"
),
"PeticionDescargaMasivaTercerosEntrada": resources.read_text(
templates.download, "PeticionDescargaMasivaTercerosEntrada.xml"
),
}
def ensure_login(f):
def wrapper(*args, **kwargs):
self = args[0]
self.login()
res = f(*args, **kwargs)
return res
return wrapper
class SAT:
"""Class to make a connection to the SAT"""
class DownloadType:
"""Helper to select the download type"""
ISSUED = "RfcEmisor"
RECEIVED = "RfcReceptor"
class RequestType:
"""Helper to select the request type"""
CFDI = "CFDI"
METADATA = "Metadata"
class NoRFCException(Exception):
"""Raised if no valid RFC is found in the certificate"""
class NoIssuerException(Exception):
"""Raised if no valid Issuer is found in the certificate"""
class RequestException(Exception):
"""Raised if there is a problem in the request"""
class QueryException(Exception):
"""Raised if the query is not valid"""
cert = None
key = None
password = None
key_pem = None
cert_pem = None
certificate = None
rfc = None
token = None
token_expires = None
def __init__(self, cert: bytes, key: bytes, password: str) -> None:
"""Loads the certificate, key file and password to stablish the connection to the SAT
Creates a object to manage the SAT connection.
Args:
cert (bytes): DER Certificate in raw binary
key (bytes): DER Key Certificate in raw binary
password (str): Key password in plain text (utf-8)
"""
self.cert = utils.binary_to_utf8(cert)
self.key = utils.binary_to_utf8(key)
self.password = password
self._load_certs()
self._compute_data_from_cert()
_logger.info("Data correctly loaded")
def _load_certs(self):
"""Loads the PEM version of the certificate and key file, also loads the crypto certificate
Convert the `cert` and `key` from DER to PEM and creates the real certificate (X509)
"""
self.key_pem = utils.der_to_pem(self.key, type="ENCRYPTED PRIVATE KEY")
self.cert_pem = utils.der_to_pem(self.cert, type="CERTIFICATE")
self.certificate = crypto.load_certificate(crypto.FILETYPE_PEM, self.cert_pem)
def _compute_data_from_cert(self):
"""Gets the RFC and Issuer directly from the certificate"""
self._get_rfc_from_cert()
self._get_issuer_from_cert()
def _get_rfc_from_cert(self):
"""Gets the RFC from the certificate
Raises:
NoRFCException: If no RFC is found
"""
subject_components = self.certificate.get_subject().get_components()
for c in subject_components:
if c[0] == b"x500UniqueIdentifier":
self.rfc = c[1].decode("UTF-8").split(" ")[0]
_logger.debug(f"RFC {self.rfc} loaded")
break
else:
raise self.NoRFCException()
def _get_issuer_from_cert(self):
"""Gets the Issuer from the certificate
Raises:
NoIssuerException: If no Issuer is found
"""
self.certificate.issuer = ",".join(
[
f'{c[0].decode("UTF-8")}={urllib.parse.quote(c[1].decode("UTF-8"))}'
for c in self.certificate.get_issuer().get_components()
]
)
if not self.certificate.issuer:
raise self.NoIssuerException()
_logger.debug(f"Issuer {self.certificate.issuer} loaded")
def _token_expired(self) -> bool:
"""Checks whether the cached token is missing or has already expired
Returns:
bool: True if there is no token or it has expired
"""
if not self.token or not self.token_expires:
_logger.debug("Token expired")
return True
return self.token_expires <= datetime.utcnow()
def _create_common_envelope(self, template: str, data: dict) -> str:
_logger.debug("Creating Envelope")
_logger.debug(f"{template}")
_logger.debug(f"{data}")
query_data, query_data_signature = utils.prepare_template(template, data)
digest_value = utils.digest(query_data)
signed_info = utils.prepare_template(
TEMPLATES["SignedInfo"],
{
"uri": "",
"digest_value": digest_value,
},
)
key_info = utils.prepare_template(
TEMPLATES["KeyInfo"],
{
"issuer_name": self.certificate.issuer,
"serial_number": self.certificate.get_serial_number(),
"certificate": self.cert,
},
)
signature_value = self.sign(signed_info)
signature = utils.prepare_template(
TEMPLATES["Signature"],
{
"signed_info": signed_info,
"signature_value": signature_value,
"key_info": key_info,
},
)
envelope_content = utils.prepare_template(
query_data_signature,
{
"signature": signature,
},
)
envelope = utils.prepare_template(
TEMPLATES["Envelope"],
{
"content": envelope_content,
},
)
_logger.debug("Final Envelope")
_logger.debug(f"{envelope}")
return envelope
def sign(self, data) -> str:
"""Signs the `data` using SHA1 with the `key_pem` content"""
_logger.debug(f"Signing {data}")
private_key = crypto.load_privatekey(
crypto.FILETYPE_PEM, self.key_pem, passphrase=self.password
)
signed_data = utils.binary_to_utf8(crypto.sign(private_key, data, "sha1"))
return signed_data
def login(self, created: datetime = None, expires: datetime = None, uuid: str = None):
"""If the current token is invalid, tries to log in
Args:
created (datetime, optional): Creation date to be used in the session. Defaults to datetime.utcnow().
expires (datetime, optional): Expiration date to be used in the session. Defaults to created + timedelta(minutes=5).
uuid (str, optional): UUID to be used in the session. Defaults to f'uuid-{uuid1()}-1'.
"""
created = created or datetime.utcnow()
expires = expires or created + timedelta(minutes=5)
uuid = uuid or f"uuid-{uuid1()}-1"
if self._token_expired():
_logger.debug("Token expired, creating a new one")
self.token_expires = expires
self._login(created, expires, uuid)
_logger.debug("New token created")
def _login(self, created: datetime, expires: datetime, uuid: str):
"""Send login request to the SAT
Args:
created (datetime): Creation date to be used in the session
expires (datetime): Expiration date to be used in the session
uuid (str): UUID to be used in the session
Raises:
RequestException: If there was an error in the request
"""
request_content = self._get_login_soap_body(created, expires, uuid)
response = utils.consume(
"http://DescargaMasivaTerceros.gob.mx/IAutenticacion/Autentica",
"https://cfdidescargamasivasolicitud.clouda.sat.gob.mx/Autenticacion/Autenticacion.svc",
request_content,
)
if response.status_code != 200:
raise self.RequestException(response.status_code, response.reason, request_content)
else:
self._get_login_data(utils.remove_namespaces(response.content.decode("UTF-8")))
def _get_login_soap_body(
self, created_object: datetime, expires_object: datetime, uuid: str
) -> str:
"""Creates the request body to be used in login
Args:
created_object (datetime): Creation date to be used in the session
expires_object (datetime): Expiration date to be used in the session
uuid (str): UUID to be used in the session
Returns:
str: Content body
"""
created = created_object.isoformat()
expires = expires_object.isoformat()
timestamp = utils.prepare_template(
TEMPLATES["Timestamp"],
{
"created": created,
"expires": expires,
},
)
digest_value = utils.digest(timestamp)
signed_info = utils.prepare_template(
TEMPLATES["SignedInfo"],
{
"uri": "#_0",
"digest_value": digest_value,
},
)
signature_value = self.sign(signed_info)
_logger.debug(
f"""Creating Login Envelope with the next data
"created": {created},
"expires": {expires},
"uuid": {uuid},
"""
)
envelope = utils.prepare_template(
TEMPLATES["LoginEnvelope"],
{
"binary_security_token": self.cert,
"created": created,
"digest_value": digest_value,
"expires": expires,
"signature_value": signature_value,
"uuid": uuid,
},
)
return envelope
def _get_login_data(self, response: str) -> str:
"""Gets the token from the raw response"""
response_dict = xmltodict.parse(response)
self.token = response_dict["Envelope"]["Body"]["AutenticaResponse"]["AutenticaResult"]
@ensure_login
def query(self, start: datetime, end: datetime, download_type: str, request_type: str) -> str:
"""Creates a Query in the SAT system"""
request_content = self._get_query_soap_body(start, end, download_type, request_type)
response = utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/ISolicitaDescargaService/SolicitaDescarga",
"https://cfdidescargamasivasolicitud.clouda.sat.gob.mx/SolicitaDescargaService.svc",
request_content,
token=self.token,
)
if response.status_code != 200:
raise self.RequestException(response.status_code, response.reason, request_content)
else:
id = self._get_query_id(utils.remove_namespaces(response.content.decode("UTF-8")))
return id
def _get_query_soap_body(
self, start: datetime, end: datetime, download_type: str, request_type: str
):
"""Creates the SOAP body to the query request"""
start = start.isoformat()
end = end.isoformat()
data = {
"start": start,
"end": end,
"rfc": self.rfc,
"download_type": download_type,
"request_type": request_type,
"signature": "",
}
envelope = self._create_common_envelope(TEMPLATES["SolicitaDescarga"], data)
return envelope
def _get_query_id(self, response: str) -> str:
"""Gets the Query ID from the raw response"""
response_dict = xmltodict.parse(response)
result = response_dict["Envelope"]["Body"]["SolicitaDescargaResponse"][
"SolicitaDescargaResult"
]
status_code = int(result.get("@CodEstatus", -1))
if status_code == 5000:
id = result["@IdSolicitud"]
return id
return None
@ensure_login
def verify(self, query_id: str) -> dict:
"""Checks the status of a Query"""
request_content = self._get_verify_soap_body(query_id)
response = utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/IVerificaSolicitudDescargaService/VerificaSolicitudDescarga",
"https://cfdidescargamasivasolicitud.clouda.sat.gob.mx/VerificaSolicitudDescargaService.svc",
request_content,
token=self.token,
)
if response.status_code != 200:
raise self.RequestException(response.status_code, response.reason, request_content)
else:
data = self._get_verify_data(utils.remove_namespaces(response.content.decode("UTF-8")))
return data
def _get_verify_soap_body(self, query_id: str) -> str:
"""Creates the SOAP body to check the query status"""
data = {
"rfc": self.rfc,
"query_id": query_id,
"signature": "",
}
envelope = self._create_common_envelope(TEMPLATES["VerificaSolicitudDescarga"], data)
return envelope
def _get_verify_data(self, response: str) -> dict:
"""Gets the Query ID from the raw response"""
response_dict = xmltodict.parse(response)
result = response_dict["Envelope"]["Body"]["VerificaSolicitudDescargaResponse"][
"VerificaSolicitudDescargaResult"
]
data = {
"EstadoSolicitud": result["@EstadoSolicitud"],
"CodEstatus": result["@CodEstatus"],
"Mensaje": result["@Mensaje"],
"CodigoEstadoSolicitud": result["@CodigoEstadoSolicitud"],
"NumeroCFDIs": result["@NumeroCFDIs"],
"IdsPaquetes": [result["IdsPaquetes"]]
if result["@EstadoSolicitud"] == "3"
else "", # TODO Check what happens when multiple ids
}
return data
@ensure_login
def download(self, package_ids: (list, str)) -> dict:
"""Checks the status of a Query"""
if type(package_ids) == str:
package_ids = [package_ids]
downloads = {}
for package_id in package_ids:
request_content = self._get_download_soap_body(package_id)
response = utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/IDescargaMasivaTercerosService/Descargar",
"https://cfdidescargamasiva.clouda.sat.gob.mx/DescargaMasivaService.svc",
request_content,
token=self.token,
)
if response.status_code != 200:
raise self.RequestException(response.status_code, response.reason, request_content)
else:
downloads[package_id] = self._get_download_data(
utils.remove_namespaces(response.content.decode("UTF-8"))
)
return downloads
def _get_download_soap_body(self, package_id: str) -> dict:
"""Creates the SOAP body to check the query status"""
data = {
"rfc": self.rfc,
"package_id": package_id,
"signature": "",
}
envelope = self._create_common_envelope(
TEMPLATES["PeticionDescargaMasivaTercerosEntrada"], data
)
return envelope
def _get_download_data(self, response: str) -> bytes:
"""Gets the Download data from the raw response"""
response_dict = xmltodict.parse(response)
package = response_dict["Envelope"]["Body"]["RespuestaDescargaMasivaTercerosSalida"][
"Paquete"
]
return package and base64.b64decode(package)
def wait_query(self, query_id: str, retries: int = 10, wait_seconds: int = 2) -> list:
for _ in range(retries):
verification = self.verify(query_id)
if verification["EstadoSolicitud"] == "3":
return verification["IdsPaquetes"]
time.sleep(wait_seconds)
else:
raise TimeoutError("The query is not yet resolved")
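# Hedged usage sketch (not part of the original module): the certificate, key
# and password below are placeholders for a real FIEL/e.firma pair, and the
# calls hit the live SAT web service.
if __name__ == "__main__":
    with open("fiel.cer", "rb") as cer_file, open("fiel.key", "rb") as key_file:
        sat = SAT(cer_file.read(), key_file.read(), "password")
    query_id = sat.query(
        datetime(2023, 1, 1),
        datetime(2023, 1, 31),
        SAT.DownloadType.RECEIVED,
        SAT.RequestType.CFDI,
    )
    package_ids = sat.wait_query(query_id)
    packages = sat.download(package_ids)
    print(list(packages))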
| /sat_ws-3.23.2-py3-none-any.whl/sat/sat.py | 0.673084 | 0.163212 | sat.py | pypi |
import base64
import hashlib
import logging
import re
import textwrap
import requests
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
def clean_xml(xml: str) -> str:
"""Clean a XML string to be used in SAT request.
Removes all the spaces and new line characters between tags.
Args:
xml (str): XML to be cleaned.
Returns:
str: XML clean.
"""
return re.sub(r"\s+(?=[<>])", "", xml).strip()
def remove_namespaces(xml):
return re.sub(r"[souh]:", "", xml)
def prepare_template(template: str, data: dict) -> str: # TODO simplify
"""Takes a XML template and fill the `variable` (data betwen {}) fields.
Args:
template (str): Template to be processed.
data (dict): Variables to be replaced.
Returns:
str: Template with variables replaced.
"""
template_clean = clean_xml(template)
final_template = template_clean.format(**data)
if "signature" in data.keys() and not data.get("signature"):
data["signature"] = "{signature}"
template_signature_to_replace = template_clean.format(**data)
return (final_template, template_signature_to_replace)
return final_template
def binary_to_utf8(binary: bytes) -> str:
"""Takes a bytes object an returns the string represents it.
Args:
binary (bytes): Raw binary to be process.
Returns:
str: binary in base64 in utf-8.
"""
return base64.encodebytes(binary).decode("UTF-8")
def digest(data: str) -> str:
return binary_to_utf8(hashlib.sha1(data.encode("UTF-8")).digest())
def der_to_pem(der_data: str, type: str) -> str:
"""Convert DER data into PEM.
Args:
der_data (str): DER data to be convert.
type (str): Type of certificate to be created (`ENCRYPTED PRIVATE KEY`, `CERTIFICATE`, etc).
Returns:
str: Certificate converted.
"""
wrapped = "\n".join(textwrap.wrap(der_data, 64))
pem = f"-----BEGIN {type}-----\n{wrapped}\n-----END {type}-----\n"
return pem
def consume(soap_action, uri, body, token=None):
headers = {
"Content-type": 'text/xml; charset="utf-8"',
"Accept": "text/xml",
"Cache-Control": "no-cache",
"SOAPAction": soap_action,
}
if token:
headers["Authorization"] = f'WRAP access_token="{token}"'
response = requests.post(uri, body, headers=headers)
return response
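# Hedged usage sketch (not part of the original module): when "signature" is
# passed but empty, prepare_template returns both the fully filled template
# and a variant that keeps the {signature} placeholder for later injection.
# The template and RFC below are placeholder values.
if __name__ == "__main__":
    template = "<req><rfc>{rfc}</rfc>{signature}</req>"
    filled, pending_signature = prepare_template(template, {"rfc": "XAXX010101000", "signature": ""})
    print(filled)
    print(pending_signature)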
| /sat_ws-3.23.2-py3-none-any.whl/sat/utils.py | 0.55652 | 0.237786 | utils.py | pypi |
import logging
from dataclasses import dataclass, field, is_dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Set
from .concepto import Concepto
@dataclass
class CFDI:
UUID: str
Fecha: datetime
Total: float
# XML fields
Version: Optional[str] = None
Sello: Optional[str] = None
UsoCFDIReceptor: Optional[str] = None
RegimenFiscalEmisor: Optional[str] = None
CondicionesDePago: Optional[str] = None
CfdiRelacionados: Set[str] = field(default_factory=set)
Folio: Optional[str] = None
Serie: Optional[str] = None
NoCertificado: Optional[str] = None
Certificado: Optional[str] = None
TipoDeComprobante: Optional[str] = None
LugarExpedicion: Optional[str] = None
FormaPago: Optional[str] = None
MetodoPago: Optional[str] = None
Moneda: Optional[str] = None
TipoCambio: Optional[float] = None
SubTotal: Optional[float] = None
Conceptos: List[Concepto] = field(default_factory=list)
xml: Optional[str] = None
Exportacion: str = ""
Periodicidad: str = ""
Meses: str = ""
# CSV Fields
RfcEmisor: Optional[str] = None
NombreEmisor: Optional[str] = None
RfcReceptor: Optional[str] = None
NombreReceptor: Optional[str] = None
RfcPac: Optional[str] = None
FechaCertificacionSat: Optional[datetime] = None
EfectoComprobante: Optional[str] = None
Estatus: Optional[str] = None
FechaCancelacion: Optional[datetime] = None
# Extras
_extras: Dict[str, Any] = field(default_factory=dict)
cfdis_related: Set["CFDI"] = field(default_factory=set)
@property
def extras(self) -> Dict[str, Any]:
return self._extras or {}
def add_extra(self, key: str, value: Any):
self._extras[key] = value
def clean_extras(self):
self._extras = {}
def __post_init__(self):
self.CfdiRelacionados = set(self.CfdiRelacionados or {})
self.cfdis_related = set()
self._extras = dict(self._extras or {})
self.UUID = self.UUID.upper()
def __bool__(self):
return bool(self.UUID)
def merge(self, other: "CFDI"):
for attrib, value in self.__dict__.items():
other_value = getattr(other, attrib)
if not other_value:
continue
if value and value != other_value:
logging.debug("Inconsistent Information '%s' != '%s'", value, other_value)
setattr(self, attrib, other_value)
def to_dict(self) -> Dict[str, Any]:
dict_repr: Dict[str, Any] = {}
def _to_dict(dict_repr, obj):
for f in obj.__dataclass_fields__.values():
if not f.init:
continue
value = getattr(obj, f.name)
if not value:
continue
if isinstance(value, list):
dict_repr[f.name] = [_to_dict({}, item) for item in value]
elif is_dataclass(value):
dict_repr[f.name] = _to_dict({}, value)
else:
dict_repr[f.name] = value
return dict_repr
_to_dict(dict_repr, self)
return dict_repr
@classmethod
def reduce(cls, cfdis: List["CFDI"]) -> List["CFDI"]:
by_uuid: Dict[str, List["CFDI"]] = {}
for cfdi in cfdis:
if cfdi.UUID not in by_uuid:
by_uuid[cfdi.UUID] = []
by_uuid[cfdi.UUID].append(cfdi)
for cfdis_by_uuid in by_uuid.values():
while len(cfdis_by_uuid) > 1:
cfdi = cfdis_by_uuid.pop()
cfdis_by_uuid[0].merge(cfdi)
return [cfdi for cfdi_g in by_uuid.values() for cfdi in cfdi_g]
def __hash__(self):
return hash(self.UUID)
def __eq__(self, other):
return self.UUID == other.UUID
def add_related(self, cfdfis: Dict[str, "CFDI"]):
self.__post_init__()
for uuid in self.CfdiRelacionados:
if uuid in cfdfis:
self.cfdis_related.add(cfdfis[uuid])
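# Hedged usage sketch (not part of the original module): two partial records
# for the same UUID (for example, one parsed from an XML and one from the SAT
# metadata listing) collapse into a single merged CFDI. All values are made up.
if __name__ == "__main__":
    from_xml = CFDI(UUID="aaaa-1111", Fecha=datetime(2023, 5, 1), Total=1160.0, RfcEmisor="AAA010101AAA")
    from_metadata = CFDI(UUID="AAAA-1111", Fecha=datetime(2023, 5, 1), Total=1160.0, Estatus="Vigente")
    merged = CFDI.reduce([from_xml, from_metadata])
    print(merged[0].to_dict())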
| /sat_ws-3.23.2-py3-none-any.whl/mx_edi/core/cfdi.py | 0.773644 | 0.264198 | cfdi.py | pypi |
import logging
from . import templates, utils
from .certificate_handler import CertificateHandler
_logger = logging.getLogger(__name__)
class EnvelopeSigner:
certificate_handler: CertificateHandler
def __init__(self, certificate_handler: CertificateHandler):
self.certificate_handler = certificate_handler
def create_common_envelope(self, template: str, data: dict) -> str:
_logger.debug("Creating Envelope")
_logger.debug("%s", template)
_logger.debug("%s", data)
query_data_signature = utils.prepare_template(template, data)
data["signature"] = ""
query_data = utils.prepare_template(template, data)
digest_value = utils.digest(query_data)
signed_info = utils.prepare_template(
templates.SignedInfo,
{
"uri": "",
"digest_value": digest_value,
},
)
key_info = utils.prepare_template(
templates.KeyInfo,
{
"issuer_name": self.certificate_handler.certificate.issuer,
"serial_number": self.certificate_handler.certificate.get_serial_number(),
"certificate": self.certificate_handler.cert.replace("\n", ""),
},
)
signature_value = self.certificate_handler.sign(signed_info)
signature = utils.prepare_template(
templates.Signature,
{
"signed_info": signed_info,
"signature_value": signature_value,
"key_info": key_info,
},
)
envelope_content = utils.prepare_template(
query_data_signature,
{
"signature": signature,
},
)
envelope = utils.prepare_template(
templates.Envelope,
{
"content": envelope_content,
},
)
_logger.debug("Final Envelope")
_logger.debug("%s", envelope)
return envelope
| /sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/envelope_signer.py | 0.461259 | 0.170473 | envelope_signer.py | pypi |
import logging
from typing import Dict, Tuple
from xml.sax.saxutils import escape
from requests import Response
from mx_edi.connectors.sat import envelope_signer
from . import templates, utils
from .certificate_handler import CertificateHandler
from .envelope_signer import EnvelopeSigner
from .sat_login_handler import SATLoginHandler
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
class SATConnector:
"""Class to make a connection to the SAT"""
login_handler: SATLoginHandler
envelope_signer: EnvelopeSigner
rfc: str
def __init__(self, cert: bytes, key: bytes, password: bytes) -> None:
"""Loads the certificate, key file and password to stablish the connection to the SAT
Creates a object to manage the SAT connection.
Args:
cert (bytes): DER Certificate in raw binary
key (bytes): DER Key Certificate in raw binary
password (bytes): Key password in binary
"""
certificate_handler = CertificateHandler(cert, key, password)
self.rfc = escape(certificate_handler.unique_identifier)
self.login_handler = SATLoginHandler(certificate_handler)
self.envelope_signer = EnvelopeSigner(certificate_handler)
_logger.info("Data correctly loaded")
def _get_rfc_issued_field(self, download_type: str) -> Tuple[str, str]:
issued = f' RfcEmisor="{self.rfc}"' if download_type == "RfcEmisor" else ""
received = (
f"<des:RfcReceptores><des:RfcReceptor>{self.rfc}</des:RfcReceptor></des:RfcReceptores>"
if download_type == "RfcReceptor"
else ""
)
return issued, received
def get_envelope_query(self, data: Dict[str, str]) -> str:
download_type = data["download_type"]
rfc_issued, rfc_received = self._get_rfc_issued_field(download_type)
data["rfc_issued"] = rfc_issued
data["rfc_received"] = rfc_received
data["rfc"] = self.rfc
return self.envelope_signer.create_common_envelope(
templates.SolicitaDescarga,
data,
)
def send_query(self, envelope: str) -> Response:
return utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/ISolicitaDescargaService/SolicitaDescarga",
"https://cfdidescargamasivasolicitud.clouda.sat.gob.mx/SolicitaDescargaService.svc",
envelope,
token=self.login_handler.token,
)
def verify_query(self, data: Dict[str, str]) -> Response:
data["rfc"] = self.rfc
envelope = self.envelope_signer.create_common_envelope(
templates.VerificaSolicitudDescarga,
data,
)
return utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/IVerificaSolicitudDescargaService/VerificaSolicitudDescarga",
"https://cfdidescargamasivasolicitud.clouda.sat.gob.mx/VerificaSolicitudDescargaService.svc",
envelope,
token=self.login_handler.token,
)
def download_package(self, data: Dict[str, str]) -> Response:
"""Get the binary response for a package"""
data["rfc"] = self.rfc
envelope = self.envelope_signer.create_common_envelope(
templates.PeticionDescargaMasivaTercerosEntrada,
data,
)
return utils.consume(
"http://DescargaMasivaTerceros.sat.gob.mx/IDescargaMasivaTercerosService/Descargar",
"https://cfdidescargamasiva.clouda.sat.gob.mx/DescargaMasivaService.svc",
envelope,
token=self.login_handler.token,
)
| /sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/sat_connector.py | 0.788705 | 0.171442 | sat_connector.py | pypi |
import base64
import hashlib
import logging
import re
import textwrap
import requests
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
class RequestException(Exception):
"""If there is a problem in the request"""
def clean_xml(xml: str) -> str:
"""Clean a XML string to be used in SAT request.
Removes all the spaces and new line characters between tags.
Args:
xml (str): XML to be cleaned.
Returns:
str: XML clean.
"""
return xml.strip()
def remove_namespaces(xml):
return re.sub(r"[souh]:", "", xml)
def prepare_template(template: str, data: dict) -> str:
"""Takes a XML template and fill the `variable` (data betwen {}) fields.
Args:
template (str): Template to be processed.
data (dict): Variables to be replaced.
Returns:
str: Template with variables replaced.
"""
template_clean = clean_xml(template)
return template_clean.format(**data)
def binary_to_utf8(binary: bytes) -> str:
"""Takes a bytes object an returns the string represents it.
Args:
binary (bytes): Raw binary to be process.
Returns:
str: binary in base64 in utf-8.
"""
return base64.encodebytes(binary).decode("UTF-8")
def digest(data: str) -> str:
return binary_to_utf8(hashlib.sha1(data.encode("UTF-8")).digest())[:-1]
def der_to_pem(der_data: str, cert_type: str) -> str:
"""Convert DER data into PEM.
Args:
der_data (str): DER data to be convert.
cert_type (str): Type of certificate to be created
(`ENCRYPTED PRIVATE KEY`, `CERTIFICATE`, etc).
Returns:
str: Certificate converted.
"""
wrapped = "\n".join(textwrap.wrap(der_data, 64))
return f"-----BEGIN {cert_type}-----\n{wrapped}\n-----END {cert_type}-----\n"
def consume(soap_action, uri, body, token=None) -> requests.Response:
headers = {
"Content-type": 'text/xml; charset="utf-8"',
"Accept": "text/xml",
"Cache-Control": "no-cache",
"SOAPAction": soap_action,
}
if token:
headers["Authorization"] = f'WRAP access_token="{token}"'
return requests.post(uri, body, headers=headers)
def check_response(response: requests.Response):
if response.status_code != 200:
raise RequestException(response.status_code, response.reason)
| /sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/utils.py | 0.814274 | 0.203075 | utils.py | pypi |
import logging
import urllib
from OpenSSL import crypto # type: ignore
from . import utils
_logger = logging.getLogger(__name__)
class NoUniqueIdentifierException(Exception):
"""Raised if no valid RFC is found in the certificate"""
class NoIssuerException(Exception):
"""Raised if no valid Issuer is found in the certificate"""
class CertificateHandler:
cert: str
key: str
password: bytes
unique_identifier: str
certificate: crypto.X509
key_pem: str
cert_pem: str
def __init__(self, cert_binary: bytes, key_binary: bytes, password: bytes):
self.cert = utils.binary_to_utf8(cert_binary)
self.key = utils.binary_to_utf8(key_binary)
self.password = password
self._load_certs()
self._compute_data_from_cert()
def _load_certs(self):
"""Loads the PEM version of the certificate and key file, also loads the crypto certificate
Convert the `cert` and `key` from DER to PEM and creates the real certificate (X509)
"""
self.key_pem = utils.der_to_pem(self.key, cert_type="ENCRYPTED PRIVATE KEY")
self.cert_pem = utils.der_to_pem(self.cert, cert_type="CERTIFICATE")
self.certificate = crypto.load_certificate(crypto.FILETYPE_PEM, self.cert_pem)
def _compute_data_from_cert(self):
"""Gets the RFC and Issuer directly from the certificate"""
self._get_rfc_from_cert()
self._get_issuer_from_cert()
    def _get_rfc_from_cert(self):
        """Get the RFC from the certificate.
        Raises:
            NoUniqueIdentifierException: If no RFC is found.
        """
subject_components = self.certificate.get_subject().get_components()
for c in subject_components:
if c[0] == b"x500UniqueIdentifier":
self.unique_identifier = c[1].decode("UTF-8").split(" ")[0]
_logger.debug("x500UniqueIdentifier %s loaded", self.unique_identifier)
break
else:
raise NoUniqueIdentifierException()
    def _get_issuer_from_cert(self):
        """Get the issuer from the certificate.
        Raises:
            NoIssuerException: If no issuer is found.
        """
self.certificate.issuer = ",".join(
f'{c[0].decode("UTF-8")}={urllib.parse.quote(c[1].decode("UTF-8"))}'
for c in self.certificate.get_issuer().get_components()
)
if not self.certificate.issuer:
raise NoIssuerException()
_logger.debug("Issuer %s loaded", self.certificate.issuer)
def sign(self, data: str) -> str:
"""Signs the `data` using SHA1 with the `key_pem` content"""
_logger.debug("Signing %s", data)
private_key = crypto.load_privatekey(
crypto.FILETYPE_PEM, self.key_pem, passphrase=self.password
)
signed_data = crypto.sign(private_key, data, "sha1")
return utils.binary_to_utf8(signed_data).replace("\n", "")
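# Usage sketch (illustrative only): load a certificate/key pair from disk and
# sign a payload. The file names and password below are placeholders.
if __name__ == "__main__":
    with open("certificate.cer", "rb") as cer_file, open("private_key.key", "rb") as key_file:
        handler = CertificateHandler(cer_file.read(), key_file.read(), b"key-password")
    print(handler.unique_identifier)    # RFC read from the certificate subject
    print(handler.sign("data to sign"))  # base64-encoded SHA1 signature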
|
/sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/certificate_handler.py
| 0.64646 | 0.215919 |
certificate_handler.py
|
pypi
|
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from requests import Response
from . import utils
from .enums import DownloadType, RequestType
from .package import Package
from .response_parsers import QueryParser, VerifyParser
from .sat_connector import SATConnector
_logger = logging.getLogger(__name__)
DEFAULT_TIME_WINDOW = timedelta(days=30)
class QueryException(Exception):
"""If not valid query"""
class Query:
download_type: Optional[DownloadType]
request_type: Optional[RequestType]
start: datetime
end: datetime
identifier: str
status: int
request_status: int
query_status: int
message: str
status_code: int
cfdi_qty: int
packages: List[Package]
sent_date: datetime
verified_date: datetime
def __init__(
self,
download_type: DownloadType = None,
request_type: RequestType = None,
*,
start: datetime = None,
end: datetime = None,
identifier: str = None,
):
self.download_type = download_type
self.request_type = request_type
        # Default to the last DEFAULT_TIME_WINDOW days ending now
self.end = end or datetime.now()
self.start = start or self.end - DEFAULT_TIME_WINDOW
self.identifier = identifier or ""
def _get_query_xml(self, connector: SATConnector) -> str:
"""
Returns the query XML to be sent to the SAT
"""
data = self.soap_send()
return connector.get_envelope_query(data)
def send(self, connector: SATConnector):
query_xml = self._get_query_xml(connector)
response = connector.send_query(query_xml)
self._process_send_response(response)
def soap_send(self) -> Dict[str, str]:
"""Creates the SOAP body to the send request"""
start = self.start.isoformat()
end = self.end.isoformat()
if not (self.download_type and self.request_type):
raise QueryException("If query is sent, download type and request type must be set")
return {
"start": start,
"end": end,
"download_type": self.download_type.value,
"request_type": self.request_type.value,
"signature": "{signature}",
}
def _process_send_response(self, response: Response):
response_clean = self._set_request_status_check_and_clean_response(response)
parsed = QueryParser.parse(response_clean)
self.status = int(parsed["CodEstatus"])
self.identifier = parsed["IdSolicitud"]
self.sent_date = datetime.now()
def verify(self, connector: SATConnector):
data = self.soap_verify()
response = connector.verify_query(data)
self._process_verify_response(response)
def soap_verify(self) -> Dict[str, str]:
"""Creates the SOAP body to the verify request"""
return {
"identifier": self.identifier,
"signature": "{signature}",
}
def _process_verify_response(self, response: Response):
response_clean = self._set_request_status_check_and_clean_response(response)
try:
parsed = VerifyParser.parse(response_clean)
except KeyError as e:
_logger.error("Missing key %s in query ID %s", e, self.identifier)
raise
self.status = int(parsed["CodEstatus"])
self.query_status = int(parsed["EstadoSolicitud"])
self.message = parsed["Mensaje"]
self.status_code = int(parsed["CodigoEstadoSolicitud"])
self.cfdi_qty = int(parsed["NumeroCFDIs"])
self.packages = Package.from_ids(parsed["IdsPaquetes"], self.request_type)
self.verified_date = datetime.now()
def _set_request_status_check_and_clean_response(self, response):
self.request_status = response.status_code
utils.check_response(response)
return utils.remove_namespaces(response.content.decode("UTF-8"))
def download(self, connector: SATConnector):
for package in self.packages:
package.download(connector)
def get_packages(
self, connector: SATConnector, retries: int = 10, wait_seconds: int = 2
) -> List[Package]:
for _ in range(retries):
self.verify(connector)
if self.query_status > 3:
raise QueryException(f"EstadoSolicitud({self.status_code})")
if self.query_status == 3:
return self.packages
time.sleep(wait_seconds)
raise TimeoutError("The query is not yet resolved")
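# Usage sketch (illustrative only): the intended query lifecycle is to send,
# poll until the SAT marks the request as finished, then download the
# resulting packages. Building a SATConnector depends on the rest of the
# package, and the enum members named below are only examples, so the calls
# are left as comments:
#
#   query = Query(DownloadType.ISSUED, RequestType.CFDI,
#                 start=datetime(2023, 1, 1), end=datetime(2023, 1, 31))
#   query.send(connector)                               # sets query.identifier
#   packages = query.get_packages(connector, retries=10, wait_seconds=30)
#   query.download(connector)                           # fetches each package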
|
/sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/query.py
| 0.768386 | 0.156201 |
query.py
|
pypi
|
import logging
from datetime import datetime
from typing import Any, Callable, Dict, List, Tuple
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from zipfile import ZipFile
from ....core import CFDI, Concepto
from .cfdi_parser import CFDIParser, MissingData
from .utils import get_attr
_logger = logging.getLogger(__name__)
CFDI_NS = {
"3.2": "{http://www.sat.gob.mx/cfd/3}",
"3.3": "{http://www.sat.gob.mx/cfd/3}",
"4.0": "{http://www.sat.gob.mx/cfd/4}",
}
TFD_NS = "{http://www.sat.gob.mx/TimbreFiscalDigital}"
tax_code_2_name = {
"001": "ISR",
"002": "IVA",
"003": "IEPS",
}
tax_types = {
"Traslados": "Traslado",
"Retenciones": "Retencion",
}
all_taxes = {
f"{tax_type}{tax_name}" for tax_type in tax_types for tax_name in tax_code_2_name.values()
}
def str_to_datetime(datetime_str: str) -> datetime:
    datetime_str = datetime_str[:19]  # Drop any timezone suffix (e.g. a trailing `Z`)
return datetime.fromisoformat(datetime_str)
class XML2CFDI(CFDIParser):
root_elements: Dict[str, Callable] = {
"Version": str,
"Sello": str,
"CondicionesDePago": str,
"Folio": str,
"Serie": str,
"NoCertificado": str,
"Certificado": str,
"TipoDeComprobante": str,
"Fecha": str_to_datetime,
"LugarExpedicion": str,
"FormaPago": str,
"MetodoPago": str,
"Moneda": str,
"TipoCambio": float,
"SubTotal": float,
"Total": float,
"Exportacion": str,
"Periodicidad": str,
"Meses": str,
}
@classmethod
def _get_root_data(cls, xml: Element) -> Dict[str, Any]:
data = {}
for field, caster in cls.root_elements.items():
attr = get_attr(xml, field)
if not attr:
continue
try:
data[field] = caster(get_attr(xml, field))
except ValueError:
version = get_attr(xml, "Version")
ns = CFDI_NS[version]
complemento = xml.find(f"{ns}Complemento")
if not complemento:
_logger.warning("Complemento not found")
continue
uuid = get_attr(
complemento.find(f"{TFD_NS}TimbreFiscalDigital"),
"UUID",
)
_logger.warning("Invalid value `%s` for field `%s`. UUID: `%s`", attr, field, uuid)
continue
return data
@classmethod
def _get_impuestos(cls, concepto, ns: str) -> Dict[str, float]:
"""Get the sum of the taxes in Concepto"""
xml_impuestos = concepto.find(f"{ns}Impuestos")
res: Dict[str, float] = {tax: 0 for tax in all_taxes}
if xml_impuestos is None:
return res
for type_group, type_name in tax_types.items():
xml_tax_group = xml_impuestos.find(f"{ns}{type_group}")
if xml_tax_group is None:
continue
xml_taxs = xml_tax_group.findall(f"{ns}{type_name}")
if not xml_taxs:
continue
for xml_tax in xml_taxs:
code = get_attr(xml_tax, "Impuesto")
res[f"{type_group}{tax_code_2_name[code]}"] += float(
get_attr(xml_tax, "Importe", 0)
)
return res
@classmethod
def _get_conceptos(cls, xml: Element, ns: str) -> List[Concepto]:
xml_conceptos = xml.find(f"{ns}Conceptos")
if not xml_conceptos:
return []
return [
Concepto(
Descripcion=get_attr(concepto, "Descripcion"),
Cantidad=float(get_attr(concepto, "Cantidad")),
ValorUnitario=float(get_attr(concepto, "ValorUnitario")),
Importe=float(get_attr(concepto, "Importe")),
Descuento=float(get_attr(concepto, "Descuento", 0)),
ObjetoImp=get_attr(concepto, "ObjetoImp"),
ClaveProdServ=get_attr(concepto, "ClaveProdServ"),
**cls._get_impuestos(concepto, ns),
)
for concepto in xml_conceptos.findall(f"{ns}Concepto")
]
@classmethod
def parse(cls, xml: Element, xml_string: str = None) -> CFDI:
data = cls._get_root_data(xml)
ns = CFDI_NS[data["Version"]]
complemento = xml.find(f"{ns}Complemento")
if not complemento:
raise MissingData(f"{ns}Complemento")
CfdiRelacionados = xml.find(f"{ns}CfdiRelacionados")
if CfdiRelacionados:
data["CfdiRelacionados"] = {
get_attr(cfdi_relacionado, "UUID")
for cfdi_relacionado in CfdiRelacionados.findall(f"{ns}CfdiRelacionado")
}
uuid = get_attr(
complemento.find(f"{TFD_NS}TimbreFiscalDigital"),
"UUID",
)
emisor = xml.find(f"{ns}Emisor")
receptor = xml.find(f"{ns}Receptor")
data["RfcEmisor"] = get_attr(emisor, "Rfc")
data["NombreEmisor"] = get_attr(emisor, "Nombre")
data["RegimenFiscalEmisor"] = get_attr(emisor, "RegimenFiscal")
data["RfcReceptor"] = get_attr(receptor, "Rfc")
data["NombreReceptor"] = get_attr(receptor, "Nombre")
data["UsoCFDIReceptor"] = get_attr(receptor, "UsoCFDI")
data["UUID"] = uuid
data["Conceptos"] = cls._get_conceptos(xml, ns)
data["xml"] = xml_string
cfdi = CFDI(**data)
tax_sums = {
tax: sum(getattr(concepto, tax, 0) for concepto in cfdi.Conceptos) for tax in all_taxes
}
cfdi.add_extra(
"computed",
{
**tax_sums,
"Neto": sum(concepto.Importe for concepto in cfdi.Conceptos)
- sum(concepto.Descuento for concepto in cfdi.Conceptos),
"ImpuestosRetenidos": sum(
amount for tax, amount in tax_sums.items() if tax.startswith("Retencion")
),
},
)
return cfdi
@classmethod
def _get_xmls(cls, files: List[str]) -> List[Tuple[Element, str]]:
return [(ElementTree.fromstring(xml_file), xml_file) for xml_file in files]
@classmethod
def parse_zip(cls, zipfile: ZipFile) -> List["CFDI"]:
xml_files = cls._get_files(zipfile)
xmls = cls._get_xmls(xml_files)
return [cls.parse(xml[0], xml[1]) for xml in xmls]
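# Usage sketch (illustrative only): parse every CFDI XML inside a downloaded
# package. The archive name is a placeholder and the printed fields follow the
# names used in _get_root_data() above.
#
#   from zipfile import ZipFile
#   with ZipFile("package.zip") as zf:
#       cfdis = XML2CFDI.parse_zip(zf)
#   for cfdi in cfdis:
#       print(cfdi.UUID, cfdi.Total)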
|
/sat_ws-3.23.2-py3-none-any.whl/mx_edi/connectors/sat/package_parsers/xml2cfdi.py
| 0.757615 | 0.260072 |
xml2cfdi.py
|
pypi
|
# satadd: CLI pipeline for Planet, Satellogic, Google Earth Engine and Digital Globe Imagery
[](https://doi.org/10.5281/zenodo.1450622)
[](https://badge.fury.io/py/satadd)
Cite as
```
Samapriya Roy. (2018, October 6). samapriya/satadd: satadd: CLI pipeline for Planet, Satellogic,
Google Earth Engine and Digital Globe Imagery (Version 0.0.3). Zenodo. http://doi.org/10.5281/zenodo.1450622
```
Google Earth Engine opened the door to a curated list of public datasets already ingested in an analysis platform. With over 450 raster datasets alone, it forms one of the most unique collections of publicly available datasets and is still growing. While this was happening for free and open-source data, more and more commercial data was also arriving, and companies opened their doors to open data with programs aimed at researchers, users, developers and everyone else who wanted to use these datasets. This includes, but is not limited to, Planet, Digital Globe and Satellogic making large chunks of their archives open to users. The introduction of high temporal resolution datasets like PlanetScope, high spatial resolution imagery like SkySat and Digital Globe, and high spectral resolution imagery like Satellogic's hyperspectral data is changing how we approach problem solving. While there has been progress in building standard APIs and data access methods, there is still room for growth, standardization and, above all, easy access to these resources. With Planet's [Open California Program](https://www.planet.com/products/open-california/), the [Education and Research Program](https://www.planet.com/markets/education-and-research/), Digital Globe's [Open Data Program](https://www.digitalglobe.com/opendata) and the [Education and Research program under Satellogic and their Open Data](https://github.com/satellogic/open-impact), it became obvious that the questions we can ask of these sensors could get interesting.
This tool was built with a focus on the same issues and borrows parts from my other projects, such as [ppipe](https://pypi.org/project/ppipe/) for handling Planet's datasets, [gee2drive](https://pypi.org/project/gee2drive/) for downloading collections already available in Google Earth Engine (GEE), and [pygbdx](https://pypi.org/project/pygbdx/), a relatively new project to explore Digital Globe assets; I have now also integrated tools to access and download Satellogic imagery. Core components from many of these tools have gone into building [satadd](https://pypi.org/project/satadd/), based on the idea of adding satellite data as needed. These tools include authentication setup for every account and access to datasets and metadata, among other things. It was not built for heavy lifting, though I have tested it on deliveries of hundreds and thousands of assets, so it behaves robustly for now. The tool is built and rebuilt as companies change their authentication protocols and delivery mechanisms, and this allows many aspects of data delivery and preprocessing to improve in the next iterations.
While almost all of these tools allow for local export, GEE currently only exports to Google Drive or your Google Cloud Storage buckets. What is lost in delivery endpoints is gained in the fact that GEE is already a mature platform for analyzing and exploring open datasets, and it also allows you to bring private datasets into GEE for analysis. So while data download and local analysis may have been the norm, it serves us well to think about running analysis in analysis engines instead. But that is a discussion for a different time. At this point, I am hoping that this tool allows you to do exactly what the different providers intended and to bring them together. Since this tool downloads data, it is bandwidth heavy and requires a steady internet connection. It handles authentication, downloading, and talking to different API endpoints and services. In the future I hope to include additional preprocessing and delivery to non-local endpoints such as existing FTP servers or buckets.
## Table of contents
* [Installation](#installation)
* [Getting started](#getting-started)
* [satadd Satellite Data Download Addon](#satadd-satellite-data-download-addon)
* [Initialize and Authenticate](#initialize-and-authenticate)
* [Planet Tools](#satadd-refresh)
* [GBDX Tools](#satadd-idsearch)
* [Satellogic Tools](#satadd-intersect)
* [GEE Tools](#satadd-bandtype)
## Installation
This assumes that you have native Python & pip installed on your system. You can test this by going to the terminal (or Windows command prompt) and trying
```python``` and then ```pip list```
If you get no errors and you have Python 2.7.14 or higher, you should be good to go. Please note that I have released this as a Python 2.7 package, but it can be easily modified for Python 3.
**This toolbox also uses some functionality from GDAL**
For installing GDAL in Ubuntu
```
sudo add-apt-repository ppa:ubuntugis/ppa && sudo apt-get update
sudo apt-get install gdal-bin
sudo apt-get install python-gdal
```
For Windows I found this [guide](https://sandbox.idre.ucla.edu/sandbox/tutorials/installing-gdal-for-windows) from UCLA
It has been brought to my notice that installing Shapely on Windows is not simply ```pip install shapely```, so install Shapely separately and [use the instructions from its PyPI project page](https://pypi.org/project/Shapely/) for Windows installation. **Shapely is an important requirement for the tool, but since the installation varies by operating system, install it using the earlier instructions before the next steps.** On other operating systems ```pip install shapely``` should work just fine.
To install **satadd**
You can install using two methods
```pip install satadd```
or you can also try
```
git clone https://github.com/samapriya/satadd.git
cd satadd
python setup.py install
```
For Linux, use sudo. This release also contains a Windows installer which bypasses the need for admin permission; it does, however, require you to have Python on the system path, meaning that when you open a command prompt you should be able to type python and start it within that window. After installing with the installer you can call satadd from the command prompt just like you call python. Give it a go; post installation, type
```
satadd -h
```
Installation is an optional step; the application can also be run directly by executing the satadd.py script. The advantage of having it installed is being able to execute satadd like any other command line tool. I recommend installation within a virtual environment. If you don't want to install, browse into the satadd folder and try ```python satadd.py``` to get the same result.
## Getting started
As usual, to print help:
```
usage: satadd.py [-h]
                 {planetkey,dginit,satinit,eeinit,dasync,savedsearch,metadata,simple_search,footprint,satraster,satmeta,metalist,reproject,refresh,idsearch,intersect,bandtype,export}
...
Simple CLI for piping Planet, Satellogic,GEE & GBDX Assets
positional arguments:
{planetkey,dginit,satinit,eeinit,dasync,savedsearch,metadata,simple_search,footprint,satraster,satmeta,metalist,reproject,refresh,idsearch,intersect,bandtype,export}
planetkey Setting up planet API Key
dginit Initialize Digital Globe GBDX
satinit Initialize Satellogic Tokens
eeinit Initialize Google Earth Engine
credrefresh Refresh Satellogic & GBDX tokens
dasync Uses the Planet Client Async Downloader to download Planet Assets: Does not require activation
savedsearch Tool to download saved searches from Planet Explorer
metadata Tool to tabulate and convert all metadata files from Planet
Item and Asset types for Ingestion into GEE
simple_search Simple search to look for DG assets that intersect your AOI handles KML/SHP/GEOJSON
metadata Exports metadata for simple search into constitutent folders as JSON files
footprint Exports footprint for metadata files extracted earlier
and converts them to individual geometries (GeoJSON)and combined geometry (GeoJSON) file
satraster Filter and download Satellogic Imagery
satlist Get url for band list based on filtered Satellogic Imagery
multiproc Multiprocess based downloader based on satlist
satmeta Filter and download Satellogic Metadata
metalist Generates Basic Metadata list per scene for Satellogic Imagery
reproject Batch reproject rasters using EPSG code
eerefresh Refreshes your personal asset list and GEE Asset list
idsearch Does possible matches using asset name to give you asseth id/full path
intersect Exports a report of all assets(Personal & GEE) intersecting with provided geometry
bandtype Prints GEE bandtype and generates list to be used for export
export Export GEE Collections based on filter
optional arguments:
-h, --help show this help message and exit
```
To obtain help for a specific functionality, simply call it with the _help_ switch, e.g.: `satadd idsearch -h`. If you didn't install satadd, then you can run it just by going to the *satadd* directory and running `python satadd.py [arguments go here]`
### satadd Satellite Data Download Addon
This tool is designed to augment the existing facility for image export using a CLI, whereby you can pass it arguments to filter based on an area-of-interest GeoJSON file and a start and end date for a collection.

### Initialize and Authenticate
This is an autosuggest-enabled terminal which uses the gee2add package to perform all of the functions, with autosuggest for the Earth Engine catalog and your own personal catalog. This way you can get access to an image id without needing the catalog id from the JavaScript code editor.
```
planetkey Setting up planet API Key
dginit Initialize Digital Globe GBDX
satinit Initialize Satellogic Tokens
eeinit Initialize Google Earth Engine
credrefresh Refresh Satellogic & GBDX tokens
```
Each of these authentication tools allows you to link and save credentials for the corresponding service; you can check them by typing something like ```satadd planetkey```. Certain services require the authentication tokens to be refreshed, which you can do using ```satadd credrefresh```.
### Planet Tools
The Planet toolset consists of tools required to access, control and download Planet Labs assets (PlanetScope and RapidEye OrthoTiles), as well as to parse metadata into a tabular form which may be required by other applications. These tools are designed to interact with [Planet's Python Client](https://pypi.org/project/planet/) and the saved search feature embedded in [Planet Explorer](https://www.planet.com/explorer/), and will allow you to access and download Planet imagery and metadata as needed. This also allows you to process the metadata in case you are ingesting it into GEE.
```
dasync Uses the Planet Client Async Downloader to download Planet Assets: Does not require activation
savedsearch Tool to download saved searches from Planet Explorer
metadata Tool to tabulate and convert all metadata files from Planet
Item and Asset types for Ingestion into GEE
```
### GBDX Tools
This is a simple CLI for Digital Globe's GBDX platform, designed from the perspective of a community user (the freely available tier). The platform allows you to access all of DG's open data, open IKONOS data, and Landsat and Sentinel datasets. You can create a [notebook account here](https://notebooks.geobigdata.io). The notebook setup offers additional tools, a GUI and an interactive framework, while the CLI simplifies some of the operational needs of batch processing and performing calls from your own local machine. This tool will allow you to perform a simple search using a geometry to get an asset summary, export the metadata as JSON files, and export image footprints as combined and individual GeoJSON files.
```
simple_search Simple search to look for DG assets that intersect your AOI handles KML/SHP/GEOJSON
metadata Exports metadata for simple search into constitutent folders as JSON files
footprint Exports footprint for metadata files extracted earlier
and converts them to individual geometries (GeoJSON)and combined geometry (GeoJSON) file
```
### Satellogic Tools
This tool allows you to access the [open data shared by Satellogic](https://github.com/satellogic/open-impact), filter it, and pass a geometry object to get both micro (multiband) and macro (hyperspectral) rasters, metadata and a basic metadata list. The download tool is a multipart downloader that handles quick downloads. The metalist tool can be used to create a simple metadata list so you can batch upload imagery into GEE for analysis. The reproject tool is included to handle batch reprojections as needed. The tool uses a geometry passed as a GeoJSON object; to create one, go to [geojson.io](http://geojson.io). satlist produces the band list URLs, and you can then use the multiproc tool to download the links with multiprocessing.
```
satraster Filter and download Satellogic Imagery
satlist Get url for band list based on filtered Satellogic Imagery
multiproc Multiprocess based downloader based on satlist
satmeta Filter and download Satellogic Metadata
metalist Generates Basic Metadata list per scene for Satellogic Imagery
reproject Batch reproject rasters using EPSG code
```
### GEE Tools
This tool exposes the gee2drive functionality to explore, match and export existing collections in GEE. Export requires all the band types to be of the same kind. For the past couple of months I have [maintained a catalog of the most current Google Earth Engine assets](https://github.com/samapriya/Earth-Engine-Datasets-List) within their raster data catalog, and I update this list every week. This tool downloads the most current version of this list and allows the user to explore band types and export a collection as needed.
```
eerefresh Refreshes your personal asset list and GEE Asset list
idsearch Does possible matches using asset name to give you asseth id/full path
intersect Exports a report of all assets(Personal & GEE) intersecting with provided geometry
bandtype Prints GEE bandtype and generates list to be used for export
export Export GEE Collections based on filter
```
## Changelog
### v0.0.4
- Fixed issue with Shapely install on windows
- Updated credrefresh to better refresh gbdx tokens
### v0.0.3
- Added better filename parsing for Satellogic images
- Added error handling for multiprocessing download of Satellogic images
### v0.0.2
- Now searches for all DG and non DG assets available within GBDX
- Added capability to create url list for rasters and download support using multiprocessing
|
/satadd-0.0.4.tar.gz/satadd-0.0.4/README.md
| 0.812123 | 0.969671 |
README.md
|
pypi
|
import pygame as pg
from SatanGameEngine.Colors import *
import time
class Rect:
def __init__(self, window, x=0, y=0, width=0, height=0, color=COLOR.WHITE, image=None):
self.window = window
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
self.image = image
def draw(self):
if self.image:
self.window.blit(self.image, (self.x, self.y))
else:
pg.draw.rect(self.window, self.color, (self.x, self.y, self.width, self.height))
def update(self):
pass
def handle_input(self):
pass
def get_new_pos(self):
# Return the updated position
return (self.x, self.y)
class Player(Rect):
def __init__(self, window, x=0, y=0, width=0, height=0, color=COLOR.WHITE, speed=1, can_sprint=True, sprint_multiplier=2, image=None):
super().__init__(window, x, y, width, height, color)
self.window = window
self.x = x
self.y = y
self.width = width
self.height = height
self.color = color
self.image = image
self.keys = []
self.velocity_x = 0 # Velocity in x direction
self.velocity_y = 0 # Velocity in y direction
        self.speed = speed
        self.base_speed = speed  # Remember the configured speed so sprinting can scale it
        self.can_sprint = can_sprint
        self.sprinting = False
        self.sprint_multiplier = sprint_multiplier  # Speed multiplier when sprinting
def update(self):
self.x += self.velocity_x
self.y += self.velocity_y
def handle_input(self):
self.keys = pg.key.get_pressed()
if self.keys[pg.K_w]:
self.velocity_y = -self.speed
elif self.keys[pg.K_s]:
self.velocity_y = self.speed
else:
self.velocity_y = 0
if self.keys[pg.K_a]:
self.velocity_x = -self.speed
elif self.keys[pg.K_d]:
self.velocity_x = self.speed
else:
self.velocity_x = 0
        if self.keys[pg.K_LSHIFT] and self.can_sprint:
            self.sprinting = True
            self.speed = self.base_speed * self.sprint_multiplier  # Apply the multiplier to the base speed
        else:
            self.sprinting = False
            self.speed = self.base_speed
class Background(Rect):
def __init__(self, window, image=None):
super().__init__(window, 0, 0, window.get_width(), window.get_height())
self.image = image
def draw(self):
if self.image:
self.window.blit(self.image, (0, 0))
else:
super().draw()
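# Usage sketch (illustrative only): a minimal game loop that moves a Player
# with WASD and sprints with left shift, using only the classes defined above.
if __name__ == "__main__":
    pg.init()
    window = pg.display.set_mode((640, 480))
    player = Player(window, x=100, y=100, width=32, height=32, speed=3)
    clock = pg.time.Clock()
    running = True
    while running:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                running = False
        player.handle_input()
        player.update()
        window.fill((0, 0, 0))
        player.draw()
        pg.display.flip()
        clock.tick(60)
    pg.quit()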
|
/satan_game_engine-0.0.1-py3-none-any.whl/SatanGameEngine/GameObjects.py
| 0.646237 | 0.168823 |
GameObjects.py
|
pypi
|
import os
from typing import List
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
BLOCK_LENGTH = 16
class SatelEncryption:
"""Encryptor and decryptor for Satel integration protocol.
:param integration_key:
Satel integration key to be used for encrypting and decrypting data.
"""
def __init__(self, integration_key: str):
encryption_key = self.integration_key_to_encryption_key(
integration_key)
self.cipher = Cipher(algorithms.AES(encryption_key), modes.ECB())
@classmethod
def integration_key_to_encryption_key(cls, integration_key: str) -> bytes:
"""Convert Satel integration key into encryption key.
:param integration_key: Satel integration key
:returns: encryption key
"""
integration_key_bytes = bytes(integration_key, 'ascii')
key = [0] * 24
for i in range(12):
key[i] = key[i + 12] = integration_key_bytes[i] if len(
integration_key_bytes) > i else 0x20
return bytes(key)
@classmethod
def _bytes_to_blocks(cls, message: bytes, block_len: int) -> List[bytes]:
"""Split message into list of blocks of equal length."""
return [message[i:i + block_len] for i in
range(0, len(message), block_len)]
def encrypt(self, data: bytes) -> bytes:
"""Encrypt protocol data unit.
:param data: data to be encrypted
:returns: encrypted data
"""
if len(data) < BLOCK_LENGTH:
data += b'\x00' * (BLOCK_LENGTH - len(data))
encrypted_data = []
encryptor = self.cipher.encryptor()
cv = [0] * BLOCK_LENGTH
cv = list(encryptor.update(bytes(cv)))
for block in self._bytes_to_blocks(data, BLOCK_LENGTH):
p = list(block)
if len(block) == BLOCK_LENGTH:
p = [a ^ b for a, b in zip(p, cv)]
p = list(encryptor.update(bytes(p)))
cv = list(p)
else:
cv = list(encryptor.update(bytes(cv)))
p = [a ^ b for a, b in zip(p, cv)]
encrypted_data += p
return bytes(encrypted_data)
def decrypt(self, data: bytes) -> bytes:
"""Decrypt message.
:param data: data to be decrypted
:returns: decrypted data
"""
decrypted_data = []
cv = [0] * BLOCK_LENGTH
decryptor = self.cipher.decryptor()
encryptor = self.cipher.encryptor()
cv = list(encryptor.update(bytes(cv)))
for block in self._bytes_to_blocks(data, BLOCK_LENGTH):
temp = list(block)
c = list(block)
if len(block) == BLOCK_LENGTH:
c = list(decryptor.update(bytes(c)))
c = [a ^ b for a, b in zip(c, cv)]
cv = list(temp)
else:
cv = list(encryptor.update(bytes(cv)))
c = [a ^ b for a, b in zip(c, cv)]
decrypted_data += c
return bytes(decrypted_data)
class EncryptedCommunicationHandler:
"""Handler for Satel encrypted communications.
:param integration_key:
Satel integration key to be used for encrypting and decrypting data.
"""
next_id_s: int = 0
def __init__(self, integration_key: str):
self._rolling_counter: int = 0
        # There will be a new value of id_s for each instance. As there will
        # usually be only one client this doesn't have much use; however, the
        # id_s value may show how many reconnections there were.
self._id_s: int = EncryptedCommunicationHandler.next_id_s
EncryptedCommunicationHandler.next_id_s += 1
self._id_r: int = 0
self._satel_encryption = SatelEncryption(integration_key)
def _prepare_header(self) -> bytes:
header = (os.urandom(2) +
self._rolling_counter.to_bytes(2, 'big') +
self._id_s.to_bytes(1, 'big') +
self._id_r.to_bytes(1, 'big'))
self._rolling_counter += 1
self._rolling_counter &= 0xFFFF
self._id_s = header[4]
return header
def prepare_pdu(self, message: bytes) -> bytes:
"""Prepare protocol data unit.
:param message: message to be included in PDU
:returns: encrypted PDU with given message
"""
pdu = self._prepare_header() + message
encrypted_pdu = self._satel_encryption.encrypt(pdu)
return encrypted_pdu
def extract_data_from_pdu(self, pdu: bytes) -> bytes:
"""Extract data from protocol data unit.
:param pdu: PDU from which a message to be extracted
:returns: extracted message
"""
decrypted_pdu = self._satel_encryption.decrypt(pdu)
header = decrypted_pdu[:6]
data = decrypted_pdu[6:]
self._id_r = header[4]
if (self._id_s & 0xFF) != decrypted_pdu[5]:
raise RuntimeError(
f'Incorrect value of ID_S, received \\x{decrypted_pdu[5]:x} '
f'but expected \\x{self._id_s:x}\n'
'Decrypted data: %s' % ''.join(
'\\x{:02x}'.format(x) for x in decrypted_pdu))
return bytes(data)
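# Usage sketch (illustrative only): a round trip with a made-up integration
# key, showing that decrypt() recovers the zero-padded plaintext produced by
# encrypt().
if __name__ == "__main__":
    enc = SatelEncryption("example-key-1")
    ciphertext = enc.encrypt(b"hello satel")
    plaintext = enc.decrypt(ciphertext)
    print(plaintext.rstrip(b"\x00"))  # b'hello satel'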
|
/satel_integra2-0.4.2.tar.gz/satel_integra2-0.4.2/satel_integra2/encryption.py
| 0.855066 | 0.479077 |
encryption.py
|
pypi
|
from math import pi
from .utils import heavenly_body_radius
import warnings
class Satellite(object):
def __init__(self, name, altitude, eccentricity, inclination, right_ascension, perigee, ta, beam,
focus="earth", rads=True):
self._name = name
self._altitude = altitude
self._focus = focus
self._true_alt = self.altitude + self.__get_radius()
self._eccentricity = eccentricity
self._beam = beam
if not rads:
self.inclination = inclination
self.right_ascension = right_ascension
self.perigee = perigee
self.ta = ta
self.inclination_r, self.right_ascension_r, self.perigee_r, self.ta_r = self.__convert_to_rads()
else:
self.inclination_r = inclination
self.right_ascension_r = right_ascension
self.perigee_r = perigee
self.ta_r = ta
self.inclination, self.right_ascension, self.perigee, self.ta = self.__convert_to_degs()
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def altitude(self):
return self._altitude
@altitude.setter
def altitude(self, new_alt):
if new_alt < 100:
return ValueError("Satellite's orbital altitude must be over the Karman line.")
else:
self._altitude = new_alt
self._true_alt = new_alt + self.__get_radius()
@property
def true_alt(self):
return self._true_alt
@property
def eccentricity(self):
return self._eccentricity
@eccentricity.setter
def eccentricity(self, new_e):
if new_e < 0:
return ValueError("Eccentricity can't be set below a perfect circle.")
else:
self._eccentricity = new_e
@property
def beam(self):
return self._beam
@beam.setter
def beam(self, new_beam):
        if new_beam < 0:
            raise ValueError("Beam width must be between 0 and 180 degrees")
        elif new_beam > 180:
            raise ValueError("Beam width must be between 0 and 180 degrees")
        else:
            self._beam = new_beam
def __convert_to_rads(self, value=None):
to_rad = pi / 180
if value:
return value * to_rad
else:
return self.inclination * to_rad, self.right_ascension * to_rad, self.perigee * to_rad, self.ta * to_rad
def __convert_to_degs(self, value=None):
to_deg = 180 / pi
if value:
return value * to_deg
else:
return self.inclination_r * to_deg, self.right_ascension_r * to_deg, self.perigee_r * to_deg, \
self.ta_r * to_deg
def __get_radius(self):
return heavenly_body_radius[self._focus.lower()]
def __repr__(self):
return "{0}, {1}, {2}, {3}, {4}, {5}, {6}".format(self.name, self.altitude, self.eccentricity,
self.inclination, self.right_ascension, self.perigee, self.ta)
def __str__(self):
return "Satellite Name: {0}, Alt: {1}, e: {2}, " \
"Inclination: {3}, RA: {4}, Periapsis: {5}, Anomaly: {6}".format(self.name, self.altitude,
self.eccentricity, self.inclination,
self.right_ascension, self.perigee,
self.ta)
def as_dict(self, rads=True):
if rads:
sat = {"Name": self.name,
"Orbital Elements": {
"Eccentricity": self.eccentricity,
"Right Ascension": self.right_ascension_r,
"Semi-major Axis": self.true_alt,
"Arg. Periapsis": self.perigee_r,
"Mean Anomaly": self.ta_r,
"Inclination": self.inclination_r
},
"Beam Width": self.beam}
else:
sat = {"Name": self.name,
"Orbital Elements": {
"Eccentricity": self.eccentricity,
"Right Ascension": self.right_ascension,
"Semi-major Axis": self.true_alt,
"Arg. Periapsis": self.perigee,
"Mean Anomaly": self.ta,
"Inclination": self.inclination
},
"Beam Width": self.beam}
sat['Focus'] = self._focus
sat['Type'] = 'satellite'
return sat
def as_xml(self, epoch_date='2017-Jan-18 00:00:00', fov=1):
warnings.warn("XML support is depreciated and not supported from PIGI 0.8.5 onward", DeprecationWarning)
return '\t\t< Entity Type = "Satellite" Name = "{0}" >\n' \
'\t\t\t<PropertySection Name="UserProperties">\n' \
'\t\t\t\t<StringPropertyValue name="PlanetName" value="Earth"/>\n' \
'\t\t\t\t<StringPropertyValue name="CatalogName" value="{0}"/>\n' \
'\t\t\t\t<StringPropertyValue name="MeshName" value="SaberBox.mesh"/>\n' \
'\t\t\t\t<StringPropertyValue name="BindingsFile" value=""/>\n' \
'\t\t\t\t<IntPropertyValue name="ManualOrbitalElements" value="0"/>\n' \
'\t\t\t\t<StringPropertyValue name="AssemblyFile" value=""/>\n' \
'\t\t\t\t<StringPropertyValue name="SystemMapSourceId" value=""/>\n' \
'\t\t\t\t<StringPropertyValue name="ResourceGroup" value="Autodetect"/>\n' \
'\t\t\t\t<StringPropertyValue name="MetricSourceIds" value=""/>\n' \
'\t\t\t\t<FloatPropertyValue name="BeamWidth" value="{1}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="SGP4 Parameters">\n' \
'\t\t\t\t<FloatPropertyValue name="B Star" value="-1.1606e-005"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Eccentricity" value="{2}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="RAAN" value="{3}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Semi-major axis" value="{4}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Arg. Perigee" value="{5}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Mean Anomaly" value="{6}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Inclination" value="{7}"/>\n' \
'\t\t\t\t<TimestampPropertyValue name="Epoch" value="{8}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Fov">\n' \
'\t\t\t\t<EnumPropertyValue name="Enabled" value="{9}"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="TargetSceneNode">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[200, 200, 200]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Billboard">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[200, 200, 200]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Favourite">\n' \
'\t\t\t\t<EnumPropertyValue name="favourite" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t</Entity>\n'.format(self.name, self.beam, self.eccentricity, self.right_ascension_r,
self.true_alt, self.perigee_r, self.ta_r, self.inclination_r,
epoch_date, fov)
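# Usage sketch (illustrative only): build a satellite from degree-valued
# orbital elements (the values below are arbitrary) and inspect the result.
if __name__ == "__main__":
    demo = Satellite("DemoSat", altitude=550, eccentricity=0.001, inclination=53,
                     right_ascension=10, perigee=0, ta=0, beam=40, rads=False)
    print(demo)
    print(demo.as_dict(rads=False)["Orbital Elements"])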
|
/satellite_constellation-0.2.2-py3-none-any.whl/satellite_constellation/Satellite.py
| 0.887853 | 0.272629 |
Satellite.py
|
pypi
|
import math
import numpy as np
def mod(x, y):
"""
Mappable modulo function
:param x: First number
:param y: Second number
:return: x % y
"""
return [a % b for a, b in zip(x, y)]
heavenly_body_radius = { # [km]
"earth": 6371,
"luna": 1737,
"mars": 3390,
"venus": 6052,
"mercury": 2440,
"sol": 695700,
"jupiter": 69911,
"saturn": 58232,
"uranus": 25362,
"neptune": 24622,
"pluto": 1188,
}
heavenly_body_mass = { # [kg]
"earth": 5.972*10**24,
"luna": 73.46*10**21,
"mars": 641.71*10**21,
"venus": 4867.5*10**21,
"mercury": 330.11*10**21,
"sol": 1.9885*10**30,
"jupiter": 1.8982*10**27,
"saturn": 5.6834*10**26,
"uranus": 8.6810*10**25 ,
"neptune": 1.02413*10**26,
"pluto": 13.03*10**21,
}
heavenly_body_period = { # [days]
"earth": 1,
"luna": 27.321661,
"mars": 1.02595675,
"venus": 243.0187,
"mercury": 58.6462,
"sol": 25.379995,
"jupiter": 0.41007,
"saturn": 0.426,
"uranus": 0.71833,
"neptune": 0.67125,
"pluto": 6.38718,
}
constants = {
"G" : 6.67408*10**(-11), # Gravitational constant [m^3 kg^-1 s^-2]
"wE" : 7.2921159*10**(-5), # Earth angular velocity [rad/s]
"J2E" : 10826269*10**(-3), # Earth J2 constant
}
def proper_round(num, dec=0): # Add exception check for no decimal point found
num = str(num)[:str(num).index('.')+dec+2]
if num[-1]>='5':
return float(num[:-2-(not dec)]+str(int(num[-2-(not dec)])+1))
return float(num[:-1])
def polar2cart(r, phi, theta):
return [
r * math.sin(phi) * math.cos(theta),
r * math.sin(theta) * math.sin(phi),
r * math.cos(phi)
]
def rotate(vec, ang, ax='x'):
if ax == 'x':
r_x = np.array([[1, 0, 0],
[0, math.cos(ang), -1 * math.sin(ang)],
[0, math.sin(ang), math.cos(ang)]])
return np.matmul(r_x, vec)
elif ax == 'y':
r_y = np.array([[math.cos(ang), 0, math.sin(ang)],
[0, 1, 0],
[-math.sin(ang), 0, math.cos(ang)]])
return np.matmul(r_y, vec)
elif ax == 'z':
r_z = np.array([[math.cos(ang), -math.sin(ang), 0],
[math.sin(ang), math.cos(ang), 0],
[0, 0, 1]])
return np.matmul(r_z, vec)
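# Usage sketch (illustrative only): a quick numerical check of the helpers.
# A unit vector on the +X axis rotated 90 degrees about Z should land on +Y.
if __name__ == "__main__":
    point = polar2cart(1, math.pi / 2, 0)              # approximately [1, 0, 0]
    rotated = rotate(np.array(point), math.pi / 2, ax='z')
    print(np.round(rotated, 6))                        # approximately [0. 1. 0.]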
|
/satellite_constellation-0.2.2-py3-none-any.whl/satellite_constellation/utils.py
| 0.68458 | 0.558026 |
utils.py
|
pypi
|
import warnings
class GroundStation(object):
def __init__(self, name, lat, long, elevation, beam_width):
self.__name = name
self.__lat = lat
self.__long = long
self.__elevation = elevation
self.__beam = beam_width
@property
def name(self):
return self.__name
@name.setter
def name(self, new_name):
self.__name = new_name
@property
def lat(self):
return self.__lat
@lat.setter
def lat(self, new_lat):
if (new_lat < -90) or (new_lat > 90):
return ValueError("Latitude must be between -90 and 90")
else:
self.__lat = new_lat
@property
def long(self):
return self.__long
@long.setter
def long(self, new_long):
if (new_long < -180) or (new_long > 180):
return ValueError("Longitude must be between -180 and 180")
else:
self.__long = new_long
@property
def elevation(self):
return self.__elevation
@elevation.setter
def elevation(self, new_elev):
        if new_elev < 0:
            raise ValueError("Elevation must be above 0")
        if new_elev > 8900:
            raise ValueError("Elevation must be on the ground")
        else:
            self.__elevation = new_elev
@property
def beam(self):
return self.__beam
@beam.setter
def beam(self, new_beam):
if (new_beam < 0) or (new_beam > 180):
return ValueError("Beam width must be between 0 and 180 degrees")
self.__beam = new_beam
def as_xml(self):
warnings.warn("XML support is depreciated and not supported from PIGI 0.8.5 onward", DeprecationWarning)
return '\t\t<Entity Type="GroundStation" Name="{0}">\n' \
'\t\t\t<PropertySection Name="UserProperties">\n' \
'\t\t\t\t<FloatPropertyValue name="Latitude" value="{1}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Longitude" value="{2}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Elevation" value="{3}"/>\n' \
'\t\t\t\t<FloatPropertyValue name="BeamWidth" value="{4}"/>\n' \
'\t\t\t\t<StringPropertyValue name="PlanetName" value="Earth"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Animation">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[6339.69, -699.193, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[1, 1, 1]"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Time Input">\n' \
'\t\t\t\t<TimestampPropertyValue name="Timepoint" value="2016-May-07 08:32:21.059611"/>\n' \
'\t\t\t\t<DurationPropertyValue name="Duration" value="2"/>\n' \
'\t\t\t\t<DurationPropertyValue name="StartOffset" value="-1"/>\n' \
'\t\t\t\t<DurationPropertyValue name="Timestep" value="0.000694444"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Favourite">\n' \
'\t\t\t\t<EnumPropertyValue name="favourite" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Mesh">\n' \
'\t\t\t\t<ArrayPropertyValue name="Position" value="[0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Orientation" value="[1, 0, 0, 0]"/>\n' \
'\t\t\t\t<ArrayPropertyValue name="Scale" value="[500, 500, 500]"/>\n' \
'\t\t\t\t<StringPropertyValue name="name" value="GroundStation.mesh"/>\n' \
'\t\t\t\t<EnumPropertyValue name="Debug" value="0"/>\n' \
'\t\t\t\t<StringPropertyValue name="group" value="SolarSystem"/>\n' \
'\t\t\t\t<EnumPropertyValue name="visibility" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t\t<PropertySection Name="Look Angles">\n' \
'\t\t\t\t<ArrayPropertyValue name="LatLon" value="[0, 0]"/>\n' \
'\t\t\t\t<FloatPropertyValue name="Elevation" value="0"/>\n' \
'\t\t\t</PropertySection>\n' \
'\t\t</Entity>\n'.format(self.name, self.lat, self.long, self.elevation, self.beam)
def __repr__(self):
return "{0}, {1}, {2}, {3}, {4}".format(self.name, self.lat, self.long, self.elevation, self.beam)
def __str__(self):
return "Ground Station: {0}\n" \
"Latitude: {1}, Longitude: {2}" \
"Elevation: {3}, Beam Width: [4}".format(self.name, self.lat, self.long, self.elevation, self.beam)
def as_dict(self):
return {"Name": self.name,
"Latitude": self.lat,
"Longitude": self.long,
"Elevation": self.elevation,
"Beam Width": self.beam,
"Type": 'station'}
|
/satellite_constellation-0.2.2-py3-none-any.whl/satellite_constellation/GroundStation.py
| 0.807271 | 0.351895 |
GroundStation.py
|
pypi
|
import matplotlib.pyplot as plt
from satellite_constellation.Constellation import *
from satellite_constellation.utils import *
def draw_walker(walker_constellation):
r = walker_constellation.altitude + heavenly_body_radius[walker_constellation.focus]
t = np.linspace(0, 2 * math.pi, 100)
x1, y1, z1 = r * np.cos(t), r * np.sin(t), 0 * t
fig = plt.figure()
fig.canvas.manager.set_window_title('Walker Constellation')
perspectives = [[0, 0], [90, 0], [45, 45]]
ax = [plt.subplot(2, 2, 1, projection='3d'), plt.subplot(2, 2, 2, projection='3d'),
plt.subplot(2, 2, 3, projection='3d')]
for idx in range(3):
ax[idx].view_init(elev=perspectives[idx][0], azim=perspectives[idx][1])
ax[idx].set_xlim(-r, r)
ax[idx].set_ylim(-r, r)
ax[idx].set_zlim(-r, r)
ax[idx].plot(x1, y1, z1, '--', linewidth=0.1, color='r') # Plot equatorial circle
ax[idx].zaxis.set_tick_params(labelsize=3)
ax[idx].xaxis.set_tick_params(labelsize=3)
ax[idx].yaxis.set_tick_params(labelsize=3)
ax[idx].set_xlabel("X", fontsize=3)
ax[idx].set_ylabel("Y", fontsize=3)
ax[idx].set_zlabel("Z", fontsize=3)
for idy in range(walker_constellation.num_planes): # Plot orbital planes
ang = idy * 360 / walker_constellation.num_planes
t = np.linspace(0, 2 * math.pi, 100)
plt.plot(r * np.cos(t), r * np.sin(t), 0)
x, y, z = r * np.cos(t), r * np.sin(t), 0 * t
for idz in range(100):
coords = np.array([x[idz], y[idz], z[idz]])
rot_coords = rotate(coords, walker_constellation.inclination * math.pi / 180, 'x')
rot_coords = rotate(rot_coords, ang * math.pi / 180, 'z')
x[idz] = rot_coords[0]
y[idz] = rot_coords[1]
z[idz] = rot_coords[2]
ax[idx].plot(x, y, z, '--', linewidth=0.5)
for idy in range(walker_constellation.num_planes): # Plot satellites
for idz in range(walker_constellation.sats_per_plane):
ctr = idz + idy * (walker_constellation.sats_per_plane)
                x_i, y_i, z_i = polar2cart(r, 90 * math.pi / 180,
                                           (walker_constellation.perigee_positions[ctr]
                                            + walker_constellation.ta[ctr]
                                            + walker_constellation.raan[ctr]) * math.pi / 180)
coords = np.array([x_i, y_i, z_i])
coords = rotate(coords, 90 * math.pi / 180, 'z')
coords = rotate(coords, walker_constellation.inclination * math.pi / 180, 'x')
coords = rotate(coords, (walker_constellation.raan[ctr]) * math.pi / 180, 'z')
ax[idx].scatter(coords[0], coords[1], coords[2])
return fig
def draw_flower(flower_constellation):
a = flower_constellation.semi_major
b = a * math.sqrt(1 - math.pow(flower_constellation.eccentricity, 2))
f = (flower_constellation.altitude + heavenly_body_radius[flower_constellation.focus]) * 10 ** 3
disp = a - f
t = np.linspace(0, 2 * math.pi, 100)
r = heavenly_body_radius[flower_constellation.focus] * 10 ** 3
x1, y1, z1 = r * np.cos(t), r * np.sin(t), 0 * t
x2, y2, z2 = r * np.cos(t), 0 * t, r * np.sin(t)
x3, y3, z3 = 0*t, r * np.cos(t), r * np.sin(t)
fig = plt.figure()
fig.canvas.manager.set_window_title('Flower Constellation')
r = a
perspectives = [[0, 0], [90, 0], [45, 45]]
ax = [plt.subplot(2, 2, 1, projection='3d'), plt.subplot(2, 2, 2, projection='3d'),
plt.subplot(2, 2, 3, projection='3d')]
for idx in range(3):
ax[idx].view_init(elev=perspectives[idx][0], azim=perspectives[idx][1])
ax[idx].set_xlim(-3 / 2 * a, 3 / 2 * a)
ax[idx].set_ylim(-3 / 2 * a, 3 / 2 * a)
ax[idx].set_zlim(-3 / 2 * a, 3 / 2 * a)
ax[idx].plot(x1, y1, z1, '--', linewidth=0.1, color='r') # Plot equatorial circle
ax[idx].plot(x2, y2, z2, '--', linewidth=0.1, color='r') # Plot equatorial circle
ax[idx].plot(x3, y3, z3, '--', linewidth=0.1, color='r') # Plot equatorial circle
ax[idx].zaxis.set_tick_params(labelsize=3)
ax[idx].xaxis.set_tick_params(labelsize=3)
ax[idx].yaxis.set_tick_params(labelsize=3)
ax[idx].set_xlabel("X", fontsize=3)
ax[idx].set_ylabel("Y", fontsize=3)
ax[idx].set_zlabel("Z", fontsize=3)
for idy in range(min(flower_constellation.num_orbits,flower_constellation.num_satellites)): # Plot orbital planes
x, y, z = disp + a * np.cos(t), b * np.sin(t), 0 * t
for idz in range(100):
coords = np.array([x[idz], y[idz], z[idz]])
coords = rotate(coords, flower_constellation.raan[idy] * math.pi / 180, 'z')
coords = rotate(coords, flower_constellation.inclination * math.pi / 180, 'x')
x[idz] = coords[0]
y[idz] = coords[1]
z[idz] = coords[2]
ax[idx].plot(x, y, z, '--', linewidth=0.5)
for idy in range(flower_constellation.num_satellites): # Plot satellites
ang = (flower_constellation.true_anomaly[idy] + 180) * math.pi / 180
x_i, y_i, z_i = disp + a * np.cos(ang), b * np.sin(ang), 0
coords = np.array([x_i, y_i, z_i])
coords = rotate(coords, flower_constellation.raan[idy] * math.pi / 180, 'z')
coords = rotate(coords, flower_constellation.inclination * math.pi / 180, 'x')
ax[idx].scatter(coords[0], coords[1], coords[2], s=2)
return fig
|
/satellite_constellation-0.2.2-py3-none-any.whl/satellite_constellation/visualiser.py
| 0.763219 | 0.633439 |
visualiser.py
|
pypi
|
import numpy as np
import pandas as pd
import xarray as xr
import metpy.calc as mpcalc
from metpy.units import units
from weather.utils import (
extract_coordinates,
extract_latlons,
)
xr.set_options(keep_attrs=True)
@xr.register_dataset_accessor('copebr')
class CopeBRDatasetExtension:
"""
This class is an `xr.Dataset` extension. It works as a dataset
layer with the purpose of enhancing the dataset with new methods.
    The expected input dataset is a `netCDF4` file from the Copernicus API;
    this extension works on certain data variables, and the method that
    extracts them with the correct parameters can be found in the
    `extract_reanalysis` module.
Usage:
```
import satellite_weather_downloader as sat
ds = sat.load_dataset('file/path')
RJ_geocode = 3304557
rio_df = ds.copebr.to_dataframe(RJ_geocode)
rio_ds = ds.copebr.ds_from_geocode(RJ_geocode)
```
The original dataset will be parsed into Brazilian's data format and can
be sliced by a Geocode from any City in Brazil, according to IBGE geocodes.
    The expected output when the requested data is not `raw` is:
date : datetime object.
temp_min : Minimum┐
    temp_med   : Average├─ temperature in `celsius degrees` given a geocode.
temp_max : Maximum┘
precip_min : Minimum┐
precip_med : Average├─ of total precipitation in `mm` given a geocode.
precip_max : Maximum┘
pressao_min: Minimum┐
    pressao_med: Average├─ sea level pressure in `atm` given a geocode.
pressao_max: Maximum┘
umid_min : Minimum┐
umid_med : Average├─ percentage of relative humidity given a geocode.
umid_max : Maximum┘
"""
def __init__(self, xarray_ds: xr.Dataset) -> None:
self._ds = xarray_ds
def ds_from_geocode(self, geocode: int, raw=False) -> xr.Dataset:
"""
This is the most important method of the extension. It will
slice the dataset according to the geocode provided, do the
math and the parse of the units to Br's format, and reduce by
min, mean and max by day, if the `raw` is false.
Attrs:
geocode (str or int): Geocode of a city in Brazil according to IBGE.
raw (bool) : If raw is set to True, the DataFrame returned will
contain data in 3 hours intervals. Default return
will aggregate these values into 24 hours interval.
Returns:
xr.Dataset: The final dataset with the data parsed into Br's format.
if not `raw`, will group the data by day, taking it's
min, mean and max values. If `raw`, the data corresponds
to a 3h interval range for each day in the dataset.
"""
lats, lons = self._get_latlons(geocode)
geocode_ds = self._convert_to_br_units(
self._slice_dataset_by_coord(
dataset=self._ds, lats=lats, lons=lons
)
)
if raw:
return geocode_ds
gb = geocode_ds.resample(time='1D')
gmin, gmean, gmax = (
self._reduce_by(gb, np.min, 'min'),
self._reduce_by(gb, np.mean, 'med'),
self._reduce_by(gb, np.max, 'max'),
)
final_ds = xr.combine_by_coords([gmin, gmean, gmax], data_vars='all')
return final_ds
def to_dataframe(self, geocode: int, raw=False) -> pd.DataFrame:
"""
Returns a DataFrame with the values related to the geocode of a brazilian
city according to IBGE's format. Extract the values using `ds_from_geocode()`
and return `xr.Dataset.to_dataframe()` from Xarray, inserting the geocode into
the final DataFrame.
Attrs:
geocode (str or int): Geocode of a city in Brazil according to IBGE.
raw (bool) : If raw is set to True, the DataFrame returned will
contain data in 3 hours intervals. Default return
will aggregate these values into 24 hours interval.
Returns:
pd.DataFrame: Similar to `ds.copebr.ds_from_geocode(geocode).to_dataframe()`
but with an extra column with the geocode, in order to differ
the data when inserting into a database, for instance.
"""
ds = self.ds_from_geocode(geocode, raw)
df = ds.to_dataframe()
geocodes = [geocode for g in range(len(df))]
df.insert(0, 'geocodigo', geocodes)
return df
def _get_latlons(self, geocode: int) -> tuple:
"""
Extract Latitude and Longitude from a Brazilian's city
according to IBGE's geocode format.
"""
lat, lon = extract_latlons.from_geocode(int(geocode))
N, S, E, W = extract_coordinates.from_latlon(lat, lon)
lats = [N, S]
lons = [E, W]
match geocode:
case 4108304: # Foz do Iguaçu
lats = [-25.5]
lons = [-54.5, -54.75]
return lats, lons
def _slice_dataset_by_coord(
self, dataset: xr.Dataset, lats: list[int], lons: list[int]
) -> xr.Dataset:
"""
Slices a dataset using latitudes and longitudes, returns a dataset
with the mean values between the coordinates.
"""
ds = dataset.sel(latitude=lats, longitude=lons, method='nearest')
return ds.mean(dim=['latitude', 'longitude'])
def _convert_to_br_units(self, dataset: xr.Dataset) -> xr.Dataset:
"""
Parse the units according to Brazil's standard unit measures.
Rename their unit names and long names as well.
"""
ds = dataset
vars = list(ds.data_vars.keys())
if 't2m' in vars:
# Convert Kelvin to Celsius degrees
ds['t2m'] = ds.t2m - 273.15
ds['t2m'].attrs = {'units': 'degC', 'long_name': 'Temperatura'}
if 'tp' in vars:
# Convert meters to millimeters
ds['tp'] = ds.tp * 1000
ds['tp'] = ds.tp.round(5)
ds['tp'].attrs = {'units': 'mm', 'long_name': 'Precipitação'}
if 'msl' in vars:
# Convert Pa to ATM
ds['msl'] = ds.msl * 0.00000986923
ds['msl'].attrs = {
'units': 'atm',
'long_name': 'Pressão ao Nível do Mar',
}
if 'd2m' in vars:
# Calculate Relative Humidity percentage and add to Dataset
ds['d2m'] = ds.d2m - 273.15
rh = (
mpcalc.relative_humidity_from_dewpoint(
ds['t2m'] * units.degC, ds['d2m'] * units.degC
)
* 100
)
# Replacing the variable instead of dropping. d2m won't be used.
ds['d2m'] = rh
ds['d2m'].attrs = {
'units': 'pct',
'long_name': 'Umidade Relativa do Ar',
}
with_br_vars = {
't2m': 'temp',
'tp': 'precip',
'msl': 'pressao',
'd2m': 'umid',
}
return ds.rename(with_br_vars)
def _reduce_by(self, ds: xr.Dataset, func, prefix: str):
"""
Applies a function to each coordinate in the dataset and
replace the `data_vars` names to it's corresponding prefix.
"""
ds = ds.apply(func=func)
return ds.rename(
dict(
zip(
list(ds.data_vars),
list(map(lambda x: f'{x}_{prefix}', list(ds.data_vars))),
)
)
)
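# Usage sketch (illustrative only): the daily aggregation pattern used by
# ds_from_geocode(), shown on a tiny synthetic dataset instead of real
# Copernicus data: resample to one day, reduce by min/mean/max, suffix the
# variable names and merge the results.
if __name__ == "__main__":
    times = pd.date_range("2023-01-01", periods=16, freq="3H")
    toy = xr.Dataset({"temp": ("time", np.random.rand(16))}, coords={"time": times})
    daily = toy.resample(time="1D")
    parts = []
    for reducer, suffix in ((daily.min, "min"), (daily.mean, "med"), (daily.max, "max")):
        reduced = reducer()
        parts.append(reduced.rename({v: f"{v}_{suffix}" for v in reduced.data_vars}))
    print(xr.merge(parts))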
|
/satellite-copernicus-0.1.1.tar.gz/satellite-copernicus-0.1.1/weather/xr_extensions.py
| 0.862482 | 0.895796 |
xr_extensions.py
|
pypi
|
import datetime
from datetime import timedelta, datetime
import numpy as np
__author__ = 'Marcelo Ferreira da Costa Gomes'
"""
Return Brazilian epidemiological week from passed date
"""
def extractweekday(x=datetime):
# Extract weekday as [Sun-Sat] |-> [0-6]
# isoweekday() returns weekday with [Mon-Sun] as [1-7]
w = x.isoweekday() % 7
return w
def firstepiday(year=int):
day = datetime.strptime('%s-01-01' % year, '%Y-%m-%d')
day_week = extractweekday(day)
    # We want day1 to correspond to the first day of the first epiweek.
# That is, we need the Sunday corresponding to epiweek=%Y01
# If first day of the year is between Sunday and Wednesday,
# epiweek 01 includes it. Otherwise, it is still the last epiweek
# of the previous year
if day_week < 4:
day = day - timedelta(days=day_week)
else:
day = day + timedelta(days=(7 - day_week))
return day
def lastepiday(year=int):
day = datetime.strptime('%s-12-31' % year, '%Y-%m-%d')
day_week = extractweekday(day)
    # We want day to correspond to the last day of the last epiweek.
# That is, we need the corresponding Saturday
# If the last day of the year is between Sunday and Tuesday,
# epiweek 01 of the next year includes it.
# Otherwise, it is still the last epiweek of the current year
if day_week < 3:
day = day - timedelta(days=(day_week + 1))
else:
day = day + timedelta(days=(6 - day_week))
return day
def episem(x, sep='W', out='YW'):
"""
Return Brazilian corresponding epidemiological week from x.
:param x: Input date. Can be a string in the format %Y-%m-%d or
datetime
:param sep: Year and week separator.
:param out: Output format. 'YW' returns sep.join(epiyear,epiweek).
'Y' returns epiyear only. 'W' returns epiweek only.
:return: str
"""
x = str(x)
def out_format(year, week, out, sep='W'):
if out == 'YW':
return '%s%s%02d' % (year, sep, week)
if out == 'Y':
return '%s' % (year)
if out == 'W':
return '%02d' % week
if type(x) != datetime:
if str(x) == '' or x is None or (type(x) != str and np.isnan(x)):
return None
x = datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
epiyear = x.year
epiend = lastepiday(epiyear)
if x > epiend:
epiyear += 1
return out_format(epiyear, 1, out, sep)
epistart = firstepiday(epiyear)
# If current date is before its year first epiweek,
# then our base year is the previous one
if x < epistart:
epiyear -= 1
epistart = firstepiday(epiyear)
epiweek = int(((x - epistart) / 7).days) + 1
return out_format(epiyear, epiweek, out, sep)
def episem2date(epi_year_week: str, weekday: int = 0):
r"""
Function to obtain first day of corresponding Brazilian epidemiological
week provided
Function \code{episem2date} uses the Brazilian definition of epidemiological
week and returns the date corresponding to the provided epi. week, using the
requested week day. Uses Sunday as default, the first week day by the
Brazilian epi. week definition.
@see https://github.com/marfcg/leos.opportunity.estimator/
blob/master/R/episem2date.R
@name episem2date
@param epiyearweek Epidemiological week in the format "%Y\[*\]%W"
where Y and W defined by the Brazilian epidemiological week system.
The separator between Y and W is irrelevant. Ex.: 2014W02
@param weekday Week day to be used as representative of the epi. week.
Uses Date week day classification. 0: Sunday, 6:Saturday. Default: 0
@return Date corresponding to the Sunday of epiyearweek
@export
@examples
epiyearweek <- '2014W02'
episem2date(epiyearweek)
Test:
dt = datetime.now()
yw1 = int(episem(dt, sep=''))
dt1 = episem2date(yw1)
yw2 = int(episem(dt1, sep=''))
assert yw1 == yw2
:param epi_year_week:
:param weekday:
:return:
"""
# force str format
epi_year_week = str(epi_year_week)
# Separate year and week:
if len(epi_year_week) not in [6, 7]:
raise Exception('Epi Year Week not valid.')
epiyear = int(epi_year_week[:4])
epiweek = int(epi_year_week[-2:])
# Obtain sunday of first epiweek of epiyear
# day.one
date_1 = datetime.strptime('%s-01-01' % epiyear, '%Y-%m-%d')
# day.one.week
date_1_w = int(date_1.strftime('%w'))
# Check whether the week day of Jan 1st was before or after a Wednesday
# and set the start of the epiyear accordingly
epiweek_day_1 = (
date_1 - timedelta(days=date_1_w)
if date_1_w <= 3
else date_1 + timedelta(days=7 - date_1_w)
)
return epiweek_day_1 + timedelta(days=7 * (epiweek - 1) + weekday)
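# --- Usage sketch (added for illustration) ---
# Round trip between a date and its Brazilian epidemiological week. A fixed
# timestamp is used because `episem` expects either a datetime or a string in
# the '%Y-%m-%d %H:%M:%S' format.
if __name__ == '__main__':
    sample = datetime(2023, 7, 15)
    year_week = episem(sample, sep='')    # e.g. '202328'
    sunday = episem2date(int(year_week))  # Sunday that opens that epi. week
    assert episem(sunday, sep='') == year_week
    print(year_week, sunday.date())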
|
/satellite-copernicus-0.1.1.tar.gz/satellite-copernicus-0.1.1/weather/utils/episem.py
| 0.680772 | 0.552841 |
episem.py
|
pypi
|
from weather.utils.globals import LATITUDES, LONGITUDES
def from_latlon(lat, lon) -> tuple:
"""
First, the closest coordinate to the city is found. The relative position within
the quadrant is then calculated to define the other three coordinates, whose
values will later be averaged for a specific city. Take, for example, the
center of a city represented by the dot:
N
┌──────┬──────┐
│ │ │
│ 2 1 │
│ │ │
W │ ─── ─┼─ ─── │ E
│ │ │
│ 3 4 │
│ . │ │
└──────┴──────┘
▲ S
│
closest coord
The other three coordinates are used to obtain a measurement as close as possible
to the city center in case the closest coordinate is far from it.
Let's take, for instance, Rio de Janeiro. Rio's center coordinates are:
latitude: -22.876652
longitude: -43.227875
The closest data point collected would be the coordinate: (-23.0, -43.25). In some
cases, the closest point is still far from the city itself, or could be in the sea,
for example. Because of this, other three coordinates will be inserted and then
the average value is returned from a certain date, defined in
`extract_reanalysis.download()` method. The coordinates returned from Rio would be:
[-23.0, -43.25] = S, W
[-22.75, -43.0] = N, E
"""
closest_lat = min(LATITUDES, key=lambda x: abs(x - lat))
closest_lon = min(LONGITUDES, key=lambda x: abs(x - lon))
first_quadr = lat - closest_lat < 0 and lon - closest_lon < 0
second_quadr = lat - closest_lat > 0 and lon - closest_lon < 0
third_quadr = lat - closest_lat > 0 and lon - closest_lon > 0
fourth_quadr = lat - closest_lat < 0 and lon - closest_lon > 0
if first_quadr:
north, east = closest_lat, closest_lon
i_south = [i - 1 for i, x in enumerate(LATITUDES) if x == north].pop()
i_west = [i - 1 for i, y in enumerate(LONGITUDES) if y == east].pop()
south, west = LATITUDES[i_south], LONGITUDES[i_west]
if second_quadr:
north, west = closest_lat, closest_lon
i_south = [i - 1 for i, x in enumerate(LATITUDES) if x == north].pop()
i_east = [i + 1 for i, y in enumerate(LONGITUDES) if y == west].pop()
south, east = LATITUDES[i_south], LONGITUDES[i_east]
if third_quadr:
south, west = closest_lat, closest_lon
i_north = [i + 1 for i, x in enumerate(LATITUDES) if x == south].pop()
i_east = [i + 1 for i, y in enumerate(LONGITUDES) if y == west].pop()
north, east = LATITUDES[i_north], LONGITUDES[i_east]
if fourth_quadr:
south, east = closest_lat, closest_lon
i_north = [i - 1 for i, x in enumerate(LATITUDES) if x == south].pop()
i_west = [i - 1 for i, y in enumerate(LONGITUDES) if y == east].pop()
north, west = LATITUDES[i_north], LONGITUDES[i_west]
return north, south, east, west
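# --- Usage sketch (added for illustration) ---
# Assuming LATITUDES/LONGITUDES hold the grid described in the docstring, the
# Rio de Janeiro example would look like this:
if __name__ == '__main__':
    north, south, east, west = from_latlon(-22.876652, -43.227875)
    # expected to match the docstring example: roughly -22.75, -23.0, -43.0, -43.25
    print(north, south, east, west)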
|
/satellite-copernicus-0.1.1.tar.gz/satellite-copernicus-0.1.1/weather/utils/extract_coordinates.py
| 0.833596 | 0.888904 |
extract_coordinates.py
|
pypi
|
import logging
import uuid
from typing import Optional
import cdsapi
from weather.utils.globals import CDSAPIRC_PATH
credentials = 'url: https://cds.climate.copernicus.eu/api/v2\n' 'key: '
def _interactive_con(answer):
"""
Asks for UID and Key via input.
Attrs:
answer (str): If != no, return uid and key for later verification.
Returns:
uid (input) : UID before verification.
key (input) : API Key before verification.
"""
no = ['N', 'n', 'No', 'no', 'NO']
if answer not in no:
uid = str(input('Insert UID: '))
key = str(input('Insert API Key: '))
return uid, key
else:
logging.info('Usage: `cds_weather.connect(uid, key)`')
def _check_credentials(uid, key):
"""
Simple check to prevent credential misspelling.
Attrs:
uid (str): UID found in Copernicus User page.
key (str): API Key found in Copernicus User page.
Returns:
uid (str): UID ready to be stored at $HOME/.cdsapirc.
key (str): API ready to be stored at $HOME/.cdsapirc.
"""
valid_uid = len(uid) == 6
valid_key = uuid.UUID(key).version == 4
if not valid_uid:
return logging.error('Invalid UID.')
if not valid_key:
return logging.error('Invalid API Key.')
return uid, key
def connect(
uid: Optional[str] = None,
key: Optional[str] = None,
):
"""
`connect()` will be responsible for inserting the credentials in
the $HOME directory. If the file exists, it will simply call
`cdsapi.Client().status()` and return an instance of the Client.
If the credentials are passed with the `connect(api,key)` method or
via `_interactive_con()`, the values are evaluated and stored at
`$HOME/.cdsapirc` file, returning the Client instance as well.
Attrs:
uid (opt(str)) : UID found in Copernicus User page.
key (opt(str)) : API Key found in Copernicus User page.
Returns:
cdsapi.Client(): Instance of the Copernicus API Client, used
for requesting data from the API.
"""
if not uid and not key:
try:
status = cdsapi.Client().status()
logging.info('Credentials file configured.')
logging.info(status['info'])
logging.warning(status['warning'])
except Exception as e:
logging.error(e)
answer = input('Enter interactive mode? (y/n): ')
uid_answer, key_answer = _interactive_con(answer)
uid, key = _check_credentials(uid_answer, key_answer)
with open(CDSAPIRC_PATH, 'w') as f:
f.write(credentials + f'{uid}:{key}')
logging.info(f'Credentials stored at {CDSAPIRC_PATH}')
logging.info(cdsapi.Client().status()['info'])
finally:
return cdsapi.Client()
try:
uid, key = _check_credentials(uid, key)
with open(CDSAPIRC_PATH, 'w') as f:
f.write(credentials + f'{uid}:{key}')
logging.info(f'Credentials stored at {CDSAPIRC_PATH}')
return cdsapi.Client()
except Exception as e:
logging.error(e)
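# --- Usage sketch (added for illustration; the UID/key below are placeholders) ---
if __name__ == '__main__':
    # Reuse an existing ~/.cdsapirc, falling back to the interactive prompt:
    client = connect()
    # Or pass the Copernicus credentials explicitly (6-character UID, UUID4 API key):
    # client = connect(uid='123456', key='xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx')
    print(client)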
|
/satellite-copernicus-0.1.1.tar.gz/satellite-copernicus-0.1.1/downloader/utils/connection.py
| 0.826327 | 0.16132 |
connection.py
|
pypi
|
from .czml import (CZML, Billboard, CZMLPacket, Description, Label,
Path, Position, Point)
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
from datetime import datetime, timedelta
import pytz
import random
import math
class satellite():
'''
Creates an instance of a satellite to be included in the CZML document
based on a TLE
'''
id = ''
name = ''
description = ''
color = ''
image = ("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNS" +
"R0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAADJSURBVDhPnZ" +
"HRDcMgEEMZjVEYpaNklIzSEfLfD4qNnXAJSFWfhO7w2Zc0Tf9QG2rXrEzSUeZLOGm47WoH95x3" +
"Hl3jEgilvDgsOQUTqsNl68ezEwn1vae6lceSEEYvvWNT/Rxc4CXQNGadho1NXoJ+9iaqc2xi2x" +
"bt23PJCDIB6TQjOC6Bho/sDy3fBQT8PrVhibU7yBFcEPaRxOoeTwbwByCOYf9VGp1BYI1BA+Ee" +
"HhmfzKbBoJEQwn1yzUZtyspIQUha85MpkNIXB7GizqDEECsAAAAASUVORK5CYII=")
marker_scale = 1.5
show_label = True
show_path = True
start_time = datetime.utcnow().replace(tzinfo=pytz.UTC)
end_time = start_time + timedelta(hours=24)
tle = []
tle_obj = None
czmlMarker = None
czmlLabel = None
czmlPath = None
czmlPosition = None
def __init__(self, tle, name=None, description=None, color=None, image=None,
marker_scale=None, use_default_image=True, start_time=None, end_time=None,
show_label=True, show_path=True):
# Validate the inputs
self.id = int(tle[1][2:7])
if name is None:
self.tle = self.__check_tle_for_names(tle)
self.name = tle[0]
else:
self.tle = self.__check_tle(tle)
self.name = name
if len(tle) == 3:
self.tle = tle[1:]
else:
self.tle = tle
if description is not None:
self.description = description
else:
self.description = 'Orbit of Satellite: ' + self.name
self.color = self.__check_color(color)
self.show_label = show_label
self.show_path = show_path
if image is not None:
self.image = image
elif not use_default_image:
self.image = None
self.marker_scale = 10
self.marker_scale = marker_scale or self.marker_scale
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
self.tle_obj = twoline2rv(self.tle[0], self.tle[1], wgs72)
def __check_tle_for_names(self, tle):
'''
Checks if TLE has a name by seeing if 3 records exist
(name, TLE1, TLE2) when name is None
'''
if len(tle) != 3:
raise Exception(f"Satellite TLE only has {len(tle)} elements. Expected 3 " +
f"or pass in a name.\nTLE:\n{tle}")
return tle[1:]
def __check_tle(self, tle):
'''
Checks if TLE has either 2 or 3 records
[name (optional), TLE1, TLE2]
'''
if len(tle) not in [2,3]:
raise Exception(f"Satellite TLE only has {len(tle)} elements. Expected 2 or 3." +
f"(first line containing name is optional\nTLE:\n{tle}")
return tle
def __check_color(self, color):
'''
Checks if color is valid or generates a random one
'''
if color is not None and len(color) not in [3,4]:
raise Exception(f"Color for {self.name} only has {len(color)} elements. Expected 3 or 4." +
"(last one, alpha, being optional)")
elif color is not None:
for x in color:
if x is None or x < 0 or x > 255:
raise Exception(f"Color value {x} is not supported. Expected value between 0 and 255.")
else:
color = [random.randrange(256) for x in range(3)]
if len(color) == 3:
# Default missing alpha to 255
color.append(255)
return color
def build_marker(self,
image=None,
show_marker=True,
size=None,
color=None,
outlineColor=[255, 255, 255, 128],
outlineWidth=2,
rebuild=False):
'''
Creates the satellite marker (i.e. billboard)
'''
if self.czmlMarker is None or rebuild:
image = image or self.image
size = size or self.marker_scale
color = {"rgba": color or self.color}
outlineColor = {"rgba": outlineColor}
if image is not None:
self.czmlMarker = Billboard(scale=size,
show=show_marker)
self.czmlMarker.image = image
else:
self.czmlMarker = Point(show=True,
color=color,
pixelSize=size,
outlineColor=outlineColor,
outlineWidth=outlineWidth)
return self.czmlMarker
def build_label(self,
show=None,
color=None,
font='11pt Lucida Console',
hOrigin='LEFT',
vOrigin='CENTER',
outlineColor=[0, 0, 0, 255],
outlineWidth=2,
pixelOffset={"cartesian2": [12, 0]},
style='FILL_AND_OUTLINE',
rebuild=False):
'''
Creates the satellite label
'''
if self.czmlLabel is None or rebuild:
self.czmlLabel = Label(text=self.name, show=show or self.show_label)
self.czmlLabel.fillColor = {"rgba": color or self.color}
self.czmlLabel.font = font
self.czmlLabel.horizontalOrigin = hOrigin
self.czmlLabel.verticalOrigin = vOrigin
self.czmlLabel.outlineColor = {"rgba": outlineColor}
self.czmlLabel.outlineWidth = outlineWidth
self.czmlLabel.pixelOffset = pixelOffset
self.czmlLabel.style = style
return self.czmlLabel
def build_path(self,
show=None,
color=None,
interval=None,
width=1,
resolution=120,
lead_times=None,
trail_times=None,
start_time=None,
end_time=None,
rebuild=False):
'''
Creates the satellite path
'''
if self.czmlPath is None or rebuild:
if interval is None:
interval = self.start_time.isoformat() + "/" + self.end_time.isoformat()
self.czmlPath = Path()
self.czmlPath.show=[{"interval": interval, "boolean": show or self.show_path}]
self.czmlPath.width = width
self.czmlPath.material = {"solidColor": {"color": {"rgba": color or self.color}}}
self.czmlPath.resolution = resolution
if lead_times is None and trail_times is None:
lead_times, trail_times = self.build_lead_trail_times(start_time, end_time)
self.czmlPath.leadTime = lead_times
self.czmlPath.trailTime = trail_times
return self.czmlPath
def build_position(self,
start_time=None,
end_time=None,
interpolationAlgorithm = "LAGRANGE",
interpolationDegree = 5,
referenceFrame = "INERTIAL",
tle_object=None,
step=300,
rebuild=False):
'''
Creates the satellite positions and settings
'''
start_time = start_time or self.start_time
end_time = end_time or self.end_time
tle_object = tle_object or self.tle_obj
if self.czmlPosition is None:
self.czmlPosition = Position()
self.czmlPosition.interpolationAlgorithm = interpolationAlgorithm
self.czmlPosition.interpolationDegree = interpolationDegree
self.czmlPosition.referenceFrame = referenceFrame
self.czmlPosition.epoch = start_time.isoformat()
number_of_positions = int((end_time - start_time).total_seconds() / step)
number_of_positions += 5  # pad with a few extra samples so there is always more than one
time_step=0
positions=[]
for _ in range(number_of_positions):
current_time = start_time + timedelta(seconds=time_step)
eci_position, _ = tle_object.propagate(current_time.year, current_time.month,
current_time.day, current_time.hour,
current_time.minute, current_time.second)
positions.append(time_step)
positions.append(eci_position[0] * 1000) # converts km's to m's
positions.append(eci_position[1] * 1000)
positions.append(eci_position[2] * 1000)
time_step += step
self.czmlPosition.cartesian = positions
return self.czmlPosition
def get_orbital_time(self):
'''
Extracts the number of orbits per day from the TLE and calculates the
time per orbit in minutes
'''
return (24.0/float(self.tle[1][52:63]))*60.0
def build_lead_trail_times(self, start_time=None, end_time=None):
'''
Builds the lead and trail time for the orbit path
'''
start_time = start_time or self.start_time
end_time = end_time or self.end_time
minutes_in_sim = int((end_time - start_time).total_seconds()/60)
left_over_minutes = minutes_in_sim % self.get_orbital_time()
number_of_full_orbits = math.floor(minutes_in_sim / self.get_orbital_time())
sp_start = start_time
sp_end = sp_start + timedelta(minutes=left_over_minutes)
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
orbital_time_in_seconds = (self.get_orbital_time() * 60.0)
lead_times=[]
trail_times=[]
for _ in range(number_of_full_orbits + 1):
lead_times.append({
"interval": sp_interval,
"epoch": sp_start.isoformat(),
"number": [
0, orbital_time_in_seconds,
orbital_time_in_seconds, 0
]
})
trail_times.append({
"interval": sp_interval,
"epoch": sp_start.isoformat(),
"number": [
0, 0,
orbital_time_in_seconds, orbital_time_in_seconds
]
})
sp_start = sp_end
sp_end = (sp_start + timedelta(minutes=self.get_orbital_time()))
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
return lead_times, trail_times
def build_lead_time(self, start_time=None, end_time=None):
'''
Builds the lead time for the orbit path
'''
start_time = start_time or self.start_time
end_time = end_time or self.end_time
minutes_in_sim = int((end_time - start_time).total_seconds()/60)
left_over_minutes = minutes_in_sim % self.get_orbital_time()
number_of_full_orbits = math.floor(minutes_in_sim / self.get_orbital_time())
sp_start = start_time
sp_end = sp_start + timedelta(minutes=left_over_minutes)
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
orbital_time_in_seconds = (self.get_orbital_time() * 60.0)
lead_times=[]
for _ in range(number_of_full_orbits + 1):
lead_times.append({
"interval": sp_interval,
"epoch": sp_start.isoformat(),
"number": [
0, orbital_time_in_seconds,
orbital_time_in_seconds, 0
]
})
sp_start = sp_end
sp_end = (sp_start + timedelta(minutes=self.get_orbital_time()))
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
return lead_times
def build_trail_time(self, start_time=None, end_time=None):
'''
Builds the trail time for the orbit path
'''
start_time = start_time or self.start_time
end_time = end_time or self.end_time
minutes_in_sim = int((end_time - start_time).total_seconds()/60)
left_over_minutes = minutes_in_sim % self.get_orbital_time()
number_of_full_orbits = math.floor(minutes_in_sim / self.get_orbital_time())
sp_start = start_time
sp_end = sp_start + timedelta(minutes=left_over_minutes)
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
orbital_time_in_seconds = (self.get_orbital_time() * 60.0)
trail_times=[]
for _ in range(number_of_full_orbits + 1):
trail_times.append({
"interval": sp_interval,
"epoch": sp_start.isoformat(),
"number": [
0, 0,
orbital_time_in_seconds, orbital_time_in_seconds
]
})
sp_start = sp_end
sp_end = (sp_start + timedelta(minutes=self.get_orbital_time()))
sp_interval = (sp_start.isoformat() + '/' + sp_end.isoformat())
return trail_times
class satellite_czml():
'''
Generates the CZML document used by Cesium for plotting Satellites
using TLE entries
'''
start_time = datetime.utcnow().replace(tzinfo=pytz.UTC)
end_time = start_time + timedelta(hours=24)
speed_multiplier = 60
default_seed = 0
ignore_bad_tles=False
satellites = {}
def __init__(self, tle_list=None, satellite_list=None, start_time=None, end_time=None,
name_list=None, description_list=None, color_list=None, image_list=None,
use_default_image=True, marker_scale_list=None, speed_multiplier=None,
show_label=True, show_path=True, use_utc=True, seed=None,
ignore_bad_tles=False):
'''
Initialize satellite_czml object
'''
# Set the seed now before we generate colors
self.set_seed(seed)
# Set speed multiplier
self.set_speed_multiplier(speed_multiplier)
# Validate the inputs and default to list of None's if None
if satellite_list==None and tle_list==None:
raise TypeError("Missing a required argument: 'tle_list' or 'satellite_list'")
elif satellite_list is not None:
self.start_time = satellite_list[0].start_time
self.end_time = satellite_list[0].end_time
for sat in satellite_list:
self.add_satellite(sat)
else:
ex_len = len(tle_list)
name_list = self.__check_list(ex_len, name_list, 'name_list')
description_list = self.__check_list(ex_len, description_list, 'description_list')
color_list = self.__check_list(ex_len, color_list, 'color_list')
image_list = self.__check_list(ex_len, image_list, 'image_list')
marker_scale_list = self.__check_list(ex_len, marker_scale_list, 'marker_scale_list')
if start_time != None or end_time != None:
self.set_start_end_time(start_time or self.start_time,
end_time or self.end_time)
# Determine if we ignore bad TLEs
self.ignore_bad_tles = ignore_bad_tles
# Create Satellite for each TLE in list
for i,tle in enumerate(tle_list):
try:
sat = satellite(tle=tle,
name=name_list[i],
description=description_list[i],
color=color_list[i],
image=image_list[i],
marker_scale=marker_scale_list[i],
use_default_image=use_default_image,
start_time=self.start_time,
end_time=self.end_time,
show_label=show_label,
show_path=show_path)
self.add_satellite(sat)
except Exception as e:
if not self.ignore_bad_tles:
raise Exception(f'Failed to create the satellite object: {name_list[i]}\nError:\n{e}')
def __check_list(self, tle_len, lst, lst_name=None):
'''
Checks that the list contains the same number of elements
as the number of TLEs. If None, default to a list of Nones.
'''
if lst is not None and len(lst) != tle_len:
# Fall back to a generic name when no list name was supplied
if lst_name is None:
lst_name = 'list'
raise Exception(f"Number of elements in {lst_name} is {len(lst)} " +
f"and doesn't match number of TLEs: {tle_len}")
return False
return lst or [None for x in range(tle_len)]
def add_satellite(self, sat):
'''
Adds (or updates) instance of Satellite
'''
self.satellites[sat.id] = sat
return True
def get_satellite(self, id):
'''
Returns instance of Satellite
'''
return self.satellites[id]
def remove_satellite(self, id):
'''
Removes instance of Satellite
'''
del self.satellites[id]
return True
def set_start_end_time(self, start_time, end_time, set_utc=True):
'''
Sets the start and end time
'''
if set_utc == True:
start_time = start_time.replace(tzinfo=pytz.UTC)
end_time = end_time.replace(tzinfo=pytz.UTC)
self.start_time = start_time
self.end_time = end_time
return True
def set_speed_multiplier(self, speed):
'''
Sets the speed multiplier (how fast the satellites move)
'''
self.speed_multiplier = speed or self.speed_multiplier
return True
def set_seed(self, seed):
'''
Set the random seed. Only affects satellites not yet added.
'''
random.seed(seed or self.default_seed)
return True
def get_czml(self):
'''
Returns a CZML string
'''
# Initialize the CZML document
interval = self.start_time.isoformat() + "/" + self.end_time.isoformat()
doc = CZML()
packet = CZMLPacket(id='document', version='1.0')
packet.clock = {"interval": interval,
"currentTime": self.start_time.isoformat(),
"multiplier": self.speed_multiplier,
"range": "LOOP_STOP",
"step": "SYSTEM_CLOCK_MULTIPLIER"}
doc.packets.append(packet)
# Add each satellite
for id, sat in self.satellites.items():
# Initialize satellite CZML data
try:
sat_packet = CZMLPacket(id=id)
sat_packet.availability = interval
sat_packet.description = Description(sat.description)
if sat.image is None:
sat_packet.point = sat.build_marker()
else:
sat_packet.billboard = sat.build_marker()
sat_packet.label = sat.build_label()
sat_packet.path = sat.build_path()
sat_packet.position = sat.build_position()
doc.packets.append(sat_packet)
except Exception as e:
if not self.ignore_bad_tles:
raise Exception(f'Failed to generate CZML for satellite ID {id}: {sat.name}\nError:\n{e}')
return str(doc)
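# --- Usage sketch (added for illustration) ---
# Builds a CZML string from a single TLE. The element set below is an ISS-like
# example; substitute a current TLE for real use.
if __name__ == '__main__':
    iss_tle = [
        'ISS (ZARYA)',
        '1 25544U 98067A   21016.23305200  .00001366  00000-0  32598-4 0  9992',
        '2 25544  51.6457  14.3113 0000235 231.0982 239.8264 15.49297436265049',
    ]
    czml_string = satellite_czml(tle_list=[iss_tle]).get_czml()
    print(czml_string[:200])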
|
/satellite_czml-0.1.2-py3-none-any.whl/satellite_czml/satellite_czml.py
| 0.64579 | 0.243845 |
satellite_czml.py
|
pypi
|
from satellite_images_fusion.algorithms import gram_fusion_cpu, gram_fusion_gpu, high_frecuency_modulation_cpu, high_frecuency_modulation_gpu, hpf_fusion_cpu,\
hpf_fusion_gpu, mean_value_fusion_cpu, mean_value_fusion_gpu
from satellite_images_fusion.utils import utils
from satellite_images_fusion.metrics import metrics as mt
METHODS_FUSION = {
'high_pass_filter': {'cpu': hpf_fusion_cpu.fusion_hpf_cpu, 'gpu': hpf_fusion_gpu.fusion_hpf_gpu},
'mean_value': {'cpu': mean_value_fusion_cpu.fusion_mean_value_cpu, 'gpu': mean_value_fusion_gpu.fusion_mean_value_gpu},
'high_frecuency_modulation': {'cpu': high_frecuency_modulation_cpu.fusion_high_pass_cpu, 'gpu': high_frecuency_modulation_gpu.fusion_high_pass_gpu},
'gram_schmidt': {'cpu': gram_fusion_cpu.fusion_gram_cpu, 'gpu': gram_fusion_gpu.fusion_gram_gpu}
}
METRICS_METHODS = {'mse': mt.mse,
'rmse': mt.rmse,
'bias': mt.bias,
'correlation': mt.correlation_coeff}
def generate_fusion_images(multispectral_path, pancromatic_path, method_fusion, fusioned_image_path, device_fusion="cpu", geographical_info=True):
fusion_algorithm = METHODS_FUSION[method_fusion][device_fusion]
multi_image, multi_info = utils.read_image(multispectral_path)
pan_image, pan_info = utils.read_image(pancromatic_path)
image_fusioned = fusion_algorithm(multi_image, pan_image)
if geographical_info:
utils.save_image_with_info(fusioned_image_path, image_fusioned, multi_info)
else:
utils.save_image_without_info(fusioned_image_path, image_fusioned)
def generate_quality_metrics(fusioned_image_path, original_image_path, metrics=['mse', 'rmse', 'bias', 'correlation']):
results = {}
fusioned_image, _ = utils.read_image(fusioned_image_path)
original_image, _ = utils.read_image(original_image_path)
for metric in metrics:
if metric in METRICS_METHODS.keys():
results[metric] = METRICS_METHODS[metric](fusioned_image, original_image)
else:
print(f"Metric {metric} is not defined")
return results
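# --- Usage sketch (added for illustration; the file names are hypothetical) ---
if __name__ == '__main__':
    generate_fusion_images(
        multispectral_path='multi.tif',
        pancromatic_path='pan.tif',
        method_fusion='gram_schmidt',
        fusioned_image_path='fusioned.tif',
        device_fusion='cpu',
    )
    print(generate_quality_metrics('fusioned.tif', 'multi.tif', metrics=['mse', 'rmse']))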
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/invoker.py
| 0.553988 | 0.318843 |
invoker.py
|
pypi
|
import pycuda.autoinit
import numpy as np
import skimage.io
from scipy import ndimage
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import skcuda.misc as misc
import skimage
from pycuda.elementwise import ElementwiseKernel
from cupyx.scipy.ndimage import filters
import cupy as cp
results_operations = []
def fusion_gram_gpu(multispectral_image, panchromatic_image):
panchromatic_float = panchromatic_image.astype(np.float32)
panchromatic_copy = panchromatic_float
num_bands = int(multispectral_image.shape[2])
band_iterator = 0
bands_list = []
while band_iterator < num_bands:
band = multispectral_image[:, :, band_iterator]
float_band = band.astype(np.float32)
panchromatic_copy = panchromatic_copy + float_band
float_band = float_band.astype(np.uint8)
bands_list.append(float_band)
band_iterator = band_iterator + 1
total_bands = num_bands + 1
fusion_mean = panchromatic_copy / total_bands
bands_list.insert(0, fusion_mean)
fusioned_image = np.stack(bands_list, axis=2)
mat_escalar = get_scalar(fusioned_image)
bands_matrix = create_bands(mat_escalar, panchromatic_float)
fusion_bands = merge_bands(bands_matrix)
final_image = merge_image(fusion_bands)
return final_image
def get_scalar(fusioned_image):
global results_operations
fusioned_float = fusioned_image.astype(np.float32)
matrix_tmp_gpu = gpuarray.to_gpu(fusioned_float)
num_bands = int(fusioned_image.shape[2])
matrix_tmp = np.empty_like(fusioned_image)
for band_iterator in range(num_bands):
matrix_tmp[:, :, band_iterator] = fusioned_image[:, :, band_iterator]
for m in range(band_iterator):
fusion_cupy = cp.array(fusioned_image[:, :, band_iterator])
matrix_tmp_cupy = cp.array(matrix_tmp[:, :, m])
num_cp = cp.vdot(fusion_cupy, matrix_tmp_cupy)
den_cp = cp.vdot(matrix_tmp_cupy, matrix_tmp_cupy)
num = num_cp.get()
den = den_cp.get()
result = num / den
results_operations.append(result)
matrix_tmp_gpu[:, :, band_iterator] = gpuarray.to_gpu(
matrix_tmp[:, :, band_iterator].astype(np.float32)) - result * gpuarray.to_gpu(
matrix_tmp[:, :, m].astype(np.float32))
matrix_tmp = matrix_tmp_gpu.get()
return matrix_tmp
def create_bands(tmp_matrix, pan_float):
num_bands = int(tmp_matrix.shape[2])
image_list = []
band_iterator = 1
while band_iterator < num_bands:
image = tmp_matrix[:, :, band_iterator]
float_image = image.astype(np.float32)
image_list.append(float_image)
band_iterator = band_iterator + 1
image_list.insert(0, pan_float)
matrix_temp = np.stack(image_list, axis=2)
return matrix_temp
def merge_bands(matrix_tmp):
global results_operations
temporal_bands_gpu = gpuarray.to_gpu(matrix_tmp)
tmp_bands = np.empty_like(matrix_tmp)
num_bands = int(matrix_tmp.shape[2])
band_iterator = 0
for n in range(num_bands):
tmp_bands[:, :, n] = matrix_tmp[:, :, n]
for m in range(n):
temporal_bands_gpu[:, :, n] = gpuarray.to_gpu(tmp_bands[:, :, n].astype(np.float32)) + results_operations[
band_iterator] * gpuarray.to_gpu(matrix_tmp[:, :, m].astype(np.float32))
tmp_bands = temporal_bands_gpu.get()
band_iterator = band_iterator + 1
return tmp_bands
def merge_image(bandas_temp):
final_list = []
num_bands = int(bandas_temp.shape[2])
for band_iterator in range(1, num_bands):
final_image = bandas_temp[:, :, band_iterator]
float_image = final_image.astype(np.float32)
greater_fitted_values = fit_greater_values(float_image)
float_image = greater_fitted_values.astype(np.float32)
negative_fitted_values = fit_negative_values(float_image)
float_image = negative_fitted_values.astype(np.float32)
final_image = float_image.astype(np.uint8)
final_list.append(final_image)
gram_fusion = np.stack(final_list, axis=2)
return gram_fusion
def fit_greater_values(matrix):
matrix = matrix.astype(np.float32)
matrix_gpu = gpuarray.to_gpu(matrix)
matrix_gpu_new = gpuarray.empty_like(matrix_gpu)
fit_positive = ElementwiseKernel(
"float *x, float *z",
"if(x[i] > 255){z[i] = 255.0;}else{z[i] = x[i];}",
"adjust_value")
fit_positive(matrix_gpu, matrix_gpu_new)
return matrix_gpu_new.get()
def fit_negative_values(matrix):
matrix_gpu = gpuarray.to_gpu(matrix)
new_matrix_gpu = gpuarray.empty_like(matrix_gpu)
fit_negative = ElementwiseKernel(
"float *x, float *z",
"if(x[i] < 0){z[i] = 0.0;}else{z[i] = x[i];}",
"adjust_value")
fit_negative(matrix_gpu, new_matrix_gpu)
return new_matrix_gpu.get()
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/algorithms/gram_fusion_gpu.py
| 0.409457 | 0.514095 |
gram_fusion_gpu.py
|
pypi
|
import numpy as np
from scipy import ndimage
def fusion_hpf_cpu(multispectral_image, panchromatic_image):
float_list = []
band_iterator = 0
panchromatic_float = panchromatic_image.astype(np.float32)
num_bands = int(multispectral_image.shape[2])
while band_iterator < num_bands:
band = multispectral_image[:, :, band_iterator]
band_float = band.astype(np.float32)
float_list.append(band_float)
band_iterator = band_iterator + 1
image = create_filter(panchromatic_float)
variance = get_variance(multispectral_image, num_bands)
final_image = merge_image(panchromatic_image, variance, float_list, num_bands, image)
return final_image
def create_filter(float_panchromatic):
filter = np.array([[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, 80, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1]]) * (1 / 106)
image = ndimage.correlate(float_panchromatic, filter, mode='constant').astype(np.int8)
image[image < 0] = 0
return image
def get_variance(multispectral_image, num_bands):
zeros_matrix = np.zeros((num_bands, 2))
for n in range(num_bands):
zeros_matrix[n][0] = n
variance = np.std(multispectral_image[:, :, n])
zeros_matrix[n][1] = variance
return zeros_matrix
def merge_image(panchromatic_image, zeros_matrix, float_list, num_bands, image_data):
std_panchromatic = np.std(panchromatic_image)
sum_variance = 0
for i in zeros_matrix:
sum_variance += i[1]
total_var = sum_variance / (std_panchromatic * 0.65)
bands_list = []
for j in range(num_bands):
base = float_list[j] + image_data * total_var
base[base > 255] = 255
base[base < 0] = 0
base = base.astype(np.uint8)
bands_list.append(base)
fusioned_image = np.stack(bands_list, axis=2)
return fusioned_image
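# --- Usage sketch (added for illustration) ---
# The HPF fusion only needs two numpy arrays: a (rows, cols, bands) multispectral
# image and a (rows, cols) panchromatic image, so random data is enough to run it.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    multi = rng.integers(0, 255, size=(64, 64, 4), dtype=np.uint8)
    pan = rng.integers(0, 255, size=(64, 64), dtype=np.uint8)
    fused = fusion_hpf_cpu(multi, pan)
    print(fused.shape, fused.dtype)  # (64, 64, 4) uint8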
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/algorithms/hpf_fusion_cpu.py
| 0.549157 | 0.493103 |
hpf_fusion_cpu.py
|
pypi
|
import numpy as np
results_operations = []
def fusion_gram_cpu(multispectral_image, panchromatic_image):
panchromatic_float = panchromatic_image.astype(np.float32)
panchromatic_copy = panchromatic_float
num_bands = int(multispectral_image.shape[2])
band = 0
bands_list = []
while band < num_bands:
local_band = multispectral_image[:, :, band]
float_band = local_band.astype(np.float32)
panchromatic_copy = panchromatic_copy + float_band
float_band = float_band.astype(np.uint8)
bands_list.append(float_band)
band = band + 1
total_bands = num_bands + 1
fusion_mean = panchromatic_copy / total_bands
bands_list.insert(0, fusion_mean)
fusioned_image = np.stack(bands_list, axis=2)
mat_scalar = get_scalar(fusioned_image)
bands_matrix = create_bands(mat_scalar, panchromatic_float)
fusion_bands = merge_bands(bands_matrix)
final_image = merge_image(fusion_bands)
return final_image
def get_scalar(fusioned_image):
global results_operations
num_bands = int(fusioned_image.shape[2])
matriz_temp = np.empty_like(fusioned_image)
for band in range(num_bands):
matriz_temp[:, :, band] = fusioned_image[:, :, band]
for m in range(band):
num = np.vdot(fusioned_image[:, :, band], matriz_temp[:, :, m])
den = np.vdot(matriz_temp[:, :, m], matriz_temp[:, :, m])
result_tmp = num / den
results_operations.append(result_tmp)
matriz_temp[:, :, band] = matriz_temp[:, :, band] - result_tmp * matriz_temp[:, :, m]
return matriz_temp
def create_bands(tmp_matrix, pan_float):
num_bands = int(tmp_matrix.shape[2])
image_list = []
band_iterator = 1
while band_iterator < num_bands:
image = tmp_matrix[:, :, band_iterator]
float_image = image.astype(np.float32)
image_list.append(float_image)
band_iterator = band_iterator + 1
image_list.insert(0, pan_float)
tmp_matrix = np.stack(image_list, axis=2)
return tmp_matrix
def merge_bands(matrix_tmp):
global results_operations
temporal_bands = np.empty_like(matrix_tmp)
num_bands = int(matrix_tmp.shape[2])
band_iterator = 0
for band_value in range(num_bands):
temporal_bands[:, :, band_value] = matrix_tmp[:, :, band_value]
for m in range(band_value):
temporal_bands[:, :, band_value] = temporal_bands[:, :, band_value] + results_operations[band_iterator] * matrix_tmp[:, :, m]
band_iterator = band_iterator + 1
return temporal_bands
def merge_image(bandas_temp):
final_list = []
num_bands = int(bandas_temp.shape[2])
for band_iterator in range(1, num_bands):
final_image = bandas_temp[:, :, band_iterator]
final_image[final_image > 255] = 255
final_image[final_image < 0] = 0
final_image = final_image.astype(np.uint8)
final_list.append(final_image)
gram_fusion = np.stack(final_list, axis=2)
return gram_fusion
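# --- Usage sketch (added for illustration) ---
# Gram-Schmidt fusion on random data, just to show the expected array shapes.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    multi = rng.integers(0, 255, size=(32, 32, 3), dtype=np.uint8)
    pan = rng.integers(0, 255, size=(32, 32), dtype=np.uint8)
    fused = fusion_gram_cpu(multi, pan)
    print(fused.shape, fused.dtype)  # (32, 32, 3) uint8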
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/algorithms/gram_fusion_cpu.py
| 0.445771 | 0.622631 |
gram_fusion_cpu.py
|
pypi
|
import pycuda.autoinit
import numpy as np
import skimage.io
from scipy import ndimage
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import skcuda.linalg as linalg
import skcuda.misc as misc
import skimage
from pycuda.elementwise import ElementwiseKernel
from cupyx.scipy.ndimage import filters
import cupy as cp
initial_filter = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]) * (1 / 9)
second_filter = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) * (1 / 9)
def fusion_gpu(band, initial_filter, second_filter):
initial_filter_gpu = gpuarray.to_gpu(initial_filter)
second_filter_gpu = gpuarray.to_gpu(second_filter)
band_gpu = gpuarray.to_gpu(band)
division = misc.divide(band_gpu, second_filter_gpu)
multiplication = linalg.multiply(division, initial_filter_gpu)
fusioned_gpu = band_gpu + multiplication
return fusioned_gpu.get()
def fit_negative_values(matrix):
matrix_gpu = gpuarray.to_gpu(matrix)
matrix_gpu_new = gpuarray.empty_like(matrix_gpu)
fit_negative = ElementwiseKernel(
"float *x, float *z",
"if(x[i] < 0){z[i] = 0.0;}else{z[i] = x[i];}",
"adjust_value")
fit_negative(matrix_gpu, matrix_gpu_new)
return matrix_gpu_new.get()
def fit_greater_values(matrix):
matrix_gpu = gpuarray.to_gpu(matrix)
matrix_gpu_new = gpuarray.empty_like(matrix_gpu)
fit_positive = ElementwiseKernel(
"float *x, float *z",
"if(x[i] > 255){z[i] = 255.0;}else{z[i] = x[i];}",
"adjust_value")
fit_positive(matrix_gpu, matrix_gpu_new)
return matrix_gpu_new.get()
def fusion_high_pass_gpu(multi, pan):
linalg.init()
union_list = []
num_bands = int(multi.shape[2])
panchromatic_float = pan.astype(np.float32)
initial_filter_cupy = cp.array(initial_filter)
second_filter_cupy = cp.array(second_filter)
panchromatic_cupy = cp.array(panchromatic_float)
image_initial_filter_cupy = filters.correlate(panchromatic_cupy, initial_filter_cupy, mode='constant')
image_initial_filter_cpu = image_initial_filter_cupy.get()
image_initial_filter_gpu = gpuarray.to_gpu(image_initial_filter_cpu)
image_second_filter_cupy = filters.correlate(panchromatic_cupy, second_filter_cupy, mode='constant')
image_second_filter_cpu = image_second_filter_cupy.get()
image_second_filter_gpu = gpuarray.to_gpu(image_second_filter_cpu)
fitted_negative_initial_filter = fit_negative_values(image_initial_filter_gpu.astype(np.float32))
fitted_negative_second_filter = fit_negative_values(image_second_filter_gpu.astype(np.float32))
band_iterator = 0
while band_iterator < num_bands:
band = multi[:, :, band_iterator]
float_band = band.astype(np.float32)
fusion_bands = fusion_gpu(float_band, fitted_negative_initial_filter, fitted_negative_second_filter)
fusion_bands = fit_greater_values(fusion_bands)
result_image = fusion_bands.astype(np.uint8)
union_list.append(result_image)
band_iterator = band_iterator + 1
fusioned_image = np.stack(union_list, axis=2)
return fusioned_image
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/algorithms/high_frecuency_modulation_gpu.py
| 0.497559 | 0.462352 |
high_frecuency_modulation_gpu.py
|
pypi
|
import pycuda.autoinit
import numpy as np
import skimage.io
from scipy import ndimage
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import skcuda.misc as misc
import skimage
from pycuda.elementwise import ElementwiseKernel
from cupyx.scipy.ndimage import filters
import cupy as cp
def fusion_hpf_gpu(multispectral_image, panchromatic_image):
float_list = []
band_iterator = 0
panchromatic_float = panchromatic_image.astype(np.float32)
num_bands = int(multispectral_image.shape[2])
while band_iterator < num_bands:
band = multispectral_image[:, :, band_iterator]
float_band = band.astype(np.float32)
float_list.append(float_band)
band_iterator = band_iterator + 1
image = create_filter(panchromatic_float)
variance = get_variance(multispectral_image, num_bands)
final_image = merge_image(panchromatic_image, variance, float_list, num_bands, image)
return final_image
def create_filter(float_panchromatic):
filter = np.array([[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, 80, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1, -1, -1]]) * (1 / 106)
panchromatic_cupy = cp.array(float_panchromatic)
filter_cupy = cp.array(filter)
image_cupy = filters.correlate(panchromatic_cupy, filter_cupy, mode='constant')
image_cpu = image_cupy.get()
fitted_image = fit_negative_values(image_cpu)
return fitted_image
def fit_negative_values(matrix):
matrix_gpu = gpuarray.to_gpu(matrix)
new_matrix_gpu = gpuarray.empty_like(matrix_gpu)
fit_negative = ElementwiseKernel(
"float *x, float *z",
"if(x[i] < 0){z[i] = 0.0;}else{z[i] = x[i];}",
"adjust_value")
fit_negative(matrix_gpu, new_matrix_gpu)
return new_matrix_gpu.get()
def get_variance(multispectral_image, num_bands):
zeros_matrix = np.zeros((num_bands, 2))
for n in range(num_bands):
zeros_matrix[n][0] = n
matrix_gpu = gpuarray.to_gpu(multispectral_image[:, :, n].astype(np.float32))
variance_gpu = misc.std(matrix_gpu)
zeros_matrix[n][1] = variance_gpu.get()
return zeros_matrix
def merge_image(panchromatic_image, zeros_matrix, float_list, num_bands, image_data):
    # Note: the caller passes the panchromatic image as the first argument
    panchromatic_gpu = gpuarray.to_gpu(panchromatic_image.astype(np.float32))
std_panchromatic_gpu = misc.std(panchromatic_gpu)
sum_variance = 0
for i in zeros_matrix:
sum_variance += i[1]
total_var = sum_variance / (std_panchromatic_gpu * 0.65)
bands_list = []
image_gpu = gpuarray.to_gpu(image_data)
for j in range(num_bands):
list_gpu = gpuarray.to_gpu(float_list[j])
multi = image_gpu * total_var.get()
base_temp = list_gpu + multi
greater_fitted_values = fit_greater_values(base_temp)
float_image = greater_fitted_values.astype(np.float32)
negative_fitted_values = fit_negative_values(float_image)
float_image = negative_fitted_values.astype(np.float32)
base = float_image.astype(np.uint8)
bands_list.append(base)
fusioned_image = np.stack(bands_list, axis=2)
return fusioned_image
def fit_greater_values(matrix):
matrix_gpu = matrix.astype(np.float32)
matrix_gpu_new = gpuarray.empty_like(matrix_gpu)
fit_positive = ElementwiseKernel(
"float *x, float *z",
"if(x[i] > 255){z[i] = 255.0;}else{z[i] = x[i];}",
"adjust_value")
fit_positive(matrix_gpu, matrix_gpu_new)
return matrix_gpu_new.get()
|
/satellite_images_fusion-1.1.2.tar.gz/satellite_images_fusion-1.1.2/satellite_images_fusion/algorithms/hpf_fusion_gpu.py
| 0.443359 | 0.463323 |
hpf_fusion_gpu.py
|
pypi
|
import rasterio
import rasterio.plot
from rasterio.merge import merge
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import geopandas as gpd
def merge_tif_files(tiff_lst, out_fp):
"""
Function for merging .tif files into one .tif file.
Good for first downloading a wider region for different shapes, cropping it for each shape and then merging the pieces again with this function.
@param tiff_lst: A python list of .tif files to be merged.
@param out_fp: The .tif file to write to.
"""
src_files_to_mosaic = []
for tiff in tiff_lst:
    src = rasterio.open(tiff)
    src_files_to_mosaic.append(src)
# Merge; the source datasets must stay open until the mosaic has been built
mosaic, out_trans = merge(src_files_to_mosaic)
# Copy the metadata from the last source, then close all sources
out_meta = src.meta.copy()
for src in src_files_to_mosaic:
    src.close()
# Update the metadata
out_meta.update({"driver": "GTiff",
                 "height": mosaic.shape[1],
                 "width": mosaic.shape[2],
                 "transform": out_trans,
                 "crs": "+proj=utm +zone=35 +ellps=GRS80 +units=m +no_defs"})
# Write the mosaic raster to disk
with rasterio.open(out_fp, "w", **out_meta) as dest:
    dest.write(mosaic)
def plot_tif_file(path_to_tif_file):
"""
Plot a .tif file.
@param path_to_tif_file: path to the tif file.
"""
src = rasterio.open(path_to_tif_file)
plot_out_image = np.clip(src.read()[2::-1],
0,2200)/2200 # out_image[2::-1] selects the first three items, reversed
plt.figure(figsize=(10,10))
rasterio.plot.show(plot_out_image,
transform=src.transform)
src.close()
def switch_crs(list_coor_x, list_coor_y, crs_from, crs_to):
"""
This function changes the crs of a given list of coordinates.
@param list_coor_x: A list of X coordinates.
@param list_coor_y: A list of Y coordinates.
@param crs_from: the current crs the coordinates are in.
@param crs_to: the EPSG code to which the coordinates have to be converted.
@return gdf: A geopandas dataframe with the converted coordinates.
"""
df = pd.DataFrame(
{
'Latitude_orginal': list_coor_x,
'Longitude_orginal': list_coor_y}
)
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.Longitude_orginal, df.Latitude_orginal))
gdf = gdf.set_crs(crs_from, allow_override=True)
gdf = gdf.to_crs(epsg=crs_to)
return gdf
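# --- Usage sketch (added for illustration) ---
# Convert a WGS84 (EPSG:4326) coordinate to RD New (EPSG:28992). Latitudes go in
# the first list and longitudes in the second, matching the column names above.
if __name__ == '__main__':
    gdf = switch_crs([52.0116], [4.3571], crs_from='EPSG:4326', crs_to=28992)
    print(gdf.geometry.iloc[0])  # point re-projected to RD New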
|
/satellite_images_nso-1.2.5-py3-none-any.whl/satellite_images_nso/miscellaneous/miscellaneous.py
| 0.710729 | 0.447219 |
miscellaneous.py
|
pypi
|
from satellite_images_nso._manipulation import nso_manipulator
import pandas as pd
from matplotlib import pyplot
import rasterio
import numpy as np
from numpy import median
import glob
import os
import io
import requests
import platform
import time
import logging
"""
Helper functions.
"""
def __get_season_for_month(month):
"""
This method gets the season for a given month number.
@param month: A month as a number (1-12).
@return the season as a string and as a number.
"""
season = int(month)%12 // 3 + 1
season_str = ""
if season == 1:
season_str = "Winter"
if season == 2:
season_str = "Spring"
if season == 3:
season_str = "Summer"
if season == 4 :
season_str = "Fall"
return season_str, season
def __creation_date(path_to_file):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
def __is_file_older_than_x_days(file, days=1):
"""
Comparison check to see if a file is older than a given number of days.
@param file: Path to the file whose age should be checked.
@param days: The number of days to compare against.
"""
file_time = __creation_date(file)
# Check against 24 hours
return ((time.time() - file_time) / 3600 > 24*days)
"""
End helper functions.
"""
def extract_multi_date_normalisation_coefficients(path_to_tif_files):
"""
This method generates coefficients for a folder of .tif files.
@param path_to_tif_files: Path to a folder where all the .tif files are located.
@return: A pandas dataframe with the median 75th quantiles.
"""
multidate_coefficents = pd.DataFrame([],columns=["Season","Blue_median_75", "Green_median_75", "Red_median_75", "Nir_median_75"])
for season_cur in ["Winter","Summer","Spring","Fall"]:
blue_count = []
green_count = []
red_count = []
nir_count = []
count = 0
for file in glob.glob(path_to_tif_files+"*"):
if ".csv" not in file:
season = __get_season_for_month(file.split("/")[-1][4:6])[0]
if season == season_cur:
df = nso_manipulator.tranform_vector_to_pixel_df(file)
#print("-----file: "+str(file)+"--------")
#print(df['green'].min())
#print(df['blue'].min())
#print(df['red'].min())
#print(df['nir'].min())
#print(df[['blue','green','red','nir']].min())
#print(df[['blue','green','red','nir']].max())
#print(df[['blue','green','red','nir']].quantile(0.75))
#src = rasterio.open(file).read()
#plot_out_image = np.clip(src[2::-1],
# 0,2200)/2200
blue_mean_add, green_mean_add, red_mean_add, nir_mean_add = df[['blue','green','red','nir']].quantile(0.75)
blue_count.append(blue_mean_add)
green_count.append(green_mean_add)
red_count.append(red_mean_add)
nir_count.append(nir_mean_add)
#rasterio.plot.show(plot_out_image)
#pyplot.show()
print("----------- Season Medians 75 percentile----:"+season_cur)
blue_count_median = median(blue_count)
green_count_median = median(green_count)
red_count_median = median(red_count)
nir_count_median = median(nir_count)
print("--Blue_median:"+str(blue_count_median))
print("--Green_median:"+str(green_count_median))
print("--Red_median:"+str(red_count_median))
print("--NIR_median:"+str(nir_count_median))
multidate_coefficents.loc[len(multidate_coefficents)] = [season_cur, blue_count_median, green_count_median, red_count_median, nir_count_median]
return multidate_coefficents
def multidate_normalisation_75th_percentile(path_to_tif):
"""
Normalise a .tif file based on the 75th percentile point.
@param path_to_tif: Path to a .tif file.
@return: writes a new .tif file with 75th percentile normalisation next to the input.
"""
script_dir = os.path.dirname(__file__)
coefficients = script_dir+"/coefficients/Multi-date-index-coefficients_pd.csv"
multidate_coefficents = pd.DataFrame()
if __is_file_older_than_x_days(coefficients,1) == True:
# Check the PZH blob storage for the most recent multi data coefficients.
url="https://a804bee12d94d498fbfe55e2.blob.core.windows.net/satellite-images-nso/coefficients/Multi-date-index-coefficients_pd.csv"
s=requests.get(url).content
if s is not None and s != '':
print("Downloading most recent coefficients")
logging.info("Downloading most recent coefficients")
multidate_coefficents = pd.read_csv(io.StringIO(s.decode('utf-8')))
if multidate_coefficents.empty:
print("Using local coefficients")
logging.info("Using local coefficients")
multidate_coefficents = pd.read_csv(script_dir+"/coefficients/Multi-date-index-coefficients_pd.csv")
season = __get_season_for_month(path_to_tif.split("/")[-1][4:6])[0]
multidate_coefficents = multidate_coefficents[multidate_coefficents['Season'] == season]
print("-------- Multi-date Relative Normalisation for file: \n"+path_to_tif)
df = nso_manipulator.tranform_vector_to_pixel_df(path_to_tif)
blue_mean_current, green_mean_current, red_mean_current, nir_mean_current = df[['blue','green','red','nir']].quantile(0.75)
blue_diff_add = multidate_coefficents['Blue_median_75'].values[0]-blue_mean_current
green_diff_add = multidate_coefficents['Green_median_75'].values[0]-green_mean_current
red_diff_add = multidate_coefficents['Red_median_75'].values[0]-red_mean_current
nir_diff_add = multidate_coefficents['Nir_median_75'].values[0]-nir_mean_current
src = rasterio.open(path_to_tif).read(masked=True)
meta = rasterio.open(path_to_tif).meta.copy()
fig, (axrgb, axhist) = pyplot.subplots(1, 2, figsize=(14,7))
plot_out_image = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image, ax=axrgb, title="Original")
src[0] = src[0]+blue_diff_add
src[1] = src[1]+green_diff_add
src[2] = src[2]+red_diff_add
src[3] = src[3]+nir_diff_add
ahn_outpath = path_to_tif.split(".")[0]+"_"+str(season)+"_normalised.tif"
print("Saving file to:")
print(ahn_outpath)
plot_out_image_2 = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image_2, ax=axhist, title="Multi-date Relative Normalisation")
pyplot.show()
with rasterio.open(ahn_outpath, 'w', **meta) as outds:
outds.write(src)
def multi_date_dark_spot_normalisation(path_to_tif, satellite_image_name = False, X_specific_point = False, Y_specific_point =False, ):
"""
Black-point normalisation based on the darkest point in the satellite image.
Saves a new .tif file with the normalisation applied.
The coefficients are pre-calculated; if you wish to use your own, replace the black-point coefficients .csv file.
@param path_to_tif: Path to the .tif to be used.
@param X_specific_point: X coordinates of a specific point in the .tif file on which you want to normalize. In RD coordinates!
@param Y_specific_point: Y coordinates of a specific point in the .tif file on which you want to normalize. In RD coordinates!
"""
script_dir = os.path.dirname(__file__)
coefficients = script_dir+"/coefficients/dark-spot-coefficients_pd.csv"
dark_spot_coefficents = pd.DataFrame()
if __is_file_older_than_x_days(coefficients,1) == True:
# Check the PZH blob storage for the most recent multi data coefficients.
url="https://a804bee12d94d498fbfe55e2.blob.core.windows.net/satellite-images-nso/coefficients/dark-spot-coefficients_pd.csv"
s=requests.get(url).content
if s is not None and s != '':
print("Downloading most recent coefficients")
logging.info("Downloading most recent coefficients")
dark_spot_coefficents = pd.read_csv(io.StringIO(s.decode('utf-8')))
if dark_spot_coefficents.empty:
print("Using local coefficients")
logging.info("Using local coefficients")
script_dir = os.path.dirname(__file__)
dark_spot_coefficents = pd.read_csv(script_dir+"/coefficients/dark-spot-coefficients_pd.csv")
print("-------- Dark Spot Normalisation for file: \n"+path_to_tif)
df = nso_manipulator.tranform_vector_to_pixel_df(path_to_tif)
blue_diff_add = 0
green_diff_add = 0
red_diff_add = 0
nir_diff_add = 0
if X_specific_point == False:
if satellite_image_name != False:
print(satellite_image_name)
dark_spot_coefficents_file = dark_spot_coefficents[dark_spot_coefficents['filename'].str.contains(satellite_image_name)]
if dark_spot_coefficents_file.empty:
print("No coefficients found for satellite image! Defaulting to nothing!")
logging.info(f'No coefficients found for satellite image {satellite_image_name}! Defaulting to nothing!')
else:
blue_diff_add = dark_spot_coefficents_file['blue_coefficients'].values[0]
green_diff_add = dark_spot_coefficents_file['green_coefficients'].values[0]
red_diff_add = dark_spot_coefficents_file['red_coefficients'].values[0]
nir_diff_add = dark_spot_coefficents_file['nir_coefficients'].values[0]
else:
# Select the pixel at the requested RD coordinates and use its band values
point_df = df[(round(df['Y'], 6) == round(Y_specific_point, 6)) & (round(df['X'], 6) == round(X_specific_point, 6))]
blue_current, green_current, red_current, nir_current = point_df[['blue', 'green', 'red', 'nir']].iloc[0]
blue_diff_add = dark_spot_coefficents['blue_coefficients'].values[0]-blue_current
green_diff_add = dark_spot_coefficents['green_coefficients'].values[0]-green_current
red_diff_add = dark_spot_coefficents['red_coefficients'].values[0]-red_current
nir_diff_add = dark_spot_coefficents['nir_coefficients'].values[0]-nir_current
print(blue_diff_add, green_diff_add, red_diff_add, nir_diff_add )
src = rasterio.open(path_to_tif).read(masked=True)
meta = rasterio.open(path_to_tif).meta.copy()
fig, (axrgb, axhist) = pyplot.subplots(1, 2, figsize=(14,7))
plot_out_image = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image, ax=axrgb, title="Original")
src[0] = src[0]+blue_diff_add
src[1] = src[1]+green_diff_add
src[2] = src[2]+red_diff_add
src[3] = src[3]+nir_diff_add
ahn_outpath = path_to_tif.split(".")[0]+"_dark_point_normalised.tif"
print("Saving file to:")
print(ahn_outpath)
logging.info(f'Saving file to {ahn_outpath}')
plot_out_image_2 = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image_2, ax=axhist, title="Multi-date Dark Point Relative Normalisation")
pyplot.show()
with rasterio.open(ahn_outpath, 'w', **meta) as outds:
outds.write(src)
def plot_tif_with_RGBI_coefficients(path_to_tif, red_diff_add, green_diff_add, blue_diff_add, nir_diff_add, save_new_image = True):
"""
This method shifts the RGBI values of a .tif file by the given per-band coefficients.
@param path_to_tif: The .tif file which RGBI value have to be altered.
@param red_diff_add: The red coefficients which have to be added.
@param green_diff_add: The green coefficients which have to be added.
@param blue_diff_add: The blue coefficients which have to be added.
@param nir_diff_add: The infrared values which have to be added.
"""
print(path_to_tif)
print(blue_diff_add, green_diff_add, red_diff_add, nir_diff_add )
src = rasterio.open(path_to_tif).read(masked=True)
meta = rasterio.open(path_to_tif).meta.copy()
fig, (axrgb, axhist) = pyplot.subplots(1, 2, figsize=(14,7))
plot_out_image = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image, ax=axrgb, title="Original")
src[0] = src[0]+blue_diff_add
src[1] = src[1]+green_diff_add
src[2] = src[2]+red_diff_add
src[3] = src[3]+nir_diff_add
plot_out_image_2 = np.clip(src[2::-1],
0,2200)/2200
rasterio.plot.show(plot_out_image_2, ax=axhist, title="Multi-date Dark Point Relative Normalisation")
pyplot.show()
# Save the new image when requested.
if save_new_image:
    outpath = path_to_tif.split(".")[0]+"_dark_point_normalised.tif"
    with rasterio.open(outpath, 'w', **meta) as outds:
        outds.write(src)
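# --- Usage sketch (added for illustration; the paths below are hypothetical) ---
# Both entry points read a .tif file whose name starts with YYYYMMDD (the month
# is parsed from characters 4:6) and write a *_normalised.tif next to it.
if __name__ == '__main__':
    multidate_normalisation_75th_percentile('/data/20220301_example_crop.tif')
    multi_date_dark_spot_normalisation('/data/20220301_example_crop.tif',
                                       satellite_image_name='20220301_example')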
|
/satellite_images_nso-1.2.5-py3-none-any.whl/satellite_images_nso/__normalisation/normalisation.py
| 0.431704 | 0.383843 |
normalisation.py
|
pypi
|
import geopandas as gpd
import numpy as np
import pandas as pd
import earthpy.plot as ep
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import logging
import warnings
import tqdm
"""
This module is used for various NDVI calculations.
For more information check:
https://en.wikipedia.org/wiki/Enhanced_vegetation_index
"""
def aggregate_ndvi_habitat(ndvi_geo_df: gpd.geodataframe.GeoDataFrame) -> pd.Series:
"""
Calculate the aggregated statistics for NDVI
@param ndvi_geo_df: geopandas dataframe, with ndvi values for each pixel
@return pandas series: with 'mean', 'std', 'min', 'max', 'count' in the row axis
"""
return ndvi_geo_df['ndvi'].agg(['mean', 'std', 'min', 'max', 'count'])
def generate_ndvi_channel(tile):
"""
Generate ndvi channel from 2 bands.
@param tile: RGBI tile to calculate the NDVI from.
@return a NDVI channel.
"""
print("Generating NDVI channel...")
red = tile[0]
nir = tile[3]
ndvi = []
# The vectorised numpy approach produced incorrect NDVI values here, so we
# loop over each pixel directly instead.
for i in tqdm.tqdm(range(len(red))):
ndvi_x = []
for x in range(len(red[i])):
upper_ndvi = (int(nir[i][x])-int(red[i][x]))
lower_ndvi = (int(nir[i][x])+int(red[i][x]))
if lower_ndvi == 0:
ndvi_x.append(0)
else:
ndvi_cur = upper_ndvi/lower_ndvi
ndvi_cur = (ndvi_cur*100)+100
ndvi_x.append(int(ndvi_cur))
ndvi.append(ndvi_x)
return np.array(ndvi)
def normalized_diff(b1: np.array, b2: np.array) -> np.array:
"""Take two n-dimensional numpy arrays and calculate the normalized
difference.
Math will be calculated (b1-b2) / (b1 + b2). b1 is NIR, b2 is Red
Parameters
----------
b1, b2 : numpy arrays
Two numpy arrays that will be used to calculate the normalized
difference. Math will be calculated (b1-b2) / (b1+b2).
Returns
----------
n_diff : numpy array
The element-wise result of (b1-b2) / (b1+b2) calculation. Inf values
are set to nan. Array returned as masked if result includes nan values.
"""
logging.info('Calculating NDVI')
if not (b1.shape == b2.shape):
raise ValueError("Both arrays should have the same dimensions")
b1 = b1.astype('f4')
b2 = b2.astype('f4')
# Ignore warning for division by zero
with np.errstate(divide="ignore"):#np.seterr(divide='ignore', invalid='ignore')
n_diff = (b1 - b2) / (b1 + b2)
# Set inf values to nan and provide custom warning
if np.isinf(n_diff).any():
warnings.warn(
"Divide by zero produced infinity values that will be replaced "
"with nan values",
Warning,
)
n_diff[np.isinf(n_diff)] = np.nan
# Mask invalid values
if np.isnan(n_diff).any():
n_diff = np.ma.masked_invalid(n_diff)
return n_diff
def enhanced_vegetation_index(red: np.array, blue: np.array, nir: np.array, L: float=1, c1: float=6, c2: float=7.5, G: float=2.5) -> np.array:
"""
DEPRECATED MIGHT BE USED IN THE FUTURE!
This function makes groups out of the NVDI values.
For a nicer plot.
@param red: numpy array color values for red channel
@param blue: numpy array color values for blue channel
@param nir: numpy array values for infra-red channel
@param L: float see Wikipedia
@param c1: float see Wikipedia
@param c2: float see Wikipedia
@param G: float see Wikipedia
@return numpy array, vegetation index for each pixel
"""
if not ((red.shape == blue.shape) and (blue.shape == nir.shape)):
raise ValueError("All arrays should have the same dimensions")
# Ignore warning for division by zero
red = red.astype('f4')
blue = blue.astype('f4')
nir = nir.astype('f4')
    with np.errstate(divide="ignore"):
evi = G * (nir - red) / (nir + c1 * red - c2 * blue + L)
# Set inf values to nan and provide custom warning
if np.isinf(evi).any():
warnings.warn(
"Divide by zero produced infinity values that will be replaced "
"with nan values",
Warning,
)
evi[np.isinf(evi)] = np.nan
# Mask invalid values
if np.isnan(evi).any():
evi = np.ma.masked_invalid(evi)
return evi
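
# Example usage (illustrative, with made-up reflectance values scaled to 0-1,
# which is the range the default EVI coefficients assume):
#
#   red = np.array([[0.10, 0.20]])
#   blue = np.array([[0.05, 0.10]])
#   nir = np.array([[0.40, 0.50]])
#   evi = enhanced_vegetation_index(red, blue, nir)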
def make_ndvi_plot(path, title, plot, save_figure=True):
    """ Plot NDVI data and save a figure with classified NDVI values.
    @param path: path to the saved numpy array (.npy) with the NDVI data.
    @param title: title to put on the plot.
    @param plot: bool, whether to show the plots in the output or not.
    @param save_figure: bool, currently unused; the classes figure is always saved.
    """
data_ndvi = np.load(path, allow_pickle=True)
#plot ndvi
if plot:
ep.plot_bands(data_ndvi,
figsize=(28, 12),
cmap='RdYlGn',
scale=False,
vmin=-1, vmax=1,
title=title)
plt.show()
logging.info('Plotted NDVI image')
# Create classes and apply to NDVI results
ndvi_class_bins = [-np.inf, 0, 0.1, 0.25, 0.4, np.inf]
ndvi_class = np.digitize(data_ndvi, ndvi_class_bins)
# Apply the nodata mask to the newly classified NDVI data
ndvi_class = np.ma.masked_where(
np.ma.getmask(data_ndvi), ndvi_class
)
np.unique(ndvi_class)
# Define color map
nbr_colors = ["gray", "y", "yellowgreen", "g", "darkgreen"]
nbr_cmap = ListedColormap(nbr_colors)
# Define class names
ndvi_cat_names = [
"No Vegetation",
"Bare Area",
"Low Vegetation",
"Moderate Vegetation",
"High Vegetation",
]
legend_titles = ndvi_cat_names
#plot ndvi classes
fig, ax = plt.subplots(figsize=(28, 12))
im = ax.imshow(ndvi_class, cmap=nbr_cmap)
ep.draw_legend(im_ax=im, titles=legend_titles)
ax.set_title(
" Normalized Difference Vegetation Index (NDVI) Classes",
fontsize=14,
)
ax.set_axis_off()
#Auto adjust subplot to fit figure size
plt.tight_layout()
#save fig
path_corr=path.replace("\\","/")+"_classes.png"
plt.savefig(path_corr)
logging.info(f'Saved ndvi classes figure {path_corr}')
print(f'Saved figure {path_corr}')
#plot figure
if plot:
print('Plotting NDVI classes image')
plt.show()
else:
plt.close()
return path_corr
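
# Illustrative end-to-end sketch (nir_band and red_band are hypothetical arrays):
# compute NDVI, save it as a numpy array and plot it with the helper above.
#
#   ndvi = normalized_diff(nir_band, red_band)
#   np.save("example_ndvi.npy", np.asarray(ndvi))   # make_ndvi_plot expects a saved .npy file
#   make_ndvi_plot("example_ndvi.npy", "Example NDVI", plot=False)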
|
/satellite_images_nso-1.2.5-py3-none-any.whl/satellite_images_nso/_nvdi/calculate_nvdi.py
| 0.869548 | 0.767494 |
calculate_nvdi.py
|
pypi
|
from azure.storage.blob import ContainerClient
import pandas as pd
from os import path
"""
Class for working with Azure blob storage.
Used for downloading files and checking whether files were already uploaded to blob storage.
@Author: Michael de Winter, Jeroen Esseveld
"""
class blob_container:
def __init__(self, connection_string: str, container_name: str):
"""
Init a blob storage container.
"""
self.container = ContainerClient.from_connection_string(conn_str=connection_string,\
container_name=container_name)
    def create_df_current_tiff_files(self, blob_url, folder=""):
        """
        Create a pandas dataframe of the .tif files currently stored in the blob storage.
        The dataframe holds metadata (filename, timestamp and download url) about the cropped .tif images.
        """
urls = []
filenames = []
for blob in self.container.list_blobs(prefix = folder):
if '.tif' in blob['name']:
urls.append(blob_url+"/"+blob['container']+"/"+blob['name'])
filenames.append(blob['name'])
df_filenames = pd.DataFrame(filenames, columns=['filename'])
df_filenames['datetime']= df_filenames['filename'].str.split("_").str[0]+" "+df_filenames['filename'].str.split("_").str[1]
df_filenames['download_urls'] = urls
return df_filenames
def upload_file_rm_blob(self,path_to_file, name):
with open(path_to_file, "rb") as data:
self.container.upload_blob(name,data)
def get_container(self):
return self.container
def check_new_tiff_file(owned_files, nso_files):
"""
Check whether NSO provides new tiff files with respect to the list of stored tiff files.
"""
owned_files = "#".join([path.basename(file) for file in owned_files])
nso_files = [path.basename(file) for file in nso_files]
return list(filter(lambda x: x not in owned_files, nso_files))
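
# Illustrative usage (connection string, container name, URLs and file names are placeholders):
#
#   container = blob_container("<azure-connection-string>", "satellite-images")
#   df = container.create_df_current_tiff_files("https://<account>.blob.core.windows.net")
#   container.upload_file_rm_blob("./local_image.tif", "remote_image.tif")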
|
/satellite_images_nso-1.2.5-py3-none-any.whl/satellite_images_nso/__blob_storage/blob_storage.py
| 0.584627 | 0.198841 |
blob_storage.py
|
pypi
|
from typing import Union
import dask
import dask.array as da # type: ignore
import dask.dataframe as dd # type: ignore
import numpy as np # type: ignore
import xarray as xr # type: ignore
from loguru import logger # type: ignore
from sqlalchemy.engine import Connectable # type: ignore
from . import brazil
xr.set_options(keep_attrs=True)
@xr.register_dataset_accessor("copebr")
class CopeBRDatasetExtension:
"""
xarray.Dataset.copebr
---------------------
    This class is an `xr.Dataset` extension. It works as a dataset
    layer with the purpose of enhancing the dataset with new methods.
    The expected input dataset is a `netCDF4` file from the Copernicus API;
    this extension works on certain data variables, and the method that
    extracts them with the correct parameters can be found in the
    `extract_reanalysis` module.
Usage:
```
import satellite.weather as sat
ds = sat.load_dataset('file/path')
RJ_geocode = 3304557
rio_df = ds.copebr.to_dataframe(RJ_geocode)
rio_ds = ds.copebr.ds_from_geocode(RJ_geocode)
```
    The original dataset will be parsed into Brazil's data format and can
    be sliced by a geocode from any city in Brazil, according to IBGE geocodes.
    The expected output when the requested data is not `raw` is:
date : datetime object.
temp_min : Minimum┐
    temp_med   : Average├─ temperature in `Celsius degrees` given a geocode.
temp_max : Maximum┘
precip_min : Minimum┐
precip_med : Average├─ of total precipitation in `mm` given a geocode.
precip_max : Maximum┘
precip_tot : Total precipitation in `mm` given a geocode.
pressao_min: Minimum┐
pressao_med: Average├─ sea level pressure in `hPa` given a geocode.
pressao_max: Maximum┘
umid_min : Minimum┐
umid_med : Average├─ percentage of relative humidity given a geocode.
umid_max : Maximum┘
"""
def __init__(self, xarray_ds: xr.Dataset) -> None:
self._ds = xarray_ds
def to_dataframe(self, geocodes: Union[list, int], raw: bool = False):
df = _final_dataframe(dataset=self._ds, geocodes=geocodes, raw=raw)
if type(df) == dask.dataframe.core.DataFrame:
df = df.compute()
df = df.reset_index(drop=True)
return df
def to_sql(
self,
geocodes: Union[list, int],
con: Connectable,
tablename: str,
schema: str,
raw: bool = False,
) -> None:
"""
        Reads the data for each geocode and inserts the rows into the
        database one geocode at a time, using the sqlalchemy engine created from the URI.
        This method is convenient to prevent memory overhead when
        executing with a large amount of geocodes.
"""
geocodes = [geocodes] if isinstance(geocodes, int) else geocodes
for geocode in geocodes:
_geocode_to_sql(
dataset=self._ds,
geocode=geocode,
con=con,
schema=schema,
tablename=tablename,
raw=raw,
)
logger.debug(f"{geocode} updated on {schema}.{tablename}")
def geocode_ds(self, geocode: int, raw: bool = False):
return _geocode_ds(self._ds, geocode, raw)
def _final_dataframe(dataset: xr.Dataset, geocodes: Union[list, int], raw=False):
geocodes = [geocodes] if isinstance(geocodes, int) else geocodes
dfs = []
for geocode in geocodes:
dfs.append(_geocode_to_dataframe(dataset, geocode, raw))
final_df = dd.concat(dfs)
if final_df.index.name == "time":
final_df = final_df.reset_index(drop=False)
if raw:
final_df = final_df.rename(columns={"time": "datetime"})
else:
final_df = final_df.rename(columns={"time": "date"})
return final_df
def _geocode_to_sql(
dataset: xr.Dataset,
geocode: int,
con: Connectable,
schema: str,
tablename: str,
raw: bool,
):
ds = _geocode_ds(dataset, geocode, raw)
df = ds.to_dataframe()
del ds
geocodes = [geocode for g in range(len(df))]
df = df.assign(geocodigo=geocodes)
df = df.reset_index(drop=False)
if raw:
df = df.rename(columns={"time": "datetime"})
else:
df = df.rename(columns={"time": "date"})
df.to_sql(
name=tablename,
schema=schema,
con=con,
if_exists="append",
index=False,
)
del df
def _geocode_to_dataframe(dataset: xr.Dataset, geocode: int, raw=False):
"""
Returns a DataFrame with the values related to the geocode of a
brazilian city according to IBGE's format. Extract the values
using `ds_from_geocode()` and return `xr.Dataset.to_dataframe()`
from Xarray, inserting the geocode into the final DataFrame.
Attrs:
geocode (str or int): Geocode of a city in Brazil according to IBGE.
raw (bool) : If raw is set to True, the DataFrame returned
will contain data in 3 hours intervals.
Default return will aggregate these values
into 24 hours interval.
Returns:
        pd.DataFrame: Similar to `ds_from_geocode(geocode).to_dataframe()`
                      but with an extra column containing the geocode, in order
                      to distinguish the data when inserting it into a database,
                      for instance.
"""
ds = _geocode_ds(dataset, geocode, raw)
df = ds.to_dataframe()
del ds
geocode = [geocode for g in range(len(df))]
df = df.assign(geocodigo=da.from_array(geocode))
return df
def _geocode_ds(ds: xr.Dataset, geocode: int, raw=False):
"""
    This is the most important method of the extension. It slices
    the dataset according to the geocode provided, converts the units
    to Brazil's format, and reduces the values to daily min, mean and
    max if `raw` is false.
Attrs:
geocode (str|int): Geocode of a Brazilian city according to IBGE.
raw (bool) : If raw is set to True, the DataFrame returned
will contain data in 3 hours intervals. Default
return will aggregate these values into 24h
interval.
Returns:
        xr.Dataset: The final dataset with the data parsed into Brazil's
                    format. If not `raw`, the data is grouped by day,
                    taking its min, mean and max values. If `raw`,
                    the data corresponds to a 3h interval range for
                    each day in the dataset.
"""
lats, lons = _get_latlons(geocode)
geocode_ds = _convert_to_br_units(
_slice_dataset_by_coord(dataset=ds, lats=lats, lons=lons)
)
if raw:
return geocode_ds
geocode_ds = geocode_ds.sortby("time")
gb = geocode_ds.resample(time="1D")
gmin, gmean, gmax, gtot = (
_reduce_by(gb, np.min, "min"),
_reduce_by(gb, np.mean, "med"),
_reduce_by(gb, np.max, "max"),
_reduce_by(gb, np.sum, "tot"),
)
final_ds = xr.combine_by_coords(
[gmin, gmean, gmax, gtot.precip_tot], data_vars="all"
)
return final_ds
def _slice_dataset_by_coord(dataset: xr.Dataset, lats: list[float], lons: list[float]):
"""
Slices a dataset using latitudes and longitudes, returns a dataset
with the mean values between the coordinates.
"""
ds = dataset.sel(latitude=lats, longitude=lons, method="nearest")
return ds.mean(dim=["latitude", "longitude"])
def _convert_to_br_units(dataset: xr.Dataset) -> xr.Dataset:
"""
Parse the units according to Brazil's standard unit measures.
Rename their unit names and long names as well.
"""
ds = dataset
vars = list(ds.data_vars.keys())
if "t2m" in vars:
# Convert Kelvin to Celsius degrees
ds["t2m"] = ds.t2m - 273.15
ds["t2m"].attrs = {"units": "degC", "long_name": "Temperatura"}
if "d2m" in vars:
# Calculate Relative Humidity percentage and add to Dataset
ds["d2m"] = ds.d2m - 273.15
e = 6.112 * np.exp(17.67 * ds.d2m / (ds.d2m + 243.5))
es = 6.112 * np.exp(17.67 * ds.t2m / (ds.t2m + 243.5))
rh = (e / es) * 100
# Replacing the variable instead of dropping. d2m won't be used.
ds["d2m"] = rh
ds["d2m"].attrs = {
"units": "pct",
"long_name": "Umidade Relativa do Ar",
}
if "tp" in vars:
# Convert meters to millimeters
ds["tp"] = ds.tp * 1000
ds["tp"] = ds.tp.round(5)
ds["tp"].attrs = {"units": "mm", "long_name": "Precipitação"}
if "msl" in vars:
# Convert Pa to ATM
ds["msl"] = ds.msl * 0.00000986923
ds["msl"].attrs = {
"units": "atm",
"long_name": "Pressão ao Nível do Mar",
}
with_br_vars = {
"t2m": "temp",
"tp": "precip",
"msl": "pressao",
"d2m": "umid",
}
return ds.rename(with_br_vars)
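
# The relative-humidity step above uses the Magnus-type approximation for
# saturation vapour pressure. A standalone numeric sketch (values are made up):
#
#   t2m_c, d2m_c = 25.0, 18.0                             # air and dew point temperature in Celsius
#   e = 6.112 * np.exp(17.67 * d2m_c / (d2m_c + 243.5))   # actual vapour pressure (hPa)
#   es = 6.112 * np.exp(17.67 * t2m_c / (t2m_c + 243.5))  # saturation vapour pressure (hPa)
#   rh = 100 * e / es                                     # ~65% relative humidity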
def _reduce_by(ds: xr.Dataset, func, prefix: str):
"""
Applies a function to each coordinate in the dataset and
replace the `data_vars` names to it's corresponding prefix.
"""
ds = ds.apply(func=func)
return ds.rename(
dict(
zip(
list(ds.data_vars),
list(map(lambda x: f"{x}_{prefix}", list(ds.data_vars))),
)
)
)
def _get_latlons(geocode: int) -> tuple[list[float], list[float]]:
"""
Extract Latitude and Longitude from a Brazilian's city
according to IBGE's geocode format.
"""
lat, lon = brazil.extract_latlons.from_geocode(int(geocode))
N, S, E, W = brazil.extract_coordinates.from_latlon(lat, lon)
lats = [N, S]
lons = [E, W]
match geocode:
case 4108304: # Foz do Iguaçu
lats = [-25.5]
lons = [-54.5, -54.75]
return lats, lons
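
# Illustrative usage of the accessor (file path, database URI, table and schema
# names are placeholders; `load_dataset` is the loader mentioned in the class
# docstring):
#
#   import satellite.weather as sat
#   from sqlalchemy import create_engine
#
#   ds = sat.load_dataset("era5_brazil.nc")
#   df = ds.copebr.to_dataframe(3304557)   # Rio de Janeiro's IBGE geocode
#   engine = create_engine("postgresql://user:password@localhost/weather")
#   ds.copebr.to_sql(3304557, engine, tablename="copernicus_brasil", schema="municipio")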
|
/satellite_weather_downloader-1.9.0.tar.gz/satellite_weather_downloader-1.9.0/satellite/weather/copebr.py
| 0.873862 | 0.853547 |
copebr.py
|
pypi
|
from typing import Union
import xarray as xr
import numpy as np
from loguru import logger
from matplotlib.path import Path # type: ignore
from shapely.geometry.polygon import Polygon # type: ignore
from . import brazil # type: ignore
@xr.register_dataset_accessor("DSEI")
class CopeDSEIDatasetExtension:
"""
xarray.Dataset.DSEI
-------------------
Usage:
```
import satellite_weather as sat
ds = sat.load_dataset('file/path')
ds.DSEI['Yanomami']
ds.DSEI.get_polygon('Yanomami')
```
"""
DSEIs = brazil.DSEI.areas.DSEI_DF
_dsei_df = None
def __init__(self, xarray_ds: xr.Dataset) -> None:
self._ds = xarray_ds
self._grid = self.__do_grid()
def load_polygons(self):
df = brazil.DSEI.areas.load_polygons_df()
self._dsei_df = df
logger.info("DSEI Polygons loaded")
def get_polygon(self, dsei: Union[str, int]) -> Polygon:
if self._dsei_df is None:
logger.error("Polygons are not loaded. Use `.DSEI.load_poligons()`")
return None
polygon = self.__do_polygon(dsei)
return polygon
def __getitem__(self, __dsei: Union[str, int] = None):
try:
return self.__do_dataset(__dsei)
except AttributeError:
if self._dsei_df is None:
logger.error("Polygons are not loaded. Use `.DSEI.load_poligons()`")
return None
logger.error(f"{__dsei} not found. List all DSEIs with `.DSEI.info()`")
return None
def __do_grid(self):
x, y = np.meshgrid(self._ds.longitude, self._ds.latitude)
x, y = x.flatten(), y.flatten()
grid = np.vstack((x, y)).T
return grid
def __do_polygon(self, __dsei: Union[str, int]) -> Polygon:
if isinstance(__dsei, str):
cod = float(self.DSEIs[self.DSEIs.DSEI == __dsei].code)
polygon = self._dsei_df[self._dsei_df.cod_dsei == cod].geometry.item()
elif isinstance(__dsei, int):
polygon = self._dsei_df[
self._dsei_df.cod_dsei == float(__dsei)
].geometry.item()
return polygon
def __do_dataset(self, __dsei: Union[str, int]) -> xr.Dataset:
polygon = self.__do_polygon(__dsei)
path_coords = Path(list(polygon.exterior.coords))
p_filter = path_coords.contains_points(self._grid)
lons, lats = self._grid[p_filter].T
ds = self._ds.sel(latitude=lats, longitude=lons)
return ds
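
# Illustrative usage (the file path is a placeholder; 'Yanomami' is the DSEI name
# used in the class docstring):
#
#   import satellite_weather as sat
#
#   ds = sat.load_dataset("era5_brazil.nc")
#   ds.DSEI.load_polygons()                    # must be called before slicing by DSEI
#   polygon = ds.DSEI.get_polygon("Yanomami")
#   yanomami_ds = ds.DSEI["Yanomami"]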
|
/satellite_weather_downloader-1.9.0.tar.gz/satellite_weather_downloader-1.9.0/satellite/weather/dsei.py
| 0.901064 | 0.711368 |
dsei.py
|
pypi
|