text_prompt (string, 100–17.7k chars) | code_prompt (string, 7–9.86k chars)
---|---|
<SYSTEM_TASK:>
Check if the operation has finished.
<END_TASK>
<USER_TASK:>
Description:
def poll(self):
"""Check if the operation has finished.
:rtype: bool
:returns: A boolean indicating if the current operation has completed.
:raises ValueError: if the operation
has already completed.
""" |
if self.complete:
raise ValueError("The operation has completed.")
operation_pb = self._get_operation()
self._update_state(operation_pb)
return self.complete |
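A minimal polling loop built on ``poll()`` could look like the following sketch; the ``operation`` object, its construction, and the timing values are assumptions, and production code would usually prefer the library's own result helpers or exponential backoff.
.. code:: python

    import time

    def wait_for_operation(operation, interval=2.0, max_wait=300.0):
        """Block until ``operation.poll()`` reports completion (illustrative sketch)."""
        waited = 0.0
        while not operation.poll():   # True once the underlying operation has finished
            time.sleep(interval)
            waited += interval
            if waited >= max_wait:
                raise TimeoutError("operation did not complete within %.0f seconds" % max_wait)
        return operation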
<SYSTEM_TASK:>
Parses the response to a ``ReadModifyWriteRow`` request.
<END_TASK>
<USER_TASK:>
Description:
def _parse_rmw_row_response(row_response):
"""Parses the response to a ``ReadModifyWriteRow`` request.
:type row_response: :class:`.data_v2_pb2.Row`
:param row_response: The response row (with only modified cells) from a
``ReadModifyWriteRow`` request.
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell. For example:
.. code:: python
{
u'col-fam-id': {
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
},
u'col-fam-id2': {
b'col-name3-but-other-fam': [
(b'foo', datetime.datetime(...)),
],
},
}
""" |
result = {}
for column_family in row_response.row.families:
column_family_id, curr_family = _parse_family_pb(column_family)
result[column_family_id] = curr_family
return result |
<SYSTEM_TASK:>
Parses a Family protobuf into a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def _parse_family_pb(family_pb):
"""Parses a Family protobuf into a dictionary.
:type family_pb: :class:`._generated.data_pb2.Family`
:param family_pb: A protobuf
:rtype: tuple
:returns: A string and dictionary. The string is the name of the
column family and the dictionary has column names (within the
family) as keys and cell lists as values. Each cell is
represented with a two-tuple with the value (in bytes) and the
timestamp for the cell. For example:
.. code:: python
{
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
}
""" |
result = {}
for column in family_pb.columns:
result[column.qualifier] = cells = []
for cell in column.cells:
val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros))
cells.append(val_pair)
return family_pb.name, result |
<SYSTEM_TASK:>
Gets the total mutations size for the current row
<END_TASK>
<USER_TASK:>
Description:
def get_mutations_size(self):
""" Gets the total mutations size for current row
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_get_mutations_size]
:end-before: [END bigtable_row_get_mutations_size]
""" |
mutation_size = 0
for mutation in self._get_mutations():
mutation_size += mutation.ByteSize()
return mutation_size |
<SYSTEM_TASK:>
Sets a value in this row.
<END_TASK>
<USER_TASK:>
Description:
def set_cell(self, column_family_id, column, value, timestamp=None):
"""Sets a value in this row.
The cell is determined by the ``row_key`` of this :class:`DirectRow`
and the ``column``. The ``column`` must be in an existing
:class:`.ColumnFamily` (as determined by ``column_family_id``).
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_set_cell]
:end-before: [END bigtable_row_set_cell]
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
""" |
self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None) |
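A sketch of the accumulate-then-commit flow described above, assuming the google-cloud-bigtable client is installed and credentials are configured; the project, instance, table and row identifiers are placeholders.
.. code:: python

    import datetime

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)   # placeholder project
    table = client.instance("my-instance").table("my-table")     # placeholder IDs

    row = table.row(b"row-key-1")                                # DirectRow
    row.set_cell("cf1", b"name", b"ada",
                 timestamp=datetime.datetime.utcnow())           # mutation queued locally
    row.set_cell("cf1", b"score", 42)                            # int -> 64-bit big-endian bytes
    row.commit()                                                 # one MutateRow API request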
<SYSTEM_TASK:>
Deletes a cell in this row.
<END_TASK>
<USER_TASK:>
Description:
def delete_cell(self, column_family_id, column, time_range=None):
"""Deletes cell in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_delete_cell]
:end-before: [END bigtable_row_delete_cell]
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family that will have a
cell deleted.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
""" |
self._delete_cells(
column_family_id, [column], time_range=time_range, state=None
) |
<SYSTEM_TASK:>
Deletes cells in this row.
<END_TASK>
<USER_TASK:>
Description:
def delete_cells(self, column_family_id, columns, time_range=None):
"""Deletes cells in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_delete_cells]
:end-before: [END bigtable_row_delete_cells]
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then
the entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
""" |
self._delete_cells(column_family_id, columns, time_range=time_range, state=None) |
<SYSTEM_TASK:>
Makes a ``CheckAndMutateRow`` API request.
<END_TASK>
<USER_TASK:>
Description:
def commit(self):
"""Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
""" |
true_mutations = self._get_mutations(state=True)
false_mutations = self._get_mutations(state=False)
num_true_mutations = len(true_mutations)
num_false_mutations = len(false_mutations)
if num_true_mutations == 0 and num_false_mutations == 0:
return
if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS:
raise ValueError(
"Exceed the maximum allowable mutations (%d). Had %s true "
"mutations and %d false mutations."
% (MAX_MUTATIONS, num_true_mutations, num_false_mutations)
)
data_client = self._table._instance._client.table_data_client
resp = data_client.check_and_mutate_row(
table_name=self._table.name,
row_key=self._row_key,
predicate_filter=self._filter.to_pb(),
true_mutations=true_mutations,
false_mutations=false_mutations,
)
self.clear()
return resp.predicate_matched |
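A sketch of the conditional flow: mutations registered with ``state=True`` apply when the filter matches, those with ``state=False`` when it does not. The filter choice and all identifiers are placeholders, and credentials are assumed.
.. code:: python

    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import PassAllFilter

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    row_filter = PassAllFilter(True)                        # placeholder predicate filter
    cond_row = table.row(b"row-key-1", filter_=row_filter)  # ConditionalRow
    cond_row.set_cell("cf1", b"matched", b"yes", state=True)
    cond_row.set_cell("cf1", b"matched", b"no", state=False)
    matched = cond_row.commit()                             # CheckAndMutateRow request
    print("predicate matched:", matched)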
<SYSTEM_TASK:>
Appends a value to an existing cell.
<END_TASK>
<USER_TASK:>
Description:
def append_cell_value(self, column_family_id, column, value):
"""Appends a value to an existing cell.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_append_cell_value]
:end-before: [END bigtable_row_append_cell_value]
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes
:param value: The value to append to the existing value in the cell. If
the targeted cell is unset, it will be treated as
containing the empty string.
""" |
column = _to_bytes(column)
value = _to_bytes(value)
rule_pb = data_v2_pb2.ReadModifyWriteRule(
family_name=column_family_id, column_qualifier=column, append_value=value
)
self._rule_pb_list.append(rule_pb) |
<SYSTEM_TASK:>
Increments a value in an existing cell.
<END_TASK>
<USER_TASK:>
Description:
def increment_cell_value(self, column_family_id, column, int_value):
"""Increments a value in an existing cell.
Assumes the value in the cell is stored as a 64-bit integer
serialized to bytes.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_increment_cell_value]
:end-before: [END bigtable_row_increment_cell_value]
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type int_value: int
:param int_value: The value to increment the existing value in the cell
by. If the targeted cell is unset, it will be treated
as containing a zero. Otherwise, the targeted cell
must contain an 8-byte value (interpreted as a 64-bit
big-endian signed integer), or the entire request
will fail.
""" |
column = _to_bytes(column)
rule_pb = data_v2_pb2.ReadModifyWriteRule(
family_name=column_family_id,
column_qualifier=column,
increment_amount=int_value,
)
self._rule_pb_list.append(rule_pb) |
<SYSTEM_TASK:>
Makes a ``ReadModifyWriteRow`` API request.
<END_TASK>
<USER_TASK:>
Description:
def commit(self):
"""Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
""" |
num_mutations = len(self._rule_pb_list)
if num_mutations == 0:
return {}
if num_mutations > MAX_MUTATIONS:
raise ValueError(
"%d total append mutations exceed the maximum "
"allowable %d." % (num_mutations, MAX_MUTATIONS)
)
data_client = self._table._instance._client.table_data_client
row_response = data_client.read_modify_write_row(
table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list
)
# Reset modifications after committing the request.
self.clear()
# NOTE: We expect row_response.key == self._row_key but don't check.
return _parse_rmw_row_response(row_response) |
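A sketch of the read-modify-write flow, showing how ``commit()`` returns the nested dictionary described in ``_parse_rmw_row_response`` above; identifiers are placeholders and credentials are assumed.
.. code:: python

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    row = table.append_row(b"row-key-1")                  # AppendRow
    row.append_cell_value("cf1", b"greeting", b", world")
    row.increment_cell_value("cf1", b"counter", 1)
    modified = row.commit()                               # ReadModifyWriteRow request
    for family_id, columns in modified.items():
        for column, cells in columns.items():
            print(family_id, column, cells)               # [(value_bytes, datetime), ...]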
<SYSTEM_TASK:>
Creates an ExponentialTimeout object given a gapic retry configuration.
<END_TASK>
<USER_TASK:>
Description:
def _timeout_from_retry_config(retry_params):
"""Creates a ExponentialTimeout object given a gapic retry configuration.
Args:
retry_params (dict): The retry parameter values, for example::
{
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
"initial_rpc_timeout_millis": 120000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 120000,
"total_timeout_millis": 600000
}
Returns:
google.api_core.retry.ExponentialTimeout: The default time object for
the method.
""" |
return timeout.ExponentialTimeout(
initial=(retry_params["initial_rpc_timeout_millis"] / _MILLIS_PER_SECOND),
maximum=(retry_params["max_rpc_timeout_millis"] / _MILLIS_PER_SECOND),
multiplier=retry_params["rpc_timeout_multiplier"],
deadline=(retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND),
) |
<SYSTEM_TASK:>
Creates default retry and timeout objects for each method in a gapic
<END_TASK>
<USER_TASK:>
Description:
def parse_method_configs(interface_config):
"""Creates default retry and timeout objects for each method in a gapic
interface config.
Args:
interface_config (Mapping): The interface config section of the full
gapic library config. For example, if the full configuration has
an interface named ``google.example.v1.ExampleService`` you would
pass in just that interface's configuration, for example
``gapic_config['interfaces']['google.example.v1.ExampleService']``.
Returns:
Mapping[str, MethodConfig]: A mapping of RPC method names to their
configuration.
""" |
# Grab all the retry codes
retry_codes_map = {
name: retry_codes
for name, retry_codes in six.iteritems(interface_config.get("retry_codes", {}))
}
# Grab all of the retry params
retry_params_map = {
name: retry_params
for name, retry_params in six.iteritems(
interface_config.get("retry_params", {})
)
}
# Iterate through all the API methods and create a flat MethodConfig
# instance for each one.
method_configs = {}
for method_name, method_params in six.iteritems(
interface_config.get("methods", {})
):
retry_params_name = method_params.get("retry_params_name")
if retry_params_name is not None:
retry_params = retry_params_map[retry_params_name]
retry_ = _retry_from_retry_config(
retry_params, retry_codes_map[method_params["retry_codes_name"]]
)
timeout_ = _timeout_from_retry_config(retry_params)
# No retry config, so this is a non-retryable method.
else:
retry_ = None
timeout_ = timeout.ConstantTimeout(
method_params["timeout_millis"] / _MILLIS_PER_SECOND
)
method_configs[method_name] = MethodConfig(retry=retry_, timeout=timeout_)
return method_configs |
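A sketch of the interface config shape this helper consumes; the service, method, and retry names below are invented placeholders in the format the docstring describes, and the import path reflects where this helper has lived in ``google.api_core`` (it may vary by version).
.. code:: python

    from google.api_core.gapic_v1 import config

    interface_config = {
        "retry_codes": {"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"]},
        "retry_params": {
            "default": {
                "initial_retry_delay_millis": 100,
                "retry_delay_multiplier": 1.3,
                "max_retry_delay_millis": 60000,
                "initial_rpc_timeout_millis": 20000,
                "rpc_timeout_multiplier": 1.0,
                "max_rpc_timeout_millis": 20000,
                "total_timeout_millis": 600000,
            }
        },
        "methods": {
            "GetThing": {                              # retryable: gets ExponentialTimeout
                "retry_codes_name": "idempotent",
                "retry_params_name": "default",
                "timeout_millis": 60000,
            },
            "CreateThing": {"timeout_millis": 60000},  # no retry config -> ConstantTimeout
        },
    }

    method_configs = config.parse_method_configs(interface_config)
    print(method_configs["GetThing"].retry, method_configs["GetThing"].timeout)
    print(method_configs["CreateThing"].timeout)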
<SYSTEM_TASK:>
Return True if the future is done, False otherwise.
<END_TASK>
<USER_TASK:>
Description:
def done(self):
"""Return True the future is done, False otherwise.
This still returns True in failure cases; checking :meth:`result` or
:meth:`exception` is the canonical way to assess success or failure.
""" |
return self._exception != self._SENTINEL or self._result != self._SENTINEL |
<SYSTEM_TASK:>
Return the exception raised by the call, if any.
<END_TASK>
<USER_TASK:>
Description:
def exception(self, timeout=None):
"""Return the exception raised by the call, if any.
This blocks until the message has successfully been published, and
returns the exception. If the call succeeded, returns None.
Args:
timeout (Union[int, float]): The number of seconds before this call
times out and raises TimeoutError.
Raises:
TimeoutError: If the request times out.
Returns:
Exception: The exception raised by the call, if any.
""" |
# Wait until the future is done.
if not self._completed.wait(timeout=timeout):
raise exceptions.TimeoutError("Timed out waiting for result.")
# If the batch completed successfully, this should return None.
if self._result != self._SENTINEL:
return None
# Okay, this batch had an error; this should return it.
return self._exception |
<SYSTEM_TASK:>
Attach the provided callable to the future.
<END_TASK>
<USER_TASK:>
Description:
def add_done_callback(self, fn):
"""Attach the provided callable to the future.
The provided function is called, with this future as its only argument,
when the future finishes running.
""" |
if self.done():
return fn(self)
self._callbacks.append(fn) |
<SYSTEM_TASK:>
Set the result of the future to the provided result.
<END_TASK>
<USER_TASK:>
Description:
def set_result(self, result):
"""Set the result of the future to the provided result.
Args:
result (Any): The result
""" |
# Sanity check: A future can only complete once.
if self.done():
raise RuntimeError("set_result can only be called once.")
# Set the result and trigger the future.
self._result = result
self._trigger() |
<SYSTEM_TASK:>
Set the result of the future to the given exception.
<END_TASK>
<USER_TASK:>
Description:
def set_exception(self, exception):
"""Set the result of the future to the given exception.
Args:
exception (:exc:`Exception`): The exception raised.
""" |
# Sanity check: A future can only complete once.
if self.done():
raise RuntimeError("set_exception can only be called once.")
# Set the exception and trigger the future.
self._exception = exception
self._trigger() |
<SYSTEM_TASK:>
Trigger all callbacks registered to this Future.
<END_TASK>
<USER_TASK:>
Description:
def _trigger(self):
"""Trigger all callbacks registered to this Future.
This method is called internally by the batch once the batch
completes.
""" |
self._completed.set()
for callback in self._callbacks:
callback(self) |
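The methods above (``done``, ``add_done_callback``, ``set_result``, ``set_exception``, ``_trigger``) follow a sentinel-plus-Event pattern; the following is a condensed standalone sketch of that pattern, not the library's actual class.
.. code:: python

    import threading

    class _Sentinel(object):
        pass

    class MiniFuture(object):
        _SENTINEL = _Sentinel()

        def __init__(self):
            self._result = self._SENTINEL
            self._exception = self._SENTINEL
            self._callbacks = []
            self._completed = threading.Event()

        def done(self):
            # Done once either slot has been filled, including failure cases.
            return (self._result is not self._SENTINEL
                    or self._exception is not self._SENTINEL)

        def add_done_callback(self, fn):
            if self.done():
                return fn(self)
            self._callbacks.append(fn)

        def set_result(self, result):
            if self.done():
                raise RuntimeError("set_result can only be called once.")
            self._result = result
            self._trigger()

        def set_exception(self, exception):
            if self.done():
                raise RuntimeError("set_exception can only be called once.")
            self._exception = exception
            self._trigger()

        def _trigger(self):
            # Release any waiters and fan out to registered callbacks.
            self._completed.set()
            for callback in self._callbacks:
                callback(self)

    future = MiniFuture()
    future.add_done_callback(lambda f: print("done:", f._result))
    future.set_result("message-id-123")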
<SYSTEM_TASK:>
Creates a new read session. A read session divides the contents of a
<END_TASK>
<USER_TASK:>
Description:
def create_read_session(
self,
table_reference,
parent,
table_modifiers=None,
requested_streams=None,
read_options=None,
format_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new read session. A read session divides the contents of a
BigQuery table into one or more streams, which can then be used to read
data from the table. The read session also specifies properties of the
data to be read, such as a list of columns or a push-down filter describing
the rows to be returned.
A particular row can be read by at most one stream. When the caller has
reached the end of each stream in the session, then all the data in the
table has been read.
Read sessions automatically expire 24 hours after they are created and do
not require manual clean-up by the caller.
Example:
>>> from google.cloud import bigquery_storage_v1beta1
>>>
>>> client = bigquery_storage_v1beta1.BigQueryStorageClient()
>>>
>>> # TODO: Initialize `table_reference`:
>>> table_reference = {}
>>>
>>> # TODO: Initialize `parent`:
>>> parent = ''
>>>
>>> response = client.create_read_session(table_reference, parent)
Args:
table_reference (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.TableReference]): Required. Reference to the table to read.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_storage_v1beta1.types.TableReference`
parent (str): Required. String of the form ``projects/{project_id}`` indicating the
project this ReadSession is associated with. This is the project that
will be billed for usage.
table_modifiers (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.TableModifiers]): Optional. Any modifiers to the Table (e.g. snapshot timestamp).
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_storage_v1beta1.types.TableModifiers`
requested_streams (int): Optional. Initial number of streams. If unset or 0, we will
provide a value of streams so as to produce reasonable throughput. Must be
non-negative. The number of streams may be lower than the requested number,
depending on the amount of parallelism that is reasonable for the table and
the maximum amount of parallelism allowed by the system.
Streams must be read starting from offset 0.
read_options (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.TableReadOptions]): Optional. Read options for this session (e.g. column selection, filters).
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_storage_v1beta1.types.TableReadOptions`
format_ (~google.cloud.bigquery_storage_v1beta1.types.DataFormat): Data output format. Currently defaults to Avro.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadSession` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "create_read_session" not in self._inner_api_calls:
self._inner_api_calls[
"create_read_session"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_read_session,
default_retry=self._method_configs["CreateReadSession"].retry,
default_timeout=self._method_configs["CreateReadSession"].timeout,
client_info=self._client_info,
)
request = storage_pb2.CreateReadSessionRequest(
table_reference=table_reference,
parent=parent,
table_modifiers=table_modifiers,
requested_streams=requested_streams,
read_options=read_options,
format=format_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [
("table_reference.project_id", table_reference.project_id),
("table_reference.dataset_id", table_reference.dataset_id),
]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( # pragma: no cover
routing_header
)
metadata.append(routing_metadata) # pragma: no cover
return self._inner_api_calls["create_read_session"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Creates additional streams for a ReadSession. This API can be used to
<END_TASK>
<USER_TASK:>
Description:
def batch_create_read_session_streams(
self,
session,
requested_streams,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates additional streams for a ReadSession. This API can be used to
dynamically adjust the parallelism of a batch processing task upwards by
adding additional workers.
Example:
>>> from google.cloud import bigquery_storage_v1beta1
>>>
>>> client = bigquery_storage_v1beta1.BigQueryStorageClient()
>>>
>>> # TODO: Initialize `session`:
>>> session = {}
>>>
>>> # TODO: Initialize `requested_streams`:
>>> requested_streams = 0
>>>
>>> response = client.batch_create_read_session_streams(session, requested_streams)
Args:
session (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.ReadSession]): Required. Must be a non-expired session obtained from a call to
CreateReadSession. Only the name field needs to be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigquery_storage_v1beta1.types.ReadSession`
requested_streams (int): Required. Number of new streams requested. Must be positive.
Number of added streams may be less than this; see CreateReadSessionRequest
for more information.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigquery_storage_v1beta1.types.BatchCreateReadSessionStreamsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "batch_create_read_session_streams" not in self._inner_api_calls:
self._inner_api_calls[
"batch_create_read_session_streams"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_create_read_session_streams,
default_retry=self._method_configs[
"BatchCreateReadSessionStreams"
].retry,
default_timeout=self._method_configs[
"BatchCreateReadSessionStreams"
].timeout,
client_info=self._client_info,
)
request = storage_pb2.BatchCreateReadSessionStreamsRequest(
session=session, requested_streams=requested_streams
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("session.name", session.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( # pragma: no cover
routing_header
)
metadata.append(routing_metadata) # pragma: no cover
return self._inner_api_calls["batch_create_read_session_streams"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Build an API representation of this object.
<END_TASK>
<USER_TASK:>
Description:
def to_api_repr(self):
"""Build an API representation of this object.
Returns:
Dict[str, Any]:
A dictionary in the format used by the BigQuery API.
""" |
config = copy.deepcopy(self._properties)
if self.options is not None:
r = self.options.to_api_repr()
if r != {}:
config[self.options._RESOURCE_NAME] = r
return config |
<SYSTEM_TASK:>
Run linters.
<END_TASK>
<USER_TASK:>
Description:
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
""" |
session.install('flake8', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'flake8', os.path.join('google', 'cloud', 'bigquery_storage_v1beta1'))
session.run('flake8', 'tests') |
<SYSTEM_TASK:>
Convert a string representation of a binary operator to an enum.
<END_TASK>
<USER_TASK:>
Description:
def _enum_from_op_string(op_string):
"""Convert a string representation of a binary operator to an enum.
These enums come from the protobuf message definition
``StructuredQuery.FieldFilter.Operator``.
Args:
op_string (str): A comparison operation in the form of a string.
Acceptable values are ``<``, ``<=``, ``==``, ``>=``
and ``>``.
Returns:
int: The enum corresponding to ``op_string``.
Raises:
ValueError: If ``op_string`` is not a valid operator.
""" |
try:
return _COMPARISON_OPERATORS[op_string]
except KeyError:
choices = ", ".join(sorted(_COMPARISON_OPERATORS.keys()))
msg = _BAD_OP_STRING.format(op_string, choices)
raise ValueError(msg) |
<SYSTEM_TASK:>
Convert a string representation of a direction to an enum.
<END_TASK>
<USER_TASK:>
Description:
def _enum_from_direction(direction):
"""Convert a string representation of a direction to an enum.
Args:
direction (str): A direction to order by. Must be one of
:attr:`~.firestore.Query.ASCENDING` or
:attr:`~.firestore.Query.DESCENDING`.
Returns:
int: The enum corresponding to ``direction``.
Raises:
ValueError: If ``direction`` is not a valid direction.
""" |
if isinstance(direction, int):
return direction
if direction == Query.ASCENDING:
return enums.StructuredQuery.Direction.ASCENDING
elif direction == Query.DESCENDING:
return enums.StructuredQuery.Direction.DESCENDING
else:
msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)
raise ValueError(msg) |
<SYSTEM_TASK:>
Convert a specific protobuf filter to the generic filter type.
<END_TASK>
<USER_TASK:>
Description:
def _filter_pb(field_or_unary):
"""Convert a specific protobuf filter to the generic filter type.
Args:
field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\
query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\
firestore.v1beta1.query_pb2.StructuredQuery.UnaryFilter]): A
field or unary filter to convert to a generic filter.
Returns:
google.cloud.firestore_v1beta1.types.\
StructuredQuery.Filter: A "generic" filter.
Raises:
ValueError: If ``field_or_unary`` is not a field or unary filter.
""" |
if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):
return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)
elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):
return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)
else:
raise ValueError("Unexpected filter type", type(field_or_unary), field_or_unary) |
<SYSTEM_TASK:>
Convert a cursor pair to a protobuf.
<END_TASK>
<USER_TASK:>
Description:
def _cursor_pb(cursor_pair):
"""Convert a cursor pair to a protobuf.
If ``cursor_pair`` is :data:`None`, just returns :data:`None`.
Args:
cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of
* a list of field values.
* a ``before`` flag
Returns:
Optional[google.cloud.firestore_v1beta1.types.Cursor]: A
protobuf cursor corresponding to the values.
""" |
if cursor_pair is not None:
data, before = cursor_pair
value_pbs = [_helpers.encode_value(value) for value in data]
return query_pb2.Cursor(values=value_pbs, before=before) |
<SYSTEM_TASK:>
Parse a query response protobuf to a document snapshot.
<END_TASK>
<USER_TASK:>
Description:
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
"""Parse a query response protobuf to a document snapshot.
Args:
response_pb (google.cloud.proto.firestore.v1beta1.\
firestore_pb2.RunQueryResponse): A response message from running the query.
collection (~.firestore_v1beta1.collection.CollectionReference): A
reference to the collection that initiated the query.
expected_prefix (str): The expected prefix for fully-qualified
document names returned in the query results. This can be computed
directly from ``collection`` via :meth:`_parent_info`.
Returns:
Optional[~.firestore.document.DocumentSnapshot]: A
snapshot of the data returned in the query. If ``response_pb.document``
is not set, the snapshot will be :data:`None`.
""" |
if not response_pb.HasField("document"):
return None
document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
reference = collection.document(document_id)
data = _helpers.decode_dict(response_pb.document.fields, collection._client)
snapshot = document.DocumentSnapshot(
reference,
data,
exists=True,
read_time=response_pb.read_time,
create_time=response_pb.document.create_time,
update_time=response_pb.document.update_time,
)
return snapshot |
<SYSTEM_TASK:>
Project documents matching query to a limited set of fields.
<END_TASK>
<USER_TASK:>
Description:
def select(self, field_paths):
"""Project documents matching query to a limited set of fields.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
If the current query already has a projection set (i.e. has already
called :meth:`~.firestore_v1beta1.query.Query.select`), this
will overwrite it.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query. Acts as
a copy of the current query, modified with the newly added
projection.
Raises:
ValueError: If any ``field_path`` is invalid.
""" |
field_paths = list(field_paths)
for field_path in field_paths:
field_path_module.split_field_path(field_path) # raises
new_projection = query_pb2.StructuredQuery.Projection(
fields=[
query_pb2.StructuredQuery.FieldReference(field_path=field_path)
for field_path in field_paths
]
)
return self.__class__(
self._parent,
projection=new_projection,
field_filters=self._field_filters,
orders=self._orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) |
<SYSTEM_TASK:>
Filter the query on a field.
<END_TASK>
<USER_TASK:>
Description:
def where(self, field_path, op_string, value):
"""Filter the query on a field.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
Returns a new :class:`~.firestore_v1beta1.query.Query` that
filters on a specific field path, according to an operation (e.g.
``==`` or "equals") and a particular value to be paired with that
operation.
Args:
field_path (str): A field path (``.``-delimited list of
field names) for the field to filter on.
op_string (str): A comparison operation in the form of a string.
Acceptable values are ``<``, ``<=``, ``==``, ``>=``
and ``>``.
value (Any): The value to compare the field against in the filter.
If ``value`` is :data:`None` or a NaN, then ``==`` is the only
allowed operation.
Returns:
~.firestore_v1beta1.query.Query: A filtered query. Acts as a
copy of the current query, modified with the newly added filter.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``value`` is a NaN or :data:`None` and
``op_string`` is not ``==``.
""" |
field_path_module.split_field_path(field_path) # raises
if value is None:
if op_string != _EQ_OP:
raise ValueError(_BAD_OP_NAN_NULL)
filter_pb = query_pb2.StructuredQuery.UnaryFilter(
field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
op=enums.StructuredQuery.UnaryFilter.Operator.IS_NULL,
)
elif _isnan(value):
if op_string != _EQ_OP:
raise ValueError(_BAD_OP_NAN_NULL)
filter_pb = query_pb2.StructuredQuery.UnaryFilter(
field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
op=enums.StructuredQuery.UnaryFilter.Operator.IS_NAN,
)
elif isinstance(value, (transforms.Sentinel, transforms._ValueList)):
raise ValueError(_INVALID_WHERE_TRANSFORM)
else:
filter_pb = query_pb2.StructuredQuery.FieldFilter(
field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
op=_enum_from_op_string(op_string),
value=_helpers.encode_value(value),
)
new_filters = self._field_filters + (filter_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=new_filters,
orders=self._orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) |
<SYSTEM_TASK:>
Modify the query to add an order clause on a specific field.
<END_TASK>
<USER_TASK:>
Description:
def order_by(self, field_path, direction=ASCENDING):
"""Modify the query to add an order clause on a specific field.
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
Successive :meth:`~.firestore_v1beta1.query.Query.order_by` calls
will further refine the ordering of results returned by the query
(i.e. the new "order by" fields will be added to existing ones).
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
direction (Optional[str]): The direction to order by. Must be one
of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to
:attr:`ASCENDING`.
Returns:
~.firestore_v1beta1.query.Query: An ordered query. Acts as a
copy of the current query, modified with the newly added
"order by" constraint.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``direction`` is not one of :attr:`ASCENDING` or
:attr:`DESCENDING`.
""" |
field_path_module.split_field_path(field_path) # raises
order_pb = self._make_order(field_path, direction)
new_orders = self._orders + (order_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=new_orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) |
<SYSTEM_TASK:>
Limit a query to return a fixed number of results.
<END_TASK>
<USER_TASK:>
Description:
def limit(self, count):
"""Limit a query to return a fixed number of results.
If the current query already has a limit set, this will overwrite it.
Args:
count (int): Maximum number of documents to return that match
the query.
Returns:
~.firestore_v1beta1.query.Query: A limited query. Acts as a
copy of the current query, modified with the newly added
"limit" filter.
""" |
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=self._orders,
limit=count,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
) |
<SYSTEM_TASK:>
Skip to an offset in a query.
<END_TASK>
<USER_TASK:>
Description:
def offset(self, num_to_skip):
"""Skip to an offset in a query.
If the current query already has specified an offset, this will
overwrite it.
Args:
num_to_skip (int): The number of results to skip at the beginning
of query results. (Must be non-negative.)
Returns:
~.firestore_v1beta1.query.Query: An offset query. Acts as a
copy of the current query, modified with the newly added
"offset" field.
""" |
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=self._orders,
limit=self._limit,
offset=num_to_skip,
start_at=self._start_at,
end_at=self._end_at,
) |
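A sketch chaining the immutable builder methods (``select``, ``where``, ``order_by``, ``limit``, ``offset``); each call returns a new ``Query``. The collection and field names are placeholders, and a configured Firestore client with credentials is assumed.
.. code:: python

    from google.cloud import firestore_v1beta1

    db = firestore_v1beta1.Client()          # assumes configured credentials
    users = db.collection(u"users")          # placeholder collection

    query = (
        users.select([u"name", u"score"])    # projection
        .where(u"score", u">=", 10)          # field filter
        .order_by(u"score", direction=firestore_v1beta1.Query.DESCENDING)
        .limit(5)
        .offset(10)
    )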
<SYSTEM_TASK:>
Set values to be used for a ``start_at`` or ``end_at`` cursor.
<END_TASK>
<USER_TASK:>
Description:
def _cursor_helper(self, document_fields, before, start):
"""Set values to be used for a ``start_at`` or ``end_at`` cursor.
The values will later be used in a query protobuf.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
before (bool): Flag indicating whether the cursor should be
positioned before (:data:`True`) or after (:data:`False`) the
supplied ``document_fields`` when the query runs.
start (Optional[bool]): determines if the cursor is a ``start_at``
cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`).
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start at" cursor.
""" |
if isinstance(document_fields, tuple):
document_fields = list(document_fields)
elif isinstance(document_fields, document.DocumentSnapshot):
if document_fields.reference._path[:-1] != self._parent._path:
raise ValueError(
"Cannot use snapshot from another collection as a cursor."
)
else:
# NOTE: We copy so that the caller can't modify after calling.
document_fields = copy.deepcopy(document_fields)
cursor_pair = document_fields, before
query_kwargs = {
"projection": self._projection,
"field_filters": self._field_filters,
"orders": self._orders,
"limit": self._limit,
"offset": self._offset,
}
if start:
query_kwargs["start_at"] = cursor_pair
query_kwargs["end_at"] = self._end_at
else:
query_kwargs["start_at"] = self._start_at
query_kwargs["end_at"] = cursor_pair
return self.__class__(self._parent, **query_kwargs) |
<SYSTEM_TASK:>
Start query results at a particular document value.
<END_TASK>
<USER_TASK:>
Description:
def start_at(self, document_fields):
"""Start query results at a particular document value.
The result set will **include** the document specified by
``document_fields``.
If the current query already has specified a start cursor -- either
via this method or
:meth:`~.firestore_v1beta1.query.Query.start_after` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start at" cursor.
""" |
return self._cursor_helper(document_fields, before=True, start=True) |
<SYSTEM_TASK:>
Start query results after a particular document value.
<END_TASK>
<USER_TASK:>
Description:
def start_after(self, document_fields):
"""Start query results after a particular document value.
The result set will **exclude** the document specified by
``document_fields``.
If the current query already has specified a start cursor -- either
via this method or
:meth:`~.firestore_v1beta1.query.Query.start_at` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start after" cursor.
""" |
return self._cursor_helper(document_fields, before=False, start=True) |
<SYSTEM_TASK:>
End query results before a particular document value.
<END_TASK>
<USER_TASK:>
Description:
def end_before(self, document_fields):
"""End query results before a particular document value.
The result set will **exclude** the document specified by
``document_fields``.
If the current query already has specified an end cursor -- either
via this method or
:meth:`~.firestore_v1beta1.query.Query.end_at` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"end before" cursor.
""" |
return self._cursor_helper(document_fields, before=True, start=False) |
<SYSTEM_TASK:>
End query results at a particular document value.
<END_TASK>
<USER_TASK:>
Description:
def end_at(self, document_fields):
"""End query results at a particular document value.
The result set will **include** the document specified by
``document_fields``.
If the current query already has specified an end cursor -- either
via this method or
:meth:`~.firestore_v1beta1.query.Query.end_before` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"end at" cursor.
""" |
return self._cursor_helper(document_fields, before=False, start=False) |
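A sketch of the cursor methods; cursor values are interpreted against the ``order_by`` fields, so the dictionary keys below must match the ordering. All names and values are placeholders and a configured client is assumed.
.. code:: python

    from google.cloud import firestore_v1beta1

    db = firestore_v1beta1.Client()
    scores = db.collection(u"scores").order_by(u"points")

    page = (
        scores.start_at({u"points": 100})    # result set includes points == 100
        .end_before({u"points": 500})        # result set excludes points == 500
    )
    for snapshot in page.stream():
        print(snapshot.id, snapshot.to_dict())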
<SYSTEM_TASK:>
Convert all the filters into a single generic Filter protobuf.
<END_TASK>
<USER_TASK:>
Description:
def _filters_pb(self):
"""Convert all the filters into a single generic Filter protobuf.
This may be a lone field filter or unary filter, may be a composite
filter or may be :data:`None`.
Returns:
google.cloud.firestore_v1beta1.types.\
StructuredQuery.Filter: A "generic" filter representing the
current query's filters.
""" |
num_filters = len(self._field_filters)
if num_filters == 0:
return None
elif num_filters == 1:
return _filter_pb(self._field_filters[0])
else:
composite_filter = query_pb2.StructuredQuery.CompositeFilter(
op=enums.StructuredQuery.CompositeFilter.Operator.AND,
filters=[_filter_pb(filter_) for filter_ in self._field_filters],
)
return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter) |
<SYSTEM_TASK:>
Convert the current query into the equivalent protobuf.
<END_TASK>
<USER_TASK:>
Description:
def _to_protobuf(self):
"""Convert the current query into the equivalent protobuf.
Returns:
google.cloud.firestore_v1beta1.types.StructuredQuery: The
query protobuf.
""" |
projection = self._normalize_projection(self._projection)
orders = self._normalize_orders()
start_at = self._normalize_cursor(self._start_at, orders)
end_at = self._normalize_cursor(self._end_at, orders)
query_kwargs = {
"select": projection,
"from": [
query_pb2.StructuredQuery.CollectionSelector(
collection_id=self._parent.id
)
],
"where": self._filters_pb(),
"order_by": orders,
"start_at": _cursor_pb(start_at),
"end_at": _cursor_pb(end_at),
}
if self._offset is not None:
query_kwargs["offset"] = self._offset
if self._limit is not None:
query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit)
return query_pb2.StructuredQuery(**query_kwargs) |
<SYSTEM_TASK:>
Read the documents in the collection that match this query.
<END_TASK>
<USER_TASK:>
Description:
def stream(self, transaction=None):
"""Read the documents in the collection that match this query.
This sends a ``RunQuery`` RPC and then returns an iterator which
consumes each document returned in the stream of ``RunQueryResponse``
messages.
.. note::
The underlying stream of responses will time out after
the ``max_rpc_timeout_millis`` value set in the GAPIC
client configuration for the ``RunQuery`` API. Snapshots
not consumed from the iterator before that point will be lost.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that this query will
run in.
Yields:
~.firestore_v1beta1.document.DocumentSnapshot: The next
document that fulfills the query.
""" |
parent_path, expected_prefix = self._parent._parent_info()
response_iterator = self._client._firestore_api.run_query(
parent_path,
self._to_protobuf(),
transaction=_helpers.get_transaction_id(transaction),
metadata=self._client._rpc_metadata,
)
for response in response_iterator:
snapshot = _query_response_to_snapshot(
response, self._parent, expected_prefix
)
if snapshot is not None:
yield snapshot |
<SYSTEM_TASK:>
Monitor the documents in this collection that match this query.
<END_TASK>
<USER_TASK:>
Description:
def on_snapshot(self, callback):
"""Monitor the documents in this collection that match this query.
This starts a watch on this query using a background thread. The
provided callback is run on the snapshot of the documents.
Args:
callback (Callable): a callback to run when a change occurs. It is
passed the document snapshots, the changes, and the read time.
Example:
from google.cloud import firestore_v1beta1
db = firestore_v1beta1.Client()
query_ref = db.collection(u'users').where("user", "==", u'Ada')
def on_snapshot(docs, changes, read_time):
for doc in docs:
print(u'{} => {}'.format(doc.id, doc.to_dict()))
# Watch this query
query_watch = query_ref.on_snapshot(on_snapshot)
# Terminate this watch
query_watch.unsubscribe()
""" |
return Watch.for_query(
self, callback, document.DocumentSnapshot, document.DocumentReference
) |
<SYSTEM_TASK:>
Construct the API resource representation of this access entry
<END_TASK>
<USER_TASK:>
Description:
def to_api_repr(self):
"""Construct the API resource representation of this access entry
Returns:
Dict[str, object]: Access entry represented as an API resource
""" |
resource = {self.entity_type: self.entity_id}
if self.role is not None:
resource["role"] = self.role
return resource |
<SYSTEM_TASK:>
Construct a dataset reference from dataset ID string.
<END_TASK>
<USER_TASK:>
Description:
def from_string(cls, dataset_id, default_project=None):
"""Construct a dataset reference from dataset ID string.
Args:
dataset_id (str):
A dataset ID in standard SQL format. If ``default_project``
is not specified, this must include both the project ID and
the dataset ID, separated by ``.``.
default_project (str):
Optional. The project ID to use when ``dataset_id`` does not
include a project ID.
Returns:
DatasetReference:
Dataset reference parsed from ``dataset_id``.
Examples:
>>> DatasetReference.from_string('my-project-id.some_dataset')
DatasetReference('my-project-id', 'some_dataset')
Raises:
ValueError:
If ``dataset_id`` is not a fully-qualified dataset ID in
standard SQL format.
""" |
output_dataset_id = dataset_id
output_project_id = default_project
parts = dataset_id.split(".")
if len(parts) == 1 and not default_project:
raise ValueError(
"When default_project is not set, dataset_id must be a "
"fully-qualified dataset ID in standard SQL format. "
'e.g. "project.dataset_id", got {}'.format(dataset_id)
)
elif len(parts) == 2:
output_project_id, output_dataset_id = parts
elif len(parts) > 2:
raise ValueError(
"Too many parts in dataset_id. Expected a fully-qualified "
"dataset ID in standard SQL format. e.g. "
'"project.dataset_id", got {}'.format(dataset_id)
)
return cls(output_project_id, output_dataset_id) |
<SYSTEM_TASK:>
Convert a JSON blob to the native object.
<END_TASK>
<USER_TASK:>
Description:
def _item_to_blob(iterator, item):
"""Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.Blob`
:returns: The next blob in the page.
""" |
name = item.get("name")
blob = Blob(name, bucket=iterator.bucket)
blob._set_properties(item)
return blob |
<SYSTEM_TASK:>
Set the properties for the current object.
<END_TASK>
<USER_TASK:>
Description:
def _set_properties(self, value):
"""Set the properties for the current object.
:type value: dict or :class:`google.cloud.storage.batch._FutureDict`
:param value: The properties to be set.
""" |
self._label_removals.clear()
return super(Bucket, self)._set_properties(value) |
<SYSTEM_TASK:>
Factory constructor for blob object.
<END_TASK>
<USER_TASK:>
Description:
def blob(
self,
blob_name,
chunk_size=None,
encryption_key=None,
kms_key_name=None,
generation=None,
):
"""Factory constructor for blob object.
.. note::
This will not make an HTTP request; it simply instantiates
a blob object owned by this bucket.
:type blob_name: str
:param blob_name: The name of the blob to be instantiated.
:type chunk_size: int
:param chunk_size: The size of a chunk of data whenever iterating
(in bytes). This must be a multiple of 256 KB per
the API specification.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
:type kms_key_name: str
:param kms_key_name:
Optional resource name of KMS key used to encrypt blob's content.
:type generation: long
:param generation: Optional. If present, selects a specific revision of
this object.
:rtype: :class:`google.cloud.storage.blob.Blob`
:returns: The blob object created.
""" |
return Blob(
name=blob_name,
bucket=self,
chunk_size=chunk_size,
encryption_key=encryption_key,
kms_key_name=kms_key_name,
generation=generation,
) |
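A sketch of the factory constructor in use: instantiating the blob makes no HTTP request, and the upload call issues the actual API request. Bucket and object names are placeholders and credentials are assumed.
.. code:: python

    from google.cloud import storage

    client = storage.Client(project="my-project")        # placeholder project
    bucket = client.bucket("my-bucket")                   # placeholder bucket

    blob = bucket.blob("reports/summary.txt", chunk_size=256 * 1024)  # 256 KB multiple
    blob.upload_from_string("hello world")                # the API request happens here
    print(blob.public_url)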
<SYSTEM_TASK:>
Creates current bucket.
<END_TASK>
<USER_TASK:>
Description:
def create(self, client=None, project=None, location=None):
"""Creates current bucket.
If the bucket already exists, will raise
:class:`google.cloud.exceptions.Conflict`.
This implements "storage.buckets.insert".
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type project: str
:param project: Optional. The project under which the bucket is to
be created. If not passed, uses the project set on
the client.
:raises ValueError: if :attr:`user_project` is set.
:raises ValueError: if ``project`` is None and client's
:attr:`project` is also None.
:type location: str
:param location: Optional. The location of the bucket. If not passed,
the default location, US, will be used. See
https://cloud.google.com/storage/docs/bucket-locations
""" |
if self.user_project is not None:
raise ValueError("Cannot create bucket with 'user_project' set.")
client = self._require_client(client)
if project is None:
project = client.project
if project is None:
raise ValueError("Client project not set: pass an explicit project.")
query_params = {"project": project}
properties = {key: self._properties[key] for key in self._changes}
properties["name"] = self.name
if location is not None:
properties["location"] = location
api_response = client._connection.api_request(
method="POST",
path="/b",
query_params=query_params,
data=properties,
_target_object=self,
)
self._set_properties(api_response) |
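A sketch of creating a bucket; :class:`google.cloud.exceptions.Conflict` is raised if the name is already taken. The project and bucket names are placeholders and credentials are assumed.
.. code:: python

    from google.cloud import storage
    from google.cloud.exceptions import Conflict

    client = storage.Client(project="my-project")
    bucket = client.bucket("my-unique-bucket-name")       # must be globally unique
    try:
        bucket.create(location="US")                      # storage.buckets.insert
    except Conflict:
        print("bucket already exists")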
<SYSTEM_TASK:>
Get a blob object by name.
<END_TASK>
<USER_TASK:>
Description:
def get_blob(
self, blob_name, client=None, encryption_key=None, generation=None, **kwargs
):
"""Get a blob object by name.
This will return None if the blob doesn't exist:
.. literalinclude:: snippets.py
:start-after: [START get_blob]
:end-before: [END get_blob]
If :attr:`user_project` is set, bills the API request to that project.
:type blob_name: str
:param blob_name: The name of the blob to retrieve.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
See
https://cloud.google.com/storage/docs/encryption#customer-supplied.
:type generation: long
:param generation: Optional. If present, selects a specific revision of
this object.
:type kwargs: dict
:param kwargs: Keyword arguments to pass to the
:class:`~google.cloud.storage.blob.Blob` constructor.
:rtype: :class:`google.cloud.storage.blob.Blob` or None
:returns: The blob object if it exists, otherwise None.
""" |
blob = Blob(
bucket=self,
name=blob_name,
encryption_key=encryption_key,
generation=generation,
**kwargs
)
try:
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
blob.reload(client=client)
except NotFound:
return None
else:
return blob |
<SYSTEM_TASK:>
Return an iterator used to find blobs in the bucket.
<END_TASK>
<USER_TASK:>
Description:
def list_blobs(
self,
max_results=None,
page_token=None,
prefix=None,
delimiter=None,
versions=None,
projection="noAcl",
fields=None,
client=None,
):
"""Return an iterator used to find blobs in the bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type max_results: int
:param max_results:
(Optional) The maximum number of blobs in each page of results
from this request. Non-positive values are ignored. Defaults to
a sensible value set by the API.
:type page_token: str
:param page_token:
(Optional) If present, return the next batch of blobs, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:type prefix: str
:param prefix: (Optional) prefix used to filter blobs.
:type delimiter: str
:param delimiter: (Optional) Delimiter, used with ``prefix`` to
emulate hierarchy.
:type versions: bool
:param versions: (Optional) Whether object versions should be returned
as separate blobs.
:type projection: str
:param projection: (Optional) If used, must be 'full' or 'noAcl'.
Defaults to ``'noAcl'``. Specifies the set of
properties to return.
:type fields: str
:param fields: (Optional) Selector specifying which fields to include
in a partial response. Must be a list of fields. For
example to get a partial response with just the next
page token and the language of each blob returned:
``'items/contentLanguage,nextPageToken'``.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
in this bucket matching the arguments.
""" |
extra_params = {"projection": projection}
if prefix is not None:
extra_params["prefix"] = prefix
if delimiter is not None:
extra_params["delimiter"] = delimiter
if versions is not None:
extra_params["versions"] = versions
if fields is not None:
extra_params["fields"] = fields
if self.user_project is not None:
extra_params["userProject"] = self.user_project
client = self._require_client(client)
path = self.path + "/o"
iterator = page_iterator.HTTPIterator(
client=client,
api_request=client._connection.api_request,
path=path,
item_to_value=_item_to_blob,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_blobs_page_start,
)
iterator.bucket = self
iterator.prefixes = set()
return iterator |
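A hedged sketch of emulating a directory listing with ``prefix`` and ``delimiter``; the prefix is illustrative.

    iterator = bucket.list_blobs(prefix="logs/2019/", delimiter="/")
    for blob in iterator:
        print(blob.name)
    # "Subdirectories" are reported as prefixes once at least one page
    # of results has been consumed.
    print(iterator.prefixes)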
<SYSTEM_TASK:>
Delete this bucket.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, force=False, client=None):
"""Delete this bucket.
The bucket **must** be empty in order to submit a delete request. If
``force=True`` is passed, this will first attempt to delete all the
objects / blobs in the bucket (i.e. try to empty the bucket).
If the bucket doesn't exist, this will raise
:class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
(and ``force=False``), will raise
:class:`google.cloud.exceptions.Conflict`.
If ``force=True`` and the bucket contains more than 256 objects / blobs
this will cowardly refuse to delete the objects (or the bucket). This
is to prevent accidental bucket deletion and to prevent extremely long
runtime of this method.
If :attr:`user_project` is set, bills the API request to that project.
:type force: bool
:param force: If True, empties the bucket's objects then deletes it.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
contains more than 256 objects / blobs.
""" |
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
if force:
blobs = list(
self.list_blobs(
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client
)
)
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
"Refusing to delete bucket with more than "
"%d objects. If you actually want to delete "
"this bucket, please delete the objects "
"yourself before calling Bucket.delete()."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
# Ignore 404 errors on delete.
self.delete_blobs(blobs, on_error=lambda blob: None, client=client)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client._connection.api_request(
method="DELETE",
path=self.path,
query_params=query_params,
_target_object=None,
) |
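A hedged sketch of the force-delete behaviour described above; it assumes the bucket holds fewer than 256 objects, otherwise ``ValueError`` is raised before anything is deleted.

    try:
        bucket.delete(force=True)
    except ValueError as exc:
        # Too many objects to empty automatically; delete them yourself first.
        print(exc)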
<SYSTEM_TASK:>
Deletes a blob from the current bucket.
<END_TASK>
<USER_TASK:>
Description:
def delete_blob(self, blob_name, client=None, generation=None):
"""Deletes a blob from the current bucket.
If the blob isn't found (backend 404), raises a
:class:`google.cloud.exceptions.NotFound`.
For example:
.. literalinclude:: snippets.py
:start-after: [START delete_blob]
:end-before: [END delete_blob]
If :attr:`user_project` is set, bills the API request to that project.
:type blob_name: str
:param blob_name: A blob name to delete.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type generation: long
:param generation: Optional. If present, permanently deletes a specific
revision of this object.
:raises: :class:`google.cloud.exceptions.NotFound` (to suppress
the exception, call ``delete_blobs``, passing a no-op
``on_error`` callback, e.g.:
.. literalinclude:: snippets.py
:start-after: [START delete_blobs]
:end-before: [END delete_blobs]
""" |
client = self._require_client(client)
blob = Blob(blob_name, bucket=self, generation=generation)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client._connection.api_request(
method="DELETE",
path=blob.path,
query_params=blob._query_params,
_target_object=None,
) |
<SYSTEM_TASK:>
Deletes a list of blobs from the current bucket.
<END_TASK>
<USER_TASK:>
Description:
def delete_blobs(self, blobs, on_error=None, client=None):
"""Deletes a list of blobs from the current bucket.
Uses :meth:`delete_blob` to delete each individual blob.
If :attr:`user_project` is set, bills the API request to that project.
:type blobs: list
:param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
blob names to delete.
:type on_error: callable
:param on_error: (Optional) Takes single argument: ``blob``. Called
once for each blob raising
:class:`~google.cloud.exceptions.NotFound`;
otherwise, the exception is propagated.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`~google.cloud.exceptions.NotFound` (if
`on_error` is not passed).
""" |
for blob in blobs:
try:
blob_name = blob
if not isinstance(blob_name, six.string_types):
blob_name = blob.name
self.delete_blob(blob_name, client=client)
except NotFound:
if on_error is not None:
on_error(blob)
else:
raise |
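A hedged sketch showing the no-op ``on_error`` callback mentioned in ``delete_blob``; the blob names are illustrative.

    bucket.delete_blobs(
        ["a.txt", "b.txt", "missing.txt"],   # hypothetical names
        on_error=lambda blob: None,          # swallow NotFound per blob
    )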
<SYSTEM_TASK:>
Copy the given blob to the given bucket, optionally with a new name.
<END_TASK>
<USER_TASK:>
Description:
def copy_blob(
self,
blob,
destination_bucket,
new_name=None,
client=None,
preserve_acl=True,
source_generation=None,
):
"""Copy the given blob to the given bucket, optionally with a new name.
If :attr:`user_project` is set, bills the API request to that project.
:type blob: :class:`google.cloud.storage.blob.Blob`
:param blob: The blob to be copied.
:type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
:param destination_bucket: The bucket into which the blob should be
copied.
:type new_name: str
:param new_name: (optional) the new name for the copied file.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type preserve_acl: bool
:param preserve_acl: Optional. Copies ACL from old blob to new blob.
Default: True.
:type source_generation: long
:param source_generation: Optional. The generation of the blob to be
copied.
:rtype: :class:`google.cloud.storage.blob.Blob`
:returns: The new Blob.
""" |
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
if source_generation is not None:
query_params["sourceGeneration"] = source_generation
if new_name is None:
new_name = blob.name
new_blob = Blob(bucket=destination_bucket, name=new_name)
api_path = blob.path + "/copyTo" + new_blob.path
copy_result = client._connection.api_request(
method="POST",
path=api_path,
query_params=query_params,
_target_object=new_blob,
)
if not preserve_acl:
new_blob.acl.save(acl={}, client=client)
new_blob._set_properties(copy_result)
return new_blob |
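A hedged usage sketch; the bucket and object names are hypothetical, and ``client`` is an authenticated storage client.

    source_blob = bucket.blob("reports/2019.csv")        # hypothetical name
    destination = client.bucket("my-archive-bucket")     # hypothetical bucket
    new_blob = bucket.copy_blob(
        source_blob, destination, new_name="archive/2019.csv"
    )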
<SYSTEM_TASK:>
Rename the given blob using copy and delete operations.
<END_TASK>
<USER_TASK:>
Description:
def rename_blob(self, blob, new_name, client=None):
"""Rename the given blob using copy and delete operations.
If :attr:`user_project` is set, bills the API request to that project.
Effectively, copies blob to the same bucket with a new name, then
deletes the blob.
.. warning::
This method will first duplicate the data and then delete the
old blob. This means that with very large objects renaming
could be a (temporarily) very costly or very slow operation.
:type blob: :class:`google.cloud.storage.blob.Blob`
:param blob: The blob to be renamed.
:type new_name: str
:param new_name: The new name for this blob.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`Blob`
:returns: The newly-renamed blob.
""" |
same_name = blob.name == new_name
new_blob = self.copy_blob(blob, self, new_name, client=client)
if not same_name:
blob.delete(client=client)
return new_blob |
<SYSTEM_TASK:>
Set default KMS encryption key for objects in the bucket.
<END_TASK>
<USER_TASK:>
Description:
def default_kms_key_name(self, value):
"""Set default KMS encryption key for objects in the bucket.
:type value: str or None
:param value: new KMS key name (None to clear any existing key).
""" |
encryption_config = self._properties.get("encryption", {})
encryption_config["defaultKmsKeyName"] = value
self._patch_property("encryption", encryption_config) |
<SYSTEM_TASK:>
Retrieve or set labels assigned to this bucket.
<END_TASK>
<USER_TASK:>
Description:
def labels(self):
"""Retrieve or set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
.. note::
The getter for this property returns a dict which is a *copy*
of the bucket's labels. Mutating that dict has no effect unless
you then re-assign the dict via the setter. E.g.:
>>> labels = bucket.labels
>>> labels['new_key'] = 'some-label'
>>> del labels['old_key']
>>> bucket.labels = labels
>>> bucket.update()
:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.
:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
""" |
labels = self._properties.get("labels")
if labels is None:
return {}
return copy.deepcopy(labels) |
<SYSTEM_TASK:>
Set labels assigned to this bucket.
<END_TASK>
<USER_TASK:>
Description:
def labels(self, mapping):
"""Set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
:type mapping: :class:`dict`
:param mapping: Name-value pairs (string->string) labelling the bucket.
""" |
# If any labels have been expressly removed, we need to track this
# so that a future .patch() call can do the correct thing.
existing = set([k for k in self.labels.keys()])
incoming = set([k for k in mapping.keys()])
self._label_removals = self._label_removals.union(existing.difference(incoming))
# Actually update the labels on the object.
self._patch_property("labels", copy.deepcopy(mapping)) |
<SYSTEM_TASK:>
Retrieve or set lifecycle rules configured for this bucket.
<END_TASK>
<USER_TASK:>
Description:
def lifecycle_rules(self):
"""Retrieve or set lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's lifecycle rules mappings. Mutating the
list or one of its dicts has no effect unless you then re-assign
the dict via the setter. E.g.:
>>> rules = bucket.lifecycle_rules
>>> rules.append({'origin': '/foo', ...})
>>> rules[1]['rule']['action']['type'] = 'Delete'
>>> del rules[0]
>>> bucket.lifecycle_rules = rules
>>> bucket.update()
:setter: Set lifecycle rules for this bucket.
:getter: Gets the lifecycle rules for this bucket.
:rtype: generator(dict)
:returns: A sequence of mappings describing each lifecycle rule.
""" |
info = self._properties.get("lifecycle", {})
for rule in info.get("rule", ()):
action_type = rule["action"]["type"]
if action_type == "Delete":
yield LifecycleRuleDelete.from_api_repr(rule)
elif action_type == "SetStorageClass":
yield LifecycleRuleSetStorageClass.from_api_repr(rule)
else:
raise ValueError("Unknown lifecycle rule: {}".format(rule)) |
<SYSTEM_TASK:>
Set lifecycle rules configured for this bucket.
<END_TASK>
<USER_TASK:>
Description:
def lifecycle_rules(self, rules):
"""Set lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
:type rules: list of dictionaries
:param rules: A sequence of mappings describing each lifecycle rule.
""" |
rules = [dict(rule) for rule in rules] # Convert helpers if needed
self._patch_property("lifecycle", {"rule": rules}) |
<SYSTEM_TASK:>
Enable access logging for this bucket.
<END_TASK>
<USER_TASK:>
Description:
def enable_logging(self, bucket_name, object_prefix=""):
"""Enable access logging for this bucket.
See https://cloud.google.com/storage/docs/access-logs
:type bucket_name: str
:param bucket_name: name of bucket in which to store access logs
:type object_prefix: str
:param object_prefix: prefix for access log filenames
""" |
info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
self._patch_property("logging", info) |
<SYSTEM_TASK:>
Retrieve the effective time of the bucket's retention policy.
<END_TASK>
<USER_TASK:>
Description:
def retention_policy_effective_time(self):
"""Retrieve the effective time of the bucket's retention policy.
:rtype: datetime.datetime or ``NoneType``
:returns: point in time at which the bucket's retention policy is
effective, or ``None`` if the property is not
set locally.
""" |
policy = self._properties.get("retentionPolicy")
if policy is not None:
timestamp = policy.get("effectiveTime")
if timestamp is not None:
return _rfc3339_to_datetime(timestamp) |
<SYSTEM_TASK:>
Retrieve or set the retention period for items in the bucket.
<END_TASK>
<USER_TASK:>
Description:
def retention_period(self):
"""Retrieve or set the retention period for items in the bucket.
:rtype: int or ``NoneType``
:returns: number of seconds to retain items after upload or release
from event-based lock, or ``None`` if the property is not
set locally.
""" |
policy = self._properties.get("retentionPolicy")
if policy is not None:
period = policy.get("retentionPeriod")
if period is not None:
return int(period) |
<SYSTEM_TASK:>
Set the retention period for items in the bucket.
<END_TASK>
<USER_TASK:>
Description:
def retention_period(self, value):
"""Set the retention period for items in the bucket.
:type value: int
:param value:
number of seconds to retain items after upload or release from
event-based lock.
:raises ValueError: if the bucket's retention policy is locked.
""" |
policy = self._properties.setdefault("retentionPolicy", {})
if value is not None:
policy["retentionPeriod"] = str(value)
else:
policy = None
self._patch_property("retentionPolicy", policy) |
<SYSTEM_TASK:>
Configure website-related properties.
<END_TASK>
<USER_TASK:>
Description:
def configure_website(self, main_page_suffix=None, not_found_page=None):
"""Configure website-related properties.
See https://cloud.google.com/storage/docs/hosting-static-website
.. note::
This only works if your bucket name is a domain name
(which requires verifying ownership of that domain with Google).
If you want this bucket to host a website, just provide the name
of an index page and a page to use when a blob isn't found:
.. literalinclude:: snippets.py
:start-after: [START configure_website]
:end-before: [END configure_website]
You probably should also make the whole bucket public:
.. literalinclude:: snippets.py
:start-after: [START make_public]
:end-before: [END make_public]
This says: "Make the bucket public, and all the stuff already in
the bucket, and anything else I add to the bucket. Just make it
all public."
:type main_page_suffix: str
:param main_page_suffix: The page to use as the main page
of a directory.
Typically something like index.html.
:type not_found_page: str
:param not_found_page: The file to use when a page isn't found.
""" |
data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page}
self._patch_property("website", data) |
<SYSTEM_TASK:>
Retrieve the IAM policy for the bucket.
<END_TASK>
<USER_TASK:>
Description:
def get_iam_policy(self, client=None):
"""Retrieve the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``getIamPolicy`` API request.
""" |
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
info = client._connection.api_request(
method="GET",
path="%s/iam" % (self.path,),
query_params=query_params,
_target_object=None,
)
return Policy.from_api_repr(info) |
<SYSTEM_TASK:>
Update the IAM policy for the bucket.
<END_TASK>
<USER_TASK:>
Description:
def set_iam_policy(self, policy, client=None):
"""Update the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type policy: :class:`google.api_core.iam.Policy`
:param policy: policy instance used to update bucket's IAM policy.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``setIamPolicy`` API request.
""" |
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
resource = policy.to_api_repr()
resource["resourceId"] = self.path
info = client._connection.api_request(
method="PUT",
path="%s/iam" % (self.path,),
query_params=query_params,
data=resource,
_target_object=None,
)
return Policy.from_api_repr(info) |
<SYSTEM_TASK:>
Update bucket's ACL, revoking read access for anonymous users.
<END_TASK>
<USER_TASK:>
Description:
def make_private(self, recursive=False, future=False, client=None):
"""Update bucket's ACL, revoking read access for anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
private as well.
:type future: bool
:param future: If True, this will make all objects created in the
future private as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_private`
for each blob.
""" |
self.acl.all().revoke_read()
self.acl.save(client=client)
if future:
doa = self.default_object_acl
if not doa.loaded:
doa.reload(client=client)
doa.all().revoke_read()
doa.save(client=client)
if recursive:
blobs = list(
self.list_blobs(
projection="full",
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client,
)
)
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
"Refusing to make private recursively with more than "
"%d objects. If you actually want to make every object "
"in this bucket private, iterate through the blobs "
"returned by 'Bucket.list_blobs()' and call "
"'make_private' on each one."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
for blob in blobs:
blob.acl.all().revoke_read()
blob.acl.save(client=client) |
<SYSTEM_TASK:>
Create a signed upload policy for uploading objects.
<END_TASK>
<USER_TASK:>
Description:
def generate_upload_policy(self, conditions, expiration=None, client=None):
"""Create a signed upload policy for uploading objects.
This method generates and signs a policy document. You can use
`policy documents`_ to allow visitors to a website to upload files to
Google Cloud Storage without giving them direct write access.
For example:
.. literalinclude:: snippets.py
:start-after: [START policy_document]
:end-before: [END policy_document]
.. _policy documents:
https://cloud.google.com/storage/docs/xml-api\
/post-object#policydocument
:type expiration: datetime
:param expiration: Optional expiration in UTC. If not specified, the
policy will expire in 1 hour.
:type conditions: list
:param conditions: A list of conditions as described in the
`policy documents`_ documentation.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: dict
:returns: A dictionary of (form field name, form field value) of form
fields that should be added to your HTML upload form in order
to attach the signature.
""" |
client = self._require_client(client)
credentials = client._base_connection.credentials
_signing.ensure_signed_credentials(credentials)
if expiration is None:
expiration = _NOW() + datetime.timedelta(hours=1)
conditions = conditions + [{"bucket": self.name}]
policy_document = {
"expiration": _datetime_to_rfc3339(expiration),
"conditions": conditions,
}
encoded_policy_document = base64.b64encode(
json.dumps(policy_document).encode("utf-8")
)
signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
fields = {
"bucket": self.name,
"GoogleAccessId": credentials.signer_email,
"policy": encoded_policy_document.decode("utf-8"),
"signature": signature.decode("utf-8"),
}
return fields |
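A hedged sketch of building form fields for a browser upload; the conditions and expiration are illustrative, and signing-capable credentials (e.g. a service-account key) are assumed.

    import datetime

    fields = bucket.generate_upload_policy(
        conditions=[
            ["starts-with", "$key", "uploads/"],   # restrict the object prefix
            {"acl": "bucket-owner-read"},
        ],
        expiration=datetime.datetime.utcnow() + datetime.timedelta(minutes=30),
    )
    # `fields` become hidden inputs of an HTML form POSTed to
    # https://storage.googleapis.com/<bucket-name>.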
<SYSTEM_TASK:>
Lock the bucket's retention policy.
<END_TASK>
<USER_TASK:>
Description:
def lock_retention_policy(self, client=None):
"""Lock the bucket's retention policy.
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked.
""" |
if "metageneration" not in self._properties:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
policy = self._properties.get("retentionPolicy")
if policy is None:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
if policy.get("isLocked"):
raise ValueError("Bucket's retention policy is already locked.")
client = self._require_client(client)
query_params = {"ifMetagenerationMatch": self.metageneration}
if self.user_project is not None:
query_params["userProject"] = self.user_project
path = "/b/{}/lockRetentionPolicy".format(self.name)
api_response = client._connection.api_request(
method="POST", path=path, query_params=query_params, _target_object=self
)
self._set_properties(api_response) |
<SYSTEM_TASK:>
Generates a signed URL for this bucket.
<END_TASK>
<USER_TASK:>
Description:
def generate_signed_url(
self,
expiration=None,
api_access_endpoint=_API_ACCESS_ENDPOINT,
method="GET",
headers=None,
query_parameters=None,
client=None,
credentials=None,
version=None,
):
"""Generates a signed URL for this bucket.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL using a GCE service account. Follow `Issue 50`_ for updates on
this. If you'd like to be able to generate a signed URL from GCE,
you can use a standard service account from a JSON file rather
than a GCE service account.
.. _Issue 50: https://github.com/GoogleCloudPlatform/\
google-auth-library-python/issues/50
If you have a bucket that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible buckets, but don't want to require users to explicitly
log in.
:type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
:param expiration: Point in time when the signed URL should expire.
:type api_access_endpoint: str
:param api_access_endpoint: Optional URI base.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type headers: dict
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query parameters to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: (Optional) The OAuth2 credentials to use to sign
the URL. Defaults to the credentials stored on the
client used.
:type version: str
:param version: (Optional) The version of signed credential to create.
Must be one of 'v2' | 'v4'.
:raises: :exc:`ValueError` when version is invalid.
:raises: :exc:`TypeError` when expiration is not a valid type.
:raises: :exc:`AttributeError` if credentials is not an instance
of :class:`google.auth.credentials.Signing`.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration.
""" |
if version is None:
version = "v2"
elif version not in ("v2", "v4"):
raise ValueError("'version' must be either 'v2' or 'v4'")
resource = "/{bucket_name}".format(bucket_name=self.name)
if credentials is None:
client = self._require_client(client)
credentials = client._credentials
if version == "v2":
helper = generate_signed_url_v2
else:
helper = generate_signed_url_v4
return helper(
credentials,
resource=resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method.upper(),
headers=headers,
query_parameters=query_parameters,
) |
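A hedged sketch of generating a time-limited URL for the bucket; signing-capable credentials (e.g. a service-account key) are assumed.

    import datetime

    url = bucket.generate_signed_url(
        expiration=datetime.timedelta(hours=1),
        method="GET",
        version="v4",
    )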
<SYSTEM_TASK:>
Convert a datetime to microseconds since the unix epoch.
<END_TASK>
<USER_TASK:>
Description:
def to_microseconds(value):
"""Convert a datetime to microseconds since the unix epoch.
Args:
value (datetime.datetime): The datetime to covert.
Returns:
int: Microseconds since the unix epoch.
""" |
if not value.tzinfo:
value = value.replace(tzinfo=pytz.utc)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(pytz.utc)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond |
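Two worked values for the helper above (assumed to be in scope); naive datetimes are treated as UTC.

    import datetime

    assert to_microseconds(datetime.datetime(1970, 1, 1)) == 0
    assert to_microseconds(datetime.datetime(1970, 1, 1, 0, 0, 1, 500)) == 1000500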
<SYSTEM_TASK:>
Convert a microsecond-precision timestamp to datetime.
<END_TASK>
<USER_TASK:>
Description:
def from_rfc3339(value):
"""Convert a microsecond-precision timestamp to datetime.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
""" |
return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc) |
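A worked value for the helper above (assumed to be in scope); the timestamp must carry microseconds and a trailing ``Z``.

    dt = from_rfc3339("2019-06-01T12:30:00.000123Z")
    assert dt.microsecond == 123
    assert dt.tzinfo is not None   # pytz.utc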
<SYSTEM_TASK:>
Creates a cluster in a project.
<END_TASK>
<USER_TASK:>
Description:
def create_cluster(
self,
project_id,
region,
cluster,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, region, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``CreateClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.CreateClusterRequest(
project_id=project_id, region=region, cluster=cluster, request_id=request_id
)
operation = self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
) |
<SYSTEM_TASK:>
Deletes a cluster in a project.
<END_TASK>
<USER_TASK:>
Description:
def delete_cluster(
self,
project_id,
region,
cluster_name,
cluster_uuid=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.delete_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail
(with error NOT\_FOUND) if cluster with specified UUID does not exist.
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``DeleteClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "delete_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"delete_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs["DeleteCluster"].retry,
default_timeout=self._method_configs["DeleteCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DeleteClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
)
operation = self._inner_api_calls["delete_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
) |
<SYSTEM_TASK:>
Gets the resource representation for a cluster in a project.
<END_TASK>
<USER_TASK:>
Description:
def get_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Gets cluster diagnostic information. After the operation completes, the
<END_TASK>
<USER_TASK:>
Description:
def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.diagnose_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "diagnose_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"diagnose_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs["DiagnoseCluster"].retry,
default_timeout=self._method_configs["DiagnoseCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
operation = self._inner_api_calls["diagnose_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
) |
<SYSTEM_TASK:>
Commit this batch after sufficient time has elapsed.
<END_TASK>
<USER_TASK:>
Description:
def monitor(self):
"""Commit this batch after sufficient time has elapsed.
This simply sleeps for ``self._settings.max_latency`` seconds,
and then calls commit unless the batch has already been committed.
""" |
# NOTE: This blocks; it is up to the calling code to call it
# in a separate thread.
# Sleep for however long we should be waiting.
time.sleep(self._settings.max_latency)
_LOGGER.debug("Monitor is waking up")
return self._commit() |
<SYSTEM_TASK:>
Publish a single message.
<END_TASK>
<USER_TASK:>
Description:
def publish(self, message):
"""Publish a single message.
Add the given message to this object; this will cause it to be
published once the batch either has enough messages or a sufficient
period of time has elapsed.
This method is called by :meth:`~.PublisherClient.publish`.
Args:
message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.
Returns:
Optional[~google.api_core.future.Future]: An object conforming to
the :class:`~concurrent.futures.Future` interface or :data:`None`.
If :data:`None` is returned, that signals that the batch cannot
accept a message.
""" |
# Coerce the type, just in case.
if not isinstance(message, types.PubsubMessage):
message = types.PubsubMessage(**message)
future = None
with self._state_lock:
if not self.will_accept(message):
return future
new_size = self._size + message.ByteSize()
new_count = len(self._messages) + 1
overflow = (
new_size > self.settings.max_bytes
or new_count >= self._settings.max_messages
)
if not self._messages or not overflow:
# Store the actual message in the batch's message queue.
self._messages.append(message)
self._size = new_size
# Track the future on this batch (so that the result of the
# future can be set).
future = futures.Future(completed=threading.Event())
self._futures.append(future)
# Try to commit, but it must be **without** the lock held, since
# ``commit()`` will try to obtain the lock.
if overflow:
self.commit()
return future |
<SYSTEM_TASK:>
URL path for change set APIs.
<END_TASK>
<USER_TASK:>
Description:
def path(self):
"""URL path for change set APIs.
:rtype: str
:returns: the path based on project, zone, and change set names.
""" |
return "/projects/%s/managedZones/%s/changes/%s" % (
self.zone.project,
self.zone.name,
self.name,
) |
<SYSTEM_TASK:>
Update name of the change set.
<END_TASK>
<USER_TASK:>
Description:
def name(self, value):
"""Update name of the change set.
:type value: str
:param value: New name for the changeset.
""" |
if not isinstance(value, six.string_types):
raise ValueError("Pass a string")
self._properties["id"] = value |
<SYSTEM_TASK:>
Append a record set to the 'additions' for the change set.
<END_TASK>
<USER_TASK:>
Description:
def add_record_set(self, record_set):
"""Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
""" |
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._additions += (record_set,) |
<SYSTEM_TASK:>
Append a record set to the 'deletions' for the change set.
<END_TASK>
<USER_TASK:>
Description:
def delete_record_set(self, record_set):
"""Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
""" |
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._deletions += (record_set,) |
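A hedged sketch of assembling and submitting a change set; it assumes ``zone`` is an existing managed zone object, and the record values are illustrative.

    record = zone.resource_record_set(
        "www.example.com.", "A", 3600, ["203.0.113.10"]   # hypothetical record
    )
    changes = zone.changes()
    changes.add_record_set(record)
    changes.create()   # submit the additions/deletions to the DNS API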
<SYSTEM_TASK:>
Creates a sink bound to the current client.
<END_TASK>
<USER_TASK:>
Description:
def sink(self, name, filter_=None, destination=None):
"""Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`google.cloud.logging.sink.Sink`
:returns: Sink created with the current client.
""" |
return Sink(name, filter_, destination, client=self) |
<SYSTEM_TASK:>
Creates a metric bound to the current client.
<END_TASK>
<USER_TASK:>
Description:
def metric(self, name, filter_=None, description=""):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
""" |
return Metric(name, filter_, client=self, description=description) |
<SYSTEM_TASK:>
Return the default logging handler based on the local environment.
<END_TASK>
<USER_TASK:>
Description:
def get_default_handler(self, **kw):
"""Return the default logging handler based on the local environment.
:type kw: dict
:param kw: keyword args passed to handler constructor
:rtype: :class:`logging.Handler`
:returns: The default log handler based on the environment
""" |
gke_cluster_name = retrieve_metadata_server(_GKE_CLUSTER_NAME)
if (
_APPENGINE_FLEXIBLE_ENV_VM in os.environ
or _APPENGINE_INSTANCE_ID in os.environ
):
return AppEngineHandler(self, **kw)
elif gke_cluster_name is not None:
return ContainerEngineHandler(**kw)
else:
return CloudLoggingHandler(self, **kw) |
<SYSTEM_TASK:>
Attach default Stackdriver logging handler to the root logger.
<END_TASK>
<USER_TASK:>
Description:
def setup_logging(
self, log_level=logging.INFO, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, **kw
):
"""Attach default Stackdriver logging handler to the root logger.
This method uses the default log handler, obtained by
:meth:`~get_default_handler`, and attaches it to the root Python
logger, so that a call such as ``logging.warn``, as well as all child
loggers, will report to Stackdriver logging.
:type log_level: int
:param log_level: (Optional) Python logging log level. Defaults to
:const:`logging.INFO`.
:type excluded_loggers: tuple
:param excluded_loggers: (Optional) The loggers to not attach the
handler to. This will always include the
loggers in the path of the logging client
itself.
:type kw: dict
:param kw: keyword args passed to handler constructor
""" |
handler = self.get_default_handler(**kw)
setup_logging(handler, log_level=log_level, excluded_loggers=excluded_loggers) |
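A hedged sketch of wiring the root logger to Stackdriver; the excluded logger name is illustrative.

    import logging

    from google.cloud import logging as cloud_logging

    client = cloud_logging.Client()
    client.setup_logging(log_level=logging.INFO, excluded_loggers=("werkzeug",))
    logging.info("This entry is shipped to Stackdriver Logging.")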
<SYSTEM_TASK:>
Return a fully-qualified key_ring string.
<END_TASK>
<USER_TASK:>
Description:
def key_ring_path(cls, project, location, key_ring):
"""Return a fully-qualified key_ring string.""" |
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}",
project=project,
location=location,
key_ring=key_ring,
) |
<SYSTEM_TASK:>
Return a fully-qualified crypto_key_path string.
<END_TASK>
<USER_TASK:>
Description:
def crypto_key_path_path(cls, project, location, key_ring, crypto_key_path):
"""Return a fully-qualified crypto_key_path string.""" |
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key_path=**}",
project=project,
location=location,
key_ring=key_ring,
crypto_key_path=crypto_key_path,
) |
<SYSTEM_TASK:>
Return a fully-qualified crypto_key_version string.
<END_TASK>
<USER_TASK:>
Description:
def crypto_key_version_path(
cls, project, location, key_ring, crypto_key, crypto_key_version
):
"""Return a fully-qualified crypto_key_version string.""" |
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}",
project=project,
location=location,
key_ring=key_ring,
crypto_key=crypto_key,
crypto_key_version=crypto_key_version,
) |
<SYSTEM_TASK:>
Create a new ``KeyRing`` in a given Project and Location.
<END_TASK>
<USER_TASK:>
Description:
def create_key_ring(
self,
parent,
key_ring_id,
key_ring,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Create a new ``KeyRing`` in a given Project and Location.
Example:
>>> from google.cloud import kms_v1
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `key_ring_id`:
>>> key_ring_id = ''
>>>
>>> # TODO: Initialize `key_ring`:
>>> key_ring = {}
>>>
>>> response = client.create_key_ring(parent, key_ring_id, key_ring)
Args:
parent (str): Required. The resource name of the location associated with the
``KeyRings``, in the format ``projects/*/locations/*``.
key_ring_id (str): Required. It must be unique within a location and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.KeyRing`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.KeyRing` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "create_key_ring" not in self._inner_api_calls:
self._inner_api_calls[
"create_key_ring"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_key_ring,
default_retry=self._method_configs["CreateKeyRing"].retry,
default_timeout=self._method_configs["CreateKeyRing"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateKeyRingRequest(
parent=parent, key_ring_id=key_ring_id, key_ring=key_ring
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_key_ring"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Create a new ``CryptoKey`` within a ``KeyRing``.
<END_TASK>
<USER_TASK:>
Description:
def create_crypto_key(
self,
parent,
crypto_key_id,
crypto_key,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Create a new ``CryptoKey`` within a ``KeyRing``.
``CryptoKey.purpose`` and ``CryptoKey.version_template.algorithm`` are
required.
Example:
>>> from google.cloud import kms_v1
>>> from google.cloud.kms_v1 import enums
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
>>> crypto_key_id = 'my-app-key'
>>> purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
>>> seconds = 2147483647
>>> next_rotation_time = {'seconds': seconds}
>>> seconds_2 = 604800
>>> rotation_period = {'seconds': seconds_2}
>>> crypto_key = {'purpose': purpose, 'next_rotation_time': next_rotation_time, 'rotation_period': rotation_period}
>>>
>>> response = client.create_crypto_key(parent, crypto_key_id, crypto_key)
Args:
parent (str): Required. The ``name`` of the KeyRing associated with the
``CryptoKeys``.
crypto_key_id (str): Required. It must be unique within a KeyRing and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
crypto_key (Union[dict, ~google.cloud.kms_v1.types.CryptoKey]): A ``CryptoKey`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.CryptoKey`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.CryptoKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "create_crypto_key" not in self._inner_api_calls:
self._inner_api_calls[
"create_crypto_key"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_crypto_key,
default_retry=self._method_configs["CreateCryptoKey"].retry,
default_timeout=self._method_configs["CreateCryptoKey"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateCryptoKeyRequest(
parent=parent, crypto_key_id=crypto_key_id, crypto_key=crypto_key
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_crypto_key"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Return a fully-qualified span string.
<END_TASK>
<USER_TASK:>
Description:
def span_path(cls, project, trace, span):
"""Return a fully-qualified span string.""" |
return google.api_core.path_template.expand(
"projects/{project}/traces/{trace}/spans/{span}",
project=project,
trace=trace,
span=span,
) |
<SYSTEM_TASK:>
Sends new spans to new or existing traces. You cannot update
<END_TASK>
<USER_TASK:>
Description:
def batch_write_spans(
self,
name,
spans,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends new spans to new or existing traces. You cannot update
existing spans.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `spans`:
>>> spans = []
>>>
>>> client.batch_write_spans(name, spans)
Args:
name (str): Required. The name of the project where the spans belong. The format is
``projects/[PROJECT_ID]``.
spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A list of new spans. The span names must not match existing
spans, or the results are undefined.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Span`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "batch_write_spans" not in self._inner_api_calls:
self._inner_api_calls[
"batch_write_spans"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_write_spans,
default_retry=self._method_configs["BatchWriteSpans"].retry,
default_timeout=self._method_configs["BatchWriteSpans"].timeout,
client_info=self._client_info,
)
request = tracing_pb2.BatchWriteSpansRequest(name=name, spans=spans)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["batch_write_spans"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
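A sketch of assembling one concrete span dict for this call (editor's illustration); the field names follow the Span message described under create_span below, and the project and IDs are placeholders.
import datetime

from google.cloud import trace_v2
from google.protobuf import timestamp_pb2

client = trace_v2.TraceServiceClient()

trace_id = "f" * 32  # hypothetical trace ID
span_id = "1" * 16   # hypothetical span ID

# Protobuf Timestamps for the span boundaries.
start_time = timestamp_pb2.Timestamp()
start_time.FromDatetime(datetime.datetime.utcnow())
end_time = timestamp_pb2.Timestamp()
end_time.FromDatetime(datetime.datetime.utcnow() + datetime.timedelta(milliseconds=5))

span = {
    "name": client.span_path("my-project", trace_id, span_id),
    "span_id": span_id,
    "display_name": {"value": "load-user-profile", "truncated_byte_count": 0},
    "start_time": start_time,
    "end_time": end_time,
}

client.batch_write_spans(client.project_path("my-project"), [span])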
<SYSTEM_TASK:>
Creates a new span.
<END_TASK>
<USER_TASK:>
Description:
def create_span(
self,
name,
span_id,
display_name,
start_time,
end_time,
parent_span_id=None,
attributes=None,
stack_trace=None,
time_events=None,
links=None,
status=None,
same_process_as_parent_span=None,
child_span_count=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new span.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.span_path('[PROJECT]', '[TRACE]', '[SPAN]')
>>>
>>> # TODO: Initialize `span_id`:
>>> span_id = ''
>>>
>>> # TODO: Initialize `display_name`:
>>> display_name = {}
>>>
>>> # TODO: Initialize `start_time`:
>>> start_time = {}
>>>
>>> # TODO: Initialize `end_time`:
>>> end_time = {}
>>>
>>> response = client.create_span(name, span_id, display_name, start_time, end_time)
Args:
name (str): The resource name of the span in the following format:
::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE\_ID] is a unique identifier for a trace within a project; it is a
32-character hexadecimal encoding of a 16-byte array.
[SPAN\_ID] is a unique identifier for a span within a trace; it is a
16-character hexadecimal encoding of an 8-byte array.
span_id (str): The [SPAN\_ID] portion of the span's resource name.
display_name (Union[dict, ~google.cloud.trace_v2.types.TruncatableString]): A description of the span's operation (up to 128 bytes). Stackdriver
                Trace displays the description in the Google Cloud Platform
                Console. For example, the display name can be a
qualified method name or a file name and a line number where the
operation is called. A best practice is to use the same display name
within an application and at the same call point. This makes it easier
to correlate spans in different traces.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TruncatableString`
start_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The start time of the span. On the client side, this is the time kept by
the local machine where the span execution starts. On the server side, this
is the time when the server's application handler starts running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The end time of the span. On the client side, this is the time kept by
the local machine where the span execution ends. On the server side, this
is the time when the server application handler stops running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
parent_span_id (str): The [SPAN\_ID] of this span's parent span. If this is a root span, then
this field must be empty.
attributes (Union[dict, ~google.cloud.trace_v2.types.Attributes]): A set of attributes on the span. You can have up to 32 attributes per
span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Attributes`
stack_trace (Union[dict, ~google.cloud.trace_v2.types.StackTrace]): Stack trace captured at the start of the span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.StackTrace`
time_events (Union[dict, ~google.cloud.trace_v2.types.TimeEvents]): A set of time events. You can have up to 32 annotations and 128 message
events per span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TimeEvents`
links (Union[dict, ~google.cloud.trace_v2.types.Links]): Links associated with the span. You can have up to 128 links per Span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Links`
status (Union[dict, ~google.cloud.trace_v2.types.Status]): An optional final status for this span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Status`
same_process_as_parent_span (Union[dict, ~google.cloud.trace_v2.types.BoolValue]): (Optional) Set this parameter to indicate whether this span is in
the same process as its parent. If you do not set this parameter,
Stackdriver Trace is unable to take advantage of this helpful
information.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.BoolValue`
child_span_count (Union[dict, ~google.cloud.trace_v2.types.Int32Value]): An optional number of child spans that were generated while this span
was active. If set, allows implementation to detect missing child spans.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Int32Value`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.trace_v2.types.Span` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "create_span" not in self._inner_api_calls:
self._inner_api_calls[
"create_span"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_span,
default_retry=self._method_configs["CreateSpan"].retry,
default_timeout=self._method_configs["CreateSpan"].timeout,
client_info=self._client_info,
)
request = trace_pb2.Span(
name=name,
span_id=span_id,
display_name=display_name,
start_time=start_time,
end_time=end_time,
parent_span_id=parent_span_id,
attributes=attributes,
stack_trace=stack_trace,
time_events=time_events,
links=links,
status=status,
same_process_as_parent_span=same_process_as_parent_span,
child_span_count=child_span_count,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_span"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
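The routing-metadata step is identical in every method above; in isolation it just builds an ``x-goog-request-params`` gRPC metadata entry from the resource name (editor's sketch; the exact URL encoding is handled by api_core).
from google.api_core.gapic_v1 import routing_header

name = "projects/my-project/traces/%s/spans/%s" % ("f" * 32, "1" * 16)
routing_metadata = routing_header.to_grpc_metadata([("name", name)])
print(routing_metadata)  # e.g. ('x-goog-request-params', 'name=projects%2Fmy-project%2F...')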
<SYSTEM_TASK:>
Wraps a user callback so that if an exception occurs the message is
<END_TASK>
<USER_TASK:>
Description:
def _wrap_callback_errors(callback, message):
"""Wraps a user callback so that if an exception occurs the message is
nacked.
Args:
callback (Callable[None, Message]): The user callback.
message (~Message): The Pub/Sub message.
""" |
try:
callback(message)
except Exception:
# Note: the likelihood of this failing is extremely low. This just adds
# a message to a queue, so if this doesn't work the world is in an
# unrecoverable state and this thread should just bail.
        _LOGGER.exception(
            "Top-level exception occurred in callback while processing a message"
        )
message.nack() |
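For illustration only, this is roughly how such a wrapper gets bound to a user callback before messages are dispatched; ``on_message`` and the scheduling step are hypothetical and assume the ``_wrap_callback_errors`` function above, not the library's exact internals.
import functools


def on_message(message):
    # Hypothetical user callback.
    print("received", message.message_id)
    message.ack()


# Each incoming message passes through the wrapper, so an exception raised in
# on_message results in message.nack() instead of a silently lost message.
wrapped = functools.partial(_wrap_callback_errors, on_message)
# e.g. scheduler.schedule(wrapped, message) for every received message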
<SYSTEM_TASK:>
Return the current ack deadline based on historical time-to-ack.
<END_TASK>
<USER_TASK:>
Description:
def ack_deadline(self):
"""Return the current ack deadline based on historical time-to-ack.
This method is "sticky". It will only perform the computations to
check on the right ack deadline if the histogram has gained a
significant amount of new information.
Returns:
int: The ack deadline.
""" |
target = min([self._last_histogram_size * 2, self._last_histogram_size + 100])
if len(self.ack_histogram) > target:
self._ack_deadline = self.ack_histogram.percentile(percent=99)
return self._ack_deadline |
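To make the percentile step concrete, here is a toy stand-in for the histogram dependency (editor's sketch; the real ``ack_histogram`` is the library's Histogram class, not this list-based version).
import math


class ToyHistogram(object):
    """List-backed sketch of the interface ack_deadline relies on."""

    def __init__(self):
        self._values = []

    def add(self, seconds):
        self._values.append(seconds)

    def __len__(self):
        return len(self._values)

    def percentile(self, percent):
        ordered = sorted(self._values)
        index = int(math.ceil(len(ordered) * percent / 100.0)) - 1
        return ordered[max(index, 0)]


hist = ToyHistogram()
for ack_time in (3, 5, 8, 13, 30):
    hist.add(ack_time)
print(hist.percentile(percent=99))  # 30 -> the deadline tracks the slowest acks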
<SYSTEM_TASK:>
Return the current load.
<END_TASK>
<USER_TASK:>
Description:
def load(self):
"""Return the current load.
The load is represented as a float, where 1.0 represents having
hit one of the flow control limits, and values between 0.0 and 1.0
represent how close we are to them. (0.5 means we have exactly half
of what the flow control setting allows, for example.)
There are (currently) two flow control settings; this property
computes how close the manager is to each of them, and returns
whichever value is higher. (It does not matter that we have lots of
running room on setting A if setting B is over.)
Returns:
float: The load value.
""" |
if self._leaser is None:
return 0
return max(
[
self._leaser.message_count / self._flow_control.max_messages,
self._leaser.bytes / self._flow_control.max_bytes,
]
) |
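A small worked example of the same computation, using hypothetical flow-control limits:
message_count = 80              # hypothetical number of leased messages
byte_count = 50 * 1024 * 1024   # hypothetical total leased bytes
max_messages = 100              # hypothetical flow-control limits
max_bytes = 100 * 1024 * 1024

load = max(float(message_count) / max_messages, float(byte_count) / max_bytes)
print(load)  # 0.8 -> the message count is the binding constraint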
<SYSTEM_TASK:>
Check the current load and pause the consumer if needed.
<END_TASK>
<USER_TASK:>
Description:
def maybe_pause_consumer(self):
"""Check the current load and pause the consumer if needed.""" |
if self.load >= 1.0:
if self._consumer is not None and not self._consumer.is_paused:
_LOGGER.debug("Message backlog over load at %.2f, pausing.", self.load)
self._consumer.pause() |
<SYSTEM_TASK:>
Check the current load and resume the consumer if needed.
<END_TASK>
<USER_TASK:>
Description:
def maybe_resume_consumer(self):
"""Check the current load and resume the consumer if needed.""" |
# If we have been paused by flow control, check and see if we are
# back within our limits.
#
# In order to not thrash too much, require us to have passed below
# the resume threshold (80% by default) of each flow control setting
# before restarting.
if self._consumer is None or not self._consumer.is_paused:
return
if self.load < self.flow_control.resume_threshold:
self._consumer.resume()
else:
_LOGGER.debug("Did not resume, current load is %s", self.load) |
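As an illustration of how the two checks bracket the message lifecycle, a sketch with hypothetical manager/leaser names (not the library's exact internals):
def _on_response(manager, received_messages):
    # Leasing new messages can push the load to or above 1.0.
    for message in received_messages:
        manager.leaser.add([message])   # illustrative leaser API
    manager.maybe_pause_consumer()


def _on_ack(manager, message):
    # Dropping a lease may bring the load back under the resume threshold.
    manager.leaser.remove([message])    # illustrative leaser API
    manager.maybe_resume_consumer()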