text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Send a request using a separate unary request instead of over the
<END_TASK>
<USER_TASK:>
Description:
def _send_unary_request(self, request):
"""Send a request using a separate unary request instead of over the
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests.
""" |
if request.ack_ids:
self._client.acknowledge(
subscription=self._subscription, ack_ids=list(request.ack_ids)
)
if request.modify_deadline_ack_ids:
# Send ack_ids with the same deadline seconds together.
deadline_to_ack_ids = collections.defaultdict(list)
for n, ack_id in enumerate(request.modify_deadline_ack_ids):
deadline = request.modify_deadline_seconds[n]
deadline_to_ack_ids[deadline].append(ack_id)
for deadline, ack_ids in six.iteritems(deadline_to_ack_ids):
self._client.modify_ack_deadline(
subscription=self._subscription,
ack_ids=ack_ids,
ack_deadline_seconds=deadline,
)
_LOGGER.debug("Sent request(s) over unary RPC.") |
<SYSTEM_TASK:>
Queue a request to be sent to the RPC.
<END_TASK>
<USER_TASK:>
Description:
def send(self, request):
"""Queue a request to be sent to the RPC.""" |
if self._UNARY_REQUESTS:
try:
self._send_unary_request(request)
except exceptions.GoogleAPICallError:
_LOGGER.debug(
"Exception while sending unary RPC. This is typically "
"non-fatal as stream requests are best-effort.",
exc_info=True,
)
else:
self._rpc.send(request) |
<SYSTEM_TASK:>
Sends an empty request over the streaming pull RPC.
<END_TASK>
<USER_TASK:>
Description:
def heartbeat(self):
"""Sends an empty request over the streaming pull RPC.
This always sends over the stream, regardless of whether
``self._UNARY_REQUESTS`` is set.
""" |
if self._rpc is not None and self._rpc.is_active:
self._rpc.send(types.StreamingPullRequest()) |
<SYSTEM_TASK:>
Return the initial request for the RPC.
<END_TASK>
<USER_TASK:>
Description:
def _get_initial_request(self):
"""Return the initial request for the RPC.
This defines the initial request that must always be sent to Pub/Sub
immediately upon opening the subscription.
Returns:
google.cloud.pubsub_v1.types.StreamingPullRequest: A request
suitable for being the first request on the stream (and not
suitable for any other purpose).
""" |
# Any ack IDs that are under lease management need to have their
# deadline extended immediately.
if self._leaser is not None:
# Explicitly copy the list, as it could be modified by another
# thread.
lease_ids = list(self._leaser.ack_ids)
else:
lease_ids = []
# Put the request together.
request = types.StreamingPullRequest(
modify_deadline_ack_ids=list(lease_ids),
modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
stream_ack_deadline_seconds=self.ack_histogram.percentile(99),
subscription=self._subscription,
)
# Return the initial request.
return request |
<SYSTEM_TASK:>
Determine if an error on the RPC stream should be recovered.
<END_TASK>
<USER_TASK:>
Description:
def _should_recover(self, exception):
"""Determine if an error on the RPC stream should be recovered.
If the exception is one of the retryable exceptions, this will signal
to the consumer thread that it should "recover" from the failure.
This will cause the stream to exit when it returns :data:`False`.
Returns:
bool: Indicates if the caller should recover or shut down.
Will be :data:`True` if the ``exception`` is "acceptable", i.e.
in a list of retryable / idempotent exceptions.
""" |
exception = _maybe_wrap_exception(exception)
# If this is in the list of idempotent exceptions, then we want to
# recover.
if isinstance(exception, _RETRYABLE_STREAM_ERRORS):
_LOGGER.info("Observed recoverable stream error %s", exception)
return True
_LOGGER.info("Observed non-recoverable stream error %s", exception)
return False |
<SYSTEM_TASK:>
Add a "change" to this batch to create a document.
<END_TASK>
<USER_TASK:>
Description:
def create(self, reference, document_data):
"""Add a "change" to this batch to create a document.
If the document given by ``reference`` already exists, then this
batch will fail when :meth:`commit`-ed.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference to be created in this batch.
document_data (dict): Property names and values to use for
creating a document.
""" |
write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
self._add_write_pbs(write_pbs) |
<SYSTEM_TASK:>
Add a "change" to replace a document.
<END_TASK>
<USER_TASK:>
Description:
def set(self, reference, document_data, merge=False):
"""Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List<apispec>]):
If True, apply merging instead of overwriting the state
of the document.
""" |
if merge is not False:
write_pbs = _helpers.pbs_for_set_with_merge(
reference._document_path, document_data, merge
)
else:
write_pbs = _helpers.pbs_for_set_no_merge(
reference._document_path, document_data
)
self._add_write_pbs(write_pbs) |
<SYSTEM_TASK:>
Add a "change" to update a document.
<END_TASK>
<USER_TASK:>
Description:
def update(self, reference, field_updates, option=None):
"""Add a "change" to update a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.update` for
more information on ``field_updates`` and ``option``.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be updated in this batch.
field_updates (dict): Field names or paths to update and values
to update with.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
""" |
if option.__class__.__name__ == "ExistsOption":
raise ValueError("you must not pass an explicit write option to " "update.")
write_pbs = _helpers.pbs_for_update(
reference._document_path, field_updates, option
)
self._add_write_pbs(write_pbs) |
<SYSTEM_TASK:>
Add a "change" to delete a document.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, reference, option=None):
"""Add a "change" to delete a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.delete` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be deleted in this batch.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
""" |
write_pb = _helpers.pb_for_delete(reference._document_path, option)
self._add_write_pbs([write_pb]) |
<SYSTEM_TASK:>
Commit the changes accumulated in this batch.
<END_TASK>
<USER_TASK:>
Description:
def commit(self):
"""Commit the changes accumulated in this batch.
Returns:
List[google.cloud.proto.firestore.v1beta1.\
write_pb2.WriteResult, ...]: The write results corresponding
to the changes committed, returned in the same order as the
changes were applied to this batch. A write result contains an
``update_time`` field.
""" |
commit_response = self._client._firestore_api.commit(
self._client._database_string,
self._write_pbs,
transaction=None,
metadata=self._client._rpc_metadata,
)
self._write_pbs = []
self.write_results = results = list(commit_response.write_results)
self.commit_time = commit_response.commit_time
return results |
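A hedged usage sketch tying the batch methods above together; the client construction, collection name, document IDs, and field values are hypothetical, and running it requires the google-cloud-firestore package plus credentials:

from google.cloud import firestore

client = firestore.Client()
batch = client.batch()

ada = client.collection("users").document("alovelace")   # hypothetical paths
alan = client.collection("users").document("aturing")
batch.create(ada, {"first": "Ada", "last": "Lovelace"})
batch.set(alan, {"first": "Alan", "last": "Turing"}, merge=True)

for result in batch.commit():  # one WriteResult per queued change
    print(result.update_time)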
<SYSTEM_TASK:>
Ensures an input is a tuple or list.
<END_TASK>
<USER_TASK:>
Description:
def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
""" |
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError(
"Expected %s to be a tuple or list. "
"Received %r" % (arg_name, tuple_or_list)
)
return list(tuple_or_list) |
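A quick usage sketch of the helper above; the argument name is arbitrary. Tuples and lists pass through as lists, while any other type raises:

print(_ensure_tuple_or_list("fields", ("a", "b")))  # ['a', 'b']

try:
    _ensure_tuple_or_list("fields", "a,b")  # a plain string is rejected
except TypeError as exc:
    print(exc)  # Expected fields to be a tuple or list. Received 'a,b'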
<SYSTEM_TASK:>
Convert non-none datetime to microseconds.
<END_TASK>
<USER_TASK:>
Description:
def _microseconds_from_datetime(value):
"""Convert non-none datetime to microseconds.
:type value: :class:`datetime.datetime`
:param value: The timestamp to convert.
:rtype: int
:returns: The timestamp, in microseconds.
""" |
if not value.tzinfo:
value = value.replace(tzinfo=UTC)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(UTC)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond |
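A worked example of the same arithmetic; ``UTC`` below is a stand-in for the module's UTC tzinfo constant:

import calendar
import datetime

UTC = datetime.timezone.utc  # stand-in for the module-level UTC constant

when = datetime.datetime(2020, 1, 1, 0, 0, 0, 500000, tzinfo=UTC)
micros = int(calendar.timegm(when.timetuple()) * 1e6) + when.microsecond
print(micros)  # 1577836800500000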
<SYSTEM_TASK:>
Convert a zoneless ISO8601 time string to naive datetime time
<END_TASK>
<USER_TASK:>
Description:
def _time_from_iso8601_time_naive(value):
"""Convert a zoneless ISO8601 time string to naive datetime time
:type value: str
:param value: The time string to convert
:rtype: :class:`datetime.time`
:returns: A datetime time object created from the string
:raises ValueError: if the value does not match a known format.
""" |
if len(value) == 8: # HH:MM:SS
fmt = _TIMEONLY_NO_FRACTION
elif len(value) == 15: # HH:MM:SS.micros
fmt = _TIMEONLY_W_MICROS
else:
raise ValueError("Unknown time format: {}".format(value))
return datetime.datetime.strptime(value, fmt).time() |
<SYSTEM_TASK:>
Convert a microsecond-precision timestamp to a native datetime.
<END_TASK>
<USER_TASK:>
Description:
def _rfc3339_to_datetime(dt_str):
"""Convert a microsecond-precision timestamp to a native datetime.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
""" |
return datetime.datetime.strptime(dt_str, _RFC3339_MICROS).replace(tzinfo=UTC) |
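For illustration, ``_RFC3339_MICROS`` is assumed to be the usual RFC 3339 layout with microseconds and a literal ``Z``; the helper then attaches UTC to the parsed value:

import datetime

_RFC3339_MICROS = "%Y-%m-%dT%H:%M:%S.%fZ"  # assumed layout, for illustration

parsed = datetime.datetime.strptime("2020-01-01T12:30:45.123456Z", _RFC3339_MICROS)
print(parsed)  # 2020-01-01 12:30:45.123456 (naive until tzinfo=UTC is attached)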
<SYSTEM_TASK:>
Convert a timestamp to a string.
<END_TASK>
<USER_TASK:>
Description:
def _datetime_to_rfc3339(value, ignore_zone=True):
"""Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
""" |
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS) |
<SYSTEM_TASK:>
Converts bytes to a unicode value, if necessary.
<END_TASK>
<USER_TASK:>
Description:
def _bytes_to_unicode(value):
"""Converts bytes to a unicode value, if necessary.
:type value: bytes
:param value: bytes value to attempt string conversion on.
:rtype: str
:returns: The original value converted to unicode (if bytes) or as passed
in if it started out as unicode.
:raises ValueError: if the value could not be converted to unicode.
""" |
result = value.decode("utf-8") if isinstance(value, six.binary_type) else value
if isinstance(result, six.text_type):
return result
else:
raise ValueError("%r could not be converted to unicode" % (value,)) |
<SYSTEM_TASK:>
Converts an Any protobuf to the specified message type
<END_TASK>
<USER_TASK:>
Description:
def _from_any_pb(pb_type, any_pb):
"""Converts an Any protobuf to the specified message type
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
""" |
msg = pb_type()
if not any_pb.Unpack(msg):
raise TypeError(
"Could not convert {} to {}".format(
any_pb.__class__.__name__, pb_type.__name__
)
)
return msg |
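A self-contained round trip showing the ``Unpack`` contract the helper relies on, using a well-known message type as the payload:

from google.protobuf import any_pb2, timestamp_pb2

original = timestamp_pb2.Timestamp(seconds=1577836800)
any_pb = any_pb2.Any()
any_pb.Pack(original)

unpacked = _from_any_pb(timestamp_pb2.Timestamp, any_pb)
print(unpacked.seconds)  # 1577836800
# Unpacking into a mismatched type raises TypeError, as shown above.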
<SYSTEM_TASK:>
Convert a datetime object to a Timestamp protobuf.
<END_TASK>
<USER_TASK:>
Description:
def _datetime_to_pb_timestamp(when):
"""Convert a datetime object to a Timestamp protobuf.
:type when: :class:`datetime.datetime`
:param when: the datetime to convert
:rtype: :class:`google.protobuf.timestamp_pb2.Timestamp`
:returns: A timestamp protobuf corresponding to the object.
""" |
ms_value = _microseconds_from_datetime(when)
seconds, micros = divmod(ms_value, 10 ** 6)
nanos = micros * 10 ** 3
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) |
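The ``divmod`` split is plain integer arithmetic on the microsecond count; a tiny worked example:

ms_value = 1577836800500000           # microseconds since the epoch
seconds, micros = divmod(ms_value, 10 ** 6)
nanos = micros * 10 ** 3
print(seconds, nanos)                 # 1577836800 500000000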
<SYSTEM_TASK:>
Convert a duration protobuf to a Python timedelta object.
<END_TASK>
<USER_TASK:>
Description:
def _duration_pb_to_timedelta(duration_pb):
"""Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has nanosecond granularity.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
""" |
return datetime.timedelta(
seconds=duration_pb.seconds, microseconds=(duration_pb.nanos / 1000.0)
) |
<SYSTEM_TASK:>
Validate a URI path and get the leaf object's name.
<END_TASK>
<USER_TASK:>
Description:
def _name_from_project_path(path, project, template):
"""Validate a URI path and get the leaf object's name.
:type path: str
:param path: URI path containing the name.
:type project: str
:param project: (Optional) The project associated with the request. It is
included for validation purposes. If passed as None,
disables validation.
:type template: str
:param template: Template regex describing the expected form of the path.
The regex must have two named groups, 'project' and
'name'.
:rtype: str
:returns: Name parsed from ``path``.
:raises ValueError: if the ``path`` is ill-formed or if the project from
the ``path`` does not agree with the ``project``
passed in.
""" |
if isinstance(template, str):
template = re.compile(template)
match = template.match(path)
if not match:
raise ValueError(
'path "%s" did not match expected pattern "%s"' % (path, template.pattern)
)
if project is not None:
found_project = match.group("project")
if found_project != project:
raise ValueError(
"Project from client (%s) should agree with "
"project from resource(%s)." % (project, found_project)
)
return match.group("name") |
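A sketch using a hypothetical Pub/Sub-style template with the two named groups the function expects:

# Hypothetical template and path, for illustration only.
template = r"projects/(?P<project>[^/]+)/topics/(?P<name>[^/]+)"
path = "projects/my-project/topics/my-topic"

print(_name_from_project_path(path, "my-project", template))  # my-topic
print(_name_from_project_path(path, None, template))          # project check skipped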
<SYSTEM_TASK:>
Makes a secure channel for an RPC service.
<END_TASK>
<USER_TASK:>
Description:
def make_secure_channel(credentials, user_agent, host, extra_options=()):
"""Makes a secure channel for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options used when creating the
channel.
:rtype: :class:`grpc._channel.Channel`
:returns: gRPC secure channel with credentials attached.
""" |
target = "%s:%d" % (host, http_client.HTTPS_PORT)
http_request = google.auth.transport.requests.Request()
user_agent_option = ("grpc.primary_user_agent", user_agent)
options = (user_agent_option,) + extra_options
return google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, target, options=options
) |
<SYSTEM_TASK:>
Makes a secure stub for an RPC service.
<END_TASK>
<USER_TASK:>
Description:
def make_secure_stub(credentials, user_agent, stub_class, host, extra_options=()):
"""Makes a secure stub for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options passed when creating
the channel.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
""" |
channel = make_secure_channel(
credentials, user_agent, host, extra_options=extra_options
)
return stub_class(channel) |
<SYSTEM_TASK:>
Makes an insecure stub for an RPC service.
<END_TASK>
<USER_TASK:>
Description:
def make_insecure_stub(stub_class, host, port=None):
"""Makes an insecure stub for an RPC service.
Uses / depends on gRPC.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service. May also include the port
if ``port`` is unspecified.
:type port: int
:param port: (Optional) The port for the service.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
""" |
if port is None:
target = host
else:
# NOTE: This assumes port != http_client.HTTPS_PORT:
target = "%s:%d" % (host, port)
channel = grpc.insecure_channel(target)
return stub_class(channel) |
<SYSTEM_TASK:>
Return a function that will detect a single feature.
<END_TASK>
<USER_TASK:>
Description:
def _create_single_feature_method(feature):
"""Return a function that will detect a single feature.
Args:
feature (enum): A specific feature defined as a member of
:class:`~enums.Feature.Type`.
Returns:
function: A helper function to detect just that feature.
""" |
# Define the function properties.
fx_name = feature.name.lower()
if "detection" in fx_name:
fx_doc = "Perform {0}.".format(fx_name.replace("_", " "))
else:
fx_doc = "Return {desc} information.".format(desc=fx_name.replace("_", " "))
# Provide a complete docstring with argument and return value
# information.
fx_doc += """
Args:
image (:class:`~.{module}.types.Image`): The image to analyze.
max_results (int):
Number of results to return, does not apply for
TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, or CROP_HINTS.
retry (int): Number of retries to do before giving up.
timeout (int): Number of seconds before timing out.
kwargs (dict): Additional properties to be set on the
:class:`~.{module}.types.AnnotateImageRequest`.
Returns:
:class:`~.{module}.types.AnnotateImageResponse`: The API response.
"""
# Get the actual feature value to send.
feature_value = {"type": feature}
# Define the function to be returned.
def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs):
"""Return a single feature annotation for the given image.
Intended for use with functools.partial, to create the particular
single-feature methods.
"""
copied_features = feature_value.copy()
if max_results is not None:
copied_features["max_results"] = max_results
request = dict(image=image, features=[copied_features], **kwargs)
response = self.annotate_image(request, retry=retry, timeout=timeout)
return response
# Set the appropriate function metadata.
inner.__name__ = fx_name
inner.__doc__ = fx_doc
# Return the final function.
return inner |
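A hedged sketch of how such a factory could be wired onto a client class; the enum and echo client below are stand-ins rather than the real Vision types:

import enum


class FeatureType(enum.Enum):          # stand-in for enums.Feature.Type
    FACE_DETECTION = 1
    LABEL_DETECTION = 4


class EchoClient(object):              # stand-in client; echoes the request back
    def annotate_image(self, request, retry=None, timeout=None):
        return request


for feature in FeatureType:
    setattr(EchoClient, feature.name.lower(), _create_single_feature_method(feature))

print(EchoClient().face_detection("image-placeholder", max_results=2))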
<SYSTEM_TASK:>
Schedule the callback to be called asynchronously in a thread pool.
<END_TASK>
<USER_TASK:>
Description:
def schedule(self, callback, *args, **kwargs):
"""Schedule the callback to be called asynchronously in a thread pool.
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Key-word arguments passed to the function.
Returns:
None
""" |
self._executor.submit(callback, *args, **kwargs) |
<SYSTEM_TASK:>
Shuts down the scheduler and immediately ends all pending callbacks.
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
"""Shuts down the scheduler and immediately end all pending callbacks.
""" |
# Drop all pending items from the executor. Without this, the executor
# will block until all pending items are complete, which is
# undesirable.
try:
while True:
self._executor._work_queue.get(block=False)
except queue.Empty:
pass
self._executor.shutdown() |
<SYSTEM_TASK:>
Lists the specified events.
<END_TASK>
<USER_TASK:>
Description:
def list_events(
self,
project_name,
group_id,
service_filter=None,
time_range=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the specified events.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `group_id`:
>>> group_id = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_events(project_name, group_id):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_events(project_name, group_id).pages:
... for element in page:
... # process element
... pass
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
group_id (str): [Required] The group for which events shall be returned.
service_filter (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter]): [Optional] List only ErrorGroups which belong to a service context that
matches the filter. Data for all service contexts is returned if this
field is not specified.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter`
time_range (Union[dict, ~google.cloud.errorreporting_v1beta1.types.QueryTimeRange]): [Optional] List only data for the given time range. If not set a default
time range is used. The field time\_range\_begin in the response will
specify the beginning of this time range.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.QueryTimeRange`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.errorreporting_v1beta1.types.ErrorEvent` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "list_events" not in self._inner_api_calls:
self._inner_api_calls[
"list_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_events,
default_retry=self._method_configs["ListEvents"].retry,
default_timeout=self._method_configs["ListEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.ListEventsRequest(
project_name=project_name,
group_id=group_id,
service_filter=service_filter,
time_range=time_range,
page_size=page_size,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_events"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="error_events",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator |
<SYSTEM_TASK:>
Deletes all error events of a given project.
<END_TASK>
<USER_TASK:>
Description:
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Document that owns the current collection.
<END_TASK>
<USER_TASK:>
Description:
def parent(self):
"""Document that owns the current collection.
Returns:
Optional[~.firestore_v1beta1.document.DocumentReference]: The
parent document, if the current collection is not a
top-level collection.
""" |
if len(self._path) == 1:
return None
else:
parent_path = self._path[:-1]
return self._client.document(*parent_path) |
<SYSTEM_TASK:>
Create a sub-document underneath the current collection.
<END_TASK>
<USER_TASK:>
Description:
def document(self, document_id=None):
"""Create a sub-document underneath the current collection.
Args:
document_id (Optional[str]): The document identifier
within the current collection. If not provided, will default
to a random 20 character string composed of digits,
uppercase and lowercase letters.
Returns:
~.firestore_v1beta1.document.DocumentReference: The child
document.
""" |
if document_id is None:
document_id = _auto_id()
child_path = self._path + (document_id,)
return self._client.document(*child_path) |
<SYSTEM_TASK:>
Get fully-qualified parent path and prefix for this collection.
<END_TASK>
<USER_TASK:>
Description:
def _parent_info(self):
"""Get fully-qualified parent path and prefix for this collection.
Returns:
Tuple[str, str]: Pair of
* the fully-qualified (with database and project) path to the
parent of this collection (will either be the database path
or a document path).
* the prefix to a document in this collection.
""" |
parent_doc = self.parent
if parent_doc is None:
parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join(
(self._client._database_string, "documents")
)
else:
parent_path = parent_doc._document_path
expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))
return parent_path, expected_prefix |
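A worked example with hypothetical names, assuming the delimiter constant is ``/`` and a top-level collection ``users``:

DOCUMENT_PATH_DELIMITER = "/"  # assumed value of the helper constant
database_string = "projects/my-project/databases/(default)"

parent_path = DOCUMENT_PATH_DELIMITER.join((database_string, "documents"))
expected_prefix = DOCUMENT_PATH_DELIMITER.join((parent_path, "users"))
print(parent_path)      # projects/my-project/databases/(default)/documents
print(expected_prefix)  # projects/my-project/databases/(default)/documents/users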
<SYSTEM_TASK:>
Create a document in the Firestore database with the provided data.
<END_TASK>
<USER_TASK:>
Description:
def add(self, document_data, document_id=None):
"""Create a document in the Firestore database with the provided data.
Args:
document_data (dict): Property names and values to use for
creating the document.
document_id (Optional[str]): The document identifier within the
current collection. If not provided, an ID will be
automatically assigned by the server (the assigned ID will be
a random 20 character string composed of digits,
uppercase and lowercase letters).
Returns:
Tuple[google.protobuf.timestamp_pb2.Timestamp, \
~.firestore_v1beta1.document.DocumentReference]: Pair of
* The ``update_time`` when the document was created (or
overwritten).
* A document reference for the created document.
Raises:
~google.cloud.exceptions.Conflict: If ``document_id`` is provided
and the document already exists.
""" |
if document_id is None:
parent_path, expected_prefix = self._parent_info()
document_pb = document_pb2.Document()
created_document_pb = self._client._firestore_api.create_document(
parent_path,
collection_id=self.id,
document_id=None,
document=document_pb,
mask=None,
metadata=self._client._rpc_metadata,
)
new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
document_ref = self.document(new_document_id)
set_result = document_ref.set(document_data)
return set_result.update_time, document_ref
else:
document_ref = self.document(document_id)
write_result = document_ref.create(document_data)
return write_result.update_time, document_ref |
<SYSTEM_TASK:>
List all subdocuments of the current collection.
<END_TASK>
<USER_TASK:>
Description:
def list_documents(self, page_size=None):
"""List all subdocuments of the current collection.
Args:
page_size (Optional[int]): The maximum number of documents
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.DocumentReference]:
iterator of subdocuments of the current collection. If the
collection does not exist at the time of `snapshot`, the
iterator will be empty.
""" |
parent, _ = self._parent_info()
iterator = self._client._firestore_api.list_documents(
parent,
self.id,
page_size=page_size,
show_missing=True,
metadata=self._client._rpc_metadata,
)
iterator.collection = self
iterator.item_to_value = _item_to_document_ref
return iterator |
<SYSTEM_TASK:>
Create a "select" query with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def select(self, field_paths):
"""Create a "select" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query.
""" |
query = query_mod.Query(self)
return query.select(field_paths) |
<SYSTEM_TASK:>
Create a "where" query with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def where(self, field_path, op_string, value):
"""Create a "where" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.where` for
more information on this method.
Args:
field_path (str): A field path (``.``-delimited list of
field names) for the field to filter on.
op_string (str): A comparison operation in the form of a string.
Acceptable values are ``<``, ``<=``, ``==``, ``>=``
and ``>``.
value (Any): The value to compare the field against in the filter.
If ``value`` is :data:`None` or a NaN, then ``==`` is the only
allowed operation.
Returns:
~.firestore_v1beta1.query.Query: A filtered query.
""" |
query = query_mod.Query(self)
return query.where(field_path, op_string, value) |
<SYSTEM_TASK:>
Create an "order by" query with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def order_by(self, field_path, **kwargs):
"""Create an "order by" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.order_by` for
more information on this method.
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
kwargs (Dict[str, Any]): The keyword arguments to pass along
to the query. The only supported keyword is ``direction``,
see :meth:`~.firestore_v1beta1.query.Query.order_by` for
more information.
Returns:
~.firestore_v1beta1.query.Query: An "order by" query.
""" |
query = query_mod.Query(self)
return query.order_by(field_path, **kwargs) |
<SYSTEM_TASK:>
Create a limited query with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def limit(self, count):
"""Create a limited query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.limit` for
more information on this method.
Args:
count (int): Maximum number of documents to return that match
the query.
Returns:
~.firestore_v1beta1.query.Query: A limited query.
""" |
query = query_mod.Query(self)
return query.limit(count) |
<SYSTEM_TASK:>
Skip to an offset in a query with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def offset(self, num_to_skip):
"""Skip to an offset in a query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.offset` for
more information on this method.
Args:
num_to_skip (int): The number of results to skip at the beginning
of query results. (Must be non-negative.)
Returns:
~.firestore_v1beta1.query.Query: An offset query.
""" |
query = query_mod.Query(self)
return query.offset(num_to_skip) |
<SYSTEM_TASK:>
Start query at a cursor with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def start_at(self, document_fields):
"""Start query at a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
""" |
query = query_mod.Query(self)
return query.start_at(document_fields) |
<SYSTEM_TASK:>
Start query after a cursor with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
""" |
query = query_mod.Query(self)
return query.start_after(document_fields) |
<SYSTEM_TASK:>
End query before a cursor with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def end_before(self, document_fields):
"""End query before a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.end_before` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
""" |
query = query_mod.Query(self)
return query.end_before(document_fields) |
<SYSTEM_TASK:>
End query at a cursor with this collection as parent.
<END_TASK>
<USER_TASK:>
Description:
def end_at(self, document_fields):
"""End query at a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.end_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
""" |
query = query_mod.Query(self)
return query.end_at(document_fields) |
<SYSTEM_TASK:>
Read the documents in this collection.
<END_TASK>
<USER_TASK:>
Description:
def stream(self, transaction=None):
"""Read the documents in this collection.
This sends a ``RunQuery`` RPC and then returns an iterator which
consumes each document returned in the stream of ``RunQueryResponse``
messages.
.. note::
The underlying stream of responses will time out after
the ``max_rpc_timeout_millis`` value set in the GAPIC
client configuration for the ``RunQuery`` API. Snapshots
not consumed from the iterator before that point will be lost.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that the query will
run in.
Yields:
~.firestore_v1beta1.document.DocumentSnapshot: The next
document that fulfills the query.
""" |
query = query_mod.Query(self)
return query.stream(transaction=transaction) |
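The query builders above all return ``Query`` objects and can be chained before streaming. A hedged usage sketch with hypothetical collection and field names (requires the Firestore client and credentials):

from google.cloud import firestore

client = firestore.Client()
users = client.collection("users")

query = users.where("age", ">=", 18).order_by("age").limit(10)
for snapshot in query.stream():
    print(snapshot.id, snapshot.to_dict())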
<SYSTEM_TASK:>
Get list of supported languages for translation.
<END_TASK>
<USER_TASK:>
Description:
def get_languages(self, target_language=None):
"""Get list of supported languages for translation.
See
https://cloud.google.com/translate/docs/discovering-supported-languages
:type target_language: str
:param target_language: (Optional) The language used to localize
returned language names. Defaults to the
target language on the current client.
:rtype: list
:returns: List of dictionaries. Each dictionary contains a supported
ISO 639-1 language code (using the dictionary key
``language``). If ``target_language`` is passed, each
dictionary will also contain the name of each supported
language (localized to the target language).
""" |
query_params = {}
if target_language is None:
target_language = self.target_language
if target_language is not None:
query_params["target"] = target_language
response = self._connection.api_request(
method="GET", path="/languages", query_params=query_params
)
return response.get("data", {}).get("languages", ()) |
<SYSTEM_TASK:>
Detect the language of a string or list of strings.
<END_TASK>
<USER_TASK:>
Description:
def detect_language(self, values):
"""Detect the language of a string or list of strings.
See https://cloud.google.com/translate/docs/detecting-language
:type values: str or list
:param values: String or list of strings that will have
language detected.
:rtype: dict or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys
* ``confidence``: The confidence in language detection, a
float between 0 and 1.
* ``input``: The corresponding input value.
* ``language``: The detected language (as an ISO 639-1
language code).
though the key ``confidence`` may not always be present.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
detections is not equal to the number of values.
:class:`ValueError <exceptions.ValueError>` if a value
produces a list of detections with 0 or multiple results
in it.
""" |
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
data = {"q": values}
response = self._connection.api_request(
method="POST", path="/detect", data=data
)
detections = response.get("data", {}).get("detections", ())
if len(values) != len(detections):
raise ValueError(
"Expected same number of values and detections", values, detections
)
for index, value in enumerate(values):
# Empirically, even clearly ambiguous text like "no" only returns
# a single detection, so we replace the list of detections with
# the single detection contained.
if len(detections[index]) == 1:
detections[index] = detections[index][0]
else:
message = (
"Expected a single detection per value, API " "returned %d"
) % (len(detections[index]),)
raise ValueError(message, value, detections[index])
detections[index]["input"] = value
# The ``isReliable`` field is deprecated.
detections[index].pop("isReliable", None)
if single_value:
return detections[0]
else:
return detections |
<SYSTEM_TASK:>
Translate a string or list of strings.
<END_TASK>
<USER_TASK:>
Description:
def translate(
self,
values,
target_language=None,
format_=None,
source_language=None,
customization_ids=(),
model=None,
):
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str
:param source_language: (Optional) The language of the text to
be translated.
:type customization_ids: str or list
:param customization_ids: (Optional) ID or list of customization IDs
for translation. Sets the ``cid`` parameter
in the query.
:type model: str
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
""" |
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
if target_language is None:
target_language = self.target_language
if isinstance(customization_ids, six.string_types):
customization_ids = [customization_ids]
data = {
"target": target_language,
"q": values,
"cid": customization_ids,
"format": format_,
"source": source_language,
"model": model,
}
response = self._connection.api_request(method="POST", path="", data=data)
translations = response.get("data", {}).get("translations", ())
if len(values) != len(translations):
raise ValueError(
"Expected iterations to have same length", values, translations
)
for value, translation in six.moves.zip(values, translations):
translation["input"] = value
if single_value:
return translations[0]
else:
return translations |
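A hedged usage sketch of the two client methods above; the text and language codes are arbitrary, the ``translate_v2`` import path is an assumption that may differ by library version, and running it requires credentials:

from google.cloud import translate_v2 as translate

client = translate.Client()

detection = client.detect_language("Bonjour le monde")
print(detection["language"], detection.get("confidence"))

result = client.translate("Bonjour le monde", target_language="en")
print(result["translatedText"], result.get("detectedSourceLanguage"))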
<SYSTEM_TASK:>
Add messages to be managed by the leaser.
<END_TASK>
<USER_TASK:>
Description:
def add(self, items):
"""Add messages to be managed by the leaser.""" |
for item in items:
# Add the ack ID to the set of managed ack IDs, and increment
# the size counter.
if item.ack_id not in self._leased_messages:
self._leased_messages[item.ack_id] = _LeasedMessage(
added_time=time.time(), size=item.byte_size
)
self._bytes += item.byte_size
else:
_LOGGER.debug("Message %s is already lease managed", item.ack_id) |
<SYSTEM_TASK:>
Remove messages from lease management.
<END_TASK>
<USER_TASK:>
Description:
def remove(self, items):
"""Remove messages from lease management.""" |
# Remove the ack ID from lease management, and decrement the
# byte counter.
for item in items:
if self._leased_messages.pop(item.ack_id, None) is not None:
self._bytes -= item.byte_size
else:
_LOGGER.debug("Item %s was not managed.", item.ack_id)
if self._bytes < 0:
_LOGGER.debug("Bytes was unexpectedly negative: %d", self._bytes)
self._bytes = 0 |
<SYSTEM_TASK:>
Maintain all of the leases being managed.
<END_TASK>
<USER_TASK:>
Description:
def maintain_leases(self):
"""Maintain all of the leases being managed.
This method modifies the ack deadline for all of the managed
ack IDs, then waits for most of that time (but with jitter), and
repeats.
""" |
while self._manager.is_active and not self._stop_event.is_set():
# Determine the appropriate duration for the lease. This is
# based off of how long previous messages have taken to ack, with
# a sensible default and within the ranges allowed by Pub/Sub.
p99 = self._manager.ack_histogram.percentile(99)
_LOGGER.debug("The current p99 value is %d seconds.", p99)
# Make a copy of the leased messages. This is needed because it's
# possible for another thread to modify the dictionary while
# we're iterating over it.
leased_messages = copy.copy(self._leased_messages)
# Drop any leases that are well beyond max lease time. This
# ensures that in the event of a badly behaving actor, we can
# drop messages and allow Pub/Sub to resend them.
cutoff = time.time() - self._manager.flow_control.max_lease_duration
to_drop = [
requests.DropRequest(ack_id, item.size)
for ack_id, item in six.iteritems(leased_messages)
if item.added_time < cutoff
]
if to_drop:
_LOGGER.warning(
"Dropping %s items because they were leased too long.", len(to_drop)
)
self._manager.dispatcher.drop(to_drop)
# Remove dropped items from our copy of the leased messages (they
# have already been removed from the real one by
# self._manager.drop(), which calls self.remove()).
for item in to_drop:
leased_messages.pop(item.ack_id)
# Create a streaming pull request.
# We do not actually call `modify_ack_deadline` over and over
# because it is more efficient to make a single request.
ack_ids = leased_messages.keys()
if ack_ids:
_LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids))
# NOTE: This may not work as expected if ``consumer.active``
# has changed since we checked it. An implementation
# without any sort of race condition would require a
# way for ``send_request`` to fail when the consumer
# is inactive.
self._manager.dispatcher.modify_ack_deadline(
[requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids]
)
# Now wait an appropriate period of time and do this again.
#
# We determine the appropriate period of time based on a random
# period between 0 seconds and 90% of the lease. This use of
# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases
# where there are many clients.
snooze = random.uniform(0.0, p99 * 0.9)
_LOGGER.debug("Snoozing lease management for %f seconds.", snooze)
self._stop_event.wait(timeout=snooze)
_LOGGER.info("%s exiting.", _LEASE_WORKER_NAME) |
<SYSTEM_TASK:>
Uses the gapic client to report the error.
<END_TASK>
<USER_TASK:>
Description:
def report_error_event(self, error_report):
"""Uses the gapic client to report the error.
:type error_report: dict
:param error_report:
payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
This object should be built using
Use
:meth:~`google.cloud.error_reporting.client._build_error_report`
""" |
project_name = self._gapic_api.project_path(self._project)
error_report_payload = report_errors_service_pb2.ReportedErrorEvent()
ParseDict(error_report, error_report_payload)
self._gapic_api.report_error_event(project_name, error_report_payload) |
<SYSTEM_TASK:>
Return a fully-qualified table string.
<END_TASK>
<USER_TASK:>
Description:
def table_path(cls, project, instance, table):
"""Return a fully-qualified table string.""" |
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/tables/{table}",
project=project,
instance=instance,
table=table,
) |
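``google.api_core.path_template.expand`` simply substitutes the named placeholders; a quick sketch with hypothetical IDs:

from google.api_core import path_template

path = path_template.expand(
    "projects/{project}/instances/{instance}/tables/{table}",
    project="my-project",
    instance="my-instance",
    table="my-table",
)
print(path)  # projects/my-project/instances/my-instance/tables/my-table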
<SYSTEM_TASK:>
Streams back the contents of all requested rows in key order, optionally
<END_TASK>
<USER_TASK:>
Description:
def read_rows(
self,
table_name,
app_profile_id=None,
rows=None,
filter_=None,
rows_limit=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Streams back the contents of all requested rows in key order, optionally
applying the same Reader filter to each. Depending on their size,
rows and cells may be broken up across multiple responses, but
atomicity of each row will still be preserved. See the
ReadRowsResponse documentation for details.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> for element in client.read_rows(table_name):
... # process element
... pass
Args:
table_name (str): The unique name of the table from which to read. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowSet`
filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
reads the entirety of each row.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
rows_limit (long): The read will terminate after committing to N rows' worth of results. The
default (zero) is to return all results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "read_rows" not in self._inner_api_calls:
self._inner_api_calls[
"read_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.read_rows,
default_retry=self._method_configs["ReadRows"].retry,
default_timeout=self._method_configs["ReadRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.ReadRowsRequest(
table_name=table_name,
app_profile_id=app_profile_id,
rows=rows,
filter=filter_,
rows_limit=rows_limit,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["read_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Mutates multiple rows in a batch. Each individual row is mutated
<END_TASK>
<USER_TASK:>
Description:
def mutate_rows(
self,
table_name,
entries,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates multiple rows in a batch. Each individual row is mutated
atomically as in MutateRow, but the entire batch is not executed
atomically.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> for element in client.mutate_rows(table_name, entries):
... # process element
... pass
Args:
table_name (str): The unique name of the table to which the mutations should be applied.
entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
Each entry is applied as an atomic mutation, but the entries may be
applied in arbitrary order (even between entries for the same row).
At least one entry must be specified, and in total the entries can
contain at most 100000 mutations.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Entry`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "mutate_rows" not in self._inner_api_calls:
self._inner_api_calls[
"mutate_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_rows,
default_retry=self._method_configs["MutateRows"].retry,
default_timeout=self._method_configs["MutateRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.MutateRowsRequest(
table_name=table_name, entries=entries, app_profile_id=app_profile_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["mutate_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Mutates a row atomically based on the output of a predicate Reader filter.
<END_TASK>
<USER_TASK:>
Description:
def check_and_mutate_row(
self,
table_name,
row_key,
app_profile_id=None,
predicate_filter=None,
true_mutations=None,
false_mutations=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
""" |
# Wrap the transport method to add retry and timeout logic.
if "check_and_mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"check_and_mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.check_and_mutate_row,
default_retry=self._method_configs["CheckAndMutateRow"].retry,
default_timeout=self._method_configs["CheckAndMutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
app_profile_id=app_profile_id,
predicate_filter=predicate_filter,
true_mutations=true_mutations,
false_mutations=false_mutations,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["check_and_mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
<SYSTEM_TASK:>
Instance name used in requests.
<END_TASK>
<USER_TASK:>
Description:
def name(self):
"""Instance name used in requests.
.. note::
This property will not change if ``instance_id`` does not,
but the return value is not cached.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_instance_name]
:end-before: [END bigtable_instance_name]
The instance name is of the form
``"projects/{project}/instances/{instance_id}"``
:rtype: str
:returns: Return a fully-qualified instance string.
""" |
return self._client.instance_admin_client.instance_path(
project=self._client.project, instance=self.instance_id
) |
<SYSTEM_TASK:>
Check whether the instance already exists.
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
"""Check whether the instance already exists.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_check_instance_exists]
:end-before: [END bigtable_check_instance_exists]
:rtype: bool
:returns: True if the instance exists, else False.
""" |
try:
self._client.instance_admin_client.get_instance(name=self.name)
return True
# NOTE: There could be other exceptions that are returned to the user.
except NotFound:
return False |
<SYSTEM_TASK:>
Gets the access control policy for an instance resource.
<END_TASK>
<USER_TASK:>
Description:
def get_iam_policy(self):
"""Gets the access control policy for an instance resource.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_get_iam_policy]
:end-before: [END bigtable_get_iam_policy]
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance
""" |
instance_admin_client = self._client.instance_admin_client
resp = instance_admin_client.get_iam_policy(resource=self.name)
return Policy.from_pb(resp) |
<SYSTEM_TASK:>
Sets the access control policy on an instance resource. Replaces any
<END_TASK>
<USER_TASK:>
Description:
def set_iam_policy(self, policy):
"""Sets the access control policy on an instance resource. Replaces any
existing policy.
For more information about policy, please see documentation of
class `google.cloud.bigtable.policy.Policy`
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_set_iam_policy]
:end-before: [END bigtable_set_iam_policy]
:type policy: :class:`google.cloud.bigtable.policy.Policy`
:param policy: A new IAM policy to replace the current IAM policy
of this instance
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance.
""" |
instance_admin_client = self._client.instance_admin_client
resp = instance_admin_client.set_iam_policy(
resource=self.name, policy=policy.to_pb()
)
return Policy.from_pb(resp) |
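A small illustrative pairing of the two IAM calls (not from the snippets file): it reads the current policy and writes it back unchanged, assuming `instance` is an existing admin-enabled Instance.

policy = instance.get_iam_policy()
# Inspect or modify `policy` here before writing it back.
updated_policy = instance.set_iam_policy(policy)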
<SYSTEM_TASK:>
Factory to create a cluster associated with this instance.
<END_TASK>
<USER_TASK:>
Description:
def cluster(
self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None
):
"""Factory to create a cluster associated with this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_create_cluster]
:end-before: [END bigtable_create_cluster]
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type location_id: str
:param location_id: (Creation Only) The location where this cluster's
nodes and storage reside. For best performance,
clients should be located as close as possible to
this cluster.
For list of supported locations refer to
https://cloud.google.com/bigtable/docs/locations
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the cluster.
:type default_storage_type: int
:param default_storage_type: (Optional) The type of storage
Possible values are represented by the
following constants:
:data:`google.cloud.bigtable.enums.StorageType.SSD`,
:data:`google.cloud.bigtable.enums.StorageType.HDD`.
Defaults to
:data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`.
:rtype: :class:`~google.cloud.bigtable.instance.Cluster`
:returns: a cluster owned by this instance.
""" |
return Cluster(
cluster_id,
self,
location_id=location_id,
serve_nodes=serve_nodes,
default_storage_type=default_storage_type,
) |
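A hedged end-to-end sketch of using this factory: the project, instance, cluster, and location IDs below are placeholders, and it assumes an admin-enabled client.

from google.cloud import bigtable
from google.cloud.bigtable import enums

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")
cluster = instance.cluster(
    "my-cluster",
    location_id="us-central1-f",
    serve_nodes=3,
    default_storage_type=enums.StorageType.SSD,
)
operation = cluster.create()  # The factory does not call the API; create() does.
operation.result(timeout=120)  # Block until the cluster is provisioned.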
<SYSTEM_TASK:>
List the clusters in this instance.
<END_TASK>
<USER_TASK:>
Description:
def list_clusters(self):
"""List the clusters in this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_list_clusters_on_instance]
:end-before: [END bigtable_list_clusters_on_instance]
:rtype: tuple
:returns:
(clusters, failed_locations), where 'clusters' is list of
:class:`google.cloud.bigtable.instance.Cluster`, and
'failed_locations' is a list of locations which could not
be resolved.
""" |
resp = self._client.instance_admin_client.list_clusters(self.name)
clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters]
return clusters, resp.failed_locations |
<SYSTEM_TASK:>
Factory to create a table associated with this instance.
<END_TASK>
<USER_TASK:>
Description:
def table(self, table_id, mutation_timeout=None, app_profile_id=None):
"""Factory to create a table associated with this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_create_table]
:end-before: [END bigtable_create_table]
:type table_id: str
:param table_id: The ID of the table.
:type mutation_timeout: int
:param mutation_timeout: (Optional) The default timeout to use for
mutation operations on this table.
:type app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
:rtype: :class:`Table <google.cloud.bigtable.table.Table>`
:returns: The table owned by this instance.
""" |
return Table(
table_id,
self,
app_profile_id=app_profile_id,
mutation_timeout=mutation_timeout,
) |
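A short sketch pairing the factory with a create call, assuming `instance` already exists; the table name, column family name, and GC rule are illustrative.

from google.cloud.bigtable import column_family

table = instance.table("my-table")
# The factory only builds the handle; create() issues the admin RPC.
gc_rule = column_family.MaxVersionsGCRule(2)
table.create(column_families={"cf1": gc_rule})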
<SYSTEM_TASK:>
List the tables in this instance.
<END_TASK>
<USER_TASK:>
Description:
def list_tables(self):
"""List the tables in this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_list_tables]
:end-before: [END bigtable_list_tables]
:rtype: list of :class:`Table <google.cloud.bigtable.table.Table>`
:returns: The list of tables owned by the instance.
:raises: :class:`ValueError <exceptions.ValueError>` if one of the
returned tables has a name that is not of the expected format.
""" |
table_list_pb = self._client.table_admin_client.list_tables(self.name)
result = []
for table_pb in table_list_pb:
table_prefix = self.name + "/tables/"
if not table_pb.name.startswith(table_prefix):
raise ValueError(
"Table name {} not of expected format".format(table_pb.name)
)
table_id = table_pb.name[len(table_prefix) :]
result.append(self.table(table_id))
return result |
<SYSTEM_TASK:>
Factory to create AppProfile associated with this instance.
<END_TASK>
<USER_TASK:>
Description:
def app_profile(
self,
app_profile_id,
routing_policy_type=None,
description=None,
cluster_id=None,
allow_transactional_writes=None,
):
"""Factory to create AppProfile associated with this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_create_app_profile]
:end-before: [END bigtable_create_app_profile]
:type app_profile_id: str
:param app_profile_id: The ID of the AppProfile. Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type routing_policy_type: int
:param routing_policy_type: The type of the routing policy.
Possible values are represented
by the following constants:
:data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY`
:data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE`
:type description: str
:param description: (Optional) Long form description of the use
case for this AppProfile.
:type cluster_id: str
:param cluster_id: (Optional) Unique cluster_id which is only required
when routing_policy_type is
ROUTING_POLICY_TYPE_SINGLE.
:type allow_transactional_writes: bool
:param allow_transactional_writes: (Optional) If true, allow
transactional writes for
ROUTING_POLICY_TYPE_SINGLE.
:rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile`
:returns: AppProfile for this instance.
""" |
return AppProfile(
app_profile_id,
self,
routing_policy_type=routing_policy_type,
description=description,
cluster_id=cluster_id,
allow_transactional_writes=allow_transactional_writes,
) |
<SYSTEM_TASK:>
Convert a mapping to a row tuple using the schema.
<END_TASK>
<USER_TASK:>
Description:
def _row_from_mapping(mapping, schema):
"""Convert a mapping to a row tuple using the schema.
Args:
mapping (Dict[str, object])
Mapping of row data: must contain keys for all required fields in
the schema. Keys which do not correspond to a field in the schema
are ignored.
schema (List[google.cloud.bigquery.schema.SchemaField]):
The schema of the table destination for the rows
Returns:
Tuple[object]:
Tuple whose elements are ordered according to the schema.
Raises:
ValueError: If schema is empty.
""" |
if len(schema) == 0:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
row = []
for field in schema:
if field.mode == "REQUIRED":
row.append(mapping[field.name])
elif field.mode == "REPEATED":
row.append(mapping.get(field.name, ()))
elif field.mode == "NULLABLE":
row.append(mapping.get(field.name))
else:
raise ValueError("Unknown field mode: {}".format(field.mode))
return tuple(row) |
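An illustrative call (the helper is private, so this only demonstrates the ordering and mode handling implemented above, with a made-up two-field schema):

from google.cloud.bigquery.schema import SchemaField

schema = [
    SchemaField("full_name", "STRING", mode="REQUIRED"),
    SchemaField("age", "INTEGER", mode="NULLABLE"),
]
mapping = {"full_name": "Phred Phlyntstone", "age": 32, "extra": "ignored"}
_row_from_mapping(mapping, schema)  # -> ('Phred Phlyntstone', 32)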
<SYSTEM_TASK:>
Convert a JSON row to the native object.
<END_TASK>
<USER_TASK:>
Description:
def _item_to_row(iterator, resource):
"""Convert a JSON row to the native object.
.. note::
This assumes that the ``schema`` attribute has been
added to the iterator after being created, which
should be done by the caller.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a row.
:rtype: :class:`~google.cloud.bigquery.table.Row`
:returns: The next row in the page.
""" |
return Row(
_helpers._row_tuple_from_json(resource, iterator.schema),
iterator._field_to_index,
) |
<SYSTEM_TASK:>
Helper to convert a string or Table to TableReference.
<END_TASK>
<USER_TASK:>
Description:
def _table_arg_to_table_ref(value, default_project=None):
"""Helper to convert a string or Table to TableReference.
This function keeps TableReference and other kinds of objects unchanged.
""" |
if isinstance(value, six.string_types):
value = TableReference.from_string(value, default_project=default_project)
if isinstance(value, (Table, TableListItem)):
value = value.reference
return value |
<SYSTEM_TASK:>
Helper to convert a string or TableReference to a Table.
<END_TASK>
<USER_TASK:>
Description:
def _table_arg_to_table(value, default_project=None):
"""Helper to convert a string or TableReference to a Table.
This function keeps Table and other kinds of objects unchanged.
""" |
if isinstance(value, six.string_types):
value = TableReference.from_string(value, default_project=default_project)
if isinstance(value, TableReference):
value = Table(value)
if isinstance(value, TableListItem):
newvalue = Table(value.reference)
newvalue._properties = value._properties
value = newvalue
return value |
<SYSTEM_TASK:>
Construct a table reference from table ID string.
<END_TASK>
<USER_TASK:>
Description:
def from_string(cls, table_id, default_project=None):
"""Construct a table reference from table ID string.
Args:
table_id (str):
A table ID in standard SQL format. If ``default_project``
is not specified, this must include a project ID, dataset
ID, and table ID, each separated by ``.``.
default_project (str):
Optional. The project ID to use when ``table_id`` does not
include a project ID.
Returns:
TableReference: Table reference parsed from ``table_id``.
Examples:
>>> TableReference.from_string('my-project.mydataset.mytable')
TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')
Raises:
ValueError:
If ``table_id`` is not a fully-qualified table ID in
standard SQL format.
""" |
from google.cloud.bigquery.dataset import DatasetReference
(
output_project_id,
output_dataset_id,
output_table_id,
) = _helpers._parse_3_part_id(
table_id, default_project=default_project, property_name="table_id"
)
return cls(
DatasetReference(output_project_id, output_dataset_id), output_table_id
) |
<SYSTEM_TASK:>
Construct a BigQuery Storage API representation of this table.
<END_TASK>
<USER_TASK:>
Description:
def to_bqstorage(self):
"""Construct a BigQuery Storage API representation of this table.
Install the ``google-cloud-bigquery-storage`` package to use this
feature.
If the ``table_id`` contains a partition identifier (e.g.
``my_table$201812``) or a snapshot identifier (e.g.
``mytable@1234567890``), it is ignored. Use
:class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions`
to filter rows by partition. Use
:class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers`
to select a specific snapshot to read from.
Returns:
google.cloud.bigquery_storage_v1beta1.types.TableReference:
A reference to this table in the BigQuery Storage API.
Raises:
ValueError:
If the :mod:`google.cloud.bigquery_storage_v1beta1` module
cannot be imported.
""" |
if bigquery_storage_v1beta1 is None:
raise ValueError(_NO_BQSTORAGE_ERROR)
table_ref = bigquery_storage_v1beta1.types.TableReference()
table_ref.project_id = self._project
table_ref.dataset_id = self._dataset_id
table_id = self._table_id
if "@" in table_id:
table_id = table_id.split("@")[0]
if "$" in table_id:
table_id = table_id.split("$")[0]
table_ref.table_id = table_id
return table_ref |
<SYSTEM_TASK:>
Return a value for key, with a default value if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, default=None):
"""Return a value for key, with a default value if it does not exist.
Args:
key (str): The key of the column to access
default (object):
The default value to use if the key does not exist. (Defaults
to :data:`None`.)
Returns:
object:
The value associated with the provided key, or a default value.
Examples:
When the key exists, the value associated with it is returned.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
'a'
The default value is :data:`None` when the key does not exist.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
None
The default value can be overridden with the ``default`` parameter.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
''
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')
''
""" |
index = self._xxx_field_to_index.get(key)
if index is None:
return default
return self._xxx_values[index] |
<SYSTEM_TASK:>
Construct a tqdm progress bar object, if tqdm is installed.
<END_TASK>
<USER_TASK:>
Description:
def _get_progress_bar(self, progress_bar_type):
"""Construct a tqdm progress bar object, if tqdm is installed.""" |
if tqdm is None:
if progress_bar_type is not None:
warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
return None
description = "Downloading"
unit = "rows"
try:
if progress_bar_type == "tqdm":
return tqdm.tqdm(desc=description, total=self.total_rows, unit=unit)
elif progress_bar_type == "tqdm_notebook":
return tqdm.tqdm_notebook(
desc=description, total=self.total_rows, unit=unit
)
elif progress_bar_type == "tqdm_gui":
return tqdm.tqdm_gui(desc=description, total=self.total_rows, unit=unit)
except (KeyError, TypeError):
# Protect ourselves from any tqdm errors. In case of
# unexpected tqdm behavior, just fall back to showing
# no progress bar.
warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
return None |
<SYSTEM_TASK:>
Create a pandas DataFrame by loading all pages of a query.
<END_TASK>
<USER_TASK:>
Description:
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):
"""Create a pandas DataFrame by loading all pages of a query.
Args:
bqstorage_client ( \
google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \
):
**Beta Feature** Optional. A BigQuery Storage API client. If
supplied, use the faster BigQuery Storage API to fetch rows
from BigQuery. This API is a billable API.
This method requires the ``fastavro`` and
``google-cloud-bigquery-storage`` libraries.
Reading from a specific partition or snapshot is not
currently supported by this method.
**Caution**: There is a known issue reading small anonymous
query result tables with the BQ Storage API. When a problem
is encountered reading a table, the tabledata.list method
from the BigQuery API is used, instead.
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names to pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
progress_bar_type (Optional[str]):
If set, use the `tqdm <https://tqdm.github.io/>`_ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
.. versionadded:: 1.11.0
Returns:
pandas.DataFrame:
A :class:`~pandas.DataFrame` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError:
If the :mod:`pandas` library cannot be imported, or the
:mod:`google.cloud.bigquery_storage_v1beta1` module is
required but cannot be imported.
""" |
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
if dtypes is None:
dtypes = {}
progress_bar = self._get_progress_bar(progress_bar_type)
if bqstorage_client is not None:
try:
return self._to_dataframe_bqstorage(
bqstorage_client, dtypes, progress_bar=progress_bar
)
except google.api_core.exceptions.Forbidden:
# Don't hide errors such as insufficient permissions to create
# a read session, or the API is not enabled. Both of those are
# clearly problems if the developer has explicitly asked for
# BigQuery Storage API support.
raise
except google.api_core.exceptions.GoogleAPICallError:
# There is a known issue with reading from small anonymous
# query results tables, so some errors are expected. Rather
# than throw those errors, try reading the DataFrame again, but
# with the tabledata.list API.
pass
return self._to_dataframe_tabledata_list(dtypes, progress_bar=progress_bar) |
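A minimal usage sketch (assumes pandas and, for the progress bar, tqdm are installed; the public dataset query is only illustrative):

from google.cloud import bigquery

client = bigquery.Client()
query = (
    "SELECT name, SUM(number) AS total "
    "FROM `bigquery-public-data.usa_names.usa_1910_2013` "
    "GROUP BY name ORDER BY total DESC LIMIT 10"
)
rows = client.query(query).result()  # a row iterator over the results
df = rows.to_dataframe(progress_bar_type="tqdm")  # progress bar needs tqdm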
<SYSTEM_TASK:>
Create an empty dataframe.
<END_TASK>
<USER_TASK:>
Description:
def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=None):
"""Create an empty dataframe.
Args:
bqstorage_client (Any):
Ignored. Added for compatibility with RowIterator.
dtypes (Any):
Ignored. Added for compatibility with RowIterator.
progress_bar_type (Any):
Ignored. Added for compatibility with RowIterator.
Returns:
pandas.DataFrame:
An empty :class:`~pandas.DataFrame`.
""" |
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return pandas.DataFrame() |
<SYSTEM_TASK:>
Prepares headers and body for a batch request.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_batch_request(self):
"""Prepares headers and body for a batch request.
:rtype: tuple (dict, str)
:returns: The pair of headers and body of the batch request to be sent.
:raises: :class:`ValueError` if no requests have been deferred.
""" |
if len(self._requests) == 0:
raise ValueError("No deferred requests")
multi = MIMEMultipart()
for method, uri, headers, body in self._requests:
subrequest = MIMEApplicationHTTP(method, uri, headers, body)
multi.attach(subrequest)
# The `email` package expects to deal with "native" strings
if six.PY3: # pragma: NO COVER Python3
buf = io.StringIO()
else:
buf = io.BytesIO()
generator = Generator(buf, False, 0)
generator.flatten(multi)
payload = buf.getvalue()
# Strip off redundant header text
_, body = payload.split("\n\n", 1)
return dict(multi._headers), body |
<SYSTEM_TASK:>
Apply all the batch responses to the futures created.
<END_TASK>
<USER_TASK:>
Description:
def _finish_futures(self, responses):
"""Apply all the batch responses to the futures created.
:type responses: list of (headers, payload) tuples.
:param responses: List of headers and payloads from each response in
the batch.
:raises: :class:`ValueError` if no requests have been deferred.
""" |
# If a bad status occurs, we track it, but don't raise an exception
# until all futures have been populated.
exception_args = None
if len(self._target_objects) != len(responses):
raise ValueError("Expected a response for every request.")
for target_object, subresponse in zip(self._target_objects, responses):
if not 200 <= subresponse.status_code < 300:
exception_args = exception_args or subresponse
elif target_object is not None:
try:
target_object._properties = subresponse.json()
except ValueError:
target_object._properties = subresponse.content
if exception_args is not None:
raise exceptions.from_http_response(exception_args) |
<SYSTEM_TASK:>
Perform a ``StreamingRead`` API request for rows in a table.
<END_TASK>
<USER_TASK:>
Description:
def read(self, table, columns, keyset, index="", limit=0, partition=None):
"""Perform a ``StreamingRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type limit: int
:param limit: (Optional) maximum number of rows to return.
Incompatible with ``partition``.
:type partition: bytes
:param partition: (Optional) one of the partition tokens returned
from :meth:`partition_read`. Incompatible with
``limit``.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
""" |
if self._read_request_count > 0:
if not self._multi_use:
raise ValueError("Cannot re-use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction ID pending.")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
restart = functools.partial(
api.streaming_read,
self._session.name,
table,
columns,
keyset._to_pb(),
transaction=transaction,
index=index,
limit=limit,
partition_token=partition,
metadata=metadata,
)
iterator = _restart_on_unavailable(restart)
self._read_request_count += 1
if self._multi_use:
return StreamedResultSet(iterator, source=self)
else:
return StreamedResultSet(iterator) |
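A hedged usage sketch, assuming an existing Spanner `database` handle and a table named 'citizens':

from google.cloud.spanner_v1.keyset import KeySet

keyset = KeySet(all_=True)
with database.snapshot() as snapshot:
    results = snapshot.read(
        table="citizens", columns=("first_name", "last_name"), keyset=keyset
    )
    for row in results:
        print(row)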
<SYSTEM_TASK:>
Perform an ``ExecuteStreamingSql`` API request.
<END_TASK>
<USER_TASK:>
Description:
def execute_sql(
self,
sql,
params=None,
param_types=None,
query_mode=None,
partition=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
):
"""Perform an ``ExecuteStreamingSql`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type query_mode:
:class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode`
:param query_mode: Mode governing return of results / query plan. See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1
:type partition: bytes
:param partition: (Optional) one of the partition tokens returned
from :meth:`partition_query`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError:
for reuse of single-use snapshots, or if a transaction ID is
already pending for multiple-use snapshots.
""" |
if self._read_request_count > 0:
if not self._multi_use:
raise ValueError("Cannot re-use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction ID pending.")
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
params_pb = None
database = self._session._database
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
api = database.spanner_api
restart = functools.partial(
api.execute_streaming_sql,
self._session.name,
sql,
transaction=transaction,
params=params_pb,
param_types=param_types,
query_mode=query_mode,
partition_token=partition,
seqno=self._execute_sql_count,
metadata=metadata,
retry=retry,
timeout=timeout,
)
iterator = _restart_on_unavailable(restart)
self._read_request_count += 1
self._execute_sql_count += 1
if self._multi_use:
return StreamedResultSet(iterator, source=self)
else:
return StreamedResultSet(iterator) |
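A hedged sketch of a parameterized query, again assuming an existing `database` handle and a 'citizens' table; note that ``param_types`` must accompany ``params``:

from google.cloud.spanner_v1 import param_types

with database.snapshot() as snapshot:
    results = snapshot.execute_sql(
        "SELECT first_name FROM citizens WHERE age > @min_age",
        params={"min_age": 30},
        param_types={"min_age": param_types.INT64},
    )
    for row in results:
        print(row)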
<SYSTEM_TASK:>
Perform a ``PartitionRead`` API request for rows in a table.
<END_TASK>
<USER_TASK:>
Description:
def partition_read(
self,
table,
columns,
keyset,
index="",
partition_size_bytes=None,
max_partitions=None,
):
"""Perform a ``ParitionRead`` API request for rows in a table.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint; the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint; the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associated with the snapshot.
""" |
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes, max_partitions=max_partitions
)
response = api.partition_read(
session=self._session.name,
table=table,
columns=columns,
key_set=keyset._to_pb(),
transaction=transaction,
index=index,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions] |
<SYSTEM_TASK:>
Perform a ``PartitionQuery`` API request.
<END_TASK>
<USER_TASK:>
Description:
def partition_query(
self,
sql,
params=None,
param_types=None,
partition_size_bytes=None,
max_partitions=None,
):
"""Perform a ``ParitionQuery`` API request.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint; the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint; the actual number of partitions may
differ.
:rtype: iterable of bytes
:returns: a sequence of partition tokens
:raises ValueError:
for single-use snapshots, or if a transaction ID is
already associated with the snapshot.
""" |
if not self._multi_use:
raise ValueError("Cannot use single-use snapshot.")
if self._transaction_id is None:
raise ValueError("Transaction not started.")
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
params_pb = None
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
transaction = self._make_txn_selector()
partition_options = PartitionOptions(
partition_size_bytes=partition_size_bytes, max_partitions=max_partitions
)
response = api.partition_query(
session=self._session.name,
sql=sql,
transaction=transaction,
params=params_pb,
param_types=param_types,
partition_options=partition_options,
metadata=metadata,
)
return [partition.partition_token for partition in response.partitions] |
<SYSTEM_TASK:>
Begin a read-only transaction on the database.
<END_TASK>
<USER_TASK:>
Description:
def begin(self):
"""Begin a read-only transaction on the database.
:rtype: bytes
:returns: the ID for the newly-begun transaction.
:raises ValueError:
if the transaction is already begun, committed, or rolled back.
""" |
if not self._multi_use:
raise ValueError("Cannot call 'begin' on single-use snapshots")
if self._transaction_id is not None:
raise ValueError("Read-only transaction already begun")
if self._read_request_count > 0:
raise ValueError("Read-only transaction already pending")
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_selector = self._make_txn_selector()
response = api.begin_transaction(
self._session.name, txn_selector.begin, metadata=metadata
)
self._transaction_id = response.id
return self._transaction_id |
<SYSTEM_TASK:>
Returns the user-agent string for this client info.
<END_TASK>
<USER_TASK:>
Description:
def to_user_agent(self):
"""Returns the user-agent string for this client info.""" |
# Note: the order here is important as the internal metrics system
# expects these items to be in specific locations.
ua = ""
if self.user_agent is not None:
ua += "{user_agent} "
ua += "gl-python/{python_version} "
if self.grpc_version is not None:
ua += "grpc/{grpc_version} "
ua += "gax/{api_core_version} "
if self.gapic_version is not None:
ua += "gapic/{gapic_version} "
if self.client_library_version is not None:
ua += "gccl/{client_library_version} "
return ua.format(**self.__dict__).strip() |
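Purely illustrative, assuming the method above belongs to :class:`google.api_core.client_info.ClientInfo`; the version strings are made up and only show the header layout produced by the format string above.

from google.api_core.client_info import ClientInfo

info = ClientInfo(
    python_version="3.7.3",
    grpc_version="1.20.1",
    api_core_version="1.14.0",
    gapic_version="1.0.0",
    client_library_version="1.0.0",
)
info.to_user_agent()
# 'gl-python/3.7.3 grpc/1.20.1 gax/1.14.0 gapic/1.0.0 gccl/1.0.0'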
<SYSTEM_TASK:>
Create an instance of the gapic Trace API.
<END_TASK>
<USER_TASK:>
Description:
def make_trace_api(client):
"""
Create an instance of the gapic Trace API.
Args:
client (~google.cloud.trace.client.Client): The client that holds
configuration details.
Returns:
A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the
proper configurations.
""" |
generated = trace_service_client.TraceServiceClient(
credentials=client._credentials, client_info=_CLIENT_INFO
)
return _TraceAPI(generated, client) |
<SYSTEM_TASK:>
Convert a JSON bucket to the native object.
<END_TASK>
<USER_TASK:>
Description:
def _item_to_bucket(iterator, item):
"""Convert a JSON bucket to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a bucket.
:rtype: :class:`.Bucket`
:returns: The next bucket in the page.
""" |
name = item.get("name")
bucket = Bucket(iterator.client, name)
bucket._set_properties(item)
return bucket |
<SYSTEM_TASK:>
Get the email address of the project's GCS service account
<END_TASK>
<USER_TASK:>
Description:
def get_service_account_email(self, project=None):
"""Get the email address of the project's GCS service account
:type project: str
:param project:
(Optional) Project ID to use for retrieving GCS service account
email address. Defaults to the client's project.
:rtype: str
:returns: service account email address
""" |
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._base_connection.api_request(method="GET", path=path)
return api_response["email_address"] |
<SYSTEM_TASK:>
Factory constructor for bucket object.
<END_TASK>
<USER_TASK:>
Description:
def bucket(self, bucket_name, user_project=None):
"""Factory constructor for bucket object.
.. note::
This will not make an HTTP request; it simply instantiates
a bucket object owned by this client.
:type bucket_name: str
:param bucket_name: The name of the bucket to be instantiated.
:type user_project: str
:param user_project: (Optional) the project ID to be billed for API
requests made via the bucket.
:rtype: :class:`google.cloud.storage.bucket.Bucket`
:returns: The bucket object created.
""" |
return Bucket(client=self, name=bucket_name, user_project=user_project) |
<SYSTEM_TASK:>
Get a bucket by name.
<END_TASK>
<USER_TASK:>
Description:
def get_bucket(self, bucket_name):
"""Get a bucket by name.
If the bucket isn't found, this will raise a
:class:`google.cloud.exceptions.NotFound`.
For example:
.. literalinclude:: snippets.py
:start-after: [START get_bucket]
:end-before: [END get_bucket]
This implements "storage.buckets.get".
:type bucket_name: str
:param bucket_name: The name of the bucket to get.
:rtype: :class:`google.cloud.storage.bucket.Bucket`
:returns: The bucket matching the name provided.
:raises: :class:`google.cloud.exceptions.NotFound`
""" |
bucket = Bucket(self, name=bucket_name)
bucket.reload(client=self)
return bucket |
<SYSTEM_TASK:>
Get all buckets in the project associated to the client.
<END_TASK>
<USER_TASK:>
Description:
def list_buckets(
self,
max_results=None,
page_token=None,
prefix=None,
projection="noAcl",
fields=None,
project=None,
):
"""Get all buckets in the project associated to the client.
This will not populate the list of blobs available in each
bucket.
.. literalinclude:: snippets.py
:start-after: [START list_buckets]
:end-before: [END list_buckets]
This implements "storage.buckets.list".
:type max_results: int
:param max_results: Optional. The maximum number of buckets to return.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of buckets, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:type prefix: str
:param prefix: Optional. Filter results to buckets whose names begin
with this prefix.
:type projection: str
:param projection:
(Optional) Specifies the set of properties to return. If used, must
be 'full' or 'noAcl'. Defaults to 'noAcl'.
:type fields: str
:param fields:
(Optional) Selector specifying which fields to include in a partial
response. Must be a list of fields. For example, to get a partial
response with just the next page token and the ID of each
bucket returned: 'items/id,nextPageToken'
:type project: str
:param project: (Optional) the project whose buckets are to be listed.
If not passed, uses the project set on the client.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:raises ValueError: if both ``project`` is ``None`` and the client's
project is also ``None``.
:returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket`
belonging to this project.
""" |
if project is None:
project = self.project
if project is None:
raise ValueError("Client project not set: pass an explicit project.")
extra_params = {"project": project}
if prefix is not None:
extra_params["prefix"] = prefix
extra_params["projection"] = projection
if fields is not None:
extra_params["fields"] = fields
return page_iterator.HTTPIterator(
client=self,
api_request=self._connection.api_request,
path="/b",
item_to_value=_item_to_bucket,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
) |
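A minimal sketch: iterate the buckets visible to the client, optionally narrowed by a name prefix (the prefix is illustrative).

from google.cloud import storage

client = storage.Client()
for bucket in client.list_buckets(prefix="logs-"):
    print(bucket.name)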
<SYSTEM_TASK:>
Check that a stream was opened in read-binary mode.
<END_TASK>
<USER_TASK:>
Description:
def _check_mode(stream):
"""Check that a stream was opened in read-binary mode.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute
and is not among ``rb``, ``r+b`` or ``rb+``.
""" |
mode = getattr(stream, "mode", None)
if isinstance(stream, gzip.GzipFile):
if mode != gzip.READ:
raise ValueError(
"Cannot upload gzip files opened in write mode: use "
"gzip.GzipFile(filename, mode='rb')"
)
else:
if mode is not None and mode not in ("rb", "r+b", "rb+"):
raise ValueError(
"Cannot upload files opened in text mode: use "
"open(filename, mode='rb') or open(filename, mode='r+b')"
) |
<SYSTEM_TASK:>
Get the email address of the project's BigQuery service account
<END_TASK>
<USER_TASK:>
Description:
def get_service_account_email(self, project=None):
"""Get the email address of the project's BigQuery service account
Note:
This is the service account that BigQuery uses to manage tables
encrypted by a key in KMS.
Args:
project (str, optional):
Project ID to use for retrieving service account email.
Defaults to the client's project.
Returns:
str: service account email address
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> client.get_service_account_email()
[email protected]
""" |
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._connection.api_request(method="GET", path=path)
return api_response["email"] |
<SYSTEM_TASK:>
List projects for the project associated with this client.
<END_TASK>
<USER_TASK:>
Description:
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return.
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
""" |
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
) |
<SYSTEM_TASK:>
List datasets for the project associated with this client.
<END_TASK>
<USER_TASK:>
Description:
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retrieving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`
associated with the project.
""" |
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
) |
<SYSTEM_TASK:>
Fetch the table referenced by ``table``.
<END_TASK>
<USER_TASK:>
Description:
def get_table(self, table, retry=DEFAULT_RETRY):
"""Fetch the table referenced by ``table``.
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance.
""" |
table_ref = _table_arg_to_table_ref(table, default_project=self.project)
api_response = self._call_api(retry, method="GET", path=table_ref.path)
return Table.from_api_repr(api_response) |
<SYSTEM_TASK:>
Change some fields of a dataset.
<END_TASK>
<USER_TASK:>
Description:
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
""" |
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response) |
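A sketch of the read-modify-write pattern the ETag check enables, assuming a dataset named 'my_dataset' exists in the client's project and that this client version accepts a string dataset ID.

from google.cloud import bigquery

client = bigquery.Client()
dataset = client.get_dataset("my_dataset")
dataset.description = "Nightly exports"  # change a single field
dataset = client.update_dataset(dataset, ["description"])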
<SYSTEM_TASK:>
Change some fields of a table.
<END_TASK>
<USER_TASK:>
Description:
def update_table(self, table, fields, retry=DEFAULT_RETRY):
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, it will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
Args:
table (google.cloud.bigquery.table.Table): The table to update.
fields (Sequence[str]):
The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.table.Table:
The table resource returned from the API call.
""" |
partial = table._build_resource(fields)
if table.etag is not None:
headers = {"If-Match": table.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=table.path, data=partial, headers=headers
)
return Table.from_api_repr(api_response) |
<SYSTEM_TASK:>
Get the query results object for a query job.
<END_TASK>
<USER_TASK:>
Description:
def _get_query_results(
self, job_id, retry, project=None, timeout_ms=None, location=None
):
"""Get the query results object for a query job.
Arguments:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
project (str):
(Optional) project ID for the query job (defaults to the
project of the client).
timeout_ms (int):
(Optional) number of milliseconds the API call should
wait for the query to complete before the request times out.
location (str): Location of the query job.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
""" |
extra_params = {"maxResults": 0}
if project is None:
project = self.project
if timeout_ms is not None:
extra_params["timeoutMs"] = timeout_ms
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/queries/{}".format(project, job_id)
# This call is typically made in a polling loop that checks whether the
# job is complete (from QueryJob.done(), called ultimately from
# QueryJob.result()). So we don't need to poll here.
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return _QueryResults.from_api_repr(resource) |
<SYSTEM_TASK:>
Detect correct job type from resource and instantiate.
<END_TASK>
<USER_TASK:>
Description:
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
""" |
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self) |
<SYSTEM_TASK:>
Attempt to cancel a job from a job ID.
<END_TASK>
<USER_TASK:>
Description:
def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Attempt to cancel a job from a job ID.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
""" |
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
resource = self._call_api(
retry, method="POST", path=path, query_params=extra_params
)
return self.job_from_resource(resource["job"]) |
<SYSTEM_TASK:>
List jobs for the project associated with this client.
<END_TASK>
<USER_TASK:>
Description:
def list_jobs(
self,
project=None,
max_results=None,
page_token=None,
all_users=None,
state_filter=None,
retry=DEFAULT_RETRY,
min_creation_time=None,
max_creation_time=None,
):
"""List jobs for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
Args:
project (str, optional):
Project ID to use for retrieving jobs. Defaults
to the client's project.
max_results (int, optional):
Maximum number of jobs to return.
page_token (str, optional):
Opaque marker for the next "page" of jobs. If not
passed, the API will return the first page of jobs. The token
marks the beginning of the iterator to be returned and the
value of the ``page_token`` can be accessed at
``next_page_token`` of
:class:`~google.api_core.page_iterator.HTTPIterator`.
all_users (bool, optional):
If true, include jobs owned by all users in the project.
Defaults to :data:`False`.
state_filter (str, optional):
If set, include only jobs matching the given state. One of:
* ``"done"``
* ``"pending"``
* ``"running"``
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
min_creation_time (datetime.datetime, optional):
Min value for job creation time. If set, only jobs created
after or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
max_creation_time (datetime.datetime, optional):
Max value for job creation time. If set, only jobs created
before or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
Returns:
google.api_core.page_iterator.Iterator:
Iterable of job instances.
""" |
extra_params = {
"allUsers": all_users,
"stateFilter": state_filter,
"minCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(min_creation_time)
),
"maxCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(max_creation_time)
),
"projection": "full",
}
extra_params = {
param: value for param, value in extra_params.items() if value is not None
}
if project is None:
project = self.project
path = "/projects/%s/jobs" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_job,
items_key="jobs",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
) |
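A short sketch listing recently finished jobs (the 24-hour window is arbitrary):

import datetime

from google.cloud import bigquery

client = bigquery.Client()
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
for job in client.list_jobs(state_filter="done", min_creation_time=yesterday):
    print(job.job_id, job.job_type)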
<SYSTEM_TASK:>
Starts a job for loading data into a table from Cloud Storage.
<END_TASK>
<USER_TASK:>
Description:
def load_table_from_uri(
self,
source_uris,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Starts a job for loading data into a table from CloudStorage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
Arguments:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
""" |
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source_uris, six.string_types):
source_uris = [source_uris]
destination = _table_arg_to_table_ref(destination, default_project=self.project)
load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
load_job._begin(retry=retry)
return load_job |
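A hedged usage sketch: load a headered CSV from Cloud Storage into a table; the bucket, object, dataset, and table names are placeholders.

from google.cloud import bigquery

client = bigquery.Client()
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
job_config.autodetect = True
load_job = client.load_table_from_uri(
    "gs://my-bucket/data.csv", "my_dataset.my_table", job_config=job_config
)
load_job.result()  # Wait for the load to finish.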
<SYSTEM_TASK:>
Upload the contents of this table from a file-like object.
<END_TASK>
<USER_TASK:>
Description:
def load_table_from_file(
self,
file_obj,
destination,
rewind=False,
size=None,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
""" |
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
destination = _table_arg_to_table_ref(destination, default_project=self.project)
job_ref = job._JobReference(job_id, project=project, location=location)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET)
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(
file_obj, job_resource, num_retries
)
else:
response = self._do_multipart_upload(
file_obj, job_resource, size, num_retries
)
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response)
return self.job_from_resource(response.json()) |
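A matching sketch for local uploads; note the binary-mode requirement enforced by ``_check_mode`` above. The file and table names are placeholders.

from google.cloud import bigquery

client = bigquery.Client()
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
with open("rows.json", "rb") as source_file:  # must be opened in binary mode
    job = client.load_table_from_file(
        source_file, "my_dataset.my_table", job_config=job_config
    )
job.result()  # Wait for the upload and load job to complete.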