<SYSTEM_TASK:> Creates a function equivalent to a_func, but that retries on certain <END_TASK>
<USER_TASK:>
Description:
def retryable(a_func, retry_options, **kwargs):
    """Creates a function equivalent to a_func, but that retries on certain
    exceptions.

    Args:
        a_func (callable): A callable.
        retry_options (RetryOptions): Configures the exceptions upon which the
            callable should retry, and the parameters to the exponential
            backoff retry algorithm.
        kwargs: Additional arguments passed through to the callable.

    Returns:
        Callable: A function that will retry on exception.
    """
    delay_mult = retry_options.backoff_settings.retry_delay_multiplier
    max_delay_millis = retry_options.backoff_settings.max_retry_delay_millis
    has_timeout_settings = _has_timeout_settings(retry_options.backoff_settings)

    if has_timeout_settings:
        timeout_mult = retry_options.backoff_settings.rpc_timeout_multiplier
        max_timeout = (retry_options.backoff_settings.max_rpc_timeout_millis /
                       _MILLIS_PER_SECOND)
        total_timeout = (retry_options.backoff_settings.total_timeout_millis /
                         _MILLIS_PER_SECOND)

    def inner(*args):
        """Equivalent to ``a_func``, but retries upon transient failure.

        Retrying is done through an exponential backoff algorithm configured
        by the options in ``retry``.
        """
        delay = retry_options.backoff_settings.initial_retry_delay_millis
        exc = errors.RetryError('Retry total timeout exceeded before any '
                                'response was received')
        if has_timeout_settings:
            timeout = (
                retry_options.backoff_settings.initial_rpc_timeout_millis /
                _MILLIS_PER_SECOND)

            now = time.time()
            deadline = now + total_timeout
        else:
            timeout = None
            deadline = None

        while deadline is None or now < deadline:
            try:
                to_call = add_timeout_arg(a_func, timeout, **kwargs)
                return to_call(*args)
            except Exception as exception:  # pylint: disable=broad-except
                code = config.exc_to_code(exception)
                if code not in retry_options.retry_codes:
                    raise errors.RetryError(
                        'Exception occurred in retry method that was not'
                        ' classified as transient', exception)

                exc = errors.RetryError(
                    'Retry total timeout exceeded with exception', exception)

                # Sleep a random number which will, on average, equal the
                # expected delay.
                to_sleep = random.uniform(0, delay * 2)
                time.sleep(to_sleep / _MILLIS_PER_SECOND)
                delay = min(delay * delay_mult, max_delay_millis)

                if has_timeout_settings:
                    now = time.time()
                    timeout = min(
                        timeout * timeout_mult, max_timeout, deadline - now)

        raise exc

    return inner
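A minimal usage sketch follows. It is hedged: ``get_rows`` is a placeholder callable, and the field names mirror the ``BackoffSettings``/``RetryOptions`` attributes the code above reads; exact constructor signatures may differ.

    # Sketch: retry a flaky callable on UNAVAILABLE with exponential backoff.
    # All names except `retryable` are illustrative.
    backoff = BackoffSettings(
        initial_retry_delay_millis=100,
        retry_delay_multiplier=1.3,
        max_retry_delay_millis=1000,
        initial_rpc_timeout_millis=2000,
        rpc_timeout_multiplier=1.0,
        max_rpc_timeout_millis=2000,
        total_timeout_millis=10000,
    )
    retry_options = RetryOptions(retry_codes=[StatusCode.UNAVAILABLE],
                                 backoff_settings=backoff)

    get_rows_with_retry = retryable(get_rows, retry_options)
    rows = get_rows_with_retry('table-1')  # retried until success or deadline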
<SYSTEM_TASK:> Creates a ``GaxError`` or subclass. <END_TASK>
<USER_TASK:>
Description:
def create_error(msg, cause=None):
    """Creates a ``GaxError`` or subclass.

    Args:
        msg (string): describes the error that occurred.
        cause (Exception, optional): the exception raised by a lower layer of
            the RPC stack (for example, gRPC) that caused this exception, or
            None if this exception originated in GAX.

    Returns:
        .GaxError: The exception that wraps ``cause``.
    """
    status_code = config.exc_to_code(cause)
    status_name = config.NAME_STATUS_CODES.get(status_code)
    if status_name == 'INVALID_ARGUMENT':
        return InvalidArgumentError(msg, cause=cause)
    else:
        return GaxError(msg, cause=cause)
<SYSTEM_TASK:> Gets the latest state of a long-running operation. Clients can use this <END_TASK>
<USER_TASK:>
Description:
def get_operation(self, name, options=None):
    """
    Gets the latest state of a long-running operation. Clients can use this
    method to poll the operation result at intervals as recommended by the
    API service.

    Example:
        >>> from google.gapic.longrunning import operations_client
        >>> api = operations_client.OperationsClient()
        >>> name = ''
        >>> response = api.get_operation(name)

    Args:
        name (string): The name of the operation resource.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.

    Returns:
        A :class:`google.longrunning.operations_pb2.Operation` instance.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.GetOperationRequest(name=name)
    return self._get_operation(request, options)
<SYSTEM_TASK:> Starts asynchronous cancellation on a long-running operation. The server <END_TASK>
<USER_TASK:>
Description:
def cancel_operation(self, name, options=None):
    """
    Starts asynchronous cancellation on a long-running operation. The server
    makes a best effort to cancel the operation, but success is not
    guaranteed. If the server doesn't support this method, it returns
    ``google.rpc.Code.UNIMPLEMENTED``. Clients can use
    ``Operations.GetOperation`` or other methods to check whether the
    cancellation succeeded or whether the operation completed despite
    cancellation. On successful cancellation, the operation is not deleted;
    instead, it becomes an operation with an ``Operation.error`` value with a
    ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``.

    Example:
        >>> from google.gapic.longrunning import operations_client
        >>> api = operations_client.OperationsClient()
        >>> name = ''
        >>> api.cancel_operation(name)

    Args:
        name (string): The name of the operation resource to be cancelled.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.CancelOperationRequest(name=name)
    self._cancel_operation(request, options)
<SYSTEM_TASK:> Deletes a long-running operation. This method indicates that the client is <END_TASK>
<USER_TASK:>
Description:
def delete_operation(self, name, options=None):
    """
    Deletes a long-running operation. This method indicates that the client
    is no longer interested in the operation result. It does not cancel the
    operation. If the server doesn't support this method, it returns
    ``google.rpc.Code.UNIMPLEMENTED``.

    Example:
        >>> from google.gapic.longrunning import operations_client
        >>> api = operations_client.OperationsClient()
        >>> name = ''
        >>> api.delete_operation(name)

    Args:
        name (string): The name of the operation resource to be deleted.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.DeleteOperationRequest(name=name)
    self._delete_operation(request, options)
<SYSTEM_TASK:> Raise ValueError if more than one keyword argument is not None. <END_TASK>
<USER_TASK:>
Description:
def check_oneof(**kwargs):
    """Raise ValueError if more than one keyword argument is not None.

    Args:
        kwargs (dict): The keyword arguments sent to the function.

    Returns:
        None

    Raises:
        ValueError: If more than one entry in kwargs is not None.
    """
    # Sanity check: If no keyword arguments were sent, this is fine.
    if not kwargs:
        return None

    not_nones = [val for val in kwargs.values() if val is not None]
    if len(not_nones) > 1:
        raise ValueError('Only one of {fields} should be set.'.format(
            fields=', '.join(sorted(kwargs.keys())),
        ))
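For instance (a sketch; the argument names are made up):

    check_oneof(start=None, end=None)  # OK: nothing is set
    check_oneof(start=5, end=None)     # OK: exactly one value is set
    check_oneof(start=5, end=10)       # ValueError: 'Only one of end, start should be set.'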
<SYSTEM_TASK:> Creates a function that transforms an API call into a bundling call. <END_TASK>
<USER_TASK:>
Description:
def _bundleable(desc):
    """Creates a function that transforms an API call into a bundling call.

    It transforms ``a_func`` from an API call that receives the requests and
    returns the response into a callable that receives the same request, and
    returns a :class:`bundling.Event`.

    The returned Event object can be used to obtain the eventual result of the
    bundled call.

    Args:
        desc (gax.BundleDescriptor): describes the bundling that ``a_func``
            supports.

    Returns:
        Callable: takes the API call's request and keyword args and returns a
        bundling.Event object.
    """
    def inner(a_func, settings, request, **kwargs):
        """Schedules execution of a bundling task."""
        if not settings.bundler:
            return a_func(request, **kwargs)

        the_id = bundling.compute_bundle_id(
            request, desc.request_discriminator_fields)
        return settings.bundler.schedule(a_func, the_id, desc, request, kwargs)

    return inner
<SYSTEM_TASK:> Creates a function that returns an iterator to perform page-streaming. <END_TASK>
<USER_TASK:>
Description:
def _page_streamable(page_descriptor):
    """Creates a function that returns an iterator to perform page-streaming.

    Args:
        page_descriptor (:class:`PageDescriptor`): indicates the structure of
            page streaming to be performed.

    Returns:
        Callable: A function that returns an iterator.
    """
    def inner(a_func, settings, request, **kwargs):
        """Actual page-streaming based on the settings."""
        page_iterator = gax.PageIterator(
            a_func, page_descriptor, settings.page_token, request, **kwargs)
        if settings.flatten_pages:
            return gax.ResourceIterator(page_iterator)
        else:
            return page_iterator

    return inner
<SYSTEM_TASK:> Constructs a dictionary mapping method names to _CallSettings. <END_TASK>
<USER_TASK:>
Description:
def construct_settings(
        service_name, client_config, config_override,
        retry_names, bundle_descriptors=None, page_descriptors=None,
        metrics_headers=(), kwargs=None):
    """Constructs a dictionary mapping method names to _CallSettings.

    The ``client_config`` parameter is parsed from a client configuration JSON
    file of the form:

    .. code-block:: json

       {
         "interfaces": {
           "google.fake.v1.ServiceName": {
             "retry_codes": {
               "idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"],
               "non_idempotent": []
             },
             "retry_params": {
               "default": {
                 "initial_retry_delay_millis": 100,
                 "retry_delay_multiplier": 1.2,
                 "max_retry_delay_millis": 1000,
                 "initial_rpc_timeout_millis": 2000,
                 "rpc_timeout_multiplier": 1.5,
                 "max_rpc_timeout_millis": 30000,
                 "total_timeout_millis": 45000
               }
             },
             "methods": {
               "CreateFoo": {
                 "retry_codes_name": "idempotent",
                 "retry_params_name": "default",
                 "timeout_millis": 30000
               },
               "Publish": {
                 "retry_codes_name": "non_idempotent",
                 "retry_params_name": "default",
                 "bundling": {
                   "element_count_threshold": 40,
                   "element_count_limit": 200,
                   "request_byte_threshold": 90000,
                   "request_byte_limit": 100000,
                   "delay_threshold_millis": 100
                 }
               }
             }
           }
         }
       }

    Args:
        service_name (str): The fully-qualified name of this service, used as
            a key into the client config file (in the example above, this
            value would be ``google.fake.v1.ServiceName``).
        client_config (dict): A dictionary parsed from the standard API client
            config file.
        bundle_descriptors (Mapping[str, BundleDescriptor]): A dictionary of
            method names to BundleDescriptor objects for methods that are
            bundling-enabled.
        page_descriptors (Mapping[str, PageDescriptor]): A dictionary of
            method names to PageDescriptor objects for methods that are page
            streaming-enabled.
        config_override (dict): A dictionary with the same structure as
            ``client_config``, used to override its settings. Usually
            ``client_config`` is supplied from the default config and
            ``config_override`` is specified by users.
        retry_names (Mapping[str, object]): A dictionary mapping the strings
            referring to response status codes to the Python objects
            representing those codes.
        metrics_headers (Mapping[str, str]): Dictionary of headers to be
            passed for analytics. Sent as a dictionary; eventually becomes a
            space-separated string (e.g. 'foo/1.0.0 bar/3.14.1').
        kwargs (dict): The keyword arguments to be passed to the API calls.

    Returns:
        dict: A dictionary mapping method names to _CallSettings.

    Raises:
        KeyError: If the configuration for the service in question cannot be
            located in the provided ``client_config``.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=protected-access
    defaults = {}

    bundle_descriptors = bundle_descriptors or {}
    page_descriptors = page_descriptors or {}
    kwargs = kwargs or {}

    # Sanity check: It is possible that we got this far but some headers
    # were specified with an older library, which sends them as...
    #   kwargs={'metadata': [('x-goog-api-client', 'foo/1.0 bar/3.0')]}
    #
    # Note: This is the final format we will send down to GRPC shortly.
    #
    # Remove any x-goog-api-client header that may have been present
    # in the metadata list.
    if 'metadata' in kwargs:
        kwargs['metadata'] = [value for value in kwargs['metadata']
                              if value[0].lower() != 'x-goog-api-client']

    # Fill out the metrics headers with GAX and GRPC info, and convert
    # to a string in the format that the GRPC layer expects.
    kwargs.setdefault('metadata', [])
    kwargs['metadata'].append(
        ('x-goog-api-client', metrics.stringify(metrics.fill(metrics_headers)))
    )

    try:
        service_config = client_config['interfaces'][service_name]
    except KeyError:
        raise KeyError('Client configuration not found for service: {}'
                       .format(service_name))

    overrides = config_override.get('interfaces', {}).get(service_name, {})

    for method in service_config.get('methods'):
        method_config = service_config['methods'][method]
        overriding_method = overrides.get('methods', {}).get(method, {})
        snake_name = _upper_camel_to_lower_under(method)

        if overriding_method and overriding_method.get('timeout_millis'):
            timeout = overriding_method['timeout_millis']
        else:
            timeout = method_config['timeout_millis']
        timeout /= _MILLIS_PER_SECOND

        bundle_descriptor = bundle_descriptors.get(snake_name)
        bundling_config = method_config.get('bundling', None)
        if overriding_method and 'bundling' in overriding_method:
            bundling_config = overriding_method['bundling']
        bundler = _construct_bundling(bundling_config, bundle_descriptor)

        retry_options = _merge_retry_options(
            _construct_retry(method_config, service_config['retry_codes'],
                             service_config['retry_params'], retry_names),
            _construct_retry(overriding_method, overrides.get('retry_codes'),
                             overrides.get('retry_params'), retry_names))

        defaults[snake_name] = gax._CallSettings(
            timeout=timeout, retry=retry_options,
            page_descriptor=page_descriptors.get(snake_name),
            bundler=bundler, bundle_descriptor=bundle_descriptor,
            kwargs=kwargs)
    return defaults
<SYSTEM_TASK:> Updates a_func to wrap exceptions with GaxError <END_TASK>
<USER_TASK:>
Description:
def _catch_errors(a_func, to_catch):
    """Updates a_func to wrap exceptions with GaxError

    Args:
        a_func (callable): A callable.
        to_catch (list[Exception]): Configures the exceptions to wrap.

    Returns:
        Callable: A function that will wrap certain exceptions with GaxError
    """
    def inner(*args, **kwargs):
        """Wraps specified exceptions"""
        try:
            return a_func(*args, **kwargs)
        # pylint: disable=catching-non-exception
        except tuple(to_catch) as exception:
            utils.raise_with_traceback(
                gax.errors.create_error('RPC failed', cause=exception))

    return inner
<SYSTEM_TASK:> Converts an rpc call into an API call governed by the settings. <END_TASK>
<USER_TASK:>
Description:
def create_api_call(func, settings):
    """Converts an rpc call into an API call governed by the settings.

    In typical usage, ``func`` will be a callable used to make an rpc request.
    This will most likely be a bound method from a request stub used to make
    an rpc call.

    The result is created by applying a series of function decorators defined
    in this module to ``func``. ``settings`` is used to determine which
    function decorators to apply.

    The result is another callable which for most values of ``settings`` has
    the same signature as the original. Only when ``settings`` configures
    bundling does the signature change.

    Args:
        func (Callable[Sequence[object], object]): is used to make a bare rpc
            call.
        settings (_CallSettings): provides the settings for this call.

    Returns:
        Callable[Sequence[object], object]: a bound method on a request stub
        used to make an rpc call.

    Raises:
        ValueError: if ``settings`` has incompatible values, e.g., if bundling
            and page_streaming are both configured.
    """
    def base_caller(api_call, _, *args):
        """Simply call api_call and ignore settings."""
        return api_call(*args)

    def inner(request, options=None):
        """Invoke with the actual settings."""
        this_options = _merge_options_metadata(options, settings)
        this_settings = settings.merge(this_options)

        if this_settings.retry and this_settings.retry.retry_codes:
            api_call = gax.retry.retryable(
                func, this_settings.retry, **this_settings.kwargs)
        else:
            api_call = gax.retry.add_timeout_arg(
                func, this_settings.timeout, **this_settings.kwargs)

        api_call = _catch_errors(api_call, gax.config.API_ERRORS)
        return api_caller(api_call, this_settings, request)

    if settings.page_descriptor:
        if settings.bundler and settings.bundle_descriptor:
            raise ValueError('The API call has incompatible settings: '
                             'bundling and page streaming')
        api_caller = _page_streamable(settings.page_descriptor)
    elif settings.bundler and settings.bundle_descriptor:
        api_caller = _bundleable(settings.bundle_descriptor)
    else:
        api_caller = base_caller

    return inner
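A hedged wiring sketch (``stub.GetFoo`` and the settings key are placeholders standing in for a bound gRPC method and a ``_CallSettings`` entry produced by ``construct_settings``):

    # Sketch: turn a bare gRPC stub method into a governed API call.
    get_foo = create_api_call(stub.GetFoo, settings['get_foo'])

    response = get_foo(request)                                  # defaults
    response = get_foo(request, options=CallOptions(timeout=5))  # per-call override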
<SYSTEM_TASK:> Retrieve the given key off of the object. <END_TASK>
<USER_TASK:>
Description:
def get(pb_or_dict, key, default=_SENTINEL):
    """Retrieve the given key off of the object.

    If a default is specified, return it if the key is not found, otherwise
    raise KeyError.

    Args:
        pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object.
        key (str): The key to retrieve from the object in question.
        default (Any): If the key is not present on the object, and a default
            is set, returns that default instead. A type-appropriate falsy
            default is generally recommended, as protobuf messages almost
            always have default values for unset values and it is not always
            possible to tell the difference between a falsy value and an
            unset one. If no default is set, raises KeyError for not found
            values.

    Returns:
        Any: The return value from the underlying message or dict.

    Raises:
        KeyError: If the key is not found. Note that, for unset values,
            messages and dictionaries may not have consistent behavior.
        TypeError: If pb_or_dict is not a Message or Mapping.
    """
    # We may need to get a nested key. Resolve this.
    key, subkey = _resolve_subkeys(key)

    # Attempt to get the value from the two types of objects we know about.
    # If we get something else, complain.
    if isinstance(pb_or_dict, Message):
        answer = getattr(pb_or_dict, key, default)
    elif isinstance(pb_or_dict, collections.Mapping):
        answer = pb_or_dict.get(key, default)
    else:
        raise TypeError('Tried to fetch a key %s on an invalid object; '
                        'expected a dict or protobuf message.' % key)

    # If the object we got back is our sentinel, raise KeyError; this is
    # a "not found" case.
    if answer is _SENTINEL:
        raise KeyError(key)

    # If a subkey exists, call this method recursively against the answer.
    if subkey and answer is not default:
        return get(answer, subkey, default=default)

    # Return the value.
    return answer
<SYSTEM_TASK:> Set the given key on the object. <END_TASK>
<USER_TASK:>
Description:
def set(pb_or_dict, key, value):
    """Set the given key on the object.

    Args:
        pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object.
        key (str): The key on the object in question.
        value (Any): The value to set.

    Raises:
        TypeError: If pb_or_dict is not a Message or Mapping.
    """
    # pylint: disable=redefined-builtin,too-many-branches
    # redefined-builtin: We want 'set' to be part of the public interface.
    # too-many-branches: This method is inherently complex.

    # Sanity check: Is our target object valid?
    if not isinstance(pb_or_dict, (collections.MutableMapping, Message)):
        raise TypeError('Tried to set a key %s on an invalid object; '
                        'expected a dict or protobuf message.' % key)

    # We may be setting a nested key. Resolve this.
    key, subkey = _resolve_subkeys(key)

    # If a subkey exists, then get that object and call this method
    # recursively against it using the subkey.
    if subkey is not None:
        if isinstance(pb_or_dict, collections.MutableMapping):
            pb_or_dict.setdefault(key, {})
        set(get(pb_or_dict, key), subkey, value)
        return

    # Attempt to set the value on the types of objects we know how to deal
    # with.
    if isinstance(pb_or_dict, collections.MutableMapping):
        pb_or_dict[key] = value
    elif isinstance(value, (collections.MutableSequence, tuple)):
        # Clear the existing repeated protobuf message of any elements
        # currently inside it.
        while getattr(pb_or_dict, key):
            getattr(pb_or_dict, key).pop()

        # Write our new elements to the repeated field.
        for item in value:
            if isinstance(item, collections.Mapping):
                getattr(pb_or_dict, key).add(**item)
            else:
                getattr(pb_or_dict, key).extend([item])
    elif isinstance(value, collections.Mapping):
        # Assign the dictionary values to the protobuf message.
        for item_key, item_value in value.items():
            set(getattr(pb_or_dict, key), item_key, item_value)
    elif isinstance(value, Message):
        # Assign the protobuf message values to the protobuf message.
        for item_key, item_value in value.ListFields():
            set(getattr(pb_or_dict, key), item_key.name, item_value)
    else:
        setattr(pb_or_dict, key, value)
<SYSTEM_TASK:> Set the key on the object to the value if the current value is falsy. <END_TASK>
<USER_TASK:>
Description:
def setdefault(pb_or_dict, key, value):
    """Set the key on the object to the value if the current value is falsy.

    Because protobuf Messages do not distinguish between unset values and
    falsy ones particularly well, this method treats any falsy value (e.g. 0,
    empty list) as a target to be overwritten, on both Messages and
    dictionaries.

    Args:
        pb_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object.
        key (str): The key on the object in question.
        value (Any): The value to set.

    Raises:
        TypeError: If pb_or_dict is not a Message or Mapping.
    """
    if not get(pb_or_dict, key, default=None):
        set(pb_or_dict, key, value)
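A quick sketch of how the three helpers compose on a plain dict (a protobuf ``Message`` behaves the same way; the keys are illustrative):

    book = {}
    set(book, 'author.name', 'Melville')    # dotted keys create nesting
    get(book, 'author.name')                # -> 'Melville'
    get(book, 'author.isbn', default=None)  # -> None rather than KeyError
    setdefault(book, 'author.name', 'n/a')  # no-op: current value is truthy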
<SYSTEM_TASK:> Given a key which may actually be a nested key, return the top level <END_TASK>
<USER_TASK:>
Description:
def _resolve_subkeys(key, separator='.'):
    """Given a key which may actually be a nested key, return the top level
    key and any nested subkeys as separate values.

    Args:
        key (str): A string that may or may not contain the separator.
        separator (str): The namespace separator. Defaults to `.`.

    Returns:
        Tuple[str, str]: The key and subkey(s).
    """
    subkey = None
    if separator in key:
        index = key.index(separator)
        subkey = key[index + 1:]
        key = key[:index]
    return key, subkey
<SYSTEM_TASK:> Returns new _CallSettings merged from this and a CallOptions object. <END_TASK>
<USER_TASK:>
Description:
def merge(self, options):
    """Returns new _CallSettings merged from this and a CallOptions object.

    Note that if the CallOptions instance specifies a page_token, the merged
    _CallSettings will have ``flatten_pages`` disabled. This permits toggling
    per-resource/per-page page streaming.

    Args:
        options (CallOptions): an instance whose values override those in
            this object. If None, ``merge`` returns a copy of this object.

    Returns:
        CallSettings: The merged settings and options.
    """
    if not options:
        return _CallSettings(
            timeout=self.timeout, retry=self.retry,
            page_descriptor=self.page_descriptor,
            page_token=self.page_token, bundler=self.bundler,
            bundle_descriptor=self.bundle_descriptor,
            kwargs=self.kwargs)
    else:
        if options.timeout == OPTION_INHERIT:
            timeout = self.timeout
        else:
            timeout = options.timeout

        if options.retry == OPTION_INHERIT:
            retry = self.retry
        else:
            retry = options.retry

        if options.page_token == OPTION_INHERIT:
            page_token = self.page_token
        else:
            page_token = options.page_token

        if options.is_bundling:
            bundler = self.bundler
        else:
            bundler = None

        if options.kwargs == OPTION_INHERIT:
            kwargs = self.kwargs
        else:
            kwargs = self.kwargs.copy()
            kwargs.update(options.kwargs)

        return _CallSettings(
            timeout=timeout, retry=retry,
            page_descriptor=self.page_descriptor, page_token=page_token,
            bundler=bundler, bundle_descriptor=self.bundle_descriptor,
            kwargs=kwargs)
<SYSTEM_TASK:> If last Operation's value of `done` is true, returns false; <END_TASK>
<USER_TASK:>
Description:
def cancel(self):
    """If last Operation's value of `done` is true, returns false;
    otherwise, issues OperationsClient.cancel_operation and returns true.
    """
    if self.done():
        return False

    self._client.cancel_operation(self._operation.name)
    return True
<SYSTEM_TASK:> Enters polling loop on OperationsClient.get_operation, and once <END_TASK>
<USER_TASK:>
Description:
def result(self, timeout=None):
    """Enters polling loop on OperationsClient.get_operation, and once
    Operation.done is true, then returns Operation.response if successful or
    throws GaxError if not successful.

    This method will wait up to timeout seconds. If the call hasn't completed
    in timeout seconds, then a RetryError will be raised. timeout can be an
    int or float. If timeout is not specified or None, there is no limit to
    the wait time.
    """
    # Check exceptional case: raise if no response
    if not self._poll(timeout).HasField('response'):
        raise GaxError(self._operation.error.message)

    # Return expected result
    return _from_any(self._result_type, self._operation.response)
<SYSTEM_TASK:> Enters a polling loop on OperationsClient.get_operation, and once the <END_TASK>
<USER_TASK:>
Description:
def add_done_callback(self, fn):  # pylint: disable=invalid-name
    """Enters a polling loop on OperationsClient.get_operation, and once the
    operation is done or cancelled, calls the function with this
    _OperationFuture. Added callables are called in the order that they were
    added.
    """
    if self._operation.done:
        _try_callback(self, fn)
    else:
        self._queue.put(dill.dumps(fn))
        if self._process is None:
            self._process = mp.Process(target=self._execute_tasks)
            self._process.start()
<SYSTEM_TASK:> Generates the JOIN sql for the join tables and join condition <END_TASK>
<USER_TASK:>
Description:
def get_sql(self):
    """
    Generates the JOIN sql for the join tables and join condition

    :rtype: str
    :return: the JOIN sql for the join tables and join condition
    """
    return '{0} {1} ON {2}'.format(self.join_type, self.right_table.get_sql(), self.get_condition())
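For example (illustrative tables and condition; the rendered clause depends on the join's configuration):

    join_item.get_sql()
    # -> 'LEFT JOIN order ON order.account_id = account.id'
    #    given join_type='LEFT JOIN', a right table 'order', and that condition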
<SYSTEM_TASK:> Sets the left table for this join clause. If no table is specified, the first table <END_TASK>
<USER_TASK:>
Description:
def set_left_table(self, left_table=None):
    """
    Sets the left table for this join clause. If no table is specified, the first table
    in the query will be used

    :type left_table: str or dict or :class:`Table <querybuilder.tables.Table>` or None
    :param left_table: The left table being joined with. This can be a string of the table
        name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first
        table in the query.
    """
    if left_table:
        self.left_table = TableFactory(
            table=left_table,
            owner=self.owner,
        )
    else:
        self.left_table = self.get_left_table()
<SYSTEM_TASK:> Returns the left table if one was specified, otherwise the first <END_TASK>
<USER_TASK:>
Description:
def get_left_table(self):
    """
    Returns the left table if one was specified, otherwise the first
    table in the query is returned

    :rtype: :class:`Table <querybuilder.tables.Table>`
    :return: the left table if one was specified, otherwise the first table
        in the query
    """
    if self.left_table:
        return self.left_table

    if len(self.owner.tables):
        return self.owner.tables[0]
<SYSTEM_TASK:> Fix for django 1.10 to replace deprecated code. Keep support for django 1.7 <END_TASK>
<USER_TASK:>
Description:
def get_all_related_objects(self, table):
    """
    Fix for django 1.10 to replace deprecated code. Keep support for django 1.7
    """
    # Django 1.7 method
    if hasattr(table.model._meta, 'get_all_related_objects'):
        return table.model._meta.get_all_related_objects()
    else:
        # Django > 1.7
        return [
            f for f in table.model._meta.get_fields()
            if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
        ]
<SYSTEM_TASK:> Sets the right table for this join clause and try to automatically set the condition <END_TASK>
<USER_TASK:>
Description:
def set_right_table(self, table):
    """
    Sets the right table for this join clause and try to automatically set the condition
    if one isn't specified
    """
    self.right_table = table
    if self.left_table is None:
        return

    # find table prefix
    if type(self.left_table) is ModelTable and type(self.right_table) is ModelTable:
        # loop through fields to find the field for this model

        # check if this join type is for a related field
        for field in self.get_all_related_objects(self.left_table):
            related_model = field.model
            if hasattr(field, 'related_model'):
                related_model = field.related_model
            if related_model == self.right_table.model:
                if self.right_table.field_prefix is None:
                    self.right_table.field_prefix = field.get_accessor_name()
                    if len(self.right_table.field_prefix) > 4 and self.right_table.field_prefix[-4:] == '_set':
                        self.right_table.field_prefix = self.right_table.field_prefix[:-4]
                return

        # check if this join type is for a foreign key
        for field in self.left_table.model._meta.fields:
            if (
                field.get_internal_type() == 'OneToOneField' or
                field.get_internal_type() == 'ForeignKey'
            ):
                if field.remote_field.model == self.right_table.model:
                    if self.right_table.field_prefix is None:
                        self.right_table.field_prefix = field.name
                    return
<SYSTEM_TASK:> Determines the condition to be used in the condition part of the join sql. <END_TASK>
<USER_TASK:>
Description:
def get_condition(self):
    """
    Determines the condition to be used in the condition part of the join sql.

    :return: The condition for the join clause
    :rtype: str or None
    """
    if self.condition:
        return self.condition

    if type(self.right_table) is ModelTable and type(self.left_table) is ModelTable:
        # loop through fields to find the field for this model

        # check if this join type is for a related field
        for field in self.get_all_related_objects(self.right_table):
            related_model = field.model
            if hasattr(field, 'related_model'):
                related_model = field.related_model
            if related_model == self.left_table.model:
                table_join_field = field.field.column
                # self.table_join_name = field.get_accessor_name()
                condition = '{0}.{1} = {2}.{3}'.format(
                    self.right_table.get_identifier(),
                    self.right_table.model._meta.pk.name,
                    self.left_table.get_identifier(),
                    table_join_field,
                )
                return condition

        # check if this join type is for a foreign key
        for field in self.right_table.model._meta.fields:
            if (
                field.get_internal_type() == 'OneToOneField' or
                field.get_internal_type() == 'ForeignKey'
            ):
                if field.remote_field.model == self.left_table.model:
                    table_join_field = field.column
                    # self.table_join_name = field.name
                    condition = '{0}.{1} = {2}.{3}'.format(
                        self.right_table.get_identifier(),
                        table_join_field,
                        self.left_table.get_identifier(),
                        self.left_table.model._meta.pk.name
                    )
                    return condition
    return None
<SYSTEM_TASK:> Builds and returns the WHERE portion of the sql <END_TASK>
<USER_TASK:>
Description:
def get_sql(self):
    """
    Builds and returns the WHERE portion of the sql

    :return: the WHERE portion of the sql
    :rtype: str
    """
    # reset arg index and args
    self.arg_index = 0
    self.args = {}

    # build the WHERE sql portion if needed
    if len(self.wheres):
        where = self.build_where_part(self.wheres)
        return 'WHERE {0} '.format(where)
    return ''
<SYSTEM_TASK:> Gets the condition value based on the operator and value <END_TASK>
<USER_TASK:>
Description:
def get_condition_value(self, operator, value):
    """
    Gets the condition value based on the operator and value

    :param operator: the condition operator name
    :type operator: str
    :param value: the value to be formatted based on the condition operator
    :type value: object

    :return: the value formatted for the condition operator
    :rtype: str
    """
    if operator in ('contains', 'icontains'):
        value = '%{0}%'.format(value)
    elif operator == 'startswith':
        value = '{0}%'.format(value)
    return value
<SYSTEM_TASK:> Set the query param in self.args based on the prefix and arg index <END_TASK>
<USER_TASK:>
Description:
def set_arg(self, value):
    """
    Set the query param in self.args based on the prefix and arg index
    and auto increment the arg_index

    :return: the string placeholder for the arg
    :rtype: str
    """
    named_arg = '{0}A{1}'.format(self.arg_prefix, self.arg_index)
    self.args[named_arg] = value
    self.arg_index += 1
    return named_arg
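A small sketch (assumes a ``Where`` instance whose ``arg_prefix`` is the empty string):

    where.set_arg(25)     # -> 'A0'; where.args == {'A0': 25}
    where.set_arg('Bob')  # -> 'A1'; where.args == {'A0': 25, 'A1': 'Bob'}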
<SYSTEM_TASK:> Gets the name to reference the sorted field <END_TASK>
<USER_TASK:>
Description:
def get_name(self, use_alias=True):
    """
    Gets the name to reference the sorted field

    :return: the name to reference the sorted field
    :rtype: str
    """
    if self.desc:
        direction = 'DESC'
    else:
        direction = 'ASC'

    if use_alias:
        return '{0} {1}'.format(self.field.get_identifier(), direction)
    return '{0} {1}'.format(self.field.get_select_sql(), direction)
<SYSTEM_TASK:> Generates the sql used for the limit clause of a Query <END_TASK>
<USER_TASK:>
Description:
def get_sql(self):
    """
    Generates the sql used for the limit clause of a Query

    :return: the sql for the limit clause of a Query
    :rtype: str
    """
    sql = ''
    if self.limit and self.limit > 0:
        sql += 'LIMIT {0} '.format(self.limit)
    if self.offset and self.offset > 0:
        sql += 'OFFSET {0} '.format(self.offset)
    return sql
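For instance (assuming the instance was built with ``limit=10, offset=20``):

    limit_clause.get_sql()  # -> 'LIMIT 10 OFFSET 20 '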
<SYSTEM_TASK:> Sets the default values for this instance <END_TASK>
<USER_TASK:>
Description:
def init_defaults(self):
    """
    Sets the default values for this instance
    """
    self.sql = ''
    self.tables = []
    self.joins = []
    self._where = Where()
    self.groups = []
    self.sorters = []
    self._limit = None
    self.table_prefix = ''
    self.is_inner = False
    self.with_tables = []
    self._distinct = False
    self.distinct_ons = []
    self.field_names = []
    self.field_names_pk = None
    self.values = []
<SYSTEM_TASK:> Adds a ``Table`` and any optional fields to the list of tables <END_TASK>
<USER_TASK:>
Description:
def from_table(self, table=None, fields='*', schema=None, **kwargs):
    """
    Adds a ``Table`` and any optional fields to the list of tables
    this query is selecting from.

    :type table: str or dict or :class:`Table <querybuilder.tables.Table>`
        or :class:`Query <querybuilder.query.Query>`
        or :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: The table to select fields from. This can be a string of the table
        name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a
        django Model instance

    :type fields: str or tuple or list or Field
    :param fields: The fields to select from ``table``. Defaults to '*'. This can be
        a single field, a tuple of fields, or a list of fields. Each field can be a string
        or ``Field`` instance

    :type schema: str
    :param schema: This is not implemented, but it will be a string of the db schema name

    :param kwargs: Any additional parameters to be passed into the constructor of
        ``TableFactory``

    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    # self.mark_dirty()

    self.tables.append(TableFactory(
        table=table,
        fields=fields,
        schema=schema,
        owner=self,
        **kwargs
    ))

    return self
<SYSTEM_TASK:> Bulk inserts a list of values into a table <END_TASK>
<USER_TASK:>
Description:
def insert_into(self, table=None, field_names=None, values=None, **kwargs):
    """
    Bulk inserts a list of values into a table

    :type table: str or dict or :class:`Table <querybuilder.tables.Table>`
        or :class:`Query <querybuilder.query.Query>`
        or :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: The table to select fields from. This can be a string of the table
        name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a
        django Model instance

    :type field_names: list
    :param field_names: A list of ordered field names that relate to the data in the
        values list

    :type values: list of list
    :param values: A list of value lists, with the values in the same order as the
        field names

    :param kwargs: Any additional parameters to be passed into the constructor of
        ``TableFactory``

    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    table = TableFactory(
        table=table,
        **kwargs
    )
    self.tables.append(table)
    self.field_names = field_names
    self.values = values

    return self
<SYSTEM_TASK:> Bulk updates rows in a table <END_TASK>
<USER_TASK:>
Description:
def update_table(self, table=None, field_names=None, values=None, pk=None, **kwargs):
    """
    Bulk updates rows in a table

    :type table: str or dict or :class:`Table <querybuilder.tables.Table>`
        or :class:`Query <querybuilder.query.Query>`
        or :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: The table to select fields from. This can be a string of the table
        name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a
        django Model instance

    :type field_names: list
    :param field_names: A list of ordered field names that relate to the data in the
        values list

    :type values: list of list
    :param values: A list of value lists, with the values in the same order as the
        field names

    :type pk: int
    :param pk: The name of the primary key in the table and field_names

    :param kwargs: Any additional parameters to be passed into the constructor of
        ``TableFactory``

    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: self
    """
    table = TableFactory(
        table=table,
        **kwargs
    )
    self.tables.append(table)
    self.field_names = field_names
    self.values = values
    self.field_names_pk = pk
<SYSTEM_TASK:> Joins a table to another table based on a condition and adds fields from the joined table <END_TASK>
<USER_TASK:>
Description:
def join(self, right_table=None, fields=None, condition=None, join_type='JOIN',
         schema=None, left_table=None, extract_fields=True,
         prefix_fields=False, field_prefix=None, allow_duplicates=False):
    """
    Joins a table to another table based on a condition and adds fields from the joined
    table to the returned fields.

    :type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
    :param right_table: The table being joined with. This can be a string of the table
        name, a dict of {'alias': table}, or a ``Table`` instance

    :type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
    :param fields: The fields to select from ``right_table``. Defaults to `None`. This can
        be a single field, a tuple of fields, or a list of fields. Each field can be a string
        or ``Field`` instance

    :type condition: str
    :param condition: The join condition specifying the fields being joined. If the two
        tables being joined are instances of ``ModelTable`` then the condition should be
        created automatically.

    :type join_type: str
    :param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc). Defaults to 'JOIN'

    :type schema: str
    :param schema: This is not implemented, but it will be a string of the db schema name

    :type left_table: str or dict or Table
    :param left_table: The left table being joined with. This can be a string of the table
        name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first
        table in the query.

    :type extract_fields: bool
    :param extract_fields: If True and joining with a ``ModelTable``, then '*' fields will
        be converted to individual fields for each column in the table. Defaults to True.

    :type prefix_fields: bool
    :param prefix_fields: If True, then the joined table will have each of its field names
        prefixed with the field_prefix. If no field_prefix is specified, a name will be
        generated based on the join field name. This is usually used with nesting results
        in order to create models in python or javascript. Defaults to False.

    :type field_prefix: str
    :param field_prefix: The field prefix to be used in front of each field name if
        prefix_fields is set to True. If no field_prefix is set, one will be automatically
        created based on the join field name.

    :type allow_duplicates: bool
    :param allow_duplicates: If True, the join will be added even if an identical join
        already exists on this query. Defaults to False.

    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: self
    """
    # self.mark_dirty()
    # TODO: fix bug when joining from simple table to model table with no condition
    # it assumes left_table.model

    # if there is no left table, assume the query's first table
    # TODO: add test for auto left table to replace old auto left table
    # if left_table is None and len(self.tables):
    #     left_table = self.tables[0]
    # left_table = TableFactory(left_table)
    # right_table = TableFactory(right_table)

    # create the join item
    new_join_item = Join(
        left_table=left_table,
        right_table=right_table,
        fields=fields,
        condition=condition,
        join_type=join_type,
        schema=schema,
        owner=self,
        extract_fields=extract_fields,
        prefix_fields=prefix_fields,
        field_prefix=field_prefix,
    )

    # check if this table is already joined upon
    # TODO: add test for this
    if allow_duplicates is False:
        for join_item in self.joins:
            if join_item.right_table.get_identifier() == new_join_item.right_table.get_identifier():
                if join_item.left_table.get_identifier() == new_join_item.left_table.get_identifier():
                    return self

    self.joins.append(new_join_item)
    return self
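A hypothetical usage sketch (``Account`` and ``Order`` are placeholder django models; the condition is spelled out explicitly here, though two ``ModelTable`` sides could infer it):

    query = Query().from_table(Account).join(
        Order,                                     # right table
        fields=['total', 'time'],                  # illustrative columns
        condition='order.account_id = account.id',
    )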
<SYSTEM_TASK:> Wrapper for ``self.join`` with a default join of 'LEFT JOIN' <END_TASK>
<USER_TASK:>
Description:
def join_left(self, right_table=None, fields=None, condition=None, join_type='LEFT JOIN',
              schema=None, left_table=None, extract_fields=True,
              prefix_fields=False, field_prefix=None, allow_duplicates=False):
    """
    Wrapper for ``self.join`` with a default join of 'LEFT JOIN'

    :type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
    :param right_table: The table being joined with. This can be a string of the table
        name, a dict of {'alias': table}, or a ``Table`` instance

    :type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
    :param fields: The fields to select from ``right_table``. Defaults to `None`. This can
        be a single field, a tuple of fields, or a list of fields. Each field can be a string
        or ``Field`` instance

    :type condition: str
    :param condition: The join condition specifying the fields being joined. If the two
        tables being joined are instances of ``ModelTable`` then the condition should be
        created automatically.

    :type join_type: str
    :param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc). Defaults to
        'LEFT JOIN'

    :type schema: str
    :param schema: This is not implemented, but it will be a string of the db schema name

    :type left_table: str or dict or :class:`Table <querybuilder.tables.Table>`
    :param left_table: The left table being joined with. This can be a string of the table
        name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first
        table in the query.

    :type extract_fields: bool
    :param extract_fields: If True and joining with a ``ModelTable``, then '*' fields will
        be converted to individual fields for each column in the table. Defaults to True.

    :type prefix_fields: bool
    :param prefix_fields: If True, then the joined table will have each of its field names
        prefixed with the field_prefix. If no field_prefix is specified, a name will be
        generated based on the join field name. This is usually used with nesting results
        in order to create models in python or javascript. Defaults to False.

    :type field_prefix: str
    :param field_prefix: The field prefix to be used in front of each field name if
        prefix_fields is set to True. If no field_prefix is set, one will be automatically
        created based on the join field name.

    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    return self.join(
        right_table=right_table,
        fields=fields,
        condition=condition,
        join_type=join_type,
        schema=schema,
        left_table=left_table,
        extract_fields=extract_fields,
        prefix_fields=prefix_fields,
        field_prefix=field_prefix,
        allow_duplicates=allow_duplicates
    )
<SYSTEM_TASK:> Adds a where condition as a Q object to the query's ``Where`` instance. <END_TASK>
<USER_TASK:>
Description:
def where(self, q=None, where_type='AND', **kwargs):
    """
    Adds a where condition as a Q object to the query's ``Where`` instance.

    :type q: :class:`Q <django:django.db.models.Q>`
    :param q: A django ``Q`` instance. This will be added to the query's ``Where`` object.
        If no Q object is passed, the kwargs will be examined for params to be added to
        Q objects

    :type where_type: str
    :param where_type: The connection type of the where condition ('AND', 'OR')

    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    # self.mark_dirty()
    if q is not None:
        self._where.wheres.add(q, where_type)
    if len(kwargs):
        for key, value in kwargs.items():
            q = Q(**{
                key: value
            })
            self._where.wheres.add(q, where_type)
    return self
<SYSTEM_TASK:> Adds a group by clause to the query by adding a ``Group`` instance to the query's <END_TASK>
<USER_TASK:>
Description:
def group_by(self, field=None, table=None, allow_duplicates=False):
    """
    Adds a group by clause to the query by adding a ``Group`` instance to the query's
    groups list

    :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
    :param field: This can be a string of a field name, a dict of {'alias': field}, or
        a ``Field`` instance

    :type table: str or dict or :class:`Table <querybuilder.table.Table>`
    :param table: Optional. This can be a string of a table name, a dict of
        {'alias': table}, or a ``Table`` instance. A table only needs to be supplied in
        more complex queries where the field name is ambiguous.

    :return: self
    :rtype: :class:`Query <querybuilder.query.Query>`
    """
    new_group_item = Group(
        field=field,
        table=table,
    )

    if allow_duplicates is False:
        for group_item in self.groups:
            if group_item.field.get_identifier() == new_group_item.field.get_identifier():
                return self

    self.groups.append(new_group_item)
    return self
<SYSTEM_TASK:> Adds an order by clause to the query by adding a ``Sorter`` instance to the query's <END_TASK>
<USER_TASK:>
Description:
def order_by(self, field=None, table=None, desc=False):
    """
    Adds an order by clause to the query by adding a ``Sorter`` instance to the query's
    sorters list

    :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
    :param field: This can be a string of a field name, a dict of {'alias': field}, or
        a ``Field`` instance

    :type table: str or dict or :class:`Table <querybuilder.table.Table>`
    :param table: Optional. This can be a string of a table name, a dict of
        {'alias': table}, or a ``Table`` instance. A table only needs to be supplied in
        more complex queries where the field name is ambiguous.

    :type desc: bool
    :param desc: Set to True to sort by this field in DESC order or False to sort by this
        field in ASC order. Defaults to False.

    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: self
    """
    self.sorters.append(Sorter(
        field=field,
        table=table,
        desc=desc
    ))
    return self
<SYSTEM_TASK:> Checks if there are any tables referenced by the same identifier and updates the <END_TASK>
<USER_TASK:>
Description:
def check_name_collisions(self):
    """
    Checks if there are any tables referenced by the same identifier and updates the
    auto_alias accordingly. This is called when generating the sql for a query and
    should only be called internally.
    """
    table_index = 0
    table_names = {}
    for table in self.tables + self.with_tables:
        table_prefix = 'T{0}'.format(table_index)
        auto_alias = '{0}{1}'.format(self.table_prefix, table_prefix)

        identifier = table.get_identifier()
        if identifier is None or identifier in table_names:
            table.auto_alias = auto_alias
        table_names[identifier] = True

        # prefix inner query args and update self args
        if type(table) is QueryTable:
            table.query.prefix_args(auto_alias)
            table.query.table_prefix = auto_alias

        table_index += 1
<SYSTEM_TASK:> Generates the sql for this query and returns the sql as a string. <END_TASK>
<USER_TASK:>
Description:
def get_sql(self, debug=False, use_cache=True):
    """
    Generates the sql for this query and returns the sql as a string.

    :type debug: bool
    :param debug: If True, the sql will be returned in a format that is easier to read and
        debug. Defaults to False

    :type use_cache: bool
    :param use_cache: If True, the query will return the cached sql if it exists rather
        than generating it again. If False, the sql will be generated again. Defaults to
        True.

    :rtype: str
    :return: The generated sql for this query
    """
    # TODO: enable caching
    # if self.sql and use_cache and not debug:
    #     return self.sql

    # auto alias any naming collisions
    self.check_name_collisions()

    # if debugging, return the debug formatted sql
    if debug:
        return self.format_sql()

    # build each part of the query
    sql = ''
    sql += self.build_withs()
    sql += self.build_select_fields()
    sql += self.build_from_table()
    sql += self.build_joins()
    sql += self.build_where()
    sql += self.build_groups()
    sql += self.build_order_by()
    sql += self.build_limit()

    # remove any whitespace from the beginning and end of the sql
    self.sql = sql.strip()

    return self.sql
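A small end-to-end sketch (the output is shown approximately; exact aliasing can vary):

    query = Query().from_table('account', fields=['id', 'name']).order_by('name')
    query.get_sql()
    # -> roughly 'SELECT account.id, account.name FROM account ORDER BY name ASC'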
<SYSTEM_TASK:> Builds the sql in a format that is easy for humans to read and debug <END_TASK>
<USER_TASK:>
Description:
def format_sql(self):
    """
    Builds the sql in a format that is easy for humans to read and debug

    :return: The formatted sql for this query
    :rtype: str
    """
    # TODO: finish adding the other parts of the sql generation
    sql = ''

    # build SELECT
    select_segment = self.build_select_fields()
    select_segment = select_segment.replace('SELECT ', '', 1)
    fields = [field.strip() for field in select_segment.split(',')]
    sql += 'SELECT\n\t{0}\n'.format(',\n\t'.join(fields))

    # build FROM
    from_segment = self.build_from_table()
    from_segment = from_segment.replace('FROM ', '', 1)
    tables = [table.strip() for table in from_segment.split(',')]
    sql += 'FROM\n\t{0}\n'.format(',\n\t'.join(tables))

    # build ORDER BY
    order_by_segment = self.build_order_by()
    if len(order_by_segment):
        order_by_segment = order_by_segment.replace('ORDER BY ', '', 1)
        sorters = [sorter.strip() for sorter in order_by_segment.split(',')]
        sql += 'ORDER BY\n\t{0}\n'.format(',\n\t'.join(sorters))

    # build LIMIT
    limit_segment = self.build_limit()
    if len(limit_segment):
        if 'LIMIT' in limit_segment:
            limit_segment = limit_segment.replace('LIMIT ', 'LIMIT\n\t', 1)
            if 'OFFSET' in limit_segment:
                limit_segment = limit_segment.replace('OFFSET ', '\nOFFSET\n\t', 1)
        elif 'OFFSET' in limit_segment:
            limit_segment = limit_segment.replace('OFFSET ', 'OFFSET\n\t', 1)
        sql += limit_segment

    return sql
<SYSTEM_TASK:> Generates the sql for the SELECT portion of the query <END_TASK>
<USER_TASK:>
Description:
def build_select_fields(self):
    """
    Generates the sql for the SELECT portion of the query

    :return: the SELECT portion of the query
    :rtype: str
    """
    field_sql = []

    # get the field sql for each table
    for table in self.tables:
        field_sql += table.get_field_sql()

    # get the field sql for each join table
    for join_item in self.joins:
        field_sql += join_item.right_table.get_field_sql()

    # combine all field sql separated by a comma
    sql = 'SELECT {0}{1} '.format(self.get_distinct_sql(), ', '.join(field_sql))
    return sql
<SYSTEM_TASK:> Generates the sql for the FROM portion of the query <END_TASK>
<USER_TASK:>
Description:
def build_from_table(self):
    """
    Generates the sql for the FROM portion of the query

    :return: the FROM portion of the query
    :rtype: str
    """
    table_parts = []

    # get the table sql for each table
    for table in self.tables:
        sql = table.get_sql()
        if len(sql):
            table_parts.append(sql)

    # combine all table sql separated by a comma
    sql = 'FROM {0} '.format(', '.join(table_parts))
    return sql
<SYSTEM_TASK:> Generates the sql for the JOIN portion of the query <END_TASK>
<USER_TASK:>
Description:
def build_joins(self):
    """
    Generates the sql for the JOIN portion of the query

    :return: the JOIN portion of the query
    :rtype: str
    """
    join_parts = []

    # get the sql for each join object
    for join_item in self.joins:
        join_parts.append(join_item.get_sql())

    # if there are any joins, combine them
    if len(join_parts):
        combined_joins = ' '.join(join_parts)
        return '{0} '.format(combined_joins)
    return ''
<SYSTEM_TASK:> Generates the sql for the GROUP BY portion of the query <END_TASK>
<USER_TASK:>
Description:
def build_groups(self):
    """
    Generates the sql for the GROUP BY portion of the query

    :return: the GROUP BY portion of the query
    :rtype: str
    """
    # check if there are any groupings
    if len(self.groups):
        groups = []

        # get the group sql for each grouping
        for group in self.groups:
            groups.append(group.get_name())
        return 'GROUP BY {0} '.format(', '.join(groups))
    return ''
<SYSTEM_TASK:> Generates the sql for the ORDER BY portion of the query <END_TASK>
<USER_TASK:>
Description:
def build_order_by(self, use_alias=True):
    """
    Generates the sql for the ORDER BY portion of the query

    :type use_alias: bool
    :param use_alias: If True, the alias for the field will be used in the order by.
        This is an option because query windows do not use the alias. Defaults to True.

    :return: the ORDER BY portion of the query
    :rtype: str
    """
    # check if there are any sorters
    if len(self.sorters):
        sorters = []

        # get the sql for each sorter
        for sorter in self.sorters:
            sorters.append(sorter.get_name(use_alias=use_alias))
        return 'ORDER BY {0} '.format(', '.join(sorters))
    return ''
<SYSTEM_TASK:> Finds a table by name or alias. The FROM tables and JOIN tables <END_TASK>
<USER_TASK:>
Description:
def find_table(self, table):
    """
    Finds a table by name or alias. The FROM tables and JOIN tables
    are included in the search.

    :type table: str or :class:`ModelBase <django:django.db.models.base.ModelBase>`
    :param table: string of the table name or alias or a ModelBase instance

    :return: The table if it is found, otherwise None
    :rtype: Table or None
    """
    table = TableFactory(table)
    identifier = table.get_identifier()
    join_tables = [join_item.right_table for join_item in self.joins]
    for table in (self.tables + join_tables):
        if table.get_identifier() == identifier:
            return table
    return None
<SYSTEM_TASK:> Wraps the query by selecting all fields from itself <END_TASK>
<USER_TASK:>
Description:
def wrap(self, alias=None):
    """
    Wraps the query by selecting all fields from itself

    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: The wrapped query
    """
    field_names = self.get_field_names()
    query = Query(self.connection).from_table(deepcopy(self), alias=alias)
    self.__dict__.update(query.__dict__)

    # set explicit field names
    self.tables[0].set_fields(field_names)

    return self
<SYSTEM_TASK:> Deeply copies everything in the query object except the connection object is shared <END_TASK>
<USER_TASK:>
Description:
def copy(self):
    """
    Deeply copies everything in the query object except the connection object is shared
    """
    connection = self.connection
    del self.connection
    copied_query = deepcopy(self)
    copied_query.connection = connection
    self.connection = connection
    return copied_query
<SYSTEM_TASK:> Gets the args for the query which will be escaped when being executed by the <END_TASK>
<USER_TASK:>
Description:
def get_args(self):
    """
    Gets the args for the query which will be escaped when being executed by the
    db. All inner queries are inspected and their args are combined with this
    query's args.

    :return: all args for this query as a dict
    :rtype: dict
    """
    for table in self.tables + self.with_tables:
        if type(table) is QueryTable:
            self._where.args.update(table.query.get_args())

    return self._where.args
<SYSTEM_TASK:> Runs EXPLAIN on this query <END_TASK>
<USER_TASK:>
Description:
def explain(self, sql=None, sql_args=None):
    """
    Runs EXPLAIN on this query

    :type sql: str or None
    :param sql: The sql to run EXPLAIN on. If None is specified, the query will
        use ``self.get_sql()``

    :type sql_args: dict or None
    :param sql_args: A dictionary of the arguments to be escaped in the query. If None and
        sql is None, the query will use ``self.get_args()``

    :rtype: list of str
    :return: list of each line of output from the EXPLAIN statement
    """
    cursor = self.get_cursor()
    if sql is None:
        sql = self.get_sql()
        sql_args = self.get_args()
    elif sql_args is None:
        sql_args = {}

    cursor.execute('EXPLAIN {0}'.format(sql), sql_args)
    rows = self._fetch_all_as_dict(cursor)
    return rows
<SYSTEM_TASK:> Executes the SELECT statement and returns the rows as a list of dictionaries or a list of <END_TASK>
<USER_TASK:>
Description:
def select(self, return_models=False, nest=False, bypass_safe_limit=False, sql=None, sql_args=None):
    """
    Executes the SELECT statement and returns the rows as a list of dictionaries or a
    list of model instances

    :type return_models: bool
    :param return_models: Set to True to return a list of models instead of a list of
        dictionaries. Defaults to False

    :type nest: bool
    :param nest: Set to True to treat all double underscores in keynames as nested data.
        This will convert all keys with double underscores to dictionaries keyed off of the
        left side of the underscores.
        Ex: {"id": 1, "account__id": 1, "account__name": "Name"} becomes
        {"id": 1, "account": {"id": 1, "name": "Name"}}

    :type bypass_safe_limit: bool
    :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is
        enabled

    :type sql: str or None
    :param sql: The sql to execute in the SELECT statement. If one is not specified, then
        the query will use ``self.get_sql()``

    :type sql_args: str or None
    :param sql_args: The sql args to be used in the SELECT statement. If none are
        specified, then the query will use ``self.get_args()``

    :rtype: list of dict
    :return: list of dictionaries of the rows
    """
    # Check if we need to set a safe limit
    if bypass_safe_limit is False:
        if Query.enable_safe_limit:
            if self.count() > Query.safe_limit:
                self.limit(Query.safe_limit)

    # determine which sql to use
    if sql is None:
        sql = self.get_sql()

    # determine which sql args to use
    if sql_args is None:
        sql_args = self.get_args()

    # get the cursor to execute the query
    cursor = self.get_cursor()

    # execute the query
    cursor.execute(sql, sql_args)

    # get the results as a list of dictionaries
    rows = self._fetch_all_as_dict(cursor)

    # check if models should be returned instead of dictionaries
    if return_models:

        # set nesting to true, so the nested models can easily load the data
        nest = True

        # build model map of map name to model
        model_map = {}
        for join_item in self.joins:
            model_map[join_item.right_table.field_prefix] = join_item.right_table.model

    # check if results should be nested
    if nest:

        # convert keys with double underscores to dictionaries
        for row in rows:
            _row = row.copy()
            for key, value in _row.items():
                set_value_for_keypath(row, key, value, True, '__')
                if '__' in key:
                    row.pop(key)

    # create models if needed
    if return_models:
        model_class = self.tables[0].model
        new_rows = []
        for row in rows:
            model = model_class()

            # assign all non-model keys first because django 1.5 requires
            # that the model has an id set before setting a property that is
            # a foreign key
            for key, value in row.items():
                if key not in model_map:
                    setattr(model, key, value)

            # assign all model instances
            for key, value in row.items():
                if key in model_map:
                    child_model = model_map[key]()
                    for child_key, child_value in value.items():
                        setattr(child_model, child_key, child_value)
                    value = child_model
                    setattr(model, key, value)

            new_rows.append(model)
        rows = new_rows

    return rows
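An illustrative sketch of the nesting behavior (field names are made up):

    rows = query.select(nest=True)
    # a flat row {'id': 1, 'account__id': 2, 'account__name': 'Bob'}
    # comes back as {'id': 1, 'account': {'id': 2, 'name': 'Bob'}}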
<SYSTEM_TASK:> Returns the column name of the model's AutoField, or None if the model has no AutoField <END_TASK> <USER_TASK:> Description: def get_auto_field_name(self, model_class): """ Returns the column name of the model's AutoField (a model can only have one), or None if the model does not have an AutoField """
# Get auto field name (a model can only have one AutoField) for field in model_class._meta.fields: if isinstance(field, AutoField): return field.column return None
<SYSTEM_TASK:> Performs an upsert with the set of models defined in rows. If the unique field which is meant <END_TASK> <USER_TASK:> Description: def upsert(self, rows, unique_fields, update_fields, return_rows=False, return_models=False): """ Performs an upsert with the set of models defined in rows. If the unique field that is meant to cause a conflict is an auto-increment field, rows where that field is null can never conflict, so they are split out and handled with an insert-only statement after the upsert of the remaining rows """
if len(rows) == 0:
            return []

        ModelClass = self.tables[0].model

        rows_with_null_auto_field_value = []

        # Get auto field name (a model can only have one AutoField)
        auto_field_name = self.get_auto_field_name(ModelClass)

        # Check if unique fields list contains an auto field
        if auto_field_name in unique_fields:
            # Separate the rows that need to be inserted vs the rows that need to be upserted
            rows_with_null_auto_field_value = [row for row in rows if getattr(row, auto_field_name) is None]
            rows = [row for row in rows if getattr(row, auto_field_name) is not None]

        return_value = []

        if rows:
            sql, sql_args = self.get_upsert_sql(
                rows,
                unique_fields,
                update_fields,
                auto_field_name=auto_field_name,
                return_rows=return_rows or return_models
            )

            # get the cursor to execute the query
            cursor = self.get_cursor()

            # execute the upsert query
            cursor.execute(sql, sql_args)

            if return_rows or return_models:
                return_value.extend(self._fetch_all_as_dict(cursor))

        if rows_with_null_auto_field_value:
            sql, sql_args = self.get_upsert_sql(
                rows_with_null_auto_field_value,
                unique_fields,
                update_fields,
                auto_field_name=auto_field_name,
                only_insert=True,
                return_rows=return_rows or return_models
            )

            # get the cursor to execute the query
            cursor = self.get_cursor()

            # execute the upsert query
            cursor.execute(sql, sql_args)

            if return_rows or return_models:
                return_value.extend(self._fetch_all_as_dict(cursor))

        if return_models:
            model_objects = [
                ModelClass(**row_dict) for row_dict in return_value
            ]

            # Set the state to indicate the object has been loaded from db
            for model_object in model_objects:
                model_object._state.adding = False
                model_object._state.db = 'default'

            return_value = model_objects

        return return_value
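A hedged usage sketch for the method above; ``Account`` is a hypothetical Django model with an auto-increment ``id``, and the ``Query().from_table(...)`` entry point is assumed:

rows = [
    Account(name='new'),            # id is None, handled by the insert-only path
    Account(id=3, name='renamed'),  # id set, handled by the upsert path
]
models = Query().from_table(Account).upsert(
    rows,
    unique_fields=['id'],
    update_fields=['name'],
    return_models=True,
)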
<SYSTEM_TASK:> Copies the query object and alters the field list and order by to do a more efficient count <END_TASK> <USER_TASK:> Description: def get_count_query(self): """ Copies the query object and alters the field list and order by to do a more efficient count """
query_copy = self.copy() if not query_copy.tables: raise Exception('No tables specified to do a count') for table in query_copy.tables: del table.fields[:] query_copy.tables[0].add_field(CountField('*')) del query_copy.sorters[:] return query_copy
<SYSTEM_TASK:> Returns a COUNT of the query by wrapping the query and performing a COUNT <END_TASK> <USER_TASK:> Description: def count(self, field='*'): """ Returns a COUNT of the query by wrapping the query and performing a COUNT aggregate of the specified field :param field: the field to pass to the COUNT aggregate. Defaults to '*' :type field: str :return: The number of rows that the query will return :rtype: int """
rows = self.get_count_query().select(bypass_safe_limit=True) return list(rows[0].values())[0]
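The return expression above assumes the count query yields a single one-column row; a minimal illustration of the extraction (the alias is hypothetical):

rows = [{'count_star': 42}]              # shape produced by get_count_query().select()
assert list(rows[0].values())[0] == 42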
<SYSTEM_TASK:> Iterates over the result set and converts each row to a dictionary <END_TASK> <USER_TASK:> Description: def _fetch_all_as_dict(self, cursor): """ Iterates over the result set and converts each row to a dictionary :return: A list of dictionaries, one for each row in the result set :rtype: list of dict """
desc = cursor.description return [ dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall() ]
<SYSTEM_TASK:> Generates the sql for this query window and returns the sql as a string. <END_TASK> <USER_TASK:> Description: def get_sql(self, debug=False, use_cache=True): """ Generates the sql for this query window and returns the sql as a string. :type debug: bool :param debug: If True, the sql will be returned in a format that is easier to read and debug. Defaults to False :type use_cache: bool :param use_cache: If True, the query will return the cached sql if it exists rather than generating the sql again. If False, the sql will be generated again. Defaults to True. :rtype: str :return: The generated sql for this query window """
# TODO: the debug and use_cache arguments are accepted for API
        # compatibility but are not implemented yet
        sql = ''
        sql += self.build_partition_by_fields()
        sql += self.build_order_by(use_alias=False)
        sql += self.build_limit()
        sql = sql.strip()
        sql = 'OVER ({0})'.format(sql)
        self.sql = sql
        return self.sql
<SYSTEM_TASK:> Returns the value of a keypath in a dictionary <END_TASK> <USER_TASK:> Description: def value_for_keypath(data, keypath): """ Returns the value of a keypath in a dictionary if the keypath exists or None if the keypath does not exist. """
        if len(keypath) == 0:
            return data

        keys = keypath.split('.')
        value = data
        for key in keys:
            if key in value:
                value = value[key]
            else:
                return None

        return value
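Worked examples of the helper above:

data = {'account': {'owner': {'name': 'Ada'}}}
value_for_keypath(data, 'account.owner.name')   # 'Ada'
value_for_keypath(data, 'account.missing')      # None
value_for_keypath(data, '')                     # empty keypath returns the whole dict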
<SYSTEM_TASK:> Sets the value for a keypath in a dictionary <END_TASK> <USER_TASK:> Description: def set_value_for_keypath(item, keypath, value, create_if_needed=False, delimeter='.'): """ Sets the value for a keypath in a dictionary if the keypath exists, creating the intermediate dictionaries when ``create_if_needed`` is True. This modifies the original dictionary. """
if len(keypath) == 0: return None keys = keypath.split(delimeter) if len(keys) > 1: key = keys[0] if create_if_needed: item[key] = item.get(key, {}) if key in item: if set_value_for_keypath(item[key], delimeter.join(keys[1:]), value, create_if_needed=create_if_needed, delimeter=delimeter): return item return None if create_if_needed: item[keypath] = item.get(keypath, {}) if keypath in item: item[keypath] = value return item else: return None
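A worked example of the helper above, using the '__' delimiter that ``select(nest=True)`` passes in:

row = {'account__id': 5}
set_value_for_keypath(row, 'account__id', 5, True, '__')
# row is now {'account__id': 5, 'account': {'id': 5}}; the caller in
# select() pops the flat 'account__id' key afterwards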
<SYSTEM_TASK:> Gets the alias for the field or the auto_alias if one is set. <END_TASK> <USER_TASK:> Description: def get_alias(self): """ Gets the alias for the field or the auto_alias if one is set. If the field's table prefixes its fields, the field prefix is prepended to the alias (or to the field name when there is no alias). If there isn't any kind of alias or prefix, None is returned. :return: The field alias, auto_alias, prefixed name, or None :rtype: str or None """
alias = None if self.alias: alias = self.alias elif self.auto_alias: alias = self.auto_alias if self.table and self.table.prefix_fields: field_prefix = self.table.get_field_prefix() if alias: alias = '{0}__{1}'.format(field_prefix, alias) else: alias = '{0}__{1}'.format(field_prefix, self.name) return alias
<SYSTEM_TASK:> Gets the SELECT field portion for the field without the alias. If the field <END_TASK> <USER_TASK:> Description: def get_select_sql(self): """ Gets the SELECT field portion for the field without the alias. If the field has a table, it will be included here like table.field :return: Gets the SELECT field portion for the field without the alias :rtype: str """
if self.table:
            return '{0}.{1}'.format(self.table.get_identifier(), self.name)
        return self.name
<SYSTEM_TASK:> Setter for the table of this field. Also sets the inner field's table. <END_TASK> <USER_TASK:> Description: def set_table(self, table): """ Setter for the table of this field. Also sets the inner field's table. """
super(MultiField, self).set_table(table) if self.field and self.field.table is None: self.field.set_table(self.table)
<SYSTEM_TASK:> Adds a field to this field's table and optionally groups by it <END_TASK> <USER_TASK:> Description: def add_to_table(self, field, alias, add_group=False): """ Adds the given field to this field's table and optionally groups by it :param field: The field to add to the table :type field: str or :class:`Field <querybuilder.fields.Field>` :param alias: The alias for the field :type alias: str :param add_group: Whether or not the table should group by this field :type add_group: bool """
self.table.add_field({ alias: field }) if add_group: self.table.owner.group_by(alias)
<SYSTEM_TASK:> Gets the alias for the table or the auto_alias if one is set. <END_TASK> <USER_TASK:> Description: def get_alias(self): """ Gets the alias for the table or the auto_alias if one is set. If there isn't any kind of alias, None is returned. :returns: The table alias, auto_alias, or None :rtype: str or None """
alias = None if self.alias: alias = self.alias elif self.auto_alias: alias = self.auto_alias return alias
<SYSTEM_TASK:> Adds a field to this table <END_TASK> <USER_TASK:> Description: def add_field(self, field): """ Adds a field to this table :param field: This can be a string of a field name, a dict of {'alias': field}, or a ``Field`` instance :type field: str or dict or Field """
field = FieldFactory(field)
        field.set_table(self)

        # make sure field is not already added
        field_name = field.get_name()
        for existing_field in self.fields:
            if existing_field.get_name() == field_name:
                return None

        self.before_add_field(field)
        field.before_add()

        if field.ignore is False:
            self.fields.append(field)

        return field
<SYSTEM_TASK:> Removes a field from this table <END_TASK> <USER_TASK:> Description: def remove_field(self, field): """ Removes a field from this table :param field: This can be a string of a field name, a dict of {'alias': field}, or a ``Field`` instance :type field: str or dict or :class:`Field <querybuilder.fields.Field>` """
new_field = FieldFactory(field)
        new_field.set_table(self)
        new_field_identifier = new_field.get_identifier()
        for existing_field in self.fields:
            if existing_field.get_identifier() == new_field_identifier:
                self.fields.remove(existing_field)
                return existing_field
        return None
<SYSTEM_TASK:> Adds all of the passed fields to the table's current field list <END_TASK> <USER_TASK:> Description: def add_fields(self, fields): """ Adds all of the passed fields to the table's current field list :param fields: The fields to select from ``table``. This can be a single field, a tuple of fields, or a list of fields. Each field can be a string or ``Field`` instance :type fields: str or tuple or list of str or list of Field or :class:`Field <querybuilder.fields.Field>` """
if isinstance(fields, string_types):
            fields = [fields]
        elif isinstance(fields, tuple):
            fields = list(fields)

        field_objects = [self.add_field(field) for field in fields]

        return field_objects
<SYSTEM_TASK:> Finds a field by name or alias. <END_TASK> <USER_TASK:> Description: def find_field(self, field=None, alias=None): """ Finds a field by name or alias. :param field: string of the field name or alias, dict of {'alias': field}, or a Field instance :type field: str or dict or Field :returns: The field if it is found, otherwise None :rtype: :class:`Field <querybuilder.fields.Field>` or None """
if alias:
            field = alias

        field = FieldFactory(field, table=self, alias=alias)
        identifier = field.get_identifier()
        for existing_field in self.fields:
            if existing_field.get_identifier() == identifier:
                return existing_field
        return None
<SYSTEM_TASK:> Sets the name of the table to the passed in table value <END_TASK> <USER_TASK:> Description: def init_defaults(self): """ Sets the name of the table to the passed in table value """
super(SimpleTable, self).init_defaults() self.name = self.table
<SYSTEM_TASK:> Sets a model instance variable to the table value and sets the name to the <END_TASK> <USER_TASK:> Description: def init_defaults(self): """ Sets a model instance variable to the table value and sets the name to the table name as determined from the model class """
super(ModelTable, self).init_defaults() self.model = self.table self.name = self.model._meta.db_table
<SYSTEM_TASK:> Sets a query instance variable to the table value <END_TASK> <USER_TASK:> Description: def init_defaults(self): """ Sets a query instance variable to the table value """
super(QueryTable, self).init_defaults() self.query = self.table self.query.is_inner = True
<SYSTEM_TASK:> Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` <END_TASK> <USER_TASK:> Description: def run_command(cmd_to_run): """ Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the Python process until `cmd_to_run` has completed. Returns a tuple containing the stderr and stdout as strings. """
with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file: # Run the command popen = subprocess.Popen(cmd_to_run, stdout=stdout_file, stderr=stderr_file) popen.wait() stderr_file.seek(0) stdout_file.seek(0) stderr = stderr_file.read() stdout = stdout_file.read() if six.PY3: stderr = stderr.decode() stdout = stdout.decode() return stderr, stdout
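Usage of the helper above; it blocks until the child process exits:

stderr, stdout = run_command(['ls', '-l'])
if stderr:
    print('command reported errors:', stderr)
else:
    print(stdout)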
<SYSTEM_TASK:> Writes the contents of the Extension to the logging system. <END_TASK> <USER_TASK:> Description: def log(self, level, prefix=''): """Writes the contents of the Extension to the logging system. """
logging.log(level, "%sname: %s", prefix, self.__name) logging.log(level, "%soptions: %s", prefix, self.__options)
<SYSTEM_TASK:> Returns the array of arguments that would be given to <END_TASK> <USER_TASK:> Description: def specbits(self): """Returns the array of arguments that would be given to iptables for the current Extension. """
bits = [] for opt in sorted(self.__options): # handle the case where this is a negated option m = re.match(r'^! (.*)', opt) if m: bits.extend(['!', "--%s" % m.group(1)]) else: bits.append("--%s" % opt) optval = self.__options[opt] if isinstance(optval, list): bits.extend(optval) else: bits.append(optval) return bits
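A hedged sketch, assuming the ``Match(name, options)`` constructor from python-netfilter's documented examples; options are stored without their leading dashes and re-expanded by the method above:

from netfilter.rule import Match

m = Match('tcp', '--dport 80')
print(m.specbits())   # expected: ['--dport', '80']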
<SYSTEM_TASK:> Writes the contents of the Rule to the logging system. <END_TASK> <USER_TASK:> Description: def log(self, level, prefix=''): """Writes the contents of the Rule to the logging system. """
logging.log(level, "%sin interface: %s", prefix, self.in_interface) logging.log(level, "%sout interface: %s", prefix, self.out_interface) logging.log(level, "%ssource: %s", prefix, self.source) logging.log(level, "%sdestination: %s", prefix, self.destination) logging.log(level, "%smatches:", prefix) for match in self.matches: match.log(level, prefix + ' ') if self.jump: logging.log(level, "%sjump:", prefix) self.jump.log(level, prefix + ' ')
<SYSTEM_TASK:> Returns the array of arguments that would be given to <END_TASK> <USER_TASK:> Description: def specbits(self): """Returns the array of arguments that would be given to iptables for the current Rule. """
def host_bits(opt, optval): # handle the case where this is a negated value m = re.match(r'^!\s*(.*)', optval) if m: return ['!', opt, m.group(1)] else: return [opt, optval] bits = [] if self.protocol: bits.extend(host_bits('-p', self.protocol)) if self.in_interface: bits.extend(host_bits('-i', self.in_interface)) if self.out_interface: bits.extend(host_bits('-o', self.out_interface)) if self.source: bits.extend(host_bits('-s', self.source)) if self.destination: bits.extend(host_bits('-d', self.destination)) for mod in self.matches: bits.extend(['-m', mod.name()]) bits.extend(mod.specbits()) if self.goto: bits.extend(['-g', self.goto.name()]) bits.extend(self.goto.specbits()) elif self.jump: bits.extend(['-j', self.jump.name()]) bits.extend(self.jump.specbits()) return bits
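A hedged sketch assembling a full rule, assuming the ``Rule`` and ``Match`` constructors shown in python-netfilter's README:

from netfilter.rule import Rule, Match

rule = Rule(
    protocol='tcp',
    in_interface='eth0',
    matches=[Match('tcp', '--dport 80')],
    jump='ACCEPT',
)
print(rule.specbits())
# expected: ['-p', 'tcp', '-i', 'eth0', '-m', 'tcp', '--dport', '80', '-j', 'ACCEPT']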
<SYSTEM_TASK:> Parse the rules for the specified chain. <END_TASK> <USER_TASK:> Description: def parse_rules(data, chain): """ Parse the rules for the specified chain. """
rules = [] for line in data.splitlines(True): m = re_rule.match(line) if m and m.group(3) == chain: rule = parse_rule(m.group(4)) rule.packets = int(m.group(1)) rule.bytes = int(m.group(2)) rules.append(rule) return rules
<SYSTEM_TASK:> Opens a connection to the database. <END_TASK> <USER_TASK:> Description: def _get_new_connection(self, conn_params): """Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '') conn = self.Database.connect(**conn_params) return conn
<SYSTEM_TASK:> Returns a dict of parameters suitable for get_new_connection. <END_TASK> <USER_TASK:> Description: def get_connection_params(self): """Returns a dict of parameters suitable for get_new_connection."""
from django.conf import settings settings_dict = self.settings_dict options = settings_dict.get('OPTIONS', {}) autocommit = options.get('autocommit', False) conn_params = { 'server': settings_dict['HOST'], 'database': settings_dict['NAME'], 'user': settings_dict['USER'], 'port': settings_dict.get('PORT', '1433'), 'password': settings_dict['PASSWORD'], 'timeout': self.command_timeout, 'autocommit': autocommit, 'use_mars': options.get('use_mars', False), 'load_balancer': options.get('load_balancer', None), 'failover_partner': options.get('failover_partner', None), 'use_tz': utc if getattr(settings, 'USE_TZ', False) else None, } for opt in _SUPPORTED_OPTIONS: if opt in options: conn_params[opt] = options[opt] self.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None return conn_params
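A hedged example of the Django settings this method consumes; the ENGINE path and credentials are placeholders, not a verified configuration:

DATABASES = {
    'default': {
        'ENGINE': 'sqlserver',        # assumed backend module path
        'HOST': 'db.example.com',
        'NAME': 'mydb',
        'USER': 'app',
        'PASSWORD': 'secret',
        'PORT': '1433',
        'OPTIONS': {
            'autocommit': False,      # read into conn_params above
            'use_mars': True,
        },
    },
}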
<SYSTEM_TASK:> Creates a cursor. Assumes that a connection is established. <END_TASK> <USER_TASK:> Description: def create_cursor(self, name=None): """Creates a cursor. Assumes that a connection is established."""
cursor = self.connection.cursor() cursor.tzinfo_factory = self.tzinfo_factory return cursor
<SYSTEM_TASK:> Start the firewall. <END_TASK> <USER_TASK:> Description: def start(self): """Start the firewall."""
self.clear() self.setDefaultPolicy() self.acceptIcmp() self.acceptInput('lo')
<SYSTEM_TASK:> Returns a list of Rules in the specified chain. <END_TASK> <USER_TASK:> Description: def list_rules(self, chainname): """Returns a list of Rules in the specified chain. """
data = self.__run([self.__iptables_save, '-t', self.__name, '-c']) return netfilter.parser.parse_rules(data, chainname)
<SYSTEM_TASK:> Commits any buffered commands. This is only useful if <END_TASK> <USER_TASK:> Description: def commit(self): """Commits any buffered commands. This is only useful if auto_commit is False. """
while len(self.__buffer) > 0: self.__run(self.__buffer.pop(0))
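A hedged sketch of buffered mode, assuming the ``Table(name, auto_commit)`` constructor and ``append_rule`` from python-netfilter's documented usage; ``rule`` is the Rule built in the earlier sketch:

from netfilter.table import Table

table = Table('filter', auto_commit=False)  # assumed keyword; commands are buffered
table.append_rule('INPUT', rule)            # queued rather than executed
table.commit()                              # flush the buffered iptables commands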
<SYSTEM_TASK:> lets you type a page number to go to <END_TASK> <USER_TASK:> Description: async def numbered_page(self): """lets you type a page number to go to"""
to_delete = [] to_delete.append(await self.bot.send_message(self.message.channel, 'What page do you want to go to?')) msg = await self.bot.wait_for_message(author=self.author, channel=self.message.channel, check=lambda m: m.content.isdigit(), timeout=30.0) if msg is not None: page = int(msg.content) to_delete.append(msg) if page != 0 and page <= self.maximum_pages: await self.show_page(page) else: to_delete.append(await self.bot.say('Invalid page given. (%s/%s)' % (page, self.maximum_pages))) await asyncio.sleep(5) else: to_delete.append(await self.bot.send_message(self.message.channel, 'Took too long.')) await asyncio.sleep(5) try: await self.bot.delete_messages(to_delete) except Exception: pass
<SYSTEM_TASK:> shows this message <END_TASK> <USER_TASK:> Description: async def show_help(self): """shows this message"""
e = discord.Embed() messages = ['Welcome to the interactive paginator!\n'] messages.append('This interactively allows you to see pages of text by navigating with ' \ 'reactions. They are as follows:\n') for (emoji, func) in self.reaction_emojis: messages.append('%s %s' % (emoji, func.__doc__)) e.description = '\n'.join(messages) e.colour = 0x738bd7 # blurple e.set_footer(text='We were on page %s before this message.' % self.current_page) await self.bot.edit_message(self.message, embed=e) async def go_back_to_current_page(): await asyncio.sleep(60.0) await self.show_current_page() self.bot.loop.create_task(go_back_to_current_page())
<SYSTEM_TASK:> Actually paginate the entries and run the interactive loop if necessary. <END_TASK> <USER_TASK:> Description: async def paginate(self): """Actually paginate the entries and run the interactive loop if necessary."""
await self.show_page(1, first=True)

        while self.paginating:
            react = await self.bot.wait_for_reaction(message=self.message, check=self.react_check, timeout=120.0)
            if react is None:
                self.paginating = False
                try:
                    await self.bot.clear_reactions(self.message)
                except Exception:
                    pass
                finally:
                    break

            try:
                await self.bot.remove_reaction(self.message, react.reaction.emoji, react.user)
            except Exception:
                pass # can't remove it so don't bother doing so

            await self.match()
<SYSTEM_TASK:> Toggles sending DMs to owner. <END_TASK> <USER_TASK:> Description: async def _senddms(self): """Toggles sending DMs to owner."""
data = self.bot.config.get("meta", {}) tosend = data.get('send_dms', True) data['send_dms'] = not tosend await self.bot.config.put('meta', data) await self.bot.responses.toggle(message="Forwarding of DMs to owner has been {status}.", success=data['send_dms'])
<SYSTEM_TASK:> Repeats a command a specified number of times. <END_TASK> <USER_TASK:> Description: async def _do(self, ctx, times: int, *, command): """Repeats a command a specified number of times."""
msg = copy.copy(ctx.message) msg.content = command for i in range(times): await self.bot.process_commands(msg)
<SYSTEM_TASK:> Disables a command for this server. <END_TASK> <USER_TASK:> Description: async def disable(self, ctx, *, command: str): """Disables a command for this server. You must have Manage Server permissions or the Bot Admin role to use this command. """
command = command.lower() if command in ('enable', 'disable'): return await self.bot.responses.failure(message='Cannot disable that command.') if command not in self.bot.commands: return await self.bot.responses.failure(message='Command "{}" was not found.'.format(command)) guild_id = ctx.message.server.id cmds = self.config.get('commands', {}) entries = cmds.get(guild_id, []) entries.append(command) cmds[guild_id] = entries await self.config.put('commands', cmds) await self.bot.responses.success(message='"%s" command disabled in this server.' % command)
<SYSTEM_TASK:> Enables a command for this server. <END_TASK> <USER_TASK:> Description: async def enable(self, ctx, *, command: str): """Enables a command for this server. You must have Manage Server permissions or the Bot Admin role to use this command. """
command = command.lower()
        guild_id = ctx.message.server.id
        cmds = self.config.get('commands', {})
        entries = cmds.get(guild_id, [])

        try:
            # list.remove raises ValueError (not KeyError) when the item is absent
            entries.remove(command)
        except ValueError:
            await self.bot.responses.failure(message='The command does not exist or is not disabled.')
        else:
            cmds[guild_id] = entries
            await self.config.put('commands', cmds)
            await self.bot.responses.success(message='"%s" command enabled in this server.' % command)
<SYSTEM_TASK:> Handles the bot's ignore lists. <END_TASK> <USER_TASK:> Description: async def ignore(self, ctx): """Handles the bot's ignore lists. To use these commands, you must have the Bot Admin role or have Manage Channels permissions. These commands are not allowed to be used in a private message context. Users with Manage Roles or Bot Admin role can still invoke the bot in ignored channels. """
if ctx.invoked_subcommand is None: await self.bot.say('Invalid subcommand passed: {0.subcommand_passed}'.format(ctx))
<SYSTEM_TASK:> Tells you what channels are currently ignored in this server. <END_TASK> <USER_TASK:> Description: async def ignore_list(self, ctx): """Tells you what channels are currently ignored in this server."""
ignored = self.config.get('ignored', []) channel_ids = set(c.id for c in ctx.message.server.channels) result = [] for channel in ignored: if channel in channel_ids: result.append('<#{}>'.format(channel)) if result: await self.bot.responses.basic(title="Ignored Channels:", message='\n\n{}'.format(', '.join(result))) else: await self.bot.responses.failure(message='I am not ignoring any channels here.')
<SYSTEM_TASK:> Ignores a specific channel from being processed. <END_TASK> <USER_TASK:> Description: async def channel_cmd(self, ctx, *, channel : discord.Channel = None): """Ignores a specific channel from being processed. If no channel is specified, the current channel is ignored. If a channel is ignored then the bot does not process commands in that channel until it is unignored. """
if channel is None: channel = ctx.message.channel ignored = self.config.get('ignored', []) if channel.id in ignored: await self.bot.responses.failure(message='That channel is already ignored.') return ignored.append(channel.id) await self.config.put('ignored', ignored) await self.bot.responses.success(message='Channel <#{}> will be ignored.'.format(channel.id))
<SYSTEM_TASK:> Ignores every channel in the server from being processed. <END_TASK> <USER_TASK:> Description: async def _all(self, ctx): """Ignores every channel in the server from being processed. This works by adding every channel that the server currently has into the ignore list. If more channels are added then they will have to be ignored by using the ignore command. To use this command you must have Manage Server permissions along with Manage Channels permissions. You could also have the Bot Admin role. """
ignored = self.config.get('ignored', []) channels = ctx.message.server.channels ignored.extend(c.id for c in channels if c.type == discord.ChannelType.text) await self.config.put('ignored', list(set(ignored))) # make unique await self.bot.responses.success(message='All channels ignored.')
<SYSTEM_TASK:> Unignores channels from being processed. <END_TASK> <USER_TASK:> Description: async def unignore(self, ctx, *channels: discord.Channel): """Unignores channels from being processed. If no channels are specified, it unignores the current channel. To use this command you must have the Manage Channels permission or have the Bot Admin role. """
if len(channels) == 0: channels = (ctx.message.channel,) # a set is the proper data type for the ignore list # however, JSON only supports arrays and objects not sets. ignored = self.config.get('ignored', []) result = [] for channel in channels: try: ignored.remove(channel.id) except ValueError: pass else: result.append('<#{}>'.format(channel.id)) await self.config.put('ignored', ignored) await self.bot.responses.success(message='Channel(s) {} will no longer be ignored.'.format(', '.join(result)))
<SYSTEM_TASK:> Unignores all channels in this server from being processed. <END_TASK> <USER_TASK:> Description: async def unignore_all(self, ctx): """Unignores all channels in this server from being processed. To use this command you must have the Manage Channels permission or have the Bot Admin role. """
channels = [c for c in ctx.message.server.channels if c.type is discord.ChannelType.text] await ctx.invoke(self.unignore, *channels)
<SYSTEM_TASK:> Cleans up the bot's messages from the channel. <END_TASK> <USER_TASK:> Description: async def cleanup(self, ctx, search : int = 100): """Cleans up the bot's messages from the channel. If a search number is specified, it searches that many messages to delete. If the bot has Manage Messages permissions, then it will try to delete messages that look like they invoked the bot as well. After the cleanup is completed, the bot will send you a message with which people got their messages deleted and their count. This is useful to see which users are spammers. To use this command you must have Manage Messages permission or have the Bot Mod role. """
spammers = Counter() channel = ctx.message.channel prefixes = self.bot.command_prefix if callable(prefixes): prefixes = prefixes(self.bot, ctx.message) def is_possible_command_invoke(entry): valid_call = any(entry.content.startswith(prefix) for prefix in prefixes) return valid_call and not entry.content[1:2].isspace() can_delete = channel.permissions_for(channel.server.me).manage_messages if not can_delete: api_calls = 0 async for entry in self.bot.logs_from(channel, limit=search, before=ctx.message): if api_calls and api_calls % 5 == 0: await asyncio.sleep(1.1) if entry.author == self.bot.user: await self.bot.delete_message(entry) spammers['Bot'] += 1 api_calls += 1 if is_possible_command_invoke(entry): try: await self.bot.delete_message(entry) except discord.Forbidden: continue else: spammers[entry.author.display_name] += 1 api_calls += 1 else: predicate = lambda m: m.author == self.bot.user or is_possible_command_invoke(m) deleted = await self.bot.purge_from(channel, limit=search, before=ctx.message, check=predicate) spammers = Counter(m.author.display_name for m in deleted) deleted = sum(spammers.values()) messages = ['%s %s removed.' % (deleted, 'message was' if deleted == 1 else 'messages were')] if deleted: messages.append('') spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True) messages.extend(map(lambda t: '**{0[0]}**: {0[1]}'.format(t), spammers)) msg = await self.bot.responses.basic(title="Removed Messages:", message='\n'.join(messages)) await asyncio.sleep(10) await self.bot.delete_message(msg)
<SYSTEM_TASK:> Bans a user from using the bot. <END_TASK> <USER_TASK:> Description: async def plonk(self, ctx, *, member: discord.Member): """Bans a user from using the bot. This bans a person from using the bot in the current server. There is no concept of a global ban. This ban can be bypassed by having the Manage Server permission. To use this command you must have the Manage Server permission or have a Bot Admin role. """
plonks = self.config.get('plonks', {}) guild_id = ctx.message.server.id db = plonks.get(guild_id, []) if member.id in db: await self.bot.responses.failure(message='That user is already bot banned in this server.') return db.append(member.id) plonks[guild_id] = db await self.config.put('plonks', plonks) await self.bot.responses.success(message='%s has been banned from using the bot in this server.' % member)