<SYSTEM_TASK:>
Creates a space with given attributes.
<END_TASK>
<USER_TASK:>
Description:
def create(self, attributes=None, **kwargs):
"""
Creates a space with given attributes.
""" |
if attributes is None:
attributes = {}
if 'default_locale' not in attributes:
attributes['default_locale'] = self.client.default_locale
return super(SpacesProxy, self).create(resource_id=None, attributes=attributes) |
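A usage sketch (hedged: assumes a contentful_management.Client with a valid management token; all identifiers are placeholders):

import contentful_management

client = contentful_management.Client('<management-api-token>')

# 'default_locale' is filled in from the client when absent from attributes.
space = client.spaces().create({'name': 'My Space'})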
<SYSTEM_TASK:>
Provides access to editor interfaces management methods.
<END_TASK>
<USER_TASK:>
Description:
def editor_interfaces(self, space_id, environment_id, content_type_id):
"""
Provides access to editor interfaces management methods.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface
:return: :class:`EditorInterfacesProxy <contentful_management.editor_interfaces_proxy.EditorInterfacesProxy>` object.
:rtype: contentful_management.editor_interfaces_proxy.EditorInterfacesProxy
Usage:
>>> editor_interfaces_proxy = client.editor_interfaces('cfexampleapi', 'master', 'cat')
<EditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
""" |
return EditorInterfacesProxy(self, space_id, environment_id, content_type_id) |
<SYSTEM_TASK:>
Validates that required parameters are present.
<END_TASK>
<USER_TASK:>
Description:
def _validate_configuration(self):
"""
Validates that required parameters are present.
""" |
if not self.access_token:
raise ConfigurationException(
'You will need to initialize a client with an Access Token'
)
if not self.api_url:
raise ConfigurationException(
'The client configuration needs to contain an API URL'
)
if not self.default_locale:
raise ConfigurationException(
'The client configuration needs to contain a Default Locale'
)
if not self.api_version or self.api_version < 1:
raise ConfigurationException(
'The API Version must be a positive number'
) |
<SYSTEM_TASK:>
Performs the requested HTTP request.
<END_TASK>
<USER_TASK:>
Description:
def _http_request(self, method, url, request_kwargs=None):
"""
Performs the requested HTTP request.
""" |
kwargs = request_kwargs if request_kwargs is not None else {}
headers = self._request_headers()
headers.update(self.additional_headers)
if 'headers' in kwargs:
headers.update(kwargs['headers'])
kwargs['headers'] = headers
if self._has_proxy():
kwargs['proxies'] = self._proxy_parameters()
request_url = self._url(
url,
file_upload=kwargs.pop('file_upload', False)
)
request_method = getattr(requests, method)
response = request_method(request_url, **kwargs)
if response.status_code == 429:
raise RateLimitExceededError(response)
return response |
<SYSTEM_TASK:>
Wrapper for the HTTP requests,
<END_TASK>
<USER_TASK:>
Description:
def _request(self, method, url, query_or_data=None, **kwargs):
"""
Wrapper for the HTTP requests;
rate-limit backoff is handled here, and
responses are processed with ResourceBuilder.
""" |
if query_or_data is None:
query_or_data = {}
request_method = getattr(self, '_http_{0}'.format(method))
response = retry_request(self)(request_method)(url, query_or_data, **kwargs)
if self.raw_mode:
return response
if response.status_code >= 300:
error = get_error(response)
if self.raise_errors:
raise error
return error
# Return response object on NoContent
if response.status_code == 204 or not response.text:
return response
return ResourceBuilder(
self,
self.default_locale,
response.json()
).build() |
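The getattr lookup above is plain dynamic dispatch by method name. A minimal self-contained sketch of the same pattern (illustrative class, not the library's API):

class Dispatcher:
    def _http_get(self, url, query):
        return ('GET', url, query)

    def _http_post(self, url, data):
        return ('POST', url, data)

    def request(self, method, url, payload=None):
        # Resolve the concrete handler by name at call time,
        # exactly as _request does with '_http_{0}'.format(method).
        handler = getattr(self, '_http_{0}'.format(method))
        return handler(url, payload)

assert Dispatcher().request('get', '/spaces')[0] == 'GET'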
<SYSTEM_TASK:>
Wrapper for the HTTP POST request.
<END_TASK>
<USER_TASK:>
Description:
def _post(self, url, attributes=None, **kwargs):
"""
Wrapper for the HTTP POST request.
""" |
return self._request('post', url, attributes, **kwargs) |
<SYSTEM_TASK:>
Wrapper for the HTTP PUT request.
<END_TASK>
<USER_TASK:>
Description:
def _put(self, url, attributes=None, **kwargs):
"""
Wrapper for the HTTP PUT request.
""" |
return self._request('put', url, attributes, **kwargs) |
<SYSTEM_TASK:>
Gets all assets of a space.
<END_TASK>
<USER_TASK:>
Description:
def all(self, query=None, **kwargs):
"""
Gets all assets of a space.
""" |
if query is None:
query = {}
normalize_select(query)
return super(AssetsProxy, self).all(query, **kwargs) |
<SYSTEM_TASK:>
Gets a single asset by ID.
<END_TASK>
<USER_TASK:>
Description:
def find(self, asset_id, query=None, **kwargs):
"""
Gets a single asset by ID.
""" |
if query is None:
query = {}
normalize_select(query)
return super(AssetsProxy, self).find(asset_id, query=query, **kwargs) |
<SYSTEM_TASK:>
Updates the entry with attributes.
<END_TASK>
<USER_TASK:>
Description:
def update(self, attributes=None):
"""
Updates the entry with attributes.
""" |
if attributes is None:
attributes = {}
attributes['content_type_id'] = self.sys['content_type'].id
return super(Entry, self).update(attributes) |
<SYSTEM_TASK:>
Creates an upload for the given file or path.
<END_TASK>
<USER_TASK:>
Description:
def create(self, file_or_path, **kwargs):
"""
Creates an upload for the given file or path.
""" |
opened = False
if isinstance(file_or_path, str_type()):
file_or_path = open(file_or_path, 'rb')
opened = True
elif not getattr(file_or_path, 'read', False):
raise Exception("A file or path to a file is required for this operation.")
try:
return self.client._post(
self._url(),
file_or_path,
headers=self._resource_class.create_headers({}),
file_upload=True
)
finally:
if opened:
file_or_path.close() |
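A usage sketch (hedged: the space ID and file path are placeholders, and the proxy is assumed to come from a contentful_management client):

# Passing a path: the proxy opens and closes the file itself.
upload = client.uploads('<space-id>').create('/tmp/cat.jpg')

# Passing an open binary file: the caller keeps ownership, so the
# proxy leaves it open (note that `opened` stays False in that branch).
with open('/tmp/cat.jpg', 'rb') as f:
    upload = client.uploads('<space-id>').create(f)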
<SYSTEM_TASK:>
Finds an upload by ID.
<END_TASK>
<USER_TASK:>
Description:
def find(self, upload_id, **kwargs):
"""
Finds an upload by ID.
""" |
return super(UploadsProxy, self).find(upload_id, file_upload=True) |
<SYSTEM_TASK:>
Deletes an upload by ID.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, upload_id):
"""
Deletes an upload by ID.
""" |
return super(UploadsProxy, self).delete(upload_id, file_upload=True) |
<SYSTEM_TASK:>
Returns the JSON Representation of the content type field.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self):
"""
Returns the JSON Representation of the content type field.
""" |
result = {
'name': self.name,
'id': self._real_id(),
'type': self.type,
'localized': self.localized,
'omitted': self.omitted,
'required': self.required,
'disabled': self.disabled,
'validations': [v.to_json() for v in self.validations]
}
if self.type == 'Array':
result['items'] = self.items
if self.type == 'Link':
result['linkType'] = self.link_type
return result |
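For a Link field, the returned dictionary would look roughly like this (values illustrative):

{
    'name': 'Author',
    'id': 'author',
    'type': 'Link',
    'localized': False,
    'omitted': False,
    'required': True,
    'disabled': False,
    'validations': [],
    'linkType': 'Entry'   # added only because type == 'Link'
}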
<SYSTEM_TASK:>
Attributes for webhook creation.
<END_TASK>
<USER_TASK:>
Description:
def create_attributes(klass, attributes, previous_object=None):
"""
Attributes for webhook creation.
""" |
result = super(Webhook, klass).create_attributes(attributes, previous_object)
if 'topics' not in result:
raise Exception("Topics ('topics') must be provided for this operation.")
return result |
<SYSTEM_TASK:>
Returns the URI for the editor interface.
<END_TASK>
<USER_TASK:>
Description:
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
"""
Returns the URI for the editor interface.
""" |
return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
content_type_id
) |
<SYSTEM_TASK:>
Returns the JSON representation of the editor interface.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self):
"""
Returns the JSON representation of the editor interface.
""" |
result = super(EditorInterface, self).to_json()
result.update({'controls': self.controls})
return result |
<SYSTEM_TASK:>
Returns the JSON Representation of the content type field validation.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self):
"""
Returns the JSON Representation of the content type field validation.
""" |
result = {}
for k, v in self._data.items():
result[camel_case(k)] = v
return result |
<SYSTEM_TASK:>
Creates the objects from the JSON response.
<END_TASK>
<USER_TASK:>
Description:
def build(self):
"""
Creates the objects from the JSON response.
""" |
if self.json['sys']['type'] == 'Array':
return self._build_array()
return self._build_item(self.json) |
<SYSTEM_TASK:>
Finds a single resource by ID related to the current space.
<END_TASK>
<USER_TASK:>
Description:
def find(self, resource_id, query=None):
"""
Finds a single resource by ID related to the current space.
""" |
return self.proxy.find(resource_id, query=query) |
<SYSTEM_TASK:>
Internal mechanism to try to send data to multiple Solr Hosts if
<END_TASK>
<USER_TASK:>
Description:
def _retry(function):
"""
Internal mechanism to try to send data to multiple Solr Hosts if
the query fails on the first one.
""" |
def inner(self, **kwargs):
last_exception = None
#for host in self.router.get_hosts(**kwargs):
for host in self.host:
try:
return function(self, host, **kwargs)
except SolrError as e:
self.logger.exception(e)
raise
except ConnectionError as e:
self.logger.exception("Tried connecting to Solr, but couldn't because of the following exception.")
if '401' in str(e):
raise
last_exception = e
# raise the last exception after contacting all hosts instead of returning None
if last_exception is not None:
raise last_exception
return inner |
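A minimal sketch of how _retry attaches to a host-taking method (FakeTransport is illustrative, not SolrClient's real layout; it only needs the .host and .logger attributes that inner() uses):

import logging

class FakeTransport:
    def __init__(self, hosts):
        self.host = hosts                        # iterated by inner() for failover
        self.logger = logging.getLogger(__name__)

    @_retry
    def ping(self, host, **kwargs):
        # `host` is injected by _retry, one candidate per attempt;
        # decorated methods are therefore called with keyword args only.
        return '{0}/admin/ping'.format(host)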
<SYSTEM_TASK:>
Starts virtual display which will be
<END_TASK>
<USER_TASK:>
Description:
def start_virtual_display(self, width=1440, height=900,
colordepth=24, **kwargs):
"""Starts virtual display which will be
destroyed after test execution ends
*Arguments:*
- width: a width to be set in pixels
- height: a height to be set in pixels
- colordepth: a color depth to be used
- kwargs: extra parameters
*Example:*
| Start Virtual Display |
| Start Virtual Display | 1920 | 1080 |
| Start Virtual Display | ${1920} | ${1080} | ${16} |
""" |
if self._display is None:
logger.info("Using virtual display: '{0}x{1}x{2}'".format(
width, height, colordepth))
self._display = Xvfb(int(width), int(height),
int(colordepth), **kwargs)
self._display.start()
atexit.register(self._display.stop) |
<SYSTEM_TASK:>
Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns
<END_TASK>
<USER_TASK:>
Description:
def clusterstatus(self):
"""
Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns
it as doc_count key for each replica.
""" |
res = self.cluster_status_raw()
cluster = res['cluster']['collections']
out = {}
try:
for collection in cluster:
out[collection] = {}
for shard in cluster[collection]['shards']:
out[collection][shard] = {}
for replica in cluster[collection]['shards'][shard]['replicas']:
out[collection][shard][replica] = cluster[collection]['shards'][shard]['replicas'][replica]
if out[collection][shard][replica]['state'] != 'active':
out[collection][shard][replica]['doc_count'] = False
else:
out[collection][shard][replica]['doc_count'] = self._get_collection_counts(
out[collection][shard][replica])
except Exception as e:
self.logger.error("Couldn't parse response from clusterstatus API call")
self.logger.exception(e)
return out |
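The returned structure nests collection -> shard -> replica; a sketch of its shape (all values illustrative):

{
    'collection1': {
        'shard1': {
            'core_node1': {
                'state': 'active',
                'base_url': 'http://10.0.0.1:8983/solr',
                'core': 'collection1_shard1_replica1',
                'doc_count': 1234,   # False when the replica is not active
            },
        },
    },
}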
<SYSTEM_TASK:>
Queries each core to get individual counts for each core for each shard.
<END_TASK>
<USER_TASK:>
Description:
def _get_collection_counts(self, core_data):
"""
Queries each core to get individual counts for each core for each shard.
""" |
if core_data['base_url'] not in self.solr_clients:
from SolrClient import SolrClient
self.solr_clients[core_data['base_url']] = SolrClient(core_data['base_url'], log=self.logger)
try:
return self.solr_clients[core_data['base_url']].query(core_data['core'],
{'q': '*:*',
'rows': 0,
'distrib': 'false',
}).get_num_found()
except Exception as e:
self.logger.error("Couldn't get Counts for {}/{}".format(core_data['base_url'], core_data['core']))
self.logger.exception(e)
return False |
<SYSTEM_TASK:>
Force this session to switch to new keys. Normally this is done
<END_TASK>
<USER_TASK:>
Description:
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
""" |
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return |
<SYSTEM_TASK:>
send a message, but block if we're in key negotiation. this is used
<END_TASK>
<USER_TASK:>
Description:
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
""" |
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release() |
<SYSTEM_TASK:>
Add a string to the stream.
<END_TASK>
<USER_TASK:>
Description:
def add_string(self, s):
"""
Add a string to the stream.
@param s: string to add
@type s: str
""" |
self.add_int(len(s))
self.packet.write(s)
return self |
<SYSTEM_TASK:>
Attempt to authenticate to the given transport using any of the private
<END_TASK>
<USER_TASK:>
Description:
def agent_auth(transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
""" |
agent = ssh.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
return
for key in agent_keys:
print 'Trying ssh-agent key %s' % hexlify(key.get_fingerprint()),
try:
transport.auth_publickey(username, key)
print '... success!'
return
except ssh.SSHException:
print '... nope.' |
<SYSTEM_TASK:>
read data out of the prefetch buffer, if possible. if the data isn't
<END_TASK>
<USER_TASK:>
Description:
def _read_prefetch(self, size):
"""
read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read.
""" |
# while not closed, and haven't fetched past the current position, and haven't reached EOF...
while True:
offset = self._data_in_prefetch_buffers(self._realpos)
if offset is not None:
break
if self._prefetch_done or self._closed:
break
self.sftp._read_response()
self._check_exception()
if offset is None:
self._prefetching = False
return None
prefetch = self._prefetch_data[offset]
del self._prefetch_data[offset]
buf_offset = self._realpos - offset
if buf_offset > 0:
self._prefetch_data[offset] = prefetch[:buf_offset]
prefetch = prefetch[buf_offset:]
if size < len(prefetch):
self._prefetch_data[self._realpos + size] = prefetch[size:]
prefetch = prefetch[:size]
return prefetch |
<SYSTEM_TASK:>
Subclasses call this method to initialize the BufferedFile.
<END_TASK>
<USER_TASK:>
Description:
def _set_mode(self, mode='r', bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
""" |
# set bufsize in any event, because it's used for readline().
self._bufsize = self._DEFAULT_BUFSIZE
if bufsize < 0:
# do no buffering by default, because otherwise writes will get
# buffered in a way that will probably confuse people.
bufsize = 0
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= self.FLAG_BUFFERED
self._flags &= ~self.FLAG_LINE_BUFFERED
elif bufsize == 0:
# unbuffered
self._flags &= ~(self.FLAG_BUFFERED | self.FLAG_LINE_BUFFERED)
if ('r' in mode) or ('+' in mode):
self._flags |= self.FLAG_READ
if ('w' in mode) or ('+' in mode):
self._flags |= self.FLAG_WRITE
if ('a' in mode):
self._flags |= self.FLAG_WRITE | self.FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if ('b' in mode):
self._flags |= self.FLAG_BINARY
if ('U' in mode):
self._flags |= self.FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None |
<SYSTEM_TASK:>
Validate a REST API version is supported by the library and target array.
<END_TASK>
<USER_TASK:>
Description:
def _check_rest_version(self, version):
"""Validate a REST API version is supported by the library and target array.""" |
version = str(version)
if version not in self.supported_rest_versions:
msg = "Library is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
array_rest_versions = self._list_available_rest_versions()
if version not in array_rest_versions:
msg = "Array is incompatible with REST API version {0}"
raise ValueError(msg.format(version))
return LooseVersion(version) |
<SYSTEM_TASK:>
Return the newest REST API version supported by target array.
<END_TASK>
<USER_TASK:>
Description:
def _choose_rest_version(self):
"""Return the newest REST API version supported by target array.""" |
versions = self._list_available_rest_versions()
versions = [LooseVersion(x) for x in versions if x in self.supported_rest_versions]
if versions:
return max(versions)
else:
raise PureError(
"Library is incompatible with all REST API versions supported"
"by the target array.") |
<SYSTEM_TASK:>
Return a list of the REST API versions supported by the array
<END_TASK>
<USER_TASK:>
Description:
def _list_available_rest_versions(self):
"""Return a list of the REST API versions supported by the array""" |
url = "https://{0}/api/api_version".format(self._target)
data = self._request("GET", url, reestablish_session=False)
return data["version"] |
<SYSTEM_TASK:>
Use username and password to obtain and return an API token.
<END_TASK>
<USER_TASK:>
Description:
def _obtain_api_token(self, username, password):
"""Use username and password to obtain and return an API token.""" |
data = self._request("POST", "auth/apitoken",
{"username": username, "password": password},
reestablish_session=False)
return data["api_token"] |
<SYSTEM_TASK:>
Create snapshots of the listed volumes.
<END_TASK>
<USER_TASK:>
Description:
def create_snapshots(self, volumes, **kwargs):
"""Create snapshots of the listed volumes.
:param volumes: List of names of the volumes to snapshot.
:type volumes: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the new snapshots.
:rtype: ResponseDict
""" |
data = {"source": volumes, "snap": True}
data.update(kwargs)
return self._request("POST", "volume", data) |
<SYSTEM_TASK:>
Create a volume and return a dictionary describing it.
<END_TASK>
<USER_TASK:>
Description:
def create_volume(self, volume, size, **kwargs):
"""Create a volume and return a dictionary describing it.
:param volume: Name of the volume to be created.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the created volume.
:rtype: ResponseDict
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ======
""" |
data = {"size": size}
data.update(kwargs)
return self._request("POST", "volume/{0}".format(volume), data) |
<SYSTEM_TASK:>
Extend a volume to a new, larger size.
<END_TASK>
<USER_TASK:>
Description:
def extend_volume(self, volume, size):
"""Extend a volume to a new, larger size.
:param volume: Name of the volume to be extended.
:type volume: str
:type size: int or str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:returns: A dictionary mapping "name" to volume and "size" to the volume's
new size in bytes.
:rtype: ResponseDict
.. note::
The new size must be larger than the volume's old size.
.. note::
The maximum volume size supported is 4 petabytes (4 * 2^50).
.. note::
If size is an int, it must be a multiple of 512.
.. note::
If size is a string, it must consist of an integer followed by a
valid suffix.
Accepted Suffixes
====== ======== ======
Suffix Size Bytes
====== ======== ======
S Sector (2^9)
K Kilobyte (2^10)
M Megabyte (2^20)
G Gigabyte (2^30)
T Terabyte (2^40)
P Petabyte (2^50)
====== ======== ======
""" |
return self.set_volume(volume, size=size, truncate=False) |
<SYSTEM_TASK:>
Truncate a volume to a new, smaller size.
<END_TASK>
<USER_TASK:>
Description:
def truncate_volume(self, volume, size):
"""Truncate a volume to a new, smaller size.
:param volume: Name of the volume to truncate.
:type volume: str
:param size: Size in bytes, or string representing the size of the
volume to be created.
:type size: int or str
:returns: A dictionary mapping "name" to volume and "size" to the
volume's new size in bytes.
:rtype: ResponseDict
.. warning::
Data may be irretrievably lost in this operation.
.. note::
A snapshot of the volume in its previous state is taken and
immediately destroyed, but it is available for recovery for
the 24 hours following the truncation.
""" |
return self.set_volume(volume, size=size, truncate=True) |
<SYSTEM_TASK:>
Create a connection between a host and a volume.
<END_TASK>
<USER_TASK:>
Description:
def connect_host(self, host, volume, **kwargs):
"""Create a connection between a host and a volume.
:param host: Name of host to connect to volume.
:type host: str
:param volume: Name of volume to connect to host.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST host/:host/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the host and volume.
:rtype: ResponseDict
""" |
return self._request(
"POST", "host/{0}/volume/{1}".format(host, volume), kwargs) |
<SYSTEM_TASK:>
Create a shared connection between a host group and a volume.
<END_TASK>
<USER_TASK:>
Description:
def connect_hgroup(self, hgroup, volume, **kwargs):
"""Create a shared connection between a host group and a volume.
:param hgroup: Name of hgroup to connect to volume.
:type hgroup: str
:param volume: Name of volume to connect to hgroup.
:type volume: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST hgroup/:hgroup/volume/:volume**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection between the hgroup and volume.
:rtype: ResponseDict
""" |
return self._request(
"POST", "hgroup/{0}/volume/{1}".format(hgroup, volume), kwargs) |
<SYSTEM_TASK:>
Return a dictionary describing the connected offload target.
<END_TASK>
<USER_TASK:>
Description:
def get_offload(self, name, **kwargs):
"""Return a dictionary describing the connected offload target.
:param name: Name of offload target to get information about.
:type name: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET offload/:offload**
:type \*\*kwargs: optional
:returns: A dictionary describing the offload connection.
:rtype: ResponseDict
""" |
# Unbox if a list to accommodate a bug in REST 1.14
result = self._request("GET", "offload/{0}".format(name), kwargs)
if isinstance(result, list):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result |
<SYSTEM_TASK:>
Create a subnet.
<END_TASK>
<USER_TASK:>
Description:
def create_subnet(self, subnet, prefix, **kwargs):
"""Create a subnet.
:param subnet: Name of subnet to be created.
:type subnet: str
:param prefix: Routing prefix of subnet to be created.
:type prefix: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST subnet/:subnet**
:type \*\*kwargs: optional
:returns: A dictionary describing the created subnet.
:rtype: ResponseDict
.. note::
prefix should be specified as an IPv4 CIDR address.
("xxx.xxx.xxx.xxx/nn", representing prefix and prefix length)
.. note::
Requires use of REST API 1.5 or later.
""" |
data = {"prefix": prefix}
data.update(kwargs)
return self._request("POST", "subnet/{0}".format(subnet), data) |
<SYSTEM_TASK:>
Create a vlan interface
<END_TASK>
<USER_TASK:>
Description:
def create_vlan_interface(self, interface, subnet, **kwargs):
"""Create a vlan interface
:param interface: Name of interface to be created.
:type interface: str
:param subnet: Subnet associated with interface to be created
:type subnet: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST network/vif/:vlan_interface**
:type \*\*kwargs: optional
:returns: A dictionary describing the created interface
:rtype: ResponseDict
.. note::
Requires use of REST API 1.5 or later.
""" |
data = {"subnet": subnet}
data.update(kwargs)
return self._request("POST", "network/vif/{0}".format(interface), data) |
<SYSTEM_TASK:>
Set an admin's password.
<END_TASK>
<USER_TASK:>
Description:
def set_password(self, admin, new_password, old_password):
"""Set an admin's password.
:param admin: Name of admin whose password is to be set.
:type admin: str
:param new_password: New password for admin.
:type new_password: str
:param old_password: Current password of admin.
:type old_password: str
:returns: A dictionary mapping "name" to admin.
:rtype: ResponseDict
""" |
return self.set_admin(admin, password=new_password,
old_password=old_password) |
<SYSTEM_TASK:>
Disable the directory service.
<END_TASK>
<USER_TASK:>
Description:
def disable_directory_service(self, check_peer=False):
"""Disable the directory service.
:param check_peer: If True, disables server authenticity
enforcement. If False, disables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
""" |
if check_peer:
return self.set_directory_service(check_peer=False)
return self.set_directory_service(enabled=False) |
<SYSTEM_TASK:>
Enable the directory service.
<END_TASK>
<USER_TASK:>
Description:
def enable_directory_service(self, check_peer=False):
"""Enable the directory service.
:param check_peer: If True, enables server authenticity
enforcement. If False, enables directory
service integration.
:type check_peer: bool, optional
:returns: A dictionary describing the status of the directory service.
:rtype: ResponseDict
""" |
if check_peer:
return self.set_directory_service(check_peer=True)
return self.set_directory_service(enabled=True) |
<SYSTEM_TASK:>
Create an SNMP manager.
<END_TASK>
<USER_TASK:>
Description:
def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
""" |
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data) |
<SYSTEM_TASK:>
Connect this array with another one.
<END_TASK>
<USER_TASK:>
Description:
def connect_array(self, address, connection_key, connection_type, **kwargs):
"""Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
""" |
data = {"management_address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data) |
<SYSTEM_TASK:>
Create snapshot of pgroup from specified source.
<END_TASK>
<USER_TASK:>
Description:
def create_pgroup_snapshot(self, source, **kwargs):
"""Create snapshot of pgroup from specified source.
:param source: Name of pgroup of which to take snapshot.
:type source: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A dictionary describing the created snapshot.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
""" |
# In REST 1.4, support was added for snapshotting multiple pgroups. As a
# result, the endpoint response changed from an object to an array of
# objects. To keep the response type consistent between REST versions,
# we unbox the response when creating a single snapshot.
result = self.create_pgroup_snapshots([source], **kwargs)
if self._rest_version >= LooseVersion("1.4"):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result |
<SYSTEM_TASK:>
Create snapshots of pgroups from specified sources.
<END_TASK>
<USER_TASK:>
Description:
def create_pgroup_snapshots(self, sources, **kwargs):
"""Create snapshots of pgroups from specified sources.
:param sources: Names of pgroups of which to take snapshots.
:type sources: list of str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pgroup**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing the created snapshots.
:rtype: ResponseList
.. note::
Requires use of REST API 1.2 or later.
""" |
data = {"source": sources, "snap": True}
data.update(kwargs)
return self._request("POST", "pgroup", data) |
<SYSTEM_TASK:>
Eradicate a destroyed pgroup.
<END_TASK>
<USER_TASK:>
Description:
def eradicate_pgroup(self, pgroup, **kwargs):
"""Eradicate a destroyed pgroup.
:param pgroup: Name of pgroup to be eradicated.
:type pgroup: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pgroup/:pgroup**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pgroup.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.2 or later.
""" |
eradicate = {"eradicate": True}
eradicate.update(kwargs)
return self._request("DELETE", "pgroup/{0}".format(pgroup), eradicate) |
<SYSTEM_TASK:>
Clone an existing pod to a new one.
<END_TASK>
<USER_TASK:>
Description:
def clone_pod(self, source, dest, **kwargs):
"""Clone an existing pod to a new one.
:param source: Name of the pod to be cloned.
:type source: str
:param dest: Name of the target pod to clone into.
:type dest: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST pod/:pod**
:type \*\*kwargs: optional
:returns: A dictionary describing the created pod
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later.
""" |
data = {"source": source}
data.update(kwargs)
return self._request("POST", "pod/{0}".format(dest), data) |
<SYSTEM_TASK:>
Remove arrays from a pod.
<END_TASK>
<USER_TASK:>
Description:
def remove_pod(self, pod, array, **kwargs):
"""Remove arrays from a pod.
:param pod: Name of the pod.
:type pod: str
:param array: Array to remove from pod.
:type array: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**DELETE pod/:pod/array/:array**
:type \*\*kwargs: optional
:returns: A dictionary mapping "name" to pod and "array" to the pod's
new array list.
:rtype: ResponseDict
.. note::
Requires use of REST API 1.13 or later.
""" |
return self._request("DELETE", "pod/{0}/array/{1}".format(pod, array), kwargs) |
<SYSTEM_TASK:>
Return an iterator over all pages of a REST operation.
<END_TASK>
<USER_TASK:>
Description:
def page_through(page_size, function, *args, **kwargs):
"""Return an iterator over all pages of a REST operation.
:param page_size: Number of elements to retrieve per call.
:param function: FlashArray function that accepts limit as an argument.
:param \*args: Positional arguments to be passed to function.
:param \*\*kwargs: Keyword arguments to be passed to function.
:returns: An iterator of tuples containing a page of results for the
function(\*args, \*\*kwargs) and None, or None and a PureError
if a call to retrieve a page fails.
:rtype: iterator
.. note::
Requires use of REST API 1.7 or later.
Only works with functions that accept limit as an argument.
Iterator will retrieve page_size elements per call
Iterator will yield None and an error if a call fails. The next
call will repeat the same call, unless the caller sends in an
alternate page token.
""" |
kwargs["limit"] = page_size
def get_page(token):
page_kwargs = kwargs.copy()
if token:
page_kwargs["token"] = token
return function(*args, **page_kwargs)
def page_generator():
token = None
while True:
try:
response = get_page(token)
token = response.headers.get("x-next-token")
except PureError as err:
yield None, err
else:
if response:
sent_token = yield response, None
if sent_token is not None:
token = sent_token
else:
return
return page_generator() |
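A usage sketch of the iterator contract (hedged: reuses the hypothetical `array` client from the volume examples; list_volumes is assumed to accept a limit argument):

pages = page_through(500, array.list_volumes)
for volumes, err in pages:
    if err is not None:
        break        # this page failed; iterating again retries the same call
    for vol in volumes:
        print(vol['name'])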
<SYSTEM_TASK:>
Read as close to N bytes as possible, blocking as long as necessary.
<END_TASK>
<USER_TASK:>
Description:
def read_all(self, n, check_rekey=False):
"""
Read as close to N bytes as possible, blocking as long as necessary.
@param n: number of bytes to read
@type n: int
@return: the data read
@rtype: str
@raise EOFError: if the socket was closed before all the bytes could
be read
""" |
out = ''
# handle over-reading from reading the banner line
if len(self.__remainder) > 0:
out = self.__remainder[:n]
self.__remainder = self.__remainder[n:]
n -= len(out)
if PY22:
return self._py22_read_all(n, out)
while n > 0:
got_timeout = False
try:
x = self.__socket.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
except socket.timeout:
got_timeout = True
except socket.error, e:
# on Linux, sometimes instead of socket.timeout, we get
# EAGAIN. this is a bug in recent (> 2.6.9) kernels but
# we need to work around it.
if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
got_timeout = True
elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
# syscall interrupted; try again
pass
elif self.__closed:
raise EOFError()
else:
raise
if got_timeout:
if self.__closed:
raise EOFError()
if check_rekey and (len(out) == 0) and self.__need_rekey:
raise NeedRekeyException()
self._check_keepalive()
return out |
<SYSTEM_TASK:>
Write a block of data using the current cipher, as an SSH block.
<END_TASK>
<USER_TASK:>
Description:
def send_message(self, data):
"""
Write a block of data using the current cipher, as an SSH block.
""" |
# encrypt this sucka
data = str(data)
cmd = ord(data[0])
if cmd in MSG_NAMES:
cmd_name = MSG_NAMES[cmd]
else:
cmd_name = '$%x' % cmd
orig_len = len(data)
self.__write_lock.acquire()
try:
if self.__compress_engine_out is not None:
data = self.__compress_engine_out(data)
packet = self._build_packet(data)
if self.__dump_packets:
self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
self._log(DEBUG, util.format_binary(packet, 'OUT: '))
if self.__block_engine_out != None:
out = self.__block_engine_out.encrypt(packet)
else:
out = packet
# + mac
if self.__block_engine_out != None:
payload = struct.pack('>I', self.__sequence_number_out) + packet
out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
self.__sequence_number_out = (self.__sequence_number_out + 1) & 0xffffffffL
self.write_all(out)
self.__sent_bytes += len(out)
self.__sent_packets += 1
if ((self.__sent_packets >= self.REKEY_PACKETS) or (self.__sent_bytes >= self.REKEY_BYTES)) \
and not self.__need_rekey:
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
(self.__sent_packets, self.__sent_bytes))
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
self._trigger_rekey()
finally:
self.__write_lock.release() |
<SYSTEM_TASK:>
Parses the given line of text to find the names for the host,
<END_TASK>
<USER_TASK:>
Description:
def from_line(cls, line):
"""
Parses the given line of text to find the names for the host,
the type of key, and the key data. The line is expected to be in the
format used by the openssh known_hosts file.
Lines are expected to not have leading or trailing whitespace.
We don't bother to check for comments or empty lines. All of
that should be taken care of before sending the line to us.
@param line: a line from an OpenSSH known_hosts file
@type line: str
""" |
fields = line.split(' ')
if len(fields) < 3:
# Bad number of fields
return None
fields = fields[:3]
names, keytype, key = fields
names = names.split(',')
# Decide what kind of key we're looking at and create an object
# to hold it accordingly.
try:
if keytype == 'ssh-rsa':
key = RSAKey(data=base64.decodestring(key))
elif keytype == 'ssh-dss':
key = DSSKey(data=base64.decodestring(key))
else:
return None
except binascii.Error, e:
raise InvalidHostKey(line, e)
return cls(names, key) |
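A usage sketch (hedged: assumes this classmethod lives on a HostKeyEntry class, as in the old paramiko/ssh known-hosts code; the key blob must be a complete base64 string and is truncated here only as a placeholder):

line = 'example.com,93.184.216.34 ssh-rsa AAAAB3NzaC1yc2E...'
entry = HostKeyEntry.from_line(line)
if entry is not None:
    entry.hostnames         # ['example.com', '93.184.216.34']
    entry.key.get_name()    # 'ssh-rsa'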
<SYSTEM_TASK:>
Returns a string in OpenSSH known_hosts file format, or None if
<END_TASK>
<USER_TASK:>
Description:
def to_line(self):
"""
Returns a string in OpenSSH known_hosts file format, or None if
the object is not in a valid state. A trailing newline is
included.
""" |
if self.valid:
return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(),
self.key.get_base64())
return None |
<SYSTEM_TASK:>
Return True if the given key is associated with the given hostname
<END_TASK>
<USER_TASK:>
Description:
def check(self, hostname, key):
"""
Return True if the given key is associated with the given hostname
in this dictionary.
@param hostname: hostname (or IP) of the SSH server
@type hostname: str
@param key: the key to check
@type key: L{PKey}
@return: C{True} if the key is associated with the hostname; C{False}
if not
@rtype: bool
""" |
k = self.lookup(hostname)
if k is None:
return False
host_key = k.get(key.get_name(), None)
if host_key is None:
return False
return str(host_key) == str(key) |
<SYSTEM_TASK:>
Return a "hashed" form of the hostname, as used by openssh when storing
<END_TASK>
<USER_TASK:>
Description:
def hash_host(hostname, salt=None):
"""
Return a "hashed" form of the hostname, as used by openssh when storing
hashed hostnames in the known_hosts file.
@param hostname: the hostname to hash
@type hostname: str
@param salt: optional salt to use when hashing (must be 20 bytes long)
@type salt: str
@return: the hashed hostname
@rtype: str
""" |
if salt is None:
salt = rng.read(SHA.digest_size)
else:
if salt.startswith('|1|'):
salt = salt.split('|')[2]
salt = base64.decodestring(salt)
assert len(salt) == SHA.digest_size
hmac = HMAC.HMAC(salt, hostname, SHA).digest()
hostkey = '|1|%s|%s' % (base64.encodestring(salt), base64.encodestring(hmac))
return hostkey.replace('\n', '') |
<SYSTEM_TASK:>
Return a dict of config options for a given hostname.
<END_TASK>
<USER_TASK:>
Description:
def lookup(self, hostname):
"""
Return a dict of config options for a given hostname.
The host-matching rules of OpenSSH's C{ssh_config} man page are used,
which means that all configuration options from matching host
specifications are merged, with more specific hostmasks taking
precedence. In other words, if C{"Port"} is set under C{"Host *"}
and also C{"Host *.example.com"}, and the lookup is for
C{"ssh.example.com"}, then the port entry for C{"Host *.example.com"}
will win out.
The keys in the returned dict are all normalized to lowercase (look for
C{"port"}, not C{"Port"}. No other processing is done to the keys or
values.
@param hostname: the hostname to lookup
@type hostname: str
""" |
matches = [x for x in self._config if fnmatch.fnmatch(hostname, x['host'])]
# Move * to the end
_star = matches.pop(0)
matches.append(_star)
ret = {}
for m in matches:
for k,v in m.iteritems():
if not k in ret:
ret[k] = v
ret = self._expand_variables(ret, hostname)
del ret['host']
return ret |
<SYSTEM_TASK:>
Construct a URL from the given components.
<END_TASK>
<USER_TASK:>
Description:
def url(proto, server, port=None, uri=None):
"""Construct a URL from the given components.""" |
url_parts = [proto, '://', server]
if port:
port = int(port)
if port < 1 or port > 65535:
raise ValueError('invalid port value')
if not ((proto == 'http' and port == 80) or
(proto == 'https' and port == 443)):
url_parts.append(':')
url_parts.append(str(port))
if uri:
url_parts.append('/')
url_parts.append(requests.utils.quote(uri.strip('/')))
url_parts.append('/')
return ''.join(url_parts) |
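Worked examples of the port-elision rule, derived from the code above:

url('https', 'example.com')                 # 'https://example.com'
url('https', 'example.com', 443)            # 'https://example.com'  (default port elided)
url('http', 'example.com', 8080)            # 'http://example.com:8080'
url('http', 'example.com', 8080, 'stcapi')  # 'http://example.com:8080/stcapi/'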
<SYSTEM_TASK:>
Create a URL from the specified parts.
<END_TASK>
<USER_TASK:>
Description:
def make_url(self, container=None, resource=None, query_items=None):
"""Create a URL from the specified parts.""" |
pth = [self._base_url]
if container:
pth.append(container.strip('/'))
if resource:
pth.append(resource)
else:
pth.append('')
url = '/'.join(pth)
if isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
p = requests.PreparedRequest()
p.prepare_url(url, query_items)
return p.url |
<SYSTEM_TASK:>
Upload a file using multi-part encoding.
<END_TASK>
<USER_TASK:>
Description:
def upload_file_mp(self, container, src_file_path, dst_name=None,
content_type=None):
"""Upload a file using multi-part encoding.""" |
if not os.path.exists(src_file_path):
raise RuntimeError('file not found: ' + src_file_path)
if not dst_name:
dst_name = os.path.basename(src_file_path)
if not content_type:
content_type = "application/octet.stream"
url = self.make_url(container, None, None)
headers = self._base_headers
with open(src_file_path, 'rb') as up_file:
files = {'file': (dst_name, up_file, content_type)}
try:
rsp = requests.post(url, headers=headers, files=files,
timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
return self._handle_response(rsp) |
<SYSTEM_TASK:>
Create a new session or attach to existing.
<END_TASK>
<USER_TASK:>
Description:
def new_session(self, server=None, session_name=None, user_name=None,
existing_session=None):
"""Create a new session or attach to existing.
Normally, this function is called automatically, and gets its parameter
values from the environment. It is provided as a public function for
cases when extra control over session creation is required in an
automation script that is adapted to use ReST.
WARNING: This function is not part of the original StcPython.py and if
called directly by an automation script, then that script will not be
able to revert to using the non-ReST API until the call to this
function is removed.
Arguments:
server -- STC server (Lab Server) address. If not set get
value from STC_SERVER_ADDRESS environment variable.
session_name -- Name part of session ID. If not set get value from
STC_SESSION_NAME environment variable.
user_name -- User portion of session ID. If not set get name of
user this script is running as.
existing_session -- Behavior when session already exists. Recognized
values are 'kill' and 'join'. If not set get value
from EXISTING_SESSION environment variable. If not
set to recognized value, raise exception if session
already exists.
See also: stchttp.StcHttp(), stchttp.new_session()
Return:
The internal StcHttp object that is used for this session. This allows
the caller to perform additional interactions with the STC ReST API
beyond what the adapter provides.
""" |
if not server:
server = os.environ.get('STC_SERVER_ADDRESS')
if not server:
raise EnvironmentError('STC_SERVER_ADDRESS not set')
self._stc = stchttp.StcHttp(server)
if not session_name:
session_name = os.environ.get('STC_SESSION_NAME')
if not session_name or session_name == '__NEW_TEST_SESSION__':
session_name = None
if not user_name:
try:
# Try to get the name of the current user.
user_name = getpass.getuser()
except:
pass
if not existing_session:
# Try to get existing_session from environ if not passed in.
existing_session = os.environ.get('EXISTING_SESSION')
if existing_session:
existing_session = existing_session.lower()
if existing_session == 'kill':
# Kill any existing session and create a new one.
self._stc.new_session(user_name, session_name, True)
return self._stc
if existing_session == 'join':
# Create a new session, or join if already exists.
try:
self._stc.new_session(user_name, session_name, False)
except RuntimeError as e:
if str(e).find('already exists') >= 0:
sid = ' - '.join((session_name, user_name))
self._stc.join_session(sid)
else:
raise
return self._stc
# Create a new session, raise exception if session already exists.
self._stc.new_session(user_name, session_name, False)
return self._stc |
<SYSTEM_TASK:>
This isn't used in Production,
<END_TASK>
<USER_TASK:>
Description:
def setup(app):
"""
This isn't used in Production,
but allows this module to be used as a standalone extension.
""" |
app.add_directive('readthedocs-embed', EmbedDirective)
app.add_config_value('readthedocs_embed_project', '', 'html')
app.add_config_value('readthedocs_embed_version', '', 'html')
app.add_config_value('readthedocs_embed_doc', '', 'html')
return app |
<SYSTEM_TASK:>
Point media files at our media server.
<END_TASK>
<USER_TASK:>
Description:
def finalize_media(app):
"""Point media files at our media server.""" |
if (app.builder.name == 'readthedocssinglehtmllocalmedia' or
app.builder.format != 'html' or
not hasattr(app.builder, 'script_files')):
return # Use local media for downloadable files
# Pull project data from conf.py if it exists
context = app.builder.config.html_context
STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
js_file = '{}javascript/readthedocs-doc-embed.js'.format(STATIC_URL)
if sphinx.version_info < (1, 8):
app.builder.script_files.append(js_file)
else:
app.add_js_file(js_file) |
<SYSTEM_TASK:>
Add Read the Docs content to Sphinx body content.
<END_TASK>
<USER_TASK:>
Description:
def update_body(app, pagename, templatename, context, doctree):
"""
Add Read the Docs content to Sphinx body content.
This is the most reliable way to inject our content into the page.
""" |
STATIC_URL = context.get('STATIC_URL', DEFAULT_STATIC_URL)
online_builders = [
'readthedocs', 'readthedocsdirhtml', 'readthedocssinglehtml'
]
if app.builder.name == 'readthedocssinglehtmllocalmedia':
if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
theme_css = '_static/css/theme.css'
else:
theme_css = '_static/css/badge_only.css'
elif app.builder.name in online_builders:
if 'html_theme' in context and context['html_theme'] == 'sphinx_rtd_theme':
theme_css = '%scss/sphinx_rtd_theme.css' % STATIC_URL
else:
theme_css = '%scss/badge_only.css' % STATIC_URL
else:
# Only insert on our HTML builds
return
inject_css = True
# Starting at v0.4.0 of the sphinx theme, the theme CSS should not be injected
# This decouples the theme CSS (which is versioned independently) from readthedocs.org
if theme_css.endswith('sphinx_rtd_theme.css'):
try:
import sphinx_rtd_theme
inject_css = LooseVersion(sphinx_rtd_theme.__version__) < LooseVersion('0.4.0')
except ImportError:
pass
if inject_css and theme_css not in app.builder.css_files:
if sphinx.version_info < (1, 8):
app.builder.css_files.insert(0, theme_css)
else:
app.add_css_file(theme_css)
# This is monkey patched on the signal because we can't know what the user
# has done with their `app.builder.templates` before now.
if not hasattr(app.builder.templates.render, '_patched'):
# Janky monkey patch of template rendering to add our content
old_render = app.builder.templates.render
def rtd_render(self, template, render_context):
"""
A decorator that renders the content with the users template renderer,
then adds the Read the Docs HTML content at the end of body.
"""
# Render Read the Docs content
template_context = render_context.copy()
template_context['rtd_css_url'] = '{}css/readthedocs-doc-embed.css'.format(STATIC_URL)
template_context['rtd_analytics_url'] = '{}javascript/readthedocs-analytics.js'.format(
STATIC_URL,
)
source = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'_templates',
'readthedocs-insert.html.tmpl'
)
templ = open(source).read()
rtd_content = app.builder.templates.render_string(templ, template_context)
# Handle original render function
content = old_render(template, render_context)
end_body = content.lower().find('</head>')
# Insert our content just before the closing </head> tag.
if end_body != -1:
content = content[:end_body] + rtd_content + "\n" + content[end_body:]
else:
log.debug("File doesn't look like HTML. Skipping RTD content addition")
return content
rtd_render._patched = True
app.builder.templates.render = types.MethodType(rtd_render,
app.builder.templates) |
<SYSTEM_TASK:>
Generate JSON artifacts for each page.
<END_TASK>
<USER_TASK:>
Description:
def generate_json_artifacts(app, pagename, templatename, context, doctree):
"""
Generate JSON artifacts for each page.
This way we can skip generating this in other build step.
""" |
try:
# We need to get the output directory where the docs are built
# _build/json.
build_json = os.path.abspath(
os.path.join(app.outdir, '..', 'json')
)
outjson = os.path.join(build_json, pagename + '.fjson')
outdir = os.path.dirname(outjson)
if not os.path.exists(outdir):
os.makedirs(outdir)
with open(outjson, 'w+') as json_file:
to_context = {
key: context.get(key, '')
for key in KEYS
}
json.dump(to_context, json_file, indent=4)
except TypeError:
log.exception(
'Failed to encode JSON for page {page}'.format(page=outjson)
)
except IOError:
log.exception(
'Failed to save JSON output for page {page}'.format(page=outjson)
)
except Exception as e:
log.exception(
'Failure in JSON search dump for page {page}'.format(page=outjson)
) |
<SYSTEM_TASK:>
Copy and patch searchtools
<END_TASK>
<USER_TASK:>
Description:
def _copy_searchtools(self, renderer=None):
"""Copy and patch searchtools
This uses the included Sphinx version's searchtools, but patches it to
remove automatic initialization. This is a fork of
``sphinx.util.fileutil.copy_asset``
""" |
log.info(bold('copying searchtools... '), nonl=True)
if sphinx.version_info < (1, 8):
search_js_file = 'searchtools.js_t'
else:
search_js_file = 'searchtools.js'
path_src = os.path.join(
package_dir, 'themes', 'basic', 'static', search_js_file
)
if os.path.exists(path_src):
path_dest = os.path.join(self.outdir, '_static', 'searchtools.js')
if renderer is None:
# Sphinx 1.4 used the renderer from the existing builder, but
# the pattern for Sphinx 1.5 is to pass in a renderer separate
# from the builder. This supports both patterns for future
# compatibility
if sphinx.version_info < (1, 5):
renderer = self.templates
else:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with codecs.open(path_src, 'r', encoding='utf-8') as h_src:
with codecs.open(path_dest, 'w', encoding='utf-8') as h_dest:
data = h_src.read()
data = self.REPLACEMENT_PATTERN.sub(self.REPLACEMENT_TEXT, data)
h_dest.write(renderer.render_string(
data,
self.get_static_readthedocs_context()
))
else:
log.warning('Missing {}'.format(search_js_file))
log.info('done') |
<SYSTEM_TASK:>
Return dictionary of STC and API information.
<END_TASK>
<USER_TASK:>
Description:
def stc_system_info(stc_addr):
"""Return dictionary of STC and API information.
If a session already exists, then use it to get STC information and avoid
taking the time to start a new session. A session is necessary to get
STC information.
""" |
stc = stchttp.StcHttp(stc_addr)
sessions = stc.sessions()
if sessions:
# If a session already exists, use it to get STC information.
stc.join_session(sessions[0])
sys_info = stc.system_info()
else:
# Create a new session to get STC information.
stc.new_session('anonymous')
try:
sys_info = stc.system_info()
finally:
# Make sure the temporary session in terminated.
stc.end_session()
return sys_info |
<SYSTEM_TASK:>
Create a new test session.
<END_TASK>
<USER_TASK:>
Description:
def new_session(self, user_name=None, session_name=None,
kill_existing=False, analytics=None):
"""Create a new test session.
The test session is identified by the specified user_name and optional
session_name parameters. If a session name is not specified, then the
server will create one.
Arguments:
user_name -- User name part of session ID.
session_name -- Session name part of session ID.
kill_existing -- If there is an existing session, with the same session
name and user name, then terminate it before creating
a new session
analytics -- Optional boolean value to disable or enable analytics
for new session. None will use setting configured on
server.
Return:
True if session started, False if session was already started.
""" |
if self.started():
return False
if not session_name or not session_name.strip():
session_name = ''
if not user_name or not user_name.strip():
user_name = ''
params = {'userid': user_name, 'sessionname': session_name}
if analytics not in (None, ''):
params['analytics'] = str(analytics).lower()
try:
status, data = self._rest.post_request('sessions', None, params)
except resthttp.RestHttpError as e:
if kill_existing and str(e).find('already exists') >= 0:
self.end_session('kill', ' - '.join((session_name, user_name)))
else:
raise RuntimeError('failed to create session: ' + str(e))
# Starting session
if self._dbg_print:
print('===> starting session')
status, data = self._rest.post_request('sessions', None, params)
if self._dbg_print:
print('===> OK, started')
sid = data['session_id']
if self._dbg_print:
print('===> session ID:', sid)
print('===> URL:', self._rest.make_url('sessions', sid))
self._rest.add_header('X-STC-API-Session', sid)
self._sid = sid
return sid |
<SYSTEM_TASK:>
End this test session.
<END_TASK>
<USER_TASK:>
Description:
def end_session(self, end_tcsession=True, sid=None):
"""End this test session.
A session can be ended in three ways, depending on the value of the
end_tcsession parameter:
- end_tcsession=None:
Stop using session locally, do not contact server.
- end_tcsession=False:
End client controller, but leave test session on server.
- end_tcsession=True:
End client controller and terminate test session (default).
- end_tcsession='kill':
Forcefully terminate test session.
Specifying end_tcsession=False is useful to do before attaching an STC
GUI or legacy automation script, so that there are not multiple
controllers to interfere with each other.
When the session is ended, it is no longer available. Clients should
export any result or log files, that they want to preserve, before the
session is ended.
Arguments
end_tcsession -- How to end the session (see above)
sid -- ID of session to end. None to use current session.
Return:
True if session ended, False if session was not started.
""" |
if not sid or sid == self._sid:
if not self.started():
return False
sid = self._sid
self._sid = None
self._rest.del_header('X-STC-API-Session')
if end_tcsession is None:
if self._dbg_print:
print('===> detached from session')
return True
try:
if end_tcsession:
if self._dbg_print:
print('===> deleting session:', sid)
if end_tcsession == 'kill':
status, data = self._rest.delete_request(
'sessions', sid, 'kill')
else:
status, data = self._rest.delete_request('sessions', sid)
count = 0
while 1:
time.sleep(5)
if self._dbg_print:
print('===> checking if session ended')
ses_list = self.sessions()
if not ses_list or sid not in ses_list:
break
count += 1
if count == 3:
raise RuntimeError("test session has not stopped")
if self._dbg_print:
print('===> ok - deleted test session')
else:
# Ending client session is supported on version >= 2.1.5
if self._get_api_version() < (2, 1, 5):
raise RuntimeError('option not available on server')
status, data = self._rest.delete_request(
'sessions', sid, 'false')
if self._dbg_print:
print('===> OK - detached REST API from test session')
except resthttp.RestHttpError as e:
raise RuntimeError('failed to end session: ' + str(e))
return True |
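The shutdown modes, sketched independently (each call assumes a freshly started session on `stc`):

stc.end_session(None)      # detach locally; leave the server session running
stc.end_session(False)     # stop the client controller, keep the test session
stc.end_session()          # default True: terminate the test session
stc.end_session('kill')    # forcefully terminate a stuck session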
<SYSTEM_TASK:>
Get information on session.
<END_TASK>
<USER_TASK:>
Description:
def session_info(self, session_id=None):
"""Get information on session.
If session_id is None, the default, then return information about this
session. If a session ID is given, then get information about that
session.
Arguments:
session_id -- Id of session to get info for, if not this session.
Return:
Dictionary of session information.
""" |
if not session_id:
if not self.started():
return {}
session_id = self._sid
status, data = self._rest.get_request('sessions', session_id)
return data |
<SYSTEM_TASK:>
Get list of files, for this session, on server.
<END_TASK>
<USER_TASK:>
Description:
def files(self):
"""Get list of files, for this session, on server.""" |
self._check_session()
status, data = self._rest.get_request('files')
return data |
<SYSTEM_TASK:>
Get the BLL version this session is connected to.
<END_TASK>
<USER_TASK:>
Description:
def bll_version(self):
"""Get the BLL version this session is connected to.
Return:
Version string if session started. None if session not started.
""" |
if not self.started():
return None
status, data = self._rest.get_request('objects', 'system1',
['version', 'name'])
return data['version'] |
<SYSTEM_TASK:>
Delete the specified object.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, handle):
"""Delete the specified object.
Arguments:
handle -- Handle of object to delete.
""" |
self._check_session()
self._rest.delete_request('objects', str(handle)) |
<SYSTEM_TASK:>
Sets or modifies one or more object attributes or relations.
<END_TASK>
<USER_TASK:>
Description:
def config(self, handle, attributes=None, **kwattrs):
"""Sets or modifies one or more object attributes or relations.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.config('port1', location='//10.1.2.3/1/1')
stc.config('port2', {'location': '//10.1.2.3/1/2'})
Arguments:
handle -- Handle of object to modify.
attributes -- Dictionary of attributes (name-value pairs).
kwattrs -- Optional keyword attributes (name=value pairs).
""" |
self._check_session()
if kwattrs:
if attributes:
attributes.update(kwattrs)
else:
attributes = kwattrs
self._rest.put_request('objects', str(handle), attributes) |
<SYSTEM_TASK:>
Get list of chassis known to test session.
<END_TASK>
<USER_TASK:>
Description:
def chassis(self):
"""Get list of chassis known to test session.""" |
self._check_session()
status, data = self._rest.get_request('chassis')
return data |
<SYSTEM_TASK:>
Get information about the specified chassis.
<END_TASK>
<USER_TASK:>
Description:
def chassis_info(self, chassis):
"""Get information about the specified chassis.""" |
if not chassis or not isinstance(chassis, str):
raise RuntimeError('missing chassis address')
self._check_session()
status, data = self._rest.get_request('chassis', chassis)
return data |
<SYSTEM_TASK:>
Get list of connections.
<END_TASK>
<USER_TASK:>
Description:
def connections(self):
"""Get list of connections.""" |
self._check_session()
status, data = self._rest.get_request('connections')
return data |
<SYSTEM_TASK:>
Get Boolean connected status of the specified chassis.
<END_TASK>
<USER_TASK:>
Description:
def is_connected(self, chassis):
"""Get Boolean connected status of the specified chassis.""" |
self._check_session()
try:
status, data = self._rest.get_request('connections', chassis)
except resthttp.RestHttpError as e:
if int(e) == 404:
# 404 NOT FOUND means the chassis is unknown, so return False.
return False
raise
return bool(data and data.get('IsConnected'))
<SYSTEM_TASK:>
Establish connection to one or more chassis.
<END_TASK>
<USER_TASK:>
Description:
def connect(self, chassis_list):
"""Establish connection to one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
Return:
List of chassis addresses.
""" |
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
status, data = self._rest.put_request(
'connections', chassis_list[0])
data = [data]
else:
params = {chassis: True for chassis in chassis_list}
params['action'] = 'connect'
status, data = self._rest.post_request('connections', None, params)
return data |
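Both accepted argument shapes, sketched (addresses are hypothetical):

stc.connect('10.1.2.3')                  # one chassis: single PUT request
stc.connect(['10.1.2.3', '10.1.2.4'])    # several chassis: one POST request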
<SYSTEM_TASK:>
Remove connection with one or more chassis.
<END_TASK>
<USER_TASK:>
Description:
def disconnect(self, chassis_list):
"""Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
""" |
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
self._rest.delete_request('connections', chassis_list[0])
else:
params = {chassis: True for chassis in chassis_list}
params['action'] = 'disconnect'
self._rest.post_request('connections', None, params) |
<SYSTEM_TASK:>
Get help information about Automation API.
<END_TASK>
<USER_TASK:>
Description:
def help(self, subject=None, args=None):
"""Get help information about Automation API.
The following values can be specified for the subject:
None -- get an overview of help.
'commands' -- get a list of API functions.
command name -- get info about the specified command.
object type -- get info about the specified object type.
handle value -- get info about the object type referred to by the handle.
Arguments:
subject -- Optional. Subject to get help on.
args -- Optional. Additional arguments for searching help. These
are used when the subject is 'list'.
Return:
String of help information.
""" |
if subject:
if subject not in (
'commands', 'create', 'config', 'get', 'delete', 'perform',
'connect', 'connectall', 'disconnect', 'disconnectall',
'apply', 'log', 'help'):
self._check_session()
status, data = self._rest.get_request('help', subject, args)
else:
status, data = self._rest.get_request('help')
if isinstance(data, (list, tuple, set)):
return ' '.join((str(i) for i in data))
return data['message'] |
<SYSTEM_TASK:>
Write a diagnostic message to a log file or to standard output.
<END_TASK>
<USER_TASK:>
Description:
def log(self, level, msg):
"""Write a diagnostic message to a log file or to standard output.
Arguments:
level -- Severity level of entry. One of: INFO, WARN, ERROR, FATAL.
msg -- Message to write to log.
""" |
self._check_session()
level = level.upper()
allowed_levels = ('INFO', 'WARN', 'ERROR', 'FATAL')
if level not in allowed_levels:
raise ValueError('level must be one of: ' +
', '.join(allowed_levels))
self._rest.post_request(
'log', None, {'log_level': level, 'message': msg})
<SYSTEM_TASK:>
Download the specified file from the server.
<END_TASK>
<USER_TASK:>
Description:
def download(self, file_name, save_as=None):
"""Download the specified file from the server.
Arguments:
file_name -- Name of file resource to save.
save_as -- Optional path name to write file to. If not specified,
then the file named by the last part of the resource
path is downloaded to the current directory.
Return: (save_path, bytes)
save_path -- Path where downloaded file was saved.
bytes -- Bytes downloaded.
""" |
self._check_session()
try:
if save_as:
save_as = os.path.normpath(save_as)
save_dir = os.path.dirname(save_as)
if save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
elif not os.path.isdir(save_dir):
raise RuntimeError(save_dir + " is not a directory")
status, save_path, bytes = self._rest.download_file(
'files', file_name, save_as, 'application/octet-stream')
except resthttp.RestHttpError as e:
raise RuntimeError('failed to download "%s": %s' % (file_name, e))
return save_path, bytes |
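Both call forms, sketched (file names are hypothetical):

path, nbytes = stc.download('bll.session.log')           # into current directory
path, nbytes = stc.download('bll.session.log',
                            save_as='logs/session.log')  # creates logs/ if missing
print('saved %s (%d bytes)' % (path, nbytes))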
<SYSTEM_TASK:>
Download all available files.
<END_TASK>
<USER_TASK:>
Description:
def download_all(self, dst_dir=None):
"""Download all available files.
Arguments:
dst_dir -- Optional destination directory to write files to. If not
specified, then files are downloaded to the current
directory.
Return:
Dictionary of {file_name: file_size, ..}
""" |
saved = {}
save_as = None
for f in self.files():
if dst_dir:
save_as = os.path.join(dst_dir, f.split('/')[-1])
name, bytes = self.download(f, save_as)
saved[name] = bytes
return saved |
<SYSTEM_TASK:>
Upload the specified file to the server.
<END_TASK>
<USER_TASK:>
Description:
def upload(self, src_file_path, dst_file_name=None):
"""Upload the specified file to the server.""" |
self._check_session()
status, data = self._rest.upload_file(
'files', src_file_path, dst_file_name)
return data |
<SYSTEM_TASK:>
Wait until sequencer is finished.
<END_TASK>
<USER_TASK:>
Description:
def wait_until_complete(self, timeout=None):
"""Wait until sequencer is finished.
This method blocks until the sequencer has finished its operation.
Arguments:
timeout -- Optional. Seconds to wait for sequencer to finish. If this
time is exceeded, then an exception is raised.
Return:
Sequencer testState value.
""" |
timeout_at = None
if timeout:
timeout_at = time.time() + int(timeout)
sequencer = self.get('system1', 'children-sequencer')
while True:
cur_test_state = self.get(sequencer, 'state')
if 'PAUSE' in cur_test_state or 'IDLE' in cur_test_state:
break
time.sleep(2)
if timeout_at and time.time() >= timeout_at:
raise RuntimeError('wait_until_complete timed out after %s sec'
% timeout)
return self.get(sequencer, 'testState') |
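A sketch of a typical run loop; a `perform` wrapper is assumed to exist, per the command list in help():

stc.perform('sequencerStart')                 # assumed wrapper for POST /perform
state = stc.wait_until_complete(timeout=600)  # raises RuntimeError on timeout
print('sequencer finished with testState:', state)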
<SYSTEM_TASK:>
Temporarily attaches a receiver to the provided ``signal`` within the scope
<END_TASK>
<USER_TASK:>
Description:
@contextmanager  # requires: from contextlib import contextmanager
def mock_signal_receiver(signal, wraps=None, **kwargs):
"""
Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
...     Model.objects.create()
>>> assert receiver.call_count == 1
""" |
if wraps is None:
def wraps(*args, **kwargs):
return None
receiver = mock.Mock(wraps=wraps)
signal.connect(receiver, **kwargs)
yield receiver
signal.disconnect(receiver) |
<SYSTEM_TASK:>
Get a SharedMock that returns self for most attributes and a new copy of
<END_TASK>
<USER_TASK:>
Description:
def QuerySetMock(model, *return_value):
"""
Get a SharedMock that returns self for most attributes and a new copy of
itself for any method that ordinarily generates QuerySets.
Set the results to two items:
>>> class Post(object): pass
>>> objects = QuerySetMock(Post, 'return', 'values')
>>> assert list(objects.filter()) == list(objects.all())
Force an exception:
>>> objects = QuerySetMock(Post, Exception())
Chain calls:
>>> objects.all().filter(filter_arg='dummy')
""" |
def make_get(self, model):
def _get(*a, **k):
results = list(self)
if len(results) > 1:
raise model.MultipleObjectsReturned
try:
return results[0]
except IndexError:
raise model.DoesNotExist
return _get
def make_qs_returning_method(self):
def _qs_returning_method(*a, **k):
return copy.deepcopy(self)
return _qs_returning_method
def make_getitem(self):
def _getitem(k):
if isinstance(k, slice):
self.__start = k.start
self.__stop = k.stop
else:
return list(self)[k]
return self
return _getitem
def make_iterator(self):
def _iterator(*a, **k):
if len(return_value) == 1 and isinstance(return_value[0], Exception):
raise return_value[0]
start = getattr(self, '__start', None)
stop = getattr(self, '__stop', None)
for x in return_value[start:stop]:
yield x
return _iterator
actual_model = model
if actual_model:
model = mock.MagicMock(spec=actual_model())
else:
model = mock.MagicMock()
m = SharedMock(reserved=['count', 'exists'] + QUERYSET_RETURNING_METHODS)
m.__start = None
m.__stop = None
m.__iter__.side_effect = lambda: iter(m.iterator())
m.__getitem__.side_effect = make_getitem(m)
if hasattr(m, "__nonzero__"):
# Python 2
m.__nonzero__.side_effect = lambda: bool(return_value)
m.exists.side_effect = m.__nonzero__
else:
# Python 3
m.__bool__.side_effect = lambda: bool(return_value)
m.exists.side_effect = m.__bool__
m.__len__.side_effect = lambda: len(return_value)
m.count.side_effect = m.__len__
m.model = model
m.get = make_get(m, actual_model)
for method_name in QUERYSET_RETURNING_METHODS:
setattr(m, method_name, make_qs_returning_method(m))
# Note since this is a SharedMock, *all* auto-generated child
# attributes will have the same side_effect ... might not make
# sense for some like count().
m.iterator.side_effect = make_iterator(m)
return m |
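A short sketch of the mock in use (Post is a stand-in model class):

class Post(object):
    pass

qs = QuerySetMock(Post, 'a', 'b')
assert qs.count() == 2 and qs.exists()
assert list(qs) == ['a', 'b']
assert list(qs.filter(status='live')) == ['a', 'b']  # filter args accepted, ignored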
<SYSTEM_TASK:>
Generate a token string from bytes arrays. The token in the session is user
<END_TASK>
<USER_TASK:>
Description:
def csrf_token():
"""
Generate a token string from random bytes. The token stored in the
session is user specific.
""" |
if "_csrf_token" not in session:
session["_csrf_token"] = os.urandom(128)
return hmac.new(app.secret_key, session["_csrf_token"],
digestmod=sha1).hexdigest() |
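A sketch of exposing the token to templates (standard Flask wiring assumed):

app.jinja_env.globals['csrf_token'] = csrf_token
# In a form template:
#   <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">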
<SYSTEM_TASK:>
Checks that token is correct, aborting if not
<END_TASK>
<USER_TASK:>
Description:
def check_csrf_token():
"""Checks that token is correct, aborting if not""" |
if request.method in ("GET", "HEAD", "OPTIONS"):  # safe, read-only methods
return
token = request.form.get("csrf_token")
if token is None:
app.logger.warning("Expected CSRF Token: not present")
abort(400)
if not safe_str_cmp(token, csrf_token()):
app.logger.warning("CSRF Token incorrect")
abort(400) |
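A sketch of enforcing the check on every mutating request:

@app.before_request
def csrf_protect():
    check_csrf_token()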
<SYSTEM_TASK:>
Get the HTTPRequest object from thread storage or from a callee by searching
<END_TASK>
<USER_TASK:>
Description:
def get_request(cls):
"""
Get the HTTPRequest object from thread storage or from a callee by searching
each frame in the call stack.
""" |
request = cls.get_global('request')
if request:
return request
try:
stack = inspect.stack()
except IndexError:
# in some cases this may raise an IndexError
# (e.g. when .pyc files don't match their .py sources)
return
for frame, _, _, _, _, _ in stack:
if 'request' in frame.f_locals:
if isinstance(frame.f_locals['request'], HttpRequest):
request = frame.f_locals['request']
cls.set_global('request', request)
return request |
<SYSTEM_TASK:>
Takes a Sijax response object and returns a
<END_TASK>
<USER_TASK:>
Description:
def _make_response(sijax_response):
"""Takes a Sijax response object and returns a
valid Flask response object.""" |
from types import GeneratorType
if isinstance(sijax_response, GeneratorType):
# Streaming response using a generator (non-JSON response).
# Upon returning a response, Flask would automatically destroy
# the request data and uploaded files - done by `flask.ctx.RequestContext.auto_pop()`
# We can't allow that, since the user-provided callback we're executing
# from within the generator may want to access request data/files.
# That's why we'll tell Flask to preserve the context and we'll clean up ourselves.
request.environ['flask._preserve_context'] = True
# Clean-up code taken from `flask.testing.TestingClient`
def clean_up_context():
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
# As per the WSGI specification, `close()` would be called on iterator responses.
# Let's wrap the iterator in another one, which will forward that `close()` call to our clean-up callback.
response = Response(ClosingIterator(sijax_response, clean_up_context), direct_passthrough=True)
else:
# Non-streaming response - a single JSON string
response = Response(sijax_response)
return response |
<SYSTEM_TASK:>
Executes a callback and returns the proper response.
<END_TASK>
<USER_TASK:>
Description:
def execute_callback(self, *args, **kwargs):
"""Executes a callback and returns the proper response.
Refer to :meth:`sijax.Sijax.execute_callback` for more details.
""" |
response = self._sijax.execute_callback(*args, **kwargs)
return _make_response(response) |
<SYSTEM_TASK:>
Displays the row of buttons for delete and save.
<END_TASK>
<USER_TASK:>
Description:
def submit_row(context):
"""
Displays the row of buttons for delete and save.
""" |
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
} |
<SYSTEM_TASK:>
Auto-discover INSTALLED_APPS nexus.py modules and fail silently when
<END_TASK>
<USER_TASK:>
Description:
def autodiscover(site=None):
"""
Auto-discover INSTALLED_APPS nexus.py modules and fail silently when
not present. This forces an import on them to register any api bits they
may want.
Specifying ``site`` will register all auto discovered modules with the new site.
""" |
# Bail out if autodiscover didn't finish loading from a previous call so
# that we avoid running autodiscover again when the URLconf is loaded by
# the exception handler to resolve the handler500 view. This prevents an
# admin.py module with errors from re-registering models and raising a
# spurious AlreadyRegistered exception (see #8245).
global LOADING
if LOADING:
return
LOADING = True
if site:
orig_site = globals()['site']
globals()['site'] = locals()['site']
import imp
from django.utils.importlib import import_module
from django.conf import settings
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an api.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for admin.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
continue
# Step 2: use imp.find_module to find the app's admin.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its admin.py doesn't exist
try:
imp.find_module('nexus_modules', app_path)
except ImportError:
continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
import_module("%s.nexus_modules" % app)
# # load builtins
# from gargoyle.builtins import *
if site:
globals()['site'] = orig_site
# autodiscover was successful, reset loading flag.
LOADING = False |
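Typical wiring in a project's urls.py, sketched (assumes the nexus package's default site):

import nexus
nexus.autodiscover()   # imports each app's nexus_modules, registering its views
# urlpatterns would then include: url(r'^nexus/', include(nexus.site.urls))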
<SYSTEM_TASK:>
Create a path for a given file, in such a way
<END_TASK>
<USER_TASK:>
Description:
def _get_run_breadcrumbs(cls, source_type, data_object, task_attempt):
"""Create a path for a given file, in such a way
that files end up being organized and browsable by run
""" |
# We cannot generate the path unless connect to a TaskAttempt
# and a run
if not task_attempt:
return []
# If multiple tasks exist, use the original.
task = task_attempt.tasks.earliest('datetime_created')
if task is None:
return []
run = task.run
if run is None:
return []
breadcrumbs = [
run.name,
"task-%s" % str(task.uuid)[0:8],
"attempt-%s" % str(task_attempt.uuid)[0:8],
]
# Include any ancestors if run is nested
while run.parent is not None:
run = run.parent
breadcrumbs = [run.name] + breadcrumbs
# Prepend first breadcrumb with datetime and id
breadcrumbs[0] = "%s-%s-%s" % (
run.datetime_created.strftime('%Y-%m-%dT%H.%M.%SZ'),
str(run.uuid)[0:8],
breadcrumbs[0])
breadcrumbs = ['runs'] + breadcrumbs
return breadcrumbs |
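For a task attempt inside a nested run, the returned breadcrumbs might look like this (values hypothetical):

# ['runs',
#  '2017-03-02T15.04.05Z-1a2b3c4d-parent-run',
#  'child-run',
#  'task-5e6f7a8b',
#  'attempt-9c0d1e2f']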