text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
List all apps.
<END_TASK>
<USER_TASK:>
Description:
def list_apps(self, cmd=None, embed_tasks=False, embed_counts=False,
              embed_deployments=False, embed_readiness=False,
              embed_last_task_failure=False, embed_failures=False,
              embed_task_stats=False, app_id=None, label=None, **kwargs):
    """List all apps.

    :param str cmd: if passed, only show apps with a matching `cmd`
    :param bool embed_tasks: embed tasks in result
    :param bool embed_counts: embed all task counts
    :param bool embed_deployments: embed all deployment identifier
    :param bool embed_readiness: embed all readiness check results
    :param bool embed_last_task_failure: embeds the last task failure
    :param bool embed_failures: shorthand for embed_last_task_failure
    :param bool embed_task_stats: embed task stats in result
    :param str app_id: if passed, only show apps with an 'id' that matches or contains this value
    :param str label: if passed, only show apps with the selected labels
    :param kwargs: arbitrary search filters, applied client-side by attribute equality
    :returns: list of applications
    :rtype: list[:class:`marathon.models.app.MarathonApp`]
    """
    # Server-side query filters; skip anything falsy.
    params = {name: value for name, value in
              (('cmd', cmd), ('id', app_id), ('label', label)) if value}
    requested_embeds = [
        name for name, wanted in (
            ('app.tasks', embed_tasks),
            ('app.counts', embed_counts),
            ('app.deployments', embed_deployments),
            ('app.readiness', embed_readiness),
            ('app.lastTaskFailure', embed_last_task_failure),
            ('app.failures', embed_failures),
            ('app.taskStats', embed_task_stats),
        ) if wanted
    ]
    if requested_embeds:
        params['embed'] = requested_embeds
    response = self._do_request('GET', '/v2/apps', params=params)
    apps = self._parse_response(
        response, MarathonApp, is_list=True, resource_name='apps')
    # Apply any extra attribute filters locally.
    for attr, expected in kwargs.items():
        apps = [app for app in apps if getattr(app, attr) == expected]
    return apps
<SYSTEM_TASK:>
Get a single app.
<END_TASK>
<USER_TASK:>
Description:
def get_app(self, app_id, embed_tasks=False, embed_counts=False,
            embed_deployments=False, embed_readiness=False,
            embed_last_task_failure=False, embed_failures=False,
            embed_task_stats=False):
    """Get a single app.

    :param str app_id: application ID
    :param bool embed_tasks: embed tasks in result
    :param bool embed_counts: embed all task counts
    :param bool embed_deployments: embed all deployment identifier
    :param bool embed_readiness: embed all readiness check results
    :param bool embed_last_task_failure: embeds the last task failure
    :param bool embed_failures: shorthand for embed_last_task_failure
    :param bool embed_task_stats: embed task stats in result
    :returns: application
    :rtype: :class:`marathon.models.app.MarathonApp`
    """
    requested_embeds = [
        name for name, wanted in (
            ('app.tasks', embed_tasks),
            ('app.counts', embed_counts),
            ('app.deployments', embed_deployments),
            ('app.readiness', embed_readiness),
            ('app.lastTaskFailure', embed_last_task_failure),
            ('app.failures', embed_failures),
            ('app.taskStats', embed_task_stats),
        ) if wanted
    ]
    params = {'embed': requested_embeds} if requested_embeds else {}
    response = self._do_request(
        'GET', '/v2/apps/{app_id}'.format(app_id=app_id), params=params)
    return self._parse_response(response, MarathonApp, resource_name='app')
<SYSTEM_TASK:>
Update an app.
<END_TASK>
<USER_TASK:>
Description:
def update_app(self, app_id, app, force=False, minimal=True):
    """Update an app.

    Applies writable settings in `app` to `app_id`.
    Note: this method can not be used to rename apps.

    :param str app_id: target application ID
    :param app: application settings
    :type app: :class:`marathon.models.app.MarathonApp`
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # A pinned version would prevent the update from taking effect,
    # so clear it before serializing.
    app.version = None
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=app.to_json(minimal=minimal))
    return response.json()
<SYSTEM_TASK:>
Update multiple apps.
<END_TASK>
<USER_TASK:>
Description:
def update_apps(self, apps, force=False, minimal=True):
    """Update multiple apps.

    Applies writable settings in elements of apps either by upgrading existing ones or creating new ones.

    :param apps: sequence of application settings
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    payload = []
    for app in apps:
        # A pinned version would prevent the update from taking effect.
        app.version = None
        payload.append(app.json_repr(minimal=minimal))
    encoder = MarathonMinimalJsonEncoder if minimal else MarathonJsonEncoder
    response = self._do_request(
        'PUT', '/v2/apps',
        params={'force': force},
        data=json.dumps(payload, cls=encoder, sort_keys=True))
    return response.json()
<SYSTEM_TASK:>
Roll an app back to a previous version.
<END_TASK>
<USER_TASK:>
Description:
def rollback_app(self, app_id, version, force=False):
    """Roll an app back to a previous version.

    :param str app_id: application ID
    :param str version: application version
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    payload = json.dumps({'version': version})
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=payload)
    return response.json()
<SYSTEM_TASK:>
Stop and destroy an app.
<END_TASK>
<USER_TASK:>
Description:
def delete_app(self, app_id, force=False):
    """Stop and destroy an app.

    :param str app_id: application ID
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    response = self._do_request(
        'DELETE',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force})
    return response.json()
<SYSTEM_TASK:>
Scale an app.
<END_TASK>
<USER_TASK:>
Description:
def scale_app(self, app_id, instances=None, delta=None, force=False):
    """Scale an app.

    Scale an app to a target number of instances (with `instances`), or scale the
    number of instances up or down by some delta (`delta`). If the resulting number
    of instances would be negative, desired instances will be set to zero.
    If both `instances` and `delta` are passed, `instances` wins.

    :param str app_id: application ID
    :param int instances: [optional] the number of instances to scale to
    :param int delta: [optional] the number of instances to scale up or down by
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version, or None if the
        arguments were invalid or the app does not exist
    :rtype: dict
    """
    if instances is None and delta is None:
        marathon.log.error('instances or delta must be passed')
        return
    try:
        app = self.get_app(app_id)
    except NotFoundError:
        marathon.log.error('App "{app}" not found'.format(app=app_id))
        return
    if instances is not None:
        desired = instances
    else:
        desired = app.instances + delta
    return self.update_app(app.id, MarathonApp(instances=desired), force=force)
<SYSTEM_TASK:>
Create and start a group.
<END_TASK>
<USER_TASK:>
Description:
def create_group(self, group):
    """Create and start a group.

    :param :class:`marathon.models.group.MarathonGroup` group: the group to create
    :returns: success
    :rtype: dict containing the version ID
    """
    response = self._do_request('POST', '/v2/groups', data=group.to_json())
    return response.json()
<SYSTEM_TASK:>
Update a group.
<END_TASK>
<USER_TASK:>
Description:
def update_group(self, group_id, group, force=False, minimal=True):
    """Update a group.

    Applies writable settings in `group` to `group_id`.
    Note: this method can not be used to rename groups.

    :param str group_id: target group ID
    :param group: group settings
    :type group: :class:`marathon.models.group.MarathonGroup`
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # A pinned version would prevent the update from taking effect.
    group.version = None
    response = self._do_request(
        'PUT',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        data=group.to_json(minimal=minimal),
        params={'force': force})
    return response.json()
<SYSTEM_TASK:>
Roll a group back to a previous version.
<END_TASK>
<USER_TASK:>
Description:
def rollback_group(self, group_id, version, force=False):
    """Roll a group back to a previous version.

    :param str group_id: group ID
    :param str version: group version
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    endpoint = '/v2/groups/{group_id}/versions/{version}'.format(
        group_id=group_id, version=version)
    response = self._do_request('PUT', endpoint, params={'force': force})
    return response.json()
<SYSTEM_TASK:>
Stop and destroy a group.
<END_TASK>
<USER_TASK:>
Description:
def delete_group(self, group_id, force=False):
    """Stop and destroy a group.

    :param str group_id: group ID
    :param bool force: apply even if a deployment is in progress
    :returns: a dict containing the deleted version
    :rtype: dict
    """
    response = self._do_request(
        'DELETE',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        params={'force': force})
    return response.json()
<SYSTEM_TASK:>
Scale a group by a factor.
<END_TASK>
<USER_TASK:>
Description:
def scale_group(self, group_id, scale_by):
    """Scale a group by a factor.

    :param str group_id: group ID
    :param int scale_by: factor to scale by
    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    payload = json.dumps({'scaleBy': scale_by})
    response = self._do_request(
        'PUT', '/v2/groups/{group_id}'.format(group_id=group_id), data=payload)
    return response.json()
<SYSTEM_TASK:>
List running tasks, optionally filtered by app_id.
<END_TASK>
<USER_TASK:>
Description:
def list_tasks(self, app_id=None, **kwargs):
    """List running tasks, optionally filtered by app_id.

    :param str app_id: if passed, only show tasks for this application
    :param kwargs: arbitrary search filters, applied client-side by attribute equality
    :returns: list of tasks
    :rtype: list[:class:`marathon.models.task.MarathonTask`]
    """
    response = self._do_request(
        'GET', '/v2/apps/%s/tasks' % app_id if app_id else '/v2/tasks')
    tasks = self._parse_response(
        response, MarathonTask, is_list=True, resource_name='tasks')
    # Backfill app_id on tasks that came back without one. (Previously done
    # with a side-effecting list comprehension; a plain loop is the idiom.)
    if app_id:
        for task in tasks:
            if task.app_id is None:
                task.app_id = app_id
    for attr, expected in kwargs.items():
        tasks = [t for t in tasks if getattr(t, attr) == expected]
    return tasks
<SYSTEM_TASK:>
Kill a list of given tasks.
<END_TASK>
<USER_TASK:>
Description:
def kill_given_tasks(self, task_ids, scale=False, force=None):
    """Kill a list of given tasks.

    :param list[str] task_ids: tasks to kill
    :param bool scale: if true, scale down the app by the number of tasks killed
    :param bool force: if true, ignore any current running deployments
    :return: True on success
    :rtype: bool
    """
    params = {'scale': scale}
    if force is not None:
        params['force'] = force
    data = json.dumps({"ids": task_ids})
    response = self._do_request(
        'POST', '/v2/tasks/delete', params=params, data=data)
    # Bug fix: `response == 200` compared a Response object to an int and
    # therefore always evaluated to False; compare the HTTP status code.
    return response.status_code == 200
<SYSTEM_TASK:>
Kill all tasks belonging to app.
<END_TASK>
<USER_TASK:>
Description:
def kill_tasks(self, app_id, scale=False, wipe=False,
               host=None, batch_size=0, batch_delay=0):
    """Kill all tasks belonging to app.

    :param str app_id: application ID
    :param bool scale: if true, scale down the app by the number of tasks killed
    :param bool wipe: if true, also expunge the tasks' persistent data
    :param str host: if provided, only terminate tasks on this Mesos slave
    :param int batch_size: if non-zero, terminate tasks in groups of this size
    :param int batch_delay: time (in seconds) to wait in between batched kills. If zero, automatically determine
    :returns: list of killed tasks
    :rtype: list[:class:`marathon.models.task.MarathonTask`]
    """
    def batch(iterable, size):
        # Yield successive chunks of `size` items.
        # Bug fix: the previous version relied on next() raising
        # StopIteration inside a generator, which PEP 479 (Python 3.7+)
        # turns into a RuntimeError; terminate explicitly instead.
        sourceiter = iter(iterable)
        while True:
            chunk = list(itertools.islice(sourceiter, size))
            if not chunk:
                return
            yield chunk

    if batch_size == 0:
        # Terminate all at once
        params = {'scale': scale, 'wipe': wipe}
        if host:
            params['host'] = host
        response = self._do_request(
            'DELETE', '/v2/apps/{app_id}/tasks'.format(app_id=app_id), params)
        # Marathon is inconsistent about what type of object it returns on the multi
        # task deletion endpoint, depending on the version of Marathon. See:
        # https://github.com/mesosphere/marathon/blob/06a6f763a75fb6d652b4f1660685ae234bd15387/src/main/scala/mesosphere/marathon/api/v2/AppTasksResource.scala#L88-L95
        if "tasks" in response.json():
            return self._parse_response(response, MarathonTask, is_list=True, resource_name='tasks')
        else:
            return response.json()
    else:
        # Terminate in batches
        tasks = self.list_tasks(
            app_id, host=host) if host else self.list_tasks(app_id)
        for tbatch in batch(tasks, batch_size):
            killed_tasks = [self.kill_task(app_id, t.id, scale=scale, wipe=wipe)
                            for t in tbatch]
            # Pause until the tasks have been killed to avoid race
            # conditions
            killed_task_ids = set(t.id for t in killed_tasks)
            running_task_ids = killed_task_ids
            while killed_task_ids.intersection(running_task_ids):
                time.sleep(1)
                running_task_ids = set(
                    t.id for t in self.get_app(app_id).tasks)
            if batch_delay == 0:
                # Pause until the replacement tasks are healthy
                desired_instances = self.get_app(app_id).instances
                running_instances = 0
                while running_instances < desired_instances:
                    time.sleep(1)
                    # Bug fix: count tasks that HAVE started. The previous
                    # `is None` counted unstarted tasks, so the loop could
                    # exit before replacements were actually up.
                    running_instances = sum(
                        t.started_at is not None for t in self.get_app(app_id).tasks)
            else:
                time.sleep(batch_delay)
        return tasks
<SYSTEM_TASK:>
Kill a task.
<END_TASK>
<USER_TASK:>
Description:
def kill_task(self, app_id, task_id, scale=False, wipe=False):
    """Kill a task.

    :param str app_id: application ID
    :param str task_id: the task to kill
    :param bool scale: if true, scale down the app by one if the task exists
    :param bool wipe: if true, also expunge the task's persistent data
    :returns: the killed task
    :rtype: :class:`marathon.models.task.MarathonTask`
    """
    response = self._do_request(
        'DELETE',
        '/v2/apps/{app_id}/tasks/{task_id}'.format(
            app_id=app_id, task_id=task_id),
        {'scale': scale, 'wipe': wipe})
    # Marathon is inconsistent about what type of object it returns on the multi
    # task deletion endpoint, depending on the version of Marathon. See:
    # https://github.com/mesosphere/marathon/blob/06a6f763a75fb6d652b4f1660685ae234bd15387/src/main/scala/mesosphere/marathon/api/v2/AppTasksResource.scala#L88-L95
    body = response.json()
    if "task" in body:
        return self._parse_response(response, MarathonTask, is_list=False, resource_name='task')
    return body
<SYSTEM_TASK:>
List the versions of an app.
<END_TASK>
<USER_TASK:>
Description:
def list_versions(self, app_id):
    """List the versions of an app.

    :param str app_id: application ID
    :returns: list of versions
    :rtype: list[str]
    """
    response = self._do_request(
        'GET', '/v2/apps/{app_id}/versions'.format(app_id=app_id))
    # The 'versions' field is already a list; the old identity
    # comprehension rebuilt it for no benefit.
    return response.json()['versions']
<SYSTEM_TASK:>
Get the configuration of an app at a specific version.
<END_TASK>
<USER_TASK:>
Description:
def get_version(self, app_id, version):
    """Get the configuration of an app at a specific version.

    :param str app_id: application ID
    :param str version: application version
    :return: application configuration
    :rtype: :class:`marathon.models.app.MarathonApp`
    """
    endpoint = '/v2/apps/{app_id}/versions/{version}'.format(
        app_id=app_id, version=version)
    response = self._do_request('GET', endpoint)
    return MarathonApp.from_json(response.json())
<SYSTEM_TASK:>
Register a callback URL as an event subscriber.
<END_TASK>
<USER_TASK:>
Description:
def create_event_subscription(self, url):
    """Register a callback URL as an event subscriber.

    :param str url: callback URL
    :returns: the created event subscription
    :rtype: dict
    """
    response = self._do_request(
        'POST', '/v2/eventSubscriptions', {'callbackUrl': url})
    return response.json()
<SYSTEM_TASK:>
Deregister a callback URL as an event subscriber.
<END_TASK>
<USER_TASK:>
Description:
def delete_event_subscription(self, url):
    """Deregister a callback URL as an event subscriber.

    :param str url: callback URL
    :returns: the deleted event subscription
    :rtype: dict
    """
    response = self._do_request(
        'DELETE', '/v2/eventSubscriptions', {'callbackUrl': url})
    return response.json()
<SYSTEM_TASK:>
List all running deployments.
<END_TASK>
<USER_TASK:>
Description:
def list_deployments(self):
    """List all running deployments.

    :returns: list of deployments
    :rtype: list[:class:`marathon.models.deployment.MarathonDeployment`]
    """
    return self._parse_response(
        self._do_request('GET', '/v2/deployments'),
        MarathonDeployment, is_list=True)
<SYSTEM_TASK:>
List all the tasks queued up or waiting to be scheduled.
<END_TASK>
<USER_TASK:>
Description:
def list_queue(self, embed_last_unused_offers=False):
    """List all the tasks queued up or waiting to be scheduled.

    :param bool embed_last_unused_offers: embed the last unused offers in the result
    :returns: list of queue items
    :rtype: list[:class:`marathon.models.queue.MarathonQueueItem`]
    """
    params = {'embed': 'lastUnusedOffers'} if embed_last_unused_offers else {}
    response = self._do_request('GET', '/v2/queue', params=params)
    return self._parse_response(
        response, MarathonQueueItem, is_list=True, resource_name='queue')
<SYSTEM_TASK:>
Cancel a deployment.
<END_TASK>
<USER_TASK:>
Description:
def delete_deployment(self, deployment_id, force=False):
    """Cancel a deployment.

    :param str deployment_id: deployment id
    :param bool force: if true, don't create a rollback deployment to restore the previous configuration
    :returns: a dict containing the deployment id and version (empty dict if force=True)
    :rtype: dict
    """
    path = '/v2/deployments/{deployment}'.format(deployment=deployment_id)
    if not force:
        return self._do_request('DELETE', path).json()
    self._do_request('DELETE', path, params={'force': True})
    # Successful DELETE with ?force=true returns empty text (and status
    # code 202). Client code should poll until deployment is removed.
    return {}
<SYSTEM_TASK:>
Checks if a path is a correct format that Marathon expects. Raises ValueError if not valid.
<END_TASK>
<USER_TASK:>
Description:
def assert_valid_path(path):
    """Check that a path has the format Marathon expects.

    :param str path: the app/group path to validate; None passes through
    :returns: the path unchanged (or None)
    :rtype: str
    :raises ValueError: if any path segment is not a valid Marathon id
    """
    if path is None:
        return
    # Mirrors Marathon's own PathId validation:
    # https://github.com/mesosphere/marathon/blob/0c11661ca2f259f8a903d114ef79023649a6f04b/src/main/scala/mesosphere/marathon/state/PathId.scala#L71
    # (Renamed the loop variable: `id` shadowed the builtin.)
    for segment in filter(None, path.strip('/').split('/')):
        if not ID_PATTERN.match(segment):
            raise ValueError(
                'invalid path (allowed: lowercase letters, digits, hyphen, "/", ".", ".."): %r' % path)
    return path
<SYSTEM_TASK:>
Checks if an id is the correct format that Marathon expects. Raises ValueError if not valid.
<END_TASK>
<USER_TASK:>
Description:
def assert_valid_id(id):
    """Check that an id has the format Marathon expects.

    :param str id: App or group id; None passes through
    :rtype: str
    :raises ValueError: if the id is not valid
    """
    if id is None:
        return
    if ID_PATTERN.match(id.strip('/')):
        return id
    raise ValueError(
        'invalid id (allowed: lowercase letters, digits, hyphen, ".", ".."): %r' % id)
<SYSTEM_TASK:>
Construct an object from a parsed response.
<END_TASK>
<USER_TASK:>
Description:
def from_json(cls, attributes):
    """Construct an object from a parsed response.

    :param dict attributes: object attributes from parsed response;
        keys are converted from camelCase to snake_case
    """
    kwargs = {to_snake_case(key): value for key, value in attributes.items()}
    return cls(**kwargs)
<SYSTEM_TASK:>
Encode an object as a JSON string.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self, minimal=True):
    """Encode an object as a JSON string.

    :param bool minimal: Construct a minimal representation of the object (ignore nulls and empty collections)
    :rtype: str
    """
    if minimal:
        payload, encoder = self.json_repr(minimal=True), MarathonMinimalJsonEncoder
    else:
        payload, encoder = self.json_repr(), MarathonJsonEncoder
    return json.dumps(payload, cls=encoder, sort_keys=True)
<SYSTEM_TASK:>
Construct a MarathonConstraint from a parsed response.
<END_TASK>
<USER_TASK:>
Description:
def from_json(cls, obj):
    """Construct a MarathonConstraint from a parsed response.

    :param list obj: constraint as a 2- or 3-element sequence
        ([field, operator] or [field, operator, value])
    :rtype: :class:`MarathonConstraint`
    :raises ValueError: if the sequence does not have 2 or 3 elements
    """
    if len(obj) == 2:
        field, operator = obj
        return cls(field, operator)
    if len(obj) == 3:
        field, operator, value = obj
        return cls(field, operator, value)
    # Previously a too-short constraint silently produced None, deferring
    # the failure to a confusing place downstream; fail loudly instead.
    raise ValueError('invalid constraint: %r' % (obj,))
<SYSTEM_TASK:>
Construct a list of MarathonEndpoints from a list of tasks.
<END_TASK>
<USER_TASK:>
Description:
def from_tasks(cls, tasks):
    """Construct a list of MarathonEndpoints from a list of tasks.

    :param list[:class:`marathon.models.MarathonTask`] tasks: list of tasks to parse
    :rtype: list[:class:`MarathonEndpoint`]
    """
    endpoints = []
    for task in tasks:
        for port_index, port in enumerate(task.ports):
            endpoints.append(MarathonEndpoint(
                task.app_id,
                task.service_ports[port_index],
                task.host,
                task.id,
                port))
    return endpoints
<SYSTEM_TASK:>
Convert newlines into U+23EC characters, followed by an actual newline and
<END_TASK>
<USER_TASK:>
Description:
def _format_newlines(prefix, formatted_node, options):
"""
Convert newlines into U+23EC characters, followed by an actual newline and
then a tree prefix so as to position the remaining text under the previous
line.
""" |
replacement = u''.join([
options.NEWLINE,
u'\n',
prefix])
return formatted_node.replace(u'\n', replacement) |
<SYSTEM_TASK:>
Gets a player.
<END_TASK>
<USER_TASK:>
Description:
async def get_player(self, tag):
    """Gets a player.

    Gets a player with specified tag. If no tag is specified, the request will fail.
    If the tag is invalid, a brawlstars.InvalidTag will be raised.
    If the data is missing, a ValueError will be raised.
    If the connection times out, a brawlstars.Timeout will be raised.
    If the data was unable to be received, a brawlstars.HTTPError will be raised along with the
    HTTP status code.
    On success, will return a Player.
    """
    tag = tag.strip("#").upper()
    try:
        async with self.session.get(self._base_url + 'players/' + tag,
                                    timeout=self.timeout,
                                    headers=self.headers) as resp:
            if resp.status == 200:
                data = await resp.json()
            elif 500 > resp.status > 400:
                raise HTTPError(resp.status)
            else:
                raise Error()
    except asyncio.TimeoutError:
        raise Timeout()
    except ValueError:
        raise MissingData('data')
    except (HTTPError, Error):
        # Bug fix: these deliberately raised errors were previously
        # swallowed by the broad `except Exception` below and re-raised
        # as InvalidArg('tag'), hiding the real failure cause.
        raise
    except Exception:
        raise InvalidArg('tag')
    data = Box(data)
    player = Player(data)
    return player
<SYSTEM_TASK:>
Function allowing lazy importing of a module into the namespace.
<END_TASK>
<USER_TASK:>
Description:
def lazy_module(modname, error_strings=None, lazy_mod_class=LazyModule,
                level='leaf'):
    """Lazily import a module into the namespace.

    A hollow module object is created, registered in `sys.modules`, and
    returned. The actual import work (and any `ImportError` for a missing
    module) is deferred until an attribute of the lazy module is first
    accessed. Because the lazy object is registered in `sys.modules`,
    later plain `import modname` statements also return it, so the rest
    of your code can use regular python import statements and retain the
    laziness of the modules.

    Parameters
    ----------
    modname : str
        The module to import.
    error_strings : dict, optional
        Strings used to build the message shown when loading fails.
        Key 'msg' sets the message template (defaults to
        :attr:`lazy_import._MSG`), which is formatted with the remaining
        keys — 'module', 'caller', and 'install_name' — all of which get
        sensible defaults if omitted.
    lazy_mod_class : type, optional
        Class used to instantiate the lazy module; must be a subclass of
        :class:`LazyModule` (the default).
    level : str, optional
        Which submodule reference to return: 'leaf' (default) returns the
        deepest submodule of *modname*; 'base' returns the top-level
        package. *level* does not affect which names get registered in
        `sys.modules`. For modname 'aaa.bbb.ccc', 'base' is the lazy
        equivalent of `import aaa.bbb.ccc`, while 'leaf' is the lazy
        equivalent of `from aaa.bbb import ccc`.

    Returns
    -------
    module
        An instance of *lazy_mod_class* standing in for *modname* (or its
        base, depending on *level*). The module is fully loaded upon first
        attribute access, at which point the object in `sys.modules` is
        replaced in place by the real module.

    See Also
    --------
    :func:`lazy_callable`
    :class:`LazyModule`
    """
    if error_strings is None:
        error_strings = {}
    _set_default_errornames(modname, error_strings)
    module = _lazy_module(modname, error_strings, lazy_mod_class)
    if level == 'leaf':
        return module
    if level == 'base':
        return sys.modules[module_basename(modname)]
    raise ValueError("Parameter 'level' must be one of ('base', 'leaf')")
<SYSTEM_TASK:>
Performs lazy importing of one or more callables.
<END_TASK>
<USER_TASK:>
Description:
def lazy_callable(modname, *names, **kwargs):
    """Perform lazy importing of one or more callables.

    Creates thin wrappers that pass any and all arguments straight to the
    target module's callables (functions or classes). The full loading of
    the target module is only triggered when a returned wrapper is itself
    called; the lazy import uses the same mechanism as
    :func:`lazy_module`. If the target module has already been fully
    imported before this call, the target callables themselves are
    returned and no lazy imports are made. :func:`lazy_function` is an
    alias of :func:`lazy_callable`.

    Parameters
    ----------
    modname : str
        The base module from which to import the callable(s) in *names*,
        or a full 'module_name.callable_name' string when *names* is
        empty.
    names : str (optional)
        The callable name(s) to import from the module given by *modname*.
    error_strings : dict, optional
        Strings used when reporting loading errors, as described under
        :func:`lazy_module`, except that 1) the key 'msg_callable' sets
        the message used when the module loads but the target callable is
        missing (defaulting to :attr:`lazy_import._MSG_CALLABLE`), and
        2) a key 'callable' is always added with the name being loaded.
    lazy_mod_class : type, optional
        See definition under :func:`lazy_module`.
    lazy_call_class : type, optional
        Class used for the lazy-callable wrappers, other than the default
        :class:`LazyCallable`.

    Returns
    -------
    wrapper function or tuple of wrapper functions
        One wrapper per entry in *names*, or — when only *modname* was
        passed as a full 'module_name.callable_name' string — the single
        wrapper directly, not in a tuple.

    Notes
    -----
    Unlike :func:`lazy_module`, the returned wrappers never mutate into
    the real objects, even after the module loads. They are fine to call,
    but cannot be used for anything else (e.g. as base classes).

    See Also
    --------
    :func:`lazy_module`
    :class:`LazyCallable`
    """
    single = not names
    if single:
        # Treat modname as 'module.callable' and split off the callable.
        modname, _, cname = modname.rpartition(".")
    lazy_mod_class = _setdef(kwargs, 'lazy_mod_class', LazyModule)
    lazy_call_class = _setdef(kwargs, 'lazy_call_class', LazyCallable)
    error_strings = _setdef(kwargs, 'error_strings', {})
    _set_default_errornames(modname, error_strings, call=True)
    if single:
        # Single-callable form: return the wrapper itself, not a tuple.
        return _lazy_callable(modname, cname, error_strings.copy(),
                              lazy_mod_class, lazy_call_class)
    return tuple(_lazy_callable(modname, cname, error_strings.copy(),
                                lazy_mod_class, lazy_call_class)
                 for cname in names)
<SYSTEM_TASK:>
Ensures that a module, and its parents, are properly loaded
<END_TASK>
<USER_TASK:>
Description:
def _load_module(module):
    """Ensures that a module, and its parents, are properly loaded

    Replaces the lazy placeholder in *module* with the real module by
    triggering Python's import machinery (``reload_module``). On failure the
    lazy state is restored from a cached copy so the module keeps raising
    lazily (or can be retried).

    Parameters
    ----------
    module : LazyModule
        The lazy module object to load in place.

    Raises
    ------
    TypeError
        If *module* is not a LazyModule instance.
    ImportError
        If the real import fails (re-raised with the configured message).
    """
    modclass = type(module)
    # We only take care of our own LazyModule instances
    if not issubclass(modclass, LazyModule):
        raise TypeError("Passed module is not a LazyModule instance.")
    with _ImportLockContext():
        parent, _, modname = module.__name__.rpartition('.')
        logger.debug("loading module {}".format(modname))
        # We first identify whether this is a loadable LazyModule, then we
        # strip as much of lazy_import behavior as possible (keeping it cached,
        # in case loading fails and we need to reset the lazy state).
        if not hasattr(modclass, '_lazy_import_error_msgs'):
            # Already loaded (no _lazy_import_error_msgs attr). Not reloading.
            return
        # First, ensure the parent is loaded (using recursion; *very* unlikely
        # we'll ever hit a stack limit in this case).
        modclass._LOADING = True
        try:
            if parent:
                logger.debug("first loading parent module {}".format(parent))
                setattr(sys.modules[parent], modname, module)
            if not hasattr(modclass, '_LOADING'):
                logger.debug("Module {} already loaded by the parent"
                             .format(modname))
                # We've been loaded by the parent. Let's bail.
                return
            cached_data = _clean_lazymodule(module)
            try:
                # Get Python to do the real import!
                reload_module(module)
            except:
                # Loading failed. We reset our lazy state.
                logger.debug("Failed to load module {}. Resetting..."
                             .format(modname))
                _reset_lazymodule(module, cached_data)
                raise
            else:
                # Successful load
                logger.debug("Successfully loaded module {}".format(modname))
                delattr(modclass, '_LOADING')
                _reset_lazy_submod_refs(module)
        except (AttributeError, ImportError) as err:
            logger.debug("Failed to load {}.\n{}: {}"
                         .format(modname, err.__class__.__name__, err))
            logger.lazy_trace()
            # Under Python 3 reloading our dummy LazyModule instances causes an
            # AttributeError if the module can't be found. Would be preferable
            # if we could always rely on an ImportError. As it is we vet the
            # AttributeError as thoroughly as possible.
            if ((six.PY3 and isinstance(err, AttributeError)) and not
                    err.args[0] == "'NoneType' object has no attribute 'name'"):
                # Not the AttributeError we were looking for.
                raise
            msg = modclass._lazy_import_error_msgs['msg']
            raise_from(ImportError(
                msg.format(**modclass._lazy_import_error_strings)), None)
<SYSTEM_TASK:>
Like dict.setdefault but sets the default value also if None is present.
<END_TASK>
<USER_TASK:>
Description:
def _setdef(argdict, name, defaultvalue):
"""Like dict.setdefault but sets the default value also if None is present.
""" |
if not name in argdict or argdict[name] is None:
argdict[name] = defaultvalue
return argdict[name] |
<SYSTEM_TASK:>
Removes all lazy behavior from a module's class, for loading.
<END_TASK>
<USER_TASK:>
Description:
def _clean_lazymodule(module):
"""Removes all lazy behavior from a module's class, for loading.
Also removes all module attributes listed under the module's class deletion
dictionaries. Deletion dictionaries are class attributes with names
specified in `_DELETION_DICT`.
Parameters
----------
module: LazyModule
Returns
-------
dict
A dictionary of deleted class attributes, that can be used to reset the
lazy state using :func:`_reset_lazymodule`.
""" |
modclass = type(module)
_clean_lazy_submod_refs(module)
modclass.__getattribute__ = ModuleType.__getattribute__
modclass.__setattr__ = ModuleType.__setattr__
cls_attrs = {}
for cls_attr in _CLS_ATTRS:
try:
cls_attrs[cls_attr] = getattr(modclass, cls_attr)
delattr(modclass, cls_attr)
except AttributeError:
pass
return cls_attrs |
<SYSTEM_TASK:>
Compute timestamp from a datetime object that could be timezone aware
<END_TASK>
<USER_TASK:>
Description:
def timestamp_from_datetime(dt):
    """
    Compute timestamp from a datetime object that could be timezone aware
    or unaware.

    Naive datetimes are interpreted as UTC. The previous implementation
    relied on ``astimezone`` raising ValueError for naive datetimes, which
    only happens on Python 2; on Python 3 a naive datetime would instead be
    silently interpreted in *local* time. Checking ``tzinfo`` explicitly
    gives the intended behavior on both.
    """
    if dt.tzinfo is None:
        # Naive: assume the value is already expressed in UTC.
        utc_dt = dt.replace(tzinfo=pytz.utc)
    else:
        utc_dt = dt.astimezone(pytz.utc)
    return timegm(utc_dt.timetuple())
<SYSTEM_TASK:>
Similar to smart_bytes, except that lazy instances are resolved to
<END_TASK>
<USER_TASK:>
Description:
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.

    Parameters:
        s: value to convert to ``bytes``.
        encoding (str): target encoding of the result.
        strings_only (bool): when True, pass None and int values through
            unconverted.
        errors (str): error handler given to encode/decode (e.g. 'strict').

    Returns:
        bytes, or the original object when the ``strings_only`` short-circuit
        applies.
    """
    if isinstance(s, memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            # Fast path: already bytes in the requested (default) encoding.
            return s
        else:
            # Byte input is assumed to be UTF-8; transcode to the target.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                              errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
<SYSTEM_TASK:>
Wrap a function so that results for any argument tuple are stored in
<END_TASK>
<USER_TASK:>
Description:
def memoize(func, cache, num_args):
    """
    Wrap *func* so that results are stored in *cache*, keyed by the first
    *num_args* positional arguments (which must therefore be usable as
    dictionary keys).

    Subsequent calls whose leading arguments match a cached key return the
    stored result without invoking *func* again.
    """
    @wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        try:
            # Fast path: result already computed for this key.
            return cache[key]
        except KeyError:
            value = func(*args)
            cache[key] = value
            return value
    return wrapper
<SYSTEM_TASK:>
Remove sensitive information from msg.
<END_TASK>
<USER_TASK:>
Description:
def hide_auth(msg):
    """Remove sensitive information from msg.

    Applies every (pattern, replacement) pair from the module-level
    RE_HIDE_AUTH list in order and returns the scrubbed string.
    """
    scrubbed = msg
    for regex, replacement in RE_HIDE_AUTH:
        scrubbed = regex.sub(replacement, scrubbed)
    return scrubbed
<SYSTEM_TASK:>
Prepare the HTTP handler, URL, and HTTP headers for all subsequent requests
<END_TASK>
<USER_TASK:>
Description:
def init(self):
    """Prepare the HTTP handler, URL, and HTTP headers for all subsequent requests

    Picks an HTTP or HTTPS urllib2 handler based on the scheme of
    ``self.server``. When this Python has ``ssl.create_default_context``,
    certificate verification is toggled via ``self.ssl_verify``; otherwise a
    plain HTTPSHandler is used. Also builds the JSON-RPC endpoint URL and
    default headers, adding HTTP basic auth when ``self.httpuser`` is set.

    Raises:
        ValueError: if ``self.server`` does not start with http:// or https://
    """
    self.debug('Initializing %r', self)
    proto = self.server.split('://')[0]
    if proto == 'https':
        if hasattr(ssl, 'create_default_context'):
            # Modern Python: build a context so verification is controllable.
            context = ssl.create_default_context()
            if self.ssl_verify:
                context.check_hostname = True
                context.verify_mode = ssl.CERT_REQUIRED
            else:
                context.check_hostname = False
                context.verify_mode = ssl.CERT_NONE
            self._http_handler = urllib2.HTTPSHandler(debuglevel=0, context=context)
        else:
            # Old Python without create_default_context: no verification knobs.
            self._http_handler = urllib2.HTTPSHandler(debuglevel=0)
    elif proto == 'http':
        self._http_handler = urllib2.HTTPHandler(debuglevel=0)
    else:
        raise ValueError('Invalid protocol %s' % proto)
    self._api_url = self.server + '/api_jsonrpc.php'
    self._http_headers = {
        'Content-Type': 'application/json-rpc',
        'User-Agent': 'python/zabbix_api',
    }
    if self.httpuser:
        self.debug('HTTP authentication enabled')
        auth = self.httpuser + ':' + self.httppasswd
        self._http_headers['Authorization'] = 'Basic ' + b64encode(auth.encode('utf-8')).decode('ascii')
<SYSTEM_TASK:>
Calculate delta between current time and datetime and return a human readable form of the delta object
<END_TASK>
<USER_TASK:>
Description:
def get_age(dt):
    """Calculate delta between current time and datetime and return a human readable form of the delta object"""
    elapsed = datetime.now() - dt
    # Split the sub-day seconds into h/m/s components.
    total_minutes, leftover_seconds = divmod(elapsed.seconds, 60)
    hours_part, minutes_part = divmod(total_minutes, 60)
    if elapsed.days:
        # Day-scale ages drop the seconds for brevity.
        return '%dd %dh %dm' % (elapsed.days, hours_part, minutes_part)
    return '%dh %dm %ds' % (hours_part, minutes_part, leftover_seconds)
<SYSTEM_TASK:>
Perform a re-login if not signed in or raise an exception
<END_TASK>
<USER_TASK:>
Description:
def check_auth(self):
    """Perform a re-login if not signed in or raise an exception"""
    if self.logged_in:
        return
    # Only re-login automatically when an interval is configured, we have
    # logged in before, and that login is older than the interval.
    can_relogin = (self.relogin_interval and self.last_login and
                   (time() - self.last_login) > self.relogin_interval)
    if not can_relogin:
        raise ZabbixAPIException('Not logged in.')
    self.log(WARNING, 'Zabbix API not logged in. Performing Zabbix API relogin after %d seconds',
             self.relogin_interval)
    self.relogin()  # Will raise exception in case of login error
<SYSTEM_TASK:>
Check authentication and perform actual API request and relogin if needed
<END_TASK>
<USER_TASK:>
Description:
def call(self, method, params=None):
    """Check authentication and perform actual API request and relogin if needed

    Args:
        method (str): Zabbix API method name (e.g. ``host.get``).
        params: parameters for the method, passed through to json_obj().

    Returns:
        The decoded response from :meth:`do_request`.

    The call is retried exactly once after a relogin when the API error data
    matches one of ``self.LOGIN_ERRORS`` (expired/invalid session).
    """
    start_time = time()
    self.check_auth()
    self.log(INFO, '[%s-%05d] Calling Zabbix API method "%s"', start_time, self.id, method)
    self.log(DEBUG, '\twith parameters: %s', params)
    try:
        return self.do_request(self.json_obj(method, params=params))
    except ZabbixAPIError as ex:
        # Session-related failure: relogin and retry the request once.
        if self.relogin_interval and any(i in ex.error['data'] for i in self.LOGIN_ERRORS):
            self.log(WARNING, 'Zabbix API not logged in (%s). Performing Zabbix API relogin', ex)
            self.relogin()  # Will raise exception in case of login error
            return self.do_request(self.json_obj(method, params=params))
        raise  # Re-raise the exception
    finally:
        # Always log the elapsed time, success or failure.
        self.log(INFO, '[%s-%05d] Zabbix API method "%s" finished in %g seconds',
                 start_time, self.id, method, (time() - start_time))
<SYSTEM_TASK:>
download function to download parallely
<END_TASK>
<USER_TASK:>
Description:
def download_parallel(url, directory, idx, min_file_size = 0, max_file_size = -1,
                no_redirects = False, pos = 0, mode = 's'):
    """
    download function to download parallely

    Streams *url* into *directory*, publishing progress through the
    module-level globals (file_name, total_chunks, i_max) that the GUI
    progress bars read; exit_flag is incremented when this worker finishes
    or skips its file.

    NOTE(review): min_file_size/max_file_size are compared against the
    *chunk count* (total_size / chunk_size), not raw bytes -- confirm units.
    NOTE(review): assumes `s` is a module-level requests session -- confirm.
    """
    global main_it
    global exit_flag
    global total_chunks
    global file_name
    global i_max
    file_name[idx]= url.split('/')[-1]
    file_address = directory + '/' + file_name[idx]
    is_redirects = not no_redirects
    # stream=True avoids loading the whole body into memory up front
    resp = s.get(url, stream = True, allow_redirects = is_redirects)
    if not resp.status_code == 200:
        # ignore this file since server returns invalid response
        exit_flag += 1
        return
    try:
        total_size = int(resp.headers['content-length'])
    except KeyError:
        # no content-length header; fall back to the downloaded body size
        total_size = len(resp.content)
    total_chunks[idx] = total_size / chunk_size
    if total_chunks[idx] < min_file_size:
        # ignore this file since file size is lesser than min_file_size
        exit_flag += 1
        return
    elif max_file_size != -1 and total_chunks[idx] > max_file_size:
        # ignore this file since file size is greater than max_file_size
        exit_flag += 1
        return
    file_iterable = resp.iter_content(chunk_size = chunk_size)
    with open(file_address, 'wb') as f:
        for sno, data in enumerate(file_iterable):
            # i_max feeds the GUI progress bar for this download slot
            i_max[idx] = sno + 1
            f.write(data)
    exit_flag += 1
<SYSTEM_TASK:>
called when paralled downloading is true
<END_TASK>
<USER_TASK:>
Description:
def download_parallel_gui(root, urls, directory, min_file_size, max_file_size, no_redirects):
    """
    called when parallel downloading is requested

    Flags parallel mode for the worker threads, makes sure the save
    directory exists, and opens the progress window that drives the
    downloads.
    """
    global parallel
    parallel = True
    # create directory to save files
    if not os.path.exists(directory):
        os.makedirs(directory)
    progress_class(root, urls, directory, min_file_size, max_file_size, no_redirects)
<SYSTEM_TASK:>
called when user wants serial downloading
<END_TASK>
<USER_TASK:>
Description:
def download_series_gui(frame, urls, directory, min_file_size, max_file_size, no_redirects):
    """
    called when user wants serial downloading

    Ensures the target directory exists, then hands the URL list to the
    progress window which downloads the files one after another.
    """
    if not os.path.exists(directory):
        # create directory to save files
        os.makedirs(directory)
    progress_class(frame, urls, directory, min_file_size, max_file_size, no_redirects)
<SYSTEM_TASK:>
function called when thread is started
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """
    function called when thread is started

    Dispatches to the parallel or serial download worker depending on the
    module-level ``parallel`` flag.
    """
    worker = download_parallel if parallel else download
    worker(self.url, self.directory, self.idx,
           self.min_file_size, self.max_file_size, self.no_redirects)
<SYSTEM_TASK:>
function to initialize thread for downloading
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """
    function to initialize thread for downloading

    Creates one myThread per URL (parallel mode) or per slot with the full
    URL list (serial mode), resets the matching progress bars, starts the
    threads, then begins polling progress via read_bytes().

    NOTE(review): self.i is reused as the loop variable here and again in
    read_bytes -- looks fragile but is preserved as-is; confirm before
    changing.
    """
    global parallel
    for self.i in range(0, self.length):
        if parallel:
            self.thread.append(myThread(self.url[ self.i ], self.directory, self.i,
                self.min_file_size, self.max_file_size, self.no_redirects))
        else:
            # if not parallel whole url list is passed
            self.thread.append(myThread(self.url, self.directory, self.i , self.min_file_size,
                self.max_file_size, self.no_redirects))
        self.progress[self.i]["value"] = 0
        self.bytes[self.i] = 0
        self.thread[self.i].start()
    self.read_bytes()
<SYSTEM_TASK:>
reading bytes; update progress bar after 1 ms
<END_TASK>
<USER_TASK:>
Description:
def read_bytes(self):
    """
    reading bytes; update progress bar after 1 ms

    Copies the download-thread globals (i_max, total_chunks, file_name)
    into the per-slot progress bars and labels, then reschedules itself on
    the Tk event loop until every worker has bumped exit_flag; at that
    point the frame is destroyed and exit_flag is reset.
    """
    global exit_flag
    for self.i in range(0, self.length) :
        self.bytes[self.i] = i_max[self.i]
        self.maxbytes[self.i] = total_chunks[self.i]
        self.progress[self.i]["maximum"] = total_chunks[self.i]
        self.progress[self.i]["value"] = self.bytes[self.i]
        self.str[self.i].set(file_name[self.i]+ " " + str(self.bytes[self.i])
            + "KB / " + str(int(self.maxbytes[self.i] + 1)) + " KB")
    if exit_flag == self.length:
        # all workers done: reset the counter and tear down the progress UI
        exit_flag = 0
        self.frame.destroy()
    else:
        # poll again in 10 ms while downloads are still running
        self.frame.after(10, self.read_bytes)
<SYSTEM_TASK:>
Add this type to our collection, if needed.
<END_TASK>
<USER_TASK:>
Description:
def declare_type(self, declared_type):  # type: (TypeDef) -> TypeDef
    """Add this type to our collection, if needed.

    Returns *declared_type* unchanged so calls can be chained.
    """
    # Bug fix: collected_types is keyed by name, but the old code tested
    # `declared_type not in self.collected_types` (the object against string
    # keys), which never matched -- so every call silently overwrote the
    # stored type. Test the name, keeping the first declaration.
    if declared_type.name not in self.collected_types:
        self.collected_types[declared_type.name] = declared_type
    return declared_type
<SYSTEM_TASK:>
Collect the provided namespaces, checking for conflicts.
<END_TASK>
<USER_TASK:>
Description:
def add_namespaces(metadata, namespaces):
    # type: (Mapping[Text, Any], MutableMapping[Text, Text]) -> None
    """Collect the provided namespaces, checking for conflicts.

    New prefixes from *metadata* are merged into *namespaces* in place; a
    prefix that is already present must map to the same value, otherwise a
    ValidationException is raised.
    """
    for prefix, uri in metadata.items():
        if prefix in namespaces:
            if namespaces[prefix] != uri:
                raise validate.ValidationException(
                    "Namespace prefix '{}' has conflicting definitions '{}'"
                    " and '{}'.".format(prefix, namespaces[prefix], uri))
        else:
            namespaces[prefix] = uri
<SYSTEM_TASK:>
Load a schema that can be used to validate documents using load_and_validate.
<END_TASK>
<USER_TASK:>
Description:
def load_schema(schema_ref,  # type: Union[CommentedMap, CommentedSeq, Text]
                cache=None  # type: Dict
                ):
    # type: (...) -> Tuple[Loader, Union[Names, SchemaParseException], Dict[Text, Any], Loader]
    """
    Load a schema that can be used to validate documents using load_and_validate.

    return: document_loader, avsc_names, schema_metadata, metaschema_loader

    The schema referenced by *schema_ref* is first resolved and validated
    against the salad metaschema, then converted into a JSON-LD context and
    an Avro name set used for target-document validation. *cache*, when
    given, pre-seeds the metaschema loader's resolution cache.
    """
    metaschema_names, _metaschema_doc, metaschema_loader = get_metaschema()
    if cache is not None:
        metaschema_loader.cache.update(cache)
    schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
    if not isinstance(schema_doc, MutableSequence):
        raise ValueError("Schema reference must resolve to a list.")
    # Validate the schema itself against the metaschema before using it.
    validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
    metactx = schema_metadata.get("@context", {})
    metactx.update(collect_namespaces(schema_metadata))
    schema_ctx = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)[0]
    # Create the loader that will be used to load the target document.
    document_loader = Loader(schema_ctx, cache=cache)
    # Make the Avro validation that will be used to validate the target
    # document
    avsc_names = make_avro_schema(schema_doc, document_loader)
    return document_loader, avsc_names, schema_metadata, metaschema_loader
<SYSTEM_TASK:>
Load a document and validate it with the provided schema.
<END_TASK>
<USER_TASK:>
Description:
def load_and_validate(document_loader,  # type: Loader
                      avsc_names,  # type: Names
                      document,  # type: Union[CommentedMap, Text]
                      strict,  # type: bool
                      strict_foreign_properties=False  # type: bool
                      ):
    # type: (...) -> Tuple[Any, Dict[Text, Any]]
    """Load a document and validate it with the provided schema.

    return data, metadata

    *document* may be a pre-parsed CommentedMap (resolved via its "id"
    field) or a reference/path that *document_loader* resolves. Validation
    failures are re-raised with duplicate line numbers stripped from the
    message.
    """
    try:
        if isinstance(document, CommentedMap):
            data, metadata = document_loader.resolve_all(
                document, document["id"], checklinks=True,
                strict_foreign_properties=strict_foreign_properties)
        else:
            data, metadata = document_loader.resolve_ref(
                document, checklinks=True,
                strict_foreign_properties=strict_foreign_properties)
        validate_doc(avsc_names, data, document_loader, strict,
                     strict_foreign_properties=strict_foreign_properties)
        return data, metadata
    except validate.ValidationException as exc:
        # Clean up the message before surfacing it to the caller.
        raise validate.ValidationException(strip_dup_lineno(str(exc)))
<SYSTEM_TASK:>
Calculate a reproducible name for anonymous types.
<END_TASK>
<USER_TASK:>
Description:
def get_anon_name(rec):
    # type: (MutableMapping[Text, Any]) -> Text
    """Calculate a reproducible name for anonymous types.

    An explicit "name" wins; otherwise enums hash their symbols and records
    hash their field names (sha1), arrays get the empty string, and any
    other type is a validation error.
    """
    if "name" in rec:
        return rec["name"]
    rec_type = rec["type"]
    if rec_type in ("enum", "https://w3id.org/cwl/salad#enum"):
        digest_source = "".join(rec["symbols"])
        return "enum_" + hashlib.sha1(digest_source.encode("UTF-8")).hexdigest()
    if rec_type in ("record", "https://w3id.org/cwl/salad#record"):
        digest_source = "".join(field["name"] for field in rec["fields"])
        return "record_" + hashlib.sha1(digest_source.encode("UTF-8")).hexdigest()
    if rec_type in ("array", "https://w3id.org/cwl/salad#array"):
        return ""
    raise validate.ValidationException("Expected enum or record, was %s" % rec_type)
<SYSTEM_TASK:>
Go through and replace types in the 'spec' mapping
<END_TASK>
<USER_TASK:>
Description:
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True):
    # type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any
    """ Go through and replace types in the 'spec' mapping

    Recursively walks mappings, lists and type-name strings; any string
    that (after vocab expansion through *loader*) is a key of *spec* is
    replaced by its value. *found* accumulates type names already visited
    so recursive record/enum definitions terminate.

    NOTE(review): the recursive call on a replacement value does not
    forward *deepen* (it falls back to the default True) -- appears
    intentional but confirm.
    """
    if isinstance(items, MutableMapping):
        # recursively check these fields for types to replace
        if items.get("type") in ("record", "enum") and items.get("name"):
            if items["name"] in found:
                # already processed this named type: return just the name
                return items["name"]
            found.add(items["name"])
        if not deepen:
            return items
        items = copy.copy(items)
        if not items.get("name"):
            items["name"] = get_anon_name(items)
        for name in ("type", "items", "fields"):
            if name in items:
                items[name] = replace_type(
                    items[name], spec, loader, found, find_embeds=find_embeds,
                    deepen=find_embeds)
                if isinstance(items[name], MutableSequence):
                    items[name] = flatten(items[name])
        return items
    if isinstance(items, MutableSequence):
        # recursively transform list
        return [replace_type(i, spec, loader, found, find_embeds=find_embeds,
                             deepen=deepen) for i in items]
    if isinstance(items, string_types):
        # found a string which is a symbol corresponding to a type.
        replace_with = None
        if items in loader.vocab:
            # If it's a vocabulary term, first expand it to its fully qualified
            # URI
            items = loader.vocab[items]
        if items in spec:
            # Look up in specialization map
            replace_with = spec[items]
        if replace_with:
            return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
        found.add(items)
    return items
<SYSTEM_TASK:>
Turn a URL into an Avro-safe name.
<END_TASK>
<USER_TASK:>
Description:
def avro_name(url):  # type: (AnyStr) -> AnyStr
    """
    Turn a URL into an Avro-safe name.

    If the URL has no fragment, return this plain URL.
    Extract either the last part of the URL fragment past the slash,
    otherwise the whole fragment.
    """
    fragment = urllib.parse.urldefrag(url)[1]
    if fragment == '':
        return url
    # rpartition yields the whole fragment when no slash is present,
    # otherwise only the part after the final slash.
    return fragment.rpartition('/')[2]
<SYSTEM_TASK:>
Make a deep copy of list and dict objects.
<END_TASK>
<USER_TASK:>
Description:
def deepcopy_strip(item):  # type: (Any) -> Any
    """
    Make a deep copy of list and dict objects.

    Intentionally do not copy attributes. This is to discard CommentedMap
    and CommentedSeq metadata which is very expensive with regular
    copy.deepcopy.
    """
    if isinstance(item, MutableMapping):
        return {key: deepcopy_strip(value) for key, value in item.items()}
    if isinstance(item, MutableSequence):
        return [deepcopy_strip(element) for element in item]
    # Scalars (and anything else) are returned as-is.
    return item
<SYSTEM_TASK:>
All in one convenience function.
<END_TASK>
<USER_TASK:>
Description:
def make_avro_schema(i,  # type: List[Any]
                     loader  # type: Loader
                     ):  # type: (...) -> Names
    """
    All in one convenience function.

    Call make_avro() and make_avro_schema_from_avro() separately if you need
    the intermediate result for diagnostic output.
    """
    collected = Names()
    # Convert the salad schema to Avro and register it under `collected`.
    make_avsc_object(convert_to_dict(make_avro(i, loader)), collected)
    return collected
<SYSTEM_TASK:>
Returns the last segment of the provided fragment or path.
<END_TASK>
<USER_TASK:>
Description:
def shortname(inputid):  # type: (Text) -> Text
    """Returns the last segment of the provided fragment or path."""
    parsed = urllib.parse.urlparse(inputid)
    # Prefer the fragment when present, otherwise fall back to the path.
    source = parsed.fragment or parsed.path
    return source.split(u"/")[-1]
<SYSTEM_TASK:>
Write a GraphViz graph of the relationships between the fields.
<END_TASK>
<USER_TASK:>
Description:
def print_fieldrefs(doc, loader, stream):
    # type: (List[Dict[Text, Any]], Loader, IO) -> None
    """Write a GraphViz graph of the relationships between the fields.

    Emits one ``record -> type`` edge (labelled with the field name) for
    every non-primitive type referenced by a field of each non-abstract
    record in the extended/specialized *doc*.
    """
    obj = extend_and_specialize(doc, loader)
    # Builtin/primitive types are not interesting as graph nodes.
    primitives = set(("http://www.w3.org/2001/XMLSchema#string",
                      "http://www.w3.org/2001/XMLSchema#boolean",
                      "http://www.w3.org/2001/XMLSchema#int",
                      "http://www.w3.org/2001/XMLSchema#long",
                      "https://w3id.org/cwl/salad#null",
                      "https://w3id.org/cwl/salad#enum",
                      "https://w3id.org/cwl/salad#array",
                      "https://w3id.org/cwl/salad#record",
                      "https://w3id.org/cwl/salad#Any"))
    stream.write("digraph {\n")
    for entry in obj:
        if entry.get("abstract"):
            continue
        if entry["type"] == "record":
            label = shortname(entry["name"])
            for field in entry.get("fields", []):
                found = set()  # type: Set[Text]
                field_name = shortname(field["name"])
                # replace_type is used here only to *collect* referenced
                # type names into `found` (empty spec, no deepening).
                replace_type(field["type"], {}, loader, found, find_embeds=False)
                for each_type in found:
                    if each_type not in primitives:
                        stream.write(
                            "\"%s\" -> \"%s\" [label=\"%s\"];\n"
                            % (label, shortname(each_type), field_name))
    stream.write("}\n")
<SYSTEM_TASK:>
Retrieve the non-reserved properties from a dictionary of properties
<END_TASK>
<USER_TASK:>
Description:
def get_other_props(all_props, reserved_props):
    # type: (Dict, Tuple) -> Optional[Dict]
    """
    Retrieve the non-reserved properties from a dictionary of properties

    @args reserved_props: The set of reserved properties to exclude

    Returns a new dict of the remaining items, or None when *all_props* is
    not mapping-like (has no callable ``items``).
    """
    if hasattr(all_props, 'items') and callable(all_props.items):
        # Dict comprehension instead of dict([(k, v) for ...]) -- same
        # result, clearer and without the intermediate list.
        return {k: v for k, v in all_props.items() if k not in reserved_props}
    return None
<SYSTEM_TASK:>
Back out a namespace from full name.
<END_TASK>
<USER_TASK:>
Description:
def get_space(self):
    # type: () -> Optional[Text]
    """Back out a namespace from full name.

    Returns None when no full name is set, the dotted prefix when a
    namespace separator is present, and "" otherwise.
    """
    full = self._full
    if full is None:
        return None
    # Only split when the dot is not the leading character.
    return full.rsplit(".", 1)[0] if full.find('.') > 0 else ""
<SYSTEM_TASK:>
Add a new schema object to the name set.
<END_TASK>
<USER_TASK:>
Description:
def add_name(self, name_attr, space_attr, new_schema):
    # type: (Text, Optional[Text], NamedSchema) -> Name
    """
    Add a new schema object to the name set.

    @arg name_attr: name value read in schema
    @arg space_attr: namespace value read in schema.
    @return: the Name that was just added.

    Raises SchemaParseException for reserved or already-registered names.
    """
    candidate = Name(name_attr, space_attr, self.default_namespace)
    fullname = candidate.fullname
    if fullname in VALID_TYPES:
        raise SchemaParseException('%s is a reserved type name.' % fullname)
    if fullname in self.names:
        raise SchemaParseException('The name "%s" is already in use.' % fullname)
    self.names[fullname] = new_schema
    return candidate
<SYSTEM_TASK:>
We're going to need to make message parameters too.
<END_TASK>
<USER_TASK:>
Description:
def make_field_objects(field_data, names):
    # type: (List[Dict[Text, Text]], Names) -> List[Field]
    """We're going to need to make message parameters too.

    Builds one Field per mapping in *field_data*, enforcing unique field
    names.

    Raises SchemaParseException for a duplicate field name or a
    non-mapping entry.
    """
    field_objects = []
    field_names = []  # type: List[Text]
    for field in field_data:
        if hasattr(field, 'get') and callable(field.get):
            atype = cast(Text, field.get('type'))
            name = cast(Text, field.get('name'))
            # null values can have a default value of None
            has_default = False
            default = None
            if 'default' in field:
                has_default = True
                default = field.get('default')
            order = field.get('order')
            doc = field.get('doc')
            other_props = get_other_props(field, FIELD_RESERVED_PROPS)
            new_field = Field(atype, name, has_default, default, order, names, doc,
                              other_props)
            # make sure field name has not been used yet
            if new_field.name in field_names:
                fail_msg = 'Field name %s already in use.' % new_field.name
                raise SchemaParseException(fail_msg)
            field_names.append(new_field.name)
        else:
            raise SchemaParseException('Not a valid field: %s' % field)
        field_objects.append(new_field)
    return field_objects
<SYSTEM_TASK:>
to create loading progress bar
<END_TASK>
<USER_TASK:>
Description:
def task(ft):
    """
    to create loading progress bar

    Packs an indeterminate ttk progress bar into *ft* and enters that
    frame's Tk mainloop (blocks until the frame/window is destroyed).
    """
    ft.pack(expand = True, fill = BOTH, side = TOP)
    pb_hD = ttk.Progressbar(ft, orient = 'horizontal', mode = 'indeterminate')
    pb_hD.pack(expand = True, fill = BOTH, side = TOP)
    # 50 ms step interval for the bouncing (indeterminate) animation
    pb_hD.start(50)
    ft.mainloop()
<SYSTEM_TASK:>
function to fetch links and download them
<END_TASK>
<USER_TASK:>
Description:
def download_content_gui(**args):
    """
    function to fetch links and download them

    Runs search_function in a worker thread while `task` shows an
    indeterminate progress bar, then starts the serial or parallel
    download progress UI in a fresh frame.

    NOTE(review): `links` is a module-level global, presumably populated by
    search_function -- confirm.
    """
    global row
    if not args ['directory']:
        # default the save directory to the query with spaces dashed
        args ['directory'] = args ['query'].replace(' ', '-')
    root1 = Frame(root)
    t1 = threading.Thread(target = search_function, args = (root1,
        args['query'], args['website'], args['file_type'], args['limit'],args['option']))
    t1.start()
    task(root1)
    t1.join()
    #new frame for progress bar
    row = Frame(root)
    row.pack()
    if args['parallel']:
        download_parallel_gui(row, links, args['directory'], args['min_file_size'],
            args['max_file_size'], args['no_redirects'])
    else:
        download_series_gui(row, links, args['directory'], args['min_file_size'],
            args['max_file_size'], args['no_redirects'])
<SYSTEM_TASK:>
event for download button
<END_TASK>
<USER_TASK:>
Description:
def click_download(self, event):
    """
    event for download button

    Harvests every input widget into the global ``args`` dict (numeric
    fields are cast to int), runs the threat check, then kicks off the
    download flow via download_content_gui.

    An uncaught ValueError propagates when a numeric entry holds
    non-digits.
    """
    args ['parallel'] = self.p.get()
    args ['file_type'] = self.optionmenu.get()
    args ['no_redirects'] = self.t.get()
    args ['query'] = self.entry_query.get()
    args ['min_file_size'] = int( self.entry_min.get())
    args ['max_file_size'] = int( self.entry_max.get())
    args ['limit'] = int( self.entry_limit.get())
    args ['website']= self.entry_website.get()
    args ['option']= self.engine.get()
    print(args)
    self.check_threat()
    download_content_gui( **args )
<SYSTEM_TASK:>
function that gets called whenever entry is clicked
<END_TASK>
<USER_TASK:>
Description:
def on_entry_click(self, event):
    """
    function that gets called whenever entry is clicked

    Clears the grey placeholder text the first time the user focuses the
    entry, switching the foreground colour to black for real input.
    """
    widget = event.widget
    if widget.config('fg')[4] == 'grey':
        widget.delete(0, "end")  # delete all the text in the entry
        widget.insert(0, '')     # Insert blank for user input
        widget.config(fg='black')
<SYSTEM_TASK:>
function that gets called whenever anywhere except entry is clicked
<END_TASK>
<USER_TASK:>
Description:
def on_focusout(self, event, a):
    """
    function that gets called whenever anywhere except entry is clicked

    Restores the grey placeholder text (looked up by index *a* in the
    module-level default_text) when the entry was left empty.
    """
    widget = event.widget
    if widget.get() == '':
        widget.insert(0, default_text[a])
        widget.config(fg='grey')
<SYSTEM_TASK:>
dialogue box for choosing directory
<END_TASK>
<USER_TASK:>
Description:
def ask_dir(self):
    """
    dialogue box for choosing directory

    Stores the chosen path in the global ``args`` dict and mirrors it in
    the entry's StringVar.
    """
    chosen = askdirectory(**self.dir_opt)
    args['directory'] = chosen
    self.dir_text.set(chosen)
<SYSTEM_TASK:>
main function to search for links and return valid ones
<END_TASK>
<USER_TASK:>
Description:
def search(query, engine='g', site="", file_type = 'pdf', limit = 10):
    """
    main function to search for links and return valid ones

    Parameters:
        query (str): search terms
        engine (str): 'g' for Google, 'd' for DuckDuckGo (anything else
            prints an error and exits the program)
        site (str): optional site to restrict the search to
        file_type (str): file extension to look for
        limit (int): maximum number of links to fetch

    Returns:
        list: links that passed validate_links()
    """
    if site == "":
        search_query = "filetype:{0} {1}".format(file_type, query)
    else:
        search_query = "site:{0} filetype:{1} {2}".format(site,file_type, query)
    # Bug fix: the header field must be 'User-Agent' (hyphenated); the old
    # 'User Agent' key was not a valid/recognised request header.
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:53.0) \
        Gecko/20100101 Firefox/53.0'
    }
    if engine == "g":
        params = {
            'q': search_query,
            'start': 0,
        }
        links = get_google_links(limit, params, headers)
    elif engine == "d":
        params = {
            'q': search_query,
        }
        links = get_duckduckgo_links(limit,params,headers)
    else:
        print("Wrong search engine selected!")
        sys.exit()
    valid_links = validate_links(links)
    return valid_links
<SYSTEM_TASK:>
function to check if input query is not None
<END_TASK>
<USER_TASK:>
Description:
def validate_args(**args):
    """
    function to check if input query is not None
    and set missing arguments to default value

    Exits the program (SystemExit via sys.exit) when no usable query is
    supplied.

    Returns:
        dict: args with every missing key filled from DEFAULTS
    """
    # .get() also covers a completely missing 'query' key; the previous
    # args['query'] lookup raised KeyError instead of printing and exiting.
    if not args.get('query'):
        print("\nMissing required query argument.")
        sys.exit()
    for key in DEFAULTS:
        # setdefault is equivalent to the old `if key not in args` check
        args.setdefault(key, DEFAULTS[key])
    return args
<SYSTEM_TASK:>
main function to fetch links and download them
<END_TASK>
<USER_TASK:>
Description:
def download_content(**args):
    """
    main function to fetch links and download them

    Validates/defaults the arguments, derives the save directory from the
    query when not given, searches for matching links, then downloads them
    serially or in parallel.

    NOTE(review): download_parallel is called here with (links, directory,
    ...) while the GUI worker above takes (url, directory, idx, ...) --
    presumably a CLI variant defined elsewhere in the project; confirm.
    """
    args = validate_args(**args)
    if not args['directory']:
        # default the save directory to the query with spaces dashed
        args['directory'] = args['query'].replace(' ', '-')
    print("Downloading {0} {1} files on topic {2} from {3} and saving to directory: {4}"
          .format(args['limit'], args['file_type'], args['query'], args['website'], args['directory']))
    links = search(args['query'], args['engine'], args['website'], args['file_type'], args['limit'])
    if args['parallel']:
        download_parallel(links, args['directory'], args['min_file_size'], args['max_file_size'], args['no_redirects'])
    else:
        download_series(links, args['directory'], args['min_file_size'], args['max_file_size'], args['no_redirects'])
<SYSTEM_TASK:>
Return a list of subset of VM that match the pattern name
<END_TASK>
<USER_TASK:>
Description:
def find(self, name):
    """
    Return a list of subset of VM that match the pattern name

    @param name (str): the vm name of the virtual machine
    @param name (Obj): the vm object that represent the virtual
                       machine (can be Pro or Smart)
    @return (list): the subset containing the search result.
    """
    # Bug fix: the original compared name.__class__ (a class object) to a
    # *string* with `is`, which is always False, so VM objects fell through
    # to the string branch and matching failed with a TypeError. Duck-type
    # on the vm_name attribute instead.
    if hasattr(name, 'vm_name'):
        pattern = name.vm_name
    else:
        pattern = name
    # The result is also stored on the instance because this method is
    # called from threads without a return channel (kept for backward
    # compatibility with existing callers).
    self.last_search_result = [vm for vm in self if pattern in vm.vm_name]
    return self.last_search_result
<SYSTEM_TASK:>
Initialize the internal list containing each template available for each
<END_TASK>
<USER_TASK:>
Description:
def get_hypervisors(self):
    """
    Initialize the internal list containing each template available for each
    hypervisor.

    Populates ``self.templates`` with one Template per entry returned by
    the GetHypervisors API call, including resource bounds for non-SMART
    hypervisors, and caches the raw response in ``self.json_templates``.

    :return: [bool] True in case of success, otherwise False
    """
    json_scheme = self.gen_def_json_scheme('GetHypervisors')
    json_obj = self.call_method_post(method='GetHypervisors', json_scheme=json_scheme)
    self.json_templates = json_obj
    d = dict(json_obj)
    for elem in d['Value']:
        hv = self.hypervisors[elem['HypervisorType']]
        for inner_elem in elem['Templates']:
            o = Template(hv)
            o.template_id = inner_elem['Id']
            o.descr = inner_elem['Description']
            o.id_code = inner_elem['IdentificationCode']
            o.name = inner_elem['Name']
            o.enabled = inner_elem['Enabled']
            if hv != 'SMART':
                # Pro hypervisors also report per-resource maxima.
                for rb in inner_elem['ResourceBounds']:
                    resource_type = rb['ResourceType']
                    if resource_type == 1:
                        o.resource_bounds.max_cpu = rb['Max']
                    if resource_type == 2:
                        o.resource_bounds.max_memory = rb['Max']
                    if resource_type == 3:
                        o.resource_bounds.hdd0 = rb['Max']
                    if resource_type == 7:
                        o.resource_bounds.hdd1 = rb['Max']
                    if resource_type == 8:
                        o.resource_bounds.hdd2 = rb['Max']
                    if resource_type == 9:
                        o.resource_bounds.hdd3 = rb['Max']
            self.templates.append(o)
    # Bug fix: the old `json_obj['Success'] is 'True'` compared identity
    # against a string literal, which is False for the boolean True the API
    # returns, so the method always reported failure. Sibling methods use
    # `is True`; do the same here.
    return json_obj['Success'] is True
<SYSTEM_TASK:>
Return an ip object representing a new bought IP
<END_TASK>
<USER_TASK:>
Description:
def purchase_ip(self, debug=False):
    """
    Return an ip object representing a new bought IP

    @param debug [Boolean] if true, request and response will be printed
    @return (Ip): Ip object
    @raise Exception: when the API response lacks the expected fields
    """
    json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')
    json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)
    try:
        ip = Ip()
        ip.ip_addr = json_obj['Value']['Value']
        ip.resid = json_obj['Value']['ResourceId']
        return ip
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`: only malformed/missing response
        # fields should map to this error, not e.g. KeyboardInterrupt.
        raise Exception('Unknown error retrieving IP.')
<SYSTEM_TASK:>
Delete an Ip from the boughs ip list
<END_TASK>
<USER_TASK:>
Description:
def remove_ip(self, ip_id):
    """Delete an IP from the bought IP list.

    @param (str) ip_id: a string representing the resource id of the IP
    @return: True if the API call succeeded, else False
    """
    # The API expects the resource id embedded as a raw JSON field fragment.
    ip_id = ' "IpAddressResourceId": %s' % ip_id
    json_scheme = self.gen_def_json_scheme('SetRemoveIpAddress', ip_id)
    json_obj = self.call_method_post(method='SetRemoveIpAddress', json_scheme=json_scheme)
    # NOTE(review): debug output left in place to preserve behaviour --
    # consider replacing with logging or removing.
    pprint(json_obj)
    # ``True if x is True else False`` was redundant; return the comparison.
    return json_obj['Success'] is True
<SYSTEM_TASK:>
Retrieve the smart package id given is English name
<END_TASK>
<USER_TASK:>
Description:
def get_package_id(self, name):
    """Retrieve the smart package id given its English name.

    @param (str) name: the Aruba Smart package size name,
        ie: "small", "medium", "large", "extra large".
    @return: the package id matching the data center and the chosen size,
        or None when no package has that English (LanguageID == 2) name.
    """
    json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
    # BUG FIX: the method name contained a trailing space
    # ('GetPreConfiguredPackages ') that did not match the name used to
    # build the JSON scheme above.
    json_obj = self.call_method_post(method='GetPreConfiguredPackages', json_scheme=json_scheme)
    for package in json_obj['Value']:
        package_id = package['PackageID']
        for description in package['Descriptions']:
            language_id = description['LanguageID']
            package_name = description['Text']
            # LanguageID 2 is the English description of the package.
            if language_id == 2 and package_name.lower() == name.lower():
                return package_id
<SYSTEM_TASK:>
Wrapper around Requests for GET requests
<END_TASK>
<USER_TASK:>
Description:
def _get(self, *args, **kwargs):
    """Wrapper around Requests for GET requests

    Returns:
        Response:
            A Requests Response object
    """
    # Fall back to the instance-wide timeout unless the caller supplied one.
    kwargs.setdefault('timeout', self.timeout)
    return self.session.get(*args, **kwargs)
<SYSTEM_TASK:>
Wrapper around Requests for GET XML requests
<END_TASK>
<USER_TASK:>
Description:
def _get_xml(self, *args, **kwargs):
    """Wrapper around Requests for GET XML requests

    Returns:
        Response:
            A Requests Response object
    """
    # NOTE(review): unlike _get/_put/_delete, no default timeout is applied
    # here -- confirm whether that is intentional.
    return self.session_xml.get(*args, **kwargs)
<SYSTEM_TASK:>
Wrapper around Requests for PUT requests
<END_TASK>
<USER_TASK:>
Description:
def _put(self, *args, **kwargs):
    """Wrapper around Requests for PUT requests

    Returns:
        Response:
            A Requests Response object
    """
    # Fall back to the instance-wide timeout unless the caller supplied one.
    kwargs.setdefault('timeout', self.timeout)
    return self.session.put(*args, **kwargs)
<SYSTEM_TASK:>
Wrapper around Requests for DELETE requests
<END_TASK>
<USER_TASK:>
Description:
def _delete(self, *args, **kwargs):
    """Wrapper around Requests for DELETE requests

    Returns:
        Response:
            A Requests Response object
    """
    # Fall back to the instance-wide timeout unless the caller supplied one.
    kwargs.setdefault('timeout', self.timeout)
    return self.session.delete(*args, **kwargs)
<SYSTEM_TASK:>
Test that application can authenticate to Crowd.
<END_TASK>
<USER_TASK:>
Description:
def auth_ping(self):
    """Test that application can authenticate to Crowd.

    Attempts to authenticate the application user against the Crowd
    server.  In order for user authentication to work, an application
    must be able to authenticate.

    Returns:
        bool:
            True if the application authentication succeeded.
    """
    probe_url = self.rest_url + "/non-existent/location"
    response = self._get(probe_url)
    # Crowd answers 404 for an unknown path only once the application has
    # authenticated; 401 (or any other status, e.g. a server problem)
    # means application authentication failed.
    return response.status_code == 404
<SYSTEM_TASK:>
Authenticate a user account against the Crowd server.
<END_TASK>
<USER_TASK:>
Description:
def auth_user(self, username, password):
    """Authenticate a user account against the Crowd server.

    Attempts to authenticate the user against the Crowd server.

    Args:
        username: The account username.
        password: The account password.

    Returns:
        dict:
            A dict mapping of user attributes if the application
            authentication was successful.  See the Crowd documentation
            for the authoritative list of attributes.
        None: If authentication failed.
    """
    response = self._post(self.rest_url + "/authentication",
                          data=json.dumps({"value": password}),
                          params={"username": username})
    # Return the user attribute dict on success, None on any failure.
    return response.json() if response.ok else None
<SYSTEM_TASK:>
Create a session for a user.
<END_TASK>
<USER_TASK:>
Description:
def get_session(self, username, password, remote="127.0.0.1", proxy=None):
    """Create a session for a user.

    Attempts to create a user session on the Crowd server.

    Args:
        username: The account username.
        password: The account password.
        remote:
            The remote address of the user.  This can be used
            to create multiple concurrent sessions for a user.
            The host you run this program on may need to be configured
            in Crowd as a trusted proxy for this to work.
        proxy: Value of X-Forwarded-For server header.

    Returns:
        dict:
            A dict mapping of user attributes if the application
            authentication was successful.  See the Crowd
            documentation for the authoritative list of attributes.
        None: If authentication failed.
    """
    # Build the validation factor list first, then wrap it in the payload.
    factors = [{"name": "remote_address", "value": remote}]
    if proxy:
        factors.append({"name": "X-Forwarded-For", "value": proxy})
    payload = {
        "username": username,
        "password": password,
        "validation-factors": {"validationFactors": factors},
    }
    response = self._post(self.rest_url + "/session",
                          data=json.dumps(payload),
                          params={"expand": "user"})
    # Return the expanded user object on success, None on any failure.
    return response.json() if response.ok else None
<SYSTEM_TASK:>
Validate a session token.
<END_TASK>
<USER_TASK:>
Description:
def validate_session(self, token, remote="127.0.0.1", proxy=None):
    """Validate a session token.

    Validate a previously acquired session token against the
    Crowd server.  This may be a token provided by a user from
    a http cookie or by some other means.

    Args:
        token: The session token.
        remote: The remote address of the user.
        proxy: Value of X-Forwarded-For server header

    Returns:
        dict:
            A dict mapping of user attributes if the application
            authentication was successful.  See the Crowd
            documentation for the authoritative list of attributes.
        None: If authentication failed.
    """
    factors = [{"name": "remote_address", "value": remote}]
    if proxy:
        # BUG FIX: the original appended via
        # params["validation-factors"]["validationFactors"], a key layout
        # belonging to get_session's payload, not this one -- it raised
        # KeyError whenever a proxy value was supplied.
        factors.append({"name": "X-Forwarded-For", "value": proxy})
    params = {"validationFactors": factors}
    url = self.rest_url + "/session/%s" % token
    response = self._post(url, data=json.dumps(params), params={"expand": "user"})
    # For consistency between methods use None rather than False
    # If token validation failed for any reason return None
    if not response.ok:
        return None
    # Otherwise return the user object
    return response.json()
<SYSTEM_TASK:>
Terminates the session token, effectively logging out the user
<END_TASK>
<USER_TASK:>
Description:
def terminate_session(self, token):
    """Terminates the session token, effectively logging out the user
    from all crowd-enabled services.

    Args:
        token: The session token.

    Returns:
        True: If session terminated
        None: If session termination failed
    """
    response = self._delete(self.rest_url + "/session/%s" % token)
    # For consistency with the other methods, failures map to None
    # rather than False.
    return True if response.ok else None
<SYSTEM_TASK:>
Add a user to the directory
<END_TASK>
<USER_TASK:>
Description:
def add_user(self, username, raise_on_error=False, **kwargs):
    """Add a user to the directory

    Args:
        username: The account username
        raise_on_error: optional (default: False)
        **kwargs: key-value pairs:
                      password: mandatory
                      email: mandatory
                      first_name: optional
                      last_name: optional
                      display_name: optional
                      active: optional (default True)

    Returns:
        True: Succeeded
        False: If unsuccessful

    Raises:
        ValueError: if a mandatory argument is missing or an unknown
            keyword argument is supplied.
        RuntimeError: if the server rejects the request and
            ``raise_on_error`` is True.
    """
    # Check that mandatory elements have been provided
    if 'password' not in kwargs:
        raise ValueError("missing password")
    if 'email' not in kwargs:
        raise ValueError("missing email")

    # Populate data with default and mandatory values.
    # BUG FIX: the original wrapped this in try/except KeyError and
    # *returned* the ValueError class instead of raising it.  The guard
    # was dead anyway -- both keys are guaranteed present by the checks
    # above -- so it is removed.
    data = {
        "name": username,
        "first-name": username,
        "last-name": username,
        "display-name": username,
        "email": kwargs["email"],
        "password": {"value": kwargs["password"]},
        "active": True
    }

    # Remove special case 'password' so the loop below cannot overwrite
    # the nested password structure with a flat value.
    del kwargs["password"]

    # Put values from kwargs into data
    for k, v in kwargs.items():
        new_k = k.replace("_", "-")
        if new_k not in data:
            raise ValueError("invalid argument %s" % k)
        data[new_k] = v

    response = self._post(self.rest_url + "/user",
                          data=json.dumps(data))

    if response.status_code == 201:
        return True

    if raise_on_error:
        raise RuntimeError(response.json()['message'])

    return False
<SYSTEM_TASK:>
Set the active state of a user
<END_TASK>
<USER_TASK:>
Description:
def set_active(self, username, active_state):
    """Set the active state of a user

    Args:
        username: The account username
        active_state: True or False

    Returns:
        True: If successful
        None: If no user or failure occurred
    """
    if active_state not in (True, False):
        raise ValueError("active_state must be True or False")

    user = self.get_user(username)
    if user is None:
        return None

    # Nothing to do if the user is already in the requested state.
    if user['active'] is active_state:
        return True

    user['active'] = active_state
    response = self._put(self.rest_url + "/user",
                         params={"username": username},
                         data=json.dumps(user))
    # Crowd answers 204 No Content on a successful update.
    return True if response.status_code == 204 else None
<SYSTEM_TASK:>
Change new password for a user
<END_TASK>
<USER_TASK:>
Description:
def change_password(self, username, newpassword, raise_on_error=False):
    """Change new password for a user

    Args:
        username: The account username.
        newpassword: The account new password.
        raise_on_error: optional (default: False)

    Returns:
        True: Succeeded
        False: If unsuccessful
    """
    response = self._put(self.rest_url + "/user/password",
                         data=json.dumps({"value": newpassword}),
                         params={"username": username})
    if response.ok:
        return True
    # Either surface the server's error message or report failure quietly.
    if raise_on_error:
        raise RuntimeError(response.json()['message'])
    return False
<SYSTEM_TASK:>
Retrieves a list of all users that directly or indirectly belong to the given groupname.
<END_TASK>
<USER_TASK:>
Description:
def get_nested_group_users(self, groupname):
    """Retrieves a list of all users that directly or indirectly belong to the given groupname.

    Args:
        groupname: The group name.

    Returns:
        list:
            A list of strings of user names.
    """
    query = {
        "groupname": groupname,
        "start-index": 0,
        "max-results": 99999,
    }
    response = self._get(self.rest_url + "/group/user/nested", params=query)
    if not response.ok:
        return None
    # Flatten the response down to just the user names.
    return [member['name'] for member in response.json()['users']]
<SYSTEM_TASK:>
Determines if the user exists.
<END_TASK>
<USER_TASK:>
Description:
def user_exists(self, username):
    """Determines if the user exists.

    Args:
        username: The user name.

    Returns:
        bool:
            True if the user exists in the Crowd application.
    """
    response = self._get(self.rest_url + "/user",
                         params={"username": username})
    # NOTE: a missing user (or any error) yields None, not False.
    if response.ok:
        return True
    return None
<SYSTEM_TASK:>
Fetches all group memberships.
<END_TASK>
<USER_TASK:>
Description:
def get_memberships(self):
    """Fetches all group memberships.

    Returns:
        dict:
            key: group name
            value: (array of users, array of groups)
    """
    response = self._get_xml(self.rest_url + "/group/membership")
    if not response.ok:
        return None

    xmltree = etree.fromstring(response.content)

    memberships = {}
    for node in xmltree.findall('membership'):
        # Coerce everything to unicode in a python 2/3 compatible way.
        group_name = u'{}'.format(node.get('group'))
        user_names = [u'{}'.format(u.get('name'))
                      for u in node.find('users').findall('user')]
        group_names = [u'{}'.format(g.get('name'))
                       for g in node.find('groups').findall('group')]
        memberships[group_name] = {u'users': user_names, u'groups': group_names}

    return memberships
<SYSTEM_TASK:>
Performs a user search using the Crowd search API.
<END_TASK>
<USER_TASK:>
Description:
def search(self, entity_type, property_name, search_string, start_index=0, max_results=99999):
    """Performs a user search using the Crowd search API.

    https://developer.atlassian.com/display/CROWDDEV/Crowd+REST+Resources#CrowdRESTResources-SearchResource

    Args:
        entity_type: 'user' or 'group'
        property_name: eg. 'email', 'name'
        search_string: the string to search for.
        start_index: starting index of the results (default: 0)
        max_results: maximum number of results returned (default: 99999)

    Returns:
        json results:
            Returns search results.
    """
    # Query-string parameters.
    # BUG FIX (dead code): the original first built a ``params`` dict
    # containing a JSON-style property-search-restriction and then
    # immediately overwrote it with this dict; the unused dict is removed.
    params = {
        'entity-type': entity_type,
        'expand': entity_type,
        'start-index': start_index,
        'max-results': max_results
    }

    # Construct XML payload of the form:
    #   <property-search-restriction>
    #     <property>
    #       <name>email</name>
    #       <type>STRING</type>
    #     </property>
    #     <match-mode>CONTAINS</match-mode>
    #     <value>[email protected]</value>
    #   </property-search-restriction>
    root = etree.Element('property-search-restriction')

    property_ = etree.Element('property')
    prop_name = etree.Element('name')
    prop_name.text = property_name
    property_.append(prop_name)
    prop_type = etree.Element('type')
    prop_type.text = 'STRING'
    property_.append(prop_type)
    root.append(property_)

    match_mode = etree.Element('match-mode')
    match_mode.text = 'CONTAINS'
    root.append(match_mode)

    value = etree.Element('value')
    value.text = search_string
    root.append(value)

    # Construct the XML payload expected by search API
    payload = '<?xml version="1.0" encoding="UTF-8"?>\n' + etree.tostring(root).decode('utf-8')

    # We're sending XML but would like a JSON response
    session = self._build_session(content_type='xml')
    session.headers.update({'Accept': 'application/json'})
    response = session.post(self.rest_url + "/search", params=params, data=payload, timeout=self.timeout)

    if not response.ok:
        return None

    return response.json()
<SYSTEM_TASK:>
The versions associated with Blender
<END_TASK>
<USER_TASK:>
Description:
def versions(self) -> List[BlenderVersion]:
    """The versions associated with Blender.

    Every git tag of the repository as a ``BlenderVersion``, plus the
    special master version.
    """
    # BUG FIX: the annotation was ``List(BlenderVersion)`` -- *calling*
    # typing.List raises TypeError when the def is evaluated; the
    # subscripted form ``List[BlenderVersion]`` is the correct generic.
    tagged = [BlenderVersion(tag) for tag in self.git_repo.tags]
    return tagged + [BlenderVersion(BLENDER_VERSION_MASTER)]
<SYSTEM_TASK:>
Copy libraries from the bin directory and place them as appropriate
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """
    Copy libraries from the bin directory and place them as appropriate
    """
    self.announce("Moving library files", level=3)

    # The libraries were already produced by the preceding build_ext step.
    self.skip_build = True

    bin_dir = self.distribution.bin_dir

    def _wanted(filename):
        # Shared libraries only, excluding the python/bpy binaries themselves.
        path = os.path.join(bin_dir, filename)
        if not os.path.isfile(path):
            return False
        if os.path.splitext(filename)[1] not in (".dll", ".so"):
            return False
        return not (filename.startswith("python") or filename.startswith("bpy"))

    libs = [os.path.join(bin_dir, name)
            for name in os.listdir(bin_dir) if _wanted(name)]

    for lib in libs:
        shutil.move(lib, os.path.join(self.build_dir, os.path.basename(lib)))

    # Mark the libs for installation, adding them to
    # distribution.data_files seems to ensure that setuptools' record
    # writer appends them to installed-files.txt in the package's egg-info.
    # (Adding them to distribution.libraries never updated
    # installed-files.txt; the usual advice -- eager_resources in setup()
    # -- ends up in data_files anyway.)
    self.distribution.data_files = [
        os.path.join(self.install_dir, os.path.basename(lib))
        for lib in libs
    ]

    # Must be forced to run after adding the libs to data_files
    self.distribution.run_command("install_data")

    super().run()
<SYSTEM_TASK:>
Perform build_cmake before doing the 'normal' stuff
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """
    Perform build_cmake before doing the 'normal' stuff
    """
    # Only the bpy extension needs the CMake pre-build.
    for extension in self.extensions:
        if extension.name != "bpy":
            continue
        self.build_cmake(extension)
    super().run()
<SYSTEM_TASK:>
Return True if an object is part of the search index queryset.
<END_TASK>
<USER_TASK:>
Description:
def in_search_queryset(self, instance_id, index="_all"):
    """
    Return True if an object is part of the search index queryset.

    Sometimes it's useful to know if an object _should_ be indexed.  If
    an object is saved, how do you know if you should push that change
    to the search index?  The simplest (albeit not most efficient) way
    is to check if it appears in the underlying search queryset.

    NB this method doesn't evaluate the entire dataset, it chains an
    additional queryset filter expression on the end.  That's why it's
    important that the `get_search_queryset` method returns a queryset.

    Args:
        instance_id: the id of model object that we are looking for.

    Kwargs:
        index: string, the name of the index in which to check.
            Defaults to '_all'.
    """
    queryset = self.get_search_queryset(index=index)
    return queryset.filter(pk=instance_id).exists()
<SYSTEM_TASK:>
Prepare SQL statement consisting of a sequence of WHEN .. THEN statements.
<END_TASK>
<USER_TASK:>
Description:
def _raw_sql(self, values):
    """Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
    # Char primary keys must be quoted inside the CASE expression.
    if isinstance(self.model._meta.pk, CharField):
        clauses = [self._when("'{}'".format(pk), rank) for (pk, rank) in values]
    else:
        clauses = [self._when(pk, rank) for (pk, rank) in values]
    when_clauses = " ".join(clauses)
    table_name = self.model._meta.db_table
    primary_key = self.model._meta.pk.column
    # Identifiers come from Django model metadata, not user input.
    return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
        table_name, primary_key, when_clauses
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.