_id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 31–13.1k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q276200
|
admin_footer
|
test
|
def admin_footer(parser, token):
"""
Template tag that renders the footer information based on the
authenticated user's permissions.
"""
# split_contents() doesn't know how to split quoted strings.
tag_name = token.split_contents()
if len(tag_name) > 1:
raise base.TemplateSyntaxError(
|
python
|
{
"resource": ""
}
|
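The `admin_footer` snippet above is cut off at the raised `TemplateSyntaxError`. A minimal sketch of how a no-argument Django tag of this shape is typically completed; the `AdminFooterNode` node class and the error message are assumptions, not the row's verified continuation:

```python
from django import template
from django.template import base

register = template.Library()


class AdminFooterNode(template.Node):
    """Hypothetical node; real rendering would inspect the user's permissions."""
    def render(self, context):
        return ''


@register.tag
def admin_footer(parser, token):
    """Template tag that renders footer information based on permissions."""
    tag_name = token.split_contents()
    if len(tag_name) > 1:
        # The tag takes no arguments.
        raise base.TemplateSyntaxError(
            '%r tag takes no arguments' % tag_name[0])
    return AdminFooterNode()
```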
q276201
|
build_payment_parameters
|
test
|
def build_payment_parameters(amount: Money, client_ref: str) -> PaymentParameters:
"""
Builds the parameters needed to present the user with a datatrans payment form.
:param amount: The amount and currency we want the user to pay
:param client_ref: A unique reference for this payment
:return: The parameters needed to display the datatrans form
"""
merchant_id = web_merchant_id
amount, currency = money_to_amount_and_currency(amount)
refno = client_ref
|
python
|
{
"resource": ""
}
|
q276202
|
build_register_credit_card_parameters
|
test
|
def build_register_credit_card_parameters(client_ref: str) -> PaymentParameters:
"""
Builds the parameters needed to present the user with a datatrans form to register a credit card.
Unlike a payment form, datatrans will not show an amount.
:param client_ref: A unique reference for this alias capture.
:return: The parameters needed to display the datatrans form
|
python
|
{
"resource": ""
}
|
q276203
|
pay_with_alias
|
test
|
def pay_with_alias(amount: Money, alias_registration_id: str, client_ref: str) -> Payment:
"""
Charges money using datatrans, given a previously registered credit card alias.
:param amount: The amount and currency we want to charge
:param alias_registration_id: The alias registration to use
:param client_ref: A unique reference for this charge
:return: a Payment (either successful or not)
"""
if amount.amount <= 0:
raise ValueError('Pay with alias takes a strictly positive amount')
alias_registration = AliasRegistration.objects.get(pk=alias_registration_id)
logger.info('paying-with-alias', amount=amount, client_ref=client_ref,
alias_registration=alias_registration)
|
python
|
{
"resource": ""
}
|
q276204
|
get_version
|
test
|
def get_version(version=None):
"""
Return the full version number, including rc, beta, etc. tags.
For example: `2.0.0a1`
:rtype: str
"""
v = version
|
python
|
{
"resource": ""
}
|
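The `get_version` row stops right after `v = version`. Below is a sketch of one common convention (similar to Django's versioning helper) that matches the `2.0.0a1` example in the docstring; the module-level `VERSION` tuple and the suffix mapping are assumptions, not the row's actual tail:

```python
# Hypothetical module-level version tuple: (major, minor, micro, releaselevel, serial)
VERSION = (2, 0, 0, 'alpha', 1)


def get_version(version=None):
    """Return the full version number, including rc, beta, etc. tags, e.g. '2.0.0a1'."""
    v = version or VERSION
    main = '.'.join(str(part) for part in v[:3])
    if v[3] == 'final':
        return main
    # Map the release level to its short PEP 440 style suffix.
    suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[v[3]]
    return '{0}{1}{2}'.format(main, suffix, v[4])
```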
q276205
|
FilesystemBrowser._construct
|
test
|
def _construct(self):
'''Construct widget.'''
self.setLayout(QtGui.QVBoxLayout())
self._headerLayout = QtGui.QHBoxLayout()
self._locationWidget = QtGui.QComboBox()
self._headerLayout.addWidget(self._locationWidget, stretch=1)
self._upButton = QtGui.QToolButton()
self._upButton.setIcon(QtGui.QIcon(':riffle/icon/up'))
self._headerLayout.addWidget(self._upButton)
self.layout().addLayout(self._headerLayout)
self._contentSplitter = QtGui.QSplitter()
self._bookmarksWidget = QtGui.QListView()
self._contentSplitter.addWidget(self._bookmarksWidget)
self._filesystemWidget = QtGui.QTableView()
self._filesystemWidget.setSelectionBehavior(
self._filesystemWidget.SelectRows
)
self._filesystemWidget.setSelectionMode(
self._filesystemWidget.SingleSelection
)
self._filesystemWidget.verticalHeader().hide()
self._contentSplitter.addWidget(self._filesystemWidget)
proxy = riffle.model.FilesystemSortProxy(self)
model = riffle.model.Filesystem(
path=self._root, parent=self, iconFactory=self._iconFactory
)
proxy.setSourceModel(model)
proxy.setDynamicSortFilter(True)
|
python
|
{
"resource": ""
}
|
q276206
|
FilesystemBrowser._postConstruction
|
test
|
def _postConstruction(self):
'''Perform post-construction operations.'''
self.setWindowTitle('Filesystem Browser')
self._filesystemWidget.sortByColumn(0, QtCore.Qt.AscendingOrder)
# TODO: Remove once bookmarks widget implemented.
self._bookmarksWidget.hide()
self._acceptButton.setDefault(True)
self._acceptButton.setDisabled(True)
self._acceptButton.clicked.connect(self.accept)
self._cancelButton.clicked.connect(self.reject)
self._configureShortcuts()
self.setLocation(self._root)
self._filesystemWidget.horizontalHeader().setResizeMode(
QtGui.QHeaderView.ResizeToContents
)
self._filesystemWidget.horizontalHeader().setResizeMode(
|
python
|
{
"resource": ""
}
|
q276207
|
FilesystemBrowser._configureShortcuts
|
test
|
def _configureShortcuts(self):
'''Add keyboard shortcuts to navigate the filesystem.'''
self._upShortcut = QtGui.QShortcut(
|
python
|
{
"resource": ""
}
|
q276208
|
FilesystemBrowser._onActivateItem
|
test
|
def _onActivateItem(self, index):
'''Handle activation of item in listing.'''
item = self._filesystemWidget.model().item(index)
if not isinstance(item, riffle.model.File):
|
python
|
{
"resource": ""
}
|
q276209
|
FilesystemBrowser._onSelectItem
|
test
|
def _onSelectItem(self, selection, previousSelection):
'''Handle selection of item in listing.'''
self._acceptButton.setEnabled(True)
del self._selected[:]
|
python
|
{
"resource": ""
}
|
q276210
|
FilesystemBrowser._onNavigate
|
test
|
def _onNavigate(self, index):
'''Handle selection of path segment.'''
if index
|
python
|
{
"resource": ""
}
|
q276211
|
BuildResources.finalize_options
|
test
|
def finalize_options(self):
'''Finalize options to be used.'''
self.resource_source_path = os.path.join(RESOURCE_PATH,
|
python
|
{
"resource": ""
}
|
q276212
|
BuildResources.run
|
test
|
def run(self):
'''Run build.'''
if ON_READ_THE_DOCS:
# PySide not available.
return
try:
pyside_rcc_command = 'pyside-rcc'
# On Windows, pyside-rcc is not automatically available on the
# PATH so try to find it manually.
if sys.platform == 'win32':
import PySide
pyside_rcc_command = os.path.join(
os.path.dirname(PySide.__file__),
'pyside-rcc.exe'
)
subprocess.check_call([
pyside_rcc_command,
'-o',
self.resource_target_path,
self.resource_source_path
|
python
|
{
"resource": ""
}
|
q276213
|
Clean.run
|
test
|
def run(self):
'''Run clean.'''
relative_resource_path = os.path.relpath(
RESOURCE_TARGET_PATH, ROOT_PATH
)
if os.path.exists(relative_resource_path):
os.remove(relative_resource_path)
else:
distutils.log.warn(
'\'{0}\' does not exist -- can\'t clean it'
|
python
|
{
"resource": ""
}
|
q276214
|
Item.fetchChildren
|
test
|
def fetchChildren(self):
'''Fetch and return new children.
Will only fetch children whilst canFetchMore is True.
.. note::
It is the caller's responsibility to add each fetched child to this
parent if desired using :py:meth:`Item.addChild`.
'''
|
python
|
{
"resource": ""
}
|
q276215
|
Item.refetch
|
test
|
def refetch(self):
'''Reload children.'''
# Reset children
for child
|
python
|
{
"resource": ""
}
|
q276216
|
FilesystemSortProxy.icon
|
test
|
def icon(self, index):
'''Return icon for index.'''
sourceModel = self.sourceModel()
if not sourceModel:
|
python
|
{
"resource": ""
}
|
q276217
|
call
|
test
|
def call(args, stdout=None, stderr=None, stdin=None, daemonize=False,
preexec_fn=None, shell=False, cwd=None, env=None):
"""
Run an external command in a separate process and detach it from the current process. Excepting
`stdout`, `stderr`, and `stdin`, all file descriptors are closed after forking. If `daemonize`
is True then the parent process exits. All stdio is redirected to `os.devnull` unless
specified. The `preexec_fn`, `shell`, `cwd`, and `env` parameters are the same as their `Popen`
counterparts. Return the PID of the child process if not daemonized.
"""
stream = lambda s, m: os.open(os.devnull, m) if s is None else s  # conditional form; the and/or idiom breaks if a passed fd is 0
stdout = stream(stdout, os.O_WRONLY)
stderr = stream(stderr, os.O_WRONLY)
stdin = stream(stdin, os.O_RDONLY)
shared_pid = Value('i', 0)
|
python
|
{
"resource": ""
}
|
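The `call` helper above forks before running the command and shares the child PID back through the `multiprocessing.Value`. A hypothetical usage sketch, assuming the function returns the child's PID when not daemonized (as its docstring states):

```python
# Hypothetical usage: run a command detached from the current process.
pid = call(['sleep', '60'])
print('detached child pid: %d' % pid)

# Daemonized mode makes the parent exit, so nothing runs after this call:
# call(['long-running-server'], daemonize=True)
```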
q276218
|
Detach._get_max_fd
|
test
|
def _get_max_fd(self):
"""Return the maximum file descriptor value."""
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
result = limits[1]
|
python
|
{
"resource": ""
}
|
q276219
|
Detach._close_fd
|
test
|
def _close_fd(self, fd):
"""Close a file descriptor if it is open."""
try:
os.close(fd)
except OSError as exc:
if exc.errno != errno.EBADF:
|
python
|
{
"resource": ""
}
|
q276220
|
Detach._close_open_fds
|
test
|
def _close_open_fds(self):
"""Close open file descriptors."""
maxfd = self._get_max_fd()
for fd in reversed(range(maxfd)):
|
python
|
{
"resource": ""
}
|
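The three `Detach` helpers above (`_get_max_fd`, `_close_fd`, `_close_open_fds`) are each truncated. A self-contained sketch of the classic close-all-descriptors loop they implement together; the unbounded-limit fallback and the `keep` parameter are assumptions:

```python
import errno
import os
import resource


def close_open_fds(keep=()):
    """Close every file descriptor up to the process's RLIMIT_NOFILE hard
    limit, skipping descriptors listed in ``keep`` (sketch under assumptions).
    """
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 4096  # assumption: sane fallback when the limit is unbounded
    for fd in reversed(range(maxfd)):
        if fd in keep:
            continue
        try:
            os.close(fd)
        except OSError as exc:
            if exc.errno != errno.EBADF:  # EBADF means it was already closed
                raise
```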
q276221
|
Detach._redirect
|
test
|
def _redirect(self, stream, target):
"""Redirect a system stream to the provided target."""
if target is None:
target_fd = os.open(os.devnull, os.O_RDWR)
|
python
|
{
"resource": ""
}
|
q276222
|
set_form_widgets_attrs
|
test
|
def set_form_widgets_attrs(form, attrs):
"""Applies a given HTML attributes to each field widget of a given form.
Example:
set_form_widgets_attrs(my_form, {'class': 'clickable'})
"""
for _, field in form.fields.items():
attrs_ = dict(attrs)
for name,
|
python
|
{
"resource": ""
}
|
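The `set_form_widgets_attrs` loop is cut off mid-statement at `for name,`. A sketch of a plausible completion that merges the attributes into each widget; the per-field callable handling is an assumption:

```python
def set_form_widgets_attrs(form, attrs):
    """Applies the given HTML attributes to each field widget of a given form.
    Example:
        set_form_widgets_attrs(my_form, {'class': 'clickable'})
    """
    for _, field in form.fields.items():
        attrs_ = dict(attrs)
        for name, value in attrs_.items():
            if callable(value):
                value = value(field)  # assumption: allow per-field callables
            field.widget.attrs[name] = value
```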
q276223
|
import_app_module
|
test
|
def import_app_module(app_name, module_name):
"""Returns a module from a given app by its name.
:param str app_name:
:param str module_name:
:rtype: module or None
"""
name_split = app_name.split('.')
if name_split[-1][0].isupper(): # Seems that we have app config class path here.
app_name = '.'.join(name_split[:-2])
module = import_module(app_name)
try:
|
python
|
{
"resource": ""
}
|
q276224
|
import_project_modules
|
test
|
def import_project_modules(module_name):
"""Imports modules from registered apps using given module name
and returns them as a list.
:param str module_name:
:rtype: list
"""
from django.conf import settings
submodules = []
for app in settings.INSTALLED_APPS:
|
python
|
{
"resource": ""
}
|
q276225
|
include_
|
test
|
def include_(parser, token):
"""Similar to built-in ``include`` template tag, but allowing
template variables to be used in template name and a fallback template,
thus making the tag more dynamic.
.. warning:: Requires Django 1.8+
Example:
{% load etc_misc %}
{% include_ "sub_{{ postfix_var }}.html" fallback "default.html" %}
"""
bits = token.split_contents()
dynamic = False
# We fallback to built-in `include` if a template name contains no variables.
if len(bits) >= 2:
dynamic = '{{' in bits[1]
if dynamic:
fallback = None
bits_new = []
for bit in bits:
if fallback is True:
# This bit is a `fallback` argument.
fallback = bit
continue
if bit == 'fallback':
|
python
|
{
"resource": ""
}
|
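The `include_` parser breaks off inside the `if bit == 'fallback':` branch. Given the `fallback is True` sentinel checked just above, the bit-scanning loop has a natural completion (a sketch, not the row's verified tail):

```python
        for bit in bits:
            if fallback is True:
                # This bit is the `fallback` argument (the template name).
                fallback = bit
                continue
            if bit == 'fallback':
                # Flag that the next bit holds the fallback template name.
                fallback = True
                continue
            bits_new.append(bit)
```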
q276226
|
gravatar_get_url
|
test
|
def gravatar_get_url(obj, size=65, default='identicon'):
"""Returns Gravatar image URL for a given string or UserModel.
Example:
{% load
|
python
|
{
"resource": ""
}
|
q276227
|
gravatar_get_img
|
test
|
def gravatar_get_img(obj, size=65, default='identicon'):
"""Returns Gravatar image HTML tag for a given string or UserModel.
Example:
{% load gravatar %}
{% gravatar_get_img user_model %}
:param UserModel, str obj:
:param int size:
:param str default:
:return:
"""
|
python
|
{
"resource": ""
}
|
q276228
|
Port.is_valid_filesys
|
test
|
def is_valid_filesys(path):
"""Checks if the path is correct and exists, must be abs-> a dir -> and not a file."""
if os.path.isabs(path) and os.path.isdir(path) and \
not os.path.isfile(path):
return True
|
python
|
{
"resource": ""
}
|
q276229
|
Port.is_valid_s3_url
|
test
|
def is_valid_s3_url(url):
"""Checks if the url contains S3. Not an accurate validation of the url"""
# Skip if the url start with source: (gbdxtools syntax)
if url.startswith('source:'):
return True
|
python
|
{
"resource": ""
}
|
q276230
|
TaskController._get_template_abs_path
|
test
|
def _get_template_abs_path(filename):
"""
Return a valid absolute path. filename can be relative or absolute.
"""
if
|
python
|
{
"resource": ""
}
|
q276231
|
AccountStorageService.list
|
test
|
def list(self, s3_folder='', full_key_data=False):
"""Get a list of keys for the accounts"""
if not s3_folder.startswith('/'):
s3_folder = '/' + s3_folder
|
python
|
{
"resource": ""
}
|
q276232
|
Workflow._build_worklfow_json
|
test
|
def _build_worklfow_json(self):
"""
Build a workflow definition from the cloud_harness task.
"""
wf_json = {'tasks': [], 'name': 'cloud-harness_%s' % str(uuid.uuid4())}
task_def = json.loads(self.task_template.json())
d = {
"name": task_def['name'],
"outputs": [],
"inputs": [],
"taskType": task_def['taskType']
}
# Add input ports
for port in self.task_template.input_ports:
port_value = port.value
if port_value is False:
port_value = 'false'
if port_value is True:
port_value = 'true'
d['inputs'].append({
"name": port._name,
"value": port_value
})
# Add output ports
for port in self.task_template.output_ports:
d['outputs'].append({
"name": port._name
})
# Add task to workflow
wf_json['tasks'].append(d)
# Add port to be saved
|
python
|
{
"resource": ""
}
|
q276233
|
Workflow.execute
|
test
|
def execute(self, override_wf_json=None):
"""
Execute the cloud_harness task.
"""
r = self.gbdx.post(
self.URL,
json=self.json if override_wf_json is None else override_wf_json
|
python
|
{
"resource": ""
}
|
q276234
|
archive
|
test
|
def archive(folder, dry_run=False):
"Move an active project to the archive."
# error handling on archive_dir already done in
|
python
|
{
"resource": ""
}
|
q276235
|
_mkdir
|
test
|
def _mkdir(p):
"The equivalent of 'mkdir -p' in shell."
isdir = os.path.isdir
stack = [os.path.abspath(p)]
while not isdir(stack[-1]):
parent_dir = os.path.dirname(stack[-1])
|
python
|
{
"resource": ""
}
|
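The `_mkdir` body stops while building the stack of missing directories. The algorithm the visible lines set up has only one natural ending: push parents until an existing directory is found, then create the missing ones on the way back down. A sketch:

```python
import os


def _mkdir(p):
    "The equivalent of 'mkdir -p' in shell."
    isdir = os.path.isdir
    stack = [os.path.abspath(p)]
    # Walk upward until an existing ancestor directory is reached.
    while not isdir(stack[-1]):
        parent_dir = os.path.dirname(stack[-1])
        stack.append(parent_dir)
    # Unwind, creating each missing directory from the top down.
    while stack:
        directory = stack.pop()
        if not isdir(directory):
            os.mkdir(directory)
```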
q276236
|
list
|
test
|
def list(pattern=()):
"List the contents of the archive directory."
# strategy: pick the intersection of all the patterns the user provides
globs = ['*{0}*'.format(p) for p in pattern] + ['*']
matches = []
offset = len(PROJ_ARCHIVE) + 1
for suffix in globs:
glob_pattern = os.path.join(PROJ_ARCHIVE, '*', '*', suffix)
matches.append(set(
|
python
|
{
"resource": ""
}
|
q276237
|
restore
|
test
|
def restore(folder):
"Restore a project from the archive."
if os.path.isdir(folder):
bail('a folder of the same name already exists!')
pattern = os.path.join(PROJ_ARCHIVE, '*', '*', folder)
matches = glob.glob(pattern)
if not matches:
bail('no project matches: ' + folder)
if len(matches) > 1:
|
python
|
{
"resource": ""
}
|
q276238
|
Client.new
|
test
|
def new(cls, access_token, environment='prod'):
'''Create new storage service client.
Arguments:
environment(str): The service environment to be used for the client.
'prod' or 'dev'.
access_token(str): The access token used to authenticate with the
service
|
python
|
{
"resource": ""
}
|
q276239
|
Client.list
|
test
|
def list(self, path):
'''List the entities found directly under the given path.
Args:
path (str): The path of the entity to be listed. Must start with a '/'.
Returns:
The list of entity names directly under the given path:
u'/12345/folder_1'
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
self.__validate_storage_path(path)
entity = self.api_client.get_entity_by_query(path=path)
if entity['entity_type'] not in self.__BROWSABLE_TYPES:
raise StorageArgumentException('The entity type "{0}" cannot be '
'listed'.format(entity['entity_type']))
entity_uuid = entity['uuid']
file_names = []
# get files
more_pages = True
page_number = 1
|
python
|
{
"resource": ""
}
|
q276240
|
Client.download_file
|
test
|
def download_file(self, path, target_path):
'''Download a file from storage service to local disk.
Existing files on the target path will be overwritten.
The download is not recursive, as it only works on files.
Args:
path (str): The path of the entity to be downloaded. Must start with a '/'.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
self.__validate_storage_path(path)
entity = self.api_client.get_entity_by_query(path=path)
|
python
|
{
"resource": ""
}
|
q276241
|
Client.exists
|
test
|
def exists(self, path):
'''Check if a certain path exists in the storage service.
Args:
path (str): The path to be checked
Returns:
True if the path exists, False otherwise
Raises:
StorageArgumentException: Invalid arguments
|
python
|
{
"resource": ""
}
|
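The `exists` body is truncated before any logic. Given that `get_entity_by_query` raises `StorageNotFoundException` for missing paths (per the Raises section of the neighbouring methods), a plausible sketch of the method body as it would appear inside the `Client` class:

```python
def exists(self, path):
    '''Check if a certain path exists in the storage service (sketch).'''
    self.__validate_storage_path(path)
    try:
        self.api_client.get_entity_by_query(path=path)
    except StorageNotFoundException:
        return False
    return True
```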
q276242
|
Client.get_parent
|
test
|
def get_parent(self, path):
'''Get the parent entity of the entity pointed by the given path.
Args:
path (str): The path of the entity whose parent is needed
Returns:
A JSON object of the parent entity if found.
Raises:
StorageArgumentException: Invalid arguments
|
python
|
{
"resource": ""
}
|
q276243
|
Client.mkdir
|
test
|
def mkdir(self, path):
'''Create a folder in the storage service pointed by the given path.
Args:
path (str): The path of the folder to be created
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
|
python
|
{
"resource": ""
}
|
q276244
|
Client.upload_file
|
test
|
def upload_file(self, local_file, dest_path, mimetype):
'''Upload local file content to a storage service destination folder.
Args:
local_file(str)
dest_path(str):
absolute Storage service path '/project' prefix is essential
suffix should be the name the file will have in the destination folder
i.e.: /project/folder/.../file_name
mimetype(str): set the contentType attribute
Returns:
The uuid of created file entity as string
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
|
python
|
{
"resource": ""
}
|
q276245
|
Client.delete
|
test
|
def delete(self, path):
''' Delete an entity from the storage service using its path.
Args:
path(str): The path of the entity to be deleted
Returns:
The uuid of created file entity as string
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
self.__validate_storage_path(path, projects_allowed=False)
entity = self.api_client.get_entity_by_query(path=path)
if entity['entity_type'] in self.__BROWSABLE_TYPES:
# At this point it can only
|
python
|
{
"resource": ""
}
|
q276246
|
Client.__validate_storage_path
|
test
|
def __validate_storage_path(cls, path, projects_allowed=True):
'''Validate a string as a valid storage path'''
if not path or not isinstance(path, str) or path[0] != '/' or path == '/':
raise StorageArgumentException(
'The path must be a string, start with a slash (/), and be longer'
|
python
|
{
"resource": ""
}
|
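The `__validate_storage_path` row is cut off inside its error message. A sketch of a plausible full check; the single-slash heuristic used for `projects_allowed=False` is an assumption:

```python
def __validate_storage_path(cls, path, projects_allowed=True):
    '''Validate a string as a valid storage path (sketch).'''
    if not path or not isinstance(path, str) or path[0] != '/' or path == '/':
        raise StorageArgumentException(
            'The path must be a string, start with a slash (/), and be longer'
            ' than one character.')
    # Assumption: a project-level path has a single slash, e.g. '/my_project'.
    if not projects_allowed and path.count('/') == 1:
        raise StorageArgumentException(
            'This operation is not allowed on project paths.')
```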
q276247
|
Client.new
|
test
|
def new(cls, access_token, environment='prod'):
'''Creates a new cross-service client.'''
return cls(
|
python
|
{
"resource": ""
}
|
q276248
|
ApiClient.new
|
test
|
def new(cls, access_token, environment='prod'):
'''Create a new storage service REST client.
Arguments:
environment: The service environment to be used for the client
access_token: The access token used to authenticate with the
service
Returns:
A storage_service.api.ApiClient instance
Example:
>>> storage_client = ApiClient.new(my_access_token)
'''
request = RequestBuilder \
.request(environment) \
.to_service(cls.SERVICE_NAME, cls.SERVICE_VERSION) \
.throw(
StorageForbiddenException,
lambda resp: 'You are forbidden to do this.'
if resp.status_code == 403 else None
) \
.throw(
StorageNotFoundException,
|
python
|
{
"resource": ""
}
|
q276249
|
ApiClient.get_entity_details
|
test
|
def get_entity_details(self, entity_id):
'''Get generic entity by UUID.
Args:
entity_id (str): The UUID of the requested entity.
Returns:
A dictionary describing the entity::
{
u'collab_id': 2271,
u'created_by': u'303447',
u'created_on': u'2017-03-10T12:50:06.077891Z',
u'description': u'',
u'entity_type': u'project',
u'modified_by': u'303447',
|
python
|
{
"resource": ""
}
|
q276250
|
ApiClient.set_metadata
|
test
|
def set_metadata(self, entity_type, entity_id, metadata):
'''Set metadata for an entity.
Args:
entity_type (str): Type of the entity. Admitted values: ['project',
'folder', 'file'].
entity_id (str): The UUID of the entity to be modified.
metadata (dict): A dictionary of key/value pairs to be written as
metadata.
Warning:
It will replace all existing metadata with the provided dictionary.
Returns:
A dictionary of the updated metadata::
{
u'bar': u'200',
u'foo': u'100'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(entity_id):
|
python
|
{
"resource": ""
}
|
q276251
|
ApiClient.get_metadata
|
test
|
def get_metadata(self, entity_type, entity_id):
'''Get metadata of an entity.
Args:
entity_type (str): Type of the entity. Admitted values: ['project',
'folder', 'file'].
entity_id (str): The UUID of the entity to be modified.
Returns:
A dictionary of the metadata::
{
u'bar': u'200',
u'foo': u'100'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
|
python
|
{
"resource": ""
}
|
q276252
|
ApiClient.update_metadata
|
test
|
def update_metadata(self, entity_type, entity_id, metadata):
'''Update the metadata of an entity.
Existing non-modified metadata will not be affected.
Args:
entity_type (str): Type of the entity. Admitted values: 'project',
'folder', 'file'.
entity_id (str): The UUID of the entity to be modified.
metadata (dict): A dictionary of key/value pairs to be written as
metadata.
Returns:
A dictionary of the updated object metadata::
{
u'bar': u'200',
u'foo': u'100'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(entity_id):
|
python
|
{
"resource": ""
}
|
q276253
|
ApiClient.delete_metadata
|
test
|
def delete_metadata(self, entity_type, entity_id, metadata_keys):
'''Delete the selected metadata entries of an entity.
Only deletes selected metadata keys, for a complete wipe, use set_metadata.
Args:
entity_type (str): Type of the entity. Admitted values: ['project',
'folder', 'file'].
entity_id (str): The UUID of the entity to be modified.
metadata_keys (list): A list of metadata keys to be deleted.
Returns:
A dictionary of the updated object metadata::
{
u'bar': u'200',
u'foo': u'100'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(entity_id):
|
python
|
{
"resource": ""
}
|
q276254
|
ApiClient.list_projects
|
test
|
def list_projects(self, hpc=None, access=None, name=None, collab_id=None,
page_size=DEFAULT_PAGE_SIZE, page=None, ordering=None):
'''List all the projects the user has access to.
This function does not retrieve all results, pages have
to be manually retrieved by the caller.
Args:
hpc (bool): If 'true', the result will contain only the HPC projects
(Unicore projects).
access (str): If provided, the result will contain only projects
where the user has the provided access.
Admitted values: ['read', 'write'].
name (str): Filter on the project name.
collab_id (int): Filter on the collab id.
page_size (int): Number of elements per page.
page (int): Number of the page
ordering (str): Indicate on which fields to sort the result.
|
python
|
{
"resource": ""
}
|
q276255
|
ApiClient.get_project_details
|
test
|
def get_project_details(self, project_id):
'''Get information on a given project
Args:
project_id (str): The UUID of the requested project.
Returns:
A dictionary describing the project::
{
u'collab_id': 2271,
u'created_by': u'303447',
u'created_on': u'2017-03-10T12:50:06.077891Z',
u'description': u'',
u'entity_type': u'project',
u'modified_by': u'303447',
|
python
|
{
"resource": ""
}
|
q276256
|
ApiClient.create_project
|
test
|
def create_project(self, collab_id):
'''Create a new project.
Args:
collab_id (int): The id of the collab the project should be created in.
Returns:
A dictionary of details of the created project::
{
u'collab_id': 12998,
u'created_by': u'303447',
u'created_on': u'2017-03-21T14:06:32.293902Z',
u'description': u'',
u'entity_type': u'project',
u'modified_by': u'303447',
u'modified_on': u'2017-03-21T14:06:32.293967Z',
u'name': u'12998',
u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'
}
Raises:
|
python
|
{
"resource": ""
}
|
q276257
|
ApiClient.delete_project
|
test
|
def delete_project(self, project):
'''Delete a project. It will recursively delete all the content.
Args:
project (str): The UUID of the project to be deleted.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: 403
StorageNotFoundException: 404
HTTPError: other non-20x error codes
'''
if not is_valid_uuid(project):
|
python
|
{
"resource": ""
}
|
q276258
|
ApiClient.create_folder
|
test
|
def create_folder(self, name, parent):
'''Create a new folder.
Args:
name (str): The name of the folder.
parent (str): The UUID of the parent entity. The parent must be a
project or a folder.
Returns:
A dictionary of details of the created folder::
{
u'created_by': u'303447',
u'created_on': u'2017-03-21T14:06:32.293902Z',
u'description': u'',
u'entity_type': u'folder',
u'modified_by': u'303447',
u'modified_on': u'2017-03-21T14:06:32.293967Z',
u'name': u'myfolder',
u'parent': u'3abd8742-d069-44cf-a66b-2370df74a682',
u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code
|
python
|
{
"resource": ""
}
|
q276259
|
ApiClient.get_folder_details
|
test
|
def get_folder_details(self, folder):
'''Get information on a given folder.
Args:
folder (str): The UUID of the requested folder.
Returns:
A dictionary of the folder details if found::
{
u'created_by': u'303447',
u'created_on': u'2017-03-21T14:06:32.293902Z',
u'description': u'',
u'entity_type': u'folder',
u'modified_by': u'303447',
u'modified_on': u'2017-03-21T14:06:32.293967Z',
|
python
|
{
"resource": ""
}
|
q276260
|
ApiClient.delete_folder
|
test
|
def delete_folder(self, folder):
'''Delete a folder. It will recursively delete all the content.
Args:
folder (str): The UUID of the folder to be deleted.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: 403
StorageNotFoundException: 404
HTTPError: other non-20x error codes
'''
if not is_valid_uuid(folder):
|
python
|
{
"resource": ""
}
|
q276261
|
ApiClient.upload_file_content
|
test
|
def upload_file_content(self, file_id, etag=None, source=None, content=None):
'''Upload file content. The file entity must already exist.
If an ETag is provided the file stored on the server is verified
against it. If it does not match, StorageException is raised.
This means the client needs to update its knowledge of the resource
before attempting to update again. This can be used for optimistic
concurrency control.
Args:
file_id (str): The UUID of the file whose content is written.
etag (str): The etag to match the contents against.
source (str): The path of the local file whose content to be uploaded.
content (str): A string of the content to be uploaded.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
The ETag of the file upload::
'"71e1ed9ee52e565a56aec66bc648a32c"'
Raises:
IOError: The source cannot be opened.
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
|
python
|
{
"resource": ""
}
|
q276262
|
ApiClient.copy_file_content
|
test
|
def copy_file_content(self, file_id, source_file):
'''Copy file content from source file to target file.
Args:
file_id (str): The UUID of the file whose content is written.
source_file (str): The UUID of the file whose content is copied.
Returns:
None
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(file_id):
raise StorageArgumentException(
|
python
|
{
"resource": ""
}
|
q276263
|
ApiClient.download_file_content
|
test
|
def download_file_content(self, file_id, etag=None):
'''Download file content.
Args:
file_id (str): The UUID of the file whose content is requested
etag (str): If the content is not changed since the provided ETag,
the content won't be downloaded. If the content is changed, it
will be downloaded and returned with its new ETag.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
A tuple of ETag and content (etag, content) if the content was
retrieved. If an etag was provided, and content didn't change
returns (None, None)::
('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!')
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not is_valid_uuid(file_id):
raise StorageArgumentException(
|
python
|
{
"resource": ""
}
|
q276264
|
ApiClient.get_signed_url
|
test
|
def get_signed_url(self, file_id):
'''Get a signed unauthenticated URL.
It can be used to download the file content without the need for a
token. The signed URL expires after 5 seconds.
Args:
file_id (str): The UUID of the file to get the link for.
Returns:
The signed url as a string
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
'''
if not
|
python
|
{
"resource": ""
}
|
q276265
|
MongoDBHandler.emit
|
test
|
def emit(self, record):
""" pymongo expects a dict """
msg = self.format(record)
if not isinstance(msg, dict):
|
python
|
{
"resource": ""
}
|
q276266
|
RequestBuilder.to_service
|
test
|
def to_service(self, service, version):
'''Sets the service name and version the request should target
Args:
service (str): The name of the service as displayed in the services.json file
version (str): The version of the service as displayed in the services.json file
Returns:
The request
|
python
|
{
"resource": ""
}
|
q276267
|
RequestBuilder.with_headers
|
test
|
def with_headers(self, headers):
'''Adds headers to the request
Args:
headers (dict): The headers to add to the request headers
Returns:
The request builder instance in order to chain calls
|
python
|
{
"resource": ""
}
|
q276268
|
RequestBuilder.with_params
|
test
|
def with_params(self, params):
'''Adds parameters to the request params
Args:
params (dict): The parameters to add to the request params
Returns:
The request builder instance in order to chain calls
|
python
|
{
"resource": ""
}
|
q276269
|
RequestBuilder.throw
|
test
|
def throw(self, exception_class, should_throw):
'''Defines whether an exception should be thrown after the request is sent
Args:
exception_class (class): The class of the exception to instantiate
should_throw (function): The predicate that should indicate if the exception
should be thrown. This function will be called with the response
|
python
|
{
"resource": ""
}
|
q276270
|
AdminBooleanMixin.get_list_display
|
test
|
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
list_display = []
for field_name in self.list_display:
|
python
|
{
"resource": ""
}
|
q276271
|
map_job
|
test
|
def map_job(job, func, inputs, *args):
"""
Spawns a tree of jobs to avoid spawning too many child jobs from a single parent.
This function is appropriate when batching more than 1,000 samples.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param function func: Function to spawn dynamically, passes one sample as first argument
:param list inputs: Array
|
python
|
{
"resource": ""
}
|
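The `map_job` body is missing entirely. A sketch of the fan-out pattern its docstring describes, using Toil's `addChildJobFn`; the partition count of 100 is an assumption:

```python
def map_job(job, func, inputs, *args):
    """Spawns a tree of jobs so no single parent spawns too many children."""
    # Assumption: cap the fan-out per node at 100 partitions.
    num_partitions = 100
    partition_size = len(inputs) // num_partitions
    if partition_size > 1:
        # Recurse: hand each partition to a child copy of map_job.
        for i in range(0, len(inputs), partition_size):
            job.addChildJobFn(map_job, func, inputs[i:i + partition_size], *args)
    else:
        # Small enough: spawn func directly, one child per sample.
        for sample in inputs:
            job.addChildJobFn(func, sample, *args)
```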
q276272
|
gatk_genotype_gvcfs
|
test
|
def gatk_genotype_gvcfs(job,
gvcfs,
ref, fai, ref_dict,
annotations=None,
emit_threshold=10.0, call_threshold=30.0,
unsafe_mode=False):
"""
Runs GenotypeGVCFs on one or more gVCFs generated by HaplotypeCaller.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param dict gvcfs: Dictionary of GVCF FileStoreIDs {sample identifier: FileStoreID}
:param str ref: FileStoreID for the reference genome fasta file
:param str fai: FileStoreID for the reference genome index file
:param str ref_dict: FileStoreID for the reference genome sequence dictionary
:param list[str] annotations: Optional list of GATK variant annotations. Default: None.
:param float emit_threshold: Minimum phred-scale confidence threshold for
a variant to be emitted. GATK default: 10.0
:param float call_threshold: Minimum phred-scale confidence threshold for
a variant to be called. GATK default: 30.0
:param bool unsafe_mode: If True, runs gatk UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
:return: VCF FileStoreID
:rtype: str
"""
inputs = {'genome.fa': ref,
'genome.fa.fai': fai,
|
python
|
{
"resource": ""
}
|
q276273
|
run_oncotator
|
test
|
def run_oncotator(job, vcf_id, oncotator_db):
"""
Uses Oncotator to add cancer relevant variant annotations to a VCF file. Oncotator can accept
other genome builds, but the output VCF is based on hg19.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str vcf_id: FileStoreID for VCF file
:param str oncotator_db: FileStoreID for Oncotator database
:return: Annotated VCF FileStoreID
:rtype: str
"""
job.fileStore.logToMaster('Running Oncotator')
inputs = {'input.vcf': vcf_id,
'oncotator_db': oncotator_db}
work_dir = job.fileStore.getLocalTempDir()
for name, file_store_id in inputs.iteritems():
inputs[name] = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
#
|
python
|
{
"resource": ""
}
|
q276274
|
DatapointArray.sort
|
test
|
def sort(self, f=lambda d: d["t"]):
"""Sort here works by sorting by timestamp by default"""
|
python
|
{
"resource": ""
}
|
q276275
|
DatapointArray.t
|
test
|
def t(self):
"""Returns just the timestamp portion of the datapoints as a list.
The
|
python
|
{
"resource": ""
}
|
q276276
|
DatapointArray.loadExport
|
test
|
def loadExport(self, folder):
"""Adds the data from a ConnectorDB export. If it is a stream export, then the folder
is the location of the export. If it is a device export, then the folder is the export folder
with the stream name as a subdirectory
If it is a user export, you will use the path of the export folder, with the user/device/stream
|
python
|
{
"resource": ""
}
|
q276277
|
DatapointArray.tshift
|
test
|
def tshift(self, t):
"""Shifts all timestamps in the datapoint array by the given number of seconds.
It is the same as the 'tshift' pipescript transform.
Warning: The shift is performed in-place! This means that it modifies the underlying array::
d = DatapointArray([{"t":56,"d":1}])
|
python
|
{
"resource": ""
}
|
q276278
|
DatapointArray.sum
|
test
|
def sum(self):
"""Gets the sum of the data portions of all datapoints within"""
|
python
|
{
"resource": ""
}
|
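The three `DatapointArray` method bodies above (`sort`, `tshift`, `sum`) are all truncated right after their docstrings. Assuming `DatapointArray` subclasses `list` and holds `{"t": timestamp, "d": data}` dicts, plausible completions:

```python
class DatapointArray(list):
    # Assumption: the class wraps a list of {"t": timestamp, "d": data} dicts.

    def sort(self, f=lambda d: d["t"]):
        """Sort here works by sorting by timestamp by default."""
        list.sort(self, key=f)
        return self

    def tshift(self, t):
        """Shift all timestamps by the given number of seconds, in place."""
        for dp in self:
            dp["t"] += t
        return self

    def sum(self):
        """Get the sum of the data portions of all datapoints within."""
        total = 0
        for dp in self:
            total += dp["d"]
        return total
```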
q276279
|
rfxcom
|
test
|
def rfxcom(device):
"""Start the event loop to collect data from the serial device."""
# If the device isn't passed in, look for it in the config.
if device is None:
device = app.config.get('DEVICE')
# If the device is *still* none, error.
if device is None:
|
python
|
{
"resource": ""
}
|
q276280
|
create_user
|
test
|
def create_user(username):
"Create a new user."
password = prompt_pass("Enter
|
python
|
{
"resource": ""
}
|
q276281
|
parse_vn_results
|
test
|
async def parse_vn_results(soup):
"""
Parse Visual Novel search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and id.
"""
soup = soup.find_all('td', class_='tc1')
|
python
|
{
"resource": ""
}
|
q276282
|
parse_release_results
|
test
|
async def parse_release_results(soup):
"""
Parse Releases search pages.
:param soup: The BS4 class object
:return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel.
It contains a Date released, Platform, Age group and Name.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
releases = []
for item in soup:
child = list(item.children)
|
python
|
{
"resource": ""
}
|
q276283
|
parse_prod_staff_results
|
test
|
async def parse_prod_staff_results(soup):
"""
Parse a page of producer or staff results
:param soup: The BS4 class object
:return: A list of dictionaries containing a name and nationality.
"""
soup = soup.find_all('li')
|
python
|
{
"resource": ""
}
|
q276284
|
parse_character_results
|
test
|
async def parse_character_results(soup):
"""
Parse a page of character results.
:param soup: The BS4 class object
:return: Returns a list of dictionaries, each containing a name, a gender, and a list of
game name/id pairs for the games the character appeared in.
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
characters = []
for item in soup:
temp_c = {'gender': None, 'name': None, 'games': {}}
temp_c['gender'] = item.abbr.get('title')
temp_c['name'] = list(item.children)[1].a.string
temp_c['games'] = []
for
|
python
|
{
"resource": ""
}
|
q276285
|
parse_tag_results
|
test
|
async def parse_tag_results(soup):
"""
Parse a page of tag or trait results. Same format.
:param soup: BS4 Class Object
:return: A list of tags; nothing else on the page is particularly useful
"""
|
python
|
{
"resource": ""
}
|
q276286
|
parse_user_results
|
test
|
async def parse_user_results(soup):
"""
Parse a page of user results
:param soup: Bs4 Class object
:return: A list of dictionaries containing a name and join date
"""
soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
users = []
for item in soup:
t_u = {'name': None, 'joined': None}
|
python
|
{
"resource": ""
}
|
q276287
|
tarball_files
|
test
|
def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
"""
Creates a tarball from a group of files
:param str tar_name: Name of tarball
:param list[str] file_paths: Absolute file paths to include in the tarball
:param str output_dir: Output destination for tarball
:param str prefix: Optional prefix for files in tarball
"""
with tarfile.open(os.path.join(output_dir, tar_name), 'w:gz') as f_out:
|
python
|
{
"resource": ""
}
|
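The `tarball_files` body stops at the opening of the tarfile context manager. A sketch of the natural completion, reusing the absolute-path check visible in the neighbouring `__forall_files` helper; the flat archive-name scheme is an assumption:

```python
import os
import tarfile


def tarball_files(tar_name, file_paths, output_dir='.', prefix=''):
    """Creates a gzipped tarball from a group of files (sketch)."""
    with tarfile.open(os.path.join(output_dir, tar_name), 'w:gz') as f_out:
        for file_path in file_paths:
            if not file_path.startswith('/'):
                raise ValueError('Path provided is relative, not absolute.')
            # Assumption: files are stored flat, with an optional name prefix.
            arcname = prefix + os.path.basename(file_path)
            f_out.add(file_path, arcname=arcname)
```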
q276288
|
__forall_files
|
test
|
def __forall_files(file_paths, output_dir, op):
"""
Applies a function to a set of files and an output directory.
:param list[str] file_paths: Absolute file paths to move
:param str output_dir: Output directory
:param op: Function applied to each file path together with the output directory
"""
for file_path in file_paths:
if not file_path.startswith('/'):
|
python
|
{
"resource": ""
}
|
q276289
|
copy_file_job
|
test
|
def copy_file_job(job, name, file_id, output_dir):
"""
Job version of move_files for one file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str name: Name of output file (including
|
python
|
{
"resource": ""
}
|
q276290
|
_make_parameters
|
test
|
def _make_parameters(master_ip, default_parameters, memory, arguments, override_parameters):
"""
Makes a Spark Submit style job submission line.
:param master_ip: The Spark leader IP address.
:param default_parameters: Application specific Spark configuration parameters.
:param memory: The memory to allocate to each Spark driver and executor.
:param arguments: Arguments to pass to the submitted job.
:param override_parameters: Parameters passed by the user, that override our defaults.
:type master_ip: MasterAddress
:type default_parameters: list of string
:type arguments: list of string
:type memory: int or None
:type override_parameters: list of string or None
"""
# python doesn't support logical xor?
# anywho, exactly one of memory or override_parameters must be defined
require((override_parameters is not None or memory is not None) and
(override_parameters is None or memory is None),
"Either the memory setting must be defined or you must provide Spark configuration parameters.")
# if the user hasn't provided overrides, set our defaults
parameters = []
if memory is not None:
parameters = ["--master", "spark://%s:%s" % (master_ip, SPARK_MASTER_PORT),
|
python
|
{
"resource": ""
}
|
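`_make_parameters` is cut off while building the `--master` arguments. A sketch of a plausible tail under the xor-style `require` already shown; the exact memory flag spellings and the `SPARK_MASTER_PORT` value are assumptions:

```python
SPARK_MASTER_PORT = 7077  # assumption: the standard Spark master port


def _make_parameters(master_ip, default_parameters, memory, arguments,
                     override_parameters):
    """Makes a Spark Submit style job submission line (sketch)."""
    parameters = []
    if memory is not None:
        # Assumption: spell the memory settings as spark-submit --conf flags.
        parameters = ['--master',
                      'spark://%s:%s' % (master_ip, SPARK_MASTER_PORT),
                      '--conf', 'spark.driver.memory=%dg' % memory,
                      '--conf', 'spark.executor.memory=%dg' % memory]
    elif override_parameters is not None:
        parameters = list(override_parameters)
    parameters.extend(default_parameters)
    parameters.extend(arguments)
    return parameters
```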
q276291
|
MasterAddress.docker_parameters
|
test
|
def docker_parameters(self, docker_parameters=None):
"""
Augment a list of "docker run" arguments with those needed to map the notional Spark master address to the
real one, if they are different.
"""
if self != self.actual:
add_host_option = '--add-host=spark-master:'
|
python
|
{
"resource": ""
}
|
q276292
|
ConnectorObject.refresh
|
test
|
def refresh(self):
"""Refresh reloads data from the server. It raises an error if it fails to get the object's
|
python
|
{
"resource": ""
}
|
q276293
|
run_mutect
|
test
|
def run_mutect(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, ref_dict, fai, cosmic, dbsnp):
"""
Calls MuTect to perform variant analysis
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
:param str ref: Reference genome FileStoreID
:param str ref_dict: Reference dictionary FileStoreID
:param str fai: Reference index FileStoreID
:param str cosmic: Cosmic VCF FileStoreID
:param str dbsnp: DBSNP VCF FileStoreID
:return: MuTect output (tarball) FileStoreID
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai, ref_dict, cosmic, dbsnp]
file_names = ['normal.bam', 'normal.bai', 'tumor.bam', 'tumor.bai', 'ref.fasta',
'ref.fasta.fai', 'ref.dict', 'cosmic.vcf', 'dbsnp.vcf']
for file_store_id, name in zip(file_ids, file_names):
job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
# Call: MuTect
parameters = ['--analysis_type', 'MuTect',
'--reference_sequence', 'ref.fasta',
'--cosmic', '/data/cosmic.vcf',
'--dbsnp', '/data/dbsnp.vcf',
'--input_file:normal', '/data/normal.bam',
|
python
|
{
"resource": ""
}
|
q276294
|
Device.create
|
test
|
def create(self, public=False, **kwargs):
"""Creates the device. Attempts to create private devices by default,
but if public is set to true, creates public devices.
You can also set other default properties by passing in the relevant information.
For example, setting a device with the given nickname and description::
dev.create(nickname="mydevice", description="This is an example")
Furthermore, ConnectorDB supports creation of a device's streams immediately,
which can considerably speed up device setup::
dev.create(streams={
|
python
|
{
"resource": ""
}
|
q276295
|
Device.streams
|
test
|
def streams(self):
"""Returns the list of streams that belong to the device"""
result = self.db.read(self.path, {"q": "ls"})
if result is None or result.json() is None:
return []
streams = []
|
python
|
{
"resource": ""
}
|
q276296
|
Device.export
|
test
|
def export(self, directory):
"""Exports the device to the given directory. The directory can't exist.
You can later import this device by running import_device on a user.
"""
if os.path.exists(directory):
raise FileExistsError(
"The device export directory already exists")
os.mkdir(directory)
|
python
|
{
"resource": ""
}
|
q276297
|
Shosetsu.search_vndb
|
test
|
async def search_vndb(self, stype, term):
"""
Search vndb.org for a term and return matching results from type.
:param stype: type to search for.
Type should be one of:
v - Visual Novels
r - Releases
p - Producers
s - Staff
c - Characters
g - Tags
i - Traits
u - Users
:param term: string to search for
:return: Results. Result format depends on what you searched for. See the Parsing.py module for more specific documentation.
Exceptions:
aiohttp.HttpBadRequest - On 404s
VNDBOneResult - When you search for something but it instead redirects us to a direct content page
VNDBNoResults - When nothing was found for that search
VNDBBadStype - Raised when an incorrect search type is passed
"""
fstype = ""
if stype not in ['v', 'r', 'p', 's', 'c', 'g', 'i', 'u']:
raise VNDBBadStype(stype)
else:
if stype in ['v', 'p', 's', 'c', 'u']:
fstype = '/{}/all'.format(stype)
|
python
|
{
"resource": ""
}
|
q276298
|
Shosetsu.parse_search
|
test
|
async def parse_search(self, stype, soup):
"""
This is our parsing dispatcher
:param stype: Search type category
:param soup: The beautifulsoup object that contains the parsed html
"""
if stype == 'v':
return await parse_vn_results(soup)
elif stype == 'r':
return await parse_release_results(soup)
elif stype == 'p':
return await parse_prod_staff_results(soup)
elif stype == 's':
return await parse_prod_staff_results(soup)
elif stype == 'c':
|
python
|
{
"resource": ""
}
|
q276299
|
Dataset.addStream
|
test
|
def addStream(self, stream, interpolator="closest", t1=None, t2=None, dt=None, limit=None, i1=None, i2=None, transform=None,colname=None):
"""Adds the given stream to the query construction. Additionally, you can choose the interpolator to use for this stream, as well as a special name
for the column in the returned dataset. If no column name is given, the full stream path will be used.
addStream also supports Merge queries. You can insert a merge query instead of a stream, but be sure to name the column::
d = Dataset(cdb, t1=time.time()-1000,t2=time.time(),dt=10.)
d.addStream("temperature","average")
d.addStream("steps","sum")
m = Merge(cdb)
m.addStream("mystream")
m.addStream("mystream2")
d.addStream(m,colname="mycolumn")
result = d.run()
"""
streamquery = query_maker(t1, t2, limit, i1, i2, transform)
param_stream(self.cdb, streamquery, stream)
streamquery["interpolator"] = interpolator
if colname is None:
#
|
python
|
{
"resource": ""
}
|
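The final row breaks off at the `colname is None` branch. Per the docstring, the full stream path becomes the column name when none is given; the query-dict layout below is an assumption about the class internals:

```python
        if colname is None:
            # Docstring: fall back to the full stream path as the column name.
            colname = stream.path if hasattr(stream, 'path') else str(stream)
        # Assumption: columns are registered on the dataset query by name.
        self.query['dataset'][colname] = streamquery
```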