source_code
stringlengths 45
7.24k
| function_name
stringlengths 1
111
| messages
list |
---|---|---|
def x(base_dir, extensions, exclude_dirs=None):
    """Recursively collect paths under *base_dir* that match *extensions*.

    Parameters
    ----------
    base_dir : str
        Directory to walk recursively.
    extensions : iterable of str
        File suffixes to keep (e.g. ``['.m', '.swift']``).
    exclude_dirs : iterable of str, optional
        Path prefixes to skip; ``None`` disables exclusion.

    Returns
    -------
    list of str
        Matching file paths, in ``os.walk`` order.
    """
    # BUGFIX: the default was a shared mutable ``list()``; ``None`` avoids the
    # mutable-default pitfall and is treated identically downstream.
    matches = []
    for root, _dir_names, file_names in os.walk(base_dir):
        for file_name in file_names:
            candidate = os.path.join(root, file_name)
            if should_include_file_in_search(candidate, extensions,
                                             exclude_dirs):
                matches.append(candidate)
    return matches
|
find_files
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(base_dir, extensions, exclude_dirs=list()):\n result = []\n for root, dir_names, file_names in os.walk(base_dir):\n for filename in file_names:\n candidate = os.path.join(root, filename)\n if should_include_file_in_search(candidate, extensions,\n exclude_dirs):\n result.append(candidate)\n return result\n",
"role": "user"
},
{
"content": "find_files",
"role": "assistant"
}
] |
def x(file_name, extensions, exclude_dirs):
    """Return ``True`` when *file_name* ends with one of *extensions* and does
    not live under any prefix in *exclude_dirs* (``None`` disables exclusion)."""
    if exclude_dirs is not None:
        for excluded_prefix in exclude_dirs:
            if file_name.startswith(excluded_prefix):
                return False
    return any(file_name.endswith(extension) for extension in extensions)
|
should_include_file_in_search
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name, extensions, exclude_dirs):\n return (exclude_dirs is None or not any(file_name.startswith(d) for d in\n exclude_dirs)) and any(file_name.endswith(e) for e in extensions)\n",
"role": "user"
},
{
"content": "should_include_file_in_search",
"role": "assistant"
}
] |
def x(self, rank):
    """Resolve the sentinel rank ``'auto'`` to a concrete taxonomic rank.

    Non-'auto' values pass through unchanged. For 'auto', an accessor
    object returns its stored rank; otherwise abundance data defaults to
    'species' and everything else to 'genus'.
    """
    if rank != 'auto':
        return rank
    # Accessor instances carry a pre-computed rank of their own.
    if self.__class__.__name__ == 'OneCodexAccessor':
        return self._rank
    return 'species' if self._field == 'abundance' else 'genus'
|
get_auto_rank
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rank):\n if rank == 'auto':\n if self.__class__.__name__ == 'OneCodexAccessor':\n return self._rank\n if self._field == 'abundance':\n return 'species'\n else:\n return 'genus'\n else:\n return rank\n",
"role": "user"
},
{
"content": "get_auto_rank",
"role": "assistant"
}
] |
def x(self):
    """Heuristically decide whether the results are already normalized.

    True when the object was explicitly flagged as normalized, when the
    field is 'abundance' (inherently normalized), or when every row of
    ``_results`` sums to 1.0 (to 4 decimal places).
    """
    already_flagged = getattr(self, '_normalized', False)
    if already_flagged:
        return already_flagged
    if getattr(self, '_field', None) == 'abundance':
        return True
    row_totals = self._results.sum(axis=1).round(4)
    return bool((row_totals == 1.0).all())
|
guess_normalized
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return getattr(self, '_normalized', False) or getattr(self, '_field', None\n ) == 'abundance' or bool((self._results.sum(axis=1).round(4) == 1.0\n ).all())\n",
"role": "user"
},
{
"content": "guess_normalized",
"role": "assistant"
}
] |
def x(self, metadata_fields, label=None):
    """Resolve metadata field specifiers into display-ready columns.

    Parameters
    ----------
    metadata_fields : list
        Each entry may be a metadata column name, a tuple of categorical
        columns (concatenated into one composite column), the special
        name 'Label', a tax_id present in the results, or a (partial)
        taxon name to search for.
    label : str or callable, optional
        Source for the 'Label' column: a metadata column name, or a
        function mapping each classification's metadata dict to a string.

    Returns
    -------
    tuple of (pandas.DataFrame, dict)
        Frame indexed by classification_id with one column per resolved
        field; dict maps each requested field to the column name used.

    Raises
    ------
    OneCodexException
        For unknown fields/taxa, non-categorical tuple members, or a bad
        *label* argument.
    """
    import pandas as pd
    help_metadata = ', '.join(self.metadata.keys())
    # Start from an empty frame indexed like the results so columns align.
    magic_metadata = pd.DataFrame({'classification_id': self._results.index}
        ).set_index('classification_id')
    # Requesting a label implies a 'Label' column even if not asked for.
    if label is not None and 'Label' not in metadata_fields:
        metadata_fields.append('Label')
    magic_fields = {}
    # Deduplicate and drop falsy entries before resolving each specifier.
    for f in set([f for f in metadata_fields if f]):
        if isinstance(f, tuple):
            # Tuple: join several categorical metadata columns into one.
            for field in f:
                if field not in self.metadata:
                    raise OneCodexException(
                        'Field {} not found. Choose from: {}'.format(field,
                        help_metadata))
                if not (pd.api.types.is_bool_dtype(self.metadata[field]) or
                        pd.api.types.is_categorical_dtype(self.metadata[field]) or
                        pd.api.types.is_object_dtype(self.metadata[field])):
                    raise OneCodexException(
                        'When specifying multiple metadata fields, all must be categorical'
                        )
            # Concatenate component values with '_' (leading '_' stripped).
            composite_field = '_'.join(f)
            magic_metadata[composite_field] = ''
            magic_metadata[composite_field] = magic_metadata[composite_field
                ].str.cat([self.metadata[field].astype(str) for field in f],
                sep='_').str.lstrip('_')
            magic_fields[f] = composite_field
        else:
            str_f = str(f)
            if str_f == 'Label':
                # Default label is the sample filename; *label* overrides it.
                magic_metadata[str_f] = self.metadata['filename']
                magic_fields[f] = str_f
                if isinstance(label, six.string_types):
                    if label in self.metadata.columns:
                        magic_metadata[str_f] = self.metadata[label]
                    else:
                        raise OneCodexException(
                            'Label field {} not found. Choose from: {}'.
                            format(label, help_metadata))
                elif callable(label):
                    # Callable label: applied to each classification's
                    # metadata dict; must return a string.
                    for classification_id, metadata in self.metadata.to_dict(
                            orient='index').items():
                        c_id_label = label(metadata)
                        if not isinstance(c_id_label, six.string_types):
                            raise OneCodexException(
                                'Expected string from label function, got: {}'
                                .format(type(c_id_label).__name__))
                        magic_metadata.loc[classification_id, 'Label'
                            ] = c_id_label
                elif label is not None:
                    raise OneCodexException(
                        'Expected string or callable for label, got: {}'.
                        format(type(label).__name__))
            elif str_f in self.metadata:
                # Plain metadata column: copied through directly.
                magic_metadata[f] = self.metadata[str_f]
                magic_fields[f] = str_f
            elif str_f in self._results.keys():
                # Exact tax_id in the results: column renamed 'Name (tax_id)'.
                tax_name = self.taxonomy['name'][str_f]
                df = self.to_df(rank=self.taxonomy['rank'][str_f])
                renamed_field = '{} ({})'.format(tax_name, str_f)
                magic_metadata[renamed_field] = df[str_f]
                magic_fields[f] = renamed_field
            else:
                # Fall back to a (partial) taxon-name search.
                hits = []
                # Only search names longer than 4 chars to limit noisy
                # substring matches.
                if len(str_f) > 4:
                    for tax_id, tax_name in zip(self.taxonomy.index, self.
                            taxonomy['name']):
                        if str_f.lower() == tax_name.lower():
                            # An exact (case-insensitive) name match wins
                            # outright over substring matches.
                            hits = [(tax_id, tax_name)]
                            break
                        elif str_f.lower() in tax_name.lower():
                            hits.append((tax_id, tax_name))
                # Prefer the lowest numeric tax_id among the matches.
                hits = sorted(hits, key=lambda x: int(x[0]))
                if hits:
                    df = self.to_df(rank=self.taxonomy['rank'][hits[0][0]])
                    renamed_field = '{} ({})'.format(hits[0][1], hits[0][0])
                    magic_metadata[renamed_field] = df[hits[0][0]]
                    magic_fields[f] = renamed_field
                else:
                    raise OneCodexException(
                        'Field or taxon {} not found. Choose from: {}'.
                        format(str_f, help_metadata))
    return magic_metadata, magic_fields
|
metadata_fetch
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, metadata_fields, label=None):\n import pandas as pd\n help_metadata = ', '.join(self.metadata.keys())\n magic_metadata = pd.DataFrame({'classification_id': self._results.index}\n ).set_index('classification_id')\n if label is not None and 'Label' not in metadata_fields:\n metadata_fields.append('Label')\n magic_fields = {}\n for f in set([f for f in metadata_fields if f]):\n if isinstance(f, tuple):\n for field in f:\n if field not in self.metadata:\n raise OneCodexException(\n 'Field {} not found. Choose from: {}'.format(field,\n help_metadata))\n if not (pd.api.types.is_bool_dtype(self.metadata[field]) or\n pd.api.types.is_categorical_dtype(self.metadata[field]) or\n pd.api.types.is_object_dtype(self.metadata[field])):\n raise OneCodexException(\n 'When specifying multiple metadata fields, all must be categorical'\n )\n composite_field = '_'.join(f)\n magic_metadata[composite_field] = ''\n magic_metadata[composite_field] = magic_metadata[composite_field\n ].str.cat([self.metadata[field].astype(str) for field in f],\n sep='_').str.lstrip('_')\n magic_fields[f] = composite_field\n else:\n str_f = str(f)\n if str_f == 'Label':\n magic_metadata[str_f] = self.metadata['filename']\n magic_fields[f] = str_f\n if isinstance(label, six.string_types):\n if label in self.metadata.columns:\n magic_metadata[str_f] = self.metadata[label]\n else:\n raise OneCodexException(\n 'Label field {} not found. 
Choose from: {}'.\n format(label, help_metadata))\n elif callable(label):\n for classification_id, metadata in self.metadata.to_dict(\n orient='index').items():\n c_id_label = label(metadata)\n if not isinstance(c_id_label, six.string_types):\n raise OneCodexException(\n 'Expected string from label function, got: {}'\n .format(type(c_id_label).__name__))\n magic_metadata.loc[classification_id, 'Label'\n ] = c_id_label\n elif label is not None:\n raise OneCodexException(\n 'Expected string or callable for label, got: {}'.\n format(type(label).__name__))\n elif str_f in self.metadata:\n magic_metadata[f] = self.metadata[str_f]\n magic_fields[f] = str_f\n elif str_f in self._results.keys():\n tax_name = self.taxonomy['name'][str_f]\n df = self.to_df(rank=self.taxonomy['rank'][str_f])\n renamed_field = '{} ({})'.format(tax_name, str_f)\n magic_metadata[renamed_field] = df[str_f]\n magic_fields[f] = renamed_field\n else:\n hits = []\n if len(str_f) > 4:\n for tax_id, tax_name in zip(self.taxonomy.index, self.\n taxonomy['name']):\n if str_f.lower() == tax_name.lower():\n hits = [(tax_id, tax_name)]\n break\n elif str_f.lower() in tax_name.lower():\n hits.append((tax_id, tax_name))\n hits = sorted(hits, key=lambda x: int(x[0]))\n if hits:\n df = self.to_df(rank=self.taxonomy['rank'][hits[0][0]])\n renamed_field = '{} ({})'.format(hits[0][1], hits[0][0])\n magic_metadata[renamed_field] = df[hits[0][0]]\n magic_fields[f] = renamed_field\n else:\n raise OneCodexException(\n 'Field or taxon {} not found. Choose from: {}'.\n format(str_f, help_metadata))\n return magic_metadata, magic_fields\n",
"role": "user"
},
{
"content": "metadata_fetch",
"role": "assistant"
}
] |
def x(self, rank='auto', top_n=None, threshold=None, remove_zeros=True,
        normalize='auto', table_format='wide'):
    """Transform the collated results into a ClassificationsDataFrame.

    Parameters
    ----------
    rank : str
        Taxonomic rank to keep ('auto' resolves via ``_get_auto_rank``).
    top_n : int, optional
        Keep only the N taxa with the largest column sums.
    threshold : float, optional
        Keep only taxa whose maximum value reaches this threshold.
    remove_zeros : bool
        Drop taxa that are zero in every classification.
    normalize : bool or 'auto'
        Divide each row by its sum; 'auto' normalizes only when a rank is
        selected and the field is not 'abundance'.
    table_format : {'wide', 'long'}
        'wide': one column per tax_id. 'long': one row per
        (classification_id, tax_id) pair.

    Returns
    -------
    ClassificationsDataFrame

    Raises
    ------
    OneCodexException
        If no taxa survive the rank filter, if ``normalize=False`` on
        already-normalized data, or for a bad *table_format*.
    """
    from onecodex.dataframes import ClassificationsDataFrame
    rank = self._get_auto_rank(rank)
    df = self._results.copy()
    if rank:
        if rank == 'kingdom':
            warnings.warn(
                'Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, Archaea and Eukaryota.'
                )
        # Keep only columns whose taxonomy rank matches the requested rank.
        tax_ids_to_keep = []
        for tax_id in df.keys():
            if self.taxonomy['rank'][tax_id] == rank:
                tax_ids_to_keep.append(tax_id)
        if len(tax_ids_to_keep) == 0:
            raise OneCodexException('No taxa kept--is rank ({}) correct?'.
                format(rank))
        df = df.loc[:, tax_ids_to_keep]
    if normalize is False and self._guess_normalized():
        raise OneCodexException(
            'Data has already been normalized and this can not be undone.')
    # NOTE: 'and' binds tighter than 'or', so this reads: normalize is True,
    # OR ('auto' AND a rank was selected AND the field is not abundance).
    if (normalize is True or normalize == 'auto' and rank is not None and
            self._field != 'abundance'):
        df = df.div(df.sum(axis=1), axis=0)
    if remove_zeros:
        df = df.loc[:, (df != 0).any(axis=0)]
    if threshold:
        df = df.loc[:, df.max() >= threshold]
    if top_n:
        idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index
        df = df.loc[:, idx]
    # Extra metadata carried on the resulting frame for downstream plotting.
    ocx_data = {'ocx_metadata': self.metadata.copy(), 'ocx_rank': rank,
        'ocx_field': self._field, 'ocx_taxonomy': self.taxonomy.copy(),
        'ocx_normalized': normalize}
    if table_format == 'long':
        # Melt the wide frame by hand into classification/tax_id/value rows.
        long_df = {'classification_id': [], 'tax_id': [], self._field: []}
        for t_id in df:
            for c_id, count in df[t_id].iteritems():
                long_df['classification_id'].append(c_id)
                long_df['tax_id'].append(t_id)
                long_df[self._field].append(count)
        results_df = ClassificationsDataFrame(long_df, **ocx_data)
    elif table_format == 'wide':
        results_df = ClassificationsDataFrame(df, **ocx_data)
    else:
        raise OneCodexException('table_format must be one of: long, wide')
    return results_df
|
to_df
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rank='auto', top_n=None, threshold=None, remove_zeros=True,\n normalize='auto', table_format='wide'):\n from onecodex.dataframes import ClassificationsDataFrame\n rank = self._get_auto_rank(rank)\n df = self._results.copy()\n if rank:\n if rank == 'kingdom':\n warnings.warn(\n 'Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, Archaea and Eukaryota.'\n )\n tax_ids_to_keep = []\n for tax_id in df.keys():\n if self.taxonomy['rank'][tax_id] == rank:\n tax_ids_to_keep.append(tax_id)\n if len(tax_ids_to_keep) == 0:\n raise OneCodexException('No taxa kept--is rank ({}) correct?'.\n format(rank))\n df = df.loc[:, tax_ids_to_keep]\n if normalize is False and self._guess_normalized():\n raise OneCodexException(\n 'Data has already been normalized and this can not be undone.')\n if (normalize is True or normalize == 'auto' and rank is not None and \n self._field != 'abundance'):\n df = df.div(df.sum(axis=1), axis=0)\n if remove_zeros:\n df = df.loc[:, (df != 0).any(axis=0)]\n if threshold:\n df = df.loc[:, df.max() >= threshold]\n if top_n:\n idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index\n df = df.loc[:, idx]\n ocx_data = {'ocx_metadata': self.metadata.copy(), 'ocx_rank': rank,\n 'ocx_field': self._field, 'ocx_taxonomy': self.taxonomy.copy(),\n 'ocx_normalized': normalize}\n if table_format == 'long':\n long_df = {'classification_id': [], 'tax_id': [], self._field: []}\n for t_id in df:\n for c_id, count in df[t_id].iteritems():\n long_df['classification_id'].append(c_id)\n long_df['tax_id'].append(t_id)\n long_df[self._field].append(count)\n results_df = ClassificationsDataFrame(long_df, **ocx_data)\n elif table_format == 'wide':\n results_df = ClassificationsDataFrame(df, **ocx_data)\n else:\n raise OneCodexException('table_format must be one of: long, wide')\n return results_df\n",
"role": "user"
},
{
"content": "to_df",
"role": "assistant"
}
] |
def x(self, nb, resources=None, **kw):
    """Export a notebook to HTML with One Codex report decorations.

    Rewrites jupyter-vega SVG outputs as inline base64 ``<img>`` tags,
    hoists 'head.style' CSS outputs into the HTML head, then appends the
    logo, date, report stylesheet and provenance ``<meta>`` tags to
    ``resources['metadata']['head_block']`` before delegating to the
    parent exporter.

    Parameters
    ----------
    nb : NotebookNode
        Notebook to export (deep-copied; the caller's copy is untouched).
    resources : dict, optional
        nbconvert resources; a 'metadata' entry is created if missing.

    Returns
    -------
    tuple
        (output, resources) as produced by the parent exporter.
    """
    nb = copy.deepcopy(nb)
    if resources is None:
        resources = {'metadata': {}}
    elif 'metadata' not in resources:
        resources['metadata'] = {}
    do_not_insert_date = False
    for cell in nb.cells:
        if cell['cell_type'] == 'code':
            for out in cell['outputs']:
                if out.get('metadata') and out['metadata'].get('jupyter-vega'):
                    for mimetype in out.get('data', []):
                        if mimetype == 'image/svg+xml':
                            # Inline the SVG so the report is self-contained.
                            img = b64encode(bytes(out['data'][
                                'image/svg+xml'], encoding='UTF-8')).decode()
                            img = (
                                '<img src="data:image/svg+xml;charset=utf-8;base64,%s">'
                                % (img,))
                            out['data'] = {'image/svg+xml': img}
                            break
                    else:
                        # No SVG present: drop the vega output entirely.
                        out['data'] = {}
                elif out.get('metadata') and out['metadata'].get('onecodex'
                        ) == 'head.style':
                    # Move cell-emitted CSS into the document head.
                    for mimetype in out.get('data', []):
                        if mimetype == 'text/css':
                            style_block = ('<style type="text/css">{}</style>'
                                .format(out['data']['text/css']))
                            head_block = resources['metadata'].get('head_block'
                                , '') + style_block
                            resources['metadata']['head_block'] = head_block
                            break
                    out['data'] = {'text/plain': ''}
                elif out.get('metadata') and out['metadata'].get('onecodex'
                        ) == 'customdate':
                    # A custom date cell suppresses the auto-inserted date.
                    do_not_insert_date = True
    if not os.environ.get('ONE_CODEX_REPORT_NO_LOGO', False):
        # FIX: open the logo inside a context manager so the handle is closed
        # (it was previously leaked).
        with open(os.path.join(ASSETS_PATH, 'one_codex_logo.png'), 'rb') as logo_file:
            img = b64encode(logo_file.read()).decode()
        img = 'data:image/png;charset=utf-8;base64,%s' % (img,)
        logo_html = report.set_logo(img, position='right')._repr_mimebundle_()[
            'text/html']
        head_block = resources['metadata'].get('head_block', '') + logo_html
        resources['metadata']['head_block'] = head_block
    if not do_not_insert_date:
        # NOTE(review): set_date()'s mimebundle is indexed with [0] here but
        # set_logo()'s is indexed directly above -- confirm both return shapes.
        date_div = report.set_date()._repr_mimebundle_()[0]['text/html']
        head_block = resources['metadata'].get('head_block', '') + date_div
        resources['metadata']['head_block'] = head_block
    # FIX: close the CSS template file (was previously left open).
    with open(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE), 'r') as css_file:
        css = css_file.read()
    css = '<style type="text/css">{}</style>'.format(css)
    head_block = resources['metadata'].get('head_block', '') + css
    resources['metadata']['head_block'] = head_block
    # Provenance <meta> tags: creation time plus optional user/notebook UUIDs.
    meta_tags = [('dcterms.created', datetime.datetime.now(pytz.utc).
        isoformat())]
    user_uuid = os.environ.get('ONE_CODEX_USER_UUID')
    if user_uuid is not None:
        meta_tags.append(('author', 'one_codex_user_uuid_{}'.format(user_uuid))
            )
    nb_uuid = os.environ.get('ONE_CODEX_NOTEBOOK_UUID')
    if nb_uuid is not None:
        meta_tags.append(('author', 'one_codex_notebook_uuid_{}'.format(
            nb_uuid)))
    meta_html = ''
    for meta_name, meta_val in meta_tags:
        meta_html += '<meta name="{}" content="{}" />\n'.format(meta_name,
            meta_val)
    head_block = resources['metadata'].get('head_block', '') + meta_html
    resources['metadata']['head_block'] = head_block
    output, resources = super(OneCodexHTMLExporter, self).from_notebook_node(nb
        , resources=resources, **kw)
    return output, resources
|
from_notebook_node
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, nb, resources=None, **kw):\n nb = copy.deepcopy(nb)\n if resources is None:\n resources = {'metadata': {}}\n elif 'metadata' not in resources:\n resources['metadata'] = {}\n do_not_insert_date = False\n for cell in nb.cells:\n if cell['cell_type'] == 'code':\n for out in cell['outputs']:\n if out.get('metadata') and out['metadata'].get('jupyter-vega'):\n for mimetype in out.get('data', []):\n if mimetype == 'image/svg+xml':\n img = b64encode(bytes(out['data'][\n 'image/svg+xml'], encoding='UTF-8')).decode()\n img = (\n '<img src=\"data:image/svg+xml;charset=utf-8;base64,%s\">'\n % (img,))\n out['data'] = {'image/svg+xml': img}\n break\n else:\n out['data'] = {}\n elif out.get('metadata') and out['metadata'].get('onecodex'\n ) == 'head.style':\n for mimetype in out.get('data', []):\n if mimetype == 'text/css':\n style_block = ('<style type=\"text/css\">{}</style>'\n .format(out['data']['text/css']))\n head_block = resources['metadata'].get('head_block'\n , '') + style_block\n resources['metadata']['head_block'] = head_block\n break\n out['data'] = {'text/plain': ''}\n elif out.get('metadata') and out['metadata'].get('onecodex'\n ) == 'customdate':\n do_not_insert_date = True\n if not os.environ.get('ONE_CODEX_REPORT_NO_LOGO', False):\n img = b64encode(bytes(open(os.path.join(ASSETS_PATH,\n 'one_codex_logo.png'), 'rb').read())).decode()\n img = 'data:image/png;charset=utf-8;base64,%s' % (img,)\n logo_html = report.set_logo(img, position='right')._repr_mimebundle_()[\n 'text/html']\n head_block = resources['metadata'].get('head_block', '') + logo_html\n resources['metadata']['head_block'] = head_block\n if not do_not_insert_date:\n date_div = report.set_date()._repr_mimebundle_()[0]['text/html']\n head_block = resources['metadata'].get('head_block', '') + date_div\n resources['metadata']['head_block'] = head_block\n css = open(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE), 'r').read()\n css = '<style type=\"text/css\">{}</style>'.format(css)\n 
head_block = resources['metadata'].get('head_block', '') + css\n resources['metadata']['head_block'] = head_block\n meta_tags = [('dcterms.created', datetime.datetime.now(pytz.utc).\n isoformat())]\n user_uuid = os.environ.get('ONE_CODEX_USER_UUID')\n if user_uuid is not None:\n meta_tags.append(('author', 'one_codex_user_uuid_{}'.format(user_uuid))\n )\n nb_uuid = os.environ.get('ONE_CODEX_NOTEBOOK_UUID')\n if nb_uuid is not None:\n meta_tags.append(('author', 'one_codex_notebook_uuid_{}'.format(\n nb_uuid)))\n meta_html = ''\n for meta_name, meta_val in meta_tags:\n meta_html += '<meta name=\"{}\" content=\"{}\" />\\n'.format(meta_name,\n meta_val)\n head_block = resources['metadata'].get('head_block', '') + meta_html\n resources['metadata']['head_block'] = head_block\n output, resources = super(OneCodexHTMLExporter, self).from_notebook_node(nb\n , resources=resources, **kw)\n return output, resources\n",
"role": "user"
},
{
"content": "from_notebook_node",
"role": "assistant"
}
] |
def x(self, nb, resources=None, **kw):
    """Render the notebook to PDF.

    Delegates HTML generation to the parent exporter, then converts the
    result with WeasyPrint using the bundled report stylesheet.

    Returns a (pdf_bytes, resources) tuple.
    """
    from weasyprint import HTML, CSS
    notebook_copy = copy.deepcopy(nb)
    html_output, resources = super(OneCodexPDFExporter, self
        ).from_notebook_node(notebook_copy, resources=resources, **kw)
    stylesheet = CSS(os.path.join(ASSETS_PATH, CSS_TEMPLATE_FILE))
    pdf_buffer = BytesIO()
    HTML(string=html_output).write_pdf(pdf_buffer, stylesheets=[stylesheet])
    pdf_buffer.seek(0)
    return pdf_buffer.read(), resources
|
from_notebook_node
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, nb, resources=None, **kw):\n from weasyprint import HTML, CSS\n nb = copy.deepcopy(nb)\n output, resources = super(OneCodexPDFExporter, self).from_notebook_node(nb,\n resources=resources, **kw)\n buf = BytesIO()\n HTML(string=output).write_pdf(buf, stylesheets=[CSS(os.path.join(\n ASSETS_PATH, CSS_TEMPLATE_FILE))])\n buf.seek(0)\n return buf.read(), resources\n",
"role": "user"
},
{
"content": "from_notebook_node",
"role": "assistant"
}
] |
def x(self, nb, resources=None, **kw):
    """Render the report and upload it to the One Codex document portal.

    Returns a (json_payload, resources) tuple; the payload carries either
    the new document_id (status 200) or an error message (status 500).
    """
    output, resources = super(OneCodexDocumentExporter, self
        ).from_notebook_node(nb, resources=resources, **kw)
    from onecodex import Api
    from onecodex.lib.upload import upload_document_fileobj
    ocx = Api()
    # Fall back to a dated default when no explicit filename was provided.
    default_filename = 'Analysis Report - {dt:%B} {dt.day}, {dt:%Y}'.format(dt
        =datetime.datetime.now())
    file_name = resources['metadata'].get('one_codex_doc_portal_filename',
        default_filename)
    try:
        document_id = upload_document_fileobj(BytesIO(output), file_name,
            ocx._client.session, ocx.Documents._resource)
    except UploadException as exc:
        return json.dumps({'status': 500, 'message': str(exc)}), resources
    except Exception:
        return json.dumps({'status': 500, 'message':
            'Upload failed. Please contact [email protected] for assistance.'}
            ), resources
    return json.dumps({'status': 200, 'document_id': document_id}), resources
|
from_notebook_node
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, nb, resources=None, **kw):\n output, resources = super(OneCodexDocumentExporter, self\n ).from_notebook_node(nb, resources=resources, **kw)\n from onecodex import Api\n from onecodex.lib.upload import upload_document_fileobj\n ocx = Api()\n default_filename = 'Analysis Report - {dt:%B} {dt.day}, {dt:%Y}'.format(dt\n =datetime.datetime.now())\n file_name = resources['metadata'].get('one_codex_doc_portal_filename',\n default_filename)\n try:\n document_id = upload_document_fileobj(BytesIO(output), file_name,\n ocx._client.session, ocx.Documents._resource)\n except UploadException as exc:\n resp = json.dumps({'status': 500, 'message': str(exc)})\n return resp, resources\n except Exception:\n resp = json.dumps({'status': 500, 'message':\n 'Upload failed. Please contact [email protected] for assistance.'})\n return resp, resources\n resp = json.dumps({'status': 200, 'document_id': document_id})\n return resp, resources\n",
"role": "user"
},
{
"content": "from_notebook_node",
"role": "assistant"
}
] |
def x(project_base_dir, localization_bundle_path, tmp_directory,
        exclude_dirs, include_strings_file, special_ui_components_prefix):
    """Regenerate the default-language Localizable.strings for a project.

    Runs Apple's ``genstrings`` over the project sources into a scratch
    directory, merges in strings extracted from Interface Builder files
    and an optional extra strings file, de-duplicates, and finally merges
    with (or moves onto) the existing localization file in the bundle.

    Exits the process with genstrings' return code if it fails.
    """
    localization_directory = os.path.join(localization_bundle_path,
        DEFAULT_LANGUAGE_DIRECTORY_NAME)
    if not os.path.exists(localization_directory):
        os.makedirs(localization_directory)
    localization_file = os.path.join(localization_directory,
        LOCALIZATION_FILENAME)
    # Work in a scratch copy so the live file is only touched at the end.
    tmp_localization_directory = os.path.join(tmp_directory,
        DEFAULT_LANGUAGE_DIRECTORY_NAME)
    tmp_localization_file = os.path.join(tmp_localization_directory,
        LOCALIZATION_FILENAME)
    if os.path.isdir(tmp_localization_directory):
        shutil.rmtree(tmp_localization_directory)
    os.mkdir(tmp_localization_directory)
    logging.info('Running genstrings')
    source_files = extract_source_files(project_base_dir, exclude_dirs)
    # SECURITY NOTE(review): command is built as a string and run with
    # shell=True; file names are quoted but not shell-escaped. Consider
    # passing an argv list with shell=False.
    genstrings_cmd = 'genstrings -s JTLocalizedString -o %s %s' % (
        tmp_localization_directory, ' '.join([('"%s"' % (source_file,)) for
        source_file in source_files]))
    genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess
        .PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
    genstrings_out, genstrings_err = genstrings_process.communicate()
    remove_empty_comments_from_file(tmp_localization_file)
    # genstrings reports issues on stderr; preserve them as comments.
    add_genstrings_comments_to_file(tmp_localization_file, genstrings_err)
    genstrings_rc = genstrings_process.returncode
    if genstrings_rc != 0:
        logging.fatal('genstrings returned %d, aborting run!', genstrings_rc)
        sys.exit(genstrings_rc)
    create_localized_strings_from_ib_files(project_base_dir, exclude_dirs,
        tmp_localization_file, special_ui_components_prefix)
    if include_strings_file:
        # Append the caller-supplied extra strings before de-duplication.
        target = open_strings_file(tmp_localization_file, 'a')
        source = open_strings_file(include_strings_file, 'r')
        target.write(source.read())
        source.close()
        target.close()
    handle_duplications(tmp_localization_file)
    if os.path.isfile(localization_file):
        logging.info('Merging old localizable with new one...')
        merge_strings_files(localization_file, tmp_localization_file)
    else:
        logging.info('No Localizable yet, moving the created file...')
        shutil.move(tmp_localization_file, localization_file)
|
generate_strings
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(project_base_dir, localization_bundle_path, tmp_directory,\n exclude_dirs, include_strings_file, special_ui_components_prefix):\n localization_directory = os.path.join(localization_bundle_path,\n DEFAULT_LANGUAGE_DIRECTORY_NAME)\n if not os.path.exists(localization_directory):\n os.makedirs(localization_directory)\n localization_file = os.path.join(localization_directory,\n LOCALIZATION_FILENAME)\n tmp_localization_directory = os.path.join(tmp_directory,\n DEFAULT_LANGUAGE_DIRECTORY_NAME)\n tmp_localization_file = os.path.join(tmp_localization_directory,\n LOCALIZATION_FILENAME)\n if os.path.isdir(tmp_localization_directory):\n shutil.rmtree(tmp_localization_directory)\n os.mkdir(tmp_localization_directory)\n logging.info('Running genstrings')\n source_files = extract_source_files(project_base_dir, exclude_dirs)\n genstrings_cmd = 'genstrings -s JTLocalizedString -o %s %s' % (\n tmp_localization_directory, ' '.join([('\"%s\"' % (source_file,)) for\n source_file in source_files]))\n genstrings_process = subprocess.Popen(genstrings_cmd, stdout=subprocess\n .PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)\n genstrings_out, genstrings_err = genstrings_process.communicate()\n remove_empty_comments_from_file(tmp_localization_file)\n add_genstrings_comments_to_file(tmp_localization_file, genstrings_err)\n genstrings_rc = genstrings_process.returncode\n if genstrings_rc != 0:\n logging.fatal('genstrings returned %d, aborting run!', genstrings_rc)\n sys.exit(genstrings_rc)\n create_localized_strings_from_ib_files(project_base_dir, exclude_dirs,\n tmp_localization_file, special_ui_components_prefix)\n if include_strings_file:\n target = open_strings_file(tmp_localization_file, 'a')\n source = open_strings_file(include_strings_file, 'r')\n target.write(source.read())\n source.close()\n target.close()\n handle_duplications(tmp_localization_file)\n if os.path.isfile(localization_file):\n logging.info('Merging old localizable with new 
one...')\n merge_strings_files(localization_file, tmp_localization_file)\n else:\n logging.info('No Localizable yet, moving the created file...')\n shutil.move(tmp_localization_file, localization_file)\n",
"role": "user"
},
{
"content": "generate_strings",
"role": "assistant"
}
] |
def x(self, filter_func):
    """Return a new collection of the same type containing only the items
    for which *filter_func* returns exactly ``True``.

    Raises OneCodexException when *filter_func* is not callable.
    """
    if not callable(filter_func):
        raise OneCodexException('Expected callable for filter, got: {}'.
            format(type(filter_func).__name__))
    # NOTE: identity check against True (not mere truthiness) is intentional.
    kept = [item for item in self if filter_func(item) is True]
    return self.__class__(kept, **self._kwargs)
|
filter
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, filter_func):\n if callable(filter_func):\n return self.__class__([obj for obj in self if filter_func(obj) is \n True], **self._kwargs)\n else:\n raise OneCodexException('Expected callable for filter, got: {}'.\n format(type(filter_func).__name__))\n",
"role": "user"
},
{
"content": "filter",
"role": "assistant"
}
] |
def x(self, skip_missing=None):
    """Populate ``self._cached['classifications']`` from ``self._res_list``.

    Each element must be a Samples object (its ``primary_classification``
    is used) or a Classifications object. When *skip_missing* is true,
    unsuccessful classifications are dropped with a warning.

    Parameters
    ----------
    skip_missing : bool, optional
        Overrides ``self._kwargs['skip_missing']`` when given. BUGFIX:
        an explicit ``False`` is now honored; previously any falsy value
        silently fell back to the stored default.

    Raises
    ------
    OneCodexException
        If an element is neither Samples nor Classifications.
    """
    if skip_missing is None:
        skip_missing = self._kwargs['skip_missing']
    new_classifications = []
    for a in self._res_list:
        if a.__class__.__name__ == 'Samples':
            c = a.primary_classification
        elif a.__class__.__name__ == 'Classifications':
            c = a
        else:
            raise OneCodexException(
                'Objects in SampleCollection must be one of: Classifications, Samples'
                )
        if skip_missing and not c.success:
            warnings.warn('Classification {} not successful. Skipping.'.
                format(c.id))
            continue
        new_classifications.append(c)
    self._cached['classifications'] = new_classifications
|
classification_fetch
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, skip_missing=None):\n skip_missing = skip_missing if skip_missing else self._kwargs[\n 'skip_missing']\n new_classifications = []\n for a in self._res_list:\n if a.__class__.__name__ == 'Samples':\n c = a.primary_classification\n elif a.__class__.__name__ == 'Classifications':\n c = a\n else:\n raise OneCodexException(\n 'Objects in SampleCollection must be one of: Classifications, Samples'\n )\n if skip_missing and not c.success:\n warnings.warn('Classification {} not successful. Skipping.'.\n format(c.id))\n continue\n new_classifications.append(c)\n self._cached['classifications'] = new_classifications\n",
"role": "user"
},
{
"content": "classification_fetch",
"role": "assistant"
}
] |
def x(self):
    """Assemble a per-classification metadata DataFrame and cache it.

    Columns come from the metadata resource schema (minus '$uri' and
    'sample'), plus classification/sample/metadata ids, creation time,
    filename, and any custom metadata keys. The frame is indexed by
    classification_id and stored in ``self._cached['metadata']``.
    """
    import pandas as pd
    default_fields = None
    rows = []
    for classification in self._classifications:
        meta = classification.sample.metadata
        if default_fields is None:
            # Derive the column list once from the first record's schema.
            default_fields = list(meta._resource._schema['properties'].keys())
            default_fields.remove('$uri')
            default_fields.remove('sample')
        row = {field: getattr(meta, field) for field in default_fields}
        row['classification_id'] = classification.id
        row['sample_id'] = meta.sample.id
        row['metadata_id'] = meta.id
        row['created_at'] = meta.sample.created_at
        row['filename'] = classification.sample.filename
        row.update(meta.custom)
        rows.append(row)
    if rows:
        frame = pd.DataFrame(rows).set_index('classification_id')
    else:
        frame = pd.DataFrame(columns=['classification_id', 'sample_id',
            'metadata_id', 'created_at'])
    self._cached['metadata'] = frame
|
collate_metadata
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n import pandas as pd\n DEFAULT_FIELDS = None\n metadata = []\n for c in self._classifications:\n m = c.sample.metadata\n if DEFAULT_FIELDS is None:\n DEFAULT_FIELDS = list(m._resource._schema['properties'].keys())\n DEFAULT_FIELDS.remove('$uri')\n DEFAULT_FIELDS.remove('sample')\n metadatum = {f: getattr(m, f) for f in DEFAULT_FIELDS}\n metadatum['classification_id'] = c.id\n metadatum['sample_id'] = m.sample.id\n metadatum['metadata_id'] = m.id\n metadatum['created_at'] = m.sample.created_at\n metadatum['filename'] = c.sample.filename\n metadatum.update(m.custom)\n metadata.append(metadatum)\n if metadata:\n metadata = pd.DataFrame(metadata).set_index('classification_id')\n else:\n metadata = pd.DataFrame(columns=['classification_id', 'sample_id',\n 'metadata_id', 'created_at'])\n self._cached['metadata'] = metadata\n",
"role": "user"
},
{
"content": "collate_metadata",
"role": "assistant"
}
] |
def x(self, field=None):
    """Build the wide results matrix and taxonomy table from each
    classification's result table, caching 'field', 'results' and
    'taxonomy' on ``self._cached``.

    'auto' resolves to 'readcount_w_children'; any other unrecognized
    field raises OneCodexException.
    """
    import pandas as pd
    field = field if field else self._kwargs['field']
    if field not in ('auto', 'abundance', 'readcount', 'readcount_w_children'):
        raise OneCodexException('Specified field ({}) not valid.'.format(field)
            )
    if field == 'auto':
        field = 'readcount_w_children'
    self._cached['field'] = field
    counts = {'classification_id': [c.id for c in self._classifications]}
    tax_info = {'tax_id': [], 'name': [], 'rank': [], 'parent_tax_id': []}
    for row_index, classification in enumerate(self._classifications):
        for entry in classification.results()['table']:
            entry_tax_id = entry['tax_id']
            if entry_tax_id not in tax_info['tax_id']:
                # First sighting: record taxonomy and add a zero-filled column.
                for key in ('tax_id', 'name', 'rank', 'parent_tax_id'):
                    tax_info[key].append(entry[key])
                counts[entry_tax_id] = [0] * len(self._classifications)
            counts[entry_tax_id][row_index] = entry[field]
    results = pd.DataFrame(counts).set_index('classification_id').fillna(0)
    results.columns.name = 'tax_id'
    self._cached['results'] = results
    self._cached['taxonomy'] = pd.DataFrame(tax_info).set_index('tax_id')
|
collate_results
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, field=None):\n import pandas as pd\n field = field if field else self._kwargs['field']\n if field not in ('auto', 'abundance', 'readcount', 'readcount_w_children'):\n raise OneCodexException('Specified field ({}) not valid.'.format(field)\n )\n df = {'classification_id': [c.id for c in self._classifications]}\n tax_info = {'tax_id': [], 'name': [], 'rank': [], 'parent_tax_id': []}\n if field == 'auto':\n field = 'readcount_w_children'\n self._cached['field'] = field\n for c_idx, c in enumerate(self._classifications):\n result = c.results()['table']\n for d in result:\n d_tax_id = d['tax_id']\n if d_tax_id not in tax_info['tax_id']:\n for k in ('tax_id', 'name', 'rank', 'parent_tax_id'):\n tax_info[k].append(d[k])\n df[d_tax_id] = [0] * len(self._classifications)\n df[d_tax_id][c_idx] = d[field]\n df = pd.DataFrame(df).set_index('classification_id').fillna(0)\n df.columns.name = 'tax_id'\n tax_info = pd.DataFrame(tax_info).set_index('tax_id')\n self._cached['results'] = df\n self._cached['taxonomy'] = tax_info\n",
"role": "user"
},
{
"content": "collate_results",
"role": "assistant"
}
] |
def x(self, biom_id=None):
    """Build a BIOM 1.0.0 ("OTU table") dict from these classification results.

    Each classification becomes one column; each observed tax_id becomes one
    row. Counts are stored sparsely as [row, column, count] triples.

    Parameters
    ----------
    biom_id : optional
        Value stored in the table's top-level 'id' field.

    Returns
    -------
    collections.OrderedDict
        A dict matching the BIOM 1.0.0 JSON schema (sparse, int matrix).
    """
    otu_format = 'Biological Observation Matrix 1.0.0'
    otu_url = 'http://biom-format.org'
    otu = OrderedDict({'id': biom_id, 'format': otu_format, 'format_url':
        otu_url, 'type': 'OTU table', 'generated_by': 'One Codex API V1',
        'date': datetime.now().isoformat(), 'rows': [], 'columns': [],
        'matrix_type': 'sparse', 'matrix_element_type': 'int'})
    # rows maps tax_id -> {column index -> readcount}; built sparsely so
    # taxa absent from a sample simply have no entry for that column.
    rows = defaultdict(dict)
    tax_ids_to_names = {}
    for classification in self._classifications:
        # Column index is assigned by insertion order of the columns list.
        col_id = len(otu['columns'])
        columns_entry = {'id': str(classification.id), 'sample_id': str(
            classification.sample.id), 'sample_filename': classification.
            sample.filename, 'metadata': json.loads(classification.sample.
            metadata._to_json(include_references=False))}
        otu['columns'].append(columns_entry)
        sample_df = classification.table()
        for row in sample_df.iterrows():
            # row is an (index, Series) pair; row[1] holds the record fields.
            tax_id = row[1]['tax_id']
            tax_ids_to_names[tax_id] = row[1]['name']
            rows[tax_id][col_id] = int(row[1]['readcount'])
    num_rows = len(rows)
    num_cols = len(otu['columns'])
    otu['shape'] = [num_rows, num_cols]
    otu['data'] = []
    # Emit rows in sorted tax_id order; row index follows insertion order.
    for present_taxa in sorted(rows):
        row_id = len(otu['rows'])
        otu['rows'].append({'id': present_taxa, 'metadata': {'taxonomy':
            tax_ids_to_names[present_taxa]}})
        for sample_with_hit in rows[present_taxa]:
            counts = rows[present_taxa][sample_with_hit]
            otu['data'].append([row_id, sample_with_hit, counts])
    return otu
|
to_otu
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, biom_id=None):\n otu_format = 'Biological Observation Matrix 1.0.0'\n otu_url = 'http://biom-format.org'\n otu = OrderedDict({'id': biom_id, 'format': otu_format, 'format_url':\n otu_url, 'type': 'OTU table', 'generated_by': 'One Codex API V1',\n 'date': datetime.now().isoformat(), 'rows': [], 'columns': [],\n 'matrix_type': 'sparse', 'matrix_element_type': 'int'})\n rows = defaultdict(dict)\n tax_ids_to_names = {}\n for classification in self._classifications:\n col_id = len(otu['columns'])\n columns_entry = {'id': str(classification.id), 'sample_id': str(\n classification.sample.id), 'sample_filename': classification.\n sample.filename, 'metadata': json.loads(classification.sample.\n metadata._to_json(include_references=False))}\n otu['columns'].append(columns_entry)\n sample_df = classification.table()\n for row in sample_df.iterrows():\n tax_id = row[1]['tax_id']\n tax_ids_to_names[tax_id] = row[1]['name']\n rows[tax_id][col_id] = int(row[1]['readcount'])\n num_rows = len(rows)\n num_cols = len(otu['columns'])\n otu['shape'] = [num_rows, num_cols]\n otu['data'] = []\n for present_taxa in sorted(rows):\n row_id = len(otu['rows'])\n otu['rows'].append({'id': present_taxa, 'metadata': {'taxonomy':\n tax_ids_to_names[present_taxa]}})\n for sample_with_hit in rows[present_taxa]:\n counts = rows[present_taxa][sample_with_hit]\n otu['data'].append([row_id, sample_with_hit, counts])\n return otu\n",
"role": "user"
},
{
"content": "to_otu",
"role": "assistant"
}
] |
def x(updated_localizable_file, old_translated_file, new_translated_file,
    merged_translated_file):
    """Merge translations back into an updated localizable .strings file.

    For each entry of ``updated_localizable_file``, a translation is chosen
    in this order of preference:

    1. the new translations, keyed by the entry's *value*,
    2. the old translations, keyed by the entry's *key*,
    3. the new translations, keyed by the entry's *key*.

    Entries with no matching translation are dropped. Header comments are
    preserved. The merged result is written to ``merged_translated_file``.
    """
    output_file_elements = []
    old_translated_file_dict = (
        generate_localization_key_to_entry_dictionary_from_file(
        old_translated_file))
    new_translated_file_dict = (
        generate_localization_key_to_entry_dictionary_from_file(
        new_translated_file))
    f = open_strings_file(updated_localizable_file, 'r')
    try:
        for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(
            f):
            translation_value = None
            if len(header_comment) > 0:
                output_file_elements.append(Comment(header_comment))
            if value in new_translated_file_dict:
                translation_value = new_translated_file_dict[value].value
            elif key in old_translated_file_dict:
                translation_value = old_translated_file_dict[key].value
            elif key in new_translated_file_dict:
                translation_value = new_translated_file_dict[key].value
            if translation_value is not None:
                output_file_elements.append(LocalizationEntry(comments, key,
                    translation_value))
    finally:
        # Close the source file even if parsing raises part-way through;
        # the previous version leaked the handle on any exception.
        f.close()
    write_file_elements_to_strings_file(merged_translated_file,
        output_file_elements)
|
localization_merge_back
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(updated_localizable_file, old_translated_file, new_translated_file,\n merged_translated_file):\n output_file_elements = []\n old_translated_file_dict = (\n generate_localization_key_to_entry_dictionary_from_file(\n old_translated_file))\n new_translated_file_dict = (\n generate_localization_key_to_entry_dictionary_from_file(\n new_translated_file))\n f = open_strings_file(updated_localizable_file, 'r')\n for header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(\n f):\n translation_value = None\n if len(header_comment) > 0:\n output_file_elements.append(Comment(header_comment))\n if value in new_translated_file_dict:\n translation_value = new_translated_file_dict[value].value\n elif key in old_translated_file_dict:\n translation_value = old_translated_file_dict[key].value\n elif key in new_translated_file_dict:\n translation_value = new_translated_file_dict[key].value\n if translation_value is not None:\n output_file_elements.append(LocalizationEntry(comments, key,\n translation_value))\n f.close()\n write_file_elements_to_strings_file(merged_translated_file,\n output_file_elements)\n",
"role": "user"
},
{
"content": "localization_merge_back",
"role": "assistant"
}
] |
def x(df, category, quantity, category_type='N', title=None, xlabel=None,
    ylabel=None):
    """Draw a box-and-whisker plot of `quantity` grouped by `category`.

    The plot is layered from four Altair charts: the lower whisker, the box
    (q1..q3), the upper whisker, and a tick at the median.

    Parameters
    ----------
    df : data passed to alt.Chart (e.g. a DataFrame).
    category : column used for the x axis.
    quantity : column summarized on the y axis.
    category_type : 'N' (nominal), 'O' (ordinal) or 'T' (temporal).
    title, xlabel, ylabel : optional labels.

    Returns
    -------
    The layered Altair chart.
    """
    if category_type not in ('N', 'O', 'T'):
        raise OneCodexException(
            'If specifying category_type, must be N, O, or T')
    # Aggregate shorthand encodings for the four box-plot statistics.
    agg = {stat: '{}({}):Q'.format(stat, quantity)
           for stat in ('q1', 'q3', 'min', 'max')}
    # Temporal categories are binned to hours/minutes; others pass through.
    if category_type == 'T':
        x_format = 'hoursminutes({}):{}'.format(category, category_type)
    else:
        x_format = '{}:{}'.format(category, category_type)
    whisker_low = alt.Chart(df).mark_rule().encode(
        y=alt.Y(agg['min'], axis=alt.Axis(title=ylabel)),
        y2=agg['q1'],
        x=x_format)
    box = alt.Chart(df).mark_bar(size=35).encode(
        y=agg['q1'],
        y2=agg['q3'],
        x=x_format)
    whisker_high = alt.Chart(df).mark_rule().encode(
        y=agg['max'],
        y2=agg['q3'],
        x=x_format)
    median_tick = alt.Chart(df).mark_tick(color='black', size=35).encode(
        y='median({}):Q'.format(quantity),
        x=alt.X(x_format, axis=alt.Axis(title=xlabel),
                scale=alt.Scale(rangeStep=45)),
        tooltip='median({}):Q'.format(quantity))
    chart = whisker_low + box + whisker_high + median_tick
    if title:
        chart = chart.properties(title=title)
    return chart
|
boxplot
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(df, category, quantity, category_type='N', title=None, xlabel=None,\n ylabel=None):\n if category_type not in ('N', 'O', 'T'):\n raise OneCodexException(\n 'If specifying category_type, must be N, O, or T')\n lower_box = 'q1({}):Q'.format(quantity)\n lower_whisker = 'min({}):Q'.format(quantity)\n upper_box = 'q3({}):Q'.format(quantity)\n upper_whisker = 'max({}):Q'.format(quantity)\n if category_type == 'T':\n x_format = 'hoursminutes({}):{}'.format(category, category_type)\n else:\n x_format = '{}:{}'.format(category, category_type)\n lower_plot = alt.Chart(df).mark_rule().encode(y=alt.Y(lower_whisker,\n axis=alt.Axis(title=ylabel)), y2=lower_box, x=x_format)\n middle_plot = alt.Chart(df).mark_bar(size=35).encode(y=lower_box, y2=\n upper_box, x=x_format)\n upper_plot = alt.Chart(df).mark_rule().encode(y=upper_whisker, y2=\n upper_box, x=x_format)\n middle_tick = alt.Chart(df).mark_tick(color='black', size=35).encode(y=\n 'median({}):Q'.format(quantity), x=alt.X(x_format, axis=alt.Axis(\n title=xlabel), scale=alt.Scale(rangeStep=45)), tooltip=\n 'median({}):Q'.format(quantity))\n chart = lower_plot + middle_plot + upper_plot + middle_tick\n if title:\n chart = chart.properties(title=title)\n return chart\n",
"role": "user"
},
{
"content": "boxplot",
"role": "assistant"
}
] |
def x(tree):
    """Render a scipy-style dendrogram as a horizontal Altair line chart.

    Parameters
    ----------
    tree : dict
        A scipy `dendrogram(..., no_plot=True)`-style dict with 'icoord',
        'dcoord' and 'leaves' entries.

    Returns
    -------
    The Altair chart (leaves on the y axis, merge heights on the x axis,
    both negated so the tree opens to the right).
    """
    # One link (inverted-U) per icoord/dcoord pair; counting them up front
    # avoids relying on the loop variable after the loop, which raised a
    # NameError for an empty tree in the previous version.
    num_links = len(tree['icoord'])
    plot_data = {'x': [], 'y': [], 'o': [], 'b': []}
    for idx, (i, d) in enumerate(zip(tree['icoord'], tree['dcoord'])):
        # Negate both axes to flip the dendrogram's orientation.
        plot_data['x'].extend(map(lambda x: -x, d))
        plot_data['y'].extend(map(lambda x: -x, i))
        # 'o' orders the four vertices of each link; 'b' groups them.
        plot_data['o'].extend([0, 1, 2, 3])
        plot_data['b'].extend([idx] * 4)
    plot_data = pd.DataFrame(plot_data)
    chart = alt.Chart(plot_data, width=100, height=15 * len(tree['leaves']) -
        7.5).mark_line(point=False, opacity=0.5).encode(x=alt.X('x', axis=
        None), y=alt.Y('y', axis=None, scale=alt.Scale(zero=True, nice=
        False)), order='o', color=alt.Color('b:N', scale=alt.Scale(domain=
        list(range(num_links)), range=['black'] * num_links), legend=None))
    return chart
|
dendrogram
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tree):\n plot_data = {'x': [], 'y': [], 'o': [], 'b': []}\n for idx, (i, d) in enumerate(zip(tree['icoord'], tree['dcoord'])):\n plot_data['x'].extend(map(lambda x: -x, d))\n plot_data['y'].extend(map(lambda x: -x, i))\n plot_data['o'].extend([0, 1, 2, 3])\n plot_data['b'].extend([idx] * 4)\n plot_data = pd.DataFrame(plot_data)\n chart = alt.Chart(plot_data, width=100, height=15 * len(tree['leaves']) -\n 7.5).mark_line(point=False, opacity=0.5).encode(x=alt.X('x', axis=\n None), y=alt.Y('y', axis=None, scale=alt.Scale(zero=True, nice=\n False)), order='o', color=alt.Color('b:N', scale=alt.Scale(domain=\n list(range(idx + 1)), range=['black'] * (idx + 1)), legend=None))\n return chart\n",
"role": "user"
},
{
"content": "dendrogram",
"role": "assistant"
}
] |
def x(localization_file, genstrings_err):
    """Append keys flagged by genstrings as having multiple comments.

    genstrings refuses to emit a key that is used with several different
    comments; this parses those warnings out of its stderr output and
    appends each such key (with all of its comments) to `localization_file`.

    NOTE(review): uses `unicode`/`u''` literals, so this code targets
    Python 2 — confirm before porting.

    Parameters
    ----------
    localization_file : path to the .strings file to append to.
    genstrings_err : the stderr text produced by genstrings.
    """
    # Log every genstrings warning except the multiple-comments ones,
    # which are handled (not just reported) below.
    errors_to_log = [line for line in genstrings_err.splitlines() if
        'used with multiple comments' not in line]
    if len(errors_to_log) > 0:
        logging.warning('genstrings warnings:\n%s', '\n'.join(errors_to_log))
    loc_file = open_strings_file(localization_file, 'a')
    # Each match is a 3-tuple because of the nested groups: (key, the full
    # quoted comment list, the last '& "..."' repetition).
    regex_matches = re.findall(
        'Warning: Key "(.*?)" used with multiple comments ("[^"]*" (& "[^"]*")+)'
        , genstrings_err)
    logging.info('Adding multiple comments from genstrings output')
    for regex_match in regex_matches:
        if len(regex_match) == 3:
            key = regex_match[0]
            # Split '"a" & "b" & ...' on '&' and strip the surrounding quotes.
            comments = [comment.strip()[1:-1] for comment in regex_match[1]
                .split('&')]
            logging.info('Found key with %d comments: %s', len(comments), key)
            # The key doubles as its own default value.
            loc_key = LocalizationEntry(comments, key, key)
            loc_file.write(unicode(loc_key))
            loc_file.write(u'\n')
    loc_file.close()
|
add_genstrings_comments_to_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(localization_file, genstrings_err):\n errors_to_log = [line for line in genstrings_err.splitlines() if \n 'used with multiple comments' not in line]\n if len(errors_to_log) > 0:\n logging.warning('genstrings warnings:\\n%s', '\\n'.join(errors_to_log))\n loc_file = open_strings_file(localization_file, 'a')\n regex_matches = re.findall(\n 'Warning: Key \"(.*?)\" used with multiple comments (\"[^\"]*\" (& \"[^\"]*\")+)'\n , genstrings_err)\n logging.info('Adding multiple comments from genstrings output')\n for regex_match in regex_matches:\n if len(regex_match) == 3:\n key = regex_match[0]\n comments = [comment.strip()[1:-1] for comment in regex_match[1]\n .split('&')]\n logging.info('Found key with %d comments: %s', len(comments), key)\n loc_key = LocalizationEntry(comments, key, key)\n loc_file.write(unicode(loc_key))\n loc_file.write(u'\\n')\n loc_file.close()\n",
"role": "user"
},
{
"content": "add_genstrings_comments_to_file",
"role": "assistant"
}
] |
def x(self, rank='auto', metric='braycurtis', title=None, xlabel=None,
    ylabel=None, tooltip=None, return_chart=False, linkage='average', label
    =None):
    """Plot a sample-distance heatmap alongside a clustering dendrogram.

    Samples are hierarchically clustered (via `_cluster_by_sample`) and the
    pairwise distance matrix is rendered as an Altair heatmap whose rows and
    columns follow the cluster ordering; a dendrogram is drawn to its left.

    Parameters
    ----------
    rank : taxonomic rank passed to the clustering step.
    metric : distance metric (e.g. 'braycurtis').
    title, xlabel, ylabel : optional chart labels.
    tooltip : metadata field name or list of names to show on hover.
    return_chart : if True, return the chart instead of displaying it.
    linkage : hierarchical-clustering linkage method.
    label : passed through to `_metadata_fetch` for sample labeling.

    Raises
    ------
    OneCodexException if fewer than 2 valid results are present.
    """
    if len(self._results) < 2:
        raise OneCodexException(
            '`plot_distance` requires 2 or more valid classification results.')
    plot_data = {'1) Label': [], '2) Label': [], 'Distance': [],
        'classification_id': []}
    if tooltip:
        if not isinstance(tooltip, list):
            tooltip = [tooltip]
    else:
        tooltip = []
    tooltip.insert(0, 'Label')
    magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)
    # Each tooltip field appears twice ('1) x' and '2) x'), once for each
    # sample in a heatmap cell.
    formatted_fields = []
    for _, magic_field in magic_fields.items():
        field_group = []
        for i in (1, 2):
            field = '{}) {}'.format(i, magic_field)
            plot_data[field] = []
            field_group.append(field)
        formatted_fields.append(field_group)
    clust = self._cluster_by_sample(rank=rank, metric=metric, linkage=linkage)
    # Build one long-format record per (sample, sample) pair.
    for idx1, id1 in enumerate(clust['dist_matrix'].index):
        for idx2, id2 in enumerate(clust['dist_matrix'].index):
            if idx1 == idx2:
                # Blank out the diagonal (self-distance) in the heatmap.
                plot_data['Distance'].append(np.nan)
            else:
                plot_data['Distance'].append(clust['dist_matrix'].iloc[idx1,
                    idx2])
            plot_data['classification_id'].append(id1)
            for field_group, magic_field in zip(formatted_fields,
                magic_fields.values()):
                plot_data[field_group[0]].append(magic_metadata[magic_field
                    ][id1])
                plot_data[field_group[1]].append(magic_metadata[magic_field
                    ][id2])
    plot_data = pd.DataFrame(data=plot_data)
    # Order both axes by cluster order so the heatmap matches the dendrogram.
    labels_in_order = magic_metadata['Label'][clust['ids_in_order']].tolist()
    alt_kwargs = dict(x=alt.X('1) Label:N', axis=alt.Axis(title=xlabel),
        sort=labels_in_order), y=alt.Y('2) Label:N', axis=alt.Axis(title=
        ylabel, orient='right'), sort=labels_in_order), color='Distance:Q',
        tooltip=list(chain.from_iterable(formatted_fields)) + ['Distance:Q'
        ], href='url:N', url='https://app.onecodex.com/classification/' +
        alt.datum.classification_id)
    # 'url' is not a valid encoding channel; pop it and compute it as a
    # derived field so each cell links to its classification page.
    chart = alt.Chart(plot_data, width=15 * len(clust['dist_matrix'].index),
        height=15 * len(clust['dist_matrix'].index)).transform_calculate(url
        =alt_kwargs.pop('url')).mark_rect().encode(**alt_kwargs)
    if title:
        chart = chart.properties(title=title)
    dendro_chart = dendrogram(clust['scipy_tree'])
    if return_chart:
        return dendro_chart | chart
    else:
        (dendro_chart | chart).display()
|
plot_distance
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rank='auto', metric='braycurtis', title=None, xlabel=None,\n ylabel=None, tooltip=None, return_chart=False, linkage='average', label\n =None):\n if len(self._results) < 2:\n raise OneCodexException(\n '`plot_distance` requires 2 or more valid classification results.')\n plot_data = {'1) Label': [], '2) Label': [], 'Distance': [],\n 'classification_id': []}\n if tooltip:\n if not isinstance(tooltip, list):\n tooltip = [tooltip]\n else:\n tooltip = []\n tooltip.insert(0, 'Label')\n magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)\n formatted_fields = []\n for _, magic_field in magic_fields.items():\n field_group = []\n for i in (1, 2):\n field = '{}) {}'.format(i, magic_field)\n plot_data[field] = []\n field_group.append(field)\n formatted_fields.append(field_group)\n clust = self._cluster_by_sample(rank=rank, metric=metric, linkage=linkage)\n for idx1, id1 in enumerate(clust['dist_matrix'].index):\n for idx2, id2 in enumerate(clust['dist_matrix'].index):\n if idx1 == idx2:\n plot_data['Distance'].append(np.nan)\n else:\n plot_data['Distance'].append(clust['dist_matrix'].iloc[idx1,\n idx2])\n plot_data['classification_id'].append(id1)\n for field_group, magic_field in zip(formatted_fields,\n magic_fields.values()):\n plot_data[field_group[0]].append(magic_metadata[magic_field\n ][id1])\n plot_data[field_group[1]].append(magic_metadata[magic_field\n ][id2])\n plot_data = pd.DataFrame(data=plot_data)\n labels_in_order = magic_metadata['Label'][clust['ids_in_order']].tolist()\n alt_kwargs = dict(x=alt.X('1) Label:N', axis=alt.Axis(title=xlabel),\n sort=labels_in_order), y=alt.Y('2) Label:N', axis=alt.Axis(title=\n ylabel, orient='right'), sort=labels_in_order), color='Distance:Q',\n tooltip=list(chain.from_iterable(formatted_fields)) + ['Distance:Q'\n ], href='url:N', url='https://app.onecodex.com/classification/' +\n alt.datum.classification_id)\n chart = alt.Chart(plot_data, width=15 * len(clust['dist_matrix'].index),\n 
height=15 * len(clust['dist_matrix'].index)).transform_calculate(url\n =alt_kwargs.pop('url')).mark_rect().encode(**alt_kwargs)\n if title:\n chart = chart.properties(title=title)\n dendro_chart = dendrogram(clust['scipy_tree'])\n if return_chart:\n return dendro_chart | chart\n else:\n (dendro_chart | chart).display()\n",
"role": "user"
},
{
"content": "plot_distance",
"role": "assistant"
}
] |
def x(self, rank='auto', metric='braycurtis', method='pcoa', title=None,
    xlabel=None, ylabel=None, color=None, size=None, tooltip=None,
    return_chart=False, label=None):
    """Plot a 2-D ordination of samples from their pairwise distances.

    Supports two embeddings: metric MDS via sklearn's SMACOF ('smacof') or
    principal coordinates analysis via skbio ('pcoa'). Points link to their
    classification pages on the One Codex app.

    Parameters
    ----------
    rank, metric : passed to `_compute_distance`.
    method : 'pcoa' or 'smacof'.
    title, xlabel, ylabel : optional chart labels; axis labels default to
        the component name plus r² (smacof) or % variance explained (pcoa).
    color, size : metadata fields mapped to point color/size.
    tooltip : metadata field name or list of names to show on hover.
    return_chart : if True, return the chart instead of displaying it.
    label : passed through to `_metadata_fetch` for sample labeling.

    Raises
    ------
    OneCodexException if fewer than 2 valid results are present or method
    is not recognized.
    """
    if len(self._results) < 2:
        raise OneCodexException(
            '`plot_mds` requires 2 or more valid classification results.')
    dists = self._compute_distance(rank, metric).to_data_frame()
    if tooltip:
        if not isinstance(tooltip, list):
            tooltip = [tooltip]
    else:
        tooltip = []
    tooltip.insert(0, 'Label')
    # Ensure color/size fields also show up in the hover tooltip.
    if color and color not in tooltip:
        tooltip.insert(1, color)
    if size and size not in tooltip:
        tooltip.insert(2, size)
    magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)
    if method == 'smacof':
        x_field = 'MDS1'
        y_field = 'MDS2'
        # Fixed seed keeps the embedding deterministic across calls.
        seed = np.random.RandomState(seed=3)
        mds = manifold.MDS(max_iter=3000, eps=1e-12, random_state=seed,
            dissimilarity='precomputed', n_jobs=1)
        pos = mds.fit(dists).embedding_
        plot_data = pd.DataFrame(pos, columns=[x_field, y_field], index=
            dists.index)
        # Normalize each axis to [-1, 1].
        plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)
        # Per-axis r²: correlation between the original distances and the
        # embedded distances with that axis zeroed out.
        r_squared = []
        for axis in [0, 1]:
            mds_dist = pos.copy()
            mds_dist[:, axis] = 0
            mds_dist = squareform(euclidean_distances(mds_dist).round(6))
            r_squared.append(pearsonr(mds_dist, squareform(dists))[0])
        x_extra_label = 'r² = %.02f' % (r_squared[0],)
        y_extra_label = 'r² = %.02f' % (r_squared[1],)
    elif method == 'pcoa':
        # skbio's pcoa warns on slightly non-symmetric matrices; rounding
        # plus suppressing the warning keeps the output clean.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            ord_result = ordination.pcoa(dists.round(6))
        # Keep only the first two principal coordinates.
        plot_data = ord_result.samples.iloc[:, [0, 1]]
        plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)
        plot_data.index = dists.index
        x_field, y_field = plot_data.columns.tolist()
        x_extra_label = '%0.02f%%' % (ord_result.proportion_explained[0] * 100,
            )
        y_extra_label = '%0.02f%%' % (ord_result.proportion_explained[1] * 100,
            )
    else:
        raise OneCodexException('MDS method must be one of: smacof, pcoa')
    if xlabel is None:
        xlabel = '{} ({})'.format(x_field, x_extra_label)
    if ylabel is None:
        ylabel = '{} ({})'.format(y_field, y_extra_label)
    plot_data = pd.concat([plot_data, magic_metadata], axis=1).reset_index()
    alt_kwargs = dict(x=alt.X(x_field, axis=alt.Axis(title=xlabel)), y=alt.
        Y(y_field, axis=alt.Axis(title=ylabel)), tooltip=[magic_fields[t] for
        t in tooltip], href='url:N', url=
        'https://app.onecodex.com/classification/' + alt.datum.
        classification_id)
    if color:
        alt_kwargs['color'] = magic_fields[color]
    if size:
        alt_kwargs['size'] = magic_fields[size]
    # 'url' is not an encoding channel; pop it and compute it as a derived
    # field so each point links to its classification page.
    chart = alt.Chart(plot_data).transform_calculate(url=alt_kwargs.pop('url')
        ).mark_circle().encode(**alt_kwargs)
    if title:
        chart = chart.properties(title=title)
    if return_chart:
        return chart
    else:
        chart.interactive().display()
|
plot_mds
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rank='auto', metric='braycurtis', method='pcoa', title=None,\n xlabel=None, ylabel=None, color=None, size=None, tooltip=None,\n return_chart=False, label=None):\n if len(self._results) < 2:\n raise OneCodexException(\n '`plot_mds` requires 2 or more valid classification results.')\n dists = self._compute_distance(rank, metric).to_data_frame()\n if tooltip:\n if not isinstance(tooltip, list):\n tooltip = [tooltip]\n else:\n tooltip = []\n tooltip.insert(0, 'Label')\n if color and color not in tooltip:\n tooltip.insert(1, color)\n if size and size not in tooltip:\n tooltip.insert(2, size)\n magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)\n if method == 'smacof':\n x_field = 'MDS1'\n y_field = 'MDS2'\n seed = np.random.RandomState(seed=3)\n mds = manifold.MDS(max_iter=3000, eps=1e-12, random_state=seed,\n dissimilarity='precomputed', n_jobs=1)\n pos = mds.fit(dists).embedding_\n plot_data = pd.DataFrame(pos, columns=[x_field, y_field], index=\n dists.index)\n plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)\n r_squared = []\n for axis in [0, 1]:\n mds_dist = pos.copy()\n mds_dist[:, axis] = 0\n mds_dist = squareform(euclidean_distances(mds_dist).round(6))\n r_squared.append(pearsonr(mds_dist, squareform(dists))[0])\n x_extra_label = 'r² = %.02f' % (r_squared[0],)\n y_extra_label = 'r² = %.02f' % (r_squared[1],)\n elif method == 'pcoa':\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n ord_result = ordination.pcoa(dists.round(6))\n plot_data = ord_result.samples.iloc[:, [0, 1]]\n plot_data = plot_data.div(plot_data.abs().max(axis=0), axis=1)\n plot_data.index = dists.index\n x_field, y_field = plot_data.columns.tolist()\n x_extra_label = '%0.02f%%' % (ord_result.proportion_explained[0] * 100,\n )\n y_extra_label = '%0.02f%%' % (ord_result.proportion_explained[1] * 100,\n )\n else:\n raise OneCodexException('MDS method must be one of: smacof, pcoa')\n if xlabel is None:\n xlabel = '{} 
({})'.format(x_field, x_extra_label)\n if ylabel is None:\n ylabel = '{} ({})'.format(y_field, y_extra_label)\n plot_data = pd.concat([plot_data, magic_metadata], axis=1).reset_index()\n alt_kwargs = dict(x=alt.X(x_field, axis=alt.Axis(title=xlabel)), y=alt.\n Y(y_field, axis=alt.Axis(title=ylabel)), tooltip=[magic_fields[t] for\n t in tooltip], href='url:N', url=\n 'https://app.onecodex.com/classification/' + alt.datum.\n classification_id)\n if color:\n alt_kwargs['color'] = magic_fields[color]\n if size:\n alt_kwargs['size'] = magic_fields[size]\n chart = alt.Chart(plot_data).transform_calculate(url=alt_kwargs.pop('url')\n ).mark_circle().encode(**alt_kwargs)\n if title:\n chart = chart.properties(title=title)\n if return_chart:\n return chart\n else:\n chart.interactive().display()\n",
"role": "user"
},
{
"content": "plot_mds",
"role": "assistant"
}
] |
def x(self, path=None, file_obj=None, progressbar=False):
    """Download this file to disk or into an open file object.

    Parameters
    ----------
    path : destination path; defaults to `self.filename` in the current
        working directory. Mutually exclusive with `file_obj`.
    file_obj : an open, writable binary file object. Mutually exclusive
        with `path`.
    progressbar : if True, show a click progress bar sized by `self.size`.

    Returns
    -------
    The destination path (or None when writing to `file_obj`).

    Raises
    ------
    OneCodexException on bad arguments, an existing destination file, or
    any HTTP error (401/402/403 get specific messages).
    """
    if path and file_obj:
        raise OneCodexException('Please specify only one of: path, file_obj')
    if path is None and file_obj is None:
        path = os.path.join(os.getcwd(), self.filename)
    if path and os.path.exists(path):
        raise OneCodexException('{} already exists! Will not overwrite.'.
            format(path))
    try:
        url_data = self._resource.download_uri()
        resp = requests.get(url_data['download_uri'], stream=True)
        # requests does not raise on 4xx/5xx by itself; without this call
        # the HTTPError handlers below were dead code and an error body
        # would have been written to disk as if it were file content.
        resp.raise_for_status()
        with (open(path, 'wb') if path else file_obj) as f_out:
            if progressbar:
                with click.progressbar(length=self.size, label=self.filename
                    ) as bar:
                    for data in resp.iter_content(chunk_size=1024):
                        bar.update(len(data))
                        f_out.write(data)
            else:
                for data in resp.iter_content(chunk_size=1024):
                    f_out.write(data)
    except KeyboardInterrupt:
        # Don't leave a partial download behind.
        if path:
            os.remove(path)
        raise
    except requests.exceptions.HTTPError as exc:
        if exc.response.status_code == 401:
            raise OneCodexException('You must be logged in to download files.')
        elif exc.response.status_code == 402:
            raise OneCodexException(
                'You must either have a premium platform account or be in a notebook environment to download files.'
                )
        elif exc.response.status_code == 403:
            raise OneCodexException(
                'You are not authorized to download this file.')
        else:
            raise OneCodexException(
                'Download failed with an HTTP status code {}.'.format(exc.
                response.status_code))
    return path
|
download
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, path=None, file_obj=None, progressbar=False):\n if path and file_obj:\n raise OneCodexException('Please specify only one of: path, file_obj')\n if path is None and file_obj is None:\n path = os.path.join(os.getcwd(), self.filename)\n if path and os.path.exists(path):\n raise OneCodexException('{} already exists! Will not overwrite.'.\n format(path))\n try:\n url_data = self._resource.download_uri()\n resp = requests.get(url_data['download_uri'], stream=True)\n with (open(path, 'wb') if path else file_obj) as f_out:\n if progressbar:\n with click.progressbar(length=self.size, label=self.filename\n ) as bar:\n for data in resp.iter_content(chunk_size=1024):\n bar.update(len(data))\n f_out.write(data)\n else:\n for data in resp.iter_content(chunk_size=1024):\n f_out.write(data)\n except KeyboardInterrupt:\n if path:\n os.remove(path)\n raise\n except requests.exceptions.HTTPError as exc:\n if exc.response.status_code == 401:\n raise OneCodexException('You must be logged in to download files.')\n elif exc.response.status_code == 402:\n raise OneCodexException(\n 'You must either have a premium platform account or be in a notebook environment to download files.'\n )\n elif exc.response.status_code == 403:\n raise OneCodexException(\n 'You are not authorized to download this file.')\n else:\n raise OneCodexException(\n 'Download failed with an HTTP status code {}.'.format(exc.\n response.status_code))\n return path\n",
"role": "user"
},
{
"content": "download",
"role": "assistant"
}
] |
def x(cls, file_path, progressbar=None):
    """Upload a document file and return the resulting Documents object.

    Parameters
    ----------
    file_path : path of the file to upload.
    progressbar : optional progress bar passed through to the uploader.
    """
    resource = cls._resource
    # upload_document returns the new document's id; fetch the full record.
    new_doc_id = upload_document(file_path, resource._client.session,
        resource, progressbar=progressbar)
    return cls.get(new_doc_id)
|
upload
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(cls, file_path, progressbar=None):\n res = cls._resource\n doc_id = upload_document(file_path, res._client.session, res,\n progressbar=progressbar)\n return cls.get(doc_id)\n",
"role": "user"
},
{
"content": "upload",
"role": "assistant"
}
] |
def x(localization_bundle_path):
    """Generate .pending diff files for every language in a bundle.

    For each .strings file in the default-language directory and each other
    language directory, creates the destination .strings file if missing and
    writes the entries still needing translation to '<dest>.pending'
    (honoring '<dest>.excluded').
    """
    logging.info('Preparing for translation..')
    default_dir = os.path.join(localization_bundle_path,
        DEFAULT_LANGUAGE_DIRECTORY_NAME)
    # Source .strings files come from the default-language directory only.
    source_files = [name for name in os.listdir(default_dir)
                    if name.endswith('.strings')]
    # Every sibling directory except the default (and hidden entries) is a
    # target language.
    target_langs = [name for name in os.listdir(localization_bundle_path)
                    if name != DEFAULT_LANGUAGE_DIRECTORY_NAME and
                    not name.startswith('.')]
    for strings_file in source_files:
        strings_path = os.path.join(default_dir, strings_file)
        for lang_dir in target_langs:
            dest_strings_path = os.path.join(localization_bundle_path,
                lang_dir, strings_file)
            pending_path = dest_strings_path + '.pending'
            excluded_path = dest_strings_path + '.excluded'
            # Make sure the destination exists so the diff has a baseline.
            if not os.path.exists(dest_strings_path):
                open_strings_file(dest_strings_path, 'a').close()
            logging.info('Preparing diff for %s in %s', lang_dir, pending_path)
            localization_diff(strings_path, dest_strings_path,
                excluded_path, pending_path)
|
prepare_for_translation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(localization_bundle_path):\n logging.info('Preparing for translation..')\n for strings_file in os.listdir(os.path.join(localization_bundle_path,\n DEFAULT_LANGUAGE_DIRECTORY_NAME)):\n if not strings_file.endswith('.strings'):\n continue\n strings_path = os.path.join(localization_bundle_path,\n DEFAULT_LANGUAGE_DIRECTORY_NAME, strings_file)\n for lang_dir in os.listdir(localization_bundle_path):\n if (lang_dir == DEFAULT_LANGUAGE_DIRECTORY_NAME or lang_dir.\n startswith('.')):\n continue\n dest_strings_path = os.path.join(localization_bundle_path,\n lang_dir, strings_file)\n pending_path = dest_strings_path + '.pending'\n excluded_path = dest_strings_path + '.excluded'\n if not os.path.exists(dest_strings_path):\n open_strings_file(dest_strings_path, 'a').close()\n logging.info('Preparing diff for %s in %s', lang_dir, pending_path)\n localization_diff(strings_path, dest_strings_path,\n excluded_path, pending_path)\n",
"role": "user"
},
{
"content": "prepare_for_translation",
"role": "assistant"
}
] |
def x(ctx, api_key, no_pprint, verbose, telemetry):
    """Root CLI group: stash global options on the click context.

    Parameters
    ----------
    ctx : click.Context for this invocation.
    api_key : One Codex API key (may be None).
    no_pprint : disable pretty-printing of JSON output.
    verbose : enable debug-level logging.
    telemetry : whether telemetry reporting is enabled.
    """
    # Monkey-patch click so that usage errors print the full help text
    # instead of the short usage line.
    click.Context.get_usage = click.Context.get_help
    # Shared state for subcommands, accessed via ctx.obj.
    ctx.obj = {}
    ctx.obj['API_KEY'] = api_key
    ctx.obj['NOPPRINT'] = no_pprint
    ctx.obj['TELEMETRY'] = telemetry
    if verbose:
        log.setLevel(logging.DEBUG)
    # The upload subcommand handles its own platform security warning.
    if ctx.invoked_subcommand != 'upload':
        warn_if_insecure_platform()
|
onecodex
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, api_key, no_pprint, verbose, telemetry):\n click.Context.get_usage = click.Context.get_help\n ctx.obj = {}\n ctx.obj['API_KEY'] = api_key\n ctx.obj['NOPPRINT'] = no_pprint\n ctx.obj['TELEMETRY'] = telemetry\n if verbose:\n log.setLevel(logging.DEBUG)\n if ctx.invoked_subcommand != 'upload':\n warn_if_insecure_platform()\n",
"role": "user"
},
{
"content": "onecodex",
"role": "assistant"
}
] |
def x(ctx, max_threads, files):
    """Upload one or more document files via a thread pool.

    Shows help and returns when no files are given; otherwise uploads all
    files with a shared progress bar sized by their total byte count.
    """
    if not files:
        click.echo(ctx.get_help())
        return
    file_list = list(files)
    total_bytes = sum(_file_size(name) for name in file_list)
    bar = click.progressbar(length=total_bytes, label='Uploading... ')
    run_via_threadpool(ctx.obj['API'].Documents.upload, file_list, {
        'progressbar': bar}, max_threads=max_threads, graceful_exit=False)
|
documents_upload
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, max_threads, files):\n if len(files) == 0:\n click.echo(ctx.get_help())\n return\n files = list(files)\n bar = click.progressbar(length=sum([_file_size(x) for x in files]),\n label='Uploading... ')\n run_via_threadpool(ctx.obj['API'].Documents.upload, files, {\n 'progressbar': bar}, max_threads=max_threads, graceful_exit=False)\n",
"role": "user"
},
{
"content": "documents_upload",
"role": "assistant"
}
] |
def x(ctx, classifications, results, readlevel, readlevel_path):
    """CLI entry point for fetching classification data.

    With neither --results nor --readlevel, fetches the classification
    records themselves. With exactly one of the two flags (and exactly one
    classification id), fetches either the results JSON or the read-level
    TSV. Requesting both at once is an error.
    """
    def _get_single_classification():
        # Fetch the one requested classification; log and return None on 404.
        classification = ctx.obj['API'].Classifications.get(classifications[0])
        if not classification:
            log.error('Could not find classification {} (404 status code)'.
                format(classifications[0]))
        return classification
    if not readlevel and not results:
        cli_resource_fetcher(ctx, 'classifications', classifications)
    elif not readlevel and results:
        if len(classifications) != 1:
            log.error(
                'Can only request results data on one Classification at a time'
                )
        else:
            classification = _get_single_classification()
            if classification:
                results = classification.results(json=True)
                pprint(results, ctx.obj['NOPPRINT'])
    elif readlevel is not None and not results:
        if len(classifications) != 1:
            log.error(
                'Can only request read-level data on one Classification at a time'
                )
        else:
            classification = _get_single_classification()
            if classification:
                tsv_url = classification._readlevel()['url']
                log.info('Downloading tsv data from: {}'.format(tsv_url))
                download_file_helper(tsv_url, readlevel_path)
    else:
        log.error(
            'Can only request one of read-level data or results data at a time'
            )
|
classifications
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, classifications, results, readlevel, readlevel_path):\n if not readlevel and not results:\n cli_resource_fetcher(ctx, 'classifications', classifications)\n elif not readlevel and results:\n if len(classifications) != 1:\n log.error(\n 'Can only request results data on one Classification at a time'\n )\n else:\n classification = ctx.obj['API'].Classifications.get(classifications\n [0])\n if not classification:\n log.error('Could not find classification {} (404 status code)'\n .format(classifications[0]))\n return\n results = classification.results(json=True)\n pprint(results, ctx.obj['NOPPRINT'])\n elif readlevel is not None and not results:\n if len(classifications) != 1:\n log.error(\n 'Can only request read-level data on one Classification at a time'\n )\n else:\n classification = ctx.obj['API'].Classifications.get(classifications\n [0])\n if not classification:\n log.error('Could not find classification {} (404 status code)'\n .format(classifications[0]))\n return\n tsv_url = classification._readlevel()['url']\n log.info('Downloading tsv data from: {}'.format(tsv_url))\n download_file_helper(tsv_url, readlevel_path)\n else:\n log.error(\n 'Can only request one of read-level data or results data at a time'\n )\n",
"role": "user"
},
{
"content": "classifications",
"role": "assistant"
}
] |
def x(ctx, files, max_threads, prompt, forward, reverse, tags, metadata,
project_id, coerce_ascii):
appendables = {}
if tags:
appendables['tags'] = []
for tag in tags:
appendables['tags'].append(tag)
if metadata:
appendables['metadata'] = {}
for metadata_kv in metadata:
split_metadata = metadata_kv.split('=', 1)
if len(split_metadata) > 1:
metadata_value = split_metadata[1]
appendables['metadata'][split_metadata[0]] = metadata_value
appendables = validate_appendables(appendables, ctx.obj['API'])
if (forward or reverse) and not (forward and reverse):
click.echo('You must specify both forward and reverse files', err=True)
ctx.exit(1)
if forward and reverse:
if len(files) > 0:
click.echo(
'You may not pass a FILES argument when using the --forward and --reverse options.'
, err=True)
ctx.exit(1)
files = [(forward, reverse)]
elif len(files) == 0:
click.echo(ctx.get_help())
return
else:
files = list(files)
paired_files = []
single_files = set(files)
for filename in files:
pair = re.sub('[._][Rr]1[._]', lambda x: x.group().replace('1',
'2'), filename)
if pair != filename and os.path.exists(pair):
if not prompt and pair not in single_files:
continue
paired_files.append((filename, pair))
if pair in single_files:
single_files.remove(pair)
single_files.remove(filename)
auto_pair = True
if prompt and len(paired_files) > 0:
pair_list = ''
for p in paired_files:
pair_list += '\n {} & {}'.format(os.path.basename(p[0]),
os.path.basename(p[1]))
answer = click.confirm(
"""It appears there are paired files:{}
Interleave them after upload?"""
.format(pair_list), default='Y')
if not answer:
auto_pair = False
if auto_pair:
files = paired_files + list(single_files)
total_size = sum([(_file_size(x[0], uncompressed=True) + _file_size(x[1
], uncompressed=True) if isinstance(x, tuple) else _file_size(x,
uncompressed=False)) for x in files])
upload_kwargs = {'metadata': appendables['valid_metadata'], 'tags':
appendables['valid_tags'], 'project': project_id, 'coerce_ascii':
coerce_ascii, 'progressbar': progressbar(length=total_size, label=
'Uploading...')}
run_via_threadpool(ctx.obj['API'].Samples.upload, files, upload_kwargs,
max_threads=max_threads, graceful_exit=False)
|
upload
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, files, max_threads, prompt, forward, reverse, tags, metadata,\n project_id, coerce_ascii):\n appendables = {}\n if tags:\n appendables['tags'] = []\n for tag in tags:\n appendables['tags'].append(tag)\n if metadata:\n appendables['metadata'] = {}\n for metadata_kv in metadata:\n split_metadata = metadata_kv.split('=', 1)\n if len(split_metadata) > 1:\n metadata_value = split_metadata[1]\n appendables['metadata'][split_metadata[0]] = metadata_value\n appendables = validate_appendables(appendables, ctx.obj['API'])\n if (forward or reverse) and not (forward and reverse):\n click.echo('You must specify both forward and reverse files', err=True)\n ctx.exit(1)\n if forward and reverse:\n if len(files) > 0:\n click.echo(\n 'You may not pass a FILES argument when using the --forward and --reverse options.'\n , err=True)\n ctx.exit(1)\n files = [(forward, reverse)]\n elif len(files) == 0:\n click.echo(ctx.get_help())\n return\n else:\n files = list(files)\n paired_files = []\n single_files = set(files)\n for filename in files:\n pair = re.sub('[._][Rr]1[._]', lambda x: x.group().replace('1',\n '2'), filename)\n if pair != filename and os.path.exists(pair):\n if not prompt and pair not in single_files:\n continue\n paired_files.append((filename, pair))\n if pair in single_files:\n single_files.remove(pair)\n single_files.remove(filename)\n auto_pair = True\n if prompt and len(paired_files) > 0:\n pair_list = ''\n for p in paired_files:\n pair_list += '\\n {} & {}'.format(os.path.basename(p[0]),\n os.path.basename(p[1]))\n answer = click.confirm(\n \"\"\"It appears there are paired files:{}\nInterleave them after upload?\"\"\"\n .format(pair_list), default='Y')\n if not answer:\n auto_pair = False\n if auto_pair:\n files = paired_files + list(single_files)\n total_size = sum([(_file_size(x[0], uncompressed=True) + _file_size(x[1\n ], uncompressed=True) if isinstance(x, tuple) else _file_size(x,\n uncompressed=False)) for x in files])\n upload_kwargs = 
{'metadata': appendables['valid_metadata'], 'tags':\n appendables['valid_tags'], 'project': project_id, 'coerce_ascii':\n coerce_ascii, 'progressbar': progressbar(length=total_size, label=\n 'Uploading...')}\n run_via_threadpool(ctx.obj['API'].Samples.upload, files, upload_kwargs,\n max_threads=max_threads, graceful_exit=False)\n",
"role": "user"
},
{
"content": "upload",
"role": "assistant"
}
] |
def x(ctx):
base_url = os.environ.get('ONE_CODEX_API_BASE', 'https://app.onecodex.com')
if not ctx.obj['API_KEY']:
_login(base_url)
else:
email = _login(base_url, api_key=ctx.obj['API_KEY'])
ocx = Api(api_key=ctx.obj['API_KEY'], telemetry=ctx.obj['TELEMETRY'])
if ocx._client.Account.instances()['email'] != email:
click.echo(
'Your login credentials do not match the provided email!',
err=True)
_remove_creds()
ctx.exit(1)
|
login
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx):\n base_url = os.environ.get('ONE_CODEX_API_BASE', 'https://app.onecodex.com')\n if not ctx.obj['API_KEY']:\n _login(base_url)\n else:\n email = _login(base_url, api_key=ctx.obj['API_KEY'])\n ocx = Api(api_key=ctx.obj['API_KEY'], telemetry=ctx.obj['TELEMETRY'])\n if ocx._client.Account.instances()['email'] != email:\n click.echo(\n 'Your login credentials do not match the provided email!',\n err=True)\n _remove_creds()\n ctx.exit(1)\n",
"role": "user"
},
{
"content": "login",
"role": "assistant"
}
] |
def x(classification, parent=False):
tax_id_map = {}
if parent:
for row in classification.results()['table']:
if row['parent_tax_id'] is not None and row['tax_id'] is not None:
tax_id_map[row['tax_id']] = row['parent_tax_id']
else:
for row in classification.results()['table']:
if row['parent_tax_id'] is not None and row['tax_id'] is not None:
try:
tax_id_map[row['parent_tax_id']].add(row['tax_id'])
except KeyError:
tax_id_map[row['parent_tax_id']] = set([row['tax_id']])
return tax_id_map
|
make_taxonomy_dict
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(classification, parent=False):\n tax_id_map = {}\n if parent:\n for row in classification.results()['table']:\n if row['parent_tax_id'] is not None and row['tax_id'] is not None:\n tax_id_map[row['tax_id']] = row['parent_tax_id']\n else:\n for row in classification.results()['table']:\n if row['parent_tax_id'] is not None and row['tax_id'] is not None:\n try:\n tax_id_map[row['parent_tax_id']].add(row['tax_id'])\n except KeyError:\n tax_id_map[row['parent_tax_id']] = set([row['tax_id']])\n return tax_id_map\n",
"role": "user"
},
{
"content": "make_taxonomy_dict",
"role": "assistant"
}
] |
def x(tax_id_map, tax_id, parent=False):
if parent:
pass
else:
def x(tax_id, visited):
try:
children = [tax_id] + list(tax_id_map[tax_id])
except KeyError:
children = [tax_id]
for child in children:
if child not in visited:
visited.append(child)
children.extend(_child_recurse(child, visited))
return children
return list(set(_child_recurse(tax_id, [])))
|
recurse_taxonomy_map
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tax_id_map, tax_id, parent=False):\n if parent:\n pass\n else:\n\n def x(tax_id, visited):\n try:\n children = [tax_id] + list(tax_id_map[tax_id])\n except KeyError:\n children = [tax_id]\n for child in children:\n if child not in visited:\n visited.append(child)\n children.extend(_child_recurse(child, visited))\n return children\n return list(set(_child_recurse(tax_id, [])))\n",
"role": "user"
},
{
"content": "recurse_taxonomy_map",
"role": "assistant"
}
] |
def x(schema, ref_resolver=None, root=None):
if isinstance(schema, dict):
if len(schema) == 1 and '$ref' in schema and isinstance(schema[
'$ref'], six.string_types):
reference = schema['$ref']
if reference.startswith('#'):
return root
return ref_resolver(reference)
resolved = {}
for k, v in schema.items():
resolved[k] = schema_resolve_refs(v, ref_resolver=ref_resolver,
root=root if root is not None else resolved)
return resolved
if isinstance(schema, (list, tuple)):
return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root
) for v in schema]
return schema
|
schema_resolve_refs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(schema, ref_resolver=None, root=None):\n if isinstance(schema, dict):\n if len(schema) == 1 and '$ref' in schema and isinstance(schema[\n '$ref'], six.string_types):\n reference = schema['$ref']\n if reference.startswith('#'):\n return root\n return ref_resolver(reference)\n resolved = {}\n for k, v in schema.items():\n resolved[k] = schema_resolve_refs(v, ref_resolver=ref_resolver,\n root=root if root is not None else resolved)\n return resolved\n if isinstance(schema, (list, tuple)):\n return [schema_resolve_refs(v, ref_resolver=ref_resolver, root=root\n ) for v in schema]\n return schema\n",
"role": "user"
},
{
"content": "schema_resolve_refs",
"role": "assistant"
}
] |
def x(text=None, label=None):
if text is None and label is None:
raise OneCodexException('Please specify at least one of: text, label')
try:
ipy = get_ipython()
ref_list = ipy.meta.get('references', {})
except NameError:
raise OneCodexException('Must be run from within IPython')
def x(ref_num):
return '<sup class="reference">{}</sup>'.format(ref_num)
if text is not None:
for ref_label, (ref_num, ref_text) in ref_list.items():
if text == ref_text:
if label is not None and label != ref_label:
raise OneCodexException(
'Citation already in use with label={}'.format(
ref_label))
else:
break
else:
if label is not None and label in ref_list.keys():
raise OneCodexException('Citation label={} already in use'.
format(label))
if not ref_list:
ref_num = 1
else:
ref_num = max([x[0] for x in ref_list.values()]) + 1
if label is None:
ref_label = ref_num
else:
ref_label = label
ref_list[ref_label] = ref_num, text
ipy.meta['references'] = ref_list
return to_html(ref_num)
elif label is not None:
if label not in ref_list.keys():
raise OneCodexException('Cannot find citation with label={}'.
format(label))
return to_html(ref_list[label][0])
|
reference
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text=None, label=None):\n if text is None and label is None:\n raise OneCodexException('Please specify at least one of: text, label')\n try:\n ipy = get_ipython()\n ref_list = ipy.meta.get('references', {})\n except NameError:\n raise OneCodexException('Must be run from within IPython')\n\n def x(ref_num):\n return '<sup class=\"reference\">{}</sup>'.format(ref_num)\n if text is not None:\n for ref_label, (ref_num, ref_text) in ref_list.items():\n if text == ref_text:\n if label is not None and label != ref_label:\n raise OneCodexException(\n 'Citation already in use with label={}'.format(\n ref_label))\n else:\n break\n else:\n if label is not None and label in ref_list.keys():\n raise OneCodexException('Citation label={} already in use'.\n format(label))\n if not ref_list:\n ref_num = 1\n else:\n ref_num = max([x[0] for x in ref_list.values()]) + 1\n if label is None:\n ref_label = ref_num\n else:\n ref_label = label\n ref_list[ref_label] = ref_num, text\n ipy.meta['references'] = ref_list\n return to_html(ref_num)\n elif label is not None:\n if label not in ref_list.keys():\n raise OneCodexException('Cannot find citation with label={}'.\n format(label))\n return to_html(ref_list[label][0])\n",
"role": "user"
},
{
"content": "reference",
"role": "assistant"
}
] |
def x(self, name, schema, resource_cls=None):
cls = type(str(upper_camel_case(name)), (resource_cls or Resource,
collections.MutableMapping), {'__doc__': schema.get('description', '')}
)
cls._schema = schema
cls._client = self
cls._links = links = {}
for link_schema in schema['links']:
link = Link(self, rel=link_schema['rel'], href=link_schema['href'],
method=link_schema['method'], schema=link_schema.get('schema',
None), target_schema=link_schema.get('targetSchema', None))
if link.rel in ('self', 'instances', 'create', 'update', 'destroy'):
setattr(cls, '_{}'.format(link.rel), link)
links[link.rel] = link
if link.rel != 'update':
setattr(cls, snake_case(link.rel), link)
for property_name, property_schema in schema.get('properties', {}).items():
if property_name.startswith('$'):
continue
if property_schema.get('readOnly', False):
setattr(cls, property_name, property(fget=partial(lambda name,
obj: getitem(obj, name), property_name), doc=
property_schema.get('description', None)))
else:
setattr(cls, property_name, property(fget=partial(lambda name,
obj: getitem(obj, name), property_name), fset=partial(lambda
name, obj, value: setitem(obj, name, value), property_name),
fdel=partial(lambda name, obj: delitem(obj, name),
property_name), doc=property_schema.get('description', None)))
root = None
if 'instances' in links:
root = cls._instances.href
elif 'self' in links:
root = cls._self.href[:cls._self.href.rfind('/')]
else:
root = self._root_path + '/' + name.replace('_', '-')
self._resources[root] = cls
return cls
|
resource_factory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name, schema, resource_cls=None):\n cls = type(str(upper_camel_case(name)), (resource_cls or Resource,\n collections.MutableMapping), {'__doc__': schema.get('description', '')}\n )\n cls._schema = schema\n cls._client = self\n cls._links = links = {}\n for link_schema in schema['links']:\n link = Link(self, rel=link_schema['rel'], href=link_schema['href'],\n method=link_schema['method'], schema=link_schema.get('schema',\n None), target_schema=link_schema.get('targetSchema', None))\n if link.rel in ('self', 'instances', 'create', 'update', 'destroy'):\n setattr(cls, '_{}'.format(link.rel), link)\n links[link.rel] = link\n if link.rel != 'update':\n setattr(cls, snake_case(link.rel), link)\n for property_name, property_schema in schema.get('properties', {}).items():\n if property_name.startswith('$'):\n continue\n if property_schema.get('readOnly', False):\n setattr(cls, property_name, property(fget=partial(lambda name,\n obj: getitem(obj, name), property_name), doc=\n property_schema.get('description', None)))\n else:\n setattr(cls, property_name, property(fget=partial(lambda name,\n obj: getitem(obj, name), property_name), fset=partial(lambda\n name, obj, value: setitem(obj, name, value), property_name),\n fdel=partial(lambda name, obj: delitem(obj, name),\n property_name), doc=property_schema.get('description', None)))\n root = None\n if 'instances' in links:\n root = cls._instances.href\n elif 'self' in links:\n root = cls._self.href[:cls._self.href.rfind('/')]\n else:\n root = self._root_path + '/' + name.replace('_', '-')\n self._resources[root] = cls\n return cls\n",
"role": "user"
},
{
"content": "resource_factory",
"role": "assistant"
}
] |
def x(directory, exclude_dirs, special_ui_components_prefix):
result = []
for ib_file_path in find_files(directory, ['.xib', '.storyboard'],
exclude_dirs):
result += extract_string_pairs_in_ib_file(ib_file_path,
special_ui_components_prefix)
return result
|
extract_string_pairs_in_dir
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(directory, exclude_dirs, special_ui_components_prefix):\n result = []\n for ib_file_path in find_files(directory, ['.xib', '.storyboard'],\n exclude_dirs):\n result += extract_string_pairs_in_ib_file(ib_file_path,\n special_ui_components_prefix)\n return result\n",
"role": "user"
},
{
"content": "extract_string_pairs_in_dir",
"role": "assistant"
}
] |
def x(element):
element_entry_comment = get_element_attribute_or_empty(element, 'userLabel'
)
if element_entry_comment == '':
try:
element_entry_comment = element.getElementsByTagName('string')[0
].firstChild.nodeValue
except Exception:
element_entry_comment = ''
if not element_entry_comment.lower().startswith(
JT_INTERNATIONALIZED_COMMENT_PREFIX):
return None
else:
return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]
|
extract_element_internationalized_comment
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(element):\n element_entry_comment = get_element_attribute_or_empty(element, 'userLabel'\n )\n if element_entry_comment == '':\n try:\n element_entry_comment = element.getElementsByTagName('string')[0\n ].firstChild.nodeValue\n except Exception:\n element_entry_comment = ''\n if not element_entry_comment.lower().startswith(\n JT_INTERNATIONALIZED_COMMENT_PREFIX):\n return None\n else:\n return element_entry_comment[len(JT_INTERNATIONALIZED_COMMENT_PREFIX):]\n",
"role": "user"
},
{
"content": "extract_element_internationalized_comment",
"role": "assistant"
}
] |
def x(element, class_suffix, special_ui_components_prefix):
valid_class_names = ['%s%s' % (DEFAULT_UI_COMPONENTS_PREFIX, class_suffix)]
if special_ui_components_prefix is not None:
valid_class_names.append('%s%s' % (special_ui_components_prefix,
class_suffix))
if not element.hasAttribute('customClass') or element.attributes[
'customClass'].value not in valid_class_names:
logging.warn("WARNING: %s is internationalized but isn't one of %s",
extract_element_internationalized_comment(element),
valid_class_names)
|
warn_if_element_not_of_class
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(element, class_suffix, special_ui_components_prefix):\n valid_class_names = ['%s%s' % (DEFAULT_UI_COMPONENTS_PREFIX, class_suffix)]\n if special_ui_components_prefix is not None:\n valid_class_names.append('%s%s' % (special_ui_components_prefix,\n class_suffix))\n if not element.hasAttribute('customClass') or element.attributes[\n 'customClass'].value not in valid_class_names:\n logging.warn(\"WARNING: %s is internationalized but isn't one of %s\",\n extract_element_internationalized_comment(element),\n valid_class_names)\n",
"role": "user"
},
{
"content": "warn_if_element_not_of_class",
"role": "assistant"
}
] |
def x(results, ui_element, comment_prefix):
attributed_strings = ui_element.getElementsByTagName('attributedString')
if attributed_strings.length == 0:
return False
attributed_element = attributed_strings[0]
fragment_index = 1
for fragment in attributed_element.getElementsByTagName('fragment'):
try:
label_entry_key = fragment.attributes['content'].value
except KeyError:
label_entry_key = fragment.getElementsByTagName('string')[0
].firstChild.nodeValue
comment = '%s Part %d' % (comment_prefix, fragment_index)
results.append((label_entry_key, comment))
fragment_index += 1
return fragment_index > 1
|
add_string_pairs_from_attributed_ui_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(results, ui_element, comment_prefix):\n attributed_strings = ui_element.getElementsByTagName('attributedString')\n if attributed_strings.length == 0:\n return False\n attributed_element = attributed_strings[0]\n fragment_index = 1\n for fragment in attributed_element.getElementsByTagName('fragment'):\n try:\n label_entry_key = fragment.attributes['content'].value\n except KeyError:\n label_entry_key = fragment.getElementsByTagName('string')[0\n ].firstChild.nodeValue\n comment = '%s Part %d' % (comment_prefix, fragment_index)\n results.append((label_entry_key, comment))\n fragment_index += 1\n return fragment_index > 1\n",
"role": "user"
},
{
"content": "add_string_pairs_from_attributed_ui_element",
"role": "assistant"
}
] |
def x(xib_file, results, label, special_ui_components_prefix):
label_entry_comment = extract_element_internationalized_comment(label)
if label_entry_comment is None:
return
warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)
if label.hasAttribute('usesAttributedText') and label.attributes[
'usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, label,
label_entry_comment)
else:
try:
label_entry_key = label.attributes['text'].value
except KeyError:
try:
label_entry_key = label.getElementsByTagName('string')[0
].firstChild.nodeValue
except Exception:
label_entry_key = 'N/A'
logging.warn('%s: Missing text entry in %s', xib_file,
label.toxml('UTF8'))
results.append((label_entry_key, label_entry_comment))
|
add_string_pairs_from_label_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(xib_file, results, label, special_ui_components_prefix):\n label_entry_comment = extract_element_internationalized_comment(label)\n if label_entry_comment is None:\n return\n warn_if_element_not_of_class(label, 'Label', special_ui_components_prefix)\n if label.hasAttribute('usesAttributedText') and label.attributes[\n 'usesAttributedText'].value == 'YES':\n add_string_pairs_from_attributed_ui_element(results, label,\n label_entry_comment)\n else:\n try:\n label_entry_key = label.attributes['text'].value\n except KeyError:\n try:\n label_entry_key = label.getElementsByTagName('string')[0\n ].firstChild.nodeValue\n except Exception:\n label_entry_key = 'N/A'\n logging.warn('%s: Missing text entry in %s', xib_file,\n label.toxml('UTF8'))\n results.append((label_entry_key, label_entry_comment))\n",
"role": "user"
},
{
"content": "add_string_pairs_from_label_element",
"role": "assistant"
}
] |
def x(xib_file, results, text_field, special_ui_components_prefix):
text_field_entry_comment = extract_element_internationalized_comment(
text_field)
if text_field_entry_comment is None:
return
if text_field.hasAttribute('usesAttributedText') and text_field.attributes[
'usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, text_field,
text_field_entry_comment)
else:
try:
text_field_entry_key = text_field.attributes['text'].value
results.append((text_field_entry_key, text_field_entry_comment +
' default text value'))
except KeyError:
pass
try:
text_field_entry_key = text_field.attributes['placeholder'].value
results.append((text_field_entry_key, text_field_entry_comment +
' placeholder text value'))
except KeyError:
pass
warn_if_element_not_of_class(text_field, 'TextField',
special_ui_components_prefix)
|
add_string_pairs_from_text_field_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(xib_file, results, text_field, special_ui_components_prefix):\n text_field_entry_comment = extract_element_internationalized_comment(\n text_field)\n if text_field_entry_comment is None:\n return\n if text_field.hasAttribute('usesAttributedText') and text_field.attributes[\n 'usesAttributedText'].value == 'YES':\n add_string_pairs_from_attributed_ui_element(results, text_field,\n text_field_entry_comment)\n else:\n try:\n text_field_entry_key = text_field.attributes['text'].value\n results.append((text_field_entry_key, text_field_entry_comment +\n ' default text value'))\n except KeyError:\n pass\n try:\n text_field_entry_key = text_field.attributes['placeholder'].value\n results.append((text_field_entry_key, text_field_entry_comment +\n ' placeholder text value'))\n except KeyError:\n pass\n warn_if_element_not_of_class(text_field, 'TextField',\n special_ui_components_prefix)\n",
"role": "user"
},
{
"content": "add_string_pairs_from_text_field_element",
"role": "assistant"
}
] |
def x(xib_file, results, text_view, special_ui_components_prefix):
text_view_entry_comment = extract_element_internationalized_comment(
text_view)
if text_view_entry_comment is None:
return
if text_view.hasAttribute('usesAttributedText') and text_view.attributes[
'usesAttributedText'].value == 'YES':
add_string_pairs_from_attributed_ui_element(results, text_view,
text_view_entry_comment)
else:
try:
text_view_entry_key = text_view.attributes['text'].value
results.append((text_view_entry_key, text_view_entry_comment +
' default text value'))
except KeyError:
pass
warn_if_element_not_of_class(text_view, 'TextView',
special_ui_components_prefix)
|
add_string_pairs_from_text_view_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(xib_file, results, text_view, special_ui_components_prefix):\n text_view_entry_comment = extract_element_internationalized_comment(\n text_view)\n if text_view_entry_comment is None:\n return\n if text_view.hasAttribute('usesAttributedText') and text_view.attributes[\n 'usesAttributedText'].value == 'YES':\n add_string_pairs_from_attributed_ui_element(results, text_view,\n text_view_entry_comment)\n else:\n try:\n text_view_entry_key = text_view.attributes['text'].value\n results.append((text_view_entry_key, text_view_entry_comment +\n ' default text value'))\n except KeyError:\n pass\n warn_if_element_not_of_class(text_view, 'TextView',\n special_ui_components_prefix)\n",
"role": "user"
},
{
"content": "add_string_pairs_from_text_view_element",
"role": "assistant"
}
] |
def x(xib_file, results, button, special_ui_components_prefix):
button_entry_comment = extract_element_internationalized_comment(button)
if button_entry_comment is None:
return
for state in button.getElementsByTagName('state'):
state_name = state.attributes['key'].value
state_entry_comment = (button_entry_comment + ' - ' + state_name +
' state of button')
if not add_string_pairs_from_attributed_ui_element(results, state,
state_entry_comment):
try:
button_entry_key = state.attributes['title'].value
except KeyError:
try:
button_entry_key = state.getElementsByTagName('string')[0
].firstChild.nodeValue
except Exception:
continue
results.append((button_entry_key, state_entry_comment))
warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix
)
|
add_string_pairs_from_button_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(xib_file, results, button, special_ui_components_prefix):\n button_entry_comment = extract_element_internationalized_comment(button)\n if button_entry_comment is None:\n return\n for state in button.getElementsByTagName('state'):\n state_name = state.attributes['key'].value\n state_entry_comment = (button_entry_comment + ' - ' + state_name +\n ' state of button')\n if not add_string_pairs_from_attributed_ui_element(results, state,\n state_entry_comment):\n try:\n button_entry_key = state.attributes['title'].value\n except KeyError:\n try:\n button_entry_key = state.getElementsByTagName('string')[0\n ].firstChild.nodeValue\n except Exception:\n continue\n results.append((button_entry_key, state_entry_comment))\n warn_if_element_not_of_class(button, 'Button', special_ui_components_prefix\n )\n",
"role": "user"
},
{
"content": "add_string_pairs_from_button_element",
"role": "assistant"
}
] |
def x(localizable_file, translated_file, excluded_strings_file,
output_translation_file):
old_translated_file_dictionary = (
generate_localization_key_to_entry_dictionary_from_file(
translated_file))
if excluded_strings_file is not None and os.path.isfile(
excluded_strings_file):
excluded_file_dictionary = (
generate_localization_key_to_entry_dictionary_from_file(
excluded_strings_file))
else:
excluded_file_dictionary = {}
translated_list = old_translated_file_dictionary.keys()
output_dictionary = {}
output_file_elements = []
f = open_strings_file(localizable_file, 'r')
output_file_elements.append(Comment(
u"""
/**
* This file contains all the strings that were extracted from our app and that need to be translated.
* Each entry may or may not have a comment explaining context, and a "key" = "%s" equation.
* To localize, you need to fill the right side of the equation with the translation of the left side.
* Please keep special expressions such as '%%@' or '%%1$@' as is. Usually the comment will explain their context.
*/
"""
% (VALUE_PLACEHOLDER,)))
for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(
f):
if key in translated_list or key in excluded_file_dictionary:
if key in old_translated_file_dictionary:
old_translated_file_dictionary.pop(key)
elif value in output_dictionary:
output_dictionary[value].add_comments(comments)
output_file_elements.append(Comment(
u"""/* There was a value '%s' here but it was a duplicate of an older value and removed. */
"""
% value))
else:
loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)
output_dictionary[value] = loc_obj
output_file_elements.append(loc_obj)
for key, removed_trans in old_translated_file_dictionary.items():
output_file_elements.append(Comment(
u"""
/*
* Entry removed from previous translation file:
* %s
* "%s" = "%s";
*/
"""
% (', '.join(removed_trans.comments), removed_trans.key,
removed_trans.value)))
write_file_elements_to_strings_file(output_translation_file,
output_file_elements)
|
localization_diff
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(localizable_file, translated_file, excluded_strings_file,\n output_translation_file):\n old_translated_file_dictionary = (\n generate_localization_key_to_entry_dictionary_from_file(\n translated_file))\n if excluded_strings_file is not None and os.path.isfile(\n excluded_strings_file):\n excluded_file_dictionary = (\n generate_localization_key_to_entry_dictionary_from_file(\n excluded_strings_file))\n else:\n excluded_file_dictionary = {}\n translated_list = old_translated_file_dictionary.keys()\n output_dictionary = {}\n output_file_elements = []\n f = open_strings_file(localizable_file, 'r')\n output_file_elements.append(Comment(\n u\"\"\"\n/**\n * This file contains all the strings that were extracted from our app and that need to be translated.\n * Each entry may or may not have a comment explaining context, and a \"key\" = \"%s\" equation.\n * To localize, you need to fill the right side of the equation with the translation of the left side.\n * Please keep special expressions such as '%%@' or '%%1$@' as is. Usually the comment will explain their context.\n */\n\"\"\"\n % (VALUE_PLACEHOLDER,)))\n for _header_comment, comments, key, value in extract_header_comment_key_value_tuples_from_file(\n f):\n if key in translated_list or key in excluded_file_dictionary:\n if key in old_translated_file_dictionary:\n old_translated_file_dictionary.pop(key)\n elif value in output_dictionary:\n output_dictionary[value].add_comments(comments)\n output_file_elements.append(Comment(\n u\"\"\"/* There was a value '%s' here but it was a duplicate of an older value and removed. 
*/\n\"\"\"\n % value))\n else:\n loc_obj = LocalizationEntry(comments, value, VALUE_PLACEHOLDER)\n output_dictionary[value] = loc_obj\n output_file_elements.append(loc_obj)\n for key, removed_trans in old_translated_file_dictionary.items():\n output_file_elements.append(Comment(\n u\"\"\"\n/*\n * Entry removed from previous translation file:\n * %s\n * \"%s\" = \"%s\";\n */\n\"\"\"\n % (', '.join(removed_trans.comments), removed_trans.key,\n removed_trans.value)))\n write_file_elements_to_strings_file(output_translation_file,\n output_file_elements)\n",
"role": "user"
},
{
"content": "localization_diff",
"role": "assistant"
}
] |
def x(self, rank='auto', haxis='Label', vaxis='simpson', title=None, xlabel
=None, ylabel=None, return_chart=False, plot_type='auto', label=None):
if rank is None:
raise OneCodexException(
"Please specify a rank or 'auto' to choose automatically")
if plot_type not in ('auto', 'boxplot', 'scatter'):
raise OneCodexException(
'Plot type must be one of: auto, boxplot, scatter')
df, magic_fields = self._metadata_fetch([haxis, 'Label'], label=label)
if vaxis in ('simpson', 'chao1', 'shannon'):
df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
magic_fields[vaxis] = vaxis
else:
vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
if pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]]
) or pd.api.types.is_categorical_dtype(vert_df[
vert_magic_fields[vaxis]]) or pd.api.types.is_object_dtype(vert_df
[vert_magic_fields[vaxis]]) or not pd.api.types.is_numeric_dtype(
vert_df[vert_magic_fields[vaxis]]):
raise OneCodexException(
'Metadata field on vertical axis must be numerical')
df = pd.concat([df, vert_df], axis=1).dropna(subset=[
vert_magic_fields[vaxis]])
magic_fields.update(vert_magic_fields)
if pd.api.types.is_datetime64_any_dtype(df[magic_fields[haxis]]):
category_type = 'T'
if plot_type == 'auto':
plot_type = 'boxplot'
elif 'date' in magic_fields[haxis].split('_'):
df.loc[:, magic_fields[haxis]] = df.loc[:, magic_fields[haxis]].apply(
pd.to_datetime, utc=True)
category_type = 'T'
if plot_type == 'auto':
plot_type = 'boxplot'
elif pd.api.types.is_bool_dtype(df[magic_fields[haxis]]
) or pd.api.types.is_categorical_dtype(df[magic_fields[haxis]]
) or pd.api.types.is_object_dtype(df[magic_fields[haxis]]):
df = df.fillna({field: 'N/A' for field in df.columns})
category_type = 'N'
if plot_type == 'auto':
if len(df[magic_fields[haxis]].unique()) == len(df[magic_fields
[haxis]]):
plot_type = 'scatter'
else:
plot_type = 'boxplot'
elif pd.api.types.is_numeric_dtype(df[magic_fields[haxis]]):
df = df.dropna(subset=[magic_fields[vaxis]])
category_type = 'O'
if plot_type == 'auto':
plot_type = 'scatter'
else:
raise OneCodexException(
'Unplottable column type for horizontal axis ({})'.format(haxis))
if xlabel is None:
xlabel = magic_fields[haxis]
if ylabel is None:
ylabel = magic_fields[vaxis]
if plot_type == 'scatter':
df = df.reset_index()
alt_kwargs = dict(x=alt.X(magic_fields[haxis], axis=alt.Axis(title=
xlabel)), y=alt.Y(magic_fields[vaxis], axis=alt.Axis(title=
ylabel)), tooltip=['Label', '{}:Q'.format(vaxis)], href='url:N',
url='https://app.onecodex.com/classification/' + alt.datum.
classification_id)
chart = alt.Chart(df).transform_calculate(url=alt_kwargs.pop('url')
).mark_circle().encode(**alt_kwargs)
if title:
chart = chart.properties(title=title)
elif plot_type == 'boxplot':
chart = boxplot(df, magic_fields[haxis], magic_fields[vaxis],
category_type=category_type, title=title, xlabel=xlabel, ylabel
=ylabel)
if return_chart:
return chart
else:
chart.interactive().display()
|
plot_metadata
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, rank='auto', haxis='Label', vaxis='simpson', title=None, xlabel\n =None, ylabel=None, return_chart=False, plot_type='auto', label=None):\n if rank is None:\n raise OneCodexException(\n \"Please specify a rank or 'auto' to choose automatically\")\n if plot_type not in ('auto', 'boxplot', 'scatter'):\n raise OneCodexException(\n 'Plot type must be one of: auto, boxplot, scatter')\n df, magic_fields = self._metadata_fetch([haxis, 'Label'], label=label)\n if vaxis in ('simpson', 'chao1', 'shannon'):\n df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)\n magic_fields[vaxis] = vaxis\n else:\n vert_df, vert_magic_fields = self._metadata_fetch([vaxis])\n if pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]]\n ) or pd.api.types.is_categorical_dtype(vert_df[\n vert_magic_fields[vaxis]]) or pd.api.types.is_object_dtype(vert_df\n [vert_magic_fields[vaxis]]) or not pd.api.types.is_numeric_dtype(\n vert_df[vert_magic_fields[vaxis]]):\n raise OneCodexException(\n 'Metadata field on vertical axis must be numerical')\n df = pd.concat([df, vert_df], axis=1).dropna(subset=[\n vert_magic_fields[vaxis]])\n magic_fields.update(vert_magic_fields)\n if pd.api.types.is_datetime64_any_dtype(df[magic_fields[haxis]]):\n category_type = 'T'\n if plot_type == 'auto':\n plot_type = 'boxplot'\n elif 'date' in magic_fields[haxis].split('_'):\n df.loc[:, magic_fields[haxis]] = df.loc[:, magic_fields[haxis]].apply(\n pd.to_datetime, utc=True)\n category_type = 'T'\n if plot_type == 'auto':\n plot_type = 'boxplot'\n elif pd.api.types.is_bool_dtype(df[magic_fields[haxis]]\n ) or pd.api.types.is_categorical_dtype(df[magic_fields[haxis]]\n ) or pd.api.types.is_object_dtype(df[magic_fields[haxis]]):\n df = df.fillna({field: 'N/A' for field in df.columns})\n category_type = 'N'\n if plot_type == 'auto':\n if len(df[magic_fields[haxis]].unique()) == len(df[magic_fields\n [haxis]]):\n plot_type = 'scatter'\n else:\n plot_type = 'boxplot'\n elif 
pd.api.types.is_numeric_dtype(df[magic_fields[haxis]]):\n df = df.dropna(subset=[magic_fields[vaxis]])\n category_type = 'O'\n if plot_type == 'auto':\n plot_type = 'scatter'\n else:\n raise OneCodexException(\n 'Unplottable column type for horizontal axis ({})'.format(haxis))\n if xlabel is None:\n xlabel = magic_fields[haxis]\n if ylabel is None:\n ylabel = magic_fields[vaxis]\n if plot_type == 'scatter':\n df = df.reset_index()\n alt_kwargs = dict(x=alt.X(magic_fields[haxis], axis=alt.Axis(title=\n xlabel)), y=alt.Y(magic_fields[vaxis], axis=alt.Axis(title=\n ylabel)), tooltip=['Label', '{}:Q'.format(vaxis)], href='url:N',\n url='https://app.onecodex.com/classification/' + alt.datum.\n classification_id)\n chart = alt.Chart(df).transform_calculate(url=alt_kwargs.pop('url')\n ).mark_circle().encode(**alt_kwargs)\n if title:\n chart = chart.properties(title=title)\n elif plot_type == 'boxplot':\n chart = boxplot(df, magic_fields[haxis], magic_fields[vaxis],\n category_type=category_type, title=title, xlabel=xlabel, ylabel\n =ylabel)\n if return_chart:\n return chart\n else:\n chart.interactive().display()\n",
"role": "user"
},
{
"content": "plot_metadata",
"role": "assistant"
}
] |
def x(app):
is_epub = isinstance(app.builder, EpubBuilder)
config_pages = app.config.html_additional_pages
if not is_epub and '404' not in config_pages:
yield '404', {}, '404.html'
|
add_404_page
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app):\n is_epub = isinstance(app.builder, EpubBuilder)\n config_pages = app.config.html_additional_pages\n if not is_epub and '404' not in config_pages:\n yield '404', {}, '404.html'\n",
"role": "user"
},
{
"content": "add_404_page",
"role": "assistant"
}
] |
def x(app, pagename, templatename, context, doctree):
base = context.get('canonical_url')
if not base:
return
target = app.builder.get_target_uri(pagename)
context['page_canonical_url'] = base + target
|
canonical_url
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app, pagename, templatename, context, doctree):\n base = context.get('canonical_url')\n if not base:\n return\n target = app.builder.get_target_uri(pagename)\n context['page_canonical_url'] = base + target\n",
"role": "user"
},
{
"content": "canonical_url",
"role": "assistant"
}
] |
def x(app):
if app.config.singlehtml_sidebars is not None and isinstance(app.
builder, SingleFileHTMLBuilder):
app.config.html_sidebars = app.config.singlehtml_sidebars
|
singlehtml_sidebars
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app):\n if app.config.singlehtml_sidebars is not None and isinstance(app.\n builder, SingleFileHTMLBuilder):\n app.config.html_sidebars = app.config.singlehtml_sidebars\n",
"role": "user"
},
{
"content": "singlehtml_sidebars",
"role": "assistant"
}
] |
def x(app, what, name, obj, skip, options):
docstring = inspect.getdoc(obj) or ''
if skip or re.search('^\\s*:internal:\\s*$', docstring, re.M) is not None:
return True
|
skip_internal
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app, what, name, obj, skip, options):\n docstring = inspect.getdoc(obj) or ''\n if skip or re.search('^\\\\s*:internal:\\\\s*$', docstring, re.M) is not None:\n return True\n",
"role": "user"
},
{
"content": "skip_internal",
"role": "assistant"
}
] |
def x(app, what, name, obj, options, lines):
if what != 'module':
return
lines[:] = [line for line in lines if not line.startswith((
':copyright:', ':license:'))]
|
cut_module_meta
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app, what, name, obj, options, lines):\n if what != 'module':\n return\n lines[:] = [line for line in lines if not line.startswith((\n ':copyright:', ':license:'))]\n",
"role": "user"
},
{
"content": "cut_module_meta",
"role": "assistant"
}
] |
def x(name, version_length=2, placeholder='x'):
try:
release = pkg_resources.get_distribution(name).version
except ImportError:
print(textwrap.fill(
"'{name}' must be installed to build the documentation. Install from source using `pip install -e .` in a virtualenv."
.format(name=name)))
sys.exit(1)
version = '.'.join(release.split('.', version_length)[:version_length])
if placeholder:
version = '{}.{}'.format(version, placeholder)
return release, version
|
get_version
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(name, version_length=2, placeholder='x'):\n try:\n release = pkg_resources.get_distribution(name).version\n except ImportError:\n print(textwrap.fill(\n \"'{name}' must be installed to build the documentation. Install from source using `pip install -e .` in a virtualenv.\"\n .format(name=name)))\n sys.exit(1)\n version = '.'.join(release.split('.', version_length)[:version_length])\n if placeholder:\n version = '{}.{}'.format(version, placeholder)\n return release, version\n",
"role": "user"
},
{
"content": "get_version",
"role": "assistant"
}
] |
def x(app):
if app.config.is_pallets_theme is not None:
return
theme = getattr(app.builder, 'theme', None)
while theme is not None:
if theme.name == 'pocoo':
app.config.is_pallets_theme = True
break
theme = theme.base
else:
app.config.is_pallets_theme = False
|
set_is_pallets_theme
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(app):\n if app.config.is_pallets_theme is not None:\n return\n theme = getattr(app.builder, 'theme', None)\n while theme is not None:\n if theme.name == 'pocoo':\n app.config.is_pallets_theme = True\n break\n theme = theme.base\n else:\n app.config.is_pallets_theme = False\n",
"role": "user"
},
{
"content": "set_is_pallets_theme",
"role": "assistant"
}
] |
def x(default=None):
def x(f):
@wraps(f)
def x(app, *args, **kwargs):
if not app.config.is_pallets_theme:
return default
return f(app, *args, **kwargs)
return wrapped
return decorator
|
only_pallets_theme
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(default=None):\n\n def x(f):\n\n @wraps(f)\n def x(app, *args, **kwargs):\n if not app.config.is_pallets_theme:\n return default\n return f(app, *args, **kwargs)\n return wrapped\n return decorator\n",
"role": "user"
},
{
"content": "only_pallets_theme",
"role": "assistant"
}
] |
def x():
old_call = subprocess.call
def x(*args, **kwargs):
with tempfile.TemporaryFile('wb+') as f:
kwargs['stdout'] = f
kwargs['stderr'] = f
rv = subprocess.Popen(*args, **kwargs).wait()
f.seek(0)
click.echo(f.read().decode('utf-8', 'replace').rstrip())
return rv
subprocess.call = dummy_call
try:
yield
finally:
subprocess.call = old_call
|
patch_modules
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n old_call = subprocess.call\n\n def x(*args, **kwargs):\n with tempfile.TemporaryFile('wb+') as f:\n kwargs['stdout'] = f\n kwargs['stderr'] = f\n rv = subprocess.Popen(*args, **kwargs).wait()\n f.seek(0)\n click.echo(f.read().decode('utf-8', 'replace').rstrip())\n return rv\n subprocess.call = dummy_call\n try:\n yield\n finally:\n subprocess.call = old_call\n",
"role": "user"
},
{
"content": "patch_modules",
"role": "assistant"
}
] |
def x(document):
runner = getattr(document, 'click_example_runner', None)
if runner is None:
runner = document.click_example_runner = ExampleRunner()
return runner
|
get_example_runner
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(document):\n runner = getattr(document, 'click_example_runner', None)\n if runner is None:\n runner = document.click_example_runner = ExampleRunner()\n return runner\n",
"role": "user"
},
{
"content": "get_example_runner",
"role": "assistant"
}
] |
def x(self, cli, args=None, prog_name=None, input=None, terminate_input=
False, env=None, _output_lines=None, **extra):
output_lines = _output_lines if _output_lines is not None else []
if env:
for key, value in sorted(env.items()):
value = shlex.quote(value)
output_lines.append('$ export {}={}'.format(key, value))
args = args or []
if prog_name is None:
prog_name = cli.name.replace('_', '-')
output_lines.append('$ {} {}'.format(prog_name, ' '.join(shlex.quote(x) for
x in args)).rstrip())
prog_name = prog_name.rsplit(' ', 1)[-1]
if isinstance(input, (tuple, list)):
input = '\n'.join(input) + '\n'
if terminate_input:
input += '\x04'
result = super(ExampleRunner, self).invoke(cli=cli, args=args, input=
input, env=env, prog_name=prog_name, **extra)
output_lines.extend(result.output.splitlines())
return result
|
invoke
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, cli, args=None, prog_name=None, input=None, terminate_input=\n False, env=None, _output_lines=None, **extra):\n output_lines = _output_lines if _output_lines is not None else []\n if env:\n for key, value in sorted(env.items()):\n value = shlex.quote(value)\n output_lines.append('$ export {}={}'.format(key, value))\n args = args or []\n if prog_name is None:\n prog_name = cli.name.replace('_', '-')\n output_lines.append('$ {} {}'.format(prog_name, ' '.join(shlex.quote(x) for\n x in args)).rstrip())\n prog_name = prog_name.rsplit(' ', 1)[-1]\n if isinstance(input, (tuple, list)):\n input = '\\n'.join(input) + '\\n'\n if terminate_input:\n input += '\\x04'\n result = super(ExampleRunner, self).invoke(cli=cli, args=args, input=\n input, env=env, prog_name=prog_name, **extra)\n output_lines.extend(result.output.splitlines())\n return result\n",
"role": "user"
},
{
"content": "invoke",
"role": "assistant"
}
] |
def x(self, source):
with patch_modules():
code = compile(source, '<docs>', 'exec')
exec(code, self.namespace)
|
declare_example
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, source):\n with patch_modules():\n code = compile(source, '<docs>', 'exec')\n exec(code, self.namespace)\n",
"role": "user"
},
{
"content": "declare_example",
"role": "assistant"
}
] |
def x(self, source):
code = compile(source, '<docs>', 'exec')
buffer = []
invoke = partial(self.invoke, _output_lines=buffer)
def x(text=''):
buffer.append(text)
exec(code, self.namespace, {'invoke': invoke, 'println': println,
'isolated_filesystem': self.isolated_filesystem})
return buffer
|
run_example
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, source):\n code = compile(source, '<docs>', 'exec')\n buffer = []\n invoke = partial(self.invoke, _output_lines=buffer)\n\n def x(text=''):\n buffer.append(text)\n exec(code, self.namespace, {'invoke': invoke, 'println': println,\n 'isolated_filesystem': self.isolated_filesystem})\n return buffer\n",
"role": "user"
},
{
"content": "run_example",
"role": "assistant"
}
] |
def x(*, verbose: bool=False, quiet: bool=False, color: str='auto', title:
str='auto', timestamp: bool=False) ->None:
_setup(verbose=verbose, quiet=quiet, color=color, title=title,
timestamp=timestamp)
|
setup
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*, verbose: bool=False, quiet: bool=False, color: str='auto', title:\n str='auto', timestamp: bool=False) ->None:\n _setup(verbose=verbose, quiet=quiet, color=color, title=title,\n timestamp=timestamp)\n",
"role": "user"
},
{
"content": "setup",
"role": "assistant"
}
] |
def x(tokens: Sequence[Token], *, end: str='\n', sep: str=' ') ->Tuple[str, str
]:
flat_tokens = list()
for token in tokens:
if isinstance(token, UnicodeSequence):
flat_tokens.extend(token.tuple())
else:
flat_tokens.append(token)
with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True)
without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False)
return with_color, without_color
|
process_tokens
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tokens: Sequence[Token], *, end: str='\\n', sep: str=' ') ->Tuple[str, str\n ]:\n flat_tokens = list()\n for token in tokens:\n if isinstance(token, UnicodeSequence):\n flat_tokens.extend(token.tuple())\n else:\n flat_tokens.append(token)\n with_color = _process_tokens(flat_tokens, end=end, sep=sep, color=True)\n without_color = _process_tokens(flat_tokens, end=end, sep=sep, color=False)\n return with_color, without_color\n",
"role": "user"
},
{
"content": "process_tokens",
"role": "assistant"
}
] |
def x(*tokens: Token, end: str='\n', sep: str=' ', fileobj: FileObj=sys.
stdout, update_title: bool=False) ->None:
if using_colorama():
global _INITIALIZED
if not _INITIALIZED:
colorama.init()
_INITIALIZED = True
with_color, without_color = process_tokens(tokens, end=end, sep=sep)
if CONFIG['record']:
_MESSAGES.append(without_color)
if update_title and with_color:
write_title_string(without_color, fileobj)
to_write = with_color if config_color(fileobj) else without_color
write_and_flush(fileobj, to_write)
|
message
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, end: str='\\n', sep: str=' ', fileobj: FileObj=sys.\n stdout, update_title: bool=False) ->None:\n if using_colorama():\n global _INITIALIZED\n if not _INITIALIZED:\n colorama.init()\n _INITIALIZED = True\n with_color, without_color = process_tokens(tokens, end=end, sep=sep)\n if CONFIG['record']:\n _MESSAGES.append(without_color)\n if update_title and with_color:\n write_title_string(without_color, fileobj)\n to_write = with_color if config_color(fileobj) else without_color\n write_and_flush(fileobj, to_write)\n",
"role": "user"
},
{
"content": "message",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
error(*tokens, **kwargs)
sys.exit(1)
|
fatal
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n error(*tokens, **kwargs)\n sys.exit(1)\n",
"role": "user"
},
{
"content": "fatal",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
tokens = [bold, red, 'Error:'] + list(tokens)
kwargs['fileobj'] = sys.stderr
message(*tokens, **kwargs)
|
error
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n tokens = [bold, red, 'Error:'] + list(tokens)\n kwargs['fileobj'] = sys.stderr\n message(*tokens, **kwargs)\n",
"role": "user"
},
{
"content": "error",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
tokens = [brown, 'Warning:'] + list(tokens)
kwargs['fileobj'] = sys.stderr
message(*tokens, **kwargs)
|
warning
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n tokens = [brown, 'Warning:'] + list(tokens)\n kwargs['fileobj'] = sys.stderr\n message(*tokens, **kwargs)\n",
"role": "user"
},
{
"content": "warning",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
process_tokens_kwargs = kwargs.copy()
process_tokens_kwargs['color'] = False
no_color = _process_tokens(tokens, **process_tokens_kwargs)
info(*tokens, **kwargs)
info('-' * len(no_color), end='\n\n')
|
info_section
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n process_tokens_kwargs = kwargs.copy()\n process_tokens_kwargs['color'] = False\n no_color = _process_tokens(tokens, **process_tokens_kwargs)\n info(*tokens, **kwargs)\n info('-' * len(no_color), end='\\n\\n')\n",
"role": "user"
},
{
"content": "info_section",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
info(bold, blue, '::', reset, *tokens, **kwargs)
|
info_1
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n info(bold, blue, '::', reset, *tokens, **kwargs)\n",
"role": "user"
},
{
"content": "info_1",
"role": "assistant"
}
] |
def x(*, last: bool=False, fileobj: Any=None) ->None:
end = '\n' if last else ''
info('.', end=end, fileobj=fileobj)
|
dot
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*, last: bool=False, fileobj: Any=None) ->None:\n end = '\\n' if last else ''\n info('.', end=end, fileobj=fileobj)\n",
"role": "user"
},
{
"content": "dot",
"role": "assistant"
}
] |
def x(i: int, n: int, *rest: Token, **kwargs: Any) ->None:
num_digits = len(str(n))
counter_format = '(%{}d/%d)'.format(num_digits)
counter_str = counter_format % (i + 1, n)
info(green, '*', reset, counter_str, reset, *rest, **kwargs)
|
info_count
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(i: int, n: int, *rest: Token, **kwargs: Any) ->None:\n num_digits = len(str(n))\n counter_format = '(%{}d/%d)'.format(num_digits)\n counter_str = counter_format % (i + 1, n)\n info(green, '*', reset, counter_str, reset, *rest, **kwargs)\n",
"role": "user"
},
{
"content": "info_count",
"role": "assistant"
}
] |
def x(prefix: str, value: float, max_value: float) ->None:
if sys.stdout.isatty():
percent = float(value) / max_value * 100
sys.stdout.write(prefix + ': %.0f%%\r' % percent)
sys.stdout.flush()
|
info_progress
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(prefix: str, value: float, max_value: float) ->None:\n if sys.stdout.isatty():\n percent = float(value) / max_value * 100\n sys.stdout.write(prefix + ': %.0f%%\\r' % percent)\n sys.stdout.flush()\n",
"role": "user"
},
{
"content": "info_progress",
"role": "assistant"
}
] |
def x(*tokens: Token, **kwargs: Any) ->None:
if not CONFIG['verbose'] or CONFIG['record']:
return
message(*tokens, **kwargs)
|
debug
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*tokens: Token, **kwargs: Any) ->None:\n if not CONFIG['verbose'] or CONFIG['record']:\n return\n message(*tokens, **kwargs)\n",
"role": "user"
},
{
"content": "debug",
"role": "assistant"
}
] |
def x(elems: Sequence[str], num: int=2) ->List[str]:
return [(' ' * num + l) for l in elems]
|
indent_iterable
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(elems: Sequence[str], num: int=2) ->List[str]:\n return [(' ' * num + l) for l in elems]\n",
"role": "user"
},
{
"content": "indent_iterable",
"role": "assistant"
}
] |
def x(text: str, num: int=2) ->str:
lines = text.splitlines()
return '\n'.join(indent_iterable(lines, num=num))
|
indent
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text: str, num: int=2) ->str:\n lines = text.splitlines()\n return '\\n'.join(indent_iterable(lines, num=num))\n",
"role": "user"
},
{
"content": "indent",
"role": "assistant"
}
] |
def x(exception: Exception, message: str) ->Sequence[Token]:
tb = sys.exc_info()[2]
buffer = io.StringIO()
traceback.print_tb(tb, file=io)
return red, message + '\n', exception.__class__.__name__, str(exception
), '\n', reset, buffer.getvalue()
|
message_for_exception
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(exception: Exception, message: str) ->Sequence[Token]:\n tb = sys.exc_info()[2]\n buffer = io.StringIO()\n traceback.print_tb(tb, file=io)\n return red, message + '\\n', exception.__class__.__name__, str(exception\n ), '\\n', reset, buffer.getvalue()\n",
"role": "user"
},
{
"content": "message_for_exception",
"role": "assistant"
}
] |
def x(*question: Token, default: Optional[str]=None) ->Optional[str]:
tokens = get_ask_tokens(question)
if default:
tokens.append('(%s)' % default)
info(*tokens)
answer = read_input()
if not answer:
return default
return answer
|
ask_string
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*question: Token, default: Optional[str]=None) ->Optional[str]:\n tokens = get_ask_tokens(question)\n if default:\n tokens.append('(%s)' % default)\n info(*tokens)\n answer = read_input()\n if not answer:\n return default\n return answer\n",
"role": "user"
},
{
"content": "ask_string",
"role": "assistant"
}
] |
def x(*question: Token) ->str:
tokens = get_ask_tokens(question)
info(*tokens)
answer = read_password()
return answer
|
ask_password
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*question: Token) ->str:\n tokens = get_ask_tokens(question)\n info(*tokens)\n answer = read_password()\n return answer\n",
"role": "user"
},
{
"content": "ask_password",
"role": "assistant"
}
] |
def x(*prompt: Token, choices: List[Any], func_desc: Optional[FuncDesc]=None
) ->Any:
if func_desc is None:
func_desc = lambda x: str(x)
tokens = get_ask_tokens(prompt)
info(*tokens)
choices.sort(key=func_desc)
for i, choice in enumerate(choices, start=1):
choice_desc = func_desc(choice)
info(' ', blue, '%i' % i, reset, choice_desc)
keep_asking = True
res = None
while keep_asking:
answer = read_input()
if not answer:
return None
try:
index = int(answer)
except ValueError:
info('Please enter a valid number')
continue
if index not in range(1, len(choices) + 1):
info(str(index), 'is out of range')
continue
res = choices[index - 1]
keep_asking = False
return res
|
ask_choice
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*prompt: Token, choices: List[Any], func_desc: Optional[FuncDesc]=None\n ) ->Any:\n if func_desc is None:\n func_desc = lambda x: str(x)\n tokens = get_ask_tokens(prompt)\n info(*tokens)\n choices.sort(key=func_desc)\n for i, choice in enumerate(choices, start=1):\n choice_desc = func_desc(choice)\n info(' ', blue, '%i' % i, reset, choice_desc)\n keep_asking = True\n res = None\n while keep_asking:\n answer = read_input()\n if not answer:\n return None\n try:\n index = int(answer)\n except ValueError:\n info('Please enter a valid number')\n continue\n if index not in range(1, len(choices) + 1):\n info(str(index), 'is out of range')\n continue\n res = choices[index - 1]\n keep_asking = False\n return res\n",
"role": "user"
},
{
"content": "ask_choice",
"role": "assistant"
}
] |
def x(*question: Token, default: bool=False) ->bool:
while True:
tokens = [green, '::', reset] + list(question) + [reset]
if default:
tokens.append('(Y/n)')
else:
tokens.append('(y/N)')
info(*tokens)
answer = read_input()
if answer.lower() in ['y', 'yes']:
return True
if answer.lower() in ['n', 'no']:
return False
if not answer:
return default
warning("Please answer by 'y' (yes) or 'n' (no) ")
|
ask_yes_no
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(*question: Token, default: bool=False) ->bool:\n while True:\n tokens = [green, '::', reset] + list(question) + [reset]\n if default:\n tokens.append('(Y/n)')\n else:\n tokens.append('(y/N)')\n info(*tokens)\n answer = read_input()\n if answer.lower() in ['y', 'yes']:\n return True\n if answer.lower() in ['n', 'no']:\n return False\n if not answer:\n return default\n warning(\"Please answer by 'y' (yes) or 'n' (no) \")\n",
"role": "user"
},
{
"content": "ask_yes_no",
"role": "assistant"
}
] |
def x(message: str, user_input: str, choices: Sequence[str]) ->str:
if not choices:
return message
else:
result = {difflib.SequenceMatcher(a=user_input, b=choice).ratio():
choice for choice in choices}
message += '\nDid you mean: %s?' % result[max(result)]
return message
|
did_you_mean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(message: str, user_input: str, choices: Sequence[str]) ->str:\n if not choices:\n return message\n else:\n result = {difflib.SequenceMatcher(a=user_input, b=choice).ratio():\n choice for choice in choices}\n message += '\\nDid you mean: %s?' % result[max(result)]\n return message\n",
"role": "user"
},
{
"content": "did_you_mean",
"role": "assistant"
}
] |
def x(self) ->None:
end_time = datetime.datetime.now()
elapsed_time = end_time - self.start_time
elapsed_seconds = elapsed_time.seconds
hours, remainder = divmod(int(elapsed_seconds), 3600)
minutes, seconds = divmod(remainder, 60)
as_str = '%sh %sm %ss %dms' % (hours, minutes, seconds, elapsed_time.
microseconds / 1000)
info('%s took %s' % (self.description, as_str))
|
stop
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self) ->None:\n end_time = datetime.datetime.now()\n elapsed_time = end_time - self.start_time\n elapsed_seconds = elapsed_time.seconds\n hours, remainder = divmod(int(elapsed_seconds), 3600)\n minutes, seconds = divmod(remainder, 60)\n as_str = '%sh %sm %ss %dms' % (hours, minutes, seconds, elapsed_time.\n microseconds / 1000)\n info('%s took %s' % (self.description, as_str))\n",
"role": "user"
},
{
"content": "stop",
"role": "assistant"
}
] |
def x(xml):
parser = ET.XMLParser(resolve_entities=False, strip_cdata=False,
remove_blank_text=True)
document = ET.fromstring(xml, parser)
return ('%s\n%s' % ('<?xml version="1.0" encoding="UTF-8"?>', ET.
tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')
|
xmllint_format
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(xml):\n parser = ET.XMLParser(resolve_entities=False, strip_cdata=False,\n remove_blank_text=True)\n document = ET.fromstring(xml, parser)\n return ('%s\\n%s' % ('<?xml version=\"1.0\" encoding=\"UTF-8\"?>', ET.\n tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')\n",
"role": "user"
},
{
"content": "xmllint_format",
"role": "assistant"
}
] |
def x(self, directory, url, basename=None, overwrite=False, subdir=None):
log = getLogger('ocrd.resolver.download_to_directory')
log.debug(
'directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|',
directory, url, basename, overwrite, subdir)
if url is None:
raise Exception("'url' must be a string")
if directory is None:
raise Exception("'directory' must be a string")
if basename is None:
if subdir is not None or directory and url.startswith('file://%s' %
directory):
basename = url.rsplit('/', 1)[-1]
else:
basename = safe_filename(url)
if subdir is not None:
basename = join(subdir, basename)
outfilename = join(directory, basename)
if exists(outfilename) and not overwrite:
log.debug('File already exists and overwrite=False: %s', outfilename)
return outfilename
outfiledir = outfilename.rsplit('/', 1)[0]
if not isdir(outfiledir):
makedirs(outfiledir)
log.debug("Downloading <%s> to '%s'", url, outfilename)
if url.startswith('file://'):
url = url[len('file://'):]
if '://' not in url:
copyfile(url, outfilename)
else:
response = requests.get(url)
if response.status_code != 200:
raise Exception('Not found: %s (HTTP %d)' % (url, response.
status_code))
with open(outfilename, 'wb') as outfile:
outfile.write(response.content)
return outfilename
|
download_to_directory
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, directory, url, basename=None, overwrite=False, subdir=None):\n log = getLogger('ocrd.resolver.download_to_directory')\n log.debug(\n 'directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|',\n directory, url, basename, overwrite, subdir)\n if url is None:\n raise Exception(\"'url' must be a string\")\n if directory is None:\n raise Exception(\"'directory' must be a string\")\n if basename is None:\n if subdir is not None or directory and url.startswith('file://%s' %\n directory):\n basename = url.rsplit('/', 1)[-1]\n else:\n basename = safe_filename(url)\n if subdir is not None:\n basename = join(subdir, basename)\n outfilename = join(directory, basename)\n if exists(outfilename) and not overwrite:\n log.debug('File already exists and overwrite=False: %s', outfilename)\n return outfilename\n outfiledir = outfilename.rsplit('/', 1)[0]\n if not isdir(outfiledir):\n makedirs(outfiledir)\n log.debug(\"Downloading <%s> to '%s'\", url, outfilename)\n if url.startswith('file://'):\n url = url[len('file://'):]\n if '://' not in url:\n copyfile(url, outfilename)\n else:\n response = requests.get(url)\n if response.status_code != 200:\n raise Exception('Not found: %s (HTTP %d)' % (url, response.\n status_code))\n with open(outfilename, 'wb') as outfile:\n outfile.write(response.content)\n return outfilename\n",
"role": "user"
},
{
"content": "download_to_directory",
"role": "assistant"
}
] |
def x(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None,
download=False, baseurl=None):
if dst_dir and not dst_dir.startswith('/'):
dst_dir = abspath(dst_dir)
if mets_url is None:
if baseurl is None:
raise Exception(
'Must pass mets_url and/or baseurl to workspace_from_url')
else:
mets_url = 'file://%s/%s' % (baseurl, mets_basename if
mets_basename else 'mets.xml')
if baseurl is None:
baseurl = mets_url.rsplit('/', 1)[0]
log.debug("workspace_from_url\nmets_url='%s'\nbaseurl='%s'\ndst_dir='%s'",
mets_url, baseurl, dst_dir)
if '://' not in mets_url:
mets_url = 'file://%s' % abspath(mets_url)
if dst_dir is None:
if mets_url.startswith('file://'):
dst_dir = dirname(mets_url[len('file://'):])
else:
dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)
log.debug("Creating workspace '%s' for METS @ <%s>", dst_dir,
mets_url)
if mets_basename is None:
mets_basename = mets_url.rsplit('/', 1)[-1].split('?')[0].split('#')[0]
dst_mets = join(dst_dir, mets_basename)
log.debug("Copying mets url '%s' to '%s'", mets_url, dst_mets)
if 'file://' + dst_mets == mets_url:
log.debug('Target and source mets are identical')
elif exists(dst_mets) and not clobber_mets:
raise Exception(
"File '%s' already exists but clobber_mets is false" % dst_mets)
else:
self.download_to_directory(dst_dir, mets_url, basename=mets_basename)
workspace = Workspace(self, dst_dir, mets_basename=mets_basename,
baseurl=baseurl)
if download:
for f in workspace.mets.find_files():
workspace.download_file(f)
return workspace
|
workspace_from_url
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None,\n download=False, baseurl=None):\n if dst_dir and not dst_dir.startswith('/'):\n dst_dir = abspath(dst_dir)\n if mets_url is None:\n if baseurl is None:\n raise Exception(\n 'Must pass mets_url and/or baseurl to workspace_from_url')\n else:\n mets_url = 'file://%s/%s' % (baseurl, mets_basename if\n mets_basename else 'mets.xml')\n if baseurl is None:\n baseurl = mets_url.rsplit('/', 1)[0]\n log.debug(\"workspace_from_url\\nmets_url='%s'\\nbaseurl='%s'\\ndst_dir='%s'\",\n mets_url, baseurl, dst_dir)\n if '://' not in mets_url:\n mets_url = 'file://%s' % abspath(mets_url)\n if dst_dir is None:\n if mets_url.startswith('file://'):\n dst_dir = dirname(mets_url[len('file://'):])\n else:\n dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)\n log.debug(\"Creating workspace '%s' for METS @ <%s>\", dst_dir,\n mets_url)\n if mets_basename is None:\n mets_basename = mets_url.rsplit('/', 1)[-1].split('?')[0].split('#')[0]\n dst_mets = join(dst_dir, mets_basename)\n log.debug(\"Copying mets url '%s' to '%s'\", mets_url, dst_mets)\n if 'file://' + dst_mets == mets_url:\n log.debug('Target and source mets are identical')\n elif exists(dst_mets) and not clobber_mets:\n raise Exception(\n \"File '%s' already exists but clobber_mets is false\" % dst_mets)\n else:\n self.download_to_directory(dst_dir, mets_url, basename=mets_basename)\n workspace = Workspace(self, dst_dir, mets_basename=mets_basename,\n baseurl=baseurl)\n if download:\n for f in workspace.mets.find_files():\n workspace.download_file(f)\n return workspace\n",
"role": "user"
},
{
"content": "workspace_from_url",
"role": "assistant"
}
] |
def x(self, directory, mets_basename='mets.xml', clobber_mets=False):
if directory is None:
directory = tempfile.mkdtemp(prefix=TMP_PREFIX)
if not exists(directory):
makedirs(directory)
mets_fpath = join(directory, mets_basename)
if not clobber_mets and exists(mets_fpath):
raise Exception("Not clobbering existing mets.xml in '%s'." % directory
)
mets = OcrdMets.empty_mets()
with open(mets_fpath, 'wb') as fmets:
log.info('Writing %s', mets_fpath)
fmets.write(mets.to_xml(xmllint=True))
return Workspace(self, directory, mets)
|
workspace_from_nothing
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, directory, mets_basename='mets.xml', clobber_mets=False):\n if directory is None:\n directory = tempfile.mkdtemp(prefix=TMP_PREFIX)\n if not exists(directory):\n makedirs(directory)\n mets_fpath = join(directory, mets_basename)\n if not clobber_mets and exists(mets_fpath):\n raise Exception(\"Not clobbering existing mets.xml in '%s'.\" % directory\n )\n mets = OcrdMets.empty_mets()\n with open(mets_fpath, 'wb') as fmets:\n log.info('Writing %s', mets_fpath)\n fmets.write(mets.to_xml(xmllint=True))\n return Workspace(self, directory, mets)\n",
"role": "user"
},
{
"content": "workspace_from_nothing",
"role": "assistant"
}
] |
def x(self, xmllint=False):
root = self._tree.getroot()
ret = ET.tostring(ET.ElementTree(root), pretty_print=True)
if xmllint:
ret = xmllint_format(ret)
return ret
|
to_xml
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, xmllint=False):\n root = self._tree.getroot()\n ret = ET.tostring(ET.ElementTree(root), pretty_print=True)\n if xmllint:\n ret = xmllint_format(ret)\n return ret\n",
"role": "user"
},
{
"content": "to_xml",
"role": "assistant"
}
] |
def x(ctx, directory, mets_basename, backup):
ctx.obj = WorkspaceCtx(os.path.abspath(directory), mets_basename,
automatic_backup=backup)
|
workspace_cli
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, directory, mets_basename, backup):\n ctx.obj = WorkspaceCtx(os.path.abspath(directory), mets_basename,\n automatic_backup=backup)\n",
"role": "user"
},
{
"content": "workspace_cli",
"role": "assistant"
}
] |
def x(ctx, clobber_mets, download, mets_url, workspace_dir):
workspace = ctx.resolver.workspace_from_url(mets_url, dst_dir=os.path.
abspath(workspace_dir if workspace_dir else mkdtemp(prefix=
TMP_PREFIX)), mets_basename=ctx.mets_basename, clobber_mets=
clobber_mets, download=download)
workspace.save_mets()
print(workspace.directory)
|
workspace_clone
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, clobber_mets, download, mets_url, workspace_dir):\n workspace = ctx.resolver.workspace_from_url(mets_url, dst_dir=os.path.\n abspath(workspace_dir if workspace_dir else mkdtemp(prefix=\n TMP_PREFIX)), mets_basename=ctx.mets_basename, clobber_mets=\n clobber_mets, download=download)\n workspace.save_mets()\n print(workspace.directory)\n",
"role": "user"
},
{
"content": "workspace_clone",
"role": "assistant"
}
] |
def x(ctx, clobber_mets, directory):
workspace = ctx.resolver.workspace_from_nothing(directory=os.path.
abspath(directory), mets_basename=ctx.mets_basename, clobber_mets=
clobber_mets)
workspace.save_mets()
print(workspace.directory)
|
workspace_create
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, clobber_mets, directory):\n workspace = ctx.resolver.workspace_from_nothing(directory=os.path.\n abspath(directory), mets_basename=ctx.mets_basename, clobber_mets=\n clobber_mets)\n workspace.save_mets()\n print(workspace.directory)\n",
"role": "user"
},
{
"content": "workspace_create",
"role": "assistant"
}
] |
def x(ctx, file_grp, file_id, mimetype, page_id, force, local_filename):
workspace = Workspace(ctx.resolver, directory=ctx.directory,
mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)
if not local_filename.startswith(ctx.directory):
log.debug("File '%s' is not in workspace, copying", local_filename)
local_filename = ctx.resolver.download_to_directory(ctx.directory,
'file://' + local_filename, subdir=file_grp)
url = 'file://' + local_filename
workspace.mets.add_file(fileGrp=file_grp, ID=file_id, mimetype=mimetype,
url=url, pageId=page_id, force=force, local_filename=local_filename)
workspace.save_mets()
|
workspace_add_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, file_grp, file_id, mimetype, page_id, force, local_filename):\n workspace = Workspace(ctx.resolver, directory=ctx.directory,\n mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup)\n if not local_filename.startswith(ctx.directory):\n log.debug(\"File '%s' is not in workspace, copying\", local_filename)\n local_filename = ctx.resolver.download_to_directory(ctx.directory, \n 'file://' + local_filename, subdir=file_grp)\n url = 'file://' + local_filename\n workspace.mets.add_file(fileGrp=file_grp, ID=file_id, mimetype=mimetype,\n url=url, pageId=page_id, force=force, local_filename=local_filename)\n workspace.save_mets()\n",
"role": "user"
},
{
"content": "workspace_add_file",
"role": "assistant"
}
] |
def x(ctx, file_grp, mimetype, page_id, file_id, output_field, download):
workspace = Workspace(ctx.resolver, directory=ctx.directory,
mets_basename=ctx.mets_basename)
for f in workspace.mets.find_files(ID=file_id, fileGrp=file_grp,
mimetype=mimetype, pageId=page_id):
if download:
workspace.download_file(f)
workspace.save_mets()
ret = '\t'.join([(getattr(f, field) or '') for field in output_field])
print(ret)
|
workspace_find
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, file_grp, mimetype, page_id, file_id, output_field, download):\n workspace = Workspace(ctx.resolver, directory=ctx.directory,\n mets_basename=ctx.mets_basename)\n for f in workspace.mets.find_files(ID=file_id, fileGrp=file_grp,\n mimetype=mimetype, pageId=page_id):\n if download:\n workspace.download_file(f)\n workspace.save_mets()\n ret = '\\t'.join([(getattr(f, field) or '') for field in output_field])\n print(ret)\n",
"role": "user"
},
{
"content": "workspace_find",
"role": "assistant"
}
] |
def x(ctx):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,
directory=ctx.directory, mets_basename=ctx.mets_basename,
automatic_backup=ctx.automatic_backup))
backup_manager.add()
|
workspace_backup_add
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx):\n backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,\n directory=ctx.directory, mets_basename=ctx.mets_basename,\n automatic_backup=ctx.automatic_backup))\n backup_manager.add()\n",
"role": "user"
},
{
"content": "workspace_backup_add",
"role": "assistant"
}
] |
def x(ctx):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,
directory=ctx.directory, mets_basename=ctx.mets_basename,
automatic_backup=ctx.automatic_backup))
for b in backup_manager.list():
print(b)
|
workspace_backup_list
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx):\n backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,\n directory=ctx.directory, mets_basename=ctx.mets_basename,\n automatic_backup=ctx.automatic_backup))\n for b in backup_manager.list():\n print(b)\n",
"role": "user"
},
{
"content": "workspace_backup_list",
"role": "assistant"
}
] |
def x(ctx, choose_first, bak):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,
directory=ctx.directory, mets_basename=ctx.mets_basename,
automatic_backup=ctx.automatic_backup))
backup_manager.restore(bak, choose_first)
|
workspace_backup_restore
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx, choose_first, bak):\n backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,\n directory=ctx.directory, mets_basename=ctx.mets_basename,\n automatic_backup=ctx.automatic_backup))\n backup_manager.restore(bak, choose_first)\n",
"role": "user"
},
{
"content": "workspace_backup_restore",
"role": "assistant"
}
] |
def x(ctx):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,
directory=ctx.directory, mets_basename=ctx.mets_basename,
automatic_backup=ctx.automatic_backup))
backup_manager.undo()
|
workspace_backup_undo
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(ctx):\n backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver,\n directory=ctx.directory, mets_basename=ctx.mets_basename,\n automatic_backup=ctx.automatic_backup))\n backup_manager.undo()\n",
"role": "user"
},
{
"content": "workspace_backup_undo",
"role": "assistant"
}
] |
def x(validator_class):
validate_properties = validator_class.VALIDATORS['properties']
def x(validator, properties, instance, schema):
for prop, subschema in properties.items():
if 'default' in subschema:
instance.setdefault(prop, subschema['default'])
for error in validate_properties(validator, properties, instance,
schema):
yield error
return validators.extend(validator_class, {'properties': set_defaults})
|
extend_with_default
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(validator_class):\n validate_properties = validator_class.VALIDATORS['properties']\n\n def x(validator, properties, instance, schema):\n for prop, subschema in properties.items():\n if 'default' in subschema:\n instance.setdefault(prop, subschema['default'])\n for error in validate_properties(validator, properties, instance,\n schema):\n yield error\n return validators.extend(validator_class, {'properties': set_defaults})\n",
"role": "user"
},
{
"content": "extend_with_default",
"role": "assistant"
}
] |
def x(obj, schema):
if isinstance(obj, str):
obj = json.loads(obj)
return JsonValidator(schema)._validate(obj)
|
validate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(obj, schema):\n if isinstance(obj, str):\n obj = json.loads(obj)\n return JsonValidator(schema)._validate(obj)\n",
"role": "user"
},
{
"content": "validate",
"role": "assistant"
}
] |
def x(self, obj):
report = ValidationReport()
if not self.validator.is_valid(obj):
for v in self.validator.iter_errors(obj):
report.add_error('[%s] %s' % ('.'.join(str(vv) for vv in v.path
), v.message))
return report
|
validate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, obj):\n report = ValidationReport()\n if not self.validator.is_valid(obj):\n for v in self.validator.iter_errors(obj):\n report.add_error('[%s] %s' % ('.'.join(str(vv) for vv in v.path\n ), v.message))\n return report\n",
"role": "user"
},
{
"content": "validate",
"role": "assistant"
}
] |
def x(processorClass, ocrd_tool=None, mets_url=None, resolver=None,
workspace=None, page_id=None, log_level=None, input_file_grp=None,
output_file_grp=None, parameter=None, working_dir=None):
workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
if parameter is not None:
if not '://' in parameter:
fname = os.path.abspath(parameter)
else:
fname = workspace.download_url(parameter)
with open(fname, 'r') as param_json_file:
parameter = json.load(param_json_file)
else:
parameter = {}
log.debug('Running processor %s', processorClass)
processor = processorClass(workspace, ocrd_tool=ocrd_tool, page_id=
page_id, input_file_grp=input_file_grp, output_file_grp=
output_file_grp, parameter=parameter)
ocrd_tool = processor.ocrd_tool
name = '%s v%s' % (ocrd_tool['executable'], processor.version)
otherrole = ocrd_tool['steps'][0]
log.debug('Processor instance %s (%s doing %s)', processor, name, otherrole
)
processor.process()
workspace.mets.add_agent(name=name, _type='OTHER', othertype='SOFTWARE',
role='OTHER', otherrole=otherrole)
workspace.save_mets()
return processor
|
run_processor
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(processorClass, ocrd_tool=None, mets_url=None, resolver=None,\n workspace=None, page_id=None, log_level=None, input_file_grp=None,\n output_file_grp=None, parameter=None, working_dir=None):\n workspace = _get_workspace(workspace, resolver, mets_url, working_dir)\n if parameter is not None:\n if not '://' in parameter:\n fname = os.path.abspath(parameter)\n else:\n fname = workspace.download_url(parameter)\n with open(fname, 'r') as param_json_file:\n parameter = json.load(param_json_file)\n else:\n parameter = {}\n log.debug('Running processor %s', processorClass)\n processor = processorClass(workspace, ocrd_tool=ocrd_tool, page_id=\n page_id, input_file_grp=input_file_grp, output_file_grp=\n output_file_grp, parameter=parameter)\n ocrd_tool = processor.ocrd_tool\n name = '%s v%s' % (ocrd_tool['executable'], processor.version)\n otherrole = ocrd_tool['steps'][0]\n log.debug('Processor instance %s (%s doing %s)', processor, name, otherrole\n )\n processor.process()\n workspace.mets.add_agent(name=name, _type='OTHER', othertype='SOFTWARE',\n role='OTHER', otherrole=otherrole)\n workspace.save_mets()\n return processor\n",
"role": "user"
},
{
"content": "run_processor",
"role": "assistant"
}
] |
def x(executable, mets_url=None, resolver=None, workspace=None, page_id=
None, log_level=None, input_file_grp=None, output_file_grp=None,
parameter=None, working_dir=None):
workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
args = [executable, '--working-dir', workspace.directory]
args += ['--mets', mets_url]
if log_level:
args += ['--log-level', log_level]
if page_id:
args += ['--page-id', page_id]
if input_file_grp:
args += ['--input-file-grp', input_file_grp]
if output_file_grp:
args += ['--output-file-grp', output_file_grp]
if parameter:
args += ['--parameter', parameter]
log.debug("Running subprocess '%s'", ' '.join(args))
return subprocess.call(args)
|
run_cli
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(executable, mets_url=None, resolver=None, workspace=None, page_id=\n None, log_level=None, input_file_grp=None, output_file_grp=None,\n parameter=None, working_dir=None):\n workspace = _get_workspace(workspace, resolver, mets_url, working_dir)\n args = [executable, '--working-dir', workspace.directory]\n args += ['--mets', mets_url]\n if log_level:\n args += ['--log-level', log_level]\n if page_id:\n args += ['--page-id', page_id]\n if input_file_grp:\n args += ['--input-file-grp', input_file_grp]\n if output_file_grp:\n args += ['--output-file-grp', output_file_grp]\n if parameter:\n args += ['--parameter', parameter]\n log.debug(\"Running subprocess '%s'\", ' '.join(args))\n return subprocess.call(args)\n",
"role": "user"
},
{
"content": "run_cli",
"role": "assistant"
}
] |
def x(self):
return self.workspace.mets.find_files(fileGrp=self.input_file_grp,
pageId=self.page_id)
|
input_files
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return self.workspace.mets.find_files(fileGrp=self.input_file_grp,\n pageId=self.page_id)\n",
"role": "user"
},
{
"content": "input_files",
"role": "assistant"
}
] |
def x(input_file):
if input_file.local_filename is None:
raise Exception("input_file must have 'local_filename' property")
exif = exif_from_filename(input_file.local_filename)
now = datetime.now()
return PcGtsType(Metadata=MetadataType(Creator='OCR-D/core %s' %
VERSION, Created=now, LastChange=now), Page=PageType(imageWidth=
exif.width, imageHeight=exif.height, imageFilename=input_file.url if
input_file.url is not None else 'file://' + input_file.local_filename))
|
page_from_image
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_file):\n if input_file.local_filename is None:\n raise Exception(\"input_file must have 'local_filename' property\")\n exif = exif_from_filename(input_file.local_filename)\n now = datetime.now()\n return PcGtsType(Metadata=MetadataType(Creator='OCR-D/core %s' %\n VERSION, Created=now, LastChange=now), Page=PageType(imageWidth=\n exif.width, imageHeight=exif.height, imageFilename=input_file.url if\n input_file.url is not None else 'file://' + input_file.local_filename))\n",
"role": "user"
},
{
"content": "page_from_image",
"role": "assistant"
}
] |
def x(input_file):
if input_file.mimetype.startswith('image'):
return page_from_image(input_file)
if input_file.mimetype == MIMETYPE_PAGE:
return parse(input_file.local_filename, silence=True)
raise Exception("Unsupported mimetype '%s'" % input_file.mimetype)
|
page_from_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(input_file):\n if input_file.mimetype.startswith('image'):\n return page_from_image(input_file)\n if input_file.mimetype == MIMETYPE_PAGE:\n return parse(input_file.local_filename, silence=True)\n raise Exception(\"Unsupported mimetype '%s'\" % input_file.mimetype)\n",
"role": "user"
},
{
"content": "page_from_file",
"role": "assistant"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.