Dataset columns and per-column value statistics: nwo (string, length 5-106); sha (string, length 40); path (string, length 4-174); language (string, 1 distinct value); identifier (string, length 1-140); parameters (string, length 0-87.7k); argument_list (string, 1 distinct value); return_statement (string, length 0-426k); docstring (string, length 0-64.3k); docstring_summary (string, length 0-26.3k); docstring_tokens (list); function (string, length 18-4.83M); function_tokens (list); url (string, length 83-304).

nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
thushv89/attention_keras
|
322a16ee147122026b63305aaa5e899d9e5de883
|
src/examples/utils/model_helper.py
|
python
|
plot_attention_weights
|
(encoder_inputs, attention_weights, en_id2word, fr_id2word, filename=None)
|
Plots attention weights
:param encoder_inputs: Sequence of word ids (list/numpy.ndarray)
:param attention_weights: Sequence of (<word_id_at_decode_step_t>:<attention_weights_at_decode_step_t>)
:param en_id2word: dict
:param fr_id2word: dict
:return:
|
Plots attention weights
:param encoder_inputs: Sequence of word ids (list/numpy.ndarray)
:param attention_weights: Sequence of (<word_id_at_decode_step_t>:<attention_weights_at_decode_step_t>)
:param en_id2word: dict
:param fr_id2word: dict
:return:
|
[
"Plots",
"attention",
"weights",
":",
"param",
"encoder_inputs",
":",
"Sequence",
"of",
"word",
"ids",
"(",
"list",
"/",
"numpy",
".",
"ndarray",
")",
":",
"param",
"attention_weights",
":",
"Sequence",
"of",
"(",
"<word_id_at_decode_step_t",
">",
":",
"<attention_weights_at_decode_step_t",
">",
")",
":",
"param",
"en_id2word",
":",
"dict",
":",
"param",
"fr_id2word",
":",
"dict",
":",
"return",
":"
] |
def plot_attention_weights(encoder_inputs, attention_weights, en_id2word, fr_id2word, filename=None):
"""
Plots attention weights
:param encoder_inputs: Sequence of word ids (list/numpy.ndarray)
:param attention_weights: Sequence of (<word_id_at_decode_step_t>:<attention_weights_at_decode_step_t>)
:param en_id2word: dict
:param fr_id2word: dict
:return:
"""
if len(attention_weights) == 0:
    print('Your attention weights were empty. No attention map was saved to disk. ' +
'\nPlease check if the decoder produced a proper translation')
return
mats = []
dec_inputs = []
for dec_ind, attn in attention_weights:
mats.append(attn.reshape(-1))
dec_inputs.append(dec_ind)
attention_mat = np.transpose(np.array(mats))
fig, ax = plt.subplots(figsize=(32, 32))
ax.imshow(attention_mat)
ax.set_xticks(np.arange(attention_mat.shape[1]))
ax.set_yticks(np.arange(attention_mat.shape[0]))
ax.set_xticklabels([fr_id2word[inp] if inp != 0 else "<Res>" for inp in dec_inputs])
ax.set_yticklabels([en_id2word[inp] if inp != 0 else "<Res>" for inp in encoder_inputs.ravel()])
ax.tick_params(labelsize=32)
ax.tick_params(axis='x', labelrotation=90)
if not os.path.exists(config.RESULTS_DIR):
os.mkdir(config.RESULTS_DIR)
if filename is None:
plt.savefig(os.path.join(config.RESULTS_DIR, 'attention.png'))
else:
plt.savefig(os.path.join(config.RESULTS_DIR, '{}'.format(filename)))
|
[
"def",
"plot_attention_weights",
"(",
"encoder_inputs",
",",
"attention_weights",
",",
"en_id2word",
",",
"fr_id2word",
",",
"filename",
"=",
"None",
")",
":",
"if",
"len",
"(",
"attention_weights",
")",
"==",
"0",
":",
"print",
"(",
"'Your attention weights was empty. No attention map saved to the disk. '",
"+",
"'\\nPlease check if the decoder produced a proper translation'",
")",
"return",
"mats",
"=",
"[",
"]",
"dec_inputs",
"=",
"[",
"]",
"for",
"dec_ind",
",",
"attn",
"in",
"attention_weights",
":",
"mats",
".",
"append",
"(",
"attn",
".",
"reshape",
"(",
"-",
"1",
")",
")",
"dec_inputs",
".",
"append",
"(",
"dec_ind",
")",
"attention_mat",
"=",
"np",
".",
"transpose",
"(",
"np",
".",
"array",
"(",
"mats",
")",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"32",
",",
"32",
")",
")",
"ax",
".",
"imshow",
"(",
"attention_mat",
")",
"ax",
".",
"set_xticks",
"(",
"np",
".",
"arange",
"(",
"attention_mat",
".",
"shape",
"[",
"1",
"]",
")",
")",
"ax",
".",
"set_yticks",
"(",
"np",
".",
"arange",
"(",
"attention_mat",
".",
"shape",
"[",
"0",
"]",
")",
")",
"ax",
".",
"set_xticklabels",
"(",
"[",
"fr_id2word",
"[",
"inp",
"]",
"if",
"inp",
"!=",
"0",
"else",
"\"<Res>\"",
"for",
"inp",
"in",
"dec_inputs",
"]",
")",
"ax",
".",
"set_yticklabels",
"(",
"[",
"en_id2word",
"[",
"inp",
"]",
"if",
"inp",
"!=",
"0",
"else",
"\"<Res>\"",
"for",
"inp",
"in",
"encoder_inputs",
".",
"ravel",
"(",
")",
"]",
")",
"ax",
".",
"tick_params",
"(",
"labelsize",
"=",
"32",
")",
"ax",
".",
"tick_params",
"(",
"axis",
"=",
"'x'",
",",
"labelrotation",
"=",
"90",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config",
".",
"RESULTS_DIR",
")",
":",
"os",
".",
"mkdir",
"(",
"config",
".",
"RESULTS_DIR",
")",
"if",
"filename",
"is",
"None",
":",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"RESULTS_DIR",
",",
"'attention.png'",
")",
")",
"else",
":",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"RESULTS_DIR",
",",
"'{}'",
".",
"format",
"(",
"filename",
")",
")",
")"
] |
https://github.com/thushv89/attention_keras/blob/322a16ee147122026b63305aaa5e899d9e5de883/src/examples/utils/model_helper.py#L8-L47
|
||
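A usage sketch for `plot_attention_weights` above, assuming the source module's own imports (`numpy`, `matplotlib`, `os`, and the repo's `config` with `RESULTS_DIR`) are in place; the word ids and id-to-word dictionaries are hypothetical.

```python
import numpy as np

# Hypothetical inputs: one encoded source sentence and two decode steps.
encoder_inputs = np.array([[12, 7, 3]])
attention_weights = [
    (5, np.array([[0.7, 0.2, 0.1]])),  # (decoded word id, weights over 3 encoder steps)
    (9, np.array([[0.1, 0.6, 0.3]])),
]
en_id2word = {12: 'the', 7: 'cat', 3: 'sat'}
fr_id2word = {5: 'le', 9: 'chat'}

plot_attention_weights(encoder_inputs, attention_weights,
                       en_id2word, fr_id2word, filename='attention_example.png')
```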
Tautulli/Tautulli
|
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
|
plexpy/libraries.py
|
python
|
Libraries.get_datatables_list
|
(self, kwargs=None, grouping=None)
|
return dict
|
[] |
def get_datatables_list(self, kwargs=None, grouping=None):
default_return = {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': []}
data_tables = datatables.DataTables()
custom_where = [['library_sections.deleted_section', 0]]
if grouping is None:
grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
if session.get_session_shared_libraries():
custom_where.append(['library_sections.section_id', session.get_session_shared_libraries()])
group_by = 'session_history.reference_id' if grouping else 'session_history.id'
columns = ['library_sections.id AS row_id',
'library_sections.server_id',
'library_sections.section_id',
'library_sections.section_name',
'library_sections.section_type',
'library_sections.count',
'library_sections.parent_count',
'library_sections.child_count',
'library_sections.thumb AS library_thumb',
'library_sections.custom_thumb_url AS custom_thumb',
'library_sections.art AS library_art',
'library_sections.custom_art_url AS custom_art',
'COUNT(DISTINCT %s) AS plays' % group_by,
'SUM(CASE WHEN session_history.stopped > 0 THEN (session_history.stopped - session_history.started) \
ELSE 0 END) - SUM(CASE WHEN session_history.paused_counter IS NULL THEN 0 ELSE \
session_history.paused_counter END) AS duration',
'MAX(session_history.started) AS last_accessed',
'MAX(session_history.id) AS history_row_id',
'session_history_metadata.full_title AS last_played',
'session_history.rating_key',
'session_history_metadata.media_type',
'session_history_metadata.thumb',
'session_history_metadata.parent_thumb',
'session_history_metadata.grandparent_thumb',
'session_history_metadata.parent_title',
'session_history_metadata.year',
'session_history_metadata.media_index',
'session_history_metadata.parent_media_index',
'session_history_metadata.content_rating',
'session_history_metadata.labels',
'session_history_metadata.live',
'session_history_metadata.added_at',
'session_history_metadata.originally_available_at',
'session_history_metadata.guid',
'library_sections.do_notify',
'library_sections.do_notify_created',
'library_sections.keep_history',
'library_sections.is_active'
]
try:
query = data_tables.ssp_query(table_name='library_sections',
columns=columns,
custom_where=custom_where,
group_by=['library_sections.server_id', 'library_sections.section_id'],
join_types=['LEFT OUTER JOIN',
'LEFT OUTER JOIN',
'LEFT OUTER JOIN'],
join_tables=['session_history',
'session_history_metadata',
'session_history_media_info'],
join_evals=[['session_history.section_id', 'library_sections.section_id'],
['session_history.id', 'session_history_metadata.id'],
['session_history.id', 'session_history_media_info.id']],
kwargs=kwargs)
except Exception as e:
logger.warn("Tautulli Libraries :: Unable to execute database query for get_list: %s." % e)
return default_return
result = query['result']
rows = []
for item in result:
if item['media_type'] == 'episode' and item['parent_thumb']:
thumb = item['parent_thumb']
elif item['media_type'] == 'episode':
thumb = item['grandparent_thumb']
else:
thumb = item['thumb']
if item['custom_thumb'] and item['custom_thumb'] != item['library_thumb']:
library_thumb = item['custom_thumb']
elif item['library_thumb']:
library_thumb = item['library_thumb']
else:
library_thumb = common.DEFAULT_COVER_THUMB
if item['custom_art'] and item['custom_art'] != item['library_art']:
library_art = item['custom_art']
else:
library_art = item['library_art']
row = {'row_id': item['row_id'],
'server_id': item['server_id'],
'section_id': item['section_id'],
'section_name': item['section_name'],
'section_type': item['section_type'],
'count': item['count'],
'parent_count': item['parent_count'],
'child_count': item['child_count'],
'library_thumb': library_thumb,
'library_art': library_art,
'plays': item['plays'],
'duration': item['duration'],
'last_accessed': item['last_accessed'],
'history_row_id': item['history_row_id'],
'last_played': item['last_played'],
'rating_key': item['rating_key'],
'media_type': item['media_type'],
'thumb': thumb,
'parent_title': item['parent_title'],
'year': item['year'],
'media_index': item['media_index'],
'parent_media_index': item['parent_media_index'],
'content_rating': item['content_rating'],
'labels': item['labels'].split(';') if item['labels'] else (),
'live': item['live'],
'originally_available_at': item['originally_available_at'],
'guid': item['guid'],
'do_notify': helpers.checked(item['do_notify']),
'do_notify_created': helpers.checked(item['do_notify_created']),
'keep_history': helpers.checked(item['keep_history']),
'is_active': item['is_active']
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': session.mask_session_info(rows),
'draw': query['draw']
}
return dict
|
[
"def",
"get_datatables_list",
"(",
"self",
",",
"kwargs",
"=",
"None",
",",
"grouping",
"=",
"None",
")",
":",
"default_return",
"=",
"{",
"'recordsFiltered'",
":",
"0",
",",
"'recordsTotal'",
":",
"0",
",",
"'draw'",
":",
"0",
",",
"'data'",
":",
"[",
"]",
"}",
"data_tables",
"=",
"datatables",
".",
"DataTables",
"(",
")",
"custom_where",
"=",
"[",
"[",
"'library_sections.deleted_section'",
",",
"0",
"]",
"]",
"if",
"grouping",
"is",
"None",
":",
"grouping",
"=",
"plexpy",
".",
"CONFIG",
".",
"GROUP_HISTORY_TABLES",
"if",
"session",
".",
"get_session_shared_libraries",
"(",
")",
":",
"custom_where",
".",
"append",
"(",
"[",
"'library_sections.section_id'",
",",
"session",
".",
"get_session_shared_libraries",
"(",
")",
"]",
")",
"group_by",
"=",
"'session_history.reference_id'",
"if",
"grouping",
"else",
"'session_history.id'",
"columns",
"=",
"[",
"'library_sections.id AS row_id'",
",",
"'library_sections.server_id'",
",",
"'library_sections.section_id'",
",",
"'library_sections.section_name'",
",",
"'library_sections.section_type'",
",",
"'library_sections.count'",
",",
"'library_sections.parent_count'",
",",
"'library_sections.child_count'",
",",
"'library_sections.thumb AS library_thumb'",
",",
"'library_sections.custom_thumb_url AS custom_thumb'",
",",
"'library_sections.art AS library_art'",
",",
"'library_sections.custom_art_url AS custom_art'",
",",
"'COUNT(DISTINCT %s) AS plays'",
"%",
"group_by",
",",
"'SUM(CASE WHEN session_history.stopped > 0 THEN (session_history.stopped - session_history.started) \\\n ELSE 0 END) - SUM(CASE WHEN session_history.paused_counter IS NULL THEN 0 ELSE \\\n session_history.paused_counter END) AS duration'",
",",
"'MAX(session_history.started) AS last_accessed'",
",",
"'MAX(session_history.id) AS history_row_id'",
",",
"'session_history_metadata.full_title AS last_played'",
",",
"'session_history.rating_key'",
",",
"'session_history_metadata.media_type'",
",",
"'session_history_metadata.thumb'",
",",
"'session_history_metadata.parent_thumb'",
",",
"'session_history_metadata.grandparent_thumb'",
",",
"'session_history_metadata.parent_title'",
",",
"'session_history_metadata.year'",
",",
"'session_history_metadata.media_index'",
",",
"'session_history_metadata.parent_media_index'",
",",
"'session_history_metadata.content_rating'",
",",
"'session_history_metadata.labels'",
",",
"'session_history_metadata.live'",
",",
"'session_history_metadata.added_at'",
",",
"'session_history_metadata.originally_available_at'",
",",
"'session_history_metadata.guid'",
",",
"'library_sections.do_notify'",
",",
"'library_sections.do_notify_created'",
",",
"'library_sections.keep_history'",
",",
"'library_sections.is_active'",
"]",
"try",
":",
"query",
"=",
"data_tables",
".",
"ssp_query",
"(",
"table_name",
"=",
"'library_sections'",
",",
"columns",
"=",
"columns",
",",
"custom_where",
"=",
"custom_where",
",",
"group_by",
"=",
"[",
"'library_sections.server_id'",
",",
"'library_sections.section_id'",
"]",
",",
"join_types",
"=",
"[",
"'LEFT OUTER JOIN'",
",",
"'LEFT OUTER JOIN'",
",",
"'LEFT OUTER JOIN'",
"]",
",",
"join_tables",
"=",
"[",
"'session_history'",
",",
"'session_history_metadata'",
",",
"'session_history_media_info'",
"]",
",",
"join_evals",
"=",
"[",
"[",
"'session_history.section_id'",
",",
"'library_sections.section_id'",
"]",
",",
"[",
"'session_history.id'",
",",
"'session_history_metadata.id'",
"]",
",",
"[",
"'session_history.id'",
",",
"'session_history_media_info.id'",
"]",
"]",
",",
"kwargs",
"=",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"Tautulli Libraries :: Unable to execute database query for get_list: %s.\"",
"%",
"e",
")",
"return",
"default_return",
"result",
"=",
"query",
"[",
"'result'",
"]",
"rows",
"=",
"[",
"]",
"for",
"item",
"in",
"result",
":",
"if",
"item",
"[",
"'media_type'",
"]",
"==",
"'episode'",
"and",
"item",
"[",
"'parent_thumb'",
"]",
":",
"thumb",
"=",
"item",
"[",
"'parent_thumb'",
"]",
"elif",
"item",
"[",
"'media_type'",
"]",
"==",
"'episode'",
":",
"thumb",
"=",
"item",
"[",
"'grandparent_thumb'",
"]",
"else",
":",
"thumb",
"=",
"item",
"[",
"'thumb'",
"]",
"if",
"item",
"[",
"'custom_thumb'",
"]",
"and",
"item",
"[",
"'custom_thumb'",
"]",
"!=",
"item",
"[",
"'library_thumb'",
"]",
":",
"library_thumb",
"=",
"item",
"[",
"'custom_thumb'",
"]",
"elif",
"item",
"[",
"'library_thumb'",
"]",
":",
"library_thumb",
"=",
"item",
"[",
"'library_thumb'",
"]",
"else",
":",
"library_thumb",
"=",
"common",
".",
"DEFAULT_COVER_THUMB",
"if",
"item",
"[",
"'custom_art'",
"]",
"and",
"item",
"[",
"'custom_art'",
"]",
"!=",
"item",
"[",
"'library_art'",
"]",
":",
"library_art",
"=",
"item",
"[",
"'custom_art'",
"]",
"else",
":",
"library_art",
"=",
"item",
"[",
"'library_art'",
"]",
"row",
"=",
"{",
"'row_id'",
":",
"item",
"[",
"'row_id'",
"]",
",",
"'server_id'",
":",
"item",
"[",
"'server_id'",
"]",
",",
"'section_id'",
":",
"item",
"[",
"'section_id'",
"]",
",",
"'section_name'",
":",
"item",
"[",
"'section_name'",
"]",
",",
"'section_type'",
":",
"item",
"[",
"'section_type'",
"]",
",",
"'count'",
":",
"item",
"[",
"'count'",
"]",
",",
"'parent_count'",
":",
"item",
"[",
"'parent_count'",
"]",
",",
"'child_count'",
":",
"item",
"[",
"'child_count'",
"]",
",",
"'library_thumb'",
":",
"library_thumb",
",",
"'library_art'",
":",
"library_art",
",",
"'plays'",
":",
"item",
"[",
"'plays'",
"]",
",",
"'duration'",
":",
"item",
"[",
"'duration'",
"]",
",",
"'last_accessed'",
":",
"item",
"[",
"'last_accessed'",
"]",
",",
"'history_row_id'",
":",
"item",
"[",
"'history_row_id'",
"]",
",",
"'last_played'",
":",
"item",
"[",
"'last_played'",
"]",
",",
"'rating_key'",
":",
"item",
"[",
"'rating_key'",
"]",
",",
"'media_type'",
":",
"item",
"[",
"'media_type'",
"]",
",",
"'thumb'",
":",
"thumb",
",",
"'parent_title'",
":",
"item",
"[",
"'parent_title'",
"]",
",",
"'year'",
":",
"item",
"[",
"'year'",
"]",
",",
"'media_index'",
":",
"item",
"[",
"'media_index'",
"]",
",",
"'parent_media_index'",
":",
"item",
"[",
"'parent_media_index'",
"]",
",",
"'content_rating'",
":",
"item",
"[",
"'content_rating'",
"]",
",",
"'labels'",
":",
"item",
"[",
"'labels'",
"]",
".",
"split",
"(",
"';'",
")",
"if",
"item",
"[",
"'labels'",
"]",
"else",
"(",
")",
",",
"'live'",
":",
"item",
"[",
"'live'",
"]",
",",
"'originally_available_at'",
":",
"item",
"[",
"'originally_available_at'",
"]",
",",
"'guid'",
":",
"item",
"[",
"'guid'",
"]",
",",
"'do_notify'",
":",
"helpers",
".",
"checked",
"(",
"item",
"[",
"'do_notify'",
"]",
")",
",",
"'do_notify_created'",
":",
"helpers",
".",
"checked",
"(",
"item",
"[",
"'do_notify_created'",
"]",
")",
",",
"'keep_history'",
":",
"helpers",
".",
"checked",
"(",
"item",
"[",
"'keep_history'",
"]",
")",
",",
"'is_active'",
":",
"item",
"[",
"'is_active'",
"]",
"}",
"rows",
".",
"append",
"(",
"row",
")",
"dict",
"=",
"{",
"'recordsFiltered'",
":",
"query",
"[",
"'filteredCount'",
"]",
",",
"'recordsTotal'",
":",
"query",
"[",
"'totalCount'",
"]",
",",
"'data'",
":",
"session",
".",
"mask_session_info",
"(",
"rows",
")",
",",
"'draw'",
":",
"query",
"[",
"'draw'",
"]",
"}",
"return",
"dict"
] |
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/plexpy/libraries.py#L313-L453
|
|||
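A sketch of how `get_datatables_list` is typically reached; the exact `kwargs` keys follow the DataTables server-side protocol consumed by `ssp_query` and are an assumption here.

```python
# Hypothetical caller; Tautulli's web layer normally builds kwargs from the
# DataTables request (draw/start/length/search/order columns).
libraries = Libraries()
result = libraries.get_datatables_list(
    kwargs={'draw': 1, 'start': 0, 'length': 25},
    grouping=True,
)
print(result['recordsTotal'], len(result['data']))
```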
materialsproject/pymatgen
|
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
|
pymatgen/electronic_structure/boltztrap2.py
|
python
|
VasprunBSLoader.__init__
|
(self, obj, structure=None, nelect=None)
|
Args:
obj: Either a pmg Vasprun or a BandStructure object.
structure: Structure object in case is not included in the BandStructure object.
nelect: number of electrons in case a BandStructure obj is provided.
Example:
vrun = Vasprun('vasprun.xml')
data = VasprunBSLoader(vrun)
|
Args:
obj: Either a pmg Vasprun or a BandStructure object.
structure: Structure object in case is not included in the BandStructure object.
nelect: number of electrons in case a BandStructure obj is provided.
Example:
vrun = Vasprun('vasprun.xml')
data = VasprunBSLoader(vrun)
|
[
"Args",
":",
"obj",
":",
"Either",
"a",
"pmg",
"Vasprun",
"or",
"a",
"BandStructure",
"object",
".",
"structure",
":",
"Structure",
"object",
"in",
"case",
"is",
"not",
"included",
"in",
"the",
"BandStructure",
"object",
".",
"nelect",
":",
"number",
"of",
"electrons",
"in",
"case",
"a",
"BandStructure",
"obj",
"is",
"provided",
".",
"Example",
":",
"vrun",
"=",
"Vasprun",
"(",
"vasprun",
".",
"xml",
")",
"data",
"=",
"VasprunBSLoader",
"(",
"vrun",
")"
] |
def __init__(self, obj, structure=None, nelect=None):
"""
Args:
obj: Either a pmg Vasprun or a BandStructure object.
structure: Structure object in case is not included in the BandStructure object.
nelect: number of electrons in case a BandStructure obj is provided.
Example:
vrun = Vasprun('vasprun.xml')
data = VasprunBSLoader(vrun)
"""
if isinstance(obj, Vasprun):
structure = obj.final_structure
nelect = obj.parameters["NELECT"]
bs_obj = obj.get_band_structure()
elif isinstance(obj, BandStructure):
bs_obj = obj
else:
raise BoltztrapError("The object provided is neither a Bandstructure nor a Vasprun.")
self.kpoints = np.array([kp.frac_coords for kp in bs_obj.kpoints])
if bs_obj.structure:
self.structure = bs_obj.structure
elif structure:
self.structure = structure
else:
raise BoltztrapError("A structure must be given.")
self.atoms = AseAtomsAdaptor.get_atoms(self.structure)
self.proj_all = None
if bs_obj.projections:
self.proj_all = {sp: p.transpose((1, 0, 3, 2)) for sp, p in bs_obj.projections.items()}
e = np.array(list(bs_obj.bands.values()))
e = e.reshape(-1, e.shape[-1])
self.ebands_all = e * units.eV
self.is_spin_polarized = bs_obj.is_spin_polarized
if bs_obj.is_spin_polarized:
self.dosweight = 1.0
else:
self.dosweight = 2.0
self.lattvec = self.atoms.get_cell().T * units.Angstrom
self.mommat_all = None # not implemented yet
self.mommat = None # not implemented yet
self.magmom = None # not implemented yet
self.fermi = bs_obj.efermi * units.eV
self.UCvol = self.structure.volume * units.Angstrom ** 3
if not bs_obj.is_metal():
self.vbm_idx = max(bs_obj.get_vbm()["band_index"][Spin.up] + bs_obj.get_vbm()["band_index"][Spin.down])
self.cbm_idx = min(bs_obj.get_cbm()["band_index"][Spin.up] + bs_obj.get_cbm()["band_index"][Spin.down])
self.vbm = bs_obj.get_vbm()["energy"]
self.cbm = bs_obj.get_cbm()["energy"]
else:
self.vbm_idx = None
self.cbm_idx = None
self.vbm = self.fermi
self.cbm = self.fermi
if nelect:
self.nelect_all = nelect
elif self.vbm_idx:
self.nelect_all = self.vbm_idx + self.cbm_idx + 1
else:
raise BoltztrapError("nelect must be given.")
|
[
"def",
"__init__",
"(",
"self",
",",
"obj",
",",
"structure",
"=",
"None",
",",
"nelect",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Vasprun",
")",
":",
"structure",
"=",
"obj",
".",
"final_structure",
"nelect",
"=",
"obj",
".",
"parameters",
"[",
"\"NELECT\"",
"]",
"bs_obj",
"=",
"obj",
".",
"get_band_structure",
"(",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"BandStructure",
")",
":",
"bs_obj",
"=",
"obj",
"else",
":",
"raise",
"BoltztrapError",
"(",
"\"The object provided is neither a Bandstructure nor a Vasprun.\"",
")",
"self",
".",
"kpoints",
"=",
"np",
".",
"array",
"(",
"[",
"kp",
".",
"frac_coords",
"for",
"kp",
"in",
"bs_obj",
".",
"kpoints",
"]",
")",
"if",
"bs_obj",
".",
"structure",
":",
"self",
".",
"structure",
"=",
"bs_obj",
".",
"structure",
"elif",
"structure",
":",
"self",
".",
"structure",
"=",
"structure",
"else",
":",
"raise",
"BoltztrapError",
"(",
"\"A structure must be given.\"",
")",
"self",
".",
"atoms",
"=",
"AseAtomsAdaptor",
".",
"get_atoms",
"(",
"self",
".",
"structure",
")",
"self",
".",
"proj_all",
"=",
"None",
"if",
"bs_obj",
".",
"projections",
":",
"self",
".",
"proj_all",
"=",
"{",
"sp",
":",
"p",
".",
"transpose",
"(",
"(",
"1",
",",
"0",
",",
"3",
",",
"2",
")",
")",
"for",
"sp",
",",
"p",
"in",
"bs_obj",
".",
"projections",
".",
"items",
"(",
")",
"}",
"e",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"bs_obj",
".",
"bands",
".",
"values",
"(",
")",
")",
")",
"e",
"=",
"e",
".",
"reshape",
"(",
"-",
"1",
",",
"e",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"self",
".",
"ebands_all",
"=",
"e",
"*",
"units",
".",
"eV",
"self",
".",
"is_spin_polarized",
"=",
"bs_obj",
".",
"is_spin_polarized",
"if",
"bs_obj",
".",
"is_spin_polarized",
":",
"self",
".",
"dosweight",
"=",
"1.0",
"else",
":",
"self",
".",
"dosweight",
"=",
"2.0",
"self",
".",
"lattvec",
"=",
"self",
".",
"atoms",
".",
"get_cell",
"(",
")",
".",
"T",
"*",
"units",
".",
"Angstrom",
"self",
".",
"mommat_all",
"=",
"None",
"# not implemented yet",
"self",
".",
"mommat",
"=",
"None",
"# not implemented yet",
"self",
".",
"magmom",
"=",
"None",
"# not implemented yet",
"self",
".",
"fermi",
"=",
"bs_obj",
".",
"efermi",
"*",
"units",
".",
"eV",
"self",
".",
"UCvol",
"=",
"self",
".",
"structure",
".",
"volume",
"*",
"units",
".",
"Angstrom",
"**",
"3",
"if",
"not",
"bs_obj",
".",
"is_metal",
"(",
")",
":",
"self",
".",
"vbm_idx",
"=",
"max",
"(",
"bs_obj",
".",
"get_vbm",
"(",
")",
"[",
"\"band_index\"",
"]",
"[",
"Spin",
".",
"up",
"]",
"+",
"bs_obj",
".",
"get_vbm",
"(",
")",
"[",
"\"band_index\"",
"]",
"[",
"Spin",
".",
"down",
"]",
")",
"self",
".",
"cbm_idx",
"=",
"min",
"(",
"bs_obj",
".",
"get_cbm",
"(",
")",
"[",
"\"band_index\"",
"]",
"[",
"Spin",
".",
"up",
"]",
"+",
"bs_obj",
".",
"get_cbm",
"(",
")",
"[",
"\"band_index\"",
"]",
"[",
"Spin",
".",
"down",
"]",
")",
"self",
".",
"vbm",
"=",
"bs_obj",
".",
"get_vbm",
"(",
")",
"[",
"\"energy\"",
"]",
"self",
".",
"cbm",
"=",
"bs_obj",
".",
"get_cbm",
"(",
")",
"[",
"\"energy\"",
"]",
"else",
":",
"self",
".",
"vbm_idx",
"=",
"None",
"self",
".",
"cbm_idx",
"=",
"None",
"self",
".",
"vbm",
"=",
"self",
".",
"fermi",
"self",
".",
"cbm",
"=",
"self",
".",
"fermi",
"if",
"nelect",
":",
"self",
".",
"nelect_all",
"=",
"nelect",
"elif",
"self",
".",
"vbm_idx",
":",
"self",
".",
"nelect_all",
"=",
"self",
".",
"vbm_idx",
"+",
"self",
".",
"cbm_idx",
"+",
"1",
"else",
":",
"raise",
"BoltztrapError",
"(",
"\"nelect must be given.\"",
")"
] |
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/electronic_structure/boltztrap2.py#L66-L134
|
||
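The docstring's own example, completed with the imports it assumes (module paths taken from this row's `path` field and pymatgen's public layout):

```python
from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.boltztrap2 import VasprunBSLoader

vrun = Vasprun('vasprun.xml')   # parsed VASP output
data = VasprunBSLoader(vrun)    # structure and NELECT are read from the run itself
```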
ppizarror/pygame-menu
|
da5827a1ad0686e8ff2aa536b74bbfba73967bcf
|
pygame_menu/baseimage.py
|
python
|
BaseImage.pick_channels
|
(self, channels: ChannelType)
|
return self
|
Pick certain channels of the image; channels are ``"r"`` (red), ``"g"``
(green) and ``"b"`` (blue). The ``channels`` param is a non-empty
list/tuple of channels.
For example, ``pick_channels(['r', 'g'])``: all channels not included in
the list will be discarded.
:param channels: Channels, list or tuple containing ``"r"``, ``"g"`` or ``"b"`` (all combinations are possible)
:return: Self reference
|
Pick certain channels of the image; channels are ``"r"`` (red), ``"g"``
(green) and ``"b"`` (blue). The ``channels`` param is a non-empty
list/tuple of channels.
|
[
"Pick",
"certain",
"channels",
"of",
"the",
"image",
"channels",
"are",
"r",
"(",
"red",
")",
"g",
"(",
"green",
")",
"and",
"b",
"(",
"blue",
")",
";",
"channels",
"param",
"is",
"a",
"list",
"/",
"tuple",
"of",
"channels",
"(",
"non",
"empty",
")",
"."
] |
def pick_channels(self, channels: ChannelType) -> 'BaseImage':
"""
    Pick certain channels of the image; channels are ``"r"`` (red), ``"g"``
    (green) and ``"b"`` (blue). The ``channels`` param is a non-empty
    list/tuple of channels.
    For example, ``pick_channels(['r', 'g'])``: all channels not included in
    the list will be discarded.
:param channels: Channels, list or tuple containing ``"r"``, ``"g"`` or ``"b"`` (all combinations are possible)
:return: Self reference
"""
if isinstance(channels, str):
channels = [channels]
assert isinstance(channels, VectorInstance)
assert 1 <= len(channels) <= 3, 'maximum size of channels can be 3'
w, h = self._surface.get_size()
for x in range(w):
for y in range(h):
r, g, b, a = self._surface.get_at((x, y))
if 'r' not in channels:
r = 0
if 'g' not in channels:
g = 0
if 'b' not in channels:
b = 0
# noinspection PyArgumentList
self._surface.set_at((x, y), pygame.Color(r, g, b, a))
return self
|
[
"def",
"pick_channels",
"(",
"self",
",",
"channels",
":",
"ChannelType",
")",
"->",
"'BaseImage'",
":",
"if",
"isinstance",
"(",
"channels",
",",
"str",
")",
":",
"channels",
"=",
"[",
"channels",
"]",
"assert",
"isinstance",
"(",
"channels",
",",
"VectorInstance",
")",
"assert",
"1",
"<=",
"len",
"(",
"channels",
")",
"<=",
"3",
",",
"'maximum size of channels can be 3'",
"w",
",",
"h",
"=",
"self",
".",
"_surface",
".",
"get_size",
"(",
")",
"for",
"x",
"in",
"range",
"(",
"w",
")",
":",
"for",
"y",
"in",
"range",
"(",
"h",
")",
":",
"r",
",",
"g",
",",
"b",
",",
"a",
"=",
"self",
".",
"_surface",
".",
"get_at",
"(",
"(",
"x",
",",
"y",
")",
")",
"if",
"'r'",
"not",
"in",
"channels",
":",
"r",
"=",
"0",
"if",
"'g'",
"not",
"in",
"channels",
":",
"g",
"=",
"0",
"if",
"'b'",
"not",
"in",
"channels",
":",
"b",
"=",
"0",
"# noinspection PyArgumentList",
"self",
".",
"_surface",
".",
"set_at",
"(",
"(",
"x",
",",
"y",
")",
",",
"pygame",
".",
"Color",
"(",
"r",
",",
"g",
",",
"b",
",",
"a",
")",
")",
"return",
"self"
] |
https://github.com/ppizarror/pygame-menu/blob/da5827a1ad0686e8ff2aa536b74bbfba73967bcf/pygame_menu/baseimage.py#L572-L601
|
|
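A minimal sketch of the keep-red-and-green case from the docstring; the bundled example-image constant is an assumption about pygame-menu's resources.

```python
import pygame_menu

# IMAGE_EXAMPLE_PYGAME_MENU is assumed to be one of the library's sample images.
image = pygame_menu.BaseImage(pygame_menu.baseimage.IMAGE_EXAMPLE_PYGAME_MENU)
image.pick_channels(['r', 'g'])  # the blue component of every pixel becomes 0
```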
ukdtom/ExportTools.bundle
|
49aba4292a2897f640162a833c2792480aa4f0b6
|
Contents/Libraries/Shared/xlsxwriter/workbook.py
|
python
|
Workbook.add_worksheet
|
(self, name=None, worksheet_class=None)
|
return self._add_sheet(name, worksheet_class=worksheet_class)
|
Add a new worksheet to the Excel workbook.
Args:
name: The worksheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a worksheet object.
|
Add a new worksheet to the Excel workbook.
|
[
"Add",
"a",
"new",
"worksheet",
"to",
"the",
"Excel",
"workbook",
"."
] |
def add_worksheet(self, name=None, worksheet_class=None):
"""
Add a new worksheet to the Excel workbook.
Args:
name: The worksheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a worksheet object.
"""
if worksheet_class is None:
worksheet_class = self.worksheet_class
return self._add_sheet(name, worksheet_class=worksheet_class)
|
[
"def",
"add_worksheet",
"(",
"self",
",",
"name",
"=",
"None",
",",
"worksheet_class",
"=",
"None",
")",
":",
"if",
"worksheet_class",
"is",
"None",
":",
"worksheet_class",
"=",
"self",
".",
"worksheet_class",
"return",
"self",
".",
"_add_sheet",
"(",
"name",
",",
"worksheet_class",
"=",
"worksheet_class",
")"
] |
https://github.com/ukdtom/ExportTools.bundle/blob/49aba4292a2897f640162a833c2792480aa4f0b6/Contents/Libraries/Shared/xlsxwriter/workbook.py#L165-L179
|
|
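Standard XlsxWriter usage of `add_worksheet` (the public API, which the vendored copy above mirrors):

```python
import xlsxwriter

workbook = xlsxwriter.Workbook('report.xlsx')
sheet1 = workbook.add_worksheet()        # auto-named 'Sheet1'
sheet2 = workbook.add_worksheet('Data')  # explicit name
sheet1.write(0, 0, 'hello')
workbook.close()
```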
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
|
cb692f527e4e819b6c228187c5702d990a180043
|
external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/decimal.py
|
python
|
_dlog10
|
(c, e, p)
|
return _div_nearest(log_tenpower+log_d, 100)
|
Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1.
|
Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1.
|
[
"Given",
"integers",
"c",
"e",
"and",
"p",
"with",
"c",
">",
"0",
"p",
">",
"=",
"0",
"compute",
"an",
"integer",
"approximation",
"to",
"10",
"**",
"p",
"*",
"log10",
"(",
"c",
"*",
"10",
"**",
"e",
")",
"with",
"an",
"absolute",
"error",
"of",
"at",
"most",
"1",
".",
"Assumes",
"that",
"c",
"*",
"10",
"**",
"e",
"is",
"not",
"exactly",
"1",
"."
] |
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
|
[
"def",
"_dlog10",
"(",
"c",
",",
"e",
",",
"p",
")",
":",
"# increase precision by 2; compensate for this by dividing",
"# final result by 100",
"p",
"+=",
"2",
"# write c*10**e as d*10**f with either:",
"# f >= 0 and 1 <= d <= 10, or",
"# f <= 0 and 0.1 <= d <= 1.",
"# Thus for c*10**e close to 1, f = 0",
"l",
"=",
"len",
"(",
"str",
"(",
"c",
")",
")",
"f",
"=",
"e",
"+",
"l",
"-",
"(",
"e",
"+",
"l",
">=",
"1",
")",
"if",
"p",
">",
"0",
":",
"M",
"=",
"10",
"**",
"p",
"k",
"=",
"e",
"+",
"p",
"-",
"f",
"if",
"k",
">=",
"0",
":",
"c",
"*=",
"10",
"**",
"k",
"else",
":",
"c",
"=",
"_div_nearest",
"(",
"c",
",",
"10",
"**",
"-",
"k",
")",
"log_d",
"=",
"_ilog",
"(",
"c",
",",
"M",
")",
"# error < 5 + 22 = 27",
"log_10",
"=",
"_log10_digits",
"(",
"p",
")",
"# error < 1",
"log_d",
"=",
"_div_nearest",
"(",
"log_d",
"*",
"M",
",",
"log_10",
")",
"log_tenpower",
"=",
"f",
"*",
"M",
"# exact",
"else",
":",
"log_d",
"=",
"0",
"# error < 2.31",
"log_tenpower",
"=",
"_div_nearest",
"(",
"f",
",",
"10",
"**",
"-",
"p",
")",
"# error < 0.5",
"return",
"_div_nearest",
"(",
"log_tenpower",
"+",
"log_d",
",",
"100",
")"
] |
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/decimal.py#L5551-L5583
|
|
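A numeric sanity check of the stated contract. `_dlog10` is a private helper of the decimal module; importing it from CPython 3's pure-Python `_pydecimal` is an assumption made for illustration.

```python
import math
from _pydecimal import _dlog10  # private API; import path is an assumption

c, e, p = 3141, -3, 5                          # c*10**e = 3.141, not exactly 1
approx = _dlog10(c, e, p)                      # integer approximation
exact = (10 ** p) * math.log10(c * 10.0 ** e)  # about 49706.79
assert abs(approx - exact) <= 1.0              # docstring: absolute error at most 1
```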
python-diamond/Diamond
|
7000e16cfdf4508ed9291fc4b3800592557b2431
|
src/diamond/utils/log.py
|
python
|
setup_logging
|
(configfile, stdout=False)
|
return log
|
[] |
def setup_logging(configfile, stdout=False):
log = logging.getLogger('diamond')
try:
logging.config.fileConfig(configfile, disable_existing_loggers=False)
# if the stdout flag is set, we use the log level of the root logger
# for logging to stdout, and keep all loggers defined in the conf file
if stdout:
rootLogLevel = logging.getLogger().getEffectiveLevel()
log.setLevel(rootLogLevel)
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(DebugFormatter())
streamHandler.setLevel(rootLogLevel)
log.addHandler(streamHandler)
except Exception as e:
sys.stderr.write("Error occurs when initialize logging: ")
sys.stderr.write(str(e))
sys.stderr.write(os.linesep)
return log
|
[
"def",
"setup_logging",
"(",
"configfile",
",",
"stdout",
"=",
"False",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'diamond'",
")",
"try",
":",
"logging",
".",
"config",
".",
"fileConfig",
"(",
"configfile",
",",
"disable_existing_loggers",
"=",
"False",
")",
"# if the stdout flag is set, we use the log level of the root logger",
"# for logging to stdout, and keep all loggers defined in the conf file",
"if",
"stdout",
":",
"rootLogLevel",
"=",
"logging",
".",
"getLogger",
"(",
")",
".",
"getEffectiveLevel",
"(",
")",
"log",
".",
"setLevel",
"(",
"rootLogLevel",
")",
"streamHandler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"streamHandler",
".",
"setFormatter",
"(",
"DebugFormatter",
"(",
")",
")",
"streamHandler",
".",
"setLevel",
"(",
"rootLogLevel",
")",
"log",
".",
"addHandler",
"(",
"streamHandler",
")",
"except",
"Exception",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Error occurs when initialize logging: \"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"str",
"(",
"e",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"os",
".",
"linesep",
")",
"return",
"log"
] |
https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/diamond/utils/log.py#L35-L57
|
|||
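A sketch of calling `setup_logging`; the config path is hypothetical and the file must follow the stdlib `logging.config.fileConfig` format.

```python
# diamond.conf needs [loggers]/[handlers]/[formatters] sections per fileConfig.
log = setup_logging('/etc/diamond/diamond.conf', stdout=True)
log.info('collectors starting')
```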
mozillazg/pypy
|
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
|
pypy/module/cpyext/stubs.py
|
python
|
PyDescr_IsData
|
(space, descr)
|
Return true if the descriptor object descr describes a data attribute, or
false if it describes a method. descr must be a descriptor object; there is
no error checking.
|
Return true if the descriptor object descr describes a data attribute, or
false if it describes a method. descr must be a descriptor object; there is
no error checking.
|
[
"Return",
"true",
"if",
"the",
"descriptor",
"objects",
"descr",
"describes",
"a",
"data",
"attribute",
"or",
"false",
"if",
"it",
"describes",
"a",
"method",
".",
"descr",
"must",
"be",
"a",
"descriptor",
"object",
";",
"there",
"is",
"no",
"error",
"checking",
"."
] |
def PyDescr_IsData(space, descr):
"""Return true if the descriptor objects descr describes a data attribute, or
false if it describes a method. descr must be a descriptor object; there is
no error checking.
"""
raise NotImplementedError
|
[
"def",
"PyDescr_IsData",
"(",
"space",
",",
"descr",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/module/cpyext/stubs.py#L263-L268
|
||
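The stub above mirrors CPython's data/non-data descriptor distinction; in pure Python the test hinges on whether `__set__` (or `__delete__`) is defined. An illustration of the semantics, not cpyext code:

```python
class DataDescr:
    """Defines __set__, so PyDescr_IsData would report true."""
    def __get__(self, obj, objtype=None):
        return 42
    def __set__(self, obj, value):
        raise AttributeError('read-only')

class NonDataDescr:
    """__get__ only: a method-like (non-data) descriptor."""
    def __get__(self, obj, objtype=None):
        return 42
```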
numba/numba
|
bf480b9e0da858a65508c2b17759a72ee6a44c51
|
numba/cpython/setobj.py
|
python
|
SetInstance.allocate
|
(cls, context, builder, set_type, nitems=None)
|
return self
|
Allocate a SetInstance with its storage. Same as allocate_ex(),
but return an initialized *instance*. If allocation failed,
control is transferred to the caller using the target's current
call convention.
|
Allocate a SetInstance with its storage. Same as allocate_ex(),
but return an initialized *instance*. If allocation failed,
control is transferred to the caller using the target's current
call convention.
|
[
"Allocate",
"a",
"SetInstance",
"with",
"its",
"storage",
".",
"Same",
"as",
"allocate_ex",
"()",
"but",
"return",
"an",
"initialized",
"*",
"instance",
"*",
".",
"If",
"allocation",
"failed",
"control",
"is",
"transferred",
"to",
"the",
"caller",
"using",
"the",
"target",
"s",
"current",
"call",
"convention",
"."
] |
def allocate(cls, context, builder, set_type, nitems=None):
"""
Allocate a SetInstance with its storage. Same as allocate_ex(),
but return an initialized *instance*. If allocation failed,
control is transferred to the caller using the target's current
call convention.
"""
ok, self = cls.allocate_ex(context, builder, set_type, nitems)
with builder.if_then(builder.not_(ok), likely=False):
context.call_conv.return_user_exc(builder, MemoryError,
("cannot allocate set",))
return self
|
[
"def",
"allocate",
"(",
"cls",
",",
"context",
",",
"builder",
",",
"set_type",
",",
"nitems",
"=",
"None",
")",
":",
"ok",
",",
"self",
"=",
"cls",
".",
"allocate_ex",
"(",
"context",
",",
"builder",
",",
"set_type",
",",
"nitems",
")",
"with",
"builder",
".",
"if_then",
"(",
"builder",
".",
"not_",
"(",
"ok",
")",
",",
"likely",
"=",
"False",
")",
":",
"context",
".",
"call_conv",
".",
"return_user_exc",
"(",
"builder",
",",
"MemoryError",
",",
"(",
"\"cannot allocate set\"",
",",
")",
")",
"return",
"self"
] |
https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/cpython/setobj.py#L768-L779
|
|
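A sketch of how the classmethod is used inside a numba lowering function; `context` and `builder` come from the lowering machinery and cannot be constructed standalone, and the surrounding helper names are assumptions.

```python
# Inside a @lower_builtin implementation (sketch):
inst = SetInstance.allocate(context, builder, set_type, nitems)
# If allocation failed, allocate() has already emitted the MemoryError
# return via the call convention, so `inst` is valid from here on.
return impl_ret_new_ref(context, builder, set_type, inst.value)
```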
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/graphs/graph_decompositions/modular_decomposition.py
|
python
|
promote_left
|
(root)
|
Perform the promotion phase on the forest root.
If a child and its parent are both marked LEFT_SPLIT, the child is removed
and placed just before the parent.
INPUT:
- ``root`` -- The forest which needs to be promoted
EXAMPLES::
sage: from sage.graphs.graph_decompositions.modular_decomposition import *
sage: g = Graph()
sage: g.add_vertices([1, 2, 3, 4, 5, 6, 7])
sage: g.add_edge(2, 3)
sage: g.add_edge(4, 3)
sage: g.add_edge(5, 3)
sage: g.add_edge(2, 6)
sage: g.add_edge(4, 7)
sage: g.add_edge(2, 1)
sage: g.add_edge(6, 1)
sage: g.add_edge(4, 2)
sage: g.add_edge(5, 2)
sage: forest = Node(NodeType.FOREST)
sage: forest.children = [create_normal_node(2),
....: create_normal_node(3), create_normal_node(1)]
sage: series_node = Node(NodeType.SERIES)
sage: series_node.children = [create_normal_node(4),
....: create_normal_node(5)]
sage: parallel_node = Node(NodeType.PARALLEL)
sage: parallel_node.children = [create_normal_node(6),
....: create_normal_node(7)]
sage: forest.children.insert(1, series_node)
sage: forest.children.insert(3, parallel_node)
sage: vertex_status = {2: VertexPosition.LEFT_OF_SOURCE,
....: 3: VertexPosition.SOURCE,
....: 1: VertexPosition.RIGHT_OF_SOURCE,
....: 4: VertexPosition.LEFT_OF_SOURCE,
....: 5: VertexPosition.LEFT_OF_SOURCE,
....: 6: VertexPosition.RIGHT_OF_SOURCE,
....: 7: VertexPosition.RIGHT_OF_SOURCE}
sage: vertex_dist = {2: 1, 4: 1, 5: 1, 3: 0, 6: 2, 7: 2, 1: 3}
sage: x = {u for u in g.neighbor_iterator(2)
....: if vertex_dist[u] != vertex_dist[2]}
sage: maximal_subtrees_with_leaves_in_x(forest, 2, x, vertex_status,
....: False, 0)
sage: promote_left(forest)
sage: forest
FOREST [NORMAL [2], SERIES [NORMAL [4], NORMAL [5]], NORMAL [3],
PARALLEL [NORMAL [6]], PARALLEL [NORMAL [7]],
PARALLEL [], NORMAL [1]]
|
Perform the promotion phase on the forest root.
|
[
"Perform",
"the",
"promotion",
"phase",
"on",
"the",
"forest",
"root",
"."
] |
def promote_left(root):
"""
Perform the promotion phase on the forest root.
    If a child and its parent are both marked LEFT_SPLIT, the child is removed
    and placed just before the parent.
INPUT:
- ``root`` -- The forest which needs to be promoted
EXAMPLES::
sage: from sage.graphs.graph_decompositions.modular_decomposition import *
sage: g = Graph()
sage: g.add_vertices([1, 2, 3, 4, 5, 6, 7])
sage: g.add_edge(2, 3)
sage: g.add_edge(4, 3)
sage: g.add_edge(5, 3)
sage: g.add_edge(2, 6)
sage: g.add_edge(4, 7)
sage: g.add_edge(2, 1)
sage: g.add_edge(6, 1)
sage: g.add_edge(4, 2)
sage: g.add_edge(5, 2)
sage: forest = Node(NodeType.FOREST)
sage: forest.children = [create_normal_node(2),
....: create_normal_node(3), create_normal_node(1)]
sage: series_node = Node(NodeType.SERIES)
sage: series_node.children = [create_normal_node(4),
....: create_normal_node(5)]
sage: parallel_node = Node(NodeType.PARALLEL)
sage: parallel_node.children = [create_normal_node(6),
....: create_normal_node(7)]
sage: forest.children.insert(1, series_node)
sage: forest.children.insert(3, parallel_node)
sage: vertex_status = {2: VertexPosition.LEFT_OF_SOURCE,
....: 3: VertexPosition.SOURCE,
....: 1: VertexPosition.RIGHT_OF_SOURCE,
....: 4: VertexPosition.LEFT_OF_SOURCE,
....: 5: VertexPosition.LEFT_OF_SOURCE,
....: 6: VertexPosition.RIGHT_OF_SOURCE,
....: 7: VertexPosition.RIGHT_OF_SOURCE}
sage: vertex_dist = {2: 1, 4: 1, 5: 1, 3: 0, 6: 2, 7: 2, 1: 3}
sage: x = {u for u in g.neighbor_iterator(2)
....: if vertex_dist[u] != vertex_dist[2]}
sage: maximal_subtrees_with_leaves_in_x(forest, 2, x, vertex_status,
....: False, 0)
sage: promote_left(forest)
sage: forest
FOREST [NORMAL [2], SERIES [NORMAL [4], NORMAL [5]], NORMAL [3],
PARALLEL [NORMAL [6]], PARALLEL [NORMAL [7]],
PARALLEL [], NORMAL [1]]
"""
q = deque()
# q has [parent, child] elements as parent needs to be modified
for child in root.children:
q.append([root, child])
while q:
parent, child = q.popleft()
if child.node_type == NodeType.NORMAL:
continue
# stores the elements to be removed from the child
to_remove = []
# stores the index of child in parent list
index = parent.children.index(child)
for grand_child in child.children:
# if tree and child both have LEFT_SPLIT then tree from
# child is inserted just before child in the parent
if grand_child.has_left_split() and child.has_left_split():
parent.children.insert(index, grand_child)
index += 1
to_remove.append(grand_child)
q.append([parent, grand_child])
else:
q.append([child, grand_child])
for grand_child in to_remove:
child.children.remove(grand_child)
|
[
"def",
"promote_left",
"(",
"root",
")",
":",
"q",
"=",
"deque",
"(",
")",
"# q has [parent, child] elements as parent needs to be modified",
"for",
"child",
"in",
"root",
".",
"children",
":",
"q",
".",
"append",
"(",
"[",
"root",
",",
"child",
"]",
")",
"while",
"q",
":",
"parent",
",",
"child",
"=",
"q",
".",
"popleft",
"(",
")",
"if",
"child",
".",
"node_type",
"==",
"NodeType",
".",
"NORMAL",
":",
"continue",
"# stores the elements to be removed from the child",
"to_remove",
"=",
"[",
"]",
"# stores the index of child in parent list",
"index",
"=",
"parent",
".",
"children",
".",
"index",
"(",
"child",
")",
"for",
"grand_child",
"in",
"child",
".",
"children",
":",
"# if tree and child both have LEFT_SPLIT then tree from",
"# child is inserted just before child in the parent",
"if",
"grand_child",
".",
"has_left_split",
"(",
")",
"and",
"child",
".",
"has_left_split",
"(",
")",
":",
"parent",
".",
"children",
".",
"insert",
"(",
"index",
",",
"grand_child",
")",
"index",
"+=",
"1",
"to_remove",
".",
"append",
"(",
"grand_child",
")",
"q",
".",
"append",
"(",
"[",
"parent",
",",
"grand_child",
"]",
")",
"else",
":",
"q",
".",
"append",
"(",
"[",
"child",
",",
"grand_child",
"]",
")",
"for",
"grand_child",
"in",
"to_remove",
":",
"child",
".",
"children",
".",
"remove",
"(",
"grand_child",
")"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/graphs/graph_decompositions/modular_decomposition.py#L1734-L1820
|
||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMountainofPigeonsTranslations.py
|
python
|
extractMountainofPigeonsTranslations
|
(item)
|
return False
|
Mountain of Pigeons Translations
|
Mountain of Pigeons Translations
|
[
"Mountain",
"of",
"Pigeons",
"Translations"
] |
def extractMountainofPigeonsTranslations(item):
"""
Mountain of Pigeons Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Manga' in item['tags']:
return None
if 'Anime' in item['tags']:
return None
tagmap = [
('bahamut', 'Undefeated Bahamut Chronicle', 'translated'),
('HSN', 'Hataraku Maou-sama!', 'translated'),
('Trinity Seven', 'Trinity Seven', 'translated'),
('log horizon', 'Log Horizon', 'translated'),
('GaWoRaRe', 'Kanojo ga Flag wo Oraretara', 'translated'),
('Rokujouma', 'Rokujouma no Shinryakusha!?', 'translated'),
('World Break', 'Seiken Tsukai no World Break', 'translated'),
('Four Cours After', 'Four Cours After', 'translated'),
('Upon the Wind and Melody of the Lute', 'Upon the Wind and Melody of the Lute', 'translated'),
('MonsterTamer', 'Monster Tamer’s Fluffy Master-Apprentice Life', 'translated'),
('Magia', 'Revenge Magia of the Magic Breaker', 'translated'),
('Low-Life', 'Seishun Buta Yarou', 'translated'),
('Hundred', 'Hundred', 'translated'),
('ElfWife', 'I, a Demon Lord, Took a Slave Elf as my Wife, but how do I Love Her?', 'translated'),
('StarrySky', 'I Hold Your Voice Alone, Under The Starry Sky', 'translated'),
('Maou-ppoi', 'Maou-ppoi no!', 'translated'),
('KimiSen', 'Kimi to Boku no Saigo no Senjo, Aruiha Sekai ga Hajimaru Seisen', 'translated'),
('IseCafé', 'Have a Coffee After School, In Another World\'s Café', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('Using My God Skill “Breathing” to Level Up, I Will Challenge the Dungeon of the Gods', 'Using My God Skill "Breathing" to Level Up, I Will Challenge the Dungeon of the Gods', 'translated'),
(' The Strongest Mage’s Retirement Plan', 'Saikyou Mahoushi no Inton Keikaku', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
[
"def",
"extractMountainofPigeonsTranslations",
"(",
"item",
")",
":",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
"or",
"frag",
")",
"or",
"'preview'",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"if",
"'Manga'",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"None",
"if",
"'Anime'",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"None",
"tagmap",
"=",
"[",
"(",
"'bahamut'",
",",
"'Undefeated Bahamut Chronicle'",
",",
"'translated'",
")",
",",
"(",
"'HSN'",
",",
"'Hataraku Maou-sama!'",
",",
"'translated'",
")",
",",
"(",
"'Trinity Seven'",
",",
"'Trinity Seven'",
",",
"'translated'",
")",
",",
"(",
"'log horizon'",
",",
"'Log Horizon'",
",",
"'translated'",
")",
",",
"(",
"'GaWoRaRe'",
",",
"'Kanojo ga Flag wo Oraretara'",
",",
"'translated'",
")",
",",
"(",
"'Rokujouma'",
",",
"'Rokujouma no Shinryakusha!?'",
",",
"'translated'",
")",
",",
"(",
"'World Break'",
",",
"'Seiken Tsukai no World Break'",
",",
"'translated'",
")",
",",
"(",
"'Four Cours After'",
",",
"'Four Cours After'",
",",
"'translated'",
")",
",",
"(",
"'Upon the Wind and Melody of the Lute'",
",",
"'Upon the Wind and Melody of the Lute'",
",",
"'translated'",
")",
",",
"(",
"'MonsterTamer'",
",",
"'Monster Tamer’s Fluffy Master-Apprentice Life', ",
" ",
"ranslated'),",
"",
"",
"(",
"'Magia'",
",",
"'Revenge Magia of the Magic Breaker'",
",",
"'translated'",
")",
",",
"(",
"'Low-Life'",
",",
"'Seishun Buta Yarou'",
",",
"'translated'",
")",
",",
"(",
"'Hundred'",
",",
"'Hundred'",
",",
"'translated'",
")",
",",
"(",
"'ElfWife'",
",",
"'I, a Demon Lord, Took a Slave Elf as my Wife, but how do I Love Her?'",
",",
"'translated'",
")",
",",
"(",
"'StarrySky'",
",",
"'I Hold Your Voice Alone, Under The Starry Sky'",
",",
"'translated'",
")",
",",
"(",
"'Maou-ppoi'",
",",
"'Maou-ppoi no!'",
",",
"'translated'",
")",
",",
"(",
"'KimiSen'",
",",
"'Kimi to Boku no Saigo no Senjo, Aruiha Sekai ga Hajimaru Seisen'",
",",
"'translated'",
")",
",",
"(",
"'IseCafé',",
" ",
"Have a Coffee After School, In Another World\\'s Café', ",
" ",
"ranslated'),",
"",
"",
"]",
"for",
"tagname",
",",
"name",
",",
"tl_type",
"in",
"tagmap",
":",
"if",
"tagname",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"titlemap",
"=",
"[",
"(",
"'Using My God Skill “Breathing” to Level Up, I Will Challenge the Dungeon of the Gods', '",
"U",
"ng My God Skill \"Breathing\" to Level Up, I Will Challenge the Dungeon of the Gods', ",
" ",
"nslated'),",
"",
"",
"(",
"'\tThe Strongest Mage’s Retirement Plan', ",
" ",
"aikyou Mahoushi no Inton Keikaku', ",
" ",
"ranslated'),",
"",
"",
"]",
"for",
"titlecomponent",
",",
"name",
",",
"tl_type",
"in",
"titlemap",
":",
"if",
"titlecomponent",
".",
"lower",
"(",
")",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"name",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
",",
"tl_type",
"=",
"tl_type",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractMountainofPigeonsTranslations.py#L1-L49
|
|
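A hypothetical feed item showing the shape the extractor expects; the `title` and `tags` keys are inferred from the function body.

```python
item = {
    'title': 'Undefeated Bahamut Chronicle Volume 3 Chapter 2',
    'tags': ['bahamut'],
}
# Returns a release message via buildReleaseMessageWithType,
# None for previews/manga/anime, or False when nothing matches.
msg = extractMountainofPigeonsTranslations(item)
```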
tenpy/tenpy
|
bbdd3dbbdb511948eb0e6ba7ff619ac6ca657fff
|
tenpy/simulations/simulation.py
|
python
|
resume_from_checkpoint
|
(*,
filename=None,
checkpoint_results=None,
update_sim_params=None,
simulation_class_kwargs=None)
|
return results
|
Resume a simulation run from a given checkpoint.
(All parameters have to be given as keyword arguments.)
Parameters
----------
filename : None | str
The filename of the checkpoint to be loaded.
You can either specify the `filename` or the `checkpoint_results`.
checkpoint_results : None | dict
Alternatively to `filename`, the results of the simulation so far, i.e. directly the data
dictionary saved at a simulation checkpoint.
update_sim_params : None | dict
Allows updating specific :cfg:config:`Simulation` parameters; ignored if `None`.
Uses :func:`~tenpy.tools.misc.update_recursive` to update values, such that the keys of
`update_sim_params` can be recursive, e.g. `algorithm_params/max_sweeps`.
simulation_class_kwargs : None | dict
Further keyword arguments given to the simulation class, ignored if `None`.
Returns
-------
results :
The results from running the simulation, i.e.,
what :meth:`tenpy.simulations.Simulation.resume_run()` returned.
Notes
-----
The `checkpoint_filename` should be relative to the current working directory. If you use the
:cfg:option:`Simulation.directory`, the simulation class will attempt to change to that
directory during initialization. Hence, either resume the simulation from the same directory
where you originally started, or update the :cfg:option:`Simulation.directory`
(and :cfg:option:`Simulation.output_filename`) parameter with `update_sim_params`.
|
Resume a simulation run from a given checkpoint.
|
[
"Resume",
"a",
"simulation",
"run",
"from",
"a",
"given",
"checkpoint",
"."
] |
def resume_from_checkpoint(*,
filename=None,
checkpoint_results=None,
update_sim_params=None,
simulation_class_kwargs=None):
"""Resume a simulation run from a given checkpoint.
(All parameters have to be given as keyword arguments.)
Parameters
----------
filename : None | str
The filename of the checkpoint to be loaded.
You can either specify the `filename` or the `checkpoint_results`.
checkpoint_results : None | dict
    Alternatively to `filename`, the results of the simulation so far, i.e. directly the data
    dictionary saved at a simulation checkpoint.
    update_sim_params : None | dict
    Allows updating specific :cfg:config:`Simulation` parameters; ignored if `None`.
    Uses :func:`~tenpy.tools.misc.update_recursive` to update values, such that the keys of
    `update_sim_params` can be recursive, e.g. `algorithm_params/max_sweeps`.
    simulation_class_kwargs : None | dict
    Further keyword arguments given to the simulation class, ignored if `None`.
Returns
-------
results :
The results from running the simulation, i.e.,
what :meth:`tenpy.simulations.Simulation.resume_run()` returned.
Notes
-----
The `checkpoint_filename` should be relative to the current working directory. If you use the
:cfg:option:`Simulation.directory`, the simulation class will attempt to change to that
directory during initialization. Hence, either resume the simulation from the same directory
where you originally started, or update the :cfg:option:`Simulation.directory`
    (and :cfg:option:`Simulation.output_filename`) parameter with `update_sim_params`.
"""
if filename is not None:
if checkpoint_results is not None:
raise ValueError("pass either filename or checkpoint_results")
checkpoint_results = hdf5_io.load(filename)
if checkpoint_results is None:
raise ValueError("you need to pass `filename` or `checkpoint_results`")
if checkpoint_results['finished_run']:
raise Skip("Simulation already finished", filename)
sim_class_mod = checkpoint_results['version_info']['simulation_module']
sim_class_name = checkpoint_results['version_info']['simulation_class']
SimClass = hdf5_io.find_global(sim_class_mod, sim_class_name)
if simulation_class_kwargs is None:
simulation_class_kwargs = {}
options = checkpoint_results['simulation_parameters']
if update_sim_params is not None:
update_recursive(options, update_sim_params)
with SimClass.from_saved_checkpoint(checkpoint_results=checkpoint_results,
**simulation_class_kwargs) as sim:
results = sim.resume_run()
if 'sequential' in options:
sequential = options['sequential']
sequential['index'] += 1
resume_data = sim.engine.get_resume_data(sequential_simulations=True)
if 'sequential' in options:
# note: it is important to exit the with ... as sim`` statement before continuing
# to free memory and cache
del sim # free memory
return run_seq_simulations(sequential,
SimClass,
simulation_class_kwargs,
resume_data=resume_data,
**options)
return results
|
[
"def",
"resume_from_checkpoint",
"(",
"*",
",",
"filename",
"=",
"None",
",",
"checkpoint_results",
"=",
"None",
",",
"update_sim_params",
"=",
"None",
",",
"simulation_class_kwargs",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"not",
"None",
":",
"if",
"checkpoint_results",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"pass either filename or checkpoint_results\"",
")",
"checkpoint_results",
"=",
"hdf5_io",
".",
"load",
"(",
"filename",
")",
"if",
"checkpoint_results",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"you need to pass `filename` or `checkpoint_results`\"",
")",
"if",
"checkpoint_results",
"[",
"'finished_run'",
"]",
":",
"raise",
"Skip",
"(",
"\"Simulation already finished\"",
",",
"filename",
")",
"sim_class_mod",
"=",
"checkpoint_results",
"[",
"'version_info'",
"]",
"[",
"'simulation_module'",
"]",
"sim_class_name",
"=",
"checkpoint_results",
"[",
"'version_info'",
"]",
"[",
"'simulation_class'",
"]",
"SimClass",
"=",
"hdf5_io",
".",
"find_global",
"(",
"sim_class_mod",
",",
"sim_class_name",
")",
"if",
"simulation_class_kwargs",
"is",
"None",
":",
"simulation_class_kwargs",
"=",
"{",
"}",
"options",
"=",
"checkpoint_results",
"[",
"'simulation_parameters'",
"]",
"if",
"update_sim_params",
"is",
"not",
"None",
":",
"update_recursive",
"(",
"options",
",",
"update_sim_params",
")",
"with",
"SimClass",
".",
"from_saved_checkpoint",
"(",
"checkpoint_results",
"=",
"checkpoint_results",
",",
"*",
"*",
"simulation_class_kwargs",
")",
"as",
"sim",
":",
"results",
"=",
"sim",
".",
"resume_run",
"(",
")",
"if",
"'sequential'",
"in",
"options",
":",
"sequential",
"=",
"options",
"[",
"'sequential'",
"]",
"sequential",
"[",
"'index'",
"]",
"+=",
"1",
"resume_data",
"=",
"sim",
".",
"engine",
".",
"get_resume_data",
"(",
"sequential_simulations",
"=",
"True",
")",
"if",
"'sequential'",
"in",
"options",
":",
"# note: it is important to exit the with ... as sim`` statement before continuing",
"# to free memory and cache",
"del",
"sim",
"# free memory",
"return",
"run_seq_simulations",
"(",
"sequential",
",",
"SimClass",
",",
"simulation_class_kwargs",
",",
"resume_data",
"=",
"resume_data",
",",
"*",
"*",
"options",
")",
"return",
"results"
] |
https://github.com/tenpy/tenpy/blob/bbdd3dbbdb511948eb0e6ba7ff619ac6ca657fff/tenpy/simulations/simulation.py#L913-L985
|
|
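A minimal resume sketch using the recursive-key form the docstring documents; the checkpoint filename is hypothetical.

```python
from tenpy.simulations.simulation import resume_from_checkpoint

results = resume_from_checkpoint(
    filename='results.h5',                                  # checkpoint from a previous run
    update_sim_params={'algorithm_params/max_sweeps': 50},  # recursive key per the docstring
)
```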
kuri65536/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
python3-alpha/python-libs/gdata/apps/multidomain/data.py
|
python
|
UserEntry.SetPassword
|
(self, value)
|
Set the password of this User object.
Args:
value: string The new password to give this object.
|
Set the password of this User object.
|
[
"Set",
"the",
"password",
"of",
"this",
"User",
"object",
"."
] |
def SetPassword(self, value):
"""Set the password of this User object.
Args:
value: string The new password to give this object.
"""
self._SetProperty(USER_PASSWORD, value)
|
[
"def",
"SetPassword",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_SetProperty",
"(",
"USER_PASSWORD",
",",
"value",
")"
] |
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/gdata/apps/multidomain/data.py#L129-L135
|
||
robotframework/RIDE
|
6e8a50774ff33dead3a2757a11b0b4418ab205c0
|
src/robotide/lib/robot/libraries/OperatingSystem.py
|
python
|
OperatingSystem.create_binary_file
|
(self, path, content)
|
Creates a binary file with the given content.
If content is given as a Unicode string, it is first converted to bytes
character by character. All characters with ordinal below 256 can be
used and are converted to bytes with same values. Using characters
with higher ordinal is an error.
Byte strings, and possible other types, are written to the file as is.
If the directory for the file does not exist, it is created, along
with missing intermediate directories.
Examples:
| Create Binary File | ${dir}/example.png | ${image content} |
| Create Binary File | ${path} | \\x01\\x00\\xe4\\x00 |
Use `Create File` if you want to create a text file using a certain
encoding. `File Should Not Exist` can be used to avoid overwriting
existing files.
|
Creates a binary file with the given content.
|
[
"Creates",
"a",
"binary",
"file",
"with",
"the",
"given",
"content",
"."
] |
def create_binary_file(self, path, content):
"""Creates a binary file with the given content.
If content is given as a Unicode string, it is first converted to bytes
character by character. All characters with ordinal below 256 can be
used and are converted to bytes with same values. Using characters
with higher ordinal is an error.
Byte strings, and possible other types, are written to the file as is.
If the directory for the file does not exist, it is created, along
with missing intermediate directories.
Examples:
| Create Binary File | ${dir}/example.png | ${image content} |
| Create Binary File | ${path} | \\x01\\x00\\xe4\\x00 |
Use `Create File` if you want to create a text file using a certain
encoding. `File Should Not Exist` can be used to avoid overwriting
existing files.
"""
if is_unicode(content):
content = bytes(bytearray(ord(c) for c in content))
path = self._write_to_file(path, content, mode='wb')
self._link("Created binary file '%s'.", path)
|
[
"def",
"create_binary_file",
"(",
"self",
",",
"path",
",",
"content",
")",
":",
"if",
"is_unicode",
"(",
"content",
")",
":",
"content",
"=",
"bytes",
"(",
"bytearray",
"(",
"ord",
"(",
"c",
")",
"for",
"c",
"in",
"content",
")",
")",
"path",
"=",
"self",
".",
"_write_to_file",
"(",
"path",
",",
"content",
",",
"mode",
"=",
"'wb'",
")",
"self",
".",
"_link",
"(",
"\"Created binary file '%s'.\"",
",",
"path",
")"
] |
https://github.com/robotframework/RIDE/blob/6e8a50774ff33dead3a2757a11b0b4418ab205c0/src/robotide/lib/robot/libraries/OperatingSystem.py#L592-L616
|
||
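The unicode-to-bytes conversion above maps each character with ordinal below 256 to one byte of the same value; a standalone sketch of just that step:

# Works on both Python 2 and 3: characters below U+0100 map 1:1 to bytes.
content = u'\x01\x00\xe4\x00'
data = bytes(bytearray(ord(c) for c in content))
assert data == b'\x01\x00\xe4\x00'
# A character with ordinal >= 256 would raise ValueError inside bytearray.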
log2timeline/plaso
|
fe2e316b8c76a0141760c0f2f181d84acb83abc2
|
plaso/containers/artifacts.py
|
python
|
PathArtifact._SplitPath
|
(self, path, path_segment_separator)
|
return path_segments
|
Splits a path.
Args:
path (str): a path.
path_segment_separator (str): path segment separator.
Returns:
list[str]: path segments.
|
Splits a path.
|
[
"Splits",
"a",
"path",
"."
] |
def _SplitPath(self, path, path_segment_separator):
"""Splits a path.
Args:
path (str): a path.
path_segment_separator (str): path segment separator.
Returns:
list[str]: path segments.
"""
path = path or ''
split_path = path.split(path_segment_separator)
path_segments = [split_path[0]]
path_segments.extend(list(filter(None, split_path[1:])))
return path_segments
|
[
"def",
"_SplitPath",
"(",
"self",
",",
"path",
",",
"path_segment_separator",
")",
":",
"path",
"=",
"path",
"or",
"''",
"split_path",
"=",
"path",
".",
"split",
"(",
"path_segment_separator",
")",
"path_segments",
"=",
"[",
"split_path",
"[",
"0",
"]",
"]",
"path_segments",
".",
"extend",
"(",
"list",
"(",
"filter",
"(",
"None",
",",
"split_path",
"[",
"1",
":",
"]",
")",
")",
")",
"return",
"path_segments"
] |
https://github.com/log2timeline/plaso/blob/fe2e316b8c76a0141760c0f2f181d84acb83abc2/plaso/containers/artifacts.py#L355-L371
|
|
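A worked example of the splitting behavior above: the first segment is always kept, so a leading separator survives as an empty string, while later empty segments are filtered out.

path = '/usr//local/bin/'
split_path = path.split('/')        # ['', 'usr', '', 'local', 'bin', '']
path_segments = [split_path[0]]
path_segments.extend(list(filter(None, split_path[1:])))
assert path_segments == ['', 'usr', 'local', 'bin']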
NifTK/NiftyNet
|
935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0
|
niftynet/layer/rgb_histogram_equilisation.py
|
python
|
RGBHistogramEquilisationLayer.layer_op
|
(self, image, mask=None)
|
:param image: a 3-channel tensor assumed to be an image in floating-point
RGB format (each channel in [0, 1])
:return: the equilised image
|
:param image: a 3-channel tensor assumed to be an image in floating-point
RGB format (each channel in [0, 1])
:return: the equilised image
|
[
":",
"param",
"image",
":",
"a",
"3",
"-",
"channel",
"tensor",
"assumed",
"to",
"be",
"an",
"image",
"in",
"floating",
"-",
"point",
"RGB",
"format",
"(",
"each",
"channel",
"in",
"[",
"0",
"1",
"]",
")",
":",
"return",
":",
"the",
"equilised",
"image"
] |
def layer_op(self, image, mask=None):
"""
:param image: a 3-channel tensor assumed to be an image in floating-point
RGB format (each channel in [0, 1])
:return: the equilised image
"""
if isinstance(image, dict):
image[self.image_name] = self._normalise_image(
image[self.image_name])
return image, mask
else:
return self._normalise_image(image), mask
|
[
"def",
"layer_op",
"(",
"self",
",",
"image",
",",
"mask",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"image",
",",
"dict",
")",
":",
"image",
"[",
"self",
".",
"image_name",
"]",
"=",
"self",
".",
"_normalise_image",
"(",
"image",
"[",
"self",
".",
"image_name",
"]",
")",
"return",
"image",
",",
"mask",
"else",
":",
"return",
"self",
".",
"_normalise_image",
"(",
"image",
")",
",",
"mask"
] |
https://github.com/NifTK/NiftyNet/blob/935bf4334cd00fa9f9d50f6a95ddcbfdde4031e0/niftynet/layer/rgb_histogram_equilisation.py#L51-L64
|
||
HymanLiuTS/flaskTs
|
286648286976e85d9b9a5873632331efcafe0b21
|
flasky/lib/python2.7/site-packages/sqlalchemy/orm/events.py
|
python
|
MapperEvents.instrument_class
|
(self, mapper, class_)
|
Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
|
Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
|
[
"Receive",
"a",
"class",
"when",
"the",
"mapper",
"is",
"first",
"constructed",
"before",
"instrumentation",
"is",
"applied",
"to",
"the",
"mapped",
"class",
"."
] |
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
|
[
"def",
"instrument_class",
"(",
"self",
",",
"mapper",
",",
"class_",
")",
":"
] |
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/sqlalchemy/orm/events.py#L634-L655
|
||
openstack/cinder
|
23494a6d6c51451688191e1847a458f1d3cdcaa5
|
cinder/volume/drivers/dell_emc/powermax/common.py
|
python
|
PowerMaxCommon._delete_group_snapshot
|
(self, group_snapshot, snapshots)
|
return model_update, snapshots_model_update
|
Helper function to delete a group snapshot.
:param group_snapshot: the group snapshot object
:param snapshots: the snapshot objects
:returns: model_update, snapshots_model_update
:raises: VolumeBackendApiException, NotImplementedError
|
Helper function to delete a group snapshot.
|
[
"Helper",
"function",
"to",
"delete",
"a",
"group",
"snapshot",
"."
] |
def _delete_group_snapshot(self, group_snapshot, snapshots):
"""Helper function to delete a group snapshot.
:param group_snapshot: the group snapshot object
:param snapshots: the snapshot objects
:returns: model_update, snapshots_model_update
:raises: VolumeBackendApiException, NotImplementedError
"""
snapshots_model_update = []
source_group = group_snapshot.get('group')
grp_id = group_snapshot.group_id
if not volume_utils.is_group_a_cg_snapshot_type(source_group):
raise NotImplementedError()
LOG.info("Delete snapshot grpSnapshotId: %(grpSnapshotId)s"
" for source group %(grpId)s",
{'grpSnapshotId': group_snapshot.id,
'grpId': grp_id})
snap_name = self.utils.truncate_string(group_snapshot.id, 19)
vol_grp_name = None
try:
# Get the array serial
array, extra_specs = self._get_volume_group_info(
source_group)
# Get the volume group dict for getting the group name
volume_group = (self._find_volume_group(array, source_group))
if volume_group and volume_group.get('name'):
vol_grp_name = volume_group['name']
if vol_grp_name is None:
LOG.warning("Cannot find generic volume group %(grp_ss_id)s. "
"on array %(array)s",
{'grp_ss_id': group_snapshot.id,
'array': array})
else:
self.provision.delete_group_replica(
array, snap_name, vol_grp_name)
model_update = {'status': fields.GroupSnapshotStatus.DELETED}
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot.id,
'status': fields.SnapshotStatus.DELETED})
except Exception as e:
LOG.error("Error deleting volume group snapshot."
"Error received: %(e)s", {'e': e})
model_update = {
'status': fields.GroupSnapshotStatus.ERROR_DELETING}
return model_update, snapshots_model_update
|
[
"def",
"_delete_group_snapshot",
"(",
"self",
",",
"group_snapshot",
",",
"snapshots",
")",
":",
"snapshots_model_update",
"=",
"[",
"]",
"source_group",
"=",
"group_snapshot",
".",
"get",
"(",
"'group'",
")",
"grp_id",
"=",
"group_snapshot",
".",
"group_id",
"if",
"not",
"volume_utils",
".",
"is_group_a_cg_snapshot_type",
"(",
"source_group",
")",
":",
"raise",
"NotImplementedError",
"(",
")",
"LOG",
".",
"info",
"(",
"\"Delete snapshot grpSnapshotId: %(grpSnapshotId)s\"",
"\" for source group %(grpId)s\"",
",",
"{",
"'grpSnapshotId'",
":",
"group_snapshot",
".",
"id",
",",
"'grpId'",
":",
"grp_id",
"}",
")",
"snap_name",
"=",
"self",
".",
"utils",
".",
"truncate_string",
"(",
"group_snapshot",
".",
"id",
",",
"19",
")",
"vol_grp_name",
"=",
"None",
"try",
":",
"# Get the array serial",
"array",
",",
"extra_specs",
"=",
"self",
".",
"_get_volume_group_info",
"(",
"source_group",
")",
"# Get the volume group dict for getting the group name",
"volume_group",
"=",
"(",
"self",
".",
"_find_volume_group",
"(",
"array",
",",
"source_group",
")",
")",
"if",
"volume_group",
"and",
"volume_group",
".",
"get",
"(",
"'name'",
")",
":",
"vol_grp_name",
"=",
"volume_group",
"[",
"'name'",
"]",
"if",
"vol_grp_name",
"is",
"None",
":",
"LOG",
".",
"warning",
"(",
"\"Cannot find generic volume group %(grp_ss_id)s. \"",
"\"on array %(array)s\"",
",",
"{",
"'grp_ss_id'",
":",
"group_snapshot",
".",
"id",
",",
"'array'",
":",
"array",
"}",
")",
"else",
":",
"self",
".",
"provision",
".",
"delete_group_replica",
"(",
"array",
",",
"snap_name",
",",
"vol_grp_name",
")",
"model_update",
"=",
"{",
"'status'",
":",
"fields",
".",
"GroupSnapshotStatus",
".",
"DELETED",
"}",
"for",
"snapshot",
"in",
"snapshots",
":",
"snapshots_model_update",
".",
"append",
"(",
"{",
"'id'",
":",
"snapshot",
".",
"id",
",",
"'status'",
":",
"fields",
".",
"SnapshotStatus",
".",
"DELETED",
"}",
")",
"except",
"Exception",
"as",
"e",
":",
"LOG",
".",
"error",
"(",
"\"Error deleting volume group snapshot.\"",
"\"Error received: %(e)s\"",
",",
"{",
"'e'",
":",
"e",
"}",
")",
"model_update",
"=",
"{",
"'status'",
":",
"fields",
".",
"GroupSnapshotStatus",
".",
"ERROR_DELETING",
"}",
"return",
"model_update",
",",
"snapshots_model_update"
] |
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/dell_emc/powermax/common.py#L6157-L6206
|
|
sadighian/crypto-rl
|
078081e5715cadeae9c798a3d759c9d59d2041bc
|
data_recorder/connector_components/orderbook.py
|
python
|
OrderBook.render_lob_feature_names
|
(include_orderflow: bool = INCLUDE_ORDERFLOW)
|
return feature_names
|
Get the column names for the LOB render features.
:param include_orderflow: if TRUE, order flow imbalance stats are included in the set
:return: list containing feature names
|
Get the column names for the LOB render features.
|
[
"Get",
"the",
"column",
"names",
"for",
"the",
"LOB",
"render",
"features",
"."
] |
def render_lob_feature_names(include_orderflow: bool = INCLUDE_ORDERFLOW) -> list:
"""
Get the column names for the LOB render features.
:param include_orderflow: if TRUE, order flow imbalance stats are included in the set
:return: list containing feature names
"""
feature_names = list()
feature_names.append('midpoint')
feature_names.append('spread')
feature_names.append('buys')
feature_names.append('sells')
feature_types = ['distance', 'notional']
if include_orderflow:
feature_types += ['cancel_notional', 'limit_notional', 'market_notional']
for side in ['bids', 'asks']:
for feature in feature_types:
for row in range(MAX_BOOK_ROWS):
feature_names.append(f"{side}_{feature}_{row}")
LOGGER.info(f"render_feature_names() has {len(feature_names)} features")
return feature_names
|
[
"def",
"render_lob_feature_names",
"(",
"include_orderflow",
":",
"bool",
"=",
"INCLUDE_ORDERFLOW",
")",
"->",
"list",
":",
"feature_names",
"=",
"list",
"(",
")",
"feature_names",
".",
"append",
"(",
"'midpoint'",
")",
"feature_names",
".",
"append",
"(",
"'spread'",
")",
"feature_names",
".",
"append",
"(",
"'buys'",
")",
"feature_names",
".",
"append",
"(",
"'sells'",
")",
"feature_types",
"=",
"[",
"'distance'",
",",
"'notional'",
"]",
"if",
"include_orderflow",
":",
"feature_types",
"+=",
"[",
"'cancel_notional'",
",",
"'limit_notional'",
",",
"'market_notional'",
"]",
"for",
"side",
"in",
"[",
"'bids'",
",",
"'asks'",
"]",
":",
"for",
"feature",
"in",
"feature_types",
":",
"for",
"row",
"in",
"range",
"(",
"MAX_BOOK_ROWS",
")",
":",
"feature_names",
".",
"append",
"(",
"f\"{side}_{feature}_{row}\"",
")",
"LOGGER",
".",
"info",
"(",
"f\"render_feature_names() has {len(feature_names)} features\"",
")",
"return",
"feature_names"
] |
https://github.com/sadighian/crypto-rl/blob/078081e5715cadeae9c798a3d759c9d59d2041bc/data_recorder/connector_components/orderbook.py#L98-L123
|
|
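The loop above implies a simple feature-count formula; a sketch of the arithmetic, with MAX_BOOK_ROWS = 10 as an illustrative assumption rather than the project's actual constant:

MAX_BOOK_ROWS = 10                                 # assumption for illustration only
base = 4                                           # midpoint, spread, buys, sells
without_orderflow = base + 2 * 2 * MAX_BOOK_ROWS   # 2 sides x 2 feature types
with_orderflow = base + 2 * 5 * MAX_BOOK_ROWS      # 2 sides x 5 feature types
assert (without_orderflow, with_orderflow) == (44, 104)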
replit-archive/empythoned
|
977ec10ced29a3541a4973dc2b59910805695752
|
dist/lib/python2.7/ctypes/macholib/dyld.py
|
python
|
framework_find
|
(fn, executable_path=None, env=None)
|
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
|
Find a framework using dyld semantics in a very loose manner.
|
[
"Find",
"a",
"framework",
"using",
"dyld",
"semantics",
"in",
"a",
"very",
"loose",
"manner",
"."
] |
def framework_find(fn, executable_path=None, env=None):
"""
Find a framework using dyld semantics in a very loose manner.
Will take input such as:
Python
Python.framework
Python.framework/Versions/Current
"""
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError, e:
pass
fmwk_index = fn.rfind('.framework')
if fmwk_index == -1:
fmwk_index = len(fn)
fn += '.framework'
fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
try:
return dyld_find(fn, executable_path=executable_path, env=env)
except ValueError:
raise e
|
[
"def",
"framework_find",
"(",
"fn",
",",
"executable_path",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"try",
":",
"return",
"dyld_find",
"(",
"fn",
",",
"executable_path",
"=",
"executable_path",
",",
"env",
"=",
"env",
")",
"except",
"ValueError",
",",
"e",
":",
"pass",
"fmwk_index",
"=",
"fn",
".",
"rfind",
"(",
"'.framework'",
")",
"if",
"fmwk_index",
"==",
"-",
"1",
":",
"fmwk_index",
"=",
"len",
"(",
"fn",
")",
"fn",
"+=",
"'.framework'",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fn",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
"[",
":",
"fmwk_index",
"]",
")",
")",
"try",
":",
"return",
"dyld_find",
"(",
"fn",
",",
"executable_path",
"=",
"executable_path",
",",
"env",
"=",
"env",
")",
"except",
"ValueError",
":",
"raise",
"e"
] |
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/ctypes/macholib/dyld.py#L140-L161
|
||
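The fallback branch above normalizes a loose framework name before retrying dyld_find; a Python 3 re-derivation of just that normalization (no dyld lookup, POSIX path separators assumed):

import os

def _framework_fallback_name(fn):
    # Mirrors the suffix/join logic of framework_find's fallback path.
    fmwk_index = fn.rfind('.framework')
    if fmwk_index == -1:
        fmwk_index = len(fn)
        fn += '.framework'
    return os.path.join(fn, os.path.basename(fn[:fmwk_index]))

assert _framework_fallback_name('Python') == 'Python.framework/Python'
assert _framework_fallback_name('Python.framework') == 'Python.framework/Python'
assert (_framework_fallback_name('Python.framework/Versions/Current')
        == 'Python.framework/Versions/Current/Python')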
saltstack/salt
|
fae5bc757ad0f1716483ce7ae180b451545c2058
|
salt/modules/cimc.py
|
python
|
set_user
|
(uid=None, username=None, password=None, priv=None, status=None)
|
return ret
|
Sets a CIMC user with specified configurations.
.. versionadded:: 2019.2.0
Args:
uid(int): The user ID slot to create the user account in.
username(str): The name of the user.
password(str): The clear text password of the user.
priv(str): The privilege level of the user.
status(str): The account status of the user.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_user 11 username=admin password=foobar priv=admin active
|
Sets a CIMC user with specified configurations.
|
[
"Sets",
"a",
"CIMC",
"user",
"with",
"specified",
"configurations",
"."
] |
def set_user(uid=None, username=None, password=None, priv=None, status=None):
"""
Sets a CIMC user with specified configurations.
.. versionadded:: 2019.2.0
Args:
uid(int): The user ID slot to create the user account in.
username(str): The name of the user.
password(str): The clear text password of the user.
priv(str): The privilege level of the user.
status(str): The account status of the user.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_user 11 username=admin password=foobar priv=admin active
"""
conf = ""
if not uid:
raise salt.exceptions.CommandExecutionError("The user ID must be specified.")
if status:
conf += ' accountStatus="{}"'.format(status)
if username:
conf += ' name="{}"'.format(username)
if priv:
conf += ' priv="{}"'.format(priv)
if password:
conf += ' pwd="{}"'.format(password)
dn = "sys/user-ext/user-{}".format(uid)
inconfig = """<aaaUser id="{0}"{1} dn="sys/user-ext/user-{0}"/>""".format(uid, conf)
ret = __proxy__["cimc.set_config_modify"](dn, inconfig, False)
return ret
|
[
"def",
"set_user",
"(",
"uid",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"priv",
"=",
"None",
",",
"status",
"=",
"None",
")",
":",
"conf",
"=",
"\"\"",
"if",
"not",
"uid",
":",
"raise",
"salt",
".",
"exceptions",
".",
"CommandExecutionError",
"(",
"\"The user ID must be specified.\"",
")",
"if",
"status",
":",
"conf",
"+=",
"' accountStatus=\"{}\"'",
".",
"format",
"(",
"status",
")",
"if",
"username",
":",
"conf",
"+=",
"' name=\"{}\"'",
".",
"format",
"(",
"username",
")",
"if",
"priv",
":",
"conf",
"+=",
"' priv=\"{}\"'",
".",
"format",
"(",
"priv",
")",
"if",
"password",
":",
"conf",
"+=",
"' pwd=\"{}\"'",
".",
"format",
"(",
"password",
")",
"dn",
"=",
"\"sys/user-ext/user-{}\"",
".",
"format",
"(",
"uid",
")",
"inconfig",
"=",
"\"\"\"<aaaUser id=\"{0}\"{1} dn=\"sys/user-ext/user-{0}\"/>\"\"\"",
".",
"format",
"(",
"uid",
",",
"conf",
")",
"ret",
"=",
"__proxy__",
"[",
"\"cimc.set_config_modify\"",
"]",
"(",
"dn",
",",
"inconfig",
",",
"False",
")",
"return",
"ret"
] |
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/cimc.py#L897-L944
|
|
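For concreteness, the XML fragment the function assembles for the values from its own CLI example (uid=11, username, privilege and status set, password omitted here):

uid = 11
conf = ' accountStatus="active" name="admin" priv="admin"'
inconfig = """<aaaUser id="{0}"{1} dn="sys/user-ext/user-{0}"/>""".format(uid, conf)
assert inconfig == ('<aaaUser id="11" accountStatus="active"'
                    ' name="admin" priv="admin" dn="sys/user-ext/user-11"/>')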
larryhastings/gilectomy
|
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
|
Lib/stat.py
|
python
|
S_ISLNK
|
(mode)
|
return S_IFMT(mode) == S_IFLNK
|
Return True if mode is from a symbolic link.
|
Return True if mode is from a symbolic link.
|
[
"Return",
"True",
"if",
"mode",
"is",
"from",
"a",
"symbolic",
"link",
"."
] |
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
|
[
"def",
"S_ISLNK",
"(",
"mode",
")",
":",
"return",
"S_IFMT",
"(",
"mode",
")",
"==",
"S_IFLNK"
] |
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/stat.py#L66-L68
|
|
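Typical usage pairs S_ISLNK with os.lstat rather than os.stat, so the link itself is inspected instead of its target:

import os
import stat

mode = os.lstat('/tmp/some_link').st_mode   # hypothetical path
if stat.S_ISLNK(mode):
    print('symbolic link')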
awslabs/aws-ec2rescue-linux
|
8ecf40e7ea0d2563dac057235803fca2221029d2
|
ec2rlcore/prediag.py
|
python
|
print_indent
|
(str_arg, level=0)
|
Print str_arg indented two spaces per level.
|
Print str_arg indented two spaces per level.
|
[
"Print",
"str_arg",
"indented",
"two",
"spaces",
"per",
"level",
"."
] |
def print_indent(str_arg, level=0):
"""Print str_arg indented two spaces per level."""
print("{}{}".format(level * " ", str_arg))
|
[
"def",
"print_indent",
"(",
"str_arg",
",",
"level",
"=",
"0",
")",
":",
"print",
"(",
"\"{}{}\"",
".",
"format",
"(",
"level",
"*",
"\" \"",
",",
"str_arg",
")",
")"
] |
https://github.com/awslabs/aws-ec2rescue-linux/blob/8ecf40e7ea0d2563dac057235803fca2221029d2/ec2rlcore/prediag.py#L340-L342
|
||
stopstalk/stopstalk-deployment
|
10c3ab44c4ece33ae515f6888c15033db2004bb1
|
aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/_vendor/pyparsing.py
|
python
|
ParserElement.split
|
(self, instring, maxsplit=_MAX_INT, includeSeparators=False)
|
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
|
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
|
[
"Generator",
"method",
"to",
"split",
"a",
"string",
"using",
"the",
"given",
"expression",
"as",
"a",
"separator",
".",
"May",
"be",
"called",
"with",
"optional",
"C",
"{",
"maxsplit",
"}",
"argument",
"to",
"limit",
"the",
"number",
"of",
"splits",
";",
"and",
"the",
"optional",
"C",
"{",
"includeSeparators",
"}",
"argument",
"(",
"default",
"=",
"C",
"{",
"False",
"}",
")",
"if",
"the",
"separating",
"matching",
"text",
"should",
"be",
"included",
"in",
"the",
"split",
"results",
".",
"Example",
"::",
"punc",
"=",
"oneOf",
"(",
"list",
"(",
".",
";",
":",
"/",
"-",
"!?",
"))",
"print",
"(",
"list",
"(",
"punc",
".",
"split",
"(",
"This",
"this?",
"this",
"sentence",
"is",
"badly",
"punctuated!",
")))",
"prints",
"::",
"[",
"This",
"this",
"this",
"sentence",
"is",
"badly",
"punctuated",
"]"
] |
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
|
[
"def",
"split",
"(",
"self",
",",
"instring",
",",
"maxsplit",
"=",
"_MAX_INT",
",",
"includeSeparators",
"=",
"False",
")",
":",
"splits",
"=",
"0",
"last",
"=",
"0",
"for",
"t",
",",
"s",
",",
"e",
"in",
"self",
".",
"scanString",
"(",
"instring",
",",
"maxMatches",
"=",
"maxsplit",
")",
":",
"yield",
"instring",
"[",
"last",
":",
"s",
"]",
"if",
"includeSeparators",
":",
"yield",
"t",
"[",
"0",
"]",
"last",
"=",
"e",
"yield",
"instring",
"[",
"last",
":",
"]"
] |
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pkg_resources/_vendor/pyparsing.py#L1799-L1819
|
||
IronLanguages/ironpython2
|
51fdedeeda15727717fb8268a805f71b06c0b9f1
|
Src/StdLib/Lib/site-packages/win32/lib/win32timezone.py
|
python
|
utcnow
|
()
|
return now
|
Return the UTC time now with timezone awareness as enabled
by this module
>>> now = utcnow()
|
Return the UTC time now with timezone awareness as enabled
by this module
>>> now = utcnow()
|
[
"Return",
"the",
"UTC",
"time",
"now",
"with",
"timezone",
"awareness",
"as",
"enabled",
"by",
"this",
"module",
">>>",
"now",
"=",
"utcnow",
"()"
] |
def utcnow():
"""
Return the UTC time now with timezone awareness as enabled
by this module
>>> now = utcnow()
"""
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=TimeZoneInfo.utc())
return now
|
[
"def",
"utcnow",
"(",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"now",
"=",
"now",
".",
"replace",
"(",
"tzinfo",
"=",
"TimeZoneInfo",
".",
"utc",
"(",
")",
")",
"return",
"now"
] |
https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/site-packages/win32/lib/win32timezone.py#L666-L674
|
|
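A standard-library-only sketch producing the same timezone-aware result, using the aware API that superseded the since-deprecated datetime.datetime.utcnow():

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
assert now.tzinfo is not None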
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/eiam/v20210420/eiam_client.py
|
python
|
EiamClient.ListApplications
|
(self, request)
|
Get the list of applications.
:param request: Request instance for ListApplications.
:type request: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsResponse`
|
Get the list of applications.
|
[
"获取应用列表信息。"
] |
def ListApplications(self, request):
"""获取应用列表信息。
:param request: Request instance for ListApplications.
:type request: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.ListApplicationsResponse`
"""
try:
params = request._serialize()
body = self.call("ListApplications", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ListApplicationsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
|
[
"def",
"ListApplications",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"params",
"=",
"request",
".",
"_serialize",
"(",
")",
"body",
"=",
"self",
".",
"call",
"(",
"\"ListApplications\"",
",",
"params",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"if",
"\"Error\"",
"not",
"in",
"response",
"[",
"\"Response\"",
"]",
":",
"model",
"=",
"models",
".",
"ListApplicationsResponse",
"(",
")",
"model",
".",
"_deserialize",
"(",
"response",
"[",
"\"Response\"",
"]",
")",
"return",
"model",
"else",
":",
"code",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"message",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Message\"",
"]",
"reqid",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"RequestId\"",
"]",
"raise",
"TencentCloudSDKException",
"(",
"code",
",",
"message",
",",
"reqid",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"TencentCloudSDKException",
")",
":",
"raise",
"else",
":",
"raise",
"TencentCloudSDKException",
"(",
"e",
".",
"message",
",",
"e",
".",
"message",
")"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/eiam/v20210420/eiam_client.py#L757-L782
|
||
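A hedged usage sketch; the credential and client setup follows the common pattern of the public tencentcloud-sdk-python package, and the region value is an assumption, not part of the record:

from tencentcloud.common import credential
from tencentcloud.eiam.v20210420 import eiam_client, models

cred = credential.Credential('SECRET_ID', 'SECRET_KEY')  # placeholders
client = eiam_client.EiamClient(cred, 'ap-guangzhou')    # region is an assumption
req = models.ListApplicationsRequest()
resp = client.ListApplications(req)   # raises TencentCloudSDKException on error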
mozillazg/pypy
|
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
|
pypy/objspace/std/bytearrayobject.py
|
python
|
BytearrayDocstrings.endswith
|
()
|
B.endswith(suffix[, start[, end]]) -> bool
Return True if B ends with the specified suffix, False otherwise.
With optional start, test B beginning at that position.
With optional end, stop comparing B at that position.
suffix can also be a tuple of strings to try.
|
B.endswith(suffix[, start[, end]]) -> bool
|
[
"B",
".",
"endswith",
"(",
"suffix",
"[",
"start",
"[",
"end",
"]]",
")",
"-",
">",
"bool"
] |
def endswith():
"""B.endswith(suffix[, start[, end]]) -> bool
Return True if B ends with the specified suffix, False otherwise.
With optional start, test B beginning at that position.
With optional end, stop comparing B at that position.
suffix can also be a tuple of strings to try.
"""
|
[
"def",
"endswith",
"(",
")",
":"
] |
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/pypy/objspace/std/bytearrayobject.py#L772-L779
|
||
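The documented behavior, demonstrated on a real bytearray (standard CPython semantics, which PyPy mirrors):

b = bytearray(b'hello.py')
assert b.endswith(b'.py')
assert b.endswith((b'.txt', b'.py'))   # suffix may be a tuple
assert b.endswith(b'lo', 0, 5)         # only b[0:5] == b'hello' is compared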
fortharris/Pcode
|
147962d160a834c219e12cb456abc130826468e4
|
Extensions/Settings/GeneralSettings.py
|
python
|
GeneralSettings.setMatchBraces
|
(self, state)
|
[] |
def setMatchBraces(self, state):
self.useData.SETTINGS["MatchBraces"] = str(state)
for i in range(self.projectWindowStack.count() - 1):
editorTabWidget = self.projectWindowStack.widget(i).editorTabWidget
for i in range(editorTabWidget.count()):
editor = editorTabWidget.getEditor(i)
editor2 = editorTabWidget.getCloneEditor(i)
if state:
editor.setBraceMatching(QsciScintilla.SloppyBraceMatch)
editor2.setBraceMatching(
QsciScintilla.SloppyBraceMatch)
else:
editor.setBraceMatching(QsciScintilla.NoBraceMatch)
editor2.setBraceMatching(QsciScintilla.NoBraceMatch)
|
[
"def",
"setMatchBraces",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"useData",
".",
"SETTINGS",
"[",
"\"MatchBraces\"",
"]",
"=",
"str",
"(",
"state",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"projectWindowStack",
".",
"count",
"(",
")",
"-",
"1",
")",
":",
"editorTabWidget",
"=",
"self",
".",
"projectWindowStack",
".",
"widget",
"(",
"i",
")",
".",
"editorTabWidget",
"for",
"i",
"in",
"range",
"(",
"editorTabWidget",
".",
"count",
"(",
")",
")",
":",
"editor",
"=",
"editorTabWidget",
".",
"getEditor",
"(",
"i",
")",
"editor2",
"=",
"editorTabWidget",
".",
"getCloneEditor",
"(",
"i",
")",
"if",
"state",
":",
"editor",
".",
"setBraceMatching",
"(",
"QsciScintilla",
".",
"SloppyBraceMatch",
")",
"editor2",
".",
"setBraceMatching",
"(",
"QsciScintilla",
".",
"SloppyBraceMatch",
")",
"else",
":",
"editor",
".",
"setBraceMatching",
"(",
"QsciScintilla",
".",
"NoBraceMatch",
")",
"editor2",
".",
"setBraceMatching",
"(",
"QsciScintilla",
".",
"NoBraceMatch",
")"
] |
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/Extensions/Settings/GeneralSettings.py#L434-L447
|
||||
Chaffelson/nipyapi
|
d3b186fd701ce308c2812746d98af9120955e810
|
nipyapi/registry/models/extension_repo_group.py
|
python
|
ExtensionRepoGroup.bucket_name
|
(self, bucket_name)
|
Sets the bucket_name of this ExtensionRepoGroup.
The bucket name
:param bucket_name: The bucket_name of this ExtensionRepoGroup.
:type: str
|
Sets the bucket_name of this ExtensionRepoGroup.
The bucket name
|
[
"Sets",
"the",
"bucket_name",
"of",
"this",
"ExtensionRepoGroup",
".",
"The",
"bucket",
"name"
] |
def bucket_name(self, bucket_name):
"""
Sets the bucket_name of this ExtensionRepoGroup.
The bucket name
:param bucket_name: The bucket_name of this ExtensionRepoGroup.
:type: str
"""
self._bucket_name = bucket_name
|
[
"def",
"bucket_name",
"(",
"self",
",",
"bucket_name",
")",
":",
"self",
".",
"_bucket_name",
"=",
"bucket_name"
] |
https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/registry/models/extension_repo_group.py#L96-L105
|
||
awslabs/gluon-ts
|
066ec3b7f47aa4ee4c061a28f35db7edbad05a98
|
src/gluonts/nursery/SCott/pts/model/tempflow/tempflow_network.py
|
python
|
TempFlowTrainingNetwork.distr_args
|
(self, rnn_outputs: torch.Tensor)
|
return distr_args
|
Returns the distribution of DeepVAR with respect to the RNN outputs.
Parameters
----------
rnn_outputs
Outputs of the unrolled RNN (batch_size, seq_len, num_cells)
scale
Mean scale for each time series (batch_size, 1, target_dim)
Returns
-------
distr
Distribution instance
distr_args
Distribution arguments
|
Returns the distribution of DeepVAR with respect to the RNN outputs.
|
[
"Returns",
"the",
"distribution",
"of",
"DeepVAR",
"with",
"respect",
"to",
"the",
"RNN",
"outputs",
"."
] |
def distr_args(self, rnn_outputs: torch.Tensor):
"""
Returns the distribution of DeepVAR with respect to the RNN outputs.
Parameters
----------
rnn_outputs
Outputs of the unrolled RNN (batch_size, seq_len, num_cells)
scale
Mean scale for each time series (batch_size, 1, target_dim)
Returns
-------
distr
Distribution instance
distr_args
Distribution arguments
"""
(distr_args,) = self.proj_dist_args(rnn_outputs)
# # compute likelihood of target given the predicted parameters
# distr = self.distr_output.distribution(distr_args, scale=scale)
# return distr, distr_args
return distr_args
|
[
"def",
"distr_args",
"(",
"self",
",",
"rnn_outputs",
":",
"torch",
".",
"Tensor",
")",
":",
"(",
"distr_args",
",",
")",
"=",
"self",
".",
"proj_dist_args",
"(",
"rnn_outputs",
")",
"# # compute likelihood of target given the predicted parameters",
"# distr = self.distr_output.distribution(distr_args, scale=scale)",
"# return distr, distr_args",
"return",
"distr_args"
] |
https://github.com/awslabs/gluon-ts/blob/066ec3b7f47aa4ee4c061a28f35db7edbad05a98/src/gluonts/nursery/SCott/pts/model/tempflow/tempflow_network.py#L296-L320
|
|
nadineproject/nadine
|
c41c8ef7ffe18f1853029c97eecc329039b4af6c
|
nadine/models/organization.py
|
python
|
OrganizationMember.__str__
|
(self)
|
return "%s member of %s" % (self.user, self.organization)
|
[] |
def __str__(self):
return "%s member of %s" % (self.user, self.organization)
|
[
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"\"%s member of %s\"",
"%",
"(",
"self",
".",
"user",
",",
"self",
".",
"organization",
")"
] |
https://github.com/nadineproject/nadine/blob/c41c8ef7ffe18f1853029c97eecc329039b4af6c/nadine/models/organization.py#L191-L192
|
|||
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
custom/up_nrhm/sql_data.py
|
python
|
ASHAAFChecklistData.table_name
|
(self)
|
return get_table_name(self.config['domain'], TABLE_ID)
|
[] |
def table_name(self):
return get_table_name(self.config['domain'], TABLE_ID)
|
[
"def",
"table_name",
"(",
"self",
")",
":",
"return",
"get_table_name",
"(",
"self",
".",
"config",
"[",
"'domain'",
"]",
",",
"TABLE_ID",
")"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/custom/up_nrhm/sql_data.py#L260-L261
|
|||
derrod/legendary
|
3bc819e56742cec3b60ec209e22004ae7ee783b3
|
legendary/utils/egl_crypt.py
|
python
|
xor_bytes
|
(a, b)
|
return bytes(i ^ j for i, j in zip(a, b))
|
Returns a new byte array with the elements xor'ed.
|
Returns a new byte array with the elements xor'ed.
|
[
"Returns",
"a",
"new",
"byte",
"array",
"with",
"the",
"elements",
"xor",
"ed",
"."
] |
def xor_bytes(a, b):
""" Returns a new byte array with the elements xor'ed. """
return bytes(i ^ j for i, j in zip(a, b))
|
[
"def",
"xor_bytes",
"(",
"a",
",",
"b",
")",
":",
"return",
"bytes",
"(",
"i",
"^",
"j",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"a",
",",
"b",
")",
")"
] |
https://github.com/derrod/legendary/blob/3bc819e56742cec3b60ec209e22004ae7ee783b3/legendary/utils/egl_crypt.py#L130-L132
|
|
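A one-time-pad style round trip with the helper above; note that zip truncates to the shorter input, so key and data are kept the same length here:

key = b'\x13\x37\x13\x37'
ciphertext = xor_bytes(b'data', key)
assert xor_bytes(ciphertext, key) == b'data'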
scrapinghub/splash
|
802d8391984bae049ef95a3fe1a74feaee95a233
|
splash/lua_runtime.py
|
python
|
SplashLuaRuntime.remove_allowed_object
|
(self, obj)
|
Remove an object from a list of objects the runtime can access
|
Remove an object from a list of objects the runtime can access
|
[
"Remove",
"an",
"object",
"from",
"a",
"list",
"of",
"objects",
"the",
"runtime",
"can",
"access"
] |
def remove_allowed_object(self, obj):
""" Remove an object from a list of objects the runtime can access """
if obj in self._allowed_object_attrs:
del self._allowed_object_attrs[obj]
|
[
"def",
"remove_allowed_object",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
"in",
"self",
".",
"_allowed_object_attrs",
":",
"del",
"self",
".",
"_allowed_object_attrs",
"[",
"obj",
"]"
] |
https://github.com/scrapinghub/splash/blob/802d8391984bae049ef95a3fe1a74feaee95a233/splash/lua_runtime.py#L46-L49
|
||
fonttools/fonttools
|
892322aaff6a89bea5927379ec06bc0da3dfb7df
|
Lib/fontTools/subset/__init__.py
|
python
|
retain_empty_scripts
|
(self)
|
return self.__class__ == ttLib.getTableClass('GSUB')
|
[] |
def retain_empty_scripts(self):
# https://github.com/fonttools/fonttools/issues/518
# https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15
return self.__class__ == ttLib.getTableClass('GSUB')
|
[
"def",
"retain_empty_scripts",
"(",
"self",
")",
":",
"# https://github.com/fonttools/fonttools/issues/518",
"# https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15",
"return",
"self",
".",
"__class__",
"==",
"ttLib",
".",
"getTableClass",
"(",
"'GSUB'",
")"
] |
https://github.com/fonttools/fonttools/blob/892322aaff6a89bea5927379ec06bc0da3dfb7df/Lib/fontTools/subset/__init__.py#L1571-L1574
|
|||
simonacca/zatt
|
ec4748599dd8365d4d808dc9131cc71dd28f9cbf
|
zatt/server/states.py
|
python
|
Leader.on_client_append
|
(self, protocol, msg)
|
Append new entries to Leader log.
|
Append new entries to Leader log.
|
[
"Append",
"new",
"entries",
"to",
"Leader",
"log",
"."
] |
def on_client_append(self, protocol, msg):
"""Append new entries to Leader log."""
entry = {'term': self.persist['currentTerm'], 'data': msg['data']}
if msg['data']['key'] == 'cluster':
protocol.send({'type': 'result', 'success': False})
self.log.append_entries([entry], self.log.index)
if self.log.index in self.waiting_clients:
self.waiting_clients[self.log.index].append(protocol)
else:
self.waiting_clients[self.log.index] = [protocol]
self.on_peer_response_append(
self.volatile['address'], {'success': True,
'matchIndex': self.log.commitIndex})
|
[
"def",
"on_client_append",
"(",
"self",
",",
"protocol",
",",
"msg",
")",
":",
"entry",
"=",
"{",
"'term'",
":",
"self",
".",
"persist",
"[",
"'currentTerm'",
"]",
",",
"'data'",
":",
"msg",
"[",
"'data'",
"]",
"}",
"if",
"msg",
"[",
"'data'",
"]",
"[",
"'key'",
"]",
"==",
"'cluster'",
":",
"protocol",
".",
"send",
"(",
"{",
"'type'",
":",
"'result'",
",",
"'success'",
":",
"False",
"}",
")",
"self",
".",
"log",
".",
"append_entries",
"(",
"[",
"entry",
"]",
",",
"self",
".",
"log",
".",
"index",
")",
"if",
"self",
".",
"log",
".",
"index",
"in",
"self",
".",
"waiting_clients",
":",
"self",
".",
"waiting_clients",
"[",
"self",
".",
"log",
".",
"index",
"]",
".",
"append",
"(",
"protocol",
")",
"else",
":",
"self",
".",
"waiting_clients",
"[",
"self",
".",
"log",
".",
"index",
"]",
"=",
"[",
"protocol",
"]",
"self",
".",
"on_peer_response_append",
"(",
"self",
".",
"volatile",
"[",
"'address'",
"]",
",",
"{",
"'success'",
":",
"True",
",",
"'matchIndex'",
":",
"self",
".",
"log",
".",
"commitIndex",
"}",
")"
] |
https://github.com/simonacca/zatt/blob/ec4748599dd8365d4d808dc9131cc71dd28f9cbf/zatt/server/states.py#L311-L323
|
||
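The handler above expects a client message whose 'data' dict carries a 'key'; a sketch of the log entry it builds, with the message shape and term inferred from the code rather than any documented schema:

msg = {'type': 'append', 'data': {'key': 'x', 'value': 42}}  # hypothetical message
current_term = 3                           # stands in for persist['currentTerm']
entry = {'term': current_term, 'data': msg['data']}
assert entry == {'term': 3, 'data': {'key': 'x', 'value': 42}}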
mchristopher/PokemonGo-DesktopMap
|
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
|
app/pywin/Lib/pipes.py
|
python
|
quote
|
(file)
|
return "'" + file.replace("'", "'\"'\"'") + "'"
|
Return a shell-escaped version of the file string.
|
Return a shell-escaped version of the file string.
|
[
"Return",
"a",
"shell",
"-",
"escaped",
"version",
"of",
"the",
"file",
"string",
"."
] |
def quote(file):
"""Return a shell-escaped version of the file string."""
for c in file:
if c not in _safechars:
break
else:
if not file:
return "''"
return file
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + file.replace("'", "'\"'\"'") + "'"
|
[
"def",
"quote",
"(",
"file",
")",
":",
"for",
"c",
"in",
"file",
":",
"if",
"c",
"not",
"in",
"_safechars",
":",
"break",
"else",
":",
"if",
"not",
"file",
":",
"return",
"\"''\"",
"return",
"file",
"# use single quotes, and put single quotes into double quotes",
"# the string $'b is then quoted as '$'\"'\"'b'",
"return",
"\"'\"",
"+",
"file",
".",
"replace",
"(",
"\"'\"",
",",
"\"'\\\"'\\\"'\"",
")",
"+",
"\"'\""
] |
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/pipes.py#L267-L278
|
|
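The escaping rule above: an embedded single quote closes the quoted run, is emitted inside double quotes, then a new quoted run opens. A quick demonstration (modern Python should prefer shlex.quote; the pipes module was removed in 3.13):

assert quote('safe_name') == 'safe_name'   # only characters from _safechars
assert quote('') == "''"
assert quote("don't") == "'don'\"'\"'t'"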
Alex-Fabbri/Multi-News
|
f6476d1f114662eb93db32e9b704b7c4fe047217
|
code/Hi_MAP/onmt/models/sru.py
|
python
|
SRUCell.forward
|
(self, input, c0=None)
|
return h, c
|
[] |
def forward(self, input, c0=None):
assert input.dim() == 2 or input.dim() == 3
n_in, n_out = self.n_in, self.n_out
batch = input.size(-2)
if c0 is None:
c0 = input.data.new(
batch, n_out if not self.bidirectional else n_out * 2
).zero_()
if self.training and (self.rnn_dropout > 0):
mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
x = input * mask.expand_as(input)
else:
x = input
x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
u = x_2d.mm(self.weight)
if self.training and (self.dropout > 0):
bidir = 2 if self.bidirectional else 1
mask_h = self.get_dropout_mask_(
(batch, n_out * bidir), self.dropout)
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0, mask_h
)
else:
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0
)
return h, c
|
[
"def",
"forward",
"(",
"self",
",",
"input",
",",
"c0",
"=",
"None",
")",
":",
"assert",
"input",
".",
"dim",
"(",
")",
"==",
"2",
"or",
"input",
".",
"dim",
"(",
")",
"==",
"3",
"n_in",
",",
"n_out",
"=",
"self",
".",
"n_in",
",",
"self",
".",
"n_out",
"batch",
"=",
"input",
".",
"size",
"(",
"-",
"2",
")",
"if",
"c0",
"is",
"None",
":",
"c0",
"=",
"input",
".",
"data",
".",
"new",
"(",
"batch",
",",
"n_out",
"if",
"not",
"self",
".",
"bidirectional",
"else",
"n_out",
"*",
"2",
")",
".",
"zero_",
"(",
")",
"if",
"self",
".",
"training",
"and",
"(",
"self",
".",
"rnn_dropout",
">",
"0",
")",
":",
"mask",
"=",
"self",
".",
"get_dropout_mask_",
"(",
"(",
"batch",
",",
"n_in",
")",
",",
"self",
".",
"rnn_dropout",
")",
"x",
"=",
"input",
"*",
"mask",
".",
"expand_as",
"(",
"input",
")",
"else",
":",
"x",
"=",
"input",
"x_2d",
"=",
"x",
"if",
"x",
".",
"dim",
"(",
")",
"==",
"2",
"else",
"x",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
",",
"n_in",
")",
"u",
"=",
"x_2d",
".",
"mm",
"(",
"self",
".",
"weight",
")",
"if",
"self",
".",
"training",
"and",
"(",
"self",
".",
"dropout",
">",
"0",
")",
":",
"bidir",
"=",
"2",
"if",
"self",
".",
"bidirectional",
"else",
"1",
"mask_h",
"=",
"self",
".",
"get_dropout_mask_",
"(",
"(",
"batch",
",",
"n_out",
"*",
"bidir",
")",
",",
"self",
".",
"dropout",
")",
"h",
",",
"c",
"=",
"SRU_Compute",
"(",
"self",
".",
"activation_type",
",",
"n_out",
",",
"self",
".",
"bidirectional",
")",
"(",
"u",
",",
"input",
",",
"self",
".",
"bias",
",",
"c0",
",",
"mask_h",
")",
"else",
":",
"h",
",",
"c",
"=",
"SRU_Compute",
"(",
"self",
".",
"activation_type",
",",
"n_out",
",",
"self",
".",
"bidirectional",
")",
"(",
"u",
",",
"input",
",",
"self",
".",
"bias",
",",
"c0",
")",
"return",
"h",
",",
"c"
] |
https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/sru.py#L528-L560
|
|||
stb-tester/stb-tester
|
5b652bd5018360f2352f9bedc5f80ff92e66b2d1
|
_stbt/keyboard.py
|
python
|
_join_with_commas
|
(items, last_one=", ")
|
>>> _join_with_commas(["A", "B", "C"], last_one=" or ")
'A, B or C'
>>> _join_with_commas(["A", "C"], last_one=" or ")
'A or C'
>>> _join_with_commas(["A"], last_one=" or ")
'A'
>>> _join_with_commas([], last_one=" or ")
''
|
>>> _join_with_commas(["A", "B", "C"], last_one=" or ")
'A, B or C'
>>> _join_with_commas(["A", "C"], last_one=" or ")
'A or C'
>>> _join_with_commas(["A"], last_one=" or ")
'A'
>>> _join_with_commas([], last_one=" or ")
''
|
[
">>>",
"_join_with_commas",
"(",
"[",
"A",
"B",
"C",
"]",
"last_one",
"=",
"or",
")",
"A",
"B",
"or",
"C",
">>>",
"_join_with_commas",
"(",
"[",
"A",
"C",
"]",
"last_one",
"=",
"or",
")",
"A",
"or",
"C",
">>>",
"_join_with_commas",
"(",
"[",
"A",
"]",
"last_one",
"=",
"or",
")",
"A",
">>>",
"_join_with_commas",
"(",
"[]",
"last_one",
"=",
"or",
")"
] |
def _join_with_commas(items, last_one=", "):
"""
>>> _join_with_commas(["A", "B", "C"], last_one=" or ")
'A, B or C'
>>> _join_with_commas(["A", "C"], last_one=" or ")
'A or C'
>>> _join_with_commas(["A"], last_one=" or ")
'A'
>>> _join_with_commas([], last_one=" or ")
''
"""
if len(items) > 1:
return last_one.join([
", ".join(items[:-1]),
items[-1]])
elif len(items) == 1:
return items[0]
else:
return ""
|
[
"def",
"_join_with_commas",
"(",
"items",
",",
"last_one",
"=",
"\", \"",
")",
":",
"if",
"len",
"(",
"items",
")",
">",
"1",
":",
"return",
"last_one",
".",
"join",
"(",
"[",
"\", \"",
".",
"join",
"(",
"items",
"[",
":",
"-",
"1",
"]",
")",
",",
"items",
"[",
"-",
"1",
"]",
"]",
")",
"elif",
"len",
"(",
"items",
")",
"==",
"1",
":",
"return",
"items",
"[",
"0",
"]",
"else",
":",
"return",
"\"\""
] |
https://github.com/stb-tester/stb-tester/blob/5b652bd5018360f2352f9bedc5f80ff92e66b2d1/_stbt/keyboard.py#L807-L825
|
||
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
|
deep-learning/NLP/Seq2Seq-PyTorch/model.py
|
python
|
Seq2SeqFastAttention.get_state
|
(self, input)
|
return h0_encoder.cuda(), c0_encoder.cuda()
|
Get cell states and hidden states.
|
Get cell states and hidden states.
|
[
"Get",
"cell",
"states",
"and",
"hidden",
"states",
"."
] |
def get_state(self, input):
"""Get cell states and hidden states."""
batch_size = input.size(0) \
if self.encoder.batch_first else input.size(1)
h0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
c0_encoder = Variable(torch.zeros(
self.encoder.num_layers * self.num_directions,
batch_size,
self.src_hidden_dim
), requires_grad=False)
return h0_encoder.cuda(), c0_encoder.cuda()
|
[
"def",
"get_state",
"(",
"self",
",",
"input",
")",
":",
"batch_size",
"=",
"input",
".",
"size",
"(",
"0",
")",
"if",
"self",
".",
"encoder",
".",
"batch_first",
"else",
"input",
".",
"size",
"(",
"1",
")",
"h0_encoder",
"=",
"Variable",
"(",
"torch",
".",
"zeros",
"(",
"self",
".",
"encoder",
".",
"num_layers",
"*",
"self",
".",
"num_directions",
",",
"batch_size",
",",
"self",
".",
"src_hidden_dim",
")",
",",
"requires_grad",
"=",
"False",
")",
"c0_encoder",
"=",
"Variable",
"(",
"torch",
".",
"zeros",
"(",
"self",
".",
"encoder",
".",
"num_layers",
"*",
"self",
".",
"num_directions",
",",
"batch_size",
",",
"self",
".",
"src_hidden_dim",
")",
",",
"requires_grad",
"=",
"False",
")",
"return",
"h0_encoder",
".",
"cuda",
"(",
")",
",",
"c0_encoder",
".",
"cuda",
"(",
")"
] |
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/deep-learning/NLP/Seq2Seq-PyTorch/model.py#L1093-L1108
|
|
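A modern PyTorch sketch of the same initial states: Variable is long deprecated, and plain tensors created with torch.zeros carry the same information. The shapes follow the record above; the concrete sizes are illustrative assumptions.

import torch

num_layers, num_directions, batch_size, hidden = 2, 1, 8, 256  # assumptions
h0 = torch.zeros(num_layers * num_directions, batch_size, hidden)
c0 = torch.zeros(num_layers * num_directions, batch_size, hidden)
# .cuda() (or .to(device)) would move these to the GPU as the original does.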
Azure/azure-devops-cli-extension
|
11334cd55806bef0b99c3bee5a438eed71e44037
|
azure-devops/azext_devops/devops_sdk/v5_1/git/git_client_base.py
|
python
|
GitClientBase.get_pull_request_reviewer
|
(self, repository_id, pull_request_id, reviewer_id, project=None)
|
return self._deserialize('IdentityRefWithVote', response)
|
GetPullRequestReviewer.
Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v5_1.git.models.IdentityRefWithVote>`
|
GetPullRequestReviewer.
Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v5_1.git.models.IdentityRefWithVote>`
|
[
"GetPullRequestReviewer",
".",
"Retrieve",
"information",
"about",
"a",
"particular",
"reviewer",
"on",
"a",
"pull",
"request",
":",
"param",
"str",
"repository_id",
":",
"The",
"repository",
"ID",
"of",
"the",
"pull",
"request’s",
"target",
"branch",
".",
":",
"param",
"int",
"pull_request_id",
":",
"ID",
"of",
"the",
"pull",
"request",
".",
":",
"param",
"str",
"reviewer_id",
":",
"ID",
"of",
"the",
"reviewer",
".",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"rtype",
":",
":",
"class",
":",
"<IdentityRefWithVote",
">",
"<azure",
".",
"devops",
".",
"v5_1",
".",
"git",
".",
"models",
".",
"IdentityRefWithVote",
">"
] |
def get_pull_request_reviewer(self, repository_id, pull_request_id, reviewer_id, project=None):
"""GetPullRequestReviewer.
Retrieve information about a particular reviewer on a pull request
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param str reviewer_id: ID of the reviewer.
:param str project: Project ID or project name
:rtype: :class:`<IdentityRefWithVote> <azure.devops.v5_1.git.models.IdentityRefWithVote>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int')
if reviewer_id is not None:
route_values['reviewerId'] = self._serialize.url('reviewer_id', reviewer_id, 'str')
response = self._send(http_method='GET',
location_id='4b6702c7-aa35-4b89-9c96-b9abf6d3e540',
version='5.1',
route_values=route_values)
return self._deserialize('IdentityRefWithVote', response)
|
[
"def",
"get_pull_request_reviewer",
"(",
"self",
",",
"repository_id",
",",
"pull_request_id",
",",
"reviewer_id",
",",
"project",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"repository_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'repositoryId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'repository_id'",
",",
"repository_id",
",",
"'str'",
")",
"if",
"pull_request_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'pullRequestId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'pull_request_id'",
",",
"pull_request_id",
",",
"'int'",
")",
"if",
"reviewer_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'reviewerId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'reviewer_id'",
",",
"reviewer_id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'4b6702c7-aa35-4b89-9c96-b9abf6d3e540'",
",",
"version",
"=",
"'5.1'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'IdentityRefWithVote'",
",",
"response",
")"
] |
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/git/git_client_base.py#L1937-L1959
|
|
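A hedged usage sketch via the public azure-devops package, which ships the same generated client; the organization URL, token, and IDs are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'PERSONAL_ACCESS_TOKEN'))
git_client = connection.clients.get_git_client()
reviewer = git_client.get_pull_request_reviewer(
    repository_id='my-repo', pull_request_id=42,
    reviewer_id='reviewer-guid', project='my-project')
print(reviewer.vote)   # IdentityRefWithVote exposes the reviewer's vote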
glamp/bashplotlib
|
db4065cfe65c0bf7c530e0e8b9328daf9593ad74
|
bashplotlib/utils/helpers.py
|
python
|
get_colour
|
(colour, default="default")
|
return bcolours.get(colour, bcolours[default])
|
Get the escape code sequence for a colour
|
Get the escape code sequence for a colour
|
[
"Get",
"the",
"escape",
"code",
"sequence",
"for",
"a",
"colour"
] |
def get_colour(colour, default="default"):
"""
Get the escape code sequence for a colour
"""
return bcolours.get(colour, bcolours[default])
|
[
"def",
"get_colour",
"(",
"colour",
",",
"default",
"=",
"\"default\"",
")",
":",
"return",
"bcolours",
".",
"get",
"(",
"colour",
",",
"bcolours",
"[",
"default",
"]",
")"
] |
https://github.com/glamp/bashplotlib/blob/db4065cfe65c0bf7c530e0e8b9328daf9593ad74/bashplotlib/utils/helpers.py#L29-L33
|
|
xgi/castero
|
766965fb1d3586d62ab6fd6dd144fa510c1e0ecb
|
castero/feed.py
|
python
|
Feed.title
|
(self)
|
return self._title
|
str: the title of the feed
|
str: the title of the feed
|
[
"str",
":",
"the",
"title",
"of",
"the",
"feed"
] |
def title(self) -> str:
"""str: the title of the feed"""
return self._title
|
[
"def",
"title",
"(",
"self",
")",
"->",
"str",
":",
"return",
"self",
".",
"_title"
] |
https://github.com/xgi/castero/blob/766965fb1d3586d62ab6fd6dd144fa510c1e0ecb/castero/feed.py#L373-L375
|
|
bytedance/byteps
|
d0bcf1a87ee87539ceb29bcc976d4da063ffc47b
|
example/tensorflow/tensorflow2_mnist_bps_MirroredStrategy.py
|
python
|
mnist_dataset
|
(batch_size)
|
return train_dataset
|
[] |
def mnist_dataset(batch_size):
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# The `x` arrays are in uint8 and have values in the range [0, 255].
# We need to convert them to float32 with values in the range [0, 1]
x_train = x_train / np.float32(255)
y_train = y_train.astype(np.int64)
train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(60000).repeat().batch(batch_size)
return train_dataset
|
[
"def",
"mnist_dataset",
"(",
"batch_size",
")",
":",
"(",
"x_train",
",",
"y_train",
")",
",",
"_",
"=",
"tf",
".",
"keras",
".",
"datasets",
".",
"mnist",
".",
"load_data",
"(",
")",
"# The `x` arrays are in uint8 and have values in the range [0, 255].",
"# We need to convert them to float32 with values in the range [0, 1]",
"x_train",
"=",
"x_train",
"/",
"np",
".",
"float32",
"(",
"255",
")",
"y_train",
"=",
"y_train",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"train_dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"(",
"x_train",
",",
"y_train",
")",
")",
".",
"shuffle",
"(",
"60000",
")",
".",
"repeat",
"(",
")",
".",
"batch",
"(",
"batch_size",
")",
"return",
"train_dataset"
] |
https://github.com/bytedance/byteps/blob/d0bcf1a87ee87539ceb29bcc976d4da063ffc47b/example/tensorflow/tensorflow2_mnist_bps_MirroredStrategy.py#L28-L36
|
|||
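A minimal eager-mode usage sketch for the dataset builder above; TensorFlow 2 and the example script's own imports are assumed to be available:

import tensorflow as tf   # as in the example script

ds = mnist_dataset(batch_size=64)
images, labels = next(iter(ds))    # dataset repeats indefinitely, so iterate with care
print(images.shape, labels.shape)  # (64, 28, 28) (64,)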
zopefoundation/Zope
|
ea04dd670d1a48d4d5c879d3db38fc2e9b4330bb
|
src/Products/SiteAccess/__init__.py
|
python
|
initialize
|
(context)
|
[] |
def initialize(context):
from Products.SiteAccess import VirtualHostMonster
context.registerClass(
instance_class=VirtualHostMonster.VirtualHostMonster,
permission='Add Virtual Host Monsters',
constructors=VirtualHostMonster.constructors,
)
|
[
"def",
"initialize",
"(",
"context",
")",
":",
"from",
"Products",
".",
"SiteAccess",
"import",
"VirtualHostMonster",
"context",
".",
"registerClass",
"(",
"instance_class",
"=",
"VirtualHostMonster",
".",
"VirtualHostMonster",
",",
"permission",
"=",
"'Add Virtual Host Monsters'",
",",
"constructors",
"=",
"VirtualHostMonster",
".",
"constructors",
",",
")"
] |
https://github.com/zopefoundation/Zope/blob/ea04dd670d1a48d4d5c879d3db38fc2e9b4330bb/src/Products/SiteAccess/__init__.py#L1-L8
|
||||
nilearn/nilearn
|
9edba4471747efacf21260bf470a346307f52706
|
nilearn/plotting/displays/_slicers.py
|
python
|
TiledSlicer._find_initial_axes_coord
|
(self, index)
|
return [coord1, coord2, coord3, coord4]
|
Find coordinates for initial axes placement for xyz cuts.
Parameters
----------
index : :obj:`int`
Index corresponding to current cut 'x', 'y' or 'z'.
Returns
-------
[coord1, coord2, coord3, coord4] : :obj:`list` of :obj:`int`
x0, y0, x1, y1 coordinates used by matplotlib
to position axes in figure.
|
Find coordinates for initial axes placement for xyz cuts.
|
[
"Find",
"coordinates",
"for",
"initial",
"axes",
"placement",
"for",
"xyz",
"cuts",
"."
] |
def _find_initial_axes_coord(self, index):
"""Find coordinates for initial axes placement for xyz cuts.
Parameters
----------
index : :obj:`int`
Index corresponding to current cut 'x', 'y' or 'z'.
Returns
-------
[coord1, coord2, coord3, coord4] : :obj:`list` of :obj:`int`
x0, y0, x1, y1 coordinates used by matplotlib
to position axes in figure.
"""
rect_x0, rect_y0, rect_x1, rect_y1 = self.rect
if index == 0:
coord1 = rect_x1 - rect_x0
coord2 = 0.5 * (rect_y1 - rect_y0) + rect_y0
coord3 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord4 = rect_y1 - rect_y0
elif index == 1:
coord1 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord2 = 0.5 * (rect_y1 - rect_y0) + rect_y0
coord3 = rect_x1 - rect_x0
coord4 = rect_y1 - rect_y0
elif index == 2:
coord1 = rect_x1 - rect_x0
coord2 = rect_y1 - rect_y0
coord3 = 0.5 * (rect_x1 - rect_x0) + rect_x0
coord4 = 0.5 * (rect_y1 - rect_y0) + rect_y0
return [coord1, coord2, coord3, coord4]
|
[
"def",
"_find_initial_axes_coord",
"(",
"self",
",",
"index",
")",
":",
"rect_x0",
",",
"rect_y0",
",",
"rect_x1",
",",
"rect_y1",
"=",
"self",
".",
"rect",
"if",
"index",
"==",
"0",
":",
"coord1",
"=",
"rect_x1",
"-",
"rect_x0",
"coord2",
"=",
"0.5",
"*",
"(",
"rect_y1",
"-",
"rect_y0",
")",
"+",
"rect_y0",
"coord3",
"=",
"0.5",
"*",
"(",
"rect_x1",
"-",
"rect_x0",
")",
"+",
"rect_x0",
"coord4",
"=",
"rect_y1",
"-",
"rect_y0",
"elif",
"index",
"==",
"1",
":",
"coord1",
"=",
"0.5",
"*",
"(",
"rect_x1",
"-",
"rect_x0",
")",
"+",
"rect_x0",
"coord2",
"=",
"0.5",
"*",
"(",
"rect_y1",
"-",
"rect_y0",
")",
"+",
"rect_y0",
"coord3",
"=",
"rect_x1",
"-",
"rect_x0",
"coord4",
"=",
"rect_y1",
"-",
"rect_y0",
"elif",
"index",
"==",
"2",
":",
"coord1",
"=",
"rect_x1",
"-",
"rect_x0",
"coord2",
"=",
"rect_y1",
"-",
"rect_y0",
"coord3",
"=",
"0.5",
"*",
"(",
"rect_x1",
"-",
"rect_x0",
")",
"+",
"rect_x0",
"coord4",
"=",
"0.5",
"*",
"(",
"rect_y1",
"-",
"rect_y0",
")",
"+",
"rect_y0",
"return",
"[",
"coord1",
",",
"coord2",
",",
"coord3",
",",
"coord4",
"]"
] |
https://github.com/nilearn/nilearn/blob/9edba4471747efacf21260bf470a346307f52706/nilearn/plotting/displays/_slicers.py#L971-L1002
|
|
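A quick numeric check of the index == 0 branch of _find_initial_axes_coord above, evaluated for a hypothetical self.rect of (0.0, 0.0, 1.0, 1.0); the returned list follows the (x0, y0, x1, y1) axes-placement convention named in the docstring:

rect_x0, rect_y0, rect_x1, rect_y1 = 0.0, 0.0, 1.0, 1.0  # assumed rect values
coord1 = rect_x1 - rect_x0                    # 1.0
coord2 = 0.5 * (rect_y1 - rect_y0) + rect_y0  # 0.5
coord3 = 0.5 * (rect_x1 - rect_x0) + rect_x0  # 0.5
coord4 = rect_y1 - rect_y0                    # 1.0
print([coord1, coord2, coord3, coord4])       # [1.0, 0.5, 0.5, 1.0]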
s-leger/archipack
|
5a6243bf1edf08a6b429661ce291dacb551e5f8a
|
pygeos/geom.py
|
python
|
Geometry.length
|
(self)
|
return 0.0
|
* Returns the length of this Geometry.
* Linear geometries return their length.
* Areal geometries return their perimeter.
* They override this function to compute the area.
* Others return 0.0
*
* @return the length of the Geometry
|
* Returns the length of this Geometry.
* Linear geometries return their length.
* Areal geometries return their perimeter.
* They override this function to compute the area.
* Others return 0.0
*
*
|
[
"*",
"Returns",
"the",
"length",
"of",
"this",
"Geometry",
".",
"*",
"Linear",
"geometries",
"return",
"their",
"length",
".",
"*",
"Areal",
"geometries",
"return",
"their",
"perimeter",
".",
"*",
"They",
"override",
"this",
"function",
"to",
"compute",
"the",
"area",
".",
"*",
"Others",
"return",
"0",
".",
"0",
"*",
"*"
] |
def length(self):
"""
* Returns the length of this Geometry.
* Linear geometries return their length.
* Areal geometries return their perimeter.
* They override this function to compute the area.
* Others return 0.0
*
* @return the length of the Geometry
"""
return 0.0
|
[
"def",
"length",
"(",
"self",
")",
":",
"return",
"0.0"
] |
https://github.com/s-leger/archipack/blob/5a6243bf1edf08a6b429661ce291dacb551e5f8a/pygeos/geom.py#L289-L299
|
|
mozman/ezdxf
|
59d0fc2ea63f5cf82293428f5931da7e9f9718e9
|
src/ezdxf/sections/objects.py
|
python
|
ObjectsSection.export_dxf
|
(self, tagwriter: "TagWriter")
|
Export DXF entity by `tagwriter`. (internal API)
|
Export DXF entity by `tagwriter`. (internal API)
|
[
"Export",
"DXF",
"entity",
"by",
"tagwriter",
".",
"(",
"internal",
"API",
")"
] |
def export_dxf(self, tagwriter: "TagWriter") -> None:
"""Export DXF entity by `tagwriter`. (internal API)"""
tagwriter.write_str(" 0\nSECTION\n 2\nOBJECTS\n")
self._entity_space.export_dxf(tagwriter)
tagwriter.write_tag2(0, "ENDSEC")
|
[
"def",
"export_dxf",
"(",
"self",
",",
"tagwriter",
":",
"\"TagWriter\"",
")",
"->",
"None",
":",
"tagwriter",
".",
"write_str",
"(",
"\" 0\\nSECTION\\n 2\\nOBJECTS\\n\"",
")",
"self",
".",
"_entity_space",
".",
"export_dxf",
"(",
"tagwriter",
")",
"tagwriter",
".",
"write_tag2",
"(",
"0",
",",
"\"ENDSEC\"",
")"
] |
https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/sections/objects.py#L73-L77
|
||
lad1337/XDM
|
0c1b7009fe00f06f102a6f67c793478f515e7efe
|
site-packages/logilab/common/fileutils.py
|
python
|
relative_path
|
(from_file, to_file)
|
return sep.join(result)
|
Try to get a relative path from `from_file` to `to_file`
(path will be absolute if to_file is an absolute file). This function
is useful to create link in `from_file` to `to_file`. This typical use
case is used in this function description.
If both files are relative, they're expected to be relative to the same
directory.
>>> relative_path( from_file='toto/index.html', to_file='index.html')
'../index.html'
>>> relative_path( from_file='index.html', to_file='toto/index.html')
'toto/index.html'
>>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
'../toto/index.html'
>>> relative_path( from_file='toto/index.html', to_file='/index.html')
'/index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/index.html')
'../index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
'summary.html'
>>> relative_path( from_file='index.html', to_file='index.html')
''
>>> relative_path( from_file='/index.html', to_file='toto/index.html')
Traceback (most recent call last):
File "<string>", line 1, in ?
File "<stdin>", line 37, in relative_path
UnresolvableError
>>> relative_path( from_file='/index.html', to_file='/index.html')
''
>>>
:type from_file: str
:param from_file: source file (where links will be inserted)
:type to_file: str
:param to_file: target file (on which links point)
:raise UnresolvableError: if it has been unable to guess a correct path
:rtype: str
:return: the relative path of `to_file` from `from_file`
|
Try to get a relative path from `from_file` to `to_file`
(path will be absolute if to_file is an absolute file). This function
is useful to create link in `from_file` to `to_file`. This typical use
case is used in this function description.
|
[
"Try",
"to",
"get",
"a",
"relative",
"path",
"from",
"from_file",
"to",
"to_file",
"(",
"path",
"will",
"be",
"absolute",
"if",
"to_file",
"is",
"an",
"absolute",
"file",
")",
".",
"This",
"function",
"is",
"useful",
"to",
"create",
"link",
"in",
"from_file",
"to",
"to_file",
".",
"This",
"typical",
"use",
"case",
"is",
"used",
"in",
"this",
"function",
"description",
"."
] |
def relative_path(from_file, to_file):
"""Try to get a relative path from `from_file` to `to_file`
(path will be absolute if to_file is an absolute file). This function
is useful to create link in `from_file` to `to_file`. This typical use
case is used in this function description.
If both files are relative, they're expected to be relative to the same
directory.
>>> relative_path( from_file='toto/index.html', to_file='index.html')
'../index.html'
>>> relative_path( from_file='index.html', to_file='toto/index.html')
'toto/index.html'
>>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
'../toto/index.html'
>>> relative_path( from_file='toto/index.html', to_file='/index.html')
'/index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/index.html')
'../index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
'summary.html'
>>> relative_path( from_file='index.html', to_file='index.html')
''
>>> relative_path( from_file='/index.html', to_file='toto/index.html')
Traceback (most recent call last):
File "<string>", line 1, in ?
File "<stdin>", line 37, in relative_path
UnresolvableError
>>> relative_path( from_file='/index.html', to_file='/index.html')
''
>>>
:type from_file: str
:param from_file: source file (where links will be inserted)
:type to_file: str
:param to_file: target file (on which links point)
:raise UnresolvableError: if it has been unable to guess a correct path
:rtype: str
:return: the relative path of `to_file` from `from_file`
"""
from_file = normpath(from_file)
to_file = normpath(to_file)
if from_file == to_file:
return ''
if isabs(to_file):
if not isabs(from_file):
return to_file
elif isabs(from_file):
raise UnresolvableError()
from_parts = from_file.split(sep)
to_parts = to_file.split(sep)
idem = 1
result = []
while len(from_parts) > 1:
dirname = from_parts.pop(0)
if idem and len(to_parts) > 1 and dirname == to_parts[0]:
to_parts.pop(0)
else:
idem = 0
result.append('..')
result += to_parts
return sep.join(result)
|
[
"def",
"relative_path",
"(",
"from_file",
",",
"to_file",
")",
":",
"from_file",
"=",
"normpath",
"(",
"from_file",
")",
"to_file",
"=",
"normpath",
"(",
"to_file",
")",
"if",
"from_file",
"==",
"to_file",
":",
"return",
"''",
"if",
"isabs",
"(",
"to_file",
")",
":",
"if",
"not",
"isabs",
"(",
"from_file",
")",
":",
"return",
"to_file",
"elif",
"isabs",
"(",
"from_file",
")",
":",
"raise",
"UnresolvableError",
"(",
")",
"from_parts",
"=",
"from_file",
".",
"split",
"(",
"sep",
")",
"to_parts",
"=",
"to_file",
".",
"split",
"(",
"sep",
")",
"idem",
"=",
"1",
"result",
"=",
"[",
"]",
"while",
"len",
"(",
"from_parts",
")",
">",
"1",
":",
"dirname",
"=",
"from_parts",
".",
"pop",
"(",
"0",
")",
"if",
"idem",
"and",
"len",
"(",
"to_parts",
")",
">",
"1",
"and",
"dirname",
"==",
"to_parts",
"[",
"0",
"]",
":",
"to_parts",
".",
"pop",
"(",
"0",
")",
"else",
":",
"idem",
"=",
"0",
"result",
".",
"append",
"(",
"'..'",
")",
"result",
"+=",
"to_parts",
"return",
"sep",
".",
"join",
"(",
"result",
")"
] |
https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/logilab/common/fileutils.py#L180-L244
|
|
jgagneastro/coffeegrindsize
|
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
|
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/polynomial/polynomial.py
|
python
|
polyfit
|
(x, y, deg, rcond=None, full=False, w=None)
|
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
.. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
.. math :: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
|
Least-squares fit of a polynomial to data.
|
[
"Least",
"-",
"squares",
"fit",
"of",
"a",
"polynomial",
"to",
"data",
"."
] |
def polyfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least-squares fit of a polynomial to data.
Return the coefficients of a polynomial of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
where `n` is `deg`.
Parameters
----------
x : array_like, shape (`M`,)
x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
y : array_like, shape (`M`,) or (`M`, `K`)
y-coordinates of the sample points. Several sets of sample points
sharing the same x-coordinates can be (independently) fit with one
call to `polyfit` by passing in for `y` a 2-D array that contains
one data set per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than `rcond`, relative to the largest singular value, will be
ignored. The default value is ``len(x)*eps``, where `eps` is the
relative precision of the platform's float type, about 2e-16 in
most cases.
full : bool, optional
Switch determining the nature of the return value. When ``False``
(the default) just the coefficients are returned; when ``True``,
diagnostic information from the singular value decomposition (used
to solve the fit's matrix equation) is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
Polynomial coefficients ordered from low to high. If `y` was 2-D,
the coefficients in column `k` of `coef` represent the polynomial
fit to the data in `y`'s `k`-th column.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Raises
------
RankWarning
Raised if the matrix in the least-squares fit is rank deficient.
The warning is only raised if `full` == False. The warnings can
be turned off by:
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, hermfit, hermefit
polyval : Evaluates a polynomial.
polyvander : Vandermonde matrix for powers.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the polynomial `p` that minimizes
the sum of the weighted squared errors
.. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) over-determined matrix equation:
.. math :: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected (and `full` == ``False``), a `RankWarning` will be raised.
This means that the coefficient values may be poorly determined.
Fitting to a lower order polynomial will usually get rid of the warning
(but may not be what you want, of course; if you have independent
reason(s) for choosing the degree which isn't working, you may have to:
a) reconsider those reasons, and/or b) reconsider the quality of your
data). The `rcond` parameter can also be set to a value smaller than
its default, but the resulting fit may be spurious and have large
contributions from roundoff error.
Polynomial fits using double precision tend to "fail" at about
(polynomial) degree 20. Fits using Chebyshev or Legendre series are
generally better conditioned, but much can still depend on the
distribution of the sample points and the smoothness of the data. If
the quality of the fit is inadequate, splines may be a good
alternative.
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
>>> stats # note the large SSR, explaining the rather poor results
[array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
1.00000000e+00])
>>> stats # note the minuscule SSR
[array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = polyvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = polyvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim == 1:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
|
[
"def",
"polyfit",
"(",
"x",
",",
"y",
",",
"deg",
",",
"rcond",
"=",
"None",
",",
"full",
"=",
"False",
",",
"w",
"=",
"None",
")",
":",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"+",
"0.0",
"y",
"=",
"np",
".",
"asarray",
"(",
"y",
")",
"+",
"0.0",
"deg",
"=",
"np",
".",
"asarray",
"(",
"deg",
")",
"# check arguments.",
"if",
"deg",
".",
"ndim",
">",
"1",
"or",
"deg",
".",
"dtype",
".",
"kind",
"not",
"in",
"'iu'",
"or",
"deg",
".",
"size",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"deg must be an int or non-empty 1-D array of int\"",
")",
"if",
"deg",
".",
"min",
"(",
")",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"expected deg >= 0\"",
")",
"if",
"x",
".",
"ndim",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"\"expected 1D vector for x\"",
")",
"if",
"x",
".",
"size",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"expected non-empty vector for x\"",
")",
"if",
"y",
".",
"ndim",
"<",
"1",
"or",
"y",
".",
"ndim",
">",
"2",
":",
"raise",
"TypeError",
"(",
"\"expected 1D or 2D array for y\"",
")",
"if",
"len",
"(",
"x",
")",
"!=",
"len",
"(",
"y",
")",
":",
"raise",
"TypeError",
"(",
"\"expected x and y to have same length\"",
")",
"if",
"deg",
".",
"ndim",
"==",
"0",
":",
"lmax",
"=",
"deg",
"order",
"=",
"lmax",
"+",
"1",
"van",
"=",
"polyvander",
"(",
"x",
",",
"lmax",
")",
"else",
":",
"deg",
"=",
"np",
".",
"sort",
"(",
"deg",
")",
"lmax",
"=",
"deg",
"[",
"-",
"1",
"]",
"order",
"=",
"len",
"(",
"deg",
")",
"van",
"=",
"polyvander",
"(",
"x",
",",
"lmax",
")",
"[",
":",
",",
"deg",
"]",
"# set up the least squares matrices in transposed form",
"lhs",
"=",
"van",
".",
"T",
"rhs",
"=",
"y",
".",
"T",
"if",
"w",
"is",
"not",
"None",
":",
"w",
"=",
"np",
".",
"asarray",
"(",
"w",
")",
"+",
"0.0",
"if",
"w",
".",
"ndim",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"\"expected 1D vector for w\"",
")",
"if",
"len",
"(",
"x",
")",
"!=",
"len",
"(",
"w",
")",
":",
"raise",
"TypeError",
"(",
"\"expected x and w to have same length\"",
")",
"# apply weights. Don't use inplace operations as they",
"# can cause problems with NA.",
"lhs",
"=",
"lhs",
"*",
"w",
"rhs",
"=",
"rhs",
"*",
"w",
"# set rcond",
"if",
"rcond",
"is",
"None",
":",
"rcond",
"=",
"len",
"(",
"x",
")",
"*",
"np",
".",
"finfo",
"(",
"x",
".",
"dtype",
")",
".",
"eps",
"# Determine the norms of the design matrix columns.",
"if",
"issubclass",
"(",
"lhs",
".",
"dtype",
".",
"type",
",",
"np",
".",
"complexfloating",
")",
":",
"scl",
"=",
"np",
".",
"sqrt",
"(",
"(",
"np",
".",
"square",
"(",
"lhs",
".",
"real",
")",
"+",
"np",
".",
"square",
"(",
"lhs",
".",
"imag",
")",
")",
".",
"sum",
"(",
"1",
")",
")",
"else",
":",
"scl",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"square",
"(",
"lhs",
")",
".",
"sum",
"(",
"1",
")",
")",
"scl",
"[",
"scl",
"==",
"0",
"]",
"=",
"1",
"# Solve the least squares problem.",
"c",
",",
"resids",
",",
"rank",
",",
"s",
"=",
"la",
".",
"lstsq",
"(",
"lhs",
".",
"T",
"/",
"scl",
",",
"rhs",
".",
"T",
",",
"rcond",
")",
"c",
"=",
"(",
"c",
".",
"T",
"/",
"scl",
")",
".",
"T",
"# Expand c to include non-fitted coefficients which are set to zero",
"if",
"deg",
".",
"ndim",
"==",
"1",
":",
"if",
"c",
".",
"ndim",
"==",
"2",
":",
"cc",
"=",
"np",
".",
"zeros",
"(",
"(",
"lmax",
"+",
"1",
",",
"c",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"c",
".",
"dtype",
")",
"else",
":",
"cc",
"=",
"np",
".",
"zeros",
"(",
"lmax",
"+",
"1",
",",
"dtype",
"=",
"c",
".",
"dtype",
")",
"cc",
"[",
"deg",
"]",
"=",
"c",
"c",
"=",
"cc",
"# warn on rank reduction",
"if",
"rank",
"!=",
"order",
"and",
"not",
"full",
":",
"msg",
"=",
"\"The fit may be poorly conditioned\"",
"warnings",
".",
"warn",
"(",
"msg",
",",
"pu",
".",
"RankWarning",
",",
"stacklevel",
"=",
"2",
")",
"if",
"full",
":",
"return",
"c",
",",
"[",
"resids",
",",
"rank",
",",
"s",
",",
"rcond",
"]",
"else",
":",
"return",
"c"
] |
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/numpy/polynomial/polynomial.py#L1293-L1509
|
||
persephone-tools/persephone
|
ef7cbf169b1fd7ad6eb880dbda6357f0e7393fba
|
persephone/corpus.py
|
python
|
Corpus.get_untranscribed_prefixes
|
(self)
|
return get_untranscribed_prefixes_from_file(self.tgt_dir)
|
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
This will fetch those prefixes from that file and will return an empty
list if that file does not exist.
See find_untranscribed_wavs function for finding untranscribed prefixes in an
experiment directory.
|
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
|
[
"The",
"file",
"untranscribed_prefixes",
".",
"txt",
"will",
"specify",
"prefixes",
"which",
"do",
"not",
"have",
"an",
"associated",
"transcription",
"file",
"if",
"placed",
"in",
"the",
"target",
"directory",
"."
] |
def get_untranscribed_prefixes(self) -> List[str]:
"""
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
This will fetch those prefixes from that file and will return an empty
list if that file does not exist.
See find_untranscribed_wavs function for finding untranscribed prefixes in an
experiment directory.
"""
return get_untranscribed_prefixes_from_file(self.tgt_dir)
|
[
"def",
"get_untranscribed_prefixes",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"get_untranscribed_prefixes_from_file",
"(",
"self",
".",
"tgt_dir",
")"
] |
https://github.com/persephone-tools/persephone/blob/ef7cbf169b1fd7ad6eb880dbda6357f0e7393fba/persephone/corpus.py#L555-L566
|
|
marcosfede/algorithms
|
1ee7c815f9d556c9cef4d4b0d21ee3a409d21629
|
strings/rabin_karp.py
|
python
|
RollingHash.move_window
|
(self)
|
[] |
def move_window(self):
if self.window_end <= len(self.text) - 1:
# remove left letter from hash value
self.hash -= (ord(self.text[self.window_start]) - ord("a") + 1) * 26 ** (self.sizeWord - 1)
self.hash *= 26
self.hash += ord(self.text[self.window_end]) - ord("a") + 1
self.window_start += 1
self.window_end += 1
|
[
"def",
"move_window",
"(",
"self",
")",
":",
"if",
"self",
".",
"window_end",
"<=",
"len",
"(",
"self",
".",
"text",
")",
"-",
"1",
":",
"# remove left letter from hash value",
"self",
".",
"hash",
"-=",
"(",
"ord",
"(",
"self",
".",
"text",
"[",
"self",
".",
"window_start",
"]",
")",
"-",
"ord",
"(",
"\"a\"",
")",
"+",
"1",
")",
"*",
"26",
"**",
"(",
"self",
".",
"sizeWord",
"-",
"1",
")",
"self",
".",
"hash",
"*=",
"26",
"self",
".",
"hash",
"+=",
"ord",
"(",
"self",
".",
"text",
"[",
"self",
".",
"window_end",
"]",
")",
"-",
"ord",
"(",
"\"a\"",
")",
"+",
"1",
"self",
".",
"window_start",
"+=",
"1",
"self",
".",
"window_end",
"+=",
"1"
] |
https://github.com/marcosfede/algorithms/blob/1ee7c815f9d556c9cef4d4b0d21ee3a409d21629/strings/rabin_karp.py#L20-L27
|
||||
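The move_window entry above shows only the sliding step of a Rabin-Karp rolling hash. To make the invariant visible, here is a hypothetical minimal __init__ for the same class (the real constructor in rabin_karp.py is not reproduced in this row): each lowercase letter is treated as a base-26 digit, so sliding the window costs one subtract, one multiply and one add instead of rehashing the whole window.

class RollingHash:
    def __init__(self, text, size_word):
        self.text = text
        self.sizeWord = size_word
        self.window_start = 0
        self.window_end = size_word
        # initial hash: the first size_word letters as base-26 digits
        self.hash = 0
        for i in range(size_word):
            self.hash += (ord(text[i]) - ord("a") + 1) * 26 ** (size_word - i - 1)

For example, RollingHash("abcd", 2) starts with hash("ab") = 1*26 + 2 = 28; one move_window() computes 28 - 1*26 = 2, then 2*26 + 3 = 55, which equals hash("bc") = 2*26 + 3.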
mesalock-linux/mesapy
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
lib-python/2.7/lib-tk/turtle.py
|
python
|
TurtleScreenBase._type
|
(self, item)
|
return self.cv.type(item)
|
Return 'line' or 'polygon' or 'image' depending on
type of item.
|
Return 'line' or 'polygon' or 'image' depending on
type of item.
|
[
"Return",
"line",
"or",
"polygon",
"or",
"image",
"depending",
"on",
"type",
"of",
"item",
"."
] |
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
return self.cv.type(item)
|
[
"def",
"_type",
"(",
"self",
",",
"item",
")",
":",
"return",
"self",
".",
"cv",
".",
"type",
"(",
"item",
")"
] |
https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/lib-tk/turtle.py#L747-L751
|
|
girder/girder
|
0766ba8e7f9b25ce81e7c0d19bd343479bceea20
|
plugins/user_quota/girder_user_quota/quota.py
|
python
|
QuotaPolicy.checkUploadStart
|
(self, event)
|
Check if an upload will fit within a quota restriction. This is before
the upload occurs, but since multiple uploads can be started
concurrently, we also have to check when the upload is being completed.
:param event: event record.
|
Check if an upload will fit within a quota restriction. This is before
the upload occurs, but since multiple uploads can be started
concurrently, we also have to check when the upload is being completed.
|
[
"Check",
"if",
"an",
"upload",
"will",
"fit",
"within",
"a",
"quota",
"restriction",
".",
"This",
"is",
"before",
"the",
"upload",
"occurs",
"but",
"since",
"multiple",
"uploads",
"can",
"be",
"started",
"concurrently",
"we",
"also",
"have",
"to",
"check",
"when",
"the",
"upload",
"is",
"being",
"completed",
"."
] |
def checkUploadStart(self, event):
"""
Check if an upload will fit within a quota restriction. This is before
the upload occurs, but since multiple uploads can be started
concurrently, we also have to check when the upload is being completed.
:param event: event record.
"""
if '_id' in event.info:
return
quotaInfo = self._checkUploadSize(event.info)
if quotaInfo:
raise ValidationException(
'Upload would exceed file storage quota (need %s, only %s '
'available - used %s out of %s)' %
(formatSize(quotaInfo['sizeNeeded']),
formatSize(quotaInfo['quotaLeft']),
formatSize(quotaInfo['quotaUsed']),
formatSize(quotaInfo['fileSizeQuota'])),
field='size')
|
[
"def",
"checkUploadStart",
"(",
"self",
",",
"event",
")",
":",
"if",
"'_id'",
"in",
"event",
".",
"info",
":",
"return",
"quotaInfo",
"=",
"self",
".",
"_checkUploadSize",
"(",
"event",
".",
"info",
")",
"if",
"quotaInfo",
":",
"raise",
"ValidationException",
"(",
"'Upload would exceed file storage quota (need %s, only %s '",
"'available - used %s out of %s)'",
"%",
"(",
"formatSize",
"(",
"quotaInfo",
"[",
"'sizeNeeded'",
"]",
")",
",",
"formatSize",
"(",
"quotaInfo",
"[",
"'quotaLeft'",
"]",
")",
",",
"formatSize",
"(",
"quotaInfo",
"[",
"'quotaUsed'",
"]",
")",
",",
"formatSize",
"(",
"quotaInfo",
"[",
"'fileSizeQuota'",
"]",
")",
")",
",",
"field",
"=",
"'size'",
")"
] |
https://github.com/girder/girder/blob/0766ba8e7f9b25ce81e7c0d19bd343479bceea20/plugins/user_quota/girder_user_quota/quota.py#L366-L385
|
||
JBakamovic/cxxd
|
142c19649b036bd6f6bdcd4684de735ea11a6c94
|
services/source_code_model/indexer/clang_indexer.py
|
python
|
start_indexing_subprocess
|
(root_directory, compiler_args_filename, indexer_input_list_filename, output_db_filename, log_filename)
|
return subprocess.Popen(shlex.split(cmd))
|
[] |
def start_indexing_subprocess(root_directory, compiler_args_filename, indexer_input_list_filename, output_db_filename, log_filename):
cmd = "python2 " + get_clang_index_path() + \
" --project_root_directory='" + root_directory + \
"' --compiler_args_filename='" + compiler_args_filename + \
"' --input_list='" + indexer_input_list_filename + \
"' --output_db_filename='" + output_db_filename + \
"' " + "--log_file='" + log_filename + "'"
return subprocess.Popen(shlex.split(cmd))
|
[
"def",
"start_indexing_subprocess",
"(",
"root_directory",
",",
"compiler_args_filename",
",",
"indexer_input_list_filename",
",",
"output_db_filename",
",",
"log_filename",
")",
":",
"cmd",
"=",
"\"python2 \"",
"+",
"get_clang_index_path",
"(",
")",
"+",
"\" --project_root_directory='\"",
"+",
"root_directory",
"+",
"\"' --compiler_args_filename='\"",
"+",
"compiler_args_filename",
"+",
"\"' --input_list='\"",
"+",
"indexer_input_list_filename",
"+",
"\"' --output_db_filename='\"",
"+",
"output_db_filename",
"+",
"\"' \"",
"+",
"\"--log_file='\"",
"+",
"log_filename",
"+",
"\"'\"",
"return",
"subprocess",
".",
"Popen",
"(",
"shlex",
".",
"split",
"(",
"cmd",
")",
")"
] |
https://github.com/JBakamovic/cxxd/blob/142c19649b036bd6f6bdcd4684de735ea11a6c94/services/source_code_model/indexer/clang_indexer.py#L363-L370
|
|||
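The clang_indexer entry above builds a shell-style string and re-tokenises it with shlex.split. An equivalent, quoting-free formulation passes the argument list directly to subprocess.Popen; the sketch below is not the project's code — it keeps the original flag names and the project's own get_clang_index_path() helper, and tolerates spaces or quotes in the paths:

import subprocess

def start_indexing_subprocess_list_form(root_directory, compiler_args_filename,
                                        indexer_input_list_filename,
                                        output_db_filename, log_filename):
    # each list element reaches the child process verbatim, so no shlex needed
    cmd = [
        "python2", get_clang_index_path(),
        "--project_root_directory=" + root_directory,
        "--compiler_args_filename=" + compiler_args_filename,
        "--input_list=" + indexer_input_list_filename,
        "--output_db_filename=" + output_db_filename,
        "--log_file=" + log_filename,
    ]
    return subprocess.Popen(cmd)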
bread-and-pepper/django-userena
|
7dfb3d5d148127e32f217a62096d507266a3a83c
|
userena/contrib/umessages/views.py
|
python
|
MessageListView.get_queryset
|
(self)
|
return MessageContact.objects.get_contacts_for(self.request.user)
|
[] |
def get_queryset(self):
return MessageContact.objects.get_contacts_for(self.request.user)
|
[
"def",
"get_queryset",
"(",
"self",
")",
":",
"return",
"MessageContact",
".",
"objects",
".",
"get_contacts_for",
"(",
"self",
".",
"request",
".",
"user",
")"
] |
https://github.com/bread-and-pepper/django-userena/blob/7dfb3d5d148127e32f217a62096d507266a3a83c/userena/contrib/umessages/views.py#L37-L38
|
|||
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/patches.py
|
python
|
Patch.get_linewidth
|
(self)
|
return self._linewidth
|
Return the line width in points.
|
Return the line width in points.
|
[
"Return",
"the",
"line",
"width",
"in",
"points",
"."
] |
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
|
[
"def",
"get_linewidth",
"(",
"self",
")",
":",
"return",
"self",
".",
"_linewidth"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/patches.py#L214-L218
|
|
hydroshare/hydroshare
|
7ba563b55412f283047fb3ef6da367d41dec58c6
|
hs_modelinstance/models.py
|
python
|
ModelInstanceMetaData.executed_by
|
(self)
|
return self._executed_by.all().first()
|
[] |
def executed_by(self):
return self._executed_by.all().first()
|
[
"def",
"executed_by",
"(",
"self",
")",
":",
"return",
"self",
".",
"_executed_by",
".",
"all",
"(",
")",
".",
"first",
"(",
")"
] |
https://github.com/hydroshare/hydroshare/blob/7ba563b55412f283047fb3ef6da367d41dec58c6/hs_modelinstance/models.py#L145-L146
|
|||
krintoxi/NoobSec-Toolkit
|
38738541cbc03cedb9a3b3ed13b629f781ad64f6
|
NoobSecToolkit - MAC OSX/scripts/sshbackdoors/master.py
|
python
|
BackdoorMe.do_clear
|
(self, args)
|
[] |
def do_clear(self, args):
os.system("clear")
|
[
"def",
"do_clear",
"(",
"self",
",",
"args",
")",
":",
"os",
".",
"system",
"(",
"\"clear\"",
")"
] |
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/scripts/sshbackdoors/master.py#L206-L207
|
||||
chapmanb/bcbb
|
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
|
nextgen/bcbio/distributed/split.py
|
python
|
_check_group_status
|
(xs, grouped_info)
|
return ready, grouped
|
Identify grouped items that need ungrouping to continue.
|
Identify grouped items that need ungrouping to continue.
|
[
"Identify",
"grouped",
"items",
"that",
"need",
"ungrouping",
"to",
"continue",
"."
] |
def _check_group_status(xs, grouped_info):
"""Identify grouped items that need ungrouping to continue.
"""
ready = []
grouped = []
for x in xs:
if x.has_key("group"):
x["group_orig"] = grouped_info[x["group"]]
grouped.append([x])
else:
ready.append(x)
return ready, grouped
|
[
"def",
"_check_group_status",
"(",
"xs",
",",
"grouped_info",
")",
":",
"ready",
"=",
"[",
"]",
"grouped",
"=",
"[",
"]",
"for",
"x",
"in",
"xs",
":",
"if",
"x",
".",
"has_key",
"(",
"\"group\"",
")",
":",
"x",
"[",
"\"group_orig\"",
"]",
"=",
"grouped_info",
"[",
"x",
"[",
"\"group\"",
"]",
"]",
"grouped",
".",
"append",
"(",
"[",
"x",
"]",
")",
"else",
":",
"ready",
".",
"append",
"(",
"x",
")",
"return",
"ready",
",",
"grouped"
] |
https://github.com/chapmanb/bcbb/blob/dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027/nextgen/bcbio/distributed/split.py#L42-L53
|
|
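_check_group_status above is Python 2 code: dict.has_key() no longer exists in Python 3. A present-day port of the loop body tests membership instead; nothing else changes (sketch only, same names as the entry):

for x in xs:
    if "group" in x:  # Python 3 replacement for x.has_key("group")
        x["group_orig"] = grouped_info[x["group"]]
        grouped.append([x])
    else:
        ready.append(x)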
bookieio/Bookie
|
78b15fc68ec7e7dc3ad0c4fa049ce670a304d419
|
bookie/models/auth.py
|
python
|
User.deactivate
|
(self)
|
In case we need to disable the login
|
In case we need to disable the login
|
[
"In",
"case",
"we",
"need",
"to",
"disable",
"the",
"login"
] |
def deactivate(self):
"""In case we need to disable the login"""
self.activated = False
|
[
"def",
"deactivate",
"(",
"self",
")",
":",
"self",
".",
"activated",
"=",
"False"
] |
https://github.com/bookieio/Bookie/blob/78b15fc68ec7e7dc3ad0c4fa049ce670a304d419/bookie/models/auth.py#L341-L343
|
||
tp4a/teleport
|
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
|
server/www/packages/packages-linux/x64/ldap3/protocol/rfc2849.py
|
python
|
persistent_search_response_to_ldif
|
(change)
|
return ldif_lines[:-1]
|
[] |
def persistent_search_response_to_ldif(change):
ldif_lines = ['# ' + datetime.now().isoformat()]
control = decode_persistent_search_control(change)
if control:
if control['changeNumber']:
ldif_lines.append('# change number: ' + str(control['changeNumber']))
ldif_lines.append(control['changeType'])
if control['previousDN']:
ldif_lines.append('# previous dn: ' + str(control['previousDN']))
ldif_lines += operation_to_ldif('searchResponse', [change])
return ldif_lines[:-1]
|
[
"def",
"persistent_search_response_to_ldif",
"(",
"change",
")",
":",
"ldif_lines",
"=",
"[",
"'# '",
"+",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
"]",
"control",
"=",
"decode_persistent_search_control",
"(",
"change",
")",
"if",
"control",
":",
"if",
"control",
"[",
"'changeNumber'",
"]",
":",
"ldif_lines",
".",
"append",
"(",
"'# change number: '",
"+",
"str",
"(",
"control",
"[",
"'changeNumber'",
"]",
")",
")",
"ldif_lines",
".",
"append",
"(",
"control",
"[",
"'changeType'",
"]",
")",
"if",
"control",
"[",
"'previousDN'",
"]",
":",
"ldif_lines",
".",
"append",
"(",
"'# previous dn: '",
"+",
"str",
"(",
"control",
"[",
"'previousDN'",
"]",
")",
")",
"ldif_lines",
"+=",
"operation_to_ldif",
"(",
"'searchResponse'",
",",
"[",
"change",
"]",
")",
"return",
"ldif_lines",
"[",
":",
"-",
"1",
"]"
] |
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/ldap3/protocol/rfc2849.py#L283-L294
|
|||
securityclippy/elasticintel
|
aa08d3e9f5ab1c000128e95161139ce97ff0e334
|
ingest_feed_lambda/numpy/polynomial/chebyshev.py
|
python
|
_zseries_der
|
(zs)
|
return d
|
Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
|
Differentiate a z-series.
|
[
"Differentiate",
"a",
"z",
"-",
"series",
"."
] |
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
|
[
"def",
"_zseries_der",
"(",
"zs",
")",
":",
"n",
"=",
"len",
"(",
"zs",
")",
"//",
"2",
"ns",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"1",
",",
"0",
",",
"1",
"]",
",",
"dtype",
"=",
"zs",
".",
"dtype",
")",
"zs",
"*=",
"np",
".",
"arange",
"(",
"-",
"n",
",",
"n",
"+",
"1",
")",
"*",
"2",
"d",
",",
"r",
"=",
"_zseries_div",
"(",
"zs",
",",
"ns",
")",
"return",
"d"
] |
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/numpy/polynomial/chebyshev.py#L258-L287
|
|
udacity/ud330
|
fc0cf20871b64bc252179e1a3cca17d0224de3d5
|
Lesson3/step3/project.py
|
python
|
newRestaurant
|
()
|
[] |
def newRestaurant():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newRestaurant = Restaurant(
name=request.form['name'], user_id=login_session['user_id'])
session.add(newRestaurant)
flash('New Restaurant %s Successfully Created' % newRestaurant.name)
session.commit()
return redirect(url_for('showRestaurants'))
else:
return render_template('newRestaurant.html')
|
[
"def",
"newRestaurant",
"(",
")",
":",
"if",
"'username'",
"not",
"in",
"login_session",
":",
"return",
"redirect",
"(",
"'/login'",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"newRestaurant",
"=",
"Restaurant",
"(",
"name",
"=",
"request",
".",
"form",
"[",
"'name'",
"]",
",",
"user_id",
"=",
"login_session",
"[",
"'user_id'",
"]",
")",
"session",
".",
"add",
"(",
"newRestaurant",
")",
"flash",
"(",
"'New Restaurant %s Successfully Created'",
"%",
"newRestaurant",
".",
"name",
")",
"session",
".",
"commit",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'showRestaurants'",
")",
")",
"else",
":",
"return",
"render_template",
"(",
"'newRestaurant.html'",
")"
] |
https://github.com/udacity/ud330/blob/fc0cf20871b64bc252179e1a3cca17d0224de3d5/Lesson3/step3/project.py#L225-L236
|
||||
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/vendor/_vendored/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
|
python
|
_assert_no_error
|
(error, exception_class=None)
|
Checks the return code and throws an exception if there is an error to
report
|
Checks the return code and throws an exception if there is an error to
report
|
[
"Checks",
"the",
"return",
"code",
"and",
"throws",
"an",
"exception",
"if",
"there",
"is",
"an",
"error",
"to",
"report"
] |
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u"":
output = u"OSStatus %s" % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
|
[
"def",
"_assert_no_error",
"(",
"error",
",",
"exception_class",
"=",
"None",
")",
":",
"if",
"error",
"==",
"0",
":",
"return",
"cf_error_string",
"=",
"Security",
".",
"SecCopyErrorMessageString",
"(",
"error",
",",
"None",
")",
"output",
"=",
"_cf_string_to_unicode",
"(",
"cf_error_string",
")",
"CoreFoundation",
".",
"CFRelease",
"(",
"cf_error_string",
")",
"if",
"output",
"is",
"None",
"or",
"output",
"==",
"u\"\"",
":",
"output",
"=",
"u\"OSStatus %s\"",
"%",
"error",
"if",
"exception_class",
"is",
"None",
":",
"exception_class",
"=",
"ssl",
".",
"SSLError",
"raise",
"exception_class",
"(",
"output",
")"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py#L129-L147
|
||
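_assert_no_error above encodes the usual SecureTransport calling convention: every Security.framework call returns an OSStatus, zero on success. A hedged usage sketch (runs only on macOS with this module's ctypes bindings loaded; -9806 is used here purely as an example non-zero status):

import ssl

status = 0  # stands in for the OSStatus returned by some Security.framework call
_assert_no_error(status)  # zero: returns silently

try:
    _assert_no_error(-9806)  # non-zero: raises ssl.SSLError by default
except ssl.SSLError as exc:
    print(exc)  # message from SecCopyErrorMessageString, else "OSStatus -9806"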
lisa-lab/pylearn2
|
af81e5c362f0df4df85c3e54e23b2adeec026055
|
pylearn2/utils/datasets.py
|
python
|
BatchIterator.__iter__
|
(self)
|
Generator function to iterate through all minibatches
|
Generator function to iterate through all minibatches
|
[
"Generator",
"function",
"to",
"iterate",
"through",
"all",
"minibatches"
] |
def __iter__(self):
"""Generator function to iterate through all minibatches"""
counter = [0, 0, 0]
for chosen in self.permut:
# Retrieve minibatch from chosen set
index = counter[chosen]
minibatch = self.dataset[chosen][
index * self.batch_size:(index + 1) * self.batch_size
]
# Increment the related counter
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
# Return the computed minibatch
yield minibatch
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"counter",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"for",
"chosen",
"in",
"self",
".",
"permut",
":",
"# Retrieve minibatch from chosen set",
"index",
"=",
"counter",
"[",
"chosen",
"]",
"minibatch",
"=",
"self",
".",
"dataset",
"[",
"chosen",
"]",
"[",
"index",
"*",
"self",
".",
"batch_size",
":",
"(",
"index",
"+",
"1",
")",
"*",
"self",
".",
"batch_size",
"]",
"# Increment the related counter",
"counter",
"[",
"chosen",
"]",
"=",
"(",
"counter",
"[",
"chosen",
"]",
"+",
"1",
")",
"%",
"self",
".",
"limit",
"[",
"chosen",
"]",
"# Return the computed minibatch",
"yield",
"minibatch"
] |
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/utils/datasets.py#L231-L243
|
||
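BatchIterator.__iter__ above interleaves minibatches from several sub-datasets according to a precomputed permutation, keeping one cursor per set. A self-contained sketch of the same pattern with toy data (the names mirror the entry, but none of this is pylearn2 API):

# three "datasets", a batch size of 2, and a fixed interleaving order
dataset = [list(range(6)), list(range(10, 16)), list(range(20, 26))]
batch_size = 2
limit = [len(d) // batch_size for d in dataset]  # batches available per set
permut = [0, 1, 2, 0, 1, 2]                      # which set each step draws from

counter = [0, 0, 0]
for chosen in permut:
    index = counter[chosen]
    minibatch = dataset[chosen][index * batch_size:(index + 1) * batch_size]
    counter[chosen] = (counter[chosen] + 1) % limit[chosen]
    print(chosen, minibatch)  # 0 [0, 1], 1 [10, 11], 2 [20, 21], 0 [2, 3], ...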
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py
|
python
|
MSDialect.get_foreign_keys
|
(self, connection, tablename, dbname, owner, schema, **kw)
|
return list(fkeys.values())
|
[] |
def get_foreign_keys(self, connection, tablename,
dbname, owner, schema, **kw):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias('C')
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,
R.c.table_schema, R.c.table_name, R.c.column_name,
RR.c.constraint_name, RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule],
sql.and_(C.c.table_name == tablename,
C.c.table_schema == owner,
RR.c.constraint_schema == C.c.table_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name ==
RR.c.unique_constraint_name,
C.c.ordinal_position == R.c.ordinal_position
),
order_by=[RR.c.constraint_name, R.c.ordinal_position]
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
fknm, scols, rcols = (None, [], [])
def fkey_rec():
return {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec['name'] = rfknm
if not rec['referred_table']:
rec['referred_table'] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec['referred_schema'] = rschema
local_cols, remote_cols = \
rec['constrained_columns'],\
rec['referred_columns']
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
|
[
"def",
"get_foreign_keys",
"(",
"self",
",",
"connection",
",",
"tablename",
",",
"dbname",
",",
"owner",
",",
"schema",
",",
"*",
"*",
"kw",
")",
":",
"RR",
"=",
"ischema",
".",
"ref_constraints",
"C",
"=",
"ischema",
".",
"key_constraints",
".",
"alias",
"(",
"'C'",
")",
"R",
"=",
"ischema",
".",
"key_constraints",
".",
"alias",
"(",
"'R'",
")",
"# Foreign key constraints",
"s",
"=",
"sql",
".",
"select",
"(",
"[",
"C",
".",
"c",
".",
"column_name",
",",
"R",
".",
"c",
".",
"table_schema",
",",
"R",
".",
"c",
".",
"table_name",
",",
"R",
".",
"c",
".",
"column_name",
",",
"RR",
".",
"c",
".",
"constraint_name",
",",
"RR",
".",
"c",
".",
"match_option",
",",
"RR",
".",
"c",
".",
"update_rule",
",",
"RR",
".",
"c",
".",
"delete_rule",
"]",
",",
"sql",
".",
"and_",
"(",
"C",
".",
"c",
".",
"table_name",
"==",
"tablename",
",",
"C",
".",
"c",
".",
"table_schema",
"==",
"owner",
",",
"RR",
".",
"c",
".",
"constraint_schema",
"==",
"C",
".",
"c",
".",
"table_schema",
",",
"C",
".",
"c",
".",
"constraint_name",
"==",
"RR",
".",
"c",
".",
"constraint_name",
",",
"R",
".",
"c",
".",
"constraint_name",
"==",
"RR",
".",
"c",
".",
"unique_constraint_name",
",",
"C",
".",
"c",
".",
"ordinal_position",
"==",
"R",
".",
"c",
".",
"ordinal_position",
")",
",",
"order_by",
"=",
"[",
"RR",
".",
"c",
".",
"constraint_name",
",",
"R",
".",
"c",
".",
"ordinal_position",
"]",
")",
"# group rows by constraint ID, to handle multi-column FKs",
"fkeys",
"=",
"[",
"]",
"fknm",
",",
"scols",
",",
"rcols",
"=",
"(",
"None",
",",
"[",
"]",
",",
"[",
"]",
")",
"def",
"fkey_rec",
"(",
")",
":",
"return",
"{",
"'name'",
":",
"None",
",",
"'constrained_columns'",
":",
"[",
"]",
",",
"'referred_schema'",
":",
"None",
",",
"'referred_table'",
":",
"None",
",",
"'referred_columns'",
":",
"[",
"]",
"}",
"fkeys",
"=",
"util",
".",
"defaultdict",
"(",
"fkey_rec",
")",
"for",
"r",
"in",
"connection",
".",
"execute",
"(",
"s",
")",
".",
"fetchall",
"(",
")",
":",
"scol",
",",
"rschema",
",",
"rtbl",
",",
"rcol",
",",
"rfknm",
",",
"fkmatch",
",",
"fkuprule",
",",
"fkdelrule",
"=",
"r",
"rec",
"=",
"fkeys",
"[",
"rfknm",
"]",
"rec",
"[",
"'name'",
"]",
"=",
"rfknm",
"if",
"not",
"rec",
"[",
"'referred_table'",
"]",
":",
"rec",
"[",
"'referred_table'",
"]",
"=",
"rtbl",
"if",
"schema",
"is",
"not",
"None",
"or",
"owner",
"!=",
"rschema",
":",
"if",
"dbname",
":",
"rschema",
"=",
"dbname",
"+",
"\".\"",
"+",
"rschema",
"rec",
"[",
"'referred_schema'",
"]",
"=",
"rschema",
"local_cols",
",",
"remote_cols",
"=",
"rec",
"[",
"'constrained_columns'",
"]",
",",
"rec",
"[",
"'referred_columns'",
"]",
"local_cols",
".",
"append",
"(",
"scol",
")",
"remote_cols",
".",
"append",
"(",
"rcol",
")",
"return",
"list",
"(",
"fkeys",
".",
"values",
"(",
")",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py#L2230-L2287
|
|||
qutebrowser/qutebrowser
|
3a2aaaacbf97f4bf0c72463f3da94ed2822a5442
|
qutebrowser/browser/qutescheme.py
|
python
|
qute_pdfjs
|
(url: QUrl)
|
Handler for qute://pdfjs.
Return the pdf.js viewer or redirect to original URL if the file does not
exist.
|
Handler for qute://pdfjs.
|
[
"Handler",
"for",
"qute",
":",
"//",
"pdfjs",
"."
] |
def qute_pdfjs(url: QUrl) -> _HandlerRet:
"""Handler for qute://pdfjs.
Return the pdf.js viewer or redirect to original URL if the file does not
exist.
"""
if url.path() == '/file':
filename = QUrlQuery(url).queryItemValue('filename')
if not filename:
raise UrlInvalidError("Missing filename")
if '/' in filename or os.sep in filename:
raise RequestDeniedError("Path separator in filename.")
path = _pdf_path(filename)
with open(path, 'rb') as f:
data = f.read()
mimetype = utils.guess_mimetype(filename, fallback=True)
return mimetype, data
if url.path() == '/web/viewer.html':
query = QUrlQuery(url)
filename = query.queryItemValue("filename")
if not filename:
raise UrlInvalidError("Missing filename")
path = _pdf_path(filename)
if not os.path.isfile(path):
source = query.queryItemValue('source')
if not source: # This may happen with old URLs stored in history
raise UrlInvalidError("Missing source")
raise Redirect(QUrl(source))
data = pdfjs.generate_pdfjs_page(filename, url)
return 'text/html', data
try:
data = pdfjs.get_pdfjs_res(url.path())
except pdfjs.PDFJSNotFound as e:
# Logging as the error might get lost otherwise since we're not showing
# the error page if a single asset is missing. This way we don't lose
# information, as the failed pdfjs requests are still in the log.
log.misc.warning(
"pdfjs resource requested but not found: {}".format(e.path))
raise NotFoundError("Can't find pdfjs resource '{}'".format(e.path))
else:
mimetype = utils.guess_mimetype(url.fileName(), fallback=True)
return mimetype, data
|
[
"def",
"qute_pdfjs",
"(",
"url",
":",
"QUrl",
")",
"->",
"_HandlerRet",
":",
"if",
"url",
".",
"path",
"(",
")",
"==",
"'/file'",
":",
"filename",
"=",
"QUrlQuery",
"(",
"url",
")",
".",
"queryItemValue",
"(",
"'filename'",
")",
"if",
"not",
"filename",
":",
"raise",
"UrlInvalidError",
"(",
"\"Missing filename\"",
")",
"if",
"'/'",
"in",
"filename",
"or",
"os",
".",
"sep",
"in",
"filename",
":",
"raise",
"RequestDeniedError",
"(",
"\"Path separator in filename.\"",
")",
"path",
"=",
"_pdf_path",
"(",
"filename",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
"mimetype",
"=",
"utils",
".",
"guess_mimetype",
"(",
"filename",
",",
"fallback",
"=",
"True",
")",
"return",
"mimetype",
",",
"data",
"if",
"url",
".",
"path",
"(",
")",
"==",
"'/web/viewer.html'",
":",
"query",
"=",
"QUrlQuery",
"(",
"url",
")",
"filename",
"=",
"query",
".",
"queryItemValue",
"(",
"\"filename\"",
")",
"if",
"not",
"filename",
":",
"raise",
"UrlInvalidError",
"(",
"\"Missing filename\"",
")",
"path",
"=",
"_pdf_path",
"(",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"source",
"=",
"query",
".",
"queryItemValue",
"(",
"'source'",
")",
"if",
"not",
"source",
":",
"# This may happen with old URLs stored in history",
"raise",
"UrlInvalidError",
"(",
"\"Missing source\"",
")",
"raise",
"Redirect",
"(",
"QUrl",
"(",
"source",
")",
")",
"data",
"=",
"pdfjs",
".",
"generate_pdfjs_page",
"(",
"filename",
",",
"url",
")",
"return",
"'text/html'",
",",
"data",
"try",
":",
"data",
"=",
"pdfjs",
".",
"get_pdfjs_res",
"(",
"url",
".",
"path",
"(",
")",
")",
"except",
"pdfjs",
".",
"PDFJSNotFound",
"as",
"e",
":",
"# Logging as the error might get lost otherwise since we're not showing",
"# the error page if a single asset is missing. This way we don't lose",
"# information, as the failed pdfjs requests are still in the log.",
"log",
".",
"misc",
".",
"warning",
"(",
"\"pdfjs resource requested but not found: {}\"",
".",
"format",
"(",
"e",
".",
"path",
")",
")",
"raise",
"NotFoundError",
"(",
"\"Can't find pdfjs resource '{}'\"",
".",
"format",
"(",
"e",
".",
"path",
")",
")",
"else",
":",
"mimetype",
"=",
"utils",
".",
"guess_mimetype",
"(",
"url",
".",
"fileName",
"(",
")",
",",
"fallback",
"=",
"True",
")",
"return",
"mimetype",
",",
"data"
] |
https://github.com/qutebrowser/qutebrowser/blob/3a2aaaacbf97f4bf0c72463f3da94ed2822a5442/qutebrowser/browser/qutescheme.py#L523-L570
|
||
adamrehn/ue4cli
|
25e3f31830494141bb3bdb11a8d52d5c8d8d64ef
|
ue4cli/UnrealManagerWindows.py
|
python
|
UnrealManagerWindows._editorPathSuffix
|
(self, cmdVersion)
|
return '-Cmd.exe' if cmdVersion == True else '.exe'
|
[] |
def _editorPathSuffix(self, cmdVersion):
return '-Cmd.exe' if cmdVersion == True else '.exe'
|
[
"def",
"_editorPathSuffix",
"(",
"self",
",",
"cmdVersion",
")",
":",
"return",
"'-Cmd.exe'",
"if",
"cmdVersion",
"==",
"True",
"else",
"'.exe'"
] |
https://github.com/adamrehn/ue4cli/blob/25e3f31830494141bb3bdb11a8d52d5c8d8d64ef/ue4cli/UnrealManagerWindows.py#L75-L76
|
|||
inkandswitch/livebook
|
93c8d467734787366ad084fc3566bf5cbe249c51
|
public/pypyjs/modules/numpy/ma/core.py
|
python
|
_MaskedUnaryOperation.__call__
|
(self, a, *args, **kwargs)
|
return masked_result
|
Execute the call behavior.
|
Execute the call behavior.
|
[
"Execute",
"the",
"call",
"behavior",
"."
] |
def __call__(self, a, *args, **kwargs):
"""
Execute the call behavior.
"""
d = getdata(a)
# Deal with domain
if self.domain is not None:
# Case 1.1. : Domained function
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
else:
# Case 1.2. : Function without a domain
# Get the result and the mask
result = self.f(d, *args, **kwargs)
m = getmask(a)
if not result.ndim:
            # Case 2.1. : The result is scalar
if m:
return masked
return result
if m is not nomask:
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input Now,
# that's plain silly: in C, we would just skip the element and
# keep the original, but we do have to do it that way in Python
# In case result has a lower dtype than the inputs (as in
# equal)
try:
np.copyto(result, d, where=m)
except TypeError:
pass
# Transform to
masked_result = result.view(get_masked_subclass(a))
masked_result._mask = m
masked_result._update_from(result)
return masked_result
|
[
"def",
"__call__",
"(",
"self",
",",
"a",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"d",
"=",
"getdata",
"(",
"a",
")",
"# Deal with domain",
"if",
"self",
".",
"domain",
"is",
"not",
"None",
":",
"# Case 1.1. : Domained function",
"with",
"np",
".",
"errstate",
"(",
"divide",
"=",
"'ignore'",
",",
"invalid",
"=",
"'ignore'",
")",
":",
"result",
"=",
"self",
".",
"f",
"(",
"d",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Make a mask",
"m",
"=",
"~",
"umath",
".",
"isfinite",
"(",
"result",
")",
"m",
"|=",
"self",
".",
"domain",
"(",
"d",
")",
"m",
"|=",
"getmask",
"(",
"a",
")",
"else",
":",
"# Case 1.2. : Function without a domain",
"# Get the result and the mask",
"result",
"=",
"self",
".",
"f",
"(",
"d",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"m",
"=",
"getmask",
"(",
"a",
")",
"if",
"not",
"result",
".",
"ndim",
":",
"# Case 2.1. : The result is scalarscalar",
"if",
"m",
":",
"return",
"masked",
"return",
"result",
"if",
"m",
"is",
"not",
"nomask",
":",
"# Case 2.2. The result is an array",
"# We need to fill the invalid data back w/ the input Now,",
"# that's plain silly: in C, we would just skip the element and",
"# keep the original, but we do have to do it that way in Python",
"# In case result has a lower dtype than the inputs (as in",
"# equal)",
"try",
":",
"np",
".",
"copyto",
"(",
"result",
",",
"d",
",",
"where",
"=",
"m",
")",
"except",
"TypeError",
":",
"pass",
"# Transform to",
"masked_result",
"=",
"result",
".",
"view",
"(",
"get_masked_subclass",
"(",
"a",
")",
")",
"masked_result",
".",
"_mask",
"=",
"m",
"masked_result",
".",
"_update_from",
"(",
"result",
")",
"return",
"masked_result"
] |
https://github.com/inkandswitch/livebook/blob/93c8d467734787366ad084fc3566bf5cbe249c51/public/pypyjs/modules/numpy/ma/core.py#L866-L909
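A short usage sketch via numpy's public API: np.ma.log is built on this class with a domain, so the domain masks the negative input and the explicit mask propagates:

import numpy as np

x = np.ma.array([1.0, -1.0, 100.0], mask=[False, False, True])
out = np.ma.log(x)  # the log domain masks the -1.0 entry
print(out)          # [0.0 -- --]
print(out.mask)     # [False  True  True]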
|
|
gprMax/gprMax
|
2d5926aa1f70b96ade2bbe4e99c8190d6d60f66d
|
gprMax/input_cmd_funcs.py
|
python
|
edge
|
(xs, ys, zs, xf, yf, zf, material, rotate90origin=())
|
return s, f
|
Prints the gprMax #edge command.
Args:
xs, ys, zs, xf, yf, zf (float): Start and finish coordinates.
material (str): Material identifier.
rotate90origin (tuple): x, y origin for 90 degree CCW rotation in x-y plane.
Returns:
s, f (tuple): 2 namedtuple Coordinate for the start and finish coordinates
|
Prints the gprMax #edge command.
|
[
"Prints",
"the",
"gprMax",
"#edge",
"command",
"."
] |
def edge(xs, ys, zs, xf, yf, zf, material, rotate90origin=()):
"""Prints the gprMax #edge command.
Args:
xs, ys, zs, xf, yf, zf (float): Start and finish coordinates.
material (str): Material identifier.
rotate90origin (tuple): x, y origin for 90 degree CCW rotation in x-y plane.
Returns:
s, f (tuple): 2 namedtuple Coordinate for the start and finish coordinates
"""
if rotate90origin:
if xs == xf:
polarisation = 'y'
else:
polarisation = 'x '
xs, ys, xf, yf = rotate90_edge(xs, ys, xf, yf, polarisation, rotate90origin)
s = Coordinate(xs, ys, zs)
f = Coordinate(xf, yf, zf)
command('edge', s, f, material)
return s, f
|
[
"def",
"edge",
"(",
"xs",
",",
"ys",
",",
"zs",
",",
"xf",
",",
"yf",
",",
"zf",
",",
"material",
",",
"rotate90origin",
"=",
"(",
")",
")",
":",
"if",
"rotate90origin",
":",
"if",
"xs",
"==",
"xf",
":",
"polarisation",
"=",
"'y'",
"else",
":",
"polarisation",
"=",
"'x '",
"xs",
",",
"ys",
",",
"xf",
",",
"yf",
"=",
"rotate90_edge",
"(",
"xs",
",",
"ys",
",",
"xf",
",",
"yf",
",",
"polarisation",
",",
"rotate90origin",
")",
"s",
"=",
"Coordinate",
"(",
"xs",
",",
"ys",
",",
"zs",
")",
"f",
"=",
"Coordinate",
"(",
"xf",
",",
"yf",
",",
"zf",
")",
"command",
"(",
"'edge'",
",",
"s",
",",
"f",
",",
"material",
")",
"return",
"s",
",",
"f"
] |
https://github.com/gprMax/gprMax/blob/2d5926aa1f70b96ade2bbe4e99c8190d6d60f66d/gprMax/input_cmd_funcs.py#L273-L296
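A hedged usage sketch, assuming the import path from the record above; the exact text printed by command() is an assumption:

from gprMax.input_cmd_funcs import edge

# expected to print a line like "#edge: 0.02 0.02 0.02 0.02 0.02 0.04 pec"
s, f = edge(0.02, 0.02, 0.02, 0.02, 0.02, 0.04, 'pec')
print(s, f)  # the start/finish Coordinate namedtuples returned above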
|
|
uber-research/learning-to-reweight-examples
|
0b616c99ecf8a1c99925322167694272a966ed00
|
cifar/cifar_train.py
|
python
|
_get_data_inputs
|
(bsize, seed=0)
|
return Datasets()
|
Gets data input tensors.
|
Gets data input tensors.
|
[
"Gets",
"data",
"input",
"tensors",
"."
] |
def _get_data_inputs(bsize, seed=0):
"""Gets data input tensors."""
# Compute the dataset directory for this experiment.
data_name = FLAGS.dataset
data_dir = os.path.join(FLAGS.data_root, data_name)
print(data_dir)
log.info('Building dataset')
trn_data = _get_data_input(data_name, data_dir, 'train', bsize, True, seed)
val_data = _get_data_input(data_name, data_dir, 'validation', bsize, False, seed)
test_data = _get_data_input(data_name, data_dir, 'test', bsize, False, seed)
class Datasets:
train = trn_data
val = val_data
test = test_data
return Datasets()
|
[
"def",
"_get_data_inputs",
"(",
"bsize",
",",
"seed",
"=",
"0",
")",
":",
"# Compute the dataset directory for this experiment.",
"data_name",
"=",
"FLAGS",
".",
"dataset",
"data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"FLAGS",
".",
"data_root",
",",
"data_name",
")",
"print",
"(",
"data_dir",
")",
"log",
".",
"info",
"(",
"'Building dataset'",
")",
"trn_data",
"=",
"_get_data_input",
"(",
"data_name",
",",
"data_dir",
",",
"'train'",
",",
"bsize",
",",
"True",
",",
"seed",
")",
"val_data",
"=",
"_get_data_input",
"(",
"data_name",
",",
"data_dir",
",",
"'validation'",
",",
"bsize",
",",
"False",
",",
"seed",
")",
"test_data",
"=",
"_get_data_input",
"(",
"data_name",
",",
"data_dir",
",",
"'test'",
",",
"bsize",
",",
"False",
",",
"seed",
")",
"class",
"Datasets",
":",
"train",
"=",
"trn_data",
"val",
"=",
"val_data",
"test",
"=",
"test_data",
"return",
"Datasets",
"(",
")"
] |
https://github.com/uber-research/learning-to-reweight-examples/blob/0b616c99ecf8a1c99925322167694272a966ed00/cifar/cifar_train.py#L136-L153
|
|
shiyanhui/FileHeader
|
f347cc134021fb0b710694b71c57742476f5fd2b
|
jinja2/environment.py
|
python
|
Environment.lex
|
(self, source, name=None, filename=None)
|
Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
|
Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
|
[
"Lex",
"the",
"given",
"sourcecode",
"and",
"return",
"a",
"generator",
"that",
"yields",
"tokens",
"as",
"tuples",
"in",
"the",
"form",
"(",
"lineno",
"token_type",
"value",
")",
".",
"This",
"can",
"be",
"useful",
"for",
":",
"ref",
":",
"extension",
"development",
"<writing",
"-",
"extensions",
">",
"and",
"debugging",
"templates",
"."
] |
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = text_type(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
|
[
"def",
"lex",
"(",
"self",
",",
"source",
",",
"name",
"=",
"None",
",",
"filename",
"=",
"None",
")",
":",
"source",
"=",
"text_type",
"(",
"source",
")",
"try",
":",
"return",
"self",
".",
"lexer",
".",
"tokeniter",
"(",
"source",
",",
"name",
",",
"filename",
")",
"except",
"TemplateSyntaxError",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"handle_exception",
"(",
"exc_info",
",",
"source_hint",
"=",
"source",
")"
] |
https://github.com/shiyanhui/FileHeader/blob/f347cc134021fb0b710694b71c57742476f5fd2b/jinja2/environment.py#L461-L476
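A usage sketch against Jinja2's public API; lex() yields plain tuples, so it can be consumed directly:

from jinja2 import Environment

env = Environment()
for lineno, token_type, value in env.lex("Hello {{ name }}!"):
    print(lineno, token_type, repr(value))
# 1 data 'Hello '
# 1 variable_begin '{{'
# ... and so on through variable_end and the trailing data token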
|
||
Ultimaker/Uranium
|
66da853cd9a04edd3a8a03526fac81e83c03f5aa
|
UM/Qt/ListModel.py
|
python
|
ListModel.sort
|
(self, fun: Callable[[Any], float])
|
Sort the list.
:param fun: The callable to use for determining the sort key.
|
Sort the list.
|
[
"Sort",
"the",
"list",
"."
] |
def sort(self, fun: Callable[[Any], float]) -> None:
"""Sort the list.
:param fun: The callable to use for determining the sort key.
"""
self.beginResetModel()
self._items.sort(key = fun)
self.endResetModel()
|
[
"def",
"sort",
"(",
"self",
",",
"fun",
":",
"Callable",
"[",
"[",
"Any",
"]",
",",
"float",
"]",
")",
"->",
"None",
":",
"self",
".",
"beginResetModel",
"(",
")",
"self",
".",
"_items",
".",
"sort",
"(",
"key",
"=",
"fun",
")",
"self",
".",
"endResetModel",
"(",
")"
] |
https://github.com/Ultimaker/Uranium/blob/66da853cd9a04edd3a8a03526fac81e83c03f5aa/UM/Qt/ListModel.py#L164-L172
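A minimal stand-in (no Qt required) showing the contract the method implements: wrap an in-place key sort in model-reset notifications so attached views refresh once:

class TinyModel:
    def __init__(self, items):
        self._items = items
    def beginResetModel(self):
        print("view: about to reset")
    def endResetModel(self):
        print("view: reset done")
    def sort(self, fun):
        self.beginResetModel()
        self._items.sort(key=fun)
        self.endResetModel()

m = TinyModel([{"priority": 2}, {"priority": 1}])
m.sort(lambda item: item["priority"])
print(m._items)  # [{'priority': 1}, {'priority': 2}]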
|
||
bruderstein/PythonScript
|
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
|
PythonLib/full/concurrent/futures/_base.py
|
python
|
Future.running
|
(self)
|
Return True if the future is currently executing.
|
Return True if the future is currently executing.
|
[
"Return",
"True",
"if",
"the",
"future",
"is",
"currently",
"executing",
"."
] |
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
|
[
"def",
"running",
"(",
"self",
")",
":",
"with",
"self",
".",
"_condition",
":",
"return",
"self",
".",
"_state",
"==",
"RUNNING"
] |
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/concurrent/futures/_base.py#L377-L380
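A usage sketch with the standard library; timing makes the first check only likely True, hence the short sleep:

import time
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=1) as ex:
    fut = ex.submit(time.sleep, 0.2)
    time.sleep(0.05)      # give the worker a moment to pick up the task
    print(fut.running())  # True while the sleep is executing
    fut.result()
    print(fut.running())  # False once the future has completed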
|
||
zhufz/nlp_research
|
b435319858520edcca7c0320dca3e0013087c276
|
language_model/bert/run_classifier.py
|
python
|
XnliProcessor.get_dev_examples
|
(self, data_dir)
|
return examples
|
See base class.
|
See base class.
|
[
"See",
"base",
"class",
"."
] |
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
|
[
"def",
"get_dev_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"lines",
"=",
"self",
".",
"_read_tsv",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"\"xnli.dev.tsv\"",
")",
")",
"examples",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"line",
")",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"i",
"==",
"0",
":",
"continue",
"guid",
"=",
"\"dev-%d\"",
"%",
"(",
"i",
")",
"language",
"=",
"tokenization",
".",
"convert_to_unicode",
"(",
"line",
"[",
"0",
"]",
")",
"if",
"language",
"!=",
"tokenization",
".",
"convert_to_unicode",
"(",
"self",
".",
"language",
")",
":",
"continue",
"text_a",
"=",
"tokenization",
".",
"convert_to_unicode",
"(",
"line",
"[",
"6",
"]",
")",
"text_b",
"=",
"tokenization",
".",
"convert_to_unicode",
"(",
"line",
"[",
"7",
"]",
")",
"label",
"=",
"tokenization",
".",
"convert_to_unicode",
"(",
"line",
"[",
"1",
"]",
")",
"examples",
".",
"append",
"(",
"InputExample",
"(",
"guid",
"=",
"guid",
",",
"text_a",
"=",
"text_a",
",",
"text_b",
"=",
"text_b",
",",
"label",
"=",
"label",
")",
")",
"return",
"examples"
] |
https://github.com/zhufz/nlp_research/blob/b435319858520edcca7c0320dca3e0013087c276/language_model/bert/run_classifier.py#L232-L248
|
|
OpenCobolIDE/OpenCobolIDE
|
c78d0d335378e5fe0a5e74f53c19b68b55e85388
|
open_cobol_ide/extlibs/future/backports/email/feedparser.py
|
python
|
FeedParser._pop_message
|
(self)
|
return retval
|
[] |
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
|
[
"def",
"_pop_message",
"(",
"self",
")",
":",
"retval",
"=",
"self",
".",
"_msgstack",
".",
"pop",
"(",
")",
"if",
"self",
".",
"_msgstack",
":",
"self",
".",
"_cur",
"=",
"self",
".",
"_msgstack",
"[",
"-",
"1",
"]",
"else",
":",
"self",
".",
"_cur",
"=",
"None",
"return",
"retval"
] |
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/future/backports/email/feedparser.py#L208-L214
|
|||
joxeankoret/diaphora
|
dcb5a25ac9fe23a285b657e5389cf770de7ac928
|
pygments/lexers/__init__.py
|
python
|
find_lexer_class
|
(name)
|
Lookup a lexer class by name.
Return None if not found.
|
Lookup a lexer class by name.
|
[
"Lookup",
"a",
"lexer",
"class",
"by",
"name",
"."
] |
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
|
[
"def",
"find_lexer_class",
"(",
"name",
")",
":",
"if",
"name",
"in",
"_lexer_cache",
":",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# lookup builtin lexers",
"for",
"module_name",
",",
"lname",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"name",
"==",
"lname",
":",
"_load_lexers",
"(",
"module_name",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# continue with lexers from setuptools entrypoints",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"cls",
".",
"name",
"==",
"name",
":",
"return",
"cls"
] |
https://github.com/joxeankoret/diaphora/blob/dcb5a25ac9fe23a285b657e5389cf770de7ac928/pygments/lexers/__init__.py#L57-L72
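A usage sketch; the lookup is by a lexer's `name` attribute, not one of its aliases:

from pygments.lexers import find_lexer_class

print(find_lexer_class('Python'))       # e.g. <class 'pygments.lexers.python.PythonLexer'>
print(find_lexer_class('no-such-one'))  # None: the function falls through without a return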
|
||
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/waf/v20180125/waf_client.py
|
python
|
WafClient.CreateAccessExport
|
(self, request)
|
本接口用于创建访问日志导出 (This API is used to create an access log export.)
:param request: Request instance for CreateAccessExport.
:type request: :class:`tencentcloud.waf.v20180125.models.CreateAccessExportRequest`
:rtype: :class:`tencentcloud.waf.v20180125.models.CreateAccessExportResponse`
|
本接口用于创建访问日志导出 (This API is used to create an access log export.)
|
[
"本接口用于创建访问日志导出"
] |
def CreateAccessExport(self, request):
"""本接口用于创建访问日志导出
:param request: Request instance for CreateAccessExport.
:type request: :class:`tencentcloud.waf.v20180125.models.CreateAccessExportRequest`
:rtype: :class:`tencentcloud.waf.v20180125.models.CreateAccessExportResponse`
"""
try:
params = request._serialize()
body = self.call("CreateAccessExport", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateAccessExportResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
|
[
"def",
"CreateAccessExport",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"params",
"=",
"request",
".",
"_serialize",
"(",
")",
"body",
"=",
"self",
".",
"call",
"(",
"\"CreateAccessExport\"",
",",
"params",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"if",
"\"Error\"",
"not",
"in",
"response",
"[",
"\"Response\"",
"]",
":",
"model",
"=",
"models",
".",
"CreateAccessExportResponse",
"(",
")",
"model",
".",
"_deserialize",
"(",
"response",
"[",
"\"Response\"",
"]",
")",
"return",
"model",
"else",
":",
"code",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"message",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Message\"",
"]",
"reqid",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"RequestId\"",
"]",
"raise",
"TencentCloudSDKException",
"(",
"code",
",",
"message",
",",
"reqid",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"TencentCloudSDKException",
")",
":",
"raise",
"else",
":",
"raise",
"TencentCloudSDKException",
"(",
"e",
".",
"message",
",",
"e",
".",
"message",
")"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/waf/v20180125/waf_client.py#L57-L82
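A hedged call sketch following the SDK's usual client pattern; the region and request fields below are placeholders, not values taken from this record:

from tencentcloud.common import credential
from tencentcloud.waf.v20180125 import waf_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")  # placeholder credentials
client = waf_client.WafClient(cred, "ap-guangzhou")

req = models.CreateAccessExportRequest()
# topic id, time range, query, etc. would be filled in from the API docs here
resp = client.CreateAccessExport(req)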
|
||
datamllab/rlcard
|
c21ea82519c453a42e3bdc6848bd3356e9b6ac43
|
rlcard/games/gin_rummy/dealer.py
|
python
|
GinRummyDealer.deal_cards
|
(self, player: GinRummyPlayer, num: int)
|
Deal some cards from stock_pile to one player
Args:
player (GinRummyPlayer): The GinRummyPlayer object
num (int): The number of cards to be dealt
|
Deal some cards from stock_pile to one player
|
[
"Deal",
"some",
"cards",
"from",
"stock_pile",
"to",
"one",
"player"
] |
def deal_cards(self, player: GinRummyPlayer, num: int):
''' Deal some cards from stock_pile to one player
Args:
player (GinRummyPlayer): The GinRummyPlayer object
num (int): The number of cards to be dealt
'''
for _ in range(num):
player.hand.append(self.stock_pile.pop())
player.did_populate_hand()
|
[
"def",
"deal_cards",
"(",
"self",
",",
"player",
":",
"GinRummyPlayer",
",",
"num",
":",
"int",
")",
":",
"for",
"_",
"in",
"range",
"(",
"num",
")",
":",
"player",
".",
"hand",
".",
"append",
"(",
"self",
".",
"stock_pile",
".",
"pop",
"(",
")",
")",
"player",
".",
"did_populate_hand",
"(",
")"
] |
https://github.com/datamllab/rlcard/blob/c21ea82519c453a42e3bdc6848bd3356e9b6ac43/rlcard/games/gin_rummy/dealer.py#L23-L32
|
||
Lawouach/WebSocket-for-Python
|
a3e6d157b7bb1da1009e66aa750170f1c07aa143
|
ws4py/server/wsgirefserver.py
|
python
|
WSGIServer.server_close
|
(self)
|
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
|
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
|
[
"Properly",
"initiate",
"closing",
"handshakes",
"on",
"all",
"websockets",
"when",
"the",
"WSGI",
"server",
"terminates",
"."
] |
def server_close(self):
"""
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
"""
if hasattr(self, 'manager'):
self.manager.close_all()
self.manager.stop()
self.manager.join()
delattr(self, 'manager')
_WSGIServer.server_close(self)
|
[
"def",
"server_close",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'manager'",
")",
":",
"self",
".",
"manager",
".",
"close_all",
"(",
")",
"self",
".",
"manager",
".",
"stop",
"(",
")",
"self",
".",
"manager",
".",
"join",
"(",
")",
"delattr",
"(",
"self",
",",
"'manager'",
")",
"_WSGIServer",
".",
"server_close",
"(",
"self",
")"
] |
https://github.com/Lawouach/WebSocket-for-Python/blob/a3e6d157b7bb1da1009e66aa750170f1c07aa143/ws4py/server/wsgirefserver.py#L131-L141
|
||
JiYou/openstack
|
8607dd488bde0905044b303eb6e52bdea6806923
|
packages/source/quantum/quantum/openstack/common/log.py
|
python
|
LegacyFormatter.format
|
(self, record)
|
return logging.Formatter.format(self, record)
|
Uses contextstring if request_id is set, otherwise default.
|
Uses contextstring if request_id is set, otherwise default.
|
[
"Uses",
"contextstring",
"if",
"request_id",
"is",
"set",
"otherwise",
"default",
"."
] |
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(sdague): default the fancier formating params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formated copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
|
[
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"# NOTE(sdague): default the fancier formating params",
"# to an empty string so we don't throw an exception if",
"# they get used",
"for",
"key",
"in",
"(",
"'instance'",
",",
"'color'",
")",
":",
"if",
"key",
"not",
"in",
"record",
".",
"__dict__",
":",
"record",
".",
"__dict__",
"[",
"key",
"]",
"=",
"''",
"if",
"record",
".",
"__dict__",
".",
"get",
"(",
"'request_id'",
",",
"None",
")",
":",
"self",
".",
"_fmt",
"=",
"CONF",
".",
"logging_context_format_string",
"else",
":",
"self",
".",
"_fmt",
"=",
"CONF",
".",
"logging_default_format_string",
"if",
"(",
"record",
".",
"levelno",
"==",
"logging",
".",
"DEBUG",
"and",
"CONF",
".",
"logging_debug_format_suffix",
")",
":",
"self",
".",
"_fmt",
"+=",
"\" \"",
"+",
"CONF",
".",
"logging_debug_format_suffix",
"# Cache this on the record, Logger will respect our formated copy",
"if",
"record",
".",
"exc_info",
":",
"record",
".",
"exc_text",
"=",
"self",
".",
"formatException",
"(",
"record",
".",
"exc_info",
",",
"record",
")",
"return",
"logging",
".",
"Formatter",
".",
"format",
"(",
"self",
",",
"record",
")"
] |
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/quantum/quantum/openstack/common/log.py#L458-L479
|
|
leo-editor/leo-editor
|
383d6776d135ef17d73d935a2f0ecb3ac0e99494
|
leo/plugins/backlink.py
|
python
|
backlinkController.updateTab
|
(self, tag, k)
|
called by leo select position hook
|
called by leo select position hook
|
[
"called",
"by",
"leo",
"select",
"position",
"hook"
] |
def updateTab(self, tag, k):
"""called by leo select position hook"""
if k['c'] != self.c:
return # not our problem
self.updateTabInt()
|
[
"def",
"updateTab",
"(",
"self",
",",
"tag",
",",
"k",
")",
":",
"if",
"k",
"[",
"'c'",
"]",
"!=",
"self",
".",
"c",
":",
"return",
"# not our problem",
"self",
".",
"updateTabInt",
"(",
")"
] |
https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/plugins/backlink.py#L603-L608
|
||
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/lib/django-1.5/django/contrib/gis/geos/geometry.py
|
python
|
GEOSGeometry.__repr__
|
(self)
|
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
|
Short-hand representation because WKT may be very large.
|
Short-hand representation because WKT may be very large.
|
[
"Short",
"-",
"hand",
"representation",
"because",
"WKT",
"may",
"be",
"very",
"large",
"."
] |
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"'<%s object at %s>'",
"%",
"(",
"self",
".",
"geom_type",
",",
"hex",
"(",
"addressof",
"(",
"self",
".",
"ptr",
")",
")",
")"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.5/django/contrib/gis/geos/geometry.py#L137-L139
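A usage sketch, assuming a Django install with the GEOS C library available:

from django.contrib.gis.geos import GEOSGeometry

geom = GEOSGeometry('POINT(5 23)')
print(repr(geom))  # e.g. <Point object at 0x7f3a2c1b4d60>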
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/qwikswitch/__init__.py
|
python
|
async_setup
|
(hass: HomeAssistant, config: ConfigType)
|
return True
|
Qwikswitch component setup.
 |
Qwikswitch component setup.
|
[
"Qwiskswitch",
"component",
"setup",
"."
] |
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Qwiskswitch component setup."""
# Add cmd's to in /&listen packets will fire events
# By default only buttons of type [TOGGLE,SCENE EXE,LEVEL]
cmd_buttons = set(CMD_BUTTONS)
for btn in config[DOMAIN][CONF_BUTTON_EVENTS]:
cmd_buttons.add(btn)
url = config[DOMAIN][CONF_URL]
dimmer_adjust = config[DOMAIN][CONF_DIMMER_ADJUST]
sensors = config[DOMAIN][CONF_SENSORS]
switches = config[DOMAIN][CONF_SWITCHES]
def callback_value_changed(_qsd, qsid, _val):
"""Update entity values based on device change."""
_LOGGER.debug("Dispatch %s (update from devices)", qsid)
hass.helpers.dispatcher.async_dispatcher_send(qsid, None)
session = async_get_clientsession(hass)
qsusb = QSUsb(
url=url,
dim_adj=dimmer_adjust,
session=session,
callback_value_changed=callback_value_changed,
)
# Discover all devices in QSUSB
if not await qsusb.update_from_devices():
return False
hass.data[DOMAIN] = qsusb
comps: dict[Platform, list] = {
Platform.SWITCH: [],
Platform.LIGHT: [],
Platform.SENSOR: [],
Platform.BINARY_SENSOR: [],
}
sensor_ids = []
for sens in sensors:
try:
_, _type = SENSORS[sens["type"]]
sensor_ids.append(sens["id"])
if _type is bool:
comps[Platform.BINARY_SENSOR].append(sens)
continue
comps[Platform.SENSOR].append(sens)
for _key in ("invert", "class"):
if _key in sens:
_LOGGER.warning(
"%s should only be used for binary_sensors: %s", _key, sens
)
except KeyError:
_LOGGER.warning(
"Sensor validation failed for sensor id=%s type=%s",
sens["id"],
sens["type"],
)
for qsid, dev in qsusb.devices.items():
if qsid in switches:
if dev.qstype != QSType.relay:
_LOGGER.warning("You specified a switch that is not a relay %s", qsid)
continue
comps[Platform.SWITCH].append(qsid)
elif dev.qstype in (QSType.relay, QSType.dimmer):
comps[Platform.LIGHT].append(qsid)
else:
_LOGGER.warning("Ignored unknown QSUSB device: %s", dev)
continue
# Load platforms
for comp_name, comp_conf in comps.items():
if comp_conf:
load_platform(hass, comp_name, DOMAIN, {DOMAIN: comp_conf}, config)
def callback_qs_listen(qspacket):
"""Typically a button press or update signal."""
# If button pressed, fire a hass event
if QS_ID in qspacket:
if qspacket.get(QS_CMD, "") in cmd_buttons:
hass.bus.async_fire(f"qwikswitch.button.{qspacket[QS_ID]}", qspacket)
return
if qspacket[QS_ID] in sensor_ids:
_LOGGER.debug("Dispatch %s ((%s))", qspacket[QS_ID], qspacket)
hass.helpers.dispatcher.async_dispatcher_send(qspacket[QS_ID], qspacket)
# Update all ha_objects
hass.async_add_job(qsusb.update_from_devices)
@callback
def async_start(_):
"""Start listening."""
qsusb.listen(callback_qs_listen)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, async_start)
@callback
def async_stop(_):
"""Stop the listener."""
hass.data[DOMAIN].stop()
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_stop)
return True
|
[
"async",
"def",
"async_setup",
"(",
"hass",
":",
"HomeAssistant",
",",
"config",
":",
"ConfigType",
")",
"->",
"bool",
":",
"# Add cmd's to in /&listen packets will fire events",
"# By default only buttons of type [TOGGLE,SCENE EXE,LEVEL]",
"cmd_buttons",
"=",
"set",
"(",
"CMD_BUTTONS",
")",
"for",
"btn",
"in",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_BUTTON_EVENTS",
"]",
":",
"cmd_buttons",
".",
"add",
"(",
"btn",
")",
"url",
"=",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_URL",
"]",
"dimmer_adjust",
"=",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_DIMMER_ADJUST",
"]",
"sensors",
"=",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_SENSORS",
"]",
"switches",
"=",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_SWITCHES",
"]",
"def",
"callback_value_changed",
"(",
"_qsd",
",",
"qsid",
",",
"_val",
")",
":",
"\"\"\"Update entity values based on device change.\"\"\"",
"_LOGGER",
".",
"debug",
"(",
"\"Dispatch %s (update from devices)\"",
",",
"qsid",
")",
"hass",
".",
"helpers",
".",
"dispatcher",
".",
"async_dispatcher_send",
"(",
"qsid",
",",
"None",
")",
"session",
"=",
"async_get_clientsession",
"(",
"hass",
")",
"qsusb",
"=",
"QSUsb",
"(",
"url",
"=",
"url",
",",
"dim_adj",
"=",
"dimmer_adjust",
",",
"session",
"=",
"session",
",",
"callback_value_changed",
"=",
"callback_value_changed",
",",
")",
"# Discover all devices in QSUSB",
"if",
"not",
"await",
"qsusb",
".",
"update_from_devices",
"(",
")",
":",
"return",
"False",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"qsusb",
"comps",
":",
"dict",
"[",
"Platform",
",",
"list",
"]",
"=",
"{",
"Platform",
".",
"SWITCH",
":",
"[",
"]",
",",
"Platform",
".",
"LIGHT",
":",
"[",
"]",
",",
"Platform",
".",
"SENSOR",
":",
"[",
"]",
",",
"Platform",
".",
"BINARY_SENSOR",
":",
"[",
"]",
",",
"}",
"sensor_ids",
"=",
"[",
"]",
"for",
"sens",
"in",
"sensors",
":",
"try",
":",
"_",
",",
"_type",
"=",
"SENSORS",
"[",
"sens",
"[",
"\"type\"",
"]",
"]",
"sensor_ids",
".",
"append",
"(",
"sens",
"[",
"\"id\"",
"]",
")",
"if",
"_type",
"is",
"bool",
":",
"comps",
"[",
"Platform",
".",
"BINARY_SENSOR",
"]",
".",
"append",
"(",
"sens",
")",
"continue",
"comps",
"[",
"Platform",
".",
"SENSOR",
"]",
".",
"append",
"(",
"sens",
")",
"for",
"_key",
"in",
"(",
"\"invert\"",
",",
"\"class\"",
")",
":",
"if",
"_key",
"in",
"sens",
":",
"_LOGGER",
".",
"warning",
"(",
"\"%s should only be used for binary_sensors: %s\"",
",",
"_key",
",",
"sens",
")",
"except",
"KeyError",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Sensor validation failed for sensor id=%s type=%s\"",
",",
"sens",
"[",
"\"id\"",
"]",
",",
"sens",
"[",
"\"type\"",
"]",
",",
")",
"for",
"qsid",
",",
"dev",
"in",
"qsusb",
".",
"devices",
".",
"items",
"(",
")",
":",
"if",
"qsid",
"in",
"switches",
":",
"if",
"dev",
".",
"qstype",
"!=",
"QSType",
".",
"relay",
":",
"_LOGGER",
".",
"warning",
"(",
"\"You specified a switch that is not a relay %s\"",
",",
"qsid",
")",
"continue",
"comps",
"[",
"Platform",
".",
"SWITCH",
"]",
".",
"append",
"(",
"qsid",
")",
"elif",
"dev",
".",
"qstype",
"in",
"(",
"QSType",
".",
"relay",
",",
"QSType",
".",
"dimmer",
")",
":",
"comps",
"[",
"Platform",
".",
"LIGHT",
"]",
".",
"append",
"(",
"qsid",
")",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Ignored unknown QSUSB device: %s\"",
",",
"dev",
")",
"continue",
"# Load platforms",
"for",
"comp_name",
",",
"comp_conf",
"in",
"comps",
".",
"items",
"(",
")",
":",
"if",
"comp_conf",
":",
"load_platform",
"(",
"hass",
",",
"comp_name",
",",
"DOMAIN",
",",
"{",
"DOMAIN",
":",
"comp_conf",
"}",
",",
"config",
")",
"def",
"callback_qs_listen",
"(",
"qspacket",
")",
":",
"\"\"\"Typically a button press or update signal.\"\"\"",
"# If button pressed, fire a hass event",
"if",
"QS_ID",
"in",
"qspacket",
":",
"if",
"qspacket",
".",
"get",
"(",
"QS_CMD",
",",
"\"\"",
")",
"in",
"cmd_buttons",
":",
"hass",
".",
"bus",
".",
"async_fire",
"(",
"f\"qwikswitch.button.{qspacket[QS_ID]}\"",
",",
"qspacket",
")",
"return",
"if",
"qspacket",
"[",
"QS_ID",
"]",
"in",
"sensor_ids",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Dispatch %s ((%s))\"",
",",
"qspacket",
"[",
"QS_ID",
"]",
",",
"qspacket",
")",
"hass",
".",
"helpers",
".",
"dispatcher",
".",
"async_dispatcher_send",
"(",
"qspacket",
"[",
"QS_ID",
"]",
",",
"qspacket",
")",
"# Update all ha_objects",
"hass",
".",
"async_add_job",
"(",
"qsusb",
".",
"update_from_devices",
")",
"@",
"callback",
"def",
"async_start",
"(",
"_",
")",
":",
"\"\"\"Start listening.\"\"\"",
"qsusb",
".",
"listen",
"(",
"callback_qs_listen",
")",
"hass",
".",
"bus",
".",
"async_listen_once",
"(",
"EVENT_HOMEASSISTANT_START",
",",
"async_start",
")",
"@",
"callback",
"def",
"async_stop",
"(",
"_",
")",
":",
"\"\"\"Stop the listener.\"\"\"",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
".",
"stop",
"(",
")",
"hass",
".",
"bus",
".",
"async_listen",
"(",
"EVENT_HOMEASSISTANT_STOP",
",",
"async_stop",
")",
"return",
"True"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/qwikswitch/__init__.py#L137-L244
|
|
missionpinball/mpf
|
8e6b74cff4ba06d2fec9445742559c1068b88582
|
mpf/platforms/fast/fast_gi.py
|
python
|
FASTGIString.is_successor_of
|
(self, other)
|
Return true if the other light has the previous number.
|
Return true if the other light has the previous number.
|
[
"Return",
"true",
"if",
"the",
"other",
"light",
"has",
"the",
"previous",
"number",
"."
] |
def is_successor_of(self, other):
"""Return true if the other light has the previous number."""
raise AssertionError("Not possible in FASTGI.")
|
[
"def",
"is_successor_of",
"(",
"self",
",",
"other",
")",
":",
"raise",
"AssertionError",
"(",
"\"Not possible in FASTGI.\"",
")"
] |
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/platforms/fast/fast_gi.py#L36-L38
|
||
sc0tfree/mentalist
|
953a07bedf8842c817f0825d3cd0c6e6ce1d3e7f
|
mentalist/model.py
|
python
|
Chain.get_words
|
(self, basewords_only=False)
|
A generator that yields the chain's words
|
A generator that yields the chain's words
|
[
"A",
"generator",
"that",
"yields",
"the",
"chain",
"s",
"words"
] |
def get_words(self, basewords_only=False):
'''A generator that yields the chain's words
'''
for attr in self.nodes[0].attrs:
attr.words_read = 0
if basewords_only:
for word in self.nodes[0].get_words([]):
yield word
else:
words = []
for node in self.nodes:
words = node.get_words(words)
for word in words:
yield word
|
[
"def",
"get_words",
"(",
"self",
",",
"basewords_only",
"=",
"False",
")",
":",
"for",
"attr",
"in",
"self",
".",
"nodes",
"[",
"0",
"]",
".",
"attrs",
":",
"attr",
".",
"words_read",
"=",
"0",
"if",
"basewords_only",
":",
"for",
"word",
"in",
"self",
".",
"nodes",
"[",
"0",
"]",
".",
"get_words",
"(",
"[",
"]",
")",
":",
"yield",
"word",
"else",
":",
"words",
"=",
"[",
"]",
"for",
"node",
"in",
"self",
".",
"nodes",
":",
"words",
"=",
"node",
".",
"get_words",
"(",
"words",
")",
"for",
"word",
"in",
"words",
":",
"yield",
"word"
] |
https://github.com/sc0tfree/mentalist/blob/953a07bedf8842c817f0825d3cd0c6e6ce1d3e7f/mentalist/model.py#L115-L129
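A minimal stand-in for the node pipeline (the real Node class lives elsewhere in model.py): each stage wraps the previous word stream in a new generator, so nothing is materialized until iteration:

def capitalize_node(words):
    for w in words:
        yield w
        yield w.capitalize()

def suffix_node(words):
    for w in words:
        yield w
        yield w + "1"

words = iter(["pass"])
for node in (capitalize_node, suffix_node):
    words = node(words)
print(list(words))  # ['pass', 'pass1', 'Pass', 'Pass1']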
|
||
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
Lib/site-packages/pip-7.1.2-py3.3.egg/pip/utils/ui.py
|
python
|
InterruptibleMixin.finish
|
(self)
|
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
|
Restore the original SIGINT handler after finishing.
|
[
"Restore",
"the",
"original",
"SIGINT",
"handler",
"after",
"finishing",
"."
] |
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
|
[
"def",
"finish",
"(",
"self",
")",
":",
"super",
"(",
"InterruptibleMixin",
",",
"self",
")",
".",
"finish",
"(",
")",
"signal",
"(",
"SIGINT",
",",
"self",
".",
"original_handler",
")"
] |
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pip-7.1.2-py3.3.egg/pip/utils/ui.py#L88-L96
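A self-contained sketch of the save/restore pattern the mixin implements (names here are illustrative, not pip's):

from signal import SIGINT, getsignal, signal

class InterruptibleTask:
    def start(self):
        self.original_handler = getsignal(SIGINT)
        signal(SIGINT, self.handle_sigint)

    def handle_sigint(self, signum, frame):
        print("interrupted: cleaning up the display...")

    def finish(self):
        signal(SIGINT, self.original_handler)  # restore no matter what

task = InterruptibleTask()
task.start()
try:
    pass  # the interruptible work happens here
finally:
    task.finish()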
|
||
pwnieexpress/pwn_plug_sources
|
1a23324f5dc2c3de20f9c810269b6a29b2758cad
|
src/set/src/core/scapy.py
|
python
|
ScapyFreqFilter.filter
|
(self, record)
|
return 1
|
[] |
def filter(self, record):
wt = conf.warning_threshold
if wt > 0:
stk = traceback.extract_stack()
caller=None
for f,l,n,c in stk:
if n == 'warning':
break
caller = l
tm,nb = self.warning_table.get(caller, (0,0))
ltm = time.time()
if ltm-tm > wt:
tm = ltm
nb = 0
else:
if nb < 2:
nb += 1
if nb == 2:
record.msg = "more "+record.msg
else:
return 0
self.warning_table[caller] = (tm,nb)
return 1
|
[
"def",
"filter",
"(",
"self",
",",
"record",
")",
":",
"wt",
"=",
"conf",
".",
"warning_threshold",
"if",
"wt",
">",
"0",
":",
"stk",
"=",
"traceback",
".",
"extract_stack",
"(",
")",
"caller",
"=",
"None",
"for",
"f",
",",
"l",
",",
"n",
",",
"c",
"in",
"stk",
":",
"if",
"n",
"==",
"'warning'",
":",
"break",
"caller",
"=",
"l",
"tm",
",",
"nb",
"=",
"self",
".",
"warning_table",
".",
"get",
"(",
"caller",
",",
"(",
"0",
",",
"0",
")",
")",
"ltm",
"=",
"time",
".",
"time",
"(",
")",
"if",
"ltm",
"-",
"tm",
">",
"wt",
":",
"tm",
"=",
"ltm",
"nb",
"=",
"0",
"else",
":",
"if",
"nb",
"<",
"2",
":",
"nb",
"+=",
"1",
"if",
"nb",
"==",
"2",
":",
"record",
".",
"msg",
"=",
"\"more \"",
"+",
"record",
".",
"msg",
"else",
":",
"return",
"0",
"self",
".",
"warning_table",
"[",
"caller",
"]",
"=",
"(",
"tm",
",",
"nb",
")",
"return",
"1"
] |
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/set/src/core/scapy.py#L54-L76
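The same idea expressed as a self-contained logging.Filter: suppress repeats of a message inside a time window (keying by message text and a fixed interval are simplifications of the per-caller table above):

import logging
import time

class RateLimitFilter(logging.Filter):
    """Drop repeats of the same message within `interval` seconds."""
    def __init__(self, interval=5.0):
        super().__init__()
        self.interval = interval
        self.last_seen = {}

    def filter(self, record):
        now = time.time()
        prev = self.last_seen.get(record.msg, 0.0)
        self.last_seen[record.msg] = now
        return now - prev > self.interval

log = logging.getLogger("runtime")
log.addFilter(RateLimitFilter())
for _ in range(3):
    log.warning("duplicate warning")  # only the first one gets through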
|
|||
HuangYG123/CurricularFace
|
68c8727fb7cd2243ecbfd7e09c35efc87c6e2de4
|
backbone/model_irse.py
|
python
|
Backbone.forward
|
(self, x)
|
return x, conv_out
|
[] |
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
conv_out = x.view(x.shape[0], -1)
x = self.output_layer(x)
return x, conv_out
|
[
"def",
"forward",
"(",
"self",
",",
"x",
")",
":",
"x",
"=",
"self",
".",
"input_layer",
"(",
"x",
")",
"x",
"=",
"self",
".",
"body",
"(",
"x",
")",
"conv_out",
"=",
"x",
".",
"view",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"x",
"=",
"self",
".",
"output_layer",
"(",
"x",
")",
"return",
"x",
",",
"conv_out"
] |
https://github.com/HuangYG123/CurricularFace/blob/68c8727fb7cd2243ecbfd7e09c35efc87c6e2de4/backbone/model_irse.py#L169-L175
|
|||
pyvisa/pyvisa
|
ae8c8b1180851ee4d120bc3527923c944b9623d6
|
pyvisa/ctwrapper/functions.py
|
python
|
gpib_command
|
(library, session, data)
|
return return_count.value, ret
|
Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to write.
Returns
-------
int
Number of written bytes
constants.StatusCode
Return value of the library call.
|
Write GPIB command bytes on the bus.
|
[
"Write",
"GPIB",
"command",
"bytes",
"on",
"the",
"bus",
"."
] |
def gpib_command(library, session, data):
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to write.
Returns
-------
int
Number of written bytes
constants.StatusCode
Return value of the library call.
"""
return_count = ViUInt32()
# [ViSession, ViBuf, ViUInt32, ViPUInt32]
ret = library.viGpibCommand(session, data, len(data), byref(return_count))
return return_count.value, ret
|
[
"def",
"gpib_command",
"(",
"library",
",",
"session",
",",
"data",
")",
":",
"return_count",
"=",
"ViUInt32",
"(",
")",
"# [ViSession, ViBuf, ViUInt32, ViPUInt32]",
"ret",
"=",
"library",
".",
"viGpibCommand",
"(",
"session",
",",
"data",
",",
"len",
"(",
"data",
")",
",",
"byref",
"(",
"return_count",
")",
")",
"return",
"return_count",
".",
"value",
",",
"ret"
] |
https://github.com/pyvisa/pyvisa/blob/ae8c8b1180851ee4d120bc3527923c944b9623d6/pyvisa/ctwrapper/functions.py#L859-L885
|
|
out0fmemory/GoAgent-Always-Available
|
c4254984fea633ce3d1893fe5901debd9f22c2a9
|
server/lib/google/appengine/api/logservice/logservice.py
|
python
|
_LogsDequeBuffer._flush
|
(self)
|
Internal version of flush() with no locking.
|
Internal version of flush() with no locking.
|
[
"Internal",
"version",
"of",
"flush",
"()",
"with",
"no",
"locking",
"."
] |
def _flush(self):
"""Internal version of flush() with no locking."""
records_to_be_flushed = []
try:
while True:
group = log_service_pb.UserAppLogGroup()
bytes_left = self._MAX_FLUSH_SIZE
while self._buffer:
record = self._get_record()
if record.IsBlank():
continue
message = self._clean(record.message)
message = self._truncate(message, self._MAX_LINE_SIZE)
if len(message) > bytes_left:
self._rollback_record(record)
break
records_to_be_flushed.append(record)
line = group.add_log_line()
line.set_timestamp_usec(record.created)
line.set_level(record.level)
if record.source_location is not None:
line.mutable_source_location().set_file(record.source_location[0])
line.mutable_source_location().set_line(record.source_location[1])
line.mutable_source_location().set_function_name(
record.source_location[2])
line.set_message(message)
bytes_left -= 1 + group.lengthString(line.ByteSize())
request = log_service_pb.FlushRequest()
request.set_logs(group.Encode())
response = api_base_pb.VoidProto()
apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response)
if not self._buffer:
break
except apiproxy_errors.CancelledError:
records_to_be_flushed.reverse()
self._buffer.extendleft(records_to_be_flushed)
except Exception, e:
records_to_be_flushed.reverse()
self._buffer.extendleft(records_to_be_flushed)
line = '-' * 80
msg = 'ERROR: Could not flush to log_service (%s)\n%s\n%s\n%s\n'
_sys_stderr.write(msg % (e, line, self._contents(), line))
self._clear()
raise
else:
self._clear()
|
[
"def",
"_flush",
"(",
"self",
")",
":",
"records_to_be_flushed",
"=",
"[",
"]",
"try",
":",
"while",
"True",
":",
"group",
"=",
"log_service_pb",
".",
"UserAppLogGroup",
"(",
")",
"bytes_left",
"=",
"self",
".",
"_MAX_FLUSH_SIZE",
"while",
"self",
".",
"_buffer",
":",
"record",
"=",
"self",
".",
"_get_record",
"(",
")",
"if",
"record",
".",
"IsBlank",
"(",
")",
":",
"continue",
"message",
"=",
"self",
".",
"_clean",
"(",
"record",
".",
"message",
")",
"message",
"=",
"self",
".",
"_truncate",
"(",
"message",
",",
"self",
".",
"_MAX_LINE_SIZE",
")",
"if",
"len",
"(",
"message",
")",
">",
"bytes_left",
":",
"self",
".",
"_rollback_record",
"(",
"record",
")",
"break",
"records_to_be_flushed",
".",
"append",
"(",
"record",
")",
"line",
"=",
"group",
".",
"add_log_line",
"(",
")",
"line",
".",
"set_timestamp_usec",
"(",
"record",
".",
"created",
")",
"line",
".",
"set_level",
"(",
"record",
".",
"level",
")",
"if",
"record",
".",
"source_location",
"is",
"not",
"None",
":",
"line",
".",
"mutable_source_location",
"(",
")",
".",
"set_file",
"(",
"record",
".",
"source_location",
"[",
"0",
"]",
")",
"line",
".",
"mutable_source_location",
"(",
")",
".",
"set_line",
"(",
"record",
".",
"source_location",
"[",
"1",
"]",
")",
"line",
".",
"mutable_source_location",
"(",
")",
".",
"set_function_name",
"(",
"record",
".",
"source_location",
"[",
"2",
"]",
")",
"line",
".",
"set_message",
"(",
"message",
")",
"bytes_left",
"-=",
"1",
"+",
"group",
".",
"lengthString",
"(",
"line",
".",
"ByteSize",
"(",
")",
")",
"request",
"=",
"log_service_pb",
".",
"FlushRequest",
"(",
")",
"request",
".",
"set_logs",
"(",
"group",
".",
"Encode",
"(",
")",
")",
"response",
"=",
"api_base_pb",
".",
"VoidProto",
"(",
")",
"apiproxy_stub_map",
".",
"MakeSyncCall",
"(",
"'logservice'",
",",
"'Flush'",
",",
"request",
",",
"response",
")",
"if",
"not",
"self",
".",
"_buffer",
":",
"break",
"except",
"apiproxy_errors",
".",
"CancelledError",
":",
"records_to_be_flushed",
".",
"reverse",
"(",
")",
"self",
".",
"_buffer",
".",
"extendleft",
"(",
"records_to_be_flushed",
")",
"except",
"Exception",
",",
"e",
":",
"records_to_be_flushed",
".",
"reverse",
"(",
")",
"self",
".",
"_buffer",
".",
"extendleft",
"(",
"records_to_be_flushed",
")",
"line",
"=",
"'-'",
"*",
"80",
"msg",
"=",
"'ERROR: Could not flush to log_service (%s)\\n%s\\n%s\\n%s\\n'",
"_sys_stderr",
".",
"write",
"(",
"msg",
"%",
"(",
"e",
",",
"line",
",",
"self",
".",
"_contents",
"(",
")",
",",
"line",
")",
")",
"self",
".",
"_clear",
"(",
")",
"raise",
"else",
":",
"self",
".",
"_clear",
"(",
")"
] |
https://github.com/out0fmemory/GoAgent-Always-Available/blob/c4254984fea633ce3d1893fe5901debd9f22c2a9/server/lib/google/appengine/api/logservice/logservice.py#L347-L408
|
||
jeromerony/fast_adversarial
|
45210b7c79e2deaeac9845d6c901dc2580d6e316
|
fast_adv/models/cifar10/wide_resnet.py
|
python
|
WideResNet.__init__
|
(self, depth, num_classes, widen_factor=1, dropRate=0.0)
|
[] |
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
|
[
"def",
"__init__",
"(",
"self",
",",
"depth",
",",
"num_classes",
",",
"widen_factor",
"=",
"1",
",",
"dropRate",
"=",
"0.0",
")",
":",
"super",
"(",
"WideResNet",
",",
"self",
")",
".",
"__init__",
"(",
")",
"nChannels",
"=",
"[",
"16",
",",
"16",
"*",
"widen_factor",
",",
"32",
"*",
"widen_factor",
",",
"64",
"*",
"widen_factor",
"]",
"assert",
"(",
"depth",
"-",
"4",
")",
"%",
"6",
"==",
"0",
",",
"'depth should be 6n+4'",
"n",
"=",
"(",
"depth",
"-",
"4",
")",
"//",
"6",
"block",
"=",
"BasicBlock",
"# 1st conv before any network block",
"self",
".",
"conv1",
"=",
"nn",
".",
"Conv2d",
"(",
"3",
",",
"nChannels",
"[",
"0",
"]",
",",
"kernel_size",
"=",
"3",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"1",
",",
"bias",
"=",
"False",
")",
"# 1st block",
"self",
".",
"block1",
"=",
"NetworkBlock",
"(",
"n",
",",
"nChannels",
"[",
"0",
"]",
",",
"nChannels",
"[",
"1",
"]",
",",
"block",
",",
"1",
",",
"dropRate",
")",
"# 2nd block",
"self",
".",
"block2",
"=",
"NetworkBlock",
"(",
"n",
",",
"nChannels",
"[",
"1",
"]",
",",
"nChannels",
"[",
"2",
"]",
",",
"block",
",",
"2",
",",
"dropRate",
")",
"# 3rd block",
"self",
".",
"block3",
"=",
"NetworkBlock",
"(",
"n",
",",
"nChannels",
"[",
"2",
"]",
",",
"nChannels",
"[",
"3",
"]",
",",
"block",
",",
"2",
",",
"dropRate",
")",
"# global average pooling and classifier",
"self",
".",
"bn1",
"=",
"nn",
".",
"BatchNorm2d",
"(",
"nChannels",
"[",
"3",
"]",
")",
"self",
".",
"relu",
"=",
"nn",
".",
"ReLU",
"(",
"inplace",
"=",
"True",
")",
"self",
".",
"fc",
"=",
"nn",
".",
"Linear",
"(",
"nChannels",
"[",
"3",
"]",
",",
"num_classes",
")",
"self",
".",
"nChannels",
"=",
"nChannels",
"[",
"3",
"]",
"for",
"m",
"in",
"self",
".",
"modules",
"(",
")",
":",
"if",
"isinstance",
"(",
"m",
",",
"nn",
".",
"Conv2d",
")",
":",
"n",
"=",
"m",
".",
"kernel_size",
"[",
"0",
"]",
"*",
"m",
".",
"kernel_size",
"[",
"1",
"]",
"*",
"m",
".",
"out_channels",
"m",
".",
"weight",
".",
"data",
".",
"normal_",
"(",
"0",
",",
"math",
".",
"sqrt",
"(",
"2.",
"/",
"n",
")",
")",
"elif",
"isinstance",
"(",
"m",
",",
"nn",
".",
"BatchNorm2d",
")",
":",
"m",
".",
"weight",
".",
"data",
".",
"fill_",
"(",
"1",
")",
"m",
".",
"bias",
".",
"data",
".",
"zero_",
"(",
")",
"elif",
"isinstance",
"(",
"m",
",",
"nn",
".",
"Linear",
")",
":",
"m",
".",
"bias",
".",
"data",
".",
"zero_",
"(",
")"
] |
https://github.com/jeromerony/fast_adversarial/blob/45210b7c79e2deaeac9845d6c901dc2580d6e316/fast_adv/models/cifar10/wide_resnet.py#L53-L82
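A hedged usage sketch, assuming the class above (its forward() is defined later in the same file) is importable:

import torch

model = WideResNet(depth=28, num_classes=10, widen_factor=10)  # WRN-28-10
x = torch.randn(2, 3, 32, 32)  # a CIFAR-10-sized batch
logits = model(x)
print(logits.shape)  # expected: torch.Size([2, 10])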
|
||||
klen/graphite-beacon
|
c1f071e9f557693bc90f6acbc314994985dc3b77
|
graphite_beacon/handlers/telegram.py
|
python
|
TelegramHandler.get_message
|
(self, level, alert, value, **kwargs)
|
return generated.decode().strip()
|
Standard alert message. Same format across all
graphite-beacon handlers.
 |
Standard alert message. Same format across all
graphite-beacon handlers.
|
[
"Standart",
"alert",
"message",
".",
"Same",
"format",
"across",
"all",
"graphite",
"-",
"beacon",
"handlers",
"."
] |
def get_message(self, level, alert, value, **kwargs):
"""Standart alert message. Same format across all
graphite-beacon handlers.
"""
target, ntype = kwargs.get('target'), kwargs.get('ntype')
msg_type = 'telegram' if ntype == 'graphite' else 'short'
tmpl = TEMPLATES[ntype][msg_type]
generated = tmpl.generate(
level=level, reactor=self.reactor, alert=alert,
value=value, target=target,)
return generated.decode().strip()
|
[
"def",
"get_message",
"(",
"self",
",",
"level",
",",
"alert",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"target",
",",
"ntype",
"=",
"kwargs",
".",
"get",
"(",
"'target'",
")",
",",
"kwargs",
".",
"get",
"(",
"'ntype'",
")",
"msg_type",
"=",
"'telegram'",
"if",
"ntype",
"==",
"'graphite'",
"else",
"'short'",
"tmpl",
"=",
"TEMPLATES",
"[",
"ntype",
"]",
"[",
"msg_type",
"]",
"generated",
"=",
"tmpl",
".",
"generate",
"(",
"level",
"=",
"level",
",",
"reactor",
"=",
"self",
".",
"reactor",
",",
"alert",
"=",
"alert",
",",
"value",
"=",
"value",
",",
"target",
"=",
"target",
",",
")",
"return",
"generated",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")"
] |
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/handlers/telegram.py#L160-L171
|
|
649453932/Bert-Chinese-Text-Classification-Pytorch
|
050a7b0dc75d8a2d7fd526002c4642d5329a0c27
|
pytorch_pretrained/tokenization.py
|
python
|
whitespace_tokenize
|
(text)
|
return tokens
|
Runs basic whitespace cleaning and splitting on a piece of text.
|
Runs basic whitespace cleaning and splitting on a piece of text.
|
[
"Runs",
"basic",
"whitespace",
"cleaning",
"and",
"splitting",
"on",
"a",
"piece",
"of",
"text",
"."
] |
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
|
[
"def",
"whitespace_tokenize",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"if",
"not",
"text",
":",
"return",
"[",
"]",
"tokens",
"=",
"text",
".",
"split",
"(",
")",
"return",
"tokens"
] |
https://github.com/649453932/Bert-Chinese-Text-Classification-Pytorch/blob/050a7b0dc75d8a2d7fd526002c4642d5329a0c27/pytorch_pretrained/tokenization.py#L65-L71
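A quick check of the behavior:

print(whitespace_tokenize("  runs basic\twhitespace   cleaning \n"))
# ['runs', 'basic', 'whitespace', 'cleaning']
print(whitespace_tokenize("   "))
# []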
|
|
graphcore/examples
|
46d2b7687b829778369fc6328170a7b14761e5c6
|
applications/tensorflow2/unet/losses.py
|
python
|
dice_ce_loss
|
(y_true, y_pred)
|
return ce + dice_loss
|
Calculate the combined loss.
|
Calculate the combined loss.
|
[
"Calculate",
"the",
"combined",
"loss",
"."
] |
def dice_ce_loss(y_true, y_pred):
"""Calculate the combined loss."""
ce = ce_loss(y_true, y_pred)
dice_loss = dice_coef_loss_fn(y_true, y_pred)
return ce + dice_loss
|
[
"def",
"dice_ce_loss",
"(",
"y_true",
",",
"y_pred",
")",
":",
"ce",
"=",
"ce_loss",
"(",
"y_true",
",",
"y_pred",
")",
"dice_loss",
"=",
"dice_coef_loss_fn",
"(",
"y_true",
",",
"y_pred",
")",
"return",
"ce",
"+",
"dice_loss"
] |
https://github.com/graphcore/examples/blob/46d2b7687b829778369fc6328170a7b14761e5c6/applications/tensorflow2/unet/losses.py#L61-L65
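ce_loss and dice_coef_loss_fn are defined elsewhere in losses.py; the stand-ins below are one common formulation, shown only to make the combination concrete:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy()

def dice_loss(y_true, y_pred, smooth=1.0):
    inter = tf.reduce_sum(y_true * y_pred)
    union = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred)
    return 1.0 - (2.0 * inter + smooth) / (union + smooth)

def combined_loss(y_true, y_pred):
    return bce(y_true, y_pred) + dice_loss(y_true, y_pred)

y_true = tf.constant([[1.0], [0.0]])
y_pred = tf.constant([[0.9], [0.1]])
print(float(combined_loss(y_true, y_pred)))  # small, since the prediction is good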
|
|
mchristopher/PokemonGo-DesktopMap
|
ec37575f2776ee7d64456e2a1f6b6b78830b4fe0
|
app/pywin/Lib/rfc822.py
|
python
|
AddrlistClass.getquote
|
(self)
|
return self.getdelimited('"', '"\r', 0)
|
Get a quote-delimited fragment from self's field.
|
Get a quote-delimited fragment from self's field.
|
[
"Get",
"a",
"quote",
"-",
"delimited",
"fragment",
"from",
"self",
"s",
"field",
"."
] |
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', 0)
|
[
"def",
"getquote",
"(",
"self",
")",
":",
"return",
"self",
".",
"getdelimited",
"(",
"'\"'",
",",
"'\"\\r'",
",",
"0",
")"
] |
https://github.com/mchristopher/PokemonGo-DesktopMap/blob/ec37575f2776ee7d64456e2a1f6b6b78830b4fe0/app/pywin/Lib/rfc822.py#L721-L723
|
|
ghostop14/sparrow-wifi
|
4b8289773ea4304872062f65a6ffc9352612b08e
|
sparrow-wifi.py
|
python
|
startRemoteSpectrumScan
|
(agentIP, agentPort, scan5)
|
[] |
def startRemoteSpectrumScan(agentIP, agentPort, scan5):
if scan5:
url = "http://" + agentIP + ":" + str(agentPort) + "/spectrum/scanstart5"
else:
url = "http://" + agentIP + ":" + str(agentPort) + "/spectrum/scanstart24"
statusCode, responsestr = makeGetRequest(url)
if statusCode == 200:
try:
responsedict = json.loads(responsestr)
errcode = responsedict['errcode']
errmsg = responsedict['errmsg']
return errcode, errmsg
except:
return -1, 'Error parsing response'
else:
return -2, 'Bad response from agent [' + str(statusCode) + ']'
|
[
"def",
"startRemoteSpectrumScan",
"(",
"agentIP",
",",
"agentPort",
",",
"scan5",
")",
":",
"if",
"scan5",
":",
"url",
"=",
"\"http://\"",
"+",
"agentIP",
"+",
"\":\"",
"+",
"str",
"(",
"agentPort",
")",
"+",
"\"/spectrum/scanstart5\"",
"else",
":",
"url",
"=",
"\"http://\"",
"+",
"agentIP",
"+",
"\":\"",
"+",
"str",
"(",
"agentPort",
")",
"+",
"\"/spectrum/scanstart24\"",
"statusCode",
",",
"responsestr",
"=",
"makeGetRequest",
"(",
"url",
")",
"if",
"statusCode",
"==",
"200",
":",
"try",
":",
"responsedict",
"=",
"json",
".",
"loads",
"(",
"responsestr",
")",
"errcode",
"=",
"responsedict",
"[",
"'errcode'",
"]",
"errmsg",
"=",
"responsedict",
"[",
"'errmsg'",
"]",
"return",
"errcode",
",",
"errmsg",
"except",
":",
"return",
"-",
"1",
",",
"'Error parsing response'",
"else",
":",
"return",
"-",
"2",
",",
"'Bad response from agent ['",
"+",
"str",
"(",
"statusCode",
")",
"+",
"']'"
] |
https://github.com/ghostop14/sparrow-wifi/blob/4b8289773ea4304872062f65a6ffc9352612b08e/sparrow-wifi.py#L188-L204
|
||||
mne-tools/mne-python
|
f90b303ce66a8415e64edd4605b09ac0179c1ebf
|
mne/io/brainvision/brainvision.py
|
python
|
_BVEventParser.__call__
|
(self, description)
|
return code
|
Parse BrainVision event codes (like `Stimulus/S 11`) to ints.
|
Parse BrainVision event codes (like `Stimulus/S 11`) to ints.
|
[
"Parse",
"BrainVision",
"event",
"codes",
"(",
"like",
"Stimulus",
"/",
"S",
"11",
")",
"to",
"ints",
"."
] |
def __call__(self, description):
"""Parse BrainVision event codes (like `Stimulus/S 11`) to ints."""
offsets = _BV_EVENT_IO_OFFSETS
maybe_digit = description[-3:].strip()
kind = description[:-3]
if maybe_digit.isdigit() and kind in offsets:
code = int(maybe_digit) + offsets[kind]
elif description in _OTHER_ACCEPTED_MARKERS:
code = _OTHER_ACCEPTED_MARKERS[description]
else:
code = (super(_BVEventParser, self)
.__call__(description, offset=_OTHER_OFFSET))
return code
|
[
"def",
"__call__",
"(",
"self",
",",
"description",
")",
":",
"offsets",
"=",
"_BV_EVENT_IO_OFFSETS",
"maybe_digit",
"=",
"description",
"[",
"-",
"3",
":",
"]",
".",
"strip",
"(",
")",
"kind",
"=",
"description",
"[",
":",
"-",
"3",
"]",
"if",
"maybe_digit",
".",
"isdigit",
"(",
")",
"and",
"kind",
"in",
"offsets",
":",
"code",
"=",
"int",
"(",
"maybe_digit",
")",
"+",
"offsets",
"[",
"kind",
"]",
"elif",
"description",
"in",
"_OTHER_ACCEPTED_MARKERS",
":",
"code",
"=",
"_OTHER_ACCEPTED_MARKERS",
"[",
"description",
"]",
"else",
":",
"code",
"=",
"(",
"super",
"(",
"_BVEventParser",
",",
"self",
")",
".",
"__call__",
"(",
"description",
",",
"offset",
"=",
"_OTHER_OFFSET",
")",
")",
"return",
"code"
] |
https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/io/brainvision/brainvision.py#L875-L888
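A worked example of the parse (the offset table is internal to MNE, so its value is left symbolic):

# description = 'Stimulus/S 11'
# maybe_digit = description[-3:].strip()  -> '11'
# kind        = description[:-3]          -> 'Stimulus/S'
# code        = 11 + _BV_EVENT_IO_OFFSETS['Stimulus/S']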
|
|
niosus/EasyClangComplete
|
3b16eb17735aaa3f56bb295fc5481b269ee9f2ef
|
plugin/clang/cindex50.py
|
python
|
Token.spelling
|
(self)
|
return conf.lib.clang_getTokenSpelling(self._tu, self)
|
The spelling of this token.
This is the textual representation of the token in source.
|
The spelling of this token.
|
[
"The",
"spelling",
"of",
"this",
"token",
"."
] |
def spelling(self):
"""The spelling of this token.
This is the textual representation of the token in source.
"""
return conf.lib.clang_getTokenSpelling(self._tu, self)
|
[
"def",
"spelling",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getTokenSpelling",
"(",
"self",
".",
"_tu",
",",
"self",
")"
] |
https://github.com/niosus/EasyClangComplete/blob/3b16eb17735aaa3f56bb295fc5481b269ee9f2ef/plugin/clang/cindex50.py#L3178-L3183
|