Dataset columns:

| Column | Type | Notes |
|---|---|---|
| repo | string | 7 to 55 characters |
| path | string | 4 to 223 characters |
| url | string | 87 to 315 characters |
| code | string | 75 to 104k characters |
| code_tokens | list | token sequence of the code field |
| docstring | string | 1 to 46.9k characters |
| docstring_tokens | list | token sequence of the docstring field |
| language | string | 1 distinct value |
| partition | string | 3 distinct values |
| avg_line_len | float64 | 7.91 to 980 |
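Each record below is one row of this table. As a minimal sketch of reading rows with this schema, assuming the split has been exported locally as JSON Lines (the file name here is hypothetical), one could do:

```python
# Hypothetical local export of the split; one JSON object per line with the columns above.
import json

with open("python_train.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        print(row["repo"], row["path"], row["partition"], row["avg_line_len"])
        print(row["docstring"])
        # code_tokens and docstring_tokens are plain lists of strings.
        assert isinstance(row["code_tokens"], list)
        break  # inspect the first row only
```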

---

repo: manns/pyspread
path: pyspread/src/gui/_main_window.py
url: https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L832-L905
code:

```python
def OnOpen(self, event):
    """File open event handler"""

    # If changes have taken place, save the old grid
    if undo.stack().haschanged():
        save_choice = self.interfaces.get_save_request_from_user()
        if save_choice is None:
            # Cancelled close operation
            return
        elif save_choice:
            # User wants to save content
            post_command_event(self.main_window, self.main_window.SaveMsg)

    # Get filepath from user
    f2w = get_filetypes2wildcards(
        ["pys", "pysu", "xls", "xlsx", "ods", "all"])
    filetypes = f2w.keys()
    wildcards = f2w.values()
    wildcard = "|".join(wildcards)

    message = _("Choose file to open.")
    style = wx.OPEN
    default_filetype = config["default_open_filetype"]
    try:
        default_filterindex = filetypes.index(default_filetype)
    except ValueError:
        # Be graceful if the user has entered an unknown filetype
        default_filterindex = 0

    get_fp_fidx = self.interfaces.get_filepath_findex_from_user
    filepath, filterindex = get_fp_fidx(wildcard, message, style,
                                        filterindex=default_filterindex)
    if filepath is None:
        return
    filetype = filetypes[filterindex]

    # Change the main window filepath state
    self.main_window.filepath = filepath

    # Load file into grid
    post_command_event(self.main_window,
                       self.main_window.GridActionOpenMsg,
                       attr={"filepath": filepath, "filetype": filetype})

    # Set Window title to new filepath
    title_text = filepath.split("/")[-1] + " - pyspread"
    post_command_event(self.main_window,
                       self.main_window.TitleMsg, text=title_text)

    self.main_window.grid.ForceRefresh()

    if is_gtk():
        try:
            wx.Yield()
        except:
            pass

    # Update savepoint and clear the undo stack
    undo.stack().clear()
    undo.stack().savepoint()

    # Update content changed state
    try:
        post_command_event(self.main_window, self.ContentChangedMsg)
    except TypeError:
        # The main window does not exist any more
        pass
```
code_tokens: token-by-token split of the code field above (list omitted)
docstring: File open event handler
docstring_tokens: ["File", "open", "event", "handler"]
language: python
partition: train
avg_line_len: 31.378378
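The `OnOpen` handler above relies on pyspread's own message-posting helpers and interfaces. For reference, a minimal sketch of how such a handler is typically bound to a File-Open menu entry in wxPython follows; the window class, menu wiring, and handler body here are hypothetical and not pyspread's own code.

```python
import wx


class MainWindow(wx.Frame):
    """Hypothetical frame showing how an OnOpen-style handler gets bound."""

    def __init__(self):
        super(MainWindow, self).__init__(None, title="pyspread-like window")
        menubar = wx.MenuBar()
        file_menu = wx.Menu()
        open_item = file_menu.Append(wx.ID_OPEN, "&Open\tCtrl+O")
        menubar.Append(file_menu, "&File")
        self.SetMenuBar(menubar)
        # The handler receives the menu event, just like OnOpen(self, event) above.
        self.Bind(wx.EVT_MENU, self.OnOpen, open_item)

    def OnOpen(self, event):
        # Placeholder body; the real pyspread handler is shown in the row above.
        print("Open requested")


if __name__ == "__main__":
    app = wx.App(False)
    MainWindow().Show()
    app.MainLoop()
```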

---

repo: openfisca/openfisca-france-indirect-taxation
path: openfisca_france_indirect_taxation/param/preprocessing.py
url: https://github.com/openfisca/openfisca-france-indirect-taxation/blob/b4bc7da90a1126ebfc3af2c3ec61de5a2b70bb2e/openfisca_france_indirect_taxation/param/preprocessing.py#L29-L475
code:

```python
def preprocess_legislation(legislation_json):
    '''
    Preprocess the legislation parameters to add prices and amounts from national accounts
    '''
    import os
    import pkg_resources
    import pandas as pd

    # Add fuel prices to the tree
    default_config_files_directory = os.path.join(
        pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
    prix_annuel_carburants = pd.read_csv(
        os.path.join(
            default_config_files_directory,
            'openfisca_france_indirect_taxation',
            'assets',
            'prix',
            'prix_annuel_carburants.csv'
            ), sep=';'
        )

    prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
    prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
    all_values = {}
    prix_carburants = {
        "@type": "Node",
        "description": "prix des carburants en euros par hectolitre",
        "children": {},
        }

    # For super_95_e10, we need to use the price of super_95 between 2009 and 2012 inclusive,
    # because we don't have the data. We use super_95 because it is very close and won't affect the results too much.
    prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
    all_values['super_95_e10_ttc'] = []
    for year in range(1990, 2009):
        values1 = dict()
        values1['start'] = u'{}-01-01'.format(year)
        values1['stop'] = u'{}-12-31'.format(year)
        values1['value'] = prix_annuel.loc[year] * 100
        all_values['super_95_e10_ttc'].append(values1)

    prix_annuel = prix_annuel_carburants['super_95_ttc']
    for year in range(2009, 2013):
        values2 = dict()
        values2['start'] = u'{}-01-01'.format(year)
        values2['stop'] = u'{}-12-31'.format(year)
        values2['value'] = prix_annuel.loc[year] * 100
        all_values['super_95_e10_ttc'].append(values2)

    prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
    for year in range(2013, 2015):
        values3 = dict()
        values3['start'] = u'{}-01-01'.format(year)
        values3['stop'] = u'{}-12-31'.format(year)
        values3['value'] = prix_annuel.loc[year] * 100
        all_values['super_95_e10_ttc'].append(values3)

    prix_carburants['children']['super_95_e10_ttc'] = {
        "@type": "Parameter",
        "description": 'super_95_e10_ttc'.replace('_', ' '),
        "format": "float",
        "values": all_values['super_95_e10_ttc']
        }

    for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht', 'super_98_ttc',
            'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht', 'super_plombe_ttc']:
        assert element in prix_annuel_carburants.columns
        prix_annuel = prix_annuel_carburants[element]
        all_values[element] = []
        for year in range(1990, 2015):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = prix_annuel.loc[year] * 100
            all_values[element].append(values)

        prix_carburants['children'][element] = {
            "@type": "Parameter",
            "description": element.replace('_', ' '),
            "format": "float",
            "values": all_values[element]
            }

    legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants

    # Add the number of vehicles in circulation to the tree
    default_config_files_directory = os.path.join(
        pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
    parc_annuel_moyen_vp = pd.read_csv(
        os.path.join(
            default_config_files_directory,
            'openfisca_france_indirect_taxation',
            'assets',
            'quantites',
            'parc_annuel_moyen_vp.csv'
            ), sep=';'
        )

    parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
    values_parc = {}
    parc_vp = {
        "@type": "Node",
        "description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
        "children": {},
        }

    for element in ['diesel', 'essence']:
        taille_parc = parc_annuel_moyen_vp[element]
        values_parc[element] = []
        for year in range(1990, 2014):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = taille_parc.loc[year]
            values_parc[element].append(values)

        parc_vp['children'][element] = {
            "@type": "Parameter",
            "description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
            "format": "float",
            "values": values_parc[element]
            }

    legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp

    # Add the total quantity of fuel consumed per year to the tree
    default_config_files_directory = os.path.join(
        pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
    quantite_carbu_vp_france = pd.read_csv(
        os.path.join(
            default_config_files_directory,
            'openfisca_france_indirect_taxation',
            'assets',
            'quantites',
            'quantite_carbu_vp_france.csv'
            ), sep=';'
        )

    quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
    values_quantite = {}
    quantite_carbu_vp = {
        "@type": "Node",
        "description": "quantite de carburants consommés en France métropolitaine",
        "children": {},
        }

    for element in ['diesel', 'essence']:
        quantite_carburants = quantite_carbu_vp_france[element]
        values_quantite[element] = []
        for year in range(1990, 2014):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = quantite_carburants.loc[year]
            values_quantite[element].append(values)

        quantite_carbu_vp['children'][element] = {
            "@type": "Parameter",
            "description": "consommation totale de " + element + " en France",
            "format": "float",
            "values": values_quantite[element]
            }

    legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp

    # Add the shares of each type of supercarburant (SP95, SP98, E10, etc.) among supercarburants
    default_config_files_directory = os.path.join(
        pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
    part_des_types_de_supercarburants = pd.read_csv(
        os.path.join(
            default_config_files_directory,
            'openfisca_france_indirect_taxation',
            'assets',
            'part_des_types_de_supercarburants.csv'
            ), sep=';'
        )

    del part_des_types_de_supercarburants['Source']
    part_des_types_de_supercarburants = \
        part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
    part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
    part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')

    # delete share of e_85 because we have no data for its price
    # When the sum of all shares is not one, need to multiply each share by the same coefficient
    cols = part_des_types_de_supercarburants.columns
    for element in cols:
        part_des_types_de_supercarburants[element] = (
            part_des_types_de_supercarburants[element] /
            (part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
            )

    del part_des_types_de_supercarburants['sp_e85']
    del part_des_types_de_supercarburants['somme']

    cols = part_des_types_de_supercarburants.columns
    part_des_types_de_supercarburants['somme'] = 0
    for element in cols:
        part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]

    assert (part_des_types_de_supercarburants['somme'] == 1).any(), "The weighting of the shares did not work"

    values_part_supercarburants = {}
    part_type_supercaburant = {
        "@type": "Node",
        "description": "part de la consommation totale d'essence de chaque type supercarburant",
        "children": {},
        }

    for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
        part_par_carburant = part_des_types_de_supercarburants[element]
        values_part_supercarburants[element] = []
        for year in range(2000, 2015):
            values = dict()
            values['start'] = u'{}-01-01'.format(year)
            values['stop'] = u'{}-12-31'.format(year)
            values['value'] = part_par_carburant.loc[year]
            values_part_supercarburants[element].append(values)

        part_type_supercaburant['children'][element] = {
            "@type": "Parameter",
            "description": "part de " + element + " dans la consommation totale d'essences",
            "format": "float",
            "values": values_part_supercarburants[element]
            }

    legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
        part_type_supercaburant

    # Add data from comptabilité nationale about alcohol
    alcool_conso_et_vin = {
        "@type": "Node",
        "description": "alcools",
        "children": {},
        }

    alcool_conso_et_vin['children']['vin'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur le vin",
        "children": {
            "droit_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_vin": {
                "@type": "Parameter",
                "description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            },
        }

    alcool_conso_et_vin['children']['biere'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur la bière",
        "children": {
            "droit_cn_biere": {
                "@type": "Parameter",
                "description": "Masse droit biere selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_biere": {
                "@type": "Parameter",
                "description": u"Masse consommation biere selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            },
        }

    alcool_conso_et_vin['children']['alcools_forts'] = {
        "@type": "Node",
        "description": "Pour calculer le taux de taxation implicite sur alcools forts",
        "children": {
            "droit_cn_alcools": {
                "@type": "Parameter",
                "description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
                "format": "float",
                "values": [
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
                    # TODO: Problème pour les alcools forts, chiffres différents entre les deux bases excel !
                    ],
                },
            "droit_cn_alcools_total": {
                "@type": "Parameter",
                "description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
                    # {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
                    ],
                },
            "masse_conso_cn_alcools": {
                "@type": "Parameter",
                "description": u"Masse consommation alcool selon comptabilité nationale",
                "format": "float",
                "values": [
                    {'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
                    {'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
                    {'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
                    {'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
                    {'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
                    {'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
                    {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
                    {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
                    {'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
                    {'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
                    {'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
                    {'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
                    {'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
                    {'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
                    {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
                    {'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
                    {'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
                    {'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
                    {'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
                    ],
                },
            },
        }

    legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin

    # Make the change from francs to euros for excise taxes in ticpe
    keys_ticpe = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'].keys()
    for element in keys_ticpe:
        get_values = \
            legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'][element]['values']
        for each_value in get_values:
            get_character = '{}'.format(each_value['start'])
            year = int(get_character[:4])
            if year < 2002:
                each_value['value'] = each_value['value'] / 6.55957
            else:
                each_value['value'] = each_value['value']

    return legislation_json
```
code_tokens: token-by-token split of the code field above (list omitted)
docstring: Preprocess the legislation parameters to add prices and amounts from national accounts
docstring_tokens: ["Preprocess", "the", "legislation", "parameters", "to", "add", "prices", "and", "amounts", "from", "national", "accounts"]
language: python
partition: train
avg_line_len: 53.340045
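`preprocess_legislation` mutates and returns the legislation tree it is given. A rough sketch of the expected input shape and of a call is shown below; it assumes the package and its CSV assets are installed (the function reads them from disk), and the `ticpe_gazole` key and the numbers are placeholders, not real parameters.

```python
# Hypothetical, minimal legislation tree; only the keys the function touches are shown.
legislation_json = {
    'children': {
        'imposition_indirecte': {
            'children': {
                'ticpe': {
                    'children': {
                        'ticpe_gazole': {  # hypothetical parameter name
                            'values': [
                                # Pre-2002 amounts are in francs; the function divides them by 6.55957.
                                {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 255.0},
                                {'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 38.9},
                            ],
                        },
                    },
                },
            },
        },
    },
}

enriched = preprocess_legislation(legislation_json)
# The call adds prix_carburants, parc_vp, quantite_carbu_vp, part_type_supercarburants and
# alcool_conso_et_vin under imposition_indirecte, and converts pre-2002 TICPE values to euros.
print(sorted(enriched['children']['imposition_indirecte']['children'].keys()))
```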

---

repo: angr/angr
path: angr/storage/paged_memory.py
url: https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/paged_memory.py#L601-L613
code:

```python
def contains_no_backer(self, addr):
    """
    Tests if the address is contained in any page of paged memory, without considering memory backers.

    :param int addr: The address to test.
    :return: True if the address is included in one of the pages, False otherwise.
    :rtype: bool
    """

    for i, p in self._pages.items():
        if i * self._page_size <= addr < (i + 1) * self._page_size:
            return addr - (i * self._page_size) in p.keys()

    return False
```
code_tokens: token-by-token split of the code field above (list omitted)
docstring:
    Tests if the address is contained in any page of paged memory, without considering memory backers.
    :param int addr: The address to test.
    :return: True if the address is included in one of the pages, False otherwise.
    :rtype: bool
[
"Tests",
"if",
"the",
"address",
"is",
"contained",
"in",
"any",
"page",
"of",
"paged",
"memory",
"without",
"considering",
"memory",
"backers",
"."
] |
python
|
train
| 39.076923 |
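The page arithmetic relied on by contains_no_backer can be illustrated without angr itself. A standalone sketch follows, with a hypothetical page size and a plain dict standing in for self._pages.

# Standalone sketch of the page/offset arithmetic used by contains_no_backer.
# The page size and page contents are hypothetical.
PAGE_SIZE = 0x1000
pages = {2: {0x10: b'A'}}   # page index 2 holds a single byte at offset 0x10

def contains_no_backer(addr, pages=pages, page_size=PAGE_SIZE):
    for i, p in pages.items():
        if i * page_size <= addr < (i + 1) * page_size:
            return addr - (i * page_size) in p
    return False

print(contains_no_backer(0x2010))   # True: offset 0x10 exists inside page 2
print(contains_no_backer(0x2020))   # False: page 2 exists, offset does not
print(contains_no_backer(0x5000))   # False: no page covers this address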
google-research/batch-ppo
|
agents/algorithms/ppo/utility.py
|
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/utility.py#L94-L105
|
def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return')
|
[
"def",
"lambda_return",
"(",
"reward",
",",
"value",
",",
"length",
",",
"discount",
",",
"lambda_",
")",
":",
"timestep",
"=",
"tf",
".",
"range",
"(",
"reward",
".",
"shape",
"[",
"1",
"]",
".",
"value",
")",
"mask",
"=",
"tf",
".",
"cast",
"(",
"timestep",
"[",
"None",
",",
":",
"]",
"<",
"length",
"[",
":",
",",
"None",
"]",
",",
"tf",
".",
"float32",
")",
"sequence",
"=",
"mask",
"*",
"reward",
"+",
"discount",
"*",
"value",
"*",
"(",
"1",
"-",
"lambda_",
")",
"discount",
"=",
"mask",
"*",
"discount",
"*",
"lambda_",
"sequence",
"=",
"tf",
".",
"stack",
"(",
"[",
"sequence",
",",
"discount",
"]",
",",
"2",
")",
"return_",
"=",
"tf",
".",
"reverse",
"(",
"tf",
".",
"transpose",
"(",
"tf",
".",
"scan",
"(",
"lambda",
"agg",
",",
"cur",
":",
"cur",
"[",
"0",
"]",
"+",
"cur",
"[",
"1",
"]",
"*",
"agg",
",",
"tf",
".",
"transpose",
"(",
"tf",
".",
"reverse",
"(",
"sequence",
",",
"[",
"1",
"]",
")",
",",
"[",
"1",
",",
"2",
",",
"0",
"]",
")",
",",
"tf",
".",
"zeros_like",
"(",
"value",
"[",
":",
",",
"-",
"1",
"]",
")",
",",
"1",
",",
"False",
")",
",",
"[",
"1",
",",
"0",
"]",
")",
",",
"[",
"1",
"]",
")",
"return",
"tf",
".",
"check_numerics",
"(",
"tf",
".",
"stop_gradient",
"(",
"return_",
")",
",",
"'return'",
")"
] |
TD-lambda returns.
|
[
"TD",
"-",
"lambda",
"returns",
"."
] |
python
|
train
| 50.666667 |
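Read element-wise, the tf.scan above appears to evaluate the backward recurrence return_t = reward_t + discount * (1 - lambda) * value_t + discount * lambda * return_{t+1}. A plain-Python sketch of that recurrence for a single unmasked sequence (the numbers are arbitrary illustration values):

# Plain-Python sketch of the backward recurrence evaluated by the tf.scan above,
# for a single unpadded sequence; the inputs are arbitrary illustration values.
reward = [1.0, 1.0, 1.0]
value = [0.5, 0.5, 0.5]
discount = 0.9
lambda_ = 0.95

returns = [0.0] * len(reward)
running = 0.0   # matches the tf.zeros_like initializer of the scan
for t in reversed(range(len(reward))):
    running = (reward[t] + discount * value[t] * (1 - lambda_)
               + discount * lambda_ * running)
    returns[t] = running

print(returns)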
saltstack/salt
|
salt/modules/azurearm_compute.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_compute.py#L512-L537
|
def virtual_machines_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all
'''
result = {}
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
vms = __utils__['azurearm.paged_object_to_list'](
compconn.virtual_machines.list_all()
)
for vm in vms: # pylint: disable=invalid-name
result[vm['name']] = vm
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result
|
[
"def",
"virtual_machines_list_all",
"(",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"compconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'compute'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"vms",
"=",
"__utils__",
"[",
"'azurearm.paged_object_to_list'",
"]",
"(",
"compconn",
".",
"virtual_machines",
".",
"list_all",
"(",
")",
")",
"for",
"vm",
"in",
"vms",
":",
"# pylint: disable=invalid-name",
"result",
"[",
"vm",
"[",
"'name'",
"]",
"]",
"=",
"vm",
"except",
"CloudError",
"as",
"exc",
":",
"__utils__",
"[",
"'azurearm.log_cloud_error'",
"]",
"(",
"'compute'",
",",
"str",
"(",
"exc",
")",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"{",
"'error'",
":",
"str",
"(",
"exc",
")",
"}",
"return",
"result"
] |
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all
|
[
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] |
python
|
train
| 26.461538 |
facebook/watchman
|
python/pywatchman/capabilities.py
|
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman/capabilities.py#L59-L77
|
def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers["version"])
vers["capabilities"] = {}
for name in opts["optional"]:
vers["capabilities"][name] = check(parsed_version, name)
failed = False # noqa: F841 T25377293 Grandfathered in
for name in opts["required"]:
have = check(parsed_version, name)
vers["capabilities"][name] = have
if not have:
vers["error"] = (
"client required capability `"
+ name
+ "` is not supported by this server"
)
return vers
|
[
"def",
"synthesize",
"(",
"vers",
",",
"opts",
")",
":",
"parsed_version",
"=",
"parse_version",
"(",
"vers",
"[",
"\"version\"",
"]",
")",
"vers",
"[",
"\"capabilities\"",
"]",
"=",
"{",
"}",
"for",
"name",
"in",
"opts",
"[",
"\"optional\"",
"]",
":",
"vers",
"[",
"\"capabilities\"",
"]",
"[",
"name",
"]",
"=",
"check",
"(",
"parsed_version",
",",
"name",
")",
"failed",
"=",
"False",
"# noqa: F841 T25377293 Grandfathered in",
"for",
"name",
"in",
"opts",
"[",
"\"required\"",
"]",
":",
"have",
"=",
"check",
"(",
"parsed_version",
",",
"name",
")",
"vers",
"[",
"\"capabilities\"",
"]",
"[",
"name",
"]",
"=",
"have",
"if",
"not",
"have",
":",
"vers",
"[",
"\"error\"",
"]",
"=",
"(",
"\"client required capability `\"",
"+",
"name",
"+",
"\"` is not supported by this server\"",
")",
"return",
"vers"
] |
Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
|
[
"Synthesize",
"a",
"capability",
"enabled",
"version",
"response",
"This",
"is",
"a",
"very",
"limited",
"emulation",
"for",
"relatively",
"recent",
"feature",
"sets"
] |
python
|
train
| 37.631579 |
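The control flow of synthesize is easy to follow with stand-ins for parse_version and check. A self-contained sketch is given below; the capability table and the version-comparison rule are invented for illustration and are not the real pywatchman helpers.

# Self-contained sketch of the synthesize() pattern above; parse_version and
# check are simplified stand-ins, and the capability table is invented.
CAPABILITY_SINCE = {"term-dirname": (3, 1), "wildmatch": (3, 7)}

def parse_version(vers):
    return tuple(int(x) for x in vers.split("."))

def check(parsed_version, name):
    return parsed_version >= CAPABILITY_SINCE.get(name, (999, 0))

def synthesize(vers, opts):
    parsed_version = parse_version(vers["version"])
    vers["capabilities"] = {}
    for name in opts["optional"]:
        vers["capabilities"][name] = check(parsed_version, name)
    for name in opts["required"]:
        have = check(parsed_version, name)
        vers["capabilities"][name] = have
        if not have:
            vers["error"] = ("client required capability `" + name +
                             "` is not supported by this server")
    return vers

print(synthesize({"version": "3.5.0"},
                 {"optional": ["term-dirname"], "required": ["wildmatch"]}))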
mjirik/imtools
|
imtools/datasets.py
|
https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/datasets.py#L14-L47
|
def sliver_reader(filename_end_mask="*[0-9].mhd", sliver_reference_dir="~/data/medical/orig/sliver07/training/", read_orig=True, read_seg=False):
"""
Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
"""
sliver_reference_dir = op.expanduser(sliver_reference_dir)
orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask)
ref_fnames = glob.glob(sliver_reference_dir + "*seg"+ filename_end_mask)
orig_fnames.sort()
ref_fnames.sort()
output = []
for i in range(0, len(orig_fnames)):
oname = orig_fnames[i]
rname = ref_fnames[i]
vs_mm = None
ref_data= None
orig_data = None
if read_orig:
orig_data, metadata = io3d.datareader.read(oname)
vs_mm = metadata['voxelsize_mm']
if read_seg:
ref_data, metadata = io3d.datareader.read(rname)
vs_mm = metadata['voxelsize_mm']
import re
numeric_label = re.search(".*g(\d+)", oname).group(1)
out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data)
yield out
|
[
"def",
"sliver_reader",
"(",
"filename_end_mask",
"=",
"\"*[0-9].mhd\"",
",",
"sliver_reference_dir",
"=",
"\"~/data/medical/orig/sliver07/training/\"",
",",
"read_orig",
"=",
"True",
",",
"read_seg",
"=",
"False",
")",
":",
"sliver_reference_dir",
"=",
"op",
".",
"expanduser",
"(",
"sliver_reference_dir",
")",
"orig_fnames",
"=",
"glob",
".",
"glob",
"(",
"sliver_reference_dir",
"+",
"\"*orig\"",
"+",
"filename_end_mask",
")",
"ref_fnames",
"=",
"glob",
".",
"glob",
"(",
"sliver_reference_dir",
"+",
"\"*seg\"",
"+",
"filename_end_mask",
")",
"orig_fnames",
".",
"sort",
"(",
")",
"ref_fnames",
".",
"sort",
"(",
")",
"output",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"orig_fnames",
")",
")",
":",
"oname",
"=",
"orig_fnames",
"[",
"i",
"]",
"rname",
"=",
"ref_fnames",
"[",
"i",
"]",
"vs_mm",
"=",
"None",
"ref_data",
"=",
"None",
"orig_data",
"=",
"None",
"if",
"read_orig",
":",
"orig_data",
",",
"metadata",
"=",
"io3d",
".",
"datareader",
".",
"read",
"(",
"oname",
")",
"vs_mm",
"=",
"metadata",
"[",
"'voxelsize_mm'",
"]",
"if",
"read_seg",
":",
"ref_data",
",",
"metadata",
"=",
"io3d",
".",
"datareader",
".",
"read",
"(",
"rname",
")",
"vs_mm",
"=",
"metadata",
"[",
"'voxelsize_mm'",
"]",
"import",
"re",
"numeric_label",
"=",
"re",
".",
"search",
"(",
"\".*g(\\d+)\"",
",",
"oname",
")",
".",
"group",
"(",
"1",
")",
"out",
"=",
"(",
"numeric_label",
",",
"vs_mm",
",",
"oname",
",",
"orig_data",
",",
"rname",
",",
"ref_data",
")",
"yield",
"out"
] |
Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
|
[
"Generator",
"for",
"reading",
"sliver",
"data",
"from",
"directory",
"structure",
"."
] |
python
|
train
| 41.558824 |
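sliver_reader is a generator, so typical use is a for-loop over the tuples it yields. A brief usage sketch, assuming imtools is importable and the sliver07 training data is present at the function's default location:

# Hypothetical usage of the generator above; it assumes the sliver07 training
# data is present at the default location and that imtools is installed.
from imtools import datasets

for numeric_label, vs_mm, oname, orig_data, rname, ref_data in datasets.sliver_reader(
        read_orig=True, read_seg=True):
    print(numeric_label, vs_mm, orig_data.shape, ref_data.shape)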
blockstack/blockstack-core
|
api/search/substring_search.py
|
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L41-L50
|
def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if(target_word.startswith(query_word)):
return query_word
return False
|
[
"def",
"anyword_substring_search_inner",
"(",
"query_word",
",",
"target_words",
")",
":",
"for",
"target_word",
"in",
"target_words",
":",
"if",
"(",
"target_word",
".",
"startswith",
"(",
"query_word",
")",
")",
":",
"return",
"query_word",
"return",
"False"
] |
return True if ANY target_word matches a query_word
|
[
"return",
"True",
"if",
"ANY",
"target_word",
"matches",
"a",
"query_word"
] |
python
|
train
| 25.5 |
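Note that the helper above returns the query word on a prefix hit and False otherwise, rather than the literal True its docstring mentions. A quick illustration with made-up inputs:

# Illustration of the prefix-matching helper above (inputs are made up).
def anyword_substring_search_inner(query_word, target_words):
    for target_word in target_words:
        if target_word.startswith(query_word):
            return query_word
    return False

print(anyword_substring_search_inner("block", ["blockstack", "core"]))  # 'block'
print(anyword_substring_search_inner("chain", ["blockstack", "core"]))  # False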
anuragkumarak95/wordnet
|
wordnet/tf_idf_generator.py
|
https://github.com/anuragkumarak95/wordnet/blob/7aba239ddebb0971e9e76124890373b60a2573c8/wordnet/tf_idf_generator.py#L35-L89
|
def find_tf_idf(file_names=['./../test/testdata'],prev_file_path=None, dump_path=None):
'''Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs.
'''
tf_idf = [] # will hold a dict of word_count for every doc(line in a doc in this case)
df = defaultdict(int)
    # this statement is useful for altering an existing tf-idf file and adding new docs in itself.(## memory is now the biggest issue)
if prev_file_path:
print(TAG,'modifying over exising file.. @',prev_file_path)
df,tf_idf = pickle.load(open(prev_file_path,'rb'))
prev_doc_count = len(df)
prev_corpus_length = len(tf_idf)
for f in file_names:
# never use 'rb' for textual data, it creates something like, {b'line-inside-the-doc'}
with open(f,'r') as file1:
#create word_count dict for all docs
for line in file1:
wdict = defaultdict(int)
#find the amount of doc a word is in
for word in set(line.split()):
df[word] +=1
#find the count of all words in every doc
for word in line.split():
wdict[word] += 1
tf_idf.append(wdict)
#calculating final TF-IDF values for all words in all docs(line is a doc in this case)
for doc in tf_idf:
for key in doc:
true_idf = math.log(len(tf_idf)/df[key])
true_tf = doc[key]/float(len(doc))
doc[key] = true_tf * true_idf
print(TAG,'Total number of unique words in corpus',len(df),'( '+paint('++'+str(len(df)-prev_doc_count),'g')+' )' if prev_file_path else '')
print(TAG,'Total number of docs in corpus:',len(tf_idf),'( '+paint('++'+str(len(tf_idf)-prev_corpus_length),'g')+' )' if prev_file_path else '')
# dump if a dir-path is given
if dump_path:
if dump_path[-8:] == 'tfidfpkl':
pickle.dump((df,tf_idf),open(dump_path,'wb'),protocol=pickle.HIGHEST_PROTOCOL)
print(TAG,'Dumping TF-IDF vars @',dump_path)
return df,tf_idf
|
[
"def",
"find_tf_idf",
"(",
"file_names",
"=",
"[",
"'./../test/testdata'",
"]",
",",
"prev_file_path",
"=",
"None",
",",
"dump_path",
"=",
"None",
")",
":",
"tf_idf",
"=",
"[",
"]",
"# will hold a dict of word_count for every doc(line in a doc in this case)",
"df",
"=",
"defaultdict",
"(",
"int",
")",
"# this statement is useful for altering existant tf-idf file and adding new docs in itself.(## memory is now the biggest issue)",
"if",
"prev_file_path",
":",
"print",
"(",
"TAG",
",",
"'modifying over exising file.. @'",
",",
"prev_file_path",
")",
"df",
",",
"tf_idf",
"=",
"pickle",
".",
"load",
"(",
"open",
"(",
"prev_file_path",
",",
"'rb'",
")",
")",
"prev_doc_count",
"=",
"len",
"(",
"df",
")",
"prev_corpus_length",
"=",
"len",
"(",
"tf_idf",
")",
"for",
"f",
"in",
"file_names",
":",
"# never use 'rb' for textual data, it creates something like, {b'line-inside-the-doc'}",
"with",
"open",
"(",
"f",
",",
"'r'",
")",
"as",
"file1",
":",
"#create word_count dict for all docs",
"for",
"line",
"in",
"file1",
":",
"wdict",
"=",
"defaultdict",
"(",
"int",
")",
"#find the amount of doc a word is in",
"for",
"word",
"in",
"set",
"(",
"line",
".",
"split",
"(",
")",
")",
":",
"df",
"[",
"word",
"]",
"+=",
"1",
"#find the count of all words in every doc",
"for",
"word",
"in",
"line",
".",
"split",
"(",
")",
":",
"wdict",
"[",
"word",
"]",
"+=",
"1",
"tf_idf",
".",
"append",
"(",
"wdict",
")",
"#calculating final TF-IDF values for all words in all docs(line is a doc in this case)",
"for",
"doc",
"in",
"tf_idf",
":",
"for",
"key",
"in",
"doc",
":",
"true_idf",
"=",
"math",
".",
"log",
"(",
"len",
"(",
"tf_idf",
")",
"/",
"df",
"[",
"key",
"]",
")",
"true_tf",
"=",
"doc",
"[",
"key",
"]",
"/",
"float",
"(",
"len",
"(",
"doc",
")",
")",
"doc",
"[",
"key",
"]",
"=",
"true_tf",
"*",
"true_idf",
"print",
"(",
"TAG",
",",
"'Total number of unique words in corpus'",
",",
"len",
"(",
"df",
")",
",",
"'( '",
"+",
"paint",
"(",
"'++'",
"+",
"str",
"(",
"len",
"(",
"df",
")",
"-",
"prev_doc_count",
")",
",",
"'g'",
")",
"+",
"' )'",
"if",
"prev_file_path",
"else",
"''",
")",
"print",
"(",
"TAG",
",",
"'Total number of docs in corpus:'",
",",
"len",
"(",
"tf_idf",
")",
",",
"'( '",
"+",
"paint",
"(",
"'++'",
"+",
"str",
"(",
"len",
"(",
"tf_idf",
")",
"-",
"prev_corpus_length",
")",
",",
"'g'",
")",
"+",
"' )'",
"if",
"prev_file_path",
"else",
"''",
")",
"# dump if a dir-path is given",
"if",
"dump_path",
":",
"if",
"dump_path",
"[",
"-",
"8",
":",
"]",
"==",
"'tfidfpkl'",
":",
"pickle",
".",
"dump",
"(",
"(",
"df",
",",
"tf_idf",
")",
",",
"open",
"(",
"dump_path",
",",
"'wb'",
")",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"print",
"(",
"TAG",
",",
"'Dumping TF-IDF vars @'",
",",
"dump_path",
")",
"return",
"df",
",",
"tf_idf"
] |
Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs.
|
[
"Function",
"to",
"create",
"a",
"TF",
"-",
"IDF",
"list",
"of",
"dictionaries",
"for",
"a",
"corpus",
"of",
"docs",
".",
"If",
"you",
"opt",
"for",
"dumping",
"the",
"data",
"you",
"can",
"provide",
"a",
"file_path",
"with",
".",
"tfidfpkl",
"extension",
"(",
"standard",
"made",
"for",
"better",
"understanding",
")",
"and",
"also",
"re",
"-",
"generate",
"a",
"new",
"tfidf",
"list",
"which",
"overrides",
"over",
"an",
"old",
"one",
"by",
"mentioning",
"its",
"path",
"."
] |
python
|
train
| 49.563636 |
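A short usage sketch of find_tf_idf follows, writing a tiny two-line corpus to a temporary file first. The file contents are invented, and the import path is assumed to match the module path shown above (wordnet/tf_idf_generator.py).

# Hypothetical usage of find_tf_idf above on a tiny throw-away corpus.
import os
import tempfile
from wordnet.tf_idf_generator import find_tf_idf

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
    fh.write("the cat sat on the mat\n")
    fh.write("the dog ate my homework\n")
    corpus_path = fh.name

df, tf_idf = find_tf_idf(file_names=[corpus_path])
print(len(df), len(tf_idf))   # unique words in the corpus, number of docs (lines)
os.remove(corpus_path)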
bwohlberg/sporco
|
sporco/dictlrn/prlcnscdl.py
|
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/dictlrn/prlcnscdl.py#L687-L700
|
def cbpdnmd_ystep(k):
"""Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
if mp_W.shape[0] > 1:
W = mp_W[k]
else:
W = mp_W
AXU0 = mp_DX[k] - mp_S[k] + mp_Z_U0[k]
AXU1 = mp_Z_X[k] + mp_Z_U1[k]
mp_Z_Y0[k] = mp_xrho*AXU0 / (W**2 + mp_xrho)
mp_Z_Y1[k] = sp.prox_l1(AXU1, (mp_lmbda/mp_xrho))
|
[
"def",
"cbpdnmd_ystep",
"(",
"k",
")",
":",
"if",
"mp_W",
".",
"shape",
"[",
"0",
"]",
">",
"1",
":",
"W",
"=",
"mp_W",
"[",
"k",
"]",
"else",
":",
"W",
"=",
"mp_W",
"AXU0",
"=",
"mp_DX",
"[",
"k",
"]",
"-",
"mp_S",
"[",
"k",
"]",
"+",
"mp_Z_U0",
"[",
"k",
"]",
"AXU1",
"=",
"mp_Z_X",
"[",
"k",
"]",
"+",
"mp_Z_U1",
"[",
"k",
"]",
"mp_Z_Y0",
"[",
"k",
"]",
"=",
"mp_xrho",
"*",
"AXU0",
"/",
"(",
"W",
"**",
"2",
"+",
"mp_xrho",
")",
"mp_Z_Y1",
"[",
"k",
"]",
"=",
"sp",
".",
"prox_l1",
"(",
"AXU1",
",",
"(",
"mp_lmbda",
"/",
"mp_xrho",
")",
")"
] |
Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
|
[
"Do",
"the",
"Y",
"step",
"of",
"the",
"cbpdn",
"stage",
".",
"The",
"only",
"parameter",
"is",
"the",
"slice",
"index",
"k",
"and",
"there",
"are",
"no",
"return",
"values",
";",
"all",
"inputs",
"and",
"outputs",
"are",
"from",
"and",
"to",
"global",
"variables",
"."
] |
python
|
train
| 32.142857 |
twilio/twilio-python
|
twilio/rest/proxy/v1/service/session/__init__.py
|
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/proxy/v1/service/session/__init__.py#L154-L163
|
def get(self, sid):
"""
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
"""
return SessionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
|
[
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"SessionContext",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
] |
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
|
[
"Constructs",
"a",
"SessionContext"
] |
python
|
train
| 37.6 |
autokey/autokey
|
lib/autokey/scripting.py
|
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L700-L715
|
def choose_colour(self, title="Select Colour", **kwargs):
"""
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
"""
return_data = self._run_zenity(title, ["--color-selection"], kwargs)
if return_data.successful:
converted_colour = ColourData.from_zenity_tuple_str(return_data.data)
return DialogData(return_data.return_code, converted_colour)
else:
return DialogData(return_data.return_code, None)
|
[
"def",
"choose_colour",
"(",
"self",
",",
"title",
"=",
"\"Select Colour\"",
",",
"*",
"*",
"kwargs",
")",
":",
"return_data",
"=",
"self",
".",
"_run_zenity",
"(",
"title",
",",
"[",
"\"--color-selection\"",
"]",
",",
"kwargs",
")",
"if",
"return_data",
".",
"successful",
":",
"converted_colour",
"=",
"ColourData",
".",
"from_zenity_tuple_str",
"(",
"return_data",
".",
"data",
")",
"return",
"DialogData",
"(",
"return_data",
".",
"return_code",
",",
"converted_colour",
")",
"else",
":",
"return",
"DialogData",
"(",
"return_data",
".",
"return_code",
",",
"None",
")"
] |
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
|
[
"Show",
"a",
"Colour",
"Chooser",
"dialog",
"Usage",
":",
"C",
"{",
"dialog",
".",
"choose_colour",
"(",
"title",
"=",
"Select",
"Colour",
")",
"}"
] |
python
|
train
| 40.5625 |
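Inside an AutoKey user script the method is reached through the predefined dialog object, as the docstring's usage line shows. A small sketch of handling its result, assuming DialogData unpacks like a tuple and that the usual dialog.info_dialog helper is available:

# Sketch of calling choose_colour from an AutoKey user script; `dialog` is the
# scripting object AutoKey injects, and info_dialog is assumed to be available.
return_code, colour = dialog.choose_colour(title="Select Colour")
if return_code == 0 and colour is not None:
    dialog.info_dialog(title="Chosen colour", message=str(colour))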
textbook/aslack
|
aslack/slack_bot/bot.py
|
https://github.com/textbook/aslack/blob/9ac6a44e4464180109fa4be130ad7a980a9d1acc/aslack/slack_bot/bot.py#L267-L285
|
def _validate_first_message(cls, msg):
"""Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
"""
data = cls._unpack_message(msg)
logger.debug(data)
if data != cls.RTM_HANDSHAKE:
raise SlackApiError('Unexpected response: {!r}'.format(data))
logger.info('Joined real-time messaging.')
|
[
"def",
"_validate_first_message",
"(",
"cls",
",",
"msg",
")",
":",
"data",
"=",
"cls",
".",
"_unpack_message",
"(",
"msg",
")",
"logger",
".",
"debug",
"(",
"data",
")",
"if",
"data",
"!=",
"cls",
".",
"RTM_HANDSHAKE",
":",
"raise",
"SlackApiError",
"(",
"'Unexpected response: {!r}'",
".",
"format",
"(",
"data",
")",
")",
"logger",
".",
"info",
"(",
"'Joined real-time messaging.'",
")"
] |
Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
|
[
"Check",
"the",
"first",
"message",
"matches",
"the",
"expected",
"handshake",
"."
] |
python
|
valid
| 32.421053 |
ARMmbed/yotta
|
yotta/lib/cmakegen.py
|
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/cmakegen.py#L175-L200
|
def _validateListedSubdirsExist(self, component):
''' Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
'''
lib_subdirs = component.getLibs(explicit_only=True)
bin_subdirs = component.getBinaries()
ok = True
for d in lib_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"lib directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
for d in bin_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"bin directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
return ok
|
[
"def",
"_validateListedSubdirsExist",
"(",
"self",
",",
"component",
")",
":",
"lib_subdirs",
"=",
"component",
".",
"getLibs",
"(",
"explicit_only",
"=",
"True",
")",
"bin_subdirs",
"=",
"component",
".",
"getBinaries",
"(",
")",
"ok",
"=",
"True",
"for",
"d",
"in",
"lib_subdirs",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"component",
".",
"path",
",",
"d",
")",
")",
":",
"logger",
".",
"warning",
"(",
"\"lib directory \\\"%s\\\" doesn't exist but is listed in the module.json file of %s\"",
",",
"d",
",",
"component",
")",
"ok",
"=",
"False",
"for",
"d",
"in",
"bin_subdirs",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"component",
".",
"path",
",",
"d",
")",
")",
":",
"logger",
".",
"warning",
"(",
"\"bin directory \\\"%s\\\" doesn't exist but is listed in the module.json file of %s\"",
",",
"d",
",",
"component",
")",
"ok",
"=",
"False",
"return",
"ok"
] |
Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
|
[
"Return",
"true",
"if",
"all",
"the",
"subdirectories",
"which",
"this",
"component",
"lists",
"in",
"its",
"module",
".",
"json",
"file",
"exist",
"(",
"although",
"their",
"validity",
"is",
"otherwise",
"not",
"checked",
")",
"."
] |
python
|
valid
| 38.538462 |
istresearch/scrapy-cluster
|
crawler/crawling/log_retry_middleware.py
|
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/crawler/crawling/log_retry_middleware.py#L149-L161
|
def _increment_504_stat(self, request):
'''
Increments the 504 stat counters
@param request: The scrapy request in the spider
'''
for key in self.stats_dict:
if key == 'lifetime':
unique = request.url + str(time.time())
self.stats_dict[key].increment(unique)
else:
self.stats_dict[key].increment()
self.logger.debug("Incremented status_code '504' stats")
|
[
"def",
"_increment_504_stat",
"(",
"self",
",",
"request",
")",
":",
"for",
"key",
"in",
"self",
".",
"stats_dict",
":",
"if",
"key",
"==",
"'lifetime'",
":",
"unique",
"=",
"request",
".",
"url",
"+",
"str",
"(",
"time",
".",
"time",
"(",
")",
")",
"self",
".",
"stats_dict",
"[",
"key",
"]",
".",
"increment",
"(",
"unique",
")",
"else",
":",
"self",
".",
"stats_dict",
"[",
"key",
"]",
".",
"increment",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Incremented status_code '504' stats\"",
")"
] |
Increments the 504 stat counters
@param request: The scrapy request in the spider
|
[
"Increments",
"the",
"504",
"stat",
"counters"
] |
python
|
train
| 35.615385 |
inasafe/inasafe
|
safe/datastore/folder.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/datastore/folder.py#L125-L144
|
def layer_uri(self, layer_name):
"""Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
"""
layers = self.layers()
for layer, extension in product(layers, EXTENSIONS):
one_file = QFileInfo(
self.uri.filePath(layer + '.' + extension))
if one_file.exists():
if one_file.baseName() == layer_name:
return one_file.absoluteFilePath()
else:
return None
|
[
"def",
"layer_uri",
"(",
"self",
",",
"layer_name",
")",
":",
"layers",
"=",
"self",
".",
"layers",
"(",
")",
"for",
"layer",
",",
"extension",
"in",
"product",
"(",
"layers",
",",
"EXTENSIONS",
")",
":",
"one_file",
"=",
"QFileInfo",
"(",
"self",
".",
"uri",
".",
"filePath",
"(",
"layer",
"+",
"'.'",
"+",
"extension",
")",
")",
"if",
"one_file",
".",
"exists",
"(",
")",
":",
"if",
"one_file",
".",
"baseName",
"(",
")",
"==",
"layer_name",
":",
"return",
"one_file",
".",
"absoluteFilePath",
"(",
")",
"else",
":",
"return",
"None"
] |
Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
|
[
"Get",
"layer",
"URI",
"."
] |
python
|
train
| 29.95 |
waqasbhatti/astrobase
|
astrobase/lcproc/varthreshold.py
|
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcproc/varthreshold.py#L96-L699
|
def variability_threshold(featuresdir,
outfile,
magbins=DEFAULT_MAGBINS,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
min_lcmad_stdev=5.0,
min_stetj_stdev=2.0,
min_iqr_stdev=2.0,
min_inveta_stdev=2.0,
verbose=True):
'''This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
These are all the standard deviation multiplier for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# list of input pickles generated by varfeatures functions above
pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))
if maxobjects:
pklist = pklist[:maxobjects]
allobjects = {}
for magcol in magcols:
# keep local copies of these so we can fix them independently in case of
# nans
if (isinstance(min_stetj_stdev, list) or
isinstance(min_stetj_stdev, np.ndarray)):
magcol_min_stetj_stdev = min_stetj_stdev[::]
else:
magcol_min_stetj_stdev = min_stetj_stdev
if (isinstance(min_iqr_stdev, list) or
isinstance(min_iqr_stdev, np.ndarray)):
magcol_min_iqr_stdev = min_iqr_stdev[::]
else:
magcol_min_iqr_stdev = min_iqr_stdev
if (isinstance(min_inveta_stdev, list) or
isinstance(min_inveta_stdev, np.ndarray)):
magcol_min_inveta_stdev = min_inveta_stdev[::]
else:
magcol_min_inveta_stdev = min_inveta_stdev
LOGINFO('getting all object sdssr, LC MAD, stet J, IQR, eta...')
# we'll calculate the sigma per magnitude bin, so get the mags as well
allobjects[magcol] = {
'objectid':[],
'sdssr':[],
'lcmad':[],
'stetsonj':[],
'iqr':[],
'eta':[]
}
# fancy progress bar with tqdm if present
if TQDM and verbose:
listiterator = tqdm(pklist)
else:
listiterator = pklist
for pkl in listiterator:
with open(pkl,'rb') as infd:
thisfeatures = pickle.load(infd)
objectid = thisfeatures['objectid']
# the object magnitude
if ('info' in thisfeatures and
thisfeatures['info'] and
'sdssr' in thisfeatures['info']):
if (thisfeatures['info']['sdssr'] and
thisfeatures['info']['sdssr'] > 3.0):
sdssr = thisfeatures['info']['sdssr']
elif (magcol in thisfeatures and
thisfeatures[magcol] and
'median' in thisfeatures[magcol] and
thisfeatures[magcol]['median'] > 3.0):
sdssr = thisfeatures[magcol]['median']
elif (thisfeatures['info']['jmag'] and
thisfeatures['info']['hmag'] and
thisfeatures['info']['kmag']):
sdssr = jhk_to_sdssr(thisfeatures['info']['jmag'],
thisfeatures['info']['hmag'],
thisfeatures['info']['kmag'])
else:
sdssr = np.nan
else:
sdssr = np.nan
# the MAD of the light curve
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mad']):
lcmad = thisfeatures[magcol]['mad']
else:
lcmad = np.nan
# stetson index
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['stetsonj']):
stetsonj = thisfeatures[magcol]['stetsonj']
else:
stetsonj = np.nan
# IQR
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mag_iqr']):
iqr = thisfeatures[magcol]['mag_iqr']
else:
iqr = np.nan
# eta
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['eta_normal']):
eta = thisfeatures[magcol]['eta_normal']
else:
eta = np.nan
allobjects[magcol]['objectid'].append(objectid)
allobjects[magcol]['sdssr'].append(sdssr)
allobjects[magcol]['lcmad'].append(lcmad)
allobjects[magcol]['stetsonj'].append(stetsonj)
allobjects[magcol]['iqr'].append(iqr)
allobjects[magcol]['eta'].append(eta)
#
# done with collection of info
#
LOGINFO('finding objects above thresholds per magbin...')
# turn the info into arrays
allobjects[magcol]['objectid'] = np.ravel(np.array(
allobjects[magcol]['objectid']
))
allobjects[magcol]['sdssr'] = np.ravel(np.array(
allobjects[magcol]['sdssr']
))
allobjects[magcol]['lcmad'] = np.ravel(np.array(
allobjects[magcol]['lcmad']
))
allobjects[magcol]['stetsonj'] = np.ravel(np.array(
allobjects[magcol]['stetsonj']
))
allobjects[magcol]['iqr'] = np.ravel(np.array(
allobjects[magcol]['iqr']
))
allobjects[magcol]['eta'] = np.ravel(np.array(
allobjects[magcol]['eta']
))
# only get finite elements everywhere
thisfinind = (
np.isfinite(allobjects[magcol]['sdssr']) &
np.isfinite(allobjects[magcol]['lcmad']) &
np.isfinite(allobjects[magcol]['stetsonj']) &
np.isfinite(allobjects[magcol]['iqr']) &
np.isfinite(allobjects[magcol]['eta'])
)
allobjects[magcol]['objectid'] = allobjects[magcol]['objectid'][
thisfinind
]
allobjects[magcol]['sdssr'] = allobjects[magcol]['sdssr'][thisfinind]
allobjects[magcol]['lcmad'] = allobjects[magcol]['lcmad'][thisfinind]
allobjects[magcol]['stetsonj'] = allobjects[magcol]['stetsonj'][
thisfinind
]
allobjects[magcol]['iqr'] = allobjects[magcol]['iqr'][thisfinind]
allobjects[magcol]['eta'] = allobjects[magcol]['eta'][thisfinind]
# invert eta so we can threshold the same way as the others
allobjects[magcol]['inveta'] = 1.0/allobjects[magcol]['eta']
# do the thresholding by magnitude bin
magbininds = np.digitize(allobjects[magcol]['sdssr'],
magbins)
binned_objectids = []
binned_sdssr = []
binned_sdssr_median = []
binned_lcmad = []
binned_stetsonj = []
binned_iqr = []
binned_inveta = []
binned_count = []
binned_objectids_thresh_stetsonj = []
binned_objectids_thresh_iqr = []
binned_objectids_thresh_inveta = []
binned_objectids_thresh_all = []
binned_lcmad_median = []
binned_lcmad_stdev = []
binned_stetsonj_median = []
binned_stetsonj_stdev = []
binned_inveta_median = []
binned_inveta_stdev = []
binned_iqr_median = []
binned_iqr_stdev = []
# go through all the mag bins and get the thresholds for J, inveta, IQR
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbinind = np.where(magbininds == mbinind)
thisbin_sdssr_median = (magbins[magi] + magbins[magi+1])/2.0
binned_sdssr_median.append(thisbin_sdssr_median)
thisbin_objectids = allobjects[magcol]['objectid'][thisbinind]
thisbin_sdssr = allobjects[magcol]['sdssr'][thisbinind]
thisbin_lcmad = allobjects[magcol]['lcmad'][thisbinind]
thisbin_stetsonj = allobjects[magcol]['stetsonj'][thisbinind]
thisbin_iqr = allobjects[magcol]['iqr'][thisbinind]
thisbin_inveta = allobjects[magcol]['inveta'][thisbinind]
thisbin_count = thisbin_objectids.size
if thisbin_count > 4:
thisbin_lcmad_median = np.median(thisbin_lcmad)
thisbin_lcmad_stdev = np.median(
np.abs(thisbin_lcmad - thisbin_lcmad_median)
) * 1.483
binned_lcmad_median.append(thisbin_lcmad_median)
binned_lcmad_stdev.append(thisbin_lcmad_stdev)
thisbin_stetsonj_median = np.median(thisbin_stetsonj)
thisbin_stetsonj_stdev = np.median(
np.abs(thisbin_stetsonj - thisbin_stetsonj_median)
) * 1.483
binned_stetsonj_median.append(thisbin_stetsonj_median)
binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)
# now get the objects above the required stdev threshold
if isinstance(magcol_min_stetj_stdev, float):
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
magcol_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
elif (isinstance(magcol_min_stetj_stdev, np.ndarray) or
isinstance(magcol_min_stetj_stdev, list)):
thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
if not np.isfinite(thisbin_min_stetj_stdev):
LOGWARNING('provided threshold stetson J stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_stetj_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_stetj_stdev[magi] = 2.0
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
thisbin_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
thisbin_iqr_median = np.median(thisbin_iqr)
thisbin_iqr_stdev = np.median(
np.abs(thisbin_iqr - thisbin_iqr_median)
) * 1.483
binned_iqr_median.append(thisbin_iqr_median)
binned_iqr_stdev.append(thisbin_iqr_stdev)
# get the objects above the required stdev threshold
if isinstance(magcol_min_iqr_stdev, float):
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
magcol_min_iqr_stdev*thisbin_iqr_stdev)
]
elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
isinstance(magcol_min_iqr_stdev, list)):
thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
if not np.isfinite(thisbin_min_iqr_stdev):
LOGWARNING('provided threshold IQR stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_iqr_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_iqr_stdev[magi] = 2.0
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
thisbin_min_iqr_stdev*thisbin_iqr_stdev)
]
thisbin_inveta_median = np.median(thisbin_inveta)
thisbin_inveta_stdev = np.median(
np.abs(thisbin_inveta - thisbin_inveta_median)
) * 1.483
binned_inveta_median.append(thisbin_inveta_median)
binned_inveta_stdev.append(thisbin_inveta_stdev)
if isinstance(magcol_min_inveta_stdev, float):
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
magcol_min_inveta_stdev*thisbin_inveta_stdev
)
]
elif (isinstance(magcol_min_inveta_stdev, np.ndarray) or
isinstance(magcol_min_inveta_stdev, list)):
thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]
if not np.isfinite(thisbin_min_inveta_stdev):
LOGWARNING('provided threshold inveta stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_inveta_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_inveta_stdev[magi] = 2.0
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
thisbin_min_inveta_stdev*thisbin_inveta_stdev
)
]
else:
thisbin_objectids_thresh_stetsonj = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_iqr = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_inveta = (
np.array([],dtype=np.unicode_)
)
#
# done with check for enough objects in the bin
#
# get the intersection of all threshold objects to get objects that
# lie above the threshold for all variable indices
thisbin_objectids_thresh_all = reduce(
np.intersect1d,
(thisbin_objectids_thresh_stetsonj,
thisbin_objectids_thresh_iqr,
thisbin_objectids_thresh_inveta)
)
binned_objectids.append(thisbin_objectids)
binned_sdssr.append(thisbin_sdssr)
binned_lcmad.append(thisbin_lcmad)
binned_stetsonj.append(thisbin_stetsonj)
binned_iqr.append(thisbin_iqr)
binned_inveta.append(thisbin_inveta)
binned_count.append(thisbin_objectids.size)
binned_objectids_thresh_stetsonj.append(
thisbin_objectids_thresh_stetsonj
)
binned_objectids_thresh_iqr.append(
thisbin_objectids_thresh_iqr
)
binned_objectids_thresh_inveta.append(
thisbin_objectids_thresh_inveta
)
binned_objectids_thresh_all.append(
thisbin_objectids_thresh_all
)
#
# done with magbins
#
# update the output dict for this magcol
allobjects[magcol]['magbins'] = magbins
allobjects[magcol]['binned_objectids'] = binned_objectids
allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
allobjects[magcol]['binned_sdssr'] = binned_sdssr
allobjects[magcol]['binned_count'] = binned_count
allobjects[magcol]['binned_lcmad'] = binned_lcmad
allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
allobjects[magcol]['binned_iqr'] = binned_iqr
allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
allobjects[magcol]['binned_inveta'] = binned_inveta
allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
binned_objectids_thresh_stetsonj
)
allobjects[magcol]['binned_objectids_thresh_iqr'] = (
binned_objectids_thresh_iqr
)
allobjects[magcol]['binned_objectids_thresh_inveta'] = (
binned_objectids_thresh_inveta
)
allobjects[magcol]['binned_objectids_thresh_all'] = (
binned_objectids_thresh_all
)
# get the common selected objects thru all measures
try:
allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_all']
)
)
except ValueError:
LOGWARNING('not enough variable objects matching all thresholds')
allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
np.array([])
)
allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_stetsonj']
)
)
allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
)
allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
)
# turn these into np.arrays for easier plotting if they're lists
if isinstance(min_stetj_stdev, list):
allobjects[magcol]['min_stetj_stdev'] = np.array(
magcol_min_stetj_stdev
)
else:
allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
if isinstance(min_iqr_stdev, list):
allobjects[magcol]['min_iqr_stdev'] = np.array(
magcol_min_iqr_stdev
)
else:
allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
if isinstance(min_inveta_stdev, list):
allobjects[magcol]['min_inveta_stdev'] = np.array(
magcol_min_inveta_stdev
)
else:
allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
# this one doesn't get touched (for now)
allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
#
# done with all magcols
#
allobjects['magbins'] = magbins
with open(outfile,'wb') as outfd:
pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return allobjects
|
[
"def",
"variability_threshold",
"(",
"featuresdir",
",",
"outfile",
",",
"magbins",
"=",
"DEFAULT_MAGBINS",
",",
"maxobjects",
"=",
"None",
",",
"timecols",
"=",
"None",
",",
"magcols",
"=",
"None",
",",
"errcols",
"=",
"None",
",",
"lcformat",
"=",
"'hat-sql'",
",",
"lcformatdir",
"=",
"None",
",",
"min_lcmad_stdev",
"=",
"5.0",
",",
"min_stetj_stdev",
"=",
"2.0",
",",
"min_iqr_stdev",
"=",
"2.0",
",",
"min_inveta_stdev",
"=",
"2.0",
",",
"verbose",
"=",
"True",
")",
":",
"try",
":",
"formatinfo",
"=",
"get_lcformat",
"(",
"lcformat",
",",
"use_lcformat_dir",
"=",
"lcformatdir",
")",
"if",
"formatinfo",
":",
"(",
"dfileglob",
",",
"readerfunc",
",",
"dtimecols",
",",
"dmagcols",
",",
"derrcols",
",",
"magsarefluxes",
",",
"normfunc",
")",
"=",
"formatinfo",
"else",
":",
"LOGERROR",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"except",
"Exception",
"as",
"e",
":",
"LOGEXCEPTION",
"(",
"\"can't figure out the light curve format\"",
")",
"return",
"None",
"# override the default timecols, magcols, and errcols",
"# using the ones provided to the function",
"if",
"timecols",
"is",
"None",
":",
"timecols",
"=",
"dtimecols",
"if",
"magcols",
"is",
"None",
":",
"magcols",
"=",
"dmagcols",
"if",
"errcols",
"is",
"None",
":",
"errcols",
"=",
"derrcols",
"# list of input pickles generated by varfeatures functions above",
"pklist",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"featuresdir",
",",
"'varfeatures-*.pkl'",
")",
")",
"if",
"maxobjects",
":",
"pklist",
"=",
"pklist",
"[",
":",
"maxobjects",
"]",
"allobjects",
"=",
"{",
"}",
"for",
"magcol",
"in",
"magcols",
":",
"# keep local copies of these so we can fix them independently in case of",
"# nans",
"if",
"(",
"isinstance",
"(",
"min_stetj_stdev",
",",
"list",
")",
"or",
"isinstance",
"(",
"min_stetj_stdev",
",",
"np",
".",
"ndarray",
")",
")",
":",
"magcol_min_stetj_stdev",
"=",
"min_stetj_stdev",
"[",
":",
":",
"]",
"else",
":",
"magcol_min_stetj_stdev",
"=",
"min_stetj_stdev",
"if",
"(",
"isinstance",
"(",
"min_iqr_stdev",
",",
"list",
")",
"or",
"isinstance",
"(",
"min_iqr_stdev",
",",
"np",
".",
"ndarray",
")",
")",
":",
"magcol_min_iqr_stdev",
"=",
"min_iqr_stdev",
"[",
":",
":",
"]",
"else",
":",
"magcol_min_iqr_stdev",
"=",
"min_iqr_stdev",
"if",
"(",
"isinstance",
"(",
"min_inveta_stdev",
",",
"list",
")",
"or",
"isinstance",
"(",
"min_inveta_stdev",
",",
"np",
".",
"ndarray",
")",
")",
":",
"magcol_min_inveta_stdev",
"=",
"min_inveta_stdev",
"[",
":",
":",
"]",
"else",
":",
"magcol_min_inveta_stdev",
"=",
"min_inveta_stdev",
"LOGINFO",
"(",
"'getting all object sdssr, LC MAD, stet J, IQR, eta...'",
")",
"# we'll calculate the sigma per magnitude bin, so get the mags as well",
"allobjects",
"[",
"magcol",
"]",
"=",
"{",
"'objectid'",
":",
"[",
"]",
",",
"'sdssr'",
":",
"[",
"]",
",",
"'lcmad'",
":",
"[",
"]",
",",
"'stetsonj'",
":",
"[",
"]",
",",
"'iqr'",
":",
"[",
"]",
",",
"'eta'",
":",
"[",
"]",
"}",
"# fancy progress bar with tqdm if present",
"if",
"TQDM",
"and",
"verbose",
":",
"listiterator",
"=",
"tqdm",
"(",
"pklist",
")",
"else",
":",
"listiterator",
"=",
"pklist",
"for",
"pkl",
"in",
"listiterator",
":",
"with",
"open",
"(",
"pkl",
",",
"'rb'",
")",
"as",
"infd",
":",
"thisfeatures",
"=",
"pickle",
".",
"load",
"(",
"infd",
")",
"objectid",
"=",
"thisfeatures",
"[",
"'objectid'",
"]",
"# the object magnitude",
"if",
"(",
"'info'",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"'info'",
"]",
"and",
"'sdssr'",
"in",
"thisfeatures",
"[",
"'info'",
"]",
")",
":",
"if",
"(",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'sdssr'",
"]",
"and",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'sdssr'",
"]",
">",
"3.0",
")",
":",
"sdssr",
"=",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'sdssr'",
"]",
"elif",
"(",
"magcol",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"'median'",
"in",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'median'",
"]",
">",
"3.0",
")",
":",
"sdssr",
"=",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'median'",
"]",
"elif",
"(",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'jmag'",
"]",
"and",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'hmag'",
"]",
"and",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'kmag'",
"]",
")",
":",
"sdssr",
"=",
"jhk_to_sdssr",
"(",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'jmag'",
"]",
",",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'hmag'",
"]",
",",
"thisfeatures",
"[",
"'info'",
"]",
"[",
"'kmag'",
"]",
")",
"else",
":",
"sdssr",
"=",
"np",
".",
"nan",
"else",
":",
"sdssr",
"=",
"np",
".",
"nan",
"# the MAD of the light curve",
"if",
"(",
"magcol",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'mad'",
"]",
")",
":",
"lcmad",
"=",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'mad'",
"]",
"else",
":",
"lcmad",
"=",
"np",
".",
"nan",
"# stetson index",
"if",
"(",
"magcol",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
")",
":",
"stetsonj",
"=",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
"else",
":",
"stetsonj",
"=",
"np",
".",
"nan",
"# IQR",
"if",
"(",
"magcol",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'mag_iqr'",
"]",
")",
":",
"iqr",
"=",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'mag_iqr'",
"]",
"else",
":",
"iqr",
"=",
"np",
".",
"nan",
"# eta",
"if",
"(",
"magcol",
"in",
"thisfeatures",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"and",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'eta_normal'",
"]",
")",
":",
"eta",
"=",
"thisfeatures",
"[",
"magcol",
"]",
"[",
"'eta_normal'",
"]",
"else",
":",
"eta",
"=",
"np",
".",
"nan",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
".",
"append",
"(",
"objectid",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
".",
"append",
"(",
"sdssr",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
".",
"append",
"(",
"lcmad",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
".",
"append",
"(",
"stetsonj",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
".",
"append",
"(",
"iqr",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
".",
"append",
"(",
"eta",
")",
"#",
"# done with collection of info",
"#",
"LOGINFO",
"(",
"'finding objects above thresholds per magbin...'",
")",
"# turn the info into arrays",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"array",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
")",
")",
"# only get finite elements everywhere",
"thisfinind",
"=",
"(",
"np",
".",
"isfinite",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
")",
"&",
"np",
".",
"isfinite",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
"[",
"thisfinind",
"]",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
"[",
"thisfinind",
"]",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
"[",
"thisfinind",
"]",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
"[",
"thisfinind",
"]",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
"[",
"thisfinind",
"]",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
"[",
"thisfinind",
"]",
"# invert eta so we can threshold the same way as the others",
"allobjects",
"[",
"magcol",
"]",
"[",
"'inveta'",
"]",
"=",
"1.0",
"/",
"allobjects",
"[",
"magcol",
"]",
"[",
"'eta'",
"]",
"# do the thresholding by magnitude bin",
"magbininds",
"=",
"np",
".",
"digitize",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
",",
"magbins",
")",
"binned_objectids",
"=",
"[",
"]",
"binned_sdssr",
"=",
"[",
"]",
"binned_sdssr_median",
"=",
"[",
"]",
"binned_lcmad",
"=",
"[",
"]",
"binned_stetsonj",
"=",
"[",
"]",
"binned_iqr",
"=",
"[",
"]",
"binned_inveta",
"=",
"[",
"]",
"binned_count",
"=",
"[",
"]",
"binned_objectids_thresh_stetsonj",
"=",
"[",
"]",
"binned_objectids_thresh_iqr",
"=",
"[",
"]",
"binned_objectids_thresh_inveta",
"=",
"[",
"]",
"binned_objectids_thresh_all",
"=",
"[",
"]",
"binned_lcmad_median",
"=",
"[",
"]",
"binned_lcmad_stdev",
"=",
"[",
"]",
"binned_stetsonj_median",
"=",
"[",
"]",
"binned_stetsonj_stdev",
"=",
"[",
"]",
"binned_inveta_median",
"=",
"[",
"]",
"binned_inveta_stdev",
"=",
"[",
"]",
"binned_iqr_median",
"=",
"[",
"]",
"binned_iqr_stdev",
"=",
"[",
"]",
"# go through all the mag bins and get the thresholds for J, inveta, IQR",
"for",
"mbinind",
",",
"magi",
"in",
"zip",
"(",
"np",
".",
"unique",
"(",
"magbininds",
")",
",",
"range",
"(",
"len",
"(",
"magbins",
")",
"-",
"1",
")",
")",
":",
"thisbinind",
"=",
"np",
".",
"where",
"(",
"magbininds",
"==",
"mbinind",
")",
"thisbin_sdssr_median",
"=",
"(",
"magbins",
"[",
"magi",
"]",
"+",
"magbins",
"[",
"magi",
"+",
"1",
"]",
")",
"/",
"2.0",
"binned_sdssr_median",
".",
"append",
"(",
"thisbin_sdssr_median",
")",
"thisbin_objectids",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectid'",
"]",
"[",
"thisbinind",
"]",
"thisbin_sdssr",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'sdssr'",
"]",
"[",
"thisbinind",
"]",
"thisbin_lcmad",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'lcmad'",
"]",
"[",
"thisbinind",
"]",
"thisbin_stetsonj",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'stetsonj'",
"]",
"[",
"thisbinind",
"]",
"thisbin_iqr",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'iqr'",
"]",
"[",
"thisbinind",
"]",
"thisbin_inveta",
"=",
"allobjects",
"[",
"magcol",
"]",
"[",
"'inveta'",
"]",
"[",
"thisbinind",
"]",
"thisbin_count",
"=",
"thisbin_objectids",
".",
"size",
"if",
"thisbin_count",
">",
"4",
":",
"thisbin_lcmad_median",
"=",
"np",
".",
"median",
"(",
"thisbin_lcmad",
")",
"thisbin_lcmad_stdev",
"=",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"thisbin_lcmad",
"-",
"thisbin_lcmad_median",
")",
")",
"*",
"1.483",
"binned_lcmad_median",
".",
"append",
"(",
"thisbin_lcmad_median",
")",
"binned_lcmad_stdev",
".",
"append",
"(",
"thisbin_lcmad_stdev",
")",
"thisbin_stetsonj_median",
"=",
"np",
".",
"median",
"(",
"thisbin_stetsonj",
")",
"thisbin_stetsonj_stdev",
"=",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"thisbin_stetsonj",
"-",
"thisbin_stetsonj_median",
")",
")",
"*",
"1.483",
"binned_stetsonj_median",
".",
"append",
"(",
"thisbin_stetsonj_median",
")",
"binned_stetsonj_stdev",
".",
"append",
"(",
"thisbin_stetsonj_stdev",
")",
"# now get the objects above the required stdev threshold",
"if",
"isinstance",
"(",
"magcol_min_stetj_stdev",
",",
"float",
")",
":",
"thisbin_objectids_thresh_stetsonj",
"=",
"thisbin_objectids",
"[",
"thisbin_stetsonj",
">",
"(",
"thisbin_stetsonj_median",
"+",
"magcol_min_stetj_stdev",
"*",
"thisbin_stetsonj_stdev",
")",
"]",
"elif",
"(",
"isinstance",
"(",
"magcol_min_stetj_stdev",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"magcol_min_stetj_stdev",
",",
"list",
")",
")",
":",
"thisbin_min_stetj_stdev",
"=",
"magcol_min_stetj_stdev",
"[",
"magi",
"]",
"if",
"not",
"np",
".",
"isfinite",
"(",
"thisbin_min_stetj_stdev",
")",
":",
"LOGWARNING",
"(",
"'provided threshold stetson J stdev '",
"'for magbin: %.3f is nan, using 2.0'",
"%",
"thisbin_sdssr_median",
")",
"thisbin_min_stetj_stdev",
"=",
"2.0",
"# update the input list/array as well, since we'll be",
"# saving it to the output dict and using it to plot the",
"# variability thresholds",
"magcol_min_stetj_stdev",
"[",
"magi",
"]",
"=",
"2.0",
"thisbin_objectids_thresh_stetsonj",
"=",
"thisbin_objectids",
"[",
"thisbin_stetsonj",
">",
"(",
"thisbin_stetsonj_median",
"+",
"thisbin_min_stetj_stdev",
"*",
"thisbin_stetsonj_stdev",
")",
"]",
"thisbin_iqr_median",
"=",
"np",
".",
"median",
"(",
"thisbin_iqr",
")",
"thisbin_iqr_stdev",
"=",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"thisbin_iqr",
"-",
"thisbin_iqr_median",
")",
")",
"*",
"1.483",
"binned_iqr_median",
".",
"append",
"(",
"thisbin_iqr_median",
")",
"binned_iqr_stdev",
".",
"append",
"(",
"thisbin_iqr_stdev",
")",
"# get the objects above the required stdev threshold",
"if",
"isinstance",
"(",
"magcol_min_iqr_stdev",
",",
"float",
")",
":",
"thisbin_objectids_thresh_iqr",
"=",
"thisbin_objectids",
"[",
"thisbin_iqr",
">",
"(",
"thisbin_iqr_median",
"+",
"magcol_min_iqr_stdev",
"*",
"thisbin_iqr_stdev",
")",
"]",
"elif",
"(",
"isinstance",
"(",
"magcol_min_iqr_stdev",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"magcol_min_iqr_stdev",
",",
"list",
")",
")",
":",
"thisbin_min_iqr_stdev",
"=",
"magcol_min_iqr_stdev",
"[",
"magi",
"]",
"if",
"not",
"np",
".",
"isfinite",
"(",
"thisbin_min_iqr_stdev",
")",
":",
"LOGWARNING",
"(",
"'provided threshold IQR stdev '",
"'for magbin: %.3f is nan, using 2.0'",
"%",
"thisbin_sdssr_median",
")",
"thisbin_min_iqr_stdev",
"=",
"2.0",
"# update the input list/array as well, since we'll be",
"# saving it to the output dict and using it to plot the",
"# variability thresholds",
"magcol_min_iqr_stdev",
"[",
"magi",
"]",
"=",
"2.0",
"thisbin_objectids_thresh_iqr",
"=",
"thisbin_objectids",
"[",
"thisbin_iqr",
">",
"(",
"thisbin_iqr_median",
"+",
"thisbin_min_iqr_stdev",
"*",
"thisbin_iqr_stdev",
")",
"]",
"thisbin_inveta_median",
"=",
"np",
".",
"median",
"(",
"thisbin_inveta",
")",
"thisbin_inveta_stdev",
"=",
"np",
".",
"median",
"(",
"np",
".",
"abs",
"(",
"thisbin_inveta",
"-",
"thisbin_inveta_median",
")",
")",
"*",
"1.483",
"binned_inveta_median",
".",
"append",
"(",
"thisbin_inveta_median",
")",
"binned_inveta_stdev",
".",
"append",
"(",
"thisbin_inveta_stdev",
")",
"if",
"isinstance",
"(",
"magcol_min_inveta_stdev",
",",
"float",
")",
":",
"thisbin_objectids_thresh_inveta",
"=",
"thisbin_objectids",
"[",
"thisbin_inveta",
">",
"(",
"thisbin_inveta_median",
"+",
"magcol_min_inveta_stdev",
"*",
"thisbin_inveta_stdev",
")",
"]",
"elif",
"(",
"isinstance",
"(",
"magcol_min_inveta_stdev",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"magcol_min_inveta_stdev",
",",
"list",
")",
")",
":",
"thisbin_min_inveta_stdev",
"=",
"magcol_min_inveta_stdev",
"[",
"magi",
"]",
"if",
"not",
"np",
".",
"isfinite",
"(",
"thisbin_min_inveta_stdev",
")",
":",
"LOGWARNING",
"(",
"'provided threshold inveta stdev '",
"'for magbin: %.3f is nan, using 2.0'",
"%",
"thisbin_sdssr_median",
")",
"thisbin_min_inveta_stdev",
"=",
"2.0",
"# update the input list/array as well, since we'll be",
"# saving it to the output dict and using it to plot the",
"# variability thresholds",
"magcol_min_inveta_stdev",
"[",
"magi",
"]",
"=",
"2.0",
"thisbin_objectids_thresh_inveta",
"=",
"thisbin_objectids",
"[",
"thisbin_inveta",
">",
"(",
"thisbin_inveta_median",
"+",
"thisbin_min_inveta_stdev",
"*",
"thisbin_inveta_stdev",
")",
"]",
"else",
":",
"thisbin_objectids_thresh_stetsonj",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
")",
"thisbin_objectids_thresh_iqr",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
")",
"thisbin_objectids_thresh_inveta",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"unicode_",
")",
")",
"#",
"# done with check for enough objects in the bin",
"#",
"# get the intersection of all threshold objects to get objects that",
"# lie above the threshold for all variable indices",
"thisbin_objectids_thresh_all",
"=",
"reduce",
"(",
"np",
".",
"intersect1d",
",",
"(",
"thisbin_objectids_thresh_stetsonj",
",",
"thisbin_objectids_thresh_iqr",
",",
"thisbin_objectids_thresh_inveta",
")",
")",
"binned_objectids",
".",
"append",
"(",
"thisbin_objectids",
")",
"binned_sdssr",
".",
"append",
"(",
"thisbin_sdssr",
")",
"binned_lcmad",
".",
"append",
"(",
"thisbin_lcmad",
")",
"binned_stetsonj",
".",
"append",
"(",
"thisbin_stetsonj",
")",
"binned_iqr",
".",
"append",
"(",
"thisbin_iqr",
")",
"binned_inveta",
".",
"append",
"(",
"thisbin_inveta",
")",
"binned_count",
".",
"append",
"(",
"thisbin_objectids",
".",
"size",
")",
"binned_objectids_thresh_stetsonj",
".",
"append",
"(",
"thisbin_objectids_thresh_stetsonj",
")",
"binned_objectids_thresh_iqr",
".",
"append",
"(",
"thisbin_objectids_thresh_iqr",
")",
"binned_objectids_thresh_inveta",
".",
"append",
"(",
"thisbin_objectids_thresh_inveta",
")",
"binned_objectids_thresh_all",
".",
"append",
"(",
"thisbin_objectids_thresh_all",
")",
"#",
"# done with magbins",
"#",
"# update the output dict for this magcol",
"allobjects",
"[",
"magcol",
"]",
"[",
"'magbins'",
"]",
"=",
"magbins",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids'",
"]",
"=",
"binned_objectids",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_sdssr_median'",
"]",
"=",
"binned_sdssr_median",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_sdssr'",
"]",
"=",
"binned_sdssr",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_count'",
"]",
"=",
"binned_count",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_lcmad'",
"]",
"=",
"binned_lcmad",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_lcmad_median'",
"]",
"=",
"binned_lcmad_median",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_lcmad_stdev'",
"]",
"=",
"binned_lcmad_stdev",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_stetsonj'",
"]",
"=",
"binned_stetsonj",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_stetsonj_median'",
"]",
"=",
"binned_stetsonj_median",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_stetsonj_stdev'",
"]",
"=",
"binned_stetsonj_stdev",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_iqr'",
"]",
"=",
"binned_iqr",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_iqr_median'",
"]",
"=",
"binned_iqr_median",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_iqr_stdev'",
"]",
"=",
"binned_iqr_stdev",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_inveta'",
"]",
"=",
"binned_inveta",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_inveta_median'",
"]",
"=",
"binned_inveta_median",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_inveta_stdev'",
"]",
"=",
"binned_inveta_stdev",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_stetsonj'",
"]",
"=",
"(",
"binned_objectids_thresh_stetsonj",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_iqr'",
"]",
"=",
"(",
"binned_objectids_thresh_iqr",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_inveta'",
"]",
"=",
"(",
"binned_objectids_thresh_inveta",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_all'",
"]",
"=",
"(",
"binned_objectids_thresh_all",
")",
"# get the common selected objects thru all measures",
"try",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectids_all_thresh_all_magbins'",
"]",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_all'",
"]",
")",
")",
"except",
"ValueError",
":",
"LOGWARNING",
"(",
"'not enough variable objects matching all thresholds'",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectids_all_thresh_all_magbins'",
"]",
"=",
"(",
"np",
".",
"array",
"(",
"[",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectids_stetsonj_thresh_all_magbins'",
"]",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_stetsonj'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectids_inveta_thresh_all_magbins'",
"]",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_inveta'",
"]",
")",
")",
"allobjects",
"[",
"magcol",
"]",
"[",
"'objectids_iqr_thresh_all_magbins'",
"]",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"concatenate",
"(",
"allobjects",
"[",
"magcol",
"]",
"[",
"'binned_objectids_thresh_iqr'",
"]",
")",
")",
"# turn these into np.arrays for easier plotting if they're lists",
"if",
"isinstance",
"(",
"min_stetj_stdev",
",",
"list",
")",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_stetj_stdev'",
"]",
"=",
"np",
".",
"array",
"(",
"magcol_min_stetj_stdev",
")",
"else",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_stetj_stdev'",
"]",
"=",
"magcol_min_stetj_stdev",
"if",
"isinstance",
"(",
"min_iqr_stdev",
",",
"list",
")",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_iqr_stdev'",
"]",
"=",
"np",
".",
"array",
"(",
"magcol_min_iqr_stdev",
")",
"else",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_iqr_stdev'",
"]",
"=",
"magcol_min_iqr_stdev",
"if",
"isinstance",
"(",
"min_inveta_stdev",
",",
"list",
")",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_inveta_stdev'",
"]",
"=",
"np",
".",
"array",
"(",
"magcol_min_inveta_stdev",
")",
"else",
":",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_inveta_stdev'",
"]",
"=",
"magcol_min_inveta_stdev",
"# this one doesn't get touched (for now)",
"allobjects",
"[",
"magcol",
"]",
"[",
"'min_lcmad_stdev'",
"]",
"=",
"min_lcmad_stdev",
"#",
"# done with all magcols",
"#",
"allobjects",
"[",
"'magbins'",
"]",
"=",
"magbins",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"pickle",
".",
"dump",
"(",
"allobjects",
",",
"outfd",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"return",
"allobjects"
] |
This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. This will choose variables based
on the thresholds in IQR, stetson, and inveta, with weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
These are the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
|
[
"This",
"generates",
"a",
"list",
"of",
"objects",
"with",
"stetson",
"J",
"IQR",
"and",
"1",
".",
"0",
"/",
"eta",
"above",
"some",
"threshold",
"value",
"to",
"select",
"them",
"as",
"potential",
"variable",
"stars",
"."
] |
python
|
valid
| 37.875828 |
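The per-magnitude-bin cut in the tokens above boils down to a robust sigma clip: for each index (Stetson J, IQR, 1/eta) the bin's scatter is estimated as 1.483 times the median absolute deviation, and objects above median + N * stdev survive. A self-contained numpy sketch of that single step (synthetic values; not the astrobase API itself):

import numpy as np

def robust_bin_threshold(values, objectids, min_stdev=2.0):
    # robust location/scale: median and MAD scaled to a stdev equivalent (x 1.483)
    med = np.median(values)
    stdev = np.median(np.abs(values - med)) * 1.483
    # keep objects whose index lies above median + min_stdev * robust stdev
    return objectids[values > (med + min_stdev * stdev)]

# one magnitude bin's Stetson J values for six objects (made up)
stetj = np.array([0.10, 0.20, 0.15, 0.12, 1.50, 0.18])
oids = np.array(['obj0', 'obj1', 'obj2', 'obj3', 'obj4', 'obj5'])
print(robust_bin_threshold(stetj, oids))  # -> ['obj4']

The same clip is applied independently to Stetson J, IQR, and inveta, and the per-bin survivors are then intersected with reduce(np.intersect1d, ...) to get the objects above all three thresholds.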
abusque/qng
|
qng/generator.py
|
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L83-L91
|
def _get_names(self):
"""Get the list of first names.
:return: A list of first name entries.
"""
names = self._read_name_file('names.json')
names = self._compute_weights(names)
return names
|
[
"def",
"_get_names",
"(",
"self",
")",
":",
"names",
"=",
"self",
".",
"_read_name_file",
"(",
"'names.json'",
")",
"names",
"=",
"self",
".",
"_compute_weights",
"(",
"names",
")",
"return",
"names"
] |
Get the list of first names.
:return: A list of first name entries.
|
[
"Get",
"the",
"list",
"of",
"first",
"names",
"."
] |
python
|
train
| 25.666667 |
pywbem/pywbem
|
examples/enuminstances.py
|
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/examples/enuminstances.py#L74-L100
|
def main():
""" Get arguments and call the execution function"""
if len(sys.argv) < 6:
print("Usage: %s server_url username password namespace' \
' classname" % sys.argv[0])
print('Using internal defaults')
server_url = SERVER_URL
namespace = TEST_NAMESPACE
username = USERNAME
password = PASSWORD
classname = TEST_CLASS
else:
print('Get from input')
server_url = sys.argv[1]
namespace = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
classname = sys.argv[5]
# create the credentials tuple for WBEMConnection
creds = (username, password)
# call the method to execute the request and display results
execute_request(server_url, creds, namespace, classname)
return 0
|
[
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"6",
":",
"print",
"(",
"\"Usage: %s server_url username password namespace' \\\n ' classname\"",
"%",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"print",
"(",
"'Using internal defaults'",
")",
"server_url",
"=",
"SERVER_URL",
"namespace",
"=",
"TEST_NAMESPACE",
"username",
"=",
"USERNAME",
"password",
"=",
"PASSWORD",
"classname",
"=",
"TEST_CLASS",
"else",
":",
"print",
"(",
"'Get from input'",
")",
"server_url",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"namespace",
"=",
"sys",
".",
"argv",
"[",
"2",
"]",
"username",
"=",
"sys",
".",
"argv",
"[",
"3",
"]",
"password",
"=",
"sys",
".",
"argv",
"[",
"4",
"]",
"classname",
"=",
"sys",
".",
"argv",
"[",
"5",
"]",
"# create the credentials tuple for WBEMConnection",
"creds",
"=",
"(",
"username",
",",
"password",
")",
"# call the method to execute the request and display results",
"execute_request",
"(",
"server_url",
",",
"creds",
",",
"namespace",
",",
"classname",
")",
"return",
"0"
] |
Get arguments and call the execution function
|
[
"Get",
"arguments",
"and",
"call",
"the",
"execution",
"function"
] |
python
|
train
| 29.777778 |
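One caveat worth calling out: the usage string above advertises `server_url username password namespace classname`, but the else branch actually parses `server_url, namespace, username, password, classname`. A hedged sketch of what sys.argv has to contain for the parsing order to work (all values are placeholders):

# sys.argv[1]          sys.argv[2]  sys.argv[3]  sys.argv[4]  sys.argv[5]
# https://server:5989  root/cimv2   user         password     CIM_ComputerSystem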
saltstack/salt
|
salt/utils/dictdiffer.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L247-L269
|
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
|
[
"def",
"added",
"(",
"self",
")",
":",
"def",
"_added",
"(",
"diffs",
",",
"prefix",
")",
":",
"keys",
"=",
"[",
"]",
"for",
"key",
"in",
"diffs",
".",
"keys",
"(",
")",
":",
"if",
"isinstance",
"(",
"diffs",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"'old'",
"not",
"in",
"diffs",
"[",
"key",
"]",
":",
"keys",
".",
"extend",
"(",
"_added",
"(",
"diffs",
"[",
"key",
"]",
",",
"prefix",
"=",
"'{0}{1}.'",
".",
"format",
"(",
"prefix",
",",
"key",
")",
")",
")",
"elif",
"diffs",
"[",
"key",
"]",
"[",
"'old'",
"]",
"==",
"self",
".",
"NONE_VALUE",
":",
"if",
"isinstance",
"(",
"diffs",
"[",
"key",
"]",
"[",
"'new'",
"]",
",",
"dict",
")",
":",
"keys",
".",
"extend",
"(",
"_added",
"(",
"diffs",
"[",
"key",
"]",
"[",
"'new'",
"]",
",",
"prefix",
"=",
"'{0}{1}.'",
".",
"format",
"(",
"prefix",
",",
"key",
")",
")",
")",
"else",
":",
"keys",
".",
"append",
"(",
"'{0}{1}'",
".",
"format",
"(",
"prefix",
",",
"key",
")",
")",
"return",
"keys",
"return",
"sorted",
"(",
"_added",
"(",
"self",
".",
"_diffs",
",",
"prefix",
"=",
"''",
")",
")"
] |
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
|
[
"Returns",
"all",
"keys",
"that",
"have",
"been",
"added",
"."
] |
python
|
train
| 39.826087 |
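The recursion above flattens nested additions into dotted key paths. A standalone sketch of that flattening idea on a plain nested dict (salt's real diff structure additionally carries 'old'/'new' markers per key, which this sketch ignores):

def dotted_keys(d, prefix=''):
    # walk a nested dict and emit 'parent.child' style paths for every leaf
    keys = []
    for key, value in d.items():
        if isinstance(value, dict):
            keys.extend(dotted_keys(value, prefix='{0}{1}.'.format(prefix, key)))
        else:
            keys.append('{0}{1}'.format(prefix, key))
    return sorted(keys)

print(dotted_keys({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}))
# -> ['a', 'b.c', 'b.d.e']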
couchbase/couchbase-python-client
|
couchbase/subdocument.py
|
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/subdocument.py#L132-L141
|
def replace(path, value, **kwargs):
"""
Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value
"""
return _gen_4spec(LCB_SDCMD_REPLACE, path, value,
create_path=False, **kwargs)
|
[
"def",
"replace",
"(",
"path",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gen_4spec",
"(",
"LCB_SDCMD_REPLACE",
",",
"path",
",",
"value",
",",
"create_path",
"=",
"False",
",",
"*",
"*",
"kwargs",
")"
] |
Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value
|
[
"Replace",
"an",
"existing",
"path",
".",
"This",
"works",
"on",
"any",
"valid",
"path",
"if",
"the",
"path",
"already",
"exists",
".",
"Valid",
"only",
"in",
":",
"cb_bmeth",
":",
"mutate_in"
] |
python
|
train
| 34.4 |
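A hedged usage sketch (the bucket object, document id and path are placeholders; this follows the 2.x Python SDK pattern where sub-document specs are passed to `mutate_in`):

import couchbase.subdocument as SD

# replace an existing field inside a stored JSON document
bucket.mutate_in('user::123', SD.replace('profile.email', 'new@example.com'))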
mottosso/Qt.py
|
examples/loadUi/baseinstance2.py
|
https://github.com/mottosso/Qt.py/blob/d88a0c1762ad90d1965008cc14c53504bbcc0061/examples/loadUi/baseinstance2.py#L54-L89
|
def pyside_load_ui(uifile, base_instance=None):
"""Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
"""
form_class, base_class = load_ui_type(uifile)
if not base_instance:
typeName = form_class.__name__
finalType = type(typeName,
(form_class, base_class),
{})
base_instance = finalType()
else:
if not isinstance(base_instance, base_class):
raise RuntimeError(
'The base_instance passed to loadUi does not inherit from'
' needed base type (%s)' % type(base_class))
typeName = type(base_instance).__name__
base_instance.__class__ = type(typeName,
(form_class, type(base_instance)),
{})
base_instance.setupUi(base_instance)
return base_instance
|
[
"def",
"pyside_load_ui",
"(",
"uifile",
",",
"base_instance",
"=",
"None",
")",
":",
"form_class",
",",
"base_class",
"=",
"load_ui_type",
"(",
"uifile",
")",
"if",
"not",
"base_instance",
":",
"typeName",
"=",
"form_class",
".",
"__name__",
"finalType",
"=",
"type",
"(",
"typeName",
",",
"(",
"form_class",
",",
"base_class",
")",
",",
"{",
"}",
")",
"base_instance",
"=",
"finalType",
"(",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"base_instance",
",",
"base_class",
")",
":",
"raise",
"RuntimeError",
"(",
"'The base_instance passed to loadUi does not inherit from'",
"' needed base type (%s)'",
"%",
"type",
"(",
"base_class",
")",
")",
"typeName",
"=",
"type",
"(",
"base_instance",
")",
".",
"__name__",
"base_instance",
".",
"__class__",
"=",
"type",
"(",
"typeName",
",",
"(",
"form_class",
",",
"type",
"(",
"base_instance",
")",
")",
",",
"{",
"}",
")",
"base_instance",
".",
"setupUi",
"(",
"base_instance",
")",
"return",
"base_instance"
] |
Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
|
[
"Provide",
"PyQt4",
".",
"uic",
".",
"loadUi",
"functionality",
"to",
"PySide"
] |
python
|
train
| 35.083333 |
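A hedged usage sketch (the .ui path is a placeholder and `existing_dialog` stands for a widget whose class matches the .ui file's base class):

# build a brand new widget from the .ui file
widget = pyside_load_ui('/path/to/dialog.ui')

# or populate an existing instance in place, PyQt4.uic.loadUi style
pyside_load_ui('/path/to/dialog.ui', base_instance=existing_dialog)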
gwpy/gwpy
|
gwpy/io/datafind.py
|
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L615-L626
|
def find_types(observatory, match=None, trend=None,
connection=None, **connection_kw):
"""Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
"""
return sorted(connection.find_types(observatory, match=match),
key=lambda x: _type_priority(observatory, x, trend=trend))
|
[
"def",
"find_types",
"(",
"observatory",
",",
"match",
"=",
"None",
",",
"trend",
"=",
"None",
",",
"connection",
"=",
"None",
",",
"*",
"*",
"connection_kw",
")",
":",
"return",
"sorted",
"(",
"connection",
".",
"find_types",
"(",
"observatory",
",",
"match",
"=",
"match",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"_type_priority",
"(",
"observatory",
",",
"x",
",",
"trend",
"=",
"trend",
")",
")"
] |
Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
|
[
"Find",
"the",
"available",
"data",
"types",
"for",
"a",
"given",
"observatory",
"."
] |
python
|
train
| 37.833333 |
O365/python-o365
|
O365/connection.py
|
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/connection.py#L140-L165
|
def get_scopes_for(self, user_provided_scopes):
""" Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
"""
if user_provided_scopes is None:
# return all available scopes
user_provided_scopes = [app_part for app_part in self._oauth_scopes]
elif isinstance(user_provided_scopes, str):
user_provided_scopes = [user_provided_scopes]
if not isinstance(user_provided_scopes, (list, tuple)):
raise ValueError(
"'user_provided_scopes' must be a list or a tuple of strings")
scopes = set()
for app_part in user_provided_scopes:
for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
scopes.add(self._prefix_scope(scope))
return list(scopes)
|
[
"def",
"get_scopes_for",
"(",
"self",
",",
"user_provided_scopes",
")",
":",
"if",
"user_provided_scopes",
"is",
"None",
":",
"# return all available scopes",
"user_provided_scopes",
"=",
"[",
"app_part",
"for",
"app_part",
"in",
"self",
".",
"_oauth_scopes",
"]",
"elif",
"isinstance",
"(",
"user_provided_scopes",
",",
"str",
")",
":",
"user_provided_scopes",
"=",
"[",
"user_provided_scopes",
"]",
"if",
"not",
"isinstance",
"(",
"user_provided_scopes",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"'user_provided_scopes' must be a list or a tuple of strings\"",
")",
"scopes",
"=",
"set",
"(",
")",
"for",
"app_part",
"in",
"user_provided_scopes",
":",
"for",
"scope",
"in",
"self",
".",
"_oauth_scopes",
".",
"get",
"(",
"app_part",
",",
"[",
"(",
"app_part",
",",
")",
"]",
")",
":",
"scopes",
".",
"add",
"(",
"self",
".",
"_prefix_scope",
"(",
"scope",
")",
")",
"return",
"list",
"(",
"scopes",
")"
] |
Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
|
[
"Returns",
"a",
"list",
"of",
"scopes",
"needed",
"for",
"each",
"of",
"the",
"scope_helpers",
"provided",
"by",
"adding",
"the",
"prefix",
"to",
"them",
"if",
"required"
] |
python
|
train
| 42.692308 |
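The branching above can be exercised in isolation: None expands to every known helper, a bare string is wrapped in a list, anything else that is not a list/tuple raises ValueError, and unknown names fall through `.get(app_part, [(app_part,)])` so they are treated as raw scopes. A toy, self-contained sketch of that dispatch with a made-up helper table and prefix URL (not the O365 API):

_oauth_scopes = {'basic': [('offline_access',), ('User.Read',)]}
PREFIX = 'https://graph.microsoft.com/'  # assumed prefix, for illustration only

def get_scopes_for(user_provided_scopes):
    if user_provided_scopes is None:
        user_provided_scopes = list(_oauth_scopes)
    elif isinstance(user_provided_scopes, str):
        user_provided_scopes = [user_provided_scopes]
    if not isinstance(user_provided_scopes, (list, tuple)):
        raise ValueError("'user_provided_scopes' must be a list or a tuple of strings")
    scopes = set()
    for app_part in user_provided_scopes:
        for scope in _oauth_scopes.get(app_part, [(app_part,)]):
            scopes.add(PREFIX + scope[0])   # stand-in for _prefix_scope
    return sorted(scopes)

print(get_scopes_for('basic'))        # known helper expands to its scopes
print(get_scopes_for(['Mail.Read']))  # unknown name passes through as a raw scope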
ramses-tech/ramses
|
ramses/utils.py
|
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L345-L354
|
def get_route_name(resource_uri):
""" Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters.
"""
resource_uri = resource_uri.strip('/')
resource_uri = re.sub('\W', '', resource_uri)
return resource_uri
|
[
"def",
"get_route_name",
"(",
"resource_uri",
")",
":",
"resource_uri",
"=",
"resource_uri",
".",
"strip",
"(",
"'/'",
")",
"resource_uri",
"=",
"re",
".",
"sub",
"(",
"'\\W'",
",",
"''",
",",
"resource_uri",
")",
"return",
"resource_uri"
] |
Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters.
|
[
"Get",
"route",
"name",
"from",
"RAML",
"resource",
"URI",
"."
] |
python
|
train
| 37.1 |
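Two quick examples of the stripping behaviour (the RAML URIs are illustrative): leading/trailing slashes are trimmed, then every remaining non-word character, including the `{}` of URI parameters, is dropped.

print(get_route_name('/stories/'))      # -> 'stories'
print(get_route_name('/users/{id}'))    # -> 'usersid'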
biosignalsnotebooks/biosignalsnotebooks
|
biosignalsnotebooks/build/lib/biosignalsnotebooks/extract.py
|
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/extract.py#L268-L315
|
def psd(tachogram_time, tachogram_data):
"""
-----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis.
"""
init_time = tachogram_time[0]
fin_time = tachogram_time[-1]
tck = interpol.splrep(tachogram_time, tachogram_data)
interpolation_rate = 4
nn_time_even = numpy.linspace(init_time, fin_time, (fin_time - init_time) * interpolation_rate)
nn_tachogram_even = interpol.splev(nn_time_even, tck)
freq_axis, power_axis = scisignal.welch(nn_tachogram_even, interpolation_rate,
window=scisignal.get_window("hanning",
min(len(nn_tachogram_even),
1000)),
nperseg=min(len(nn_tachogram_even), 1000))
freqs = [round(val, 3) for val in freq_axis if val < 0.5]
power = [round(val, 4) for val, freq in zip(power_axis, freq_axis) if freq < 0.5]
return freqs, power
|
[
"def",
"psd",
"(",
"tachogram_time",
",",
"tachogram_data",
")",
":",
"init_time",
"=",
"tachogram_time",
"[",
"0",
"]",
"fin_time",
"=",
"tachogram_time",
"[",
"-",
"1",
"]",
"tck",
"=",
"interpol",
".",
"splrep",
"(",
"tachogram_time",
",",
"tachogram_data",
")",
"interpolation_rate",
"=",
"4",
"nn_time_even",
"=",
"numpy",
".",
"linspace",
"(",
"init_time",
",",
"fin_time",
",",
"(",
"fin_time",
"-",
"init_time",
")",
"*",
"interpolation_rate",
")",
"nn_tachogram_even",
"=",
"interpol",
".",
"splev",
"(",
"nn_time_even",
",",
"tck",
")",
"freq_axis",
",",
"power_axis",
"=",
"scisignal",
".",
"welch",
"(",
"nn_tachogram_even",
",",
"interpolation_rate",
",",
"window",
"=",
"scisignal",
".",
"get_window",
"(",
"\"hanning\"",
",",
"min",
"(",
"len",
"(",
"nn_tachogram_even",
")",
",",
"1000",
")",
")",
",",
"nperseg",
"=",
"min",
"(",
"len",
"(",
"nn_tachogram_even",
")",
",",
"1000",
")",
")",
"freqs",
"=",
"[",
"round",
"(",
"val",
",",
"3",
")",
"for",
"val",
"in",
"freq_axis",
"if",
"val",
"<",
"0.5",
"]",
"power",
"=",
"[",
"round",
"(",
"val",
",",
"4",
")",
"for",
"val",
",",
"freq",
"in",
"zip",
"(",
"power_axis",
",",
"freq_axis",
")",
"if",
"freq",
"<",
"0.5",
"]",
"return",
"freqs",
",",
"power"
] |
-----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis.
|
[
"-----",
"Brief",
"-----",
"Determination",
"of",
"the",
"Power",
"Spectral",
"Density",
"Function",
"(",
"Fourier",
"Domain",
")"
] |
python
|
train
| 35.125 |
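The function above does two things: it spline-resamples the unevenly sampled tachogram onto an even 4 Hz grid, then runs Welch's method and keeps only frequencies below 0.5 Hz. A self-contained sketch of those two steps on a synthetic tachogram (illustrative values; not the package API):

import numpy as np
from scipy import interpolate, signal

# synthetic tachogram: RR intervals (s) located at their cumulative beat times (s)
rr = np.random.uniform(0.7, 1.1, 300)
beat_times = np.cumsum(rr)

# 1) resample onto an even grid at 4 Hz, as in psd() above
tck = interpolate.splrep(beat_times, rr)
even_t = np.linspace(beat_times[0], beat_times[-1],
                     int((beat_times[-1] - beat_times[0]) * 4))
even_rr = interpolate.splev(even_t, tck)

# 2) Welch PSD, keeping the HRV band below 0.5 Hz
freq, power = signal.welch(even_rr, fs=4, nperseg=min(len(even_rr), 1000))
freq, power = freq[freq < 0.5], power[freq < 0.5]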
pkgw/pwkit
|
pwkit/astutil.py
|
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/astutil.py#L338-L354
|
def parsedeglat (latstr):
"""Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
"""
deg = _parsesexagesimal (latstr, 'latitude', True)
if abs (deg) > 90:
raise ValueError ('illegal latitude specification: ' + latstr)
return deg * D2R
|
[
"def",
"parsedeglat",
"(",
"latstr",
")",
":",
"deg",
"=",
"_parsesexagesimal",
"(",
"latstr",
",",
"'latitude'",
",",
"True",
")",
"if",
"abs",
"(",
"deg",
")",
">",
"90",
":",
"raise",
"ValueError",
"(",
"'illegal latitude specification: '",
"+",
"latstr",
")",
"return",
"deg",
"*",
"D2R"
] |
Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
|
[
"Parse",
"a",
"latitude",
"formatted",
"as",
"sexagesimal",
"degrees",
"into",
"an",
"angle",
"."
] |
python
|
train
| 47.823529 |
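A quick example of the accepted format (colon-separated sexagesimal degrees, no surrounding whitespace):

lat = parsedeglat('-00:12:34.5')   # ~ -0.00366 rad
parsedeglat('91:00:00')            # raises ValueError (|latitude| > 90 deg)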
saltstack/salt
|
salt/modules/lxd.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L804-L850
|
def container_rename(name, newname, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote server. You also have to give cert and key if
you provide remote_addr and it's a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert. This is True by default, but in most
cases you want to set it to False, as LXD normally uses
self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
if container.status_code == CONTAINER_STATUS_RUNNING:
raise SaltInvocationError(
"Can't rename the running container '{0}'.".format(name)
)
container.rename(newname, wait=True)
return _pylxd_model_to_dict(container)
|
[
"def",
"container_rename",
"(",
"name",
",",
"newname",
",",
"remote_addr",
"=",
"None",
",",
"cert",
"=",
"None",
",",
"key",
"=",
"None",
",",
"verify_cert",
"=",
"True",
")",
":",
"container",
"=",
"container_get",
"(",
"name",
",",
"remote_addr",
",",
"cert",
",",
"key",
",",
"verify_cert",
",",
"_raw",
"=",
"True",
")",
"if",
"container",
".",
"status_code",
"==",
"CONTAINER_STATUS_RUNNING",
":",
"raise",
"SaltInvocationError",
"(",
"\"Can't rename the running container '{0}'.\"",
".",
"format",
"(",
"name",
")",
")",
"container",
".",
"rename",
"(",
"newname",
",",
"wait",
"=",
"True",
")",
"return",
"_pylxd_model_to_dict",
"(",
"container",
")"
] |
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote server. You also have to give cert and key if
you provide remote_addr and it's a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
|
[
"Rename",
"a",
"container"
] |
python
|
train
| 25.617021 |
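A hedged sketch of calling the execution module function directly, following the remote_addr/cert/key patterns from the docstring (all values are placeholders; the call raises SaltInvocationError if the container is still running):

new_info = container_rename(
    'old-container', 'new-container',
    remote_addr='https://myserver.lan:8443',
    cert='~/.config/lxc/client.crt',
    key='~/.config/lxc/client.key',
    verify_cert=False,
)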
Capitains/MyCapytain
|
MyCapytain/resources/texts/local/capitains/cts.py
|
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/local/capitains/cts.py#L672-L676
|
def next(self):
""" Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
"""
if self.nextId is not None:
return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId)
|
[
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"nextId",
"is",
"not",
"None",
":",
"return",
"super",
"(",
"CapitainsCtsPassage",
",",
"self",
")",
".",
"getTextualNode",
"(",
"subreference",
"=",
"self",
".",
"nextId",
")"
] |
Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
|
[
"Next",
"CapitainsCtsPassage",
"(",
"Interactive",
"CapitainsCtsPassage",
")"
] |
python
|
train
| 44.6 |
aboSamoor/polyglot
|
polyglot/load.py
|
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/load.py#L107-L117
|
def load_pos_model(lang="en", version="2"):
"""Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "pos{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
return dict(np.load(fh))
|
[
"def",
"load_pos_model",
"(",
"lang",
"=",
"\"en\"",
",",
"version",
"=",
"\"2\"",
")",
":",
"src_dir",
"=",
"\"pos{}\"",
".",
"format",
"(",
"version",
")",
"p",
"=",
"locate_resource",
"(",
"src_dir",
",",
"lang",
")",
"fh",
"=",
"_open",
"(",
"p",
")",
"return",
"dict",
"(",
"np",
".",
"load",
"(",
"fh",
")",
")"
] |
Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
|
[
"Return",
"a",
"part",
"of",
"speech",
"tagger",
"parameters",
"for",
"lang",
"and",
"of",
"version",
"version"
] |
python
|
train
| 31 |
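A hedged usage example (assumes the English POS model has already been fetched with polyglot's downloader; otherwise locate_resource will not find it):

params = load_pos_model(lang='en', version='2')
print(sorted(params))   # dict mapping parameter names to numpy arrays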
sethmlarson/virtualbox-python
|
virtualbox/library.py
|
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L25044-L25075
|
def set_visible_region(self, rectangles, count):
"""Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
"""
if not isinstance(rectangles, basestring):
raise TypeError("rectangles can only be an instance of type basestring")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
self._call("setVisibleRegion",
in_p=[rectangles, count])
|
[
"def",
"set_visible_region",
"(",
"self",
",",
"rectangles",
",",
"count",
")",
":",
"if",
"not",
"isinstance",
"(",
"rectangles",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"rectangles can only be an instance of type basestring\"",
")",
"if",
"not",
"isinstance",
"(",
"count",
",",
"baseinteger",
")",
":",
"raise",
"TypeError",
"(",
"\"count can only be an instance of type baseinteger\"",
")",
"self",
".",
"_call",
"(",
"\"setVisibleRegion\"",
",",
"in_p",
"=",
"[",
"rectangles",
",",
"count",
"]",
")"
] |
Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
|
[
"Suggests",
"a",
"new",
"visible",
"region",
"to",
"this",
"frame",
"buffer",
".",
"This",
"region",
"represents",
"the",
"area",
"of",
"the",
"VM",
"display",
"which",
"is",
"a",
"union",
"of",
"regions",
"of",
"all",
"top",
"-",
"level",
"windows",
"of",
"the",
"guest",
"operating",
"system",
"running",
"inside",
"the",
"VM",
"(",
"if",
"the",
"Guest",
"Additions",
"for",
"this",
"system",
"support",
"this",
"functionality",
")",
".",
"This",
"information",
"may",
"be",
"used",
"by",
"the",
"frontends",
"to",
"implement",
"the",
"seamless",
"desktop",
"integration",
"feature",
".",
"The",
"address",
"of",
"the",
"provided",
"array",
"must",
"be",
"in",
"the",
"process",
"space",
"of",
"this",
"IFramebuffer",
"object",
".",
"The",
"IFramebuffer",
"implementation",
"must",
"make",
"a",
"copy",
"of",
"the",
"provided",
"array",
"of",
"rectangles",
".",
"Method",
"not",
"yet",
"implemented",
"."
] |
python
|
train
| 40.1875 |
MSchnei/pyprf_feature
|
pyprf_feature/analysis/save_fit_tc_nii.py
|
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/save_fit_tc_nii.py#L39-L292
|
def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters are used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load beta parameters estimates, aka weights for time courses
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathBeta[0]), errorMsg
# Load ratio image, if fitting was obtained with suppressive surround
if lstRat is not None:
lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
aryRatio = load_res_prm(lstPathRatio,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathRatio[0]), errorMsg
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
# If desired, initiliaze array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=0.01).all(axis=1))[0][0]
# Tell user if no model was found
if lgcMdl is None:
print('---No model found')
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
|
[
"def",
"save_tc_to_nii",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"False",
",",
"lstRat",
"=",
"None",
",",
"lgcMdlRsp",
"=",
"False",
",",
"strPathHrf",
"=",
"None",
",",
"lgcSaveRam",
"=",
"False",
")",
":",
"# %% Load configuration settings that were used for fitting",
"# Load config parameters from csv file into dictionary:",
"dicCnfg",
"=",
"load_config",
"(",
"strCsvCnfg",
",",
"lgcTest",
"=",
"lgcTest",
")",
"# Load config parameters from dictionary into namespace:",
"cfg",
"=",
"cls_set_config",
"(",
"dicCnfg",
")",
"# if fitting was done with custom hrf, make sure to retrieve results with",
"# '_hrf' appendix",
"if",
"strPathHrf",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_hrf'",
"# If suppressive surround flag is on, make sure to retrieve results with",
"# '_supsur' appendix",
"if",
"lstRat",
"is",
"not",
"None",
":",
"cfg",
".",
"strPathOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_supsur'",
"cfg",
".",
"strPathMdl",
"=",
"cfg",
".",
"strPathMdl",
"+",
"'_supsur'",
"# Append 1.0 as the first entry, which is the key for fitting without",
"# surround (only centre)",
"lstRat",
".",
"insert",
"(",
"0",
",",
"1.0",
")",
"# %% Load previous pRF fitting results",
"# Derive paths to the x, y, sigma winner parameters from pyprf_feature",
"lstWnrPrm",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_x_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_y_pos.nii.gz'",
",",
"cfg",
".",
"strPathOut",
"+",
"'_SD.nii.gz'",
"]",
"# Check if fitting has been performed, i.e. whether parameter files exist",
"# Throw error message if they do not exist.",
"errorMsg",
"=",
"'Files that should have resulted from fitting do not exist. \\\n \\nPlease perform pRF fitting first, calling e.g.: \\\n \\npyprf_feature -config /path/to/my_config_file.csv'",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"0",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"1",
"]",
")",
",",
"errorMsg",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstWnrPrm",
"[",
"2",
"]",
")",
",",
"errorMsg",
"# Load the x, y, sigma winner parameters from pyprf_feature",
"aryIntGssPrm",
"=",
"load_res_prm",
"(",
"lstWnrPrm",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Load beta parameters estimates, aka weights for time courses",
"lstPathBeta",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Betas.nii.gz'",
"]",
"aryBetas",
"=",
"load_res_prm",
"(",
"lstPathBeta",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstPathBeta",
"[",
"0",
"]",
")",
",",
"errorMsg",
"# Load ratio image, if fitting was obtained with suppressive surround",
"if",
"lstRat",
"is",
"not",
"None",
":",
"lstPathRatio",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"'_Ratios.nii.gz'",
"]",
"aryRatio",
"=",
"load_res_prm",
"(",
"lstPathRatio",
",",
"lstFlsMsk",
"=",
"[",
"cfg",
".",
"strPathNiiMask",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"lstPathRatio",
"[",
"0",
"]",
")",
",",
"errorMsg",
"# Some voxels were excluded because they did not have sufficient mean",
"# and/or variance - exclude their initial parameters, too",
"# Get inclusion mask and nii header",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"hdrMsk",
",",
"aryAff",
",",
"aryFunc",
",",
"tplNiiShp",
"=",
"prep_func",
"(",
"cfg",
".",
"strPathNiiMask",
",",
"cfg",
".",
"lstPathNiiFunc",
",",
"varAvgThr",
"=",
"-",
"100",
")",
"# Apply inclusion mask",
"aryIntGssPrm",
"=",
"aryIntGssPrm",
"[",
"aryLgcVar",
",",
":",
"]",
"aryBetas",
"=",
"aryBetas",
"[",
"aryLgcVar",
",",
":",
"]",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryRatio",
"=",
"aryRatio",
"[",
"aryLgcVar",
",",
":",
"]",
"# Get array with model parameters that were fitted on a grid",
"# [x positions, y positions, sigmas]",
"aryMdlParams",
"=",
"crt_mdl_prms",
"(",
"(",
"int",
"(",
"cfg",
".",
"varVslSpcSzeX",
")",
",",
"int",
"(",
"cfg",
".",
"varVslSpcSzeY",
")",
")",
",",
"cfg",
".",
"varNum1",
",",
"cfg",
".",
"varExtXmin",
",",
"cfg",
".",
"varExtXmax",
",",
"cfg",
".",
"varNum2",
",",
"cfg",
".",
"varExtYmin",
",",
"cfg",
".",
"varExtYmax",
",",
"cfg",
".",
"varNumPrfSizes",
",",
"cfg",
".",
"varPrfStdMin",
",",
"cfg",
".",
"varPrfStdMax",
",",
"kwUnt",
"=",
"'deg'",
",",
"kwCrd",
"=",
"cfg",
".",
"strKwCrd",
")",
"# Load logical for parameter exclusion in unstimulated area",
"lgcMdlInc",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'_lgcMdlInc.npy'",
")",
"# Apply logical",
"aryMdlParams",
"=",
"aryMdlParams",
"[",
"lgcMdlInc",
",",
":",
"]",
"# Get corresponding pRF model time courses",
"aryPrfTc",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'.npy'",
")",
"# The model time courses will be preprocessed such that they are smoothed",
"# (temporally) with same factor as the data and that they will be z-scored:",
"aryPrfTc",
"=",
"prep_models",
"(",
"aryPrfTc",
",",
"varSdSmthTmp",
"=",
"cfg",
".",
"varSdSmthTmp",
")",
"if",
"lgcMdlRsp",
":",
"aryMdlRsp",
"=",
"np",
".",
"load",
"(",
"cfg",
".",
"strPathMdl",
"+",
"'_mdlRsp.npy'",
")",
"# %% Derive fitted time course models for all voxels",
"# Initialize array that will collect the fitted time courses",
"aryFitTc",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryFunc",
".",
"shape",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# If desired, initiliaze array that will collect model responses underlying",
"# the fitted time course",
"if",
"lgcMdlRsp",
":",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitMdlRsp",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"1",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"3",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"else",
":",
"aryFitMdlRsp",
"=",
"np",
".",
"zeros",
"(",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
",",
"aryMdlRsp",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# create vector that allows to check whether every voxel is visited",
"# exactly once",
"vecVxlTst",
"=",
"np",
".",
"zeros",
"(",
"aryIntGssPrm",
".",
"shape",
"[",
"0",
"]",
")",
"# Find unique rows of fitted model parameters",
"aryUnqRows",
",",
"aryUnqInd",
"=",
"fnd_unq_rws",
"(",
"aryIntGssPrm",
",",
"return_index",
"=",
"False",
",",
"return_inverse",
"=",
"True",
")",
"# Loop over all best-fitting model parameter combinations found",
"print",
"(",
"'---Assign models to voxels'",
")",
"for",
"indRow",
",",
"vecPrm",
"in",
"enumerate",
"(",
"aryUnqRows",
")",
":",
"# Get logical for voxels for which this prm combi was the best",
"lgcVxl",
"=",
"[",
"aryUnqInd",
"==",
"indRow",
"]",
"[",
"0",
"]",
"if",
"np",
".",
"all",
"(",
"np",
".",
"invert",
"(",
"lgcVxl",
")",
")",
":",
"print",
"(",
"'---No voxel found'",
")",
"# Mark those voxels that were visited",
"vecVxlTst",
"[",
"lgcVxl",
"]",
"+=",
"1",
"# Get logical index for the model number",
"# This can only be 1 index, so we directly get 1st entry of array",
"lgcMdl",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isclose",
"(",
"aryMdlParams",
",",
"vecPrm",
",",
"atol",
"=",
"0.01",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"# Tell user if no model was found",
"if",
"lgcMdl",
"is",
"None",
":",
"print",
"(",
"'---No model found'",
")",
"# Get model time courses",
"aryMdlTc",
"=",
"aryPrfTc",
"[",
"lgcMdl",
",",
"...",
"]",
"# Get beta parameter estimates",
"aryWeights",
"=",
"aryBetas",
"[",
"lgcVxl",
",",
":",
"]",
"# If fitting was done with surround suppression, find ratios for voxels",
"# and the indices of these ratios in lstRat",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryVxlRatio",
"=",
"aryRatio",
"[",
"lgcVxl",
",",
":",
"]",
"indRat",
"=",
"[",
"ind",
"for",
"ind",
",",
"rat1",
"in",
"enumerate",
"(",
"lstRat",
")",
"for",
"rat2",
"in",
"aryVxlRatio",
"[",
":",
",",
"0",
"]",
"if",
"np",
".",
"isclose",
"(",
"rat1",
",",
"rat2",
")",
"]",
"indVxl",
"=",
"range",
"(",
"len",
"(",
"indRat",
")",
")",
"# Combine model time courses and weights to yield fitted time course",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitTcTmp",
"=",
"np",
".",
"tensordot",
"(",
"aryWeights",
",",
"aryMdlTc",
",",
"axes",
"=",
"(",
"[",
"1",
"]",
",",
"[",
"0",
"]",
")",
")",
"aryFitTc",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryFitTcTmp",
"[",
"indVxl",
",",
"indRat",
",",
":",
"]",
"else",
":",
"aryFitTc",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"np",
".",
"dot",
"(",
"aryWeights",
",",
"aryMdlTc",
")",
"# If desired by user, also save the model responses per voxels",
"if",
"lgcMdlRsp",
":",
"# If desired also save the model responses that won",
"if",
"lstRat",
"is",
"not",
"None",
":",
"aryFitMdlRsp",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryMdlRsp",
"[",
"lgcMdl",
",",
":",
",",
"indRat",
",",
":",
"]",
"else",
":",
"aryFitMdlRsp",
"[",
"lgcVxl",
",",
":",
"]",
"=",
"aryMdlRsp",
"[",
"lgcMdl",
",",
":",
"]",
"# check that every voxel was visited exactly once",
"errMsg",
"=",
"'At least one voxel visited more than once for tc recreation'",
"assert",
"len",
"(",
"vecVxlTst",
")",
"==",
"np",
".",
"sum",
"(",
"vecVxlTst",
")",
",",
"errMsg",
"# %% Export preprocessed voxel time courses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_EmpTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export aryFunc as a single 4D nii file",
"print",
"(",
"'---Save empirical time courses'",
")",
"export_nii",
"(",
"aryFunc",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"# If desired by user, also save RAM-saving version of nii",
"if",
"lgcSaveRam",
":",
"strPthRamOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_EmpTc_saveRAM'",
"+",
"'.nii.gz'",
"imgNii",
"=",
"nb",
".",
"Nifti1Image",
"(",
"np",
".",
"expand_dims",
"(",
"np",
".",
"expand_dims",
"(",
"aryFunc",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
",",
"affine",
"=",
"np",
".",
"eye",
"(",
"4",
")",
")",
"nb",
".",
"save",
"(",
"imgNii",
",",
"strPthRamOut",
")",
"# %% Export fitted time courses and, if desired, model responses as nii",
"# List with name suffices of output images:",
"lstNiiNames",
"=",
"[",
"'_FitTc'",
"]",
"# Create full path names from nii file names and output path",
"lstNiiNames",
"=",
"[",
"cfg",
".",
"strPathOut",
"+",
"strNii",
"+",
"'.nii.gz'",
"for",
"strNii",
"in",
"lstNiiNames",
"]",
"# export aryFitTc as a single 4D nii file",
"print",
"(",
"'---Save fitted time courses'",
")",
"export_nii",
"(",
"aryFitTc",
",",
"lstNiiNames",
",",
"aryLgcMsk",
",",
"aryLgcVar",
",",
"tplNiiShp",
",",
"aryAff",
",",
"hdrMsk",
",",
"outFormat",
"=",
"'4D'",
")",
"print",
"(",
"'------Done.'",
")",
"if",
"lgcMdlRsp",
":",
"# Create full path name",
"strNpyName",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitMdlRsp'",
"+",
"'.npy'",
"# Save aryFitMdlRsp as npy file",
"print",
"(",
"'---Save fitted model responses'",
")",
"np",
".",
"save",
"(",
"strNpyName",
",",
"aryFitMdlRsp",
")",
"print",
"(",
"'------Done.'",
")",
"# Save the mask so we know which voxels these parameters belonged to",
"strNpyMskName",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitMdlRsp_Mask'",
"+",
"'.npy'",
"aryLgcMsk",
"[",
"aryLgcMsk",
"]",
"=",
"aryLgcVar",
"print",
"(",
"'---Save mask for fitted model responses'",
")",
"np",
".",
"save",
"(",
"strNpyMskName",
",",
"aryLgcMsk",
")",
"print",
"(",
"'------Done.'",
")",
"# If desired by user, also save RAM-saving version of nii",
"if",
"lgcSaveRam",
":",
"strPthRamOut",
"=",
"cfg",
".",
"strPathOut",
"+",
"'_FitTc_saveRAM'",
"+",
"'.nii.gz'",
"imgNii",
"=",
"nb",
".",
"Nifti1Image",
"(",
"np",
".",
"expand_dims",
"(",
"np",
".",
"expand_dims",
"(",
"aryFitTc",
",",
"axis",
"=",
"1",
")",
",",
"axis",
"=",
"1",
")",
",",
"affine",
"=",
"np",
".",
"eye",
"(",
"4",
")",
")",
"nb",
".",
"save",
"(",
"imgNii",
",",
"strPthRamOut",
")"
] |
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters are used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any values but, instead, saves nii files
to disk.
|
[
"Save",
"empirical",
"and",
"fitted",
"time",
"courses",
"to",
"nii",
"file",
"format",
"."
] |
python
|
train
| 41.066929 |
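The RAM-saving export in the record above wraps a 2-D voxels-by-volumes array into a 4-D Nifti1Image by calling np.expand_dims twice before saving. A stand-alone nibabel sketch of that step (the array shape and output filename are illustrative, not taken from the record):

import numpy as np
import nibabel as nb

# Hypothetical voxels x volumes array standing in for aryFunc / aryFitTc.
aryData = np.random.rand(1000, 300).astype(np.float32)

# Two singleton spatial axes turn the 2-D array into a 4-D image volume.
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryData, axis=1), axis=1),
                        affine=np.eye(4))
nb.save(imgNii, 'EmpTc_saveRAM.nii.gz')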
elastic/elasticsearch-py
|
elasticsearch/client/indices.py
|
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/indices.py#L478-L496
|
def delete_alias(self, index, name, params=None):
"""
Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"DELETE", _make_path(index, "_alias", name), params=params
)
|
[
"def",
"delete_alias",
"(",
"self",
",",
"index",
",",
"name",
",",
"params",
"=",
"None",
")",
":",
"for",
"param",
"in",
"(",
"index",
",",
"name",
")",
":",
"if",
"param",
"in",
"SKIP_IN_PATH",
":",
"raise",
"ValueError",
"(",
"\"Empty value passed for a required argument.\"",
")",
"return",
"self",
".",
"transport",
".",
"perform_request",
"(",
"\"DELETE\"",
",",
"_make_path",
"(",
"index",
",",
"\"_alias\"",
",",
"name",
")",
",",
"params",
"=",
"params",
")"
] |
Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation
|
[
"Delete",
"specific",
"alias",
".",
"<http",
":",
"//",
"www",
".",
"elastic",
".",
"co",
"/",
"guide",
"/",
"en",
"/",
"elasticsearch",
"/",
"reference",
"/",
"current",
"/",
"indices",
"-",
"aliases",
".",
"html",
">",
"_"
] |
python
|
train
| 46.789474 |
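A minimal usage sketch for the delete_alias call in the record above, assuming a local Elasticsearch node; the index and alias names are illustrative:

from elasticsearch import Elasticsearch

es = Elasticsearch()  # defaults to localhost:9200 (hypothetical local node)
# Remove the alias 'logs-current' from the index 'logs-2019'.
es.indices.delete_alias(index="logs-2019", name="logs-current")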
sprockets/sprockets.mixins.mediatype
|
sprockets/mixins/mediatype/content.py
|
https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L344-L359
|
def send_response(self, body, set_content_type=True):
"""
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
"""
settings = get_settings(self.application, force_instance=True)
handler = settings[self.get_response_content_type()]
content_type, data_bytes = handler.to_bytes(body)
if set_content_type:
self.set_header('Content-Type', content_type)
self.add_header('Vary', 'Accept')
self.write(data_bytes)
|
[
"def",
"send_response",
"(",
"self",
",",
"body",
",",
"set_content_type",
"=",
"True",
")",
":",
"settings",
"=",
"get_settings",
"(",
"self",
".",
"application",
",",
"force_instance",
"=",
"True",
")",
"handler",
"=",
"settings",
"[",
"self",
".",
"get_response_content_type",
"(",
")",
"]",
"content_type",
",",
"data_bytes",
"=",
"handler",
".",
"to_bytes",
"(",
"body",
")",
"if",
"set_content_type",
":",
"self",
".",
"set_header",
"(",
"'Content-Type'",
",",
"content_type",
")",
"self",
".",
"add_header",
"(",
"'Vary'",
",",
"'Accept'",
")",
"self",
".",
"write",
"(",
"data_bytes",
")"
] |
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
|
[
"Serialize",
"and",
"send",
"body",
"in",
"the",
"response",
"."
] |
python
|
train
| 40.5625 |
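A hedged sketch of calling send_response from a Tornado handler that uses the mixin in the record above; it assumes the application has already registered a JSON transcoder for the negotiated content type (setup omitted), and the handler and payload are illustrative:

from sprockets.mixins.mediatype import content
from tornado import web

class ThingHandler(content.ContentMixin, web.RequestHandler):
    def get(self):
        # Serializes the dict with the configured content handler and sets
        # Content-Type unless set_content_type=False is passed.
        self.send_response({"id": 1, "name": "example"})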
scot-dev/scot
|
scot/matfiles.py
|
https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/matfiles.py#L25-L33
|
def _check_keys(dictionary):
"""
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
"""
for key in dictionary:
if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
dictionary[key] = _todict(dictionary[key])
return dictionary
|
[
"def",
"_check_keys",
"(",
"dictionary",
")",
":",
"for",
"key",
"in",
"dictionary",
":",
"if",
"isinstance",
"(",
"dictionary",
"[",
"key",
"]",
",",
"matlab",
".",
"mio5_params",
".",
"mat_struct",
")",
":",
"dictionary",
"[",
"key",
"]",
"=",
"_todict",
"(",
"dictionary",
"[",
"key",
"]",
")",
"return",
"dictionary"
] |
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
|
[
"checks",
"if",
"entries",
"in",
"dictionary",
"are",
"mat",
"-",
"objects",
".",
"If",
"yes",
"todict",
"is",
"called",
"to",
"change",
"them",
"to",
"nested",
"dictionaries"
] |
python
|
train
| 36.666667 |
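_check_keys and its companion _todict follow a common recipe for turning scipy.io mat_struct objects into plain dictionaries. A stand-alone sketch of the same idea, written independently of scot's private helpers (the file path is illustrative):

import scipy.io as sio
from scipy.io import matlab

def mat_to_dict(path):
    # struct_as_record=False yields mat_struct objects that can be walked recursively.
    data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
    return {k: _convert(v) for k, v in data.items() if not k.startswith('__')}

def _convert(value):
    if isinstance(value, matlab.mio5_params.mat_struct):
        return {name: _convert(getattr(value, name)) for name in value._fieldnames}
    return value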
astrocatalogs/astrocats
|
astrocats/catalog/spectrum.py
|
https://github.com/astrocatalogs/astrocats/blob/11abc3131c6366ecd23964369e55ff264add7805/astrocats/catalog/spectrum.py#L135-L158
|
def is_duplicate_of(self, other):
"""Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
lambda1, flux1 = tuple(row[0:2])
if (self._KEYS.DATA not in other or
ri > len(other[self._KEYS.DATA])):
break
lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
minlambdalen = min(len(lambda1), len(lambda2))
minfluxlen = min(len(flux1), len(flux2))
if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
float(flux1[:minfluxlen + 1]) != 0.0):
row_matches += 1
# Five row matches should be enough to be sure spectrum is a dupe.
if row_matches >= 5:
return True
# Matches need to happen in the first 10 rows.
if ri >= 10:
break
return False
|
[
"def",
"is_duplicate_of",
"(",
"self",
",",
"other",
")",
":",
"if",
"super",
"(",
"Spectrum",
",",
"self",
")",
".",
"is_duplicate_of",
"(",
"other",
")",
":",
"return",
"True",
"row_matches",
"=",
"0",
"for",
"ri",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"get",
"(",
"self",
".",
"_KEYS",
".",
"DATA",
",",
"[",
"]",
")",
")",
":",
"lambda1",
",",
"flux1",
"=",
"tuple",
"(",
"row",
"[",
"0",
":",
"2",
"]",
")",
"if",
"(",
"self",
".",
"_KEYS",
".",
"DATA",
"not",
"in",
"other",
"or",
"ri",
">",
"len",
"(",
"other",
"[",
"self",
".",
"_KEYS",
".",
"DATA",
"]",
")",
")",
":",
"break",
"lambda2",
",",
"flux2",
"=",
"tuple",
"(",
"other",
"[",
"self",
".",
"_KEYS",
".",
"DATA",
"]",
"[",
"ri",
"]",
"[",
"0",
":",
"2",
"]",
")",
"minlambdalen",
"=",
"min",
"(",
"len",
"(",
"lambda1",
")",
",",
"len",
"(",
"lambda2",
")",
")",
"minfluxlen",
"=",
"min",
"(",
"len",
"(",
"flux1",
")",
",",
"len",
"(",
"flux2",
")",
")",
"if",
"(",
"lambda1",
"[",
":",
"minlambdalen",
"+",
"1",
"]",
"==",
"lambda2",
"[",
":",
"minlambdalen",
"+",
"1",
"]",
"and",
"flux1",
"[",
":",
"minfluxlen",
"+",
"1",
"]",
"==",
"flux2",
"[",
":",
"minfluxlen",
"+",
"1",
"]",
"and",
"float",
"(",
"flux1",
"[",
":",
"minfluxlen",
"+",
"1",
"]",
")",
"!=",
"0.0",
")",
":",
"row_matches",
"+=",
"1",
"# Five row matches should be enough to be sure spectrum is a dupe.",
"if",
"row_matches",
">=",
"5",
":",
"return",
"True",
"# Matches need to happen in the first 10 rows.",
"if",
"ri",
">=",
"10",
":",
"break",
"return",
"False"
] |
Check if spectrum is duplicate of another.
|
[
"Check",
"if",
"spectrum",
"is",
"duplicate",
"of",
"another",
"."
] |
python
|
train
| 45.666667 |
pndurette/gTTS
|
gtts/tts.py
|
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L238-L250
|
def save(self, savefile):
"""Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
"""
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug("Saved to %s", savefile)
|
[
"def",
"save",
"(",
"self",
",",
"savefile",
")",
":",
"with",
"open",
"(",
"str",
"(",
"savefile",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"self",
".",
"write_to_fp",
"(",
"f",
")",
"log",
".",
"debug",
"(",
"\"Saved to %s\"",
",",
"savefile",
")"
] |
Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
|
[
"Do",
"the",
"TTS",
"API",
"request",
"and",
"write",
"result",
"to",
"file",
"."
] |
python
|
train
| 30.461538 |
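A minimal usage sketch for the save method in the record above; the text and output filename are illustrative:

from gtts import gTTS

tts = gTTS("hello world", lang="en")
tts.save("hello.mp3")  # performs the TTS request and writes the mp3 to disk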
peri-source/peri
|
peri/runner.py
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/runner.py#L583-L617
|
def _pick_state_im_name(state_name, im_name, use_full_path=False):
"""
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
"""
initial_dir = os.getcwd()
if (state_name is None) or (im_name is None):
wid = tk.Tk()
wid.withdraw()
if state_name is None:
state_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select pre-featured state')
os.chdir(os.path.dirname(state_name))
if im_name is None:
im_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select new image')
if (not use_full_path) and (os.path.dirname(im_name) != ''):
im_path = os.path.dirname(im_name)
os.chdir(im_path)
im_name = os.path.basename(im_name)
else:
os.chdir(initial_dir)
return state_name, im_name
|
[
"def",
"_pick_state_im_name",
"(",
"state_name",
",",
"im_name",
",",
"use_full_path",
"=",
"False",
")",
":",
"initial_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"(",
"state_name",
"is",
"None",
")",
"or",
"(",
"im_name",
"is",
"None",
")",
":",
"wid",
"=",
"tk",
".",
"Tk",
"(",
")",
"wid",
".",
"withdraw",
"(",
")",
"if",
"state_name",
"is",
"None",
":",
"state_name",
"=",
"tkfd",
".",
"askopenfilename",
"(",
"initialdir",
"=",
"initial_dir",
",",
"title",
"=",
"'Select pre-featured state'",
")",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"state_name",
")",
")",
"if",
"im_name",
"is",
"None",
":",
"im_name",
"=",
"tkfd",
".",
"askopenfilename",
"(",
"initialdir",
"=",
"initial_dir",
",",
"title",
"=",
"'Select new image'",
")",
"if",
"(",
"not",
"use_full_path",
")",
"and",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"im_name",
")",
"!=",
"''",
")",
":",
"im_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"im_name",
")",
"os",
".",
"chdir",
"(",
"im_path",
")",
"im_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"im_name",
")",
"else",
":",
"os",
".",
"chdir",
"(",
"initial_dir",
")",
"return",
"state_name",
",",
"im_name"
] |
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
|
[
"If",
"state_name",
"or",
"im_name",
"is",
"None",
"picks",
"them",
"interactively",
"through",
"Tk",
"and",
"then",
"sets",
"with",
"or",
"without",
"the",
"full",
"path",
"."
] |
python
|
valid
| 36.285714 |
Baguage/django-auth-pubtkt
|
django_auth_pubtkt/auth_pubtkt.py
|
https://github.com/Baguage/django-auth-pubtkt/blob/d3f4284212ffbfdc3588929a31e36a4cc7f39786/django_auth_pubtkt/auth_pubtkt.py#L45-L73
|
def verify_ticket_signature(self, data, sig):
"""Verify ticket signature. """
try:
signature = base64.b64decode(sig)
except TypeError as e:
if hasattr(self, "debug"):
print("Exception in function base64.b64decode. File %s" % (__file__))
print("%s" % e)
return False
if six.PY3:
# To avoid "TypeError: Unicode-objects must be encoded before hashing'
data = data.encode('utf-8')
digest = hashlib.sha1(data).digest()
if isinstance(self.pub_key, RSA.RSA_pub):
try:
self.pub_key.verify(digest, signature, 'sha1')
except RSA.RSAError:
return False
return True
if isinstance(self.pub_key, DSA.DSA_pub):
try:
return self.pub_key.verify_asn1(digest, signature)
except DSA.DSAError as e:
if hasattr(self, "debug"):
print("Exception in function self.pub_key.verify_asn1(digest, signature). File %s" % (__file__))
print("%s" % e)
return False
# Unknown key type
return False
|
[
"def",
"verify_ticket_signature",
"(",
"self",
",",
"data",
",",
"sig",
")",
":",
"try",
":",
"signature",
"=",
"base64",
".",
"b64decode",
"(",
"sig",
")",
"except",
"TypeError",
"as",
"e",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"debug\"",
")",
":",
"print",
"(",
"\"Exception in function base64.b64decode. File %s\"",
"%",
"(",
"__file__",
")",
")",
"print",
"(",
"\"%s\"",
"%",
"e",
")",
"return",
"False",
"if",
"six",
".",
"PY3",
":",
"# To avoid \"TypeError: Unicode-objects must be encoded before hashing'",
"data",
"=",
"data",
".",
"encode",
"(",
"'utf-8'",
")",
"digest",
"=",
"hashlib",
".",
"sha1",
"(",
"data",
")",
".",
"digest",
"(",
")",
"if",
"isinstance",
"(",
"self",
".",
"pub_key",
",",
"RSA",
".",
"RSA_pub",
")",
":",
"try",
":",
"self",
".",
"pub_key",
".",
"verify",
"(",
"digest",
",",
"signature",
",",
"'sha1'",
")",
"except",
"RSA",
".",
"RSAError",
":",
"return",
"False",
"return",
"True",
"if",
"isinstance",
"(",
"self",
".",
"pub_key",
",",
"DSA",
".",
"DSA_pub",
")",
":",
"try",
":",
"return",
"self",
".",
"pub_key",
".",
"verify_asn1",
"(",
"digest",
",",
"signature",
")",
"except",
"DSA",
".",
"DSAError",
"as",
"e",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"debug\"",
")",
":",
"print",
"(",
"\"Exception in function self.pub_key.verify_asn1(digest, signature). File %s\"",
"%",
"(",
"__file__",
")",
")",
"print",
"(",
"\"%s\"",
"%",
"e",
")",
"return",
"False",
"# Unknown key type",
"return",
"False"
] |
Verify ticket signature.
|
[
"Verify",
"ticket",
"signature",
"."
] |
python
|
train
| 40.896552 |
xapple/plumbing
|
plumbing/databases/sqlite_database.py
|
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/databases/sqlite_database.py#L134-L140
|
def new_connection(self):
"""Make a new connection."""
if not self.prepared: self.prepare()
con = sqlite3.connect(self.path, isolation_level=self.isolation)
con.row_factory = self.factory
if self.text_fact: con.text_factory = self.text_fact
return con
|
[
"def",
"new_connection",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"prepared",
":",
"self",
".",
"prepare",
"(",
")",
"con",
"=",
"sqlite3",
".",
"connect",
"(",
"self",
".",
"path",
",",
"isolation_level",
"=",
"self",
".",
"isolation",
")",
"con",
".",
"row_factory",
"=",
"self",
".",
"factory",
"if",
"self",
".",
"text_fact",
":",
"con",
".",
"text_factory",
"=",
"self",
".",
"text_fact",
"return",
"con"
] |
Make a new connection.
|
[
"Make",
"a",
"new",
"connection",
"."
] |
python
|
train
| 41.857143 |
fastai/fastai
|
fastai/vision/image.py
|
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L547-L556
|
def _affine_mult(c:FlowField,m:AffineMatrix)->FlowField:
"Multiply `c` by `m` - can adjust for rectangular shaped `c`."
if m is None: return c
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
c.flow = torch.addmm(m[:2,2], c.flow, m[:2,:2].t()).view(size)
return c
|
[
"def",
"_affine_mult",
"(",
"c",
":",
"FlowField",
",",
"m",
":",
"AffineMatrix",
")",
"->",
"FlowField",
":",
"if",
"m",
"is",
"None",
":",
"return",
"c",
"size",
"=",
"c",
".",
"flow",
".",
"size",
"(",
")",
"h",
",",
"w",
"=",
"c",
".",
"size",
"m",
"[",
"0",
",",
"1",
"]",
"*=",
"h",
"/",
"w",
"m",
"[",
"1",
",",
"0",
"]",
"*=",
"w",
"/",
"h",
"c",
".",
"flow",
"=",
"c",
".",
"flow",
".",
"view",
"(",
"-",
"1",
",",
"2",
")",
"c",
".",
"flow",
"=",
"torch",
".",
"addmm",
"(",
"m",
"[",
":",
"2",
",",
"2",
"]",
",",
"c",
".",
"flow",
",",
"m",
"[",
":",
"2",
",",
":",
"2",
"]",
".",
"t",
"(",
")",
")",
".",
"view",
"(",
"size",
")",
"return",
"c"
] |
Multiply `c` by `m` - can adjust for rectangular shaped `c`.
|
[
"Multiply",
"c",
"by",
"m",
"-",
"can",
"adjust",
"for",
"rectangular",
"shaped",
"c",
"."
] |
python
|
train
| 33.1 |
saltstack/salt
|
salt/renderers/aws_kms.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/aws_kms.py#L125-L151
|
def _session():
'''
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
'''
profile_name = _cfg('profile_name')
if profile_name:
log.info('Using the "%s" aws profile.', profile_name)
else:
log.info('aws_kms:profile_name is not set in salt. Falling back on default profile.')
try:
return boto3.Session(profile_name=profile_name)
except botocore.exceptions.ProfileNotFound as orig_exc:
err_msg = 'Boto3 could not find the "{}" profile configured in Salt.'.format(
profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc)
except botocore.exceptions.NoRegionError as orig_exc:
err_msg = ('Boto3 was unable to determine the AWS '
'endpoint region using the {} profile.').format(profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc)
|
[
"def",
"_session",
"(",
")",
":",
"profile_name",
"=",
"_cfg",
"(",
"'profile_name'",
")",
"if",
"profile_name",
":",
"log",
".",
"info",
"(",
"'Using the \"%s\" aws profile.'",
",",
"profile_name",
")",
"else",
":",
"log",
".",
"info",
"(",
"'aws_kms:profile_name is not set in salt. Falling back on default profile.'",
")",
"try",
":",
"return",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"profile_name",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ProfileNotFound",
"as",
"orig_exc",
":",
"err_msg",
"=",
"'Boto3 could not find the \"{}\" profile configured in Salt.'",
".",
"format",
"(",
"profile_name",
"or",
"'default'",
")",
"config_error",
"=",
"salt",
".",
"exceptions",
".",
"SaltConfigurationError",
"(",
"err_msg",
")",
"six",
".",
"raise_from",
"(",
"config_error",
",",
"orig_exc",
")",
"except",
"botocore",
".",
"exceptions",
".",
"NoRegionError",
"as",
"orig_exc",
":",
"err_msg",
"=",
"(",
"'Boto3 was unable to determine the AWS '",
"'endpoint region using the {} profile.'",
")",
".",
"format",
"(",
"profile_name",
"or",
"'default'",
")",
"config_error",
"=",
"salt",
".",
"exceptions",
".",
"SaltConfigurationError",
"(",
"err_msg",
")",
"six",
".",
"raise_from",
"(",
"config_error",
",",
"orig_exc",
")"
] |
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
|
[
"Return",
"the",
"boto3",
"session",
"to",
"use",
"for",
"the",
"KMS",
"client",
"."
] |
python
|
train
| 46.555556 |
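The profile fallback in _session relies on boto3 treating profile_name=None as "use the default profile / credential chain". A small stand-alone sketch of that pattern (the function name and error message are illustrative, not salt API):

import boto3
import botocore.exceptions

def make_session(profile_name=None):
    # profile_name=None falls back to the default profile or credential chain.
    try:
        return boto3.Session(profile_name=profile_name)
    except botocore.exceptions.ProfileNotFound:
        raise RuntimeError('AWS profile "{}" is not configured'.format(profile_name or 'default'))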
cozy/python_cozy_management
|
cozy_management/couchdb.py
|
https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/couchdb.py#L62-L81
|
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
'''
Launch a curl on CouchDB instance
'''
(username, password) = get_admin()
if username is None:
auth = None
else:
auth = (username, password)
if method == 'PUT':
req = requests.put('{}{}'.format(base_url, url), auth=auth, data=data)
elif method == 'DELETE':
req = requests.delete('{}{}'.format(base_url, url), auth=auth)
else:
req = requests.get('{}{}'.format(base_url, url), auth=auth)
if req.status_code not in [200, 201]:
raise HTTPError('{}: {}'.format(req.status_code, req.text))
return req
|
[
"def",
"curl_couchdb",
"(",
"url",
",",
"method",
"=",
"'GET'",
",",
"base_url",
"=",
"BASE_URL",
",",
"data",
"=",
"None",
")",
":",
"(",
"username",
",",
"password",
")",
"=",
"get_admin",
"(",
")",
"if",
"username",
"is",
"None",
":",
"auth",
"=",
"None",
"else",
":",
"auth",
"=",
"(",
"username",
",",
"password",
")",
"if",
"method",
"==",
"'PUT'",
":",
"req",
"=",
"requests",
".",
"put",
"(",
"'{}{}'",
".",
"format",
"(",
"base_url",
",",
"url",
")",
",",
"auth",
"=",
"auth",
",",
"data",
"=",
"data",
")",
"elif",
"method",
"==",
"'DELETE'",
":",
"req",
"=",
"requests",
".",
"delete",
"(",
"'{}{}'",
".",
"format",
"(",
"base_url",
",",
"url",
")",
",",
"auth",
"=",
"auth",
")",
"else",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"'{}{}'",
".",
"format",
"(",
"base_url",
",",
"url",
")",
",",
"auth",
"=",
"auth",
")",
"if",
"req",
".",
"status_code",
"not",
"in",
"[",
"200",
",",
"201",
"]",
":",
"raise",
"HTTPError",
"(",
"'{}: {}'",
".",
"format",
"(",
"req",
".",
"status_code",
",",
"req",
".",
"text",
")",
")",
"return",
"req"
] |
Launch a curl on CouchDB instance
|
[
"Launch",
"a",
"curl",
"on",
"CouchDB",
"instance"
] |
python
|
train
| 32.15 |
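A hedged usage sketch for curl_couchdb, assuming the module-level BASE_URL points at a reachable CouchDB instance; the database name is illustrative:

from cozy_management.couchdb import curl_couchdb

curl_couchdb('/cozy-example', method='PUT')   # create a database
req = curl_couchdb('/_all_dbs')               # list databases
print(req.json())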
mozilla/socorrolib
|
socorrolib/app/fetch_transform_save_app.py
|
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/app/fetch_transform_save_app.py#L188-L200
|
def _infinite_iterator(self):
"""this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values"""
while True:
for crash_id in self._basic_iterator():
if self._filter_disallowed_values(crash_id):
continue
yield crash_id
|
[
"def",
"_infinite_iterator",
"(",
"self",
")",
":",
"while",
"True",
":",
"for",
"crash_id",
"in",
"self",
".",
"_basic_iterator",
"(",
")",
":",
"if",
"self",
".",
"_filter_disallowed_values",
"(",
"crash_id",
")",
":",
"continue",
"yield",
"crash_id"
] |
this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values
|
[
"this",
"iterator",
"wraps",
"the",
"_basic_iterator",
"when",
"the",
"configuration",
"specifies",
"that",
"the",
"number_of_submissions",
"is",
"set",
"to",
"forever",
".",
"Whenever",
"the",
"_basic_iterator",
"is",
"exhausted",
"it",
"is",
"called",
"again",
"to",
"restart",
"the",
"iteration",
".",
"It",
"is",
"up",
"to",
"the",
"implementation",
"of",
"the",
"innermost",
"iterator",
"to",
"define",
"what",
"starting",
"over",
"means",
".",
"Some",
"iterators",
"may",
"repeat",
"exactly",
"what",
"they",
"did",
"before",
"while",
"others",
"may",
"iterate",
"over",
"new",
"values"
] |
python
|
train
| 52.538462 |
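The restart-on-exhaustion behaviour described in the docstring can be illustrated with a small generic generator; the names here are illustrative and not part of the socorrolib API:

def forever(make_iterator, skip=lambda item: False):
    # Re-create the inner iterator each time it runs out, mirroring how
    # _infinite_iterator wraps _basic_iterator and filters disallowed values.
    while True:
        for item in make_iterator():
            if skip(item):
                continue
            yield item

# Example: cycle over a fixed list indefinitely, skipping the value 2.
gen = forever(lambda: iter([1, 2, 3]), skip=lambda n: n == 2)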
Richienb/quilt
|
src/quilt_lang/__init__.py
|
https://github.com/Richienb/quilt/blob/4a659cac66f5286ad046d54a12fd850be5606643/src/quilt_lang/__init__.py#L200-L235
|
def binboolflip(item):
"""
Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified.
"""
if item in [0, False, 1, True]:
return int(item) if isinstance(item, bool) else bool(item)
# Raise a warning
raise ValueError("Invalid item specified.")
|
[
"def",
"binboolflip",
"(",
"item",
")",
":",
"if",
"item",
"in",
"[",
"0",
",",
"False",
",",
"1",
",",
"True",
"]",
":",
"return",
"int",
"(",
"item",
")",
"if",
"isinstance",
"(",
"item",
",",
"bool",
")",
"else",
"bool",
"(",
"item",
")",
"# Raise a warning",
"raise",
"ValueError",
"(",
"\"Invalid item specified.\"",
")"
] |
Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified.
|
[
"Convert",
"0",
"or",
"1",
"to",
"False",
"or",
"True",
"(",
"or",
"vice",
"versa",
")",
".",
"The",
"converter",
"works",
"as",
"follows",
":"
] |
python
|
train
| 18.194444 |
saltstack/salt
|
salt/modules/glusterfs.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glusterfs.py#L442-L472
|
def start_volume(name, force=False):
'''
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
'''
cmd = 'volume start {0}'.format(name)
if force:
cmd = '{0} force'.format(cmd)
volinfo = info(name)
if name not in volinfo:
log.error("Cannot start non-existing volume %s", name)
return False
if not force and volinfo[name]['status'] == '1':
log.info("Volume %s already started", name)
return True
return _gluster(cmd)
|
[
"def",
"start_volume",
"(",
"name",
",",
"force",
"=",
"False",
")",
":",
"cmd",
"=",
"'volume start {0}'",
".",
"format",
"(",
"name",
")",
"if",
"force",
":",
"cmd",
"=",
"'{0} force'",
".",
"format",
"(",
"cmd",
")",
"volinfo",
"=",
"info",
"(",
"name",
")",
"if",
"name",
"not",
"in",
"volinfo",
":",
"log",
".",
"error",
"(",
"\"Cannot start non-existing volume %s\"",
",",
"name",
")",
"return",
"False",
"if",
"not",
"force",
"and",
"volinfo",
"[",
"name",
"]",
"[",
"'status'",
"]",
"==",
"'1'",
":",
"log",
".",
"info",
"(",
"\"Volume %s already started\"",
",",
"name",
")",
"return",
"True",
"return",
"_gluster",
"(",
"cmd",
")"
] |
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
|
[
"Start",
"a",
"gluster",
"volume"
] |
python
|
train
| 21.225806 |
tensorflow/tensor2tensor
|
tensor2tensor/utils/expert_utils.py
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/expert_utils.py#L405-L434
|
def _my_top_k(x, k):
"""GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k]
"""
if k > 10:
return tf.nn.top_k(x, k)
values = []
indices = []
depth = tf.shape(x)[1]
for i in range(k):
values.append(tf.reduce_max(x, 1))
argmax = tf.argmax(x, 1)
indices.append(argmax)
if i + 1 < k:
x += tf.one_hot(argmax, depth, -1e9)
return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
|
[
"def",
"_my_top_k",
"(",
"x",
",",
"k",
")",
":",
"if",
"k",
">",
"10",
":",
"return",
"tf",
".",
"nn",
".",
"top_k",
"(",
"x",
",",
"k",
")",
"values",
"=",
"[",
"]",
"indices",
"=",
"[",
"]",
"depth",
"=",
"tf",
".",
"shape",
"(",
"x",
")",
"[",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"k",
")",
":",
"values",
".",
"append",
"(",
"tf",
".",
"reduce_max",
"(",
"x",
",",
"1",
")",
")",
"argmax",
"=",
"tf",
".",
"argmax",
"(",
"x",
",",
"1",
")",
"indices",
".",
"append",
"(",
"argmax",
")",
"if",
"i",
"+",
"1",
"<",
"k",
":",
"x",
"+=",
"tf",
".",
"one_hot",
"(",
"argmax",
",",
"depth",
",",
"-",
"1e9",
")",
"return",
"tf",
".",
"stack",
"(",
"values",
",",
"axis",
"=",
"1",
")",
",",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"stack",
"(",
"indices",
",",
"axis",
"=",
"1",
")",
")"
] |
GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k]
|
[
"GPU",
"-",
"compatible",
"version",
"of",
"top",
"-",
"k",
"that",
"works",
"for",
"very",
"small",
"constant",
"k",
"."
] |
python
|
train
| 29.333333 |
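The repeated-argmax trick in _my_top_k is easier to see in a small NumPy analogue (purely illustrative, not TensorFlow code):

import numpy as np

def repeated_argmax_top_k(x, k):
    # x is 2-D [batch, depth]; returns (values, indices) of shape [batch, k].
    # After each pick, the chosen entry gets -1e9 added so it cannot win again.
    x = x.astype(np.float64).copy()
    rows = np.arange(x.shape[0])
    values, indices = [], []
    for i in range(k):
        idx = x.argmax(axis=1)
        values.append(x[rows, idx])
        indices.append(idx)
        if i + 1 < k:
            x[rows, idx] += -1e9
    return np.stack(values, axis=1), np.stack(indices, axis=1)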
klen/makesite
|
makesite/main.py
|
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/main.py#L161-L168
|
def shell(args):
" A helper command to be used for shell integration "
print
print "# Makesite integration "
print "# ==================== "
print "export MAKESITE_HOME=%s" % args.path
print "source %s" % op.join(settings.BASEDIR, 'shell.sh')
print
|
[
"def",
"shell",
"(",
"args",
")",
":",
"print",
"print",
"\"# Makesite integration \"",
"print",
"\"# ==================== \"",
"print",
"\"export MAKESITE_HOME=%s\"",
"%",
"args",
".",
"path",
"print",
"\"source %s\"",
"%",
"op",
".",
"join",
"(",
"settings",
".",
"BASEDIR",
",",
"'shell.sh'",
")",
"print"
] |
A helper command to be used for shell integration
|
[
"A",
"helper",
"command",
"to",
"be",
"used",
"for",
"shell",
"integration"
] |
python
|
train
| 33.625 |
Garee/pytodoist
|
pytodoist/todoist.py
|
https://github.com/Garee/pytodoist/blob/3359cbff485ebdbbb4ffbd58d71e21a817874dd7/pytodoist/todoist.py#L625-L639
|
def get_label(self, label_name):
"""Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family')
"""
for label in self.get_labels():
if label.name == label_name:
return label
|
[
"def",
"get_label",
"(",
"self",
",",
"label_name",
")",
":",
"for",
"label",
"in",
"self",
".",
"get_labels",
"(",
")",
":",
"if",
"label",
".",
"name",
"==",
"label_name",
":",
"return",
"label"
] |
Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family')
|
[
"Return",
"the",
"user",
"s",
"label",
"that",
"has",
"a",
"given",
"name",
"."
] |
python
|
train
| 37.333333 |
omza/azurestoragewrap
|
azurestoragewrap/blob.py
|
https://github.com/omza/azurestoragewrap/blob/976878e95d82ff0f7d8a00a5e4a7a3fb6268ab08/azurestoragewrap/blob.py#L449-L473
|
def download(self, storagemodel:object, modeldefinition = None):
""" load blob from storage into StorageBlobModelInstance """
if (storagemodel.name is None):
# No content to download
raise AzureStorageWrapException(storagemodel, "StorageBlobModel does not contain content nor content settings")
else:
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if modeldefinition['blobservice'].exists(container_name, blob_name):
""" download blob """
blob = modeldefinition['blobservice'].get_blob_to_bytes(
container_name=modeldefinition['container'],
blob_name=storagemodel.name
)
storagemodel.__mergeblob__(blob)
except Exception as e:
msg = 'can not load blob from container {} because {!s}'.format(storagemodel._containername, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return storagemodel
|
[
"def",
"download",
"(",
"self",
",",
"storagemodel",
":",
"object",
",",
"modeldefinition",
"=",
"None",
")",
":",
"if",
"(",
"storagemodel",
".",
"name",
"is",
"None",
")",
":",
"# No content to download",
"raise",
"AzureStorageWrapException",
"(",
"storagemodel",
",",
"\"StorageBlobModel does not contain content nor content settings\"",
")",
"else",
":",
"container_name",
"=",
"modeldefinition",
"[",
"'container'",
"]",
"blob_name",
"=",
"storagemodel",
".",
"name",
"try",
":",
"if",
"modeldefinition",
"[",
"'blobservice'",
"]",
".",
"exists",
"(",
"container_name",
",",
"blob_name",
")",
":",
"\"\"\" download blob \"\"\"",
"blob",
"=",
"modeldefinition",
"[",
"'blobservice'",
"]",
".",
"get_blob_to_bytes",
"(",
"container_name",
"=",
"modeldefinition",
"[",
"'container'",
"]",
",",
"blob_name",
"=",
"storagemodel",
".",
"name",
")",
"storagemodel",
".",
"__mergeblob__",
"(",
"blob",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'can not load blob from container {} because {!s}'",
".",
"format",
"(",
"storagemodel",
".",
"_containername",
",",
"e",
")",
"raise",
"AzureStorageWrapException",
"(",
"storagemodel",
",",
"msg",
"=",
"msg",
")",
"return",
"storagemodel"
] |
load blob from storage into StorageBlobModelInstance
|
[
"load",
"blob",
"from",
"storage",
"into",
"StorageBlobModelInstance"
] |
python
|
train
| 45.32 |
slackapi/python-slackclient
|
slack/web/client.py
|
https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/slack/web/client.py#L591-L598
|
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
"""Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"id": id})
return self.api_call("files.info", http_verb="GET", params=kwargs)
|
[
"def",
"files_info",
"(",
"self",
",",
"*",
",",
"id",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
"->",
"SlackResponse",
":",
"kwargs",
".",
"update",
"(",
"{",
"\"id\"",
":",
"id",
"}",
")",
"return",
"self",
".",
"api_call",
"(",
"\"files.info\"",
",",
"http_verb",
"=",
"\"GET\"",
",",
"params",
"=",
"kwargs",
")"
] |
Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
|
[
"Gets",
"information",
"about",
"a",
"team",
"file",
"."
] |
python
|
train
| 36.25 |
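A usage sketch for files_info, assuming a bot token is available in the environment; the file id is the one from the docstring example:

import os
import slack

client = slack.WebClient(token=os.environ["SLACK_API_TOKEN"])
response = client.files_info(id="F1234467890")
print(response["file"]["name"])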
edx/edx-enterprise
|
integrated_channels/sap_success_factors/exporters/utils.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/sap_success_factors/exporters/utils.py#L29-L47
|
def transform_language_code(code):
"""
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
"""
if code is None:
return 'English'
components = code.split('-', 2)
language_code = components[0]
try:
country_code = components[1]
except IndexError:
country_code = '_'
language_family = SUCCESSFACTORS_OCN_LANGUAGE_CODES.get(language_code)
if not language_family:
return 'English'
return language_family.get(country_code, language_family['_'])
|
[
"def",
"transform_language_code",
"(",
"code",
")",
":",
"if",
"code",
"is",
"None",
":",
"return",
"'English'",
"components",
"=",
"code",
".",
"split",
"(",
"'-'",
",",
"2",
")",
"language_code",
"=",
"components",
"[",
"0",
"]",
"try",
":",
"country_code",
"=",
"components",
"[",
"1",
"]",
"except",
"IndexError",
":",
"country_code",
"=",
"'_'",
"language_family",
"=",
"SUCCESSFACTORS_OCN_LANGUAGE_CODES",
".",
"get",
"(",
"language_code",
")",
"if",
"not",
"language_family",
":",
"return",
"'English'",
"return",
"language_family",
".",
"get",
"(",
"country_code",
",",
"language_family",
"[",
"'_'",
"]",
")"
] |
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
|
[
"Transform",
"ISO",
"language",
"code",
"(",
"e",
".",
"g",
".",
"en",
"-",
"us",
")",
"to",
"the",
"language",
"name",
"expected",
"by",
"SAPSF",
"."
] |
python
|
valid
| 27.736842 |
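transform_language_code keys into SUCCESSFACTORS_OCN_LANGUAGE_CODES first by language, then by country, with '_' as the per-language default. The lookup can be sketched with a toy mapping (the real table lives in the channel's constants and is not reproduced here):

TOY_CODES = {
    'en': {'_': 'English', 'gb': 'English UK'},
    'es': {'_': 'Spanish'},
}

def toy_transform(code):
    if code is None:
        return 'English'
    language, _, country = code.partition('-')
    family = TOY_CODES.get(language)
    if not family:
        return 'English'
    return family.get(country or '_', family['_'])

# toy_transform('en-gb') -> 'English UK'; toy_transform('fr-fr') -> 'English'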
saltstack/salt
|
salt/modules/bluecoat_sslv.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bluecoat_sslv.py#L101-L121
|
def add_distinguished_name_list(list_name):
'''
Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_distinguished_names_list",
"params": [{"list_name": list_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response)
|
[
"def",
"add_distinguished_name_list",
"(",
"list_name",
")",
":",
"payload",
"=",
"{",
"\"jsonrpc\"",
":",
"\"2.0\"",
",",
"\"id\"",
":",
"\"ID0\"",
",",
"\"method\"",
":",
"\"add_policy_distinguished_names_list\"",
",",
"\"params\"",
":",
"[",
"{",
"\"list_name\"",
":",
"list_name",
"}",
"]",
"}",
"response",
"=",
"__proxy__",
"[",
"'bluecoat_sslv.call'",
"]",
"(",
"payload",
",",
"True",
")",
"return",
"_validate_change_result",
"(",
"response",
")"
] |
Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
|
[
"Add",
"a",
"list",
"of",
"policy",
"distinguished",
"names",
"."
] |
python
|
train
| 27.714286 |
mediawiki-utilities/python-mwreverts
|
mwreverts/historical_dict.py
|
https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/historical_dict.py#L28-L52
|
def insert(self, key, value):
'''Adds a new key-value pair. Returns any discarded values.'''
# Add to history and catch expectorate
if len(self.history) == self.maxsize:
expectorate = self.history[0]
else:
expectorate = None
self.history.append((key, value))
# Add to the appropriate list of values
if key in self:
super().__getitem__(key).append(value)
else:
super().__setitem__(key, [value])
# Clean up old values
if expectorate is not None:
old_key, old_value = expectorate
super().__getitem__(old_key).pop(0)
if len(super().__getitem__(old_key)) == 0:
super().__delitem__(old_key)
return (old_key, old_value)
|
[
"def",
"insert",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"# Add to history and catch expectorate",
"if",
"len",
"(",
"self",
".",
"history",
")",
"==",
"self",
".",
"maxsize",
":",
"expectorate",
"=",
"self",
".",
"history",
"[",
"0",
"]",
"else",
":",
"expectorate",
"=",
"None",
"self",
".",
"history",
".",
"append",
"(",
"(",
"key",
",",
"value",
")",
")",
"# Add to the appropriate list of values",
"if",
"key",
"in",
"self",
":",
"super",
"(",
")",
".",
"__getitem__",
"(",
"key",
")",
".",
"append",
"(",
"value",
")",
"else",
":",
"super",
"(",
")",
".",
"__setitem__",
"(",
"key",
",",
"[",
"value",
"]",
")",
"# Clean up old values",
"if",
"expectorate",
"is",
"not",
"None",
":",
"old_key",
",",
"old_value",
"=",
"expectorate",
"super",
"(",
")",
".",
"__getitem__",
"(",
"old_key",
")",
".",
"pop",
"(",
"0",
")",
"if",
"len",
"(",
"super",
"(",
")",
".",
"__getitem__",
"(",
"old_key",
")",
")",
"==",
"0",
":",
"super",
"(",
")",
".",
"__delitem__",
"(",
"old_key",
")",
"return",
"(",
"old_key",
",",
"old_value",
")"
] |
Adds a new key-value pair. Returns any discarded values.
|
[
"Adds",
"a",
"new",
"key",
"-",
"value",
"pair",
".",
"Returns",
"any",
"discarded",
"values",
"."
] |
python
|
train
| 31.4 |
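A generic sketch of the same "bounded history with eviction" idea that insert implements, written independently of the mwreverts class so no constructor details are assumed:

from collections import deque

class BoundedMultiDict:
    # Keeps at most maxsize most-recent (key, value) pairs; when full, the oldest
    # pair is evicted and its value removed from that key's list, as in insert() above.
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.history = deque()
        self.data = {}

    def insert(self, key, value):
        evicted = None
        if len(self.history) == self.maxsize:
            old_key, old_value = self.history.popleft()
            self.data[old_key].pop(0)
            if not self.data[old_key]:
                del self.data[old_key]
            evicted = (old_key, old_value)
        self.history.append((key, value))
        self.data.setdefault(key, []).append(value)
        return evicted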
mattja/nsim
|
nsim/timeseries.py
|
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L448-L455
|
def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**a + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels)
|
[
"def",
"absolute",
"(",
"self",
")",
":",
"return",
"Timeseries",
"(",
"np",
".",
"absolute",
"(",
"self",
")",
",",
"self",
".",
"tspan",
",",
"self",
".",
"labels",
")"
] |
Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**a + b**2)
|
[
"Calculate",
"the",
"absolute",
"value",
"element",
"-",
"wise",
"."
] |
python
|
train
| 34.875 |
obriencj/python-javatools
|
javatools/report.py
|
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L123-L143
|
def setup(self):
"""
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
"""
if self._formats:
# setup has been run already.
return
basedir = self.basedir
options = self.options
crumbs = self.get_relative_breadcrumbs()
fmts = list()
for fmt_class in self.formats:
fmt = fmt_class(basedir, options, crumbs)
fmt.setup()
fmts.append(fmt)
self._formats = fmts
|
[
"def",
"setup",
"(",
"self",
")",
":",
"if",
"self",
".",
"_formats",
":",
"# setup has been run already.",
"return",
"basedir",
"=",
"self",
".",
"basedir",
"options",
"=",
"self",
".",
"options",
"crumbs",
"=",
"self",
".",
"get_relative_breadcrumbs",
"(",
")",
"fmts",
"=",
"list",
"(",
")",
"for",
"fmt_class",
"in",
"self",
".",
"formats",
":",
"fmt",
"=",
"fmt_class",
"(",
"basedir",
",",
"options",
",",
"crumbs",
")",
"fmt",
".",
"setup",
"(",
")",
"fmts",
".",
"append",
"(",
"fmt",
")",
"self",
".",
"_formats",
"=",
"fmts"
] |
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
|
[
"instantiates",
"all",
"report",
"formats",
"that",
"have",
"been",
"added",
"to",
"this",
"reporter",
"and",
"calls",
"their",
"setup",
"methods",
"."
] |
python
|
train
| 25.571429 |
pmacosta/peng
|
peng/functions.py
|
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L700-L761
|
def remove_extra_delims(expr, ldelim="(", rdelim=")"):
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expressions or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
"""
op_group = ""
for item1 in _OP_PREC:
if isinstance(item1, list):
for item2 in item1:
op_group += item2
else:
op_group += item1
iobj = zip([expr, ldelim, rdelim], ["expr", "ldelim", "rdelim"])
for item, desc in iobj:
if not isinstance(item, str):
raise RuntimeError("Argument `{0}` is not valid".format(desc))
if (len(ldelim) != 1) or ((len(ldelim) == 1) and (ldelim in op_group)):
raise RuntimeError("Argument `ldelim` is not valid")
if (len(rdelim) != 1) or ((len(rdelim) == 1) and (rdelim in op_group)):
raise RuntimeError("Argument `rdelim` is not valid")
if expr.count(ldelim) != expr.count(rdelim):
raise RuntimeError("Mismatched delimiters")
if not expr:
return expr
vchars = (
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
".0123456789"
r"_()[]\{\}" + rdelim + ldelim + op_group
)
if any([item not in vchars for item in expr]) or ("__" in expr):
raise RuntimeError("Argument `expr` is not valid")
expr = _remove_consecutive_delims(expr, ldelim=ldelim, rdelim=rdelim)
expr = expr.replace(ldelim + rdelim, "")
return _remove_extra_delims(expr, ldelim=ldelim, rdelim=rdelim)
|
[
"def",
"remove_extra_delims",
"(",
"expr",
",",
"ldelim",
"=",
"\"(\"",
",",
"rdelim",
"=",
"\")\"",
")",
":",
"op_group",
"=",
"\"\"",
"for",
"item1",
"in",
"_OP_PREC",
":",
"if",
"isinstance",
"(",
"item1",
",",
"list",
")",
":",
"for",
"item2",
"in",
"item1",
":",
"op_group",
"+=",
"item2",
"else",
":",
"op_group",
"+=",
"item1",
"iobj",
"=",
"zip",
"(",
"[",
"expr",
",",
"ldelim",
",",
"rdelim",
"]",
",",
"[",
"\"expr\"",
",",
"\"ldelim\"",
",",
"\"rdelim\"",
"]",
")",
"for",
"item",
",",
"desc",
"in",
"iobj",
":",
"if",
"not",
"isinstance",
"(",
"item",
",",
"str",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `{0}` is not valid\"",
".",
"format",
"(",
"desc",
")",
")",
"if",
"(",
"len",
"(",
"ldelim",
")",
"!=",
"1",
")",
"or",
"(",
"(",
"len",
"(",
"ldelim",
")",
"==",
"1",
")",
"and",
"(",
"ldelim",
"in",
"op_group",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `ldelim` is not valid\"",
")",
"if",
"(",
"len",
"(",
"rdelim",
")",
"!=",
"1",
")",
"or",
"(",
"(",
"len",
"(",
"rdelim",
")",
"==",
"1",
")",
"and",
"(",
"rdelim",
"in",
"op_group",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `rdelim` is not valid\"",
")",
"if",
"expr",
".",
"count",
"(",
"ldelim",
")",
"!=",
"expr",
".",
"count",
"(",
"rdelim",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Mismatched delimiters\"",
")",
"if",
"not",
"expr",
":",
"return",
"expr",
"vchars",
"=",
"(",
"\"abcdefghijklmnopqrstuvwxyz\"",
"\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"",
"\".0123456789\"",
"r\"_()[]\\{\\}\"",
"+",
"rdelim",
"+",
"ldelim",
"+",
"op_group",
")",
"if",
"any",
"(",
"[",
"item",
"not",
"in",
"vchars",
"for",
"item",
"in",
"expr",
"]",
")",
"or",
"(",
"\"__\"",
"in",
"expr",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `expr` is not valid\"",
")",
"expr",
"=",
"_remove_consecutive_delims",
"(",
"expr",
",",
"ldelim",
"=",
"ldelim",
",",
"rdelim",
"=",
"rdelim",
")",
"expr",
"=",
"expr",
".",
"replace",
"(",
"ldelim",
"+",
"rdelim",
",",
"\"\"",
")",
"return",
"_remove_extra_delims",
"(",
"expr",
",",
"ldelim",
"=",
"ldelim",
",",
"rdelim",
"=",
"rdelim",
")"
] |
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expressions or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
|
[
"r",
"Remove",
"unnecessary",
"delimiters",
"in",
"mathematical",
"expressions",
"."
] |
python
|
test
| 36.370968 |
bitesofcode/projexui
|
projexui/widgets/xnodewidget/xnode.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L1922-L1930
|
def setPalette(self, palette):
"""
Sets the palette for this node to the inputed palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None
"""
self._palette = XNodePalette(palette) if palette is not None else None
self.setDirty()
|
[
"def",
"setPalette",
"(",
"self",
",",
"palette",
")",
":",
"self",
".",
"_palette",
"=",
"XNodePalette",
"(",
"palette",
")",
"if",
"palette",
"is",
"not",
"None",
"else",
"None",
"self",
".",
"setDirty",
"(",
")"
] |
Sets the palette for this node to the inputed palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None
|
[
"Sets",
"the",
"palette",
"for",
"this",
"node",
"to",
"the",
"inputed",
"palette",
".",
"If",
"None",
"is",
"provided",
"then",
"the",
"scene",
"s",
"palette",
"will",
"be",
"used",
"for",
"this",
"node",
".",
":",
"param",
"palette",
"|",
"<XNodePalette",
">",
"||",
"None"
] |
python
|
train
| 39.666667 |
googleapis/google-cloud-python
|
storage/google/cloud/storage/batch.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L304-L335
|
def _unpack_batch_response(response):
"""Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
"""
parser = Parser()
message = _generate_faux_mime_message(parser, response)
if not isinstance(message._payload, list):
raise ValueError("Bad response: not multi-part")
for subrequest in message._payload:
status_line, rest = subrequest._payload.split("\n", 1)
_, status, _ = status_line.split(" ", 2)
sub_message = parser.parsestr(rest)
payload = sub_message._payload
msg_headers = dict(sub_message._headers)
content_id = msg_headers.get("Content-ID")
subresponse = requests.Response()
subresponse.request = requests.Request(
method="BATCH", url="contentid://{}".format(content_id)
).prepare()
subresponse.status_code = int(status)
subresponse.headers.update(msg_headers)
subresponse._content = payload.encode("utf-8")
yield subresponse
|
[
"def",
"_unpack_batch_response",
"(",
"response",
")",
":",
"parser",
"=",
"Parser",
"(",
")",
"message",
"=",
"_generate_faux_mime_message",
"(",
"parser",
",",
"response",
")",
"if",
"not",
"isinstance",
"(",
"message",
".",
"_payload",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"Bad response: not multi-part\"",
")",
"for",
"subrequest",
"in",
"message",
".",
"_payload",
":",
"status_line",
",",
"rest",
"=",
"subrequest",
".",
"_payload",
".",
"split",
"(",
"\"\\n\"",
",",
"1",
")",
"_",
",",
"status",
",",
"_",
"=",
"status_line",
".",
"split",
"(",
"\" \"",
",",
"2",
")",
"sub_message",
"=",
"parser",
".",
"parsestr",
"(",
"rest",
")",
"payload",
"=",
"sub_message",
".",
"_payload",
"msg_headers",
"=",
"dict",
"(",
"sub_message",
".",
"_headers",
")",
"content_id",
"=",
"msg_headers",
".",
"get",
"(",
"\"Content-ID\"",
")",
"subresponse",
"=",
"requests",
".",
"Response",
"(",
")",
"subresponse",
".",
"request",
"=",
"requests",
".",
"Request",
"(",
"method",
"=",
"\"BATCH\"",
",",
"url",
"=",
"\"contentid://{}\"",
".",
"format",
"(",
"content_id",
")",
")",
".",
"prepare",
"(",
")",
"subresponse",
".",
"status_code",
"=",
"int",
"(",
"status",
")",
"subresponse",
".",
"headers",
".",
"update",
"(",
"msg_headers",
")",
"subresponse",
".",
"_content",
"=",
"payload",
".",
"encode",
"(",
"\"utf-8\"",
")",
"yield",
"subresponse"
] |
Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
|
[
"Convert",
"requests",
".",
"Response",
"-",
">",
"[",
"(",
"headers",
"payload",
")",
"]",
"."
] |
python
|
train
| 36.34375 |
gem/oq-engine
|
openquake/calculators/getters.py
|
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/getters.py#L433-L460
|
def gen_rupture_getters(dstore, slc=slice(None),
concurrent_tasks=1, hdf5cache=None):
"""
:yields: RuptureGetters
"""
if dstore.parent:
dstore = dstore.parent
csm_info = dstore['csm_info']
trt_by_grp = csm_info.grp_by("trt")
samples = csm_info.get_samples_by_grp()
rlzs_by_gsim = csm_info.get_rlzs_by_gsim_grp()
rup_array = dstore['ruptures'][slc]
maxweight = numpy.ceil(len(rup_array) / (concurrent_tasks or 1))
nr, ne = 0, 0
for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
if not rlzs_by_gsim[grp_id]:
# this may happen if a source model has no sources, like
# in event_based_risk/case_3
continue
for block in general.block_splitter(arr, maxweight):
rgetter = RuptureGetter(
hdf5cache or dstore.filename, numpy.array(block), grp_id,
trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim[grp_id])
rgetter.weight = getattr(block, 'weight', len(block))
yield rgetter
nr += len(block)
ne += rgetter.num_events
logging.info('Read %d ruptures and %d events', nr, ne)
|
[
"def",
"gen_rupture_getters",
"(",
"dstore",
",",
"slc",
"=",
"slice",
"(",
"None",
")",
",",
"concurrent_tasks",
"=",
"1",
",",
"hdf5cache",
"=",
"None",
")",
":",
"if",
"dstore",
".",
"parent",
":",
"dstore",
"=",
"dstore",
".",
"parent",
"csm_info",
"=",
"dstore",
"[",
"'csm_info'",
"]",
"trt_by_grp",
"=",
"csm_info",
".",
"grp_by",
"(",
"\"trt\"",
")",
"samples",
"=",
"csm_info",
".",
"get_samples_by_grp",
"(",
")",
"rlzs_by_gsim",
"=",
"csm_info",
".",
"get_rlzs_by_gsim_grp",
"(",
")",
"rup_array",
"=",
"dstore",
"[",
"'ruptures'",
"]",
"[",
"slc",
"]",
"maxweight",
"=",
"numpy",
".",
"ceil",
"(",
"len",
"(",
"rup_array",
")",
"/",
"(",
"concurrent_tasks",
"or",
"1",
")",
")",
"nr",
",",
"ne",
"=",
"0",
",",
"0",
"for",
"grp_id",
",",
"arr",
"in",
"general",
".",
"group_array",
"(",
"rup_array",
",",
"'grp_id'",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"rlzs_by_gsim",
"[",
"grp_id",
"]",
":",
"# this may happen if a source model has no sources, like",
"# in event_based_risk/case_3",
"continue",
"for",
"block",
"in",
"general",
".",
"block_splitter",
"(",
"arr",
",",
"maxweight",
")",
":",
"rgetter",
"=",
"RuptureGetter",
"(",
"hdf5cache",
"or",
"dstore",
".",
"filename",
",",
"numpy",
".",
"array",
"(",
"block",
")",
",",
"grp_id",
",",
"trt_by_grp",
"[",
"grp_id",
"]",
",",
"samples",
"[",
"grp_id",
"]",
",",
"rlzs_by_gsim",
"[",
"grp_id",
"]",
")",
"rgetter",
".",
"weight",
"=",
"getattr",
"(",
"block",
",",
"'weight'",
",",
"len",
"(",
"block",
")",
")",
"yield",
"rgetter",
"nr",
"+=",
"len",
"(",
"block",
")",
"ne",
"+=",
"rgetter",
".",
"num_events",
"logging",
".",
"info",
"(",
"'Read %d ruptures and %d events'",
",",
"nr",
",",
"ne",
")"
] |
:yields: RuptureGetters
|
[
":",
"yields",
":",
"RuptureGetters"
] |
python
|
train
| 42.142857 |
twilio/authy-python
|
authy/api/resources.py
|
https://github.com/twilio/authy-python/blob/7a0073b39a56bac495b10e4b4fca3f09982de6ed/authy/api/resources.py#L543-L582
|
def __make_http_query(self, params, topkey=''):
"""
        Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query.
"""
if len(params) == 0:
return ""
result = ""
# is a dictionary?
if type(params) is dict:
for key in params.keys():
newkey = quote(key)
if topkey != '':
newkey = topkey + quote('[' + key + ']')
if type(params[key]) is dict:
result += self.__make_http_query(params[key], newkey)
elif type(params[key]) is list:
i = 0
for val in params[key]:
if type(val) is dict:
result += self.__make_http_query(
val, newkey + quote('['+str(i)+']'))
else:
result += newkey + \
quote('['+str(i)+']') + "=" + \
quote(str(val)) + "&"
i = i + 1
# boolean should have special treatment as well
elif type(params[key]) is bool:
result += newkey + "=" + \
quote(str(params[key]).lower()) + "&"
# assume string (integers and floats work well)
else:
result += newkey + "=" + quote(str(params[key])) + "&"
# remove the last '&'
if (result) and (topkey == '') and (result[-1] == '&'):
result = result[:-1]
return result
|
[
"def",
"__make_http_query",
"(",
"self",
",",
"params",
",",
"topkey",
"=",
"''",
")",
":",
"if",
"len",
"(",
"params",
")",
"==",
"0",
":",
"return",
"\"\"",
"result",
"=",
"\"\"",
"# is a dictionary?",
"if",
"type",
"(",
"params",
")",
"is",
"dict",
":",
"for",
"key",
"in",
"params",
".",
"keys",
"(",
")",
":",
"newkey",
"=",
"quote",
"(",
"key",
")",
"if",
"topkey",
"!=",
"''",
":",
"newkey",
"=",
"topkey",
"+",
"quote",
"(",
"'['",
"+",
"key",
"+",
"']'",
")",
"if",
"type",
"(",
"params",
"[",
"key",
"]",
")",
"is",
"dict",
":",
"result",
"+=",
"self",
".",
"__make_http_query",
"(",
"params",
"[",
"key",
"]",
",",
"newkey",
")",
"elif",
"type",
"(",
"params",
"[",
"key",
"]",
")",
"is",
"list",
":",
"i",
"=",
"0",
"for",
"val",
"in",
"params",
"[",
"key",
"]",
":",
"if",
"type",
"(",
"val",
")",
"is",
"dict",
":",
"result",
"+=",
"self",
".",
"__make_http_query",
"(",
"val",
",",
"newkey",
"+",
"quote",
"(",
"'['",
"+",
"str",
"(",
"i",
")",
"+",
"']'",
")",
")",
"else",
":",
"result",
"+=",
"newkey",
"+",
"quote",
"(",
"'['",
"+",
"str",
"(",
"i",
")",
"+",
"']'",
")",
"+",
"\"=\"",
"+",
"quote",
"(",
"str",
"(",
"val",
")",
")",
"+",
"\"&\"",
"i",
"=",
"i",
"+",
"1",
"# boolean should have special treatment as well",
"elif",
"type",
"(",
"params",
"[",
"key",
"]",
")",
"is",
"bool",
":",
"result",
"+=",
"newkey",
"+",
"\"=\"",
"+",
"quote",
"(",
"str",
"(",
"params",
"[",
"key",
"]",
")",
".",
"lower",
"(",
")",
")",
"+",
"\"&\"",
"# assume string (integers and floats work well)",
"else",
":",
"result",
"+=",
"newkey",
"+",
"\"=\"",
"+",
"quote",
"(",
"str",
"(",
"params",
"[",
"key",
"]",
")",
")",
"+",
"\"&\"",
"# remove the last '&'",
"if",
"(",
"result",
")",
"and",
"(",
"topkey",
"==",
"''",
")",
"and",
"(",
"result",
"[",
"-",
"1",
"]",
"==",
"'&'",
")",
":",
"result",
"=",
"result",
"[",
":",
"-",
"1",
"]",
"return",
"result"
] |
Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query.
|
[
"Function",
"to",
"covert",
"params",
"into",
"url",
"encoded",
"query",
"string",
":",
"param",
"dict",
"params",
":",
"Json",
"string",
"sent",
"by",
"Authy",
".",
":",
"param",
"string",
"topkey",
":",
"params",
"key",
":",
"return",
"string",
":",
"url",
"encoded",
"Query",
"."
] |
python
|
train
| 42.8 |
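A note on the Authy record above: it recursively flattens nested dicts and lists into a bracketed, url-encoded query string. A minimal standalone sketch of the same idea using only the standard library (names and the example payload are illustrative, not Authy's implementation):

from urllib.parse import quote

def make_http_query(params, topkey=''):
    # Recursively flatten nested dicts/lists into key[sub][0]=value pairs.
    parts = []
    for key, value in params.items():
        newkey = quote(str(key)) if not topkey else topkey + quote('[' + str(key) + ']')
        if isinstance(value, dict):
            parts.append(make_http_query(value, newkey))
        elif isinstance(value, list):
            for i, item in enumerate(value):
                itemkey = newkey + quote('[' + str(i) + ']')
                if isinstance(item, dict):
                    parts.append(make_http_query(item, itemkey))
                else:
                    parts.append(itemkey + '=' + quote(str(item)))
        elif isinstance(value, bool):
            parts.append(newkey + '=' + quote(str(value).lower()))
        else:
            parts.append(newkey + '=' + quote(str(value)))
    return '&'.join(p for p in parts if p)

# Brackets get percent-encoded, e.g. user%5Bname%5D=Ada&user%5Bemails%5D%5B0%5D=a%40b.c&active=true
print(make_http_query({'user': {'name': 'Ada', 'emails': ['a@b.c']}, 'active': True}))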
synw/dataswim
|
dataswim/data/clean.py
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L52-L68
|
def zero_nan(self, *cols):
"""
Converts zero values to nan values in selected columns
        :param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")``
"""
if len(cols) == 0:
self.warning("Can not nan zero values if a column name "
"is not provided")
df = self._zero_nan(*cols)
if df is None:
self.err("Can not fill zero values with nan")
return
self.df = df
|
[
"def",
"zero_nan",
"(",
"self",
",",
"*",
"cols",
")",
":",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"self",
".",
"warning",
"(",
"\"Can not nan zero values if a column name \"",
"\"is not provided\"",
")",
"df",
"=",
"self",
".",
"_zero_nan",
"(",
"*",
"cols",
")",
"if",
"df",
"is",
"None",
":",
"self",
".",
"err",
"(",
"\"Can not fill zero values with nan\"",
")",
"return",
"self",
".",
"df",
"=",
"df"
] |
Converts zero values to nan values in selected columns
:param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")``
|
[
"Converts",
"zero",
"values",
"to",
"nan",
"values",
"in",
"selected",
"columns"
] |
python
|
train
| 31.294118 |
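For the dataswim ``zero_nan`` helper above, the underlying operation is a column-wise replacement of zeros with NaN; a hedged pandas equivalent, independent of the dataswim wrapper (column names are illustrative):

import numpy as np
import pandas as pd

df = pd.DataFrame({"mycol1": [0, 1, 2], "mycol2": [3, 0, 5]})
cols = ["mycol1", "mycol2"]
# Replace zeros with NaN only in the selected columns.
df[cols] = df[cols].replace(0, np.nan)
print(df)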
taxpon/pymesh
|
pymesh/base.py
|
https://github.com/taxpon/pymesh/blob/a90b3b2ed1408d793f3b5208dd8087b08fb7c92e/pymesh/base.py#L193-L206
|
def __calc_signed_volume(triangle):
""" Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
"""
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2]
v231 = triangle[1][0] * triangle[2][1] * triangle[0][2]
v312 = triangle[2][0] * triangle[0][1] * triangle[1][2]
v132 = triangle[0][0] * triangle[2][1] * triangle[1][2]
v213 = triangle[1][0] * triangle[0][1] * triangle[2][2]
v123 = triangle[0][0] * triangle[1][1] * triangle[2][2]
signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0
return signed_volume
|
[
"def",
"__calc_signed_volume",
"(",
"triangle",
")",
":",
"v321",
"=",
"triangle",
"[",
"2",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"1",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"0",
"]",
"[",
"2",
"]",
"v231",
"=",
"triangle",
"[",
"1",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"2",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"0",
"]",
"[",
"2",
"]",
"v312",
"=",
"triangle",
"[",
"2",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"1",
"]",
"[",
"2",
"]",
"v132",
"=",
"triangle",
"[",
"0",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"2",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"1",
"]",
"[",
"2",
"]",
"v213",
"=",
"triangle",
"[",
"1",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"0",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"2",
"]",
"[",
"2",
"]",
"v123",
"=",
"triangle",
"[",
"0",
"]",
"[",
"0",
"]",
"*",
"triangle",
"[",
"1",
"]",
"[",
"1",
"]",
"*",
"triangle",
"[",
"2",
"]",
"[",
"2",
"]",
"signed_volume",
"=",
"(",
"-",
"v321",
"+",
"v231",
"+",
"v312",
"-",
"v132",
"-",
"v213",
"+",
"v123",
")",
"/",
"6.0",
"return",
"signed_volume"
] |
Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
|
[
"Calculate",
"signed",
"volume",
"of",
"given",
"triangle",
":",
"param",
"list",
"of",
"list",
"triangle",
":",
":",
"rtype",
"float"
] |
python
|
train
| 45.285714 |
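The pymesh ``__calc_signed_volume`` record expands, term by term, the signed volume of the tetrahedron spanned by the triangle and the origin, V = (v0 · (v1 × v2)) / 6. A short NumPy cross-check of the same quantity (illustrative only):

import numpy as np

def signed_volume(triangle):
    # V = dot(v0, cross(v1, v2)) / 6 for vertices v0, v1, v2.
    v0, v1, v2 = (np.asarray(v, dtype=float) for v in triangle)
    return float(np.dot(v0, np.cross(v1, v2))) / 6.0

# Unit right tetrahedron at the origin: volume 1/6.
print(signed_volume([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))  # 0.1666...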
aouyar/PyMunin
|
pysysinfo/diskio.py
|
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L418-L430
|
def getSwapStats(self, dev):
"""Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats.
"""
if self._swapList is None:
self._initSwapInfo()
if dev in self._swapList:
return self.getDevStats(dev)
else:
return None
|
[
"def",
"getSwapStats",
"(",
"self",
",",
"dev",
")",
":",
"if",
"self",
".",
"_swapList",
"is",
"None",
":",
"self",
".",
"_initSwapInfo",
"(",
")",
"if",
"dev",
"in",
"self",
".",
"_swapList",
":",
"return",
"self",
".",
"getDevStats",
"(",
"dev",
")",
"else",
":",
"return",
"None"
] |
Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats.
|
[
"Returns",
"I",
"/",
"O",
"stats",
"for",
"swap",
"partition",
"."
] |
python
|
train
| 27.692308 |
bkabrda/flask-whooshee
|
flask_whooshee.py
|
https://github.com/bkabrda/flask-whooshee/blob/773fc51ed53043bd5e92c65eadef5663845ae8c4/flask_whooshee.py#L138-L163
|
def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
"""Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
index = Whooshee.get_or_create_index(_get_app(cls), cls)
prepped_string = cls.prep_search_string(search_string, match_substrings)
with index.searcher() as searcher:
parser = whoosh.qparser.MultifieldParser(cls.schema.names(), index.schema, group=group)
query = parser.parse(prepped_string)
results = searcher.search(query, limit=limit)
if values_of:
return [x[values_of] for x in results]
return results
|
[
"def",
"search",
"(",
"cls",
",",
"search_string",
",",
"values_of",
"=",
"''",
",",
"group",
"=",
"whoosh",
".",
"qparser",
".",
"OrGroup",
",",
"match_substrings",
"=",
"True",
",",
"limit",
"=",
"None",
")",
":",
"index",
"=",
"Whooshee",
".",
"get_or_create_index",
"(",
"_get_app",
"(",
"cls",
")",
",",
"cls",
")",
"prepped_string",
"=",
"cls",
".",
"prep_search_string",
"(",
"search_string",
",",
"match_substrings",
")",
"with",
"index",
".",
"searcher",
"(",
")",
"as",
"searcher",
":",
"parser",
"=",
"whoosh",
".",
"qparser",
".",
"MultifieldParser",
"(",
"cls",
".",
"schema",
".",
"names",
"(",
")",
",",
"index",
".",
"schema",
",",
"group",
"=",
"group",
")",
"query",
"=",
"parser",
".",
"parse",
"(",
"prepped_string",
")",
"results",
"=",
"searcher",
".",
"search",
"(",
"query",
",",
"limit",
"=",
"limit",
")",
"if",
"values_of",
":",
"return",
"[",
"x",
"[",
"values_of",
"]",
"for",
"x",
"in",
"results",
"]",
"return",
"results"
] |
Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
|
[
"Searches",
"the",
"fields",
"for",
"given",
"search_string",
".",
"Returns",
"the",
"found",
"records",
"if",
"values_of",
"is",
"left",
"empty",
"else",
"the",
"values",
"of",
"the",
"given",
"columns",
"."
] |
python
|
train
| 57.192308 |
hhromic/python-oslom-runner
|
oslom/runner.py
|
https://github.com/hhromic/python-oslom-runner/blob/f5991bd5014c65d0a9852641d51cbc344407d6a2/oslom/runner.py#L66-L70
|
def store_mapping(self, path):
"""Store the current Id mappings into a TSV file."""
with open(path, "w") as writer:
for key, value in self.mapping.iteritems():
writer.write("{}\t{}\n".format(key, value))
|
[
"def",
"store_mapping",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"writer",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"mapping",
".",
"iteritems",
"(",
")",
":",
"writer",
".",
"write",
"(",
"\"{}\\t{}\\n\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")"
] |
Store the current Id mappings into a TSV file.
|
[
"Store",
"the",
"current",
"Id",
"mappings",
"into",
"a",
"TSV",
"file",
"."
] |
python
|
train
| 48.6 |
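Note that ``self.mapping.iteritems()`` in the OSLOM runner record above is Python 2 only; a hedged Python 3 sketch of the same TSV dump (mapping and file name are illustrative):

mapping = {"a": 1, "b": 2}
with open("mapping.tsv", "w") as writer:
    # .items() replaces Python 2's .iteritems()
    for key, value in mapping.items():
        writer.write("{}\t{}\n".format(key, value))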
divio/django-filer
|
filer/admin/folderadmin.py
|
https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/admin/folderadmin.py#L697-L792
|
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
        This action first displays a confirmation page which shows all the
        deletable files and/or folders, or, if the user has no permission on
        one of the related children (foreign keys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects
# that will also be deleted. Hopefully this also checks for necessary
# permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, model_count_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, model_count_folder, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion. Do the deletion and
# return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete
# cascade, but then the individual .delete() methods won't be
# called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(
folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {"count": n, })
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = self.admin_site.each_context(request)
context.update({
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'filer_admin_context': AdminContext(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(
request,
"admin/filer/delete_selected_files_confirmation.html",
context
)
|
[
"def",
"delete_files_or_folders",
"(",
"self",
",",
"request",
",",
"files_queryset",
",",
"folders_queryset",
")",
":",
"opts",
"=",
"self",
".",
"model",
".",
"_meta",
"app_label",
"=",
"opts",
".",
"app_label",
"# Check that the user has delete permission for the actual model",
"if",
"not",
"self",
".",
"has_delete_permission",
"(",
"request",
")",
":",
"raise",
"PermissionDenied",
"current_folder",
"=",
"self",
".",
"_get_current_action_folder",
"(",
"request",
",",
"files_queryset",
",",
"folders_queryset",
")",
"all_protected",
"=",
"[",
"]",
"# Populate deletable_objects, a data structure of all related objects",
"# that will also be deleted. Hopefully this also checks for necessary",
"# permissions.",
"# TODO: Check if permissions are really verified",
"using",
"=",
"router",
".",
"db_for_write",
"(",
"self",
".",
"model",
")",
"deletable_files",
",",
"model_count_files",
",",
"perms_needed_files",
",",
"protected_files",
"=",
"get_deleted_objects",
"(",
"files_queryset",
",",
"files_queryset",
".",
"model",
".",
"_meta",
",",
"request",
".",
"user",
",",
"self",
".",
"admin_site",
",",
"using",
")",
"deletable_folders",
",",
"model_count_folder",
",",
"perms_needed_folders",
",",
"protected_folders",
"=",
"get_deleted_objects",
"(",
"folders_queryset",
",",
"folders_queryset",
".",
"model",
".",
"_meta",
",",
"request",
".",
"user",
",",
"self",
".",
"admin_site",
",",
"using",
")",
"all_protected",
".",
"extend",
"(",
"protected_files",
")",
"all_protected",
".",
"extend",
"(",
"protected_folders",
")",
"all_deletable_objects",
"=",
"[",
"deletable_files",
",",
"deletable_folders",
"]",
"all_perms_needed",
"=",
"perms_needed_files",
".",
"union",
"(",
"perms_needed_folders",
")",
"# The user has already confirmed the deletion. Do the deletion and",
"# return a None to display the change list view again.",
"if",
"request",
".",
"POST",
".",
"get",
"(",
"'post'",
")",
":",
"if",
"all_perms_needed",
":",
"raise",
"PermissionDenied",
"n",
"=",
"files_queryset",
".",
"count",
"(",
")",
"+",
"folders_queryset",
".",
"count",
"(",
")",
"if",
"n",
":",
"# delete all explicitly selected files",
"for",
"f",
"in",
"files_queryset",
":",
"self",
".",
"log_deletion",
"(",
"request",
",",
"f",
",",
"force_text",
"(",
"f",
")",
")",
"f",
".",
"delete",
"(",
")",
"# delete all files in all selected folders and their children",
"# This would happen automatically by ways of the delete",
"# cascade, but then the individual .delete() methods won't be",
"# called and the files won't be deleted from the filesystem.",
"folder_ids",
"=",
"set",
"(",
")",
"for",
"folder",
"in",
"folders_queryset",
":",
"folder_ids",
".",
"add",
"(",
"folder",
".",
"id",
")",
"folder_ids",
".",
"update",
"(",
"folder",
".",
"get_descendants",
"(",
")",
".",
"values_list",
"(",
"'id'",
",",
"flat",
"=",
"True",
")",
")",
"for",
"f",
"in",
"File",
".",
"objects",
".",
"filter",
"(",
"folder__in",
"=",
"folder_ids",
")",
":",
"self",
".",
"log_deletion",
"(",
"request",
",",
"f",
",",
"force_text",
"(",
"f",
")",
")",
"f",
".",
"delete",
"(",
")",
"# delete all folders",
"for",
"f",
"in",
"folders_queryset",
":",
"self",
".",
"log_deletion",
"(",
"request",
",",
"f",
",",
"force_text",
"(",
"f",
")",
")",
"f",
".",
"delete",
"(",
")",
"self",
".",
"message_user",
"(",
"request",
",",
"_",
"(",
"\"Successfully deleted %(count)d files and/or folders.\"",
")",
"%",
"{",
"\"count\"",
":",
"n",
",",
"}",
")",
"# Return None to display the change list page again.",
"return",
"None",
"if",
"all_perms_needed",
"or",
"all_protected",
":",
"title",
"=",
"_",
"(",
"\"Cannot delete files and/or folders\"",
")",
"else",
":",
"title",
"=",
"_",
"(",
"\"Are you sure?\"",
")",
"context",
"=",
"self",
".",
"admin_site",
".",
"each_context",
"(",
"request",
")",
"context",
".",
"update",
"(",
"{",
"\"title\"",
":",
"title",
",",
"\"instance\"",
":",
"current_folder",
",",
"\"breadcrumbs_action\"",
":",
"_",
"(",
"\"Delete files and/or folders\"",
")",
",",
"\"deletable_objects\"",
":",
"all_deletable_objects",
",",
"\"files_queryset\"",
":",
"files_queryset",
",",
"\"folders_queryset\"",
":",
"folders_queryset",
",",
"\"perms_lacking\"",
":",
"all_perms_needed",
",",
"\"protected\"",
":",
"all_protected",
",",
"\"opts\"",
":",
"opts",
",",
"'is_popup'",
":",
"popup_status",
"(",
"request",
")",
",",
"'filer_admin_context'",
":",
"AdminContext",
"(",
"request",
")",
",",
"\"root_path\"",
":",
"reverse",
"(",
"'admin:index'",
")",
",",
"\"app_label\"",
":",
"app_label",
",",
"\"action_checkbox_name\"",
":",
"helpers",
".",
"ACTION_CHECKBOX_NAME",
",",
"}",
")",
"# Display the destination folder selection page",
"return",
"render",
"(",
"request",
",",
"\"admin/filer/delete_selected_files_confirmation.html\"",
",",
"context",
")"
] |
Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission on
one of the related children (foreign keys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
|
[
"Action",
"which",
"deletes",
"the",
"selected",
"files",
"and",
"/",
"or",
"folders",
"."
] |
python
|
train
| 45.958333 |
pyca/pyopenssl
|
src/OpenSSL/crypto.py
|
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L2569-L2579
|
def b64_encode(self):
"""
Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes`
"""
encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)
result = _ffi.string(encoded)
_lib.OPENSSL_free(encoded)
return result
|
[
"def",
"b64_encode",
"(",
"self",
")",
":",
"encoded",
"=",
"_lib",
".",
"NETSCAPE_SPKI_b64_encode",
"(",
"self",
".",
"_spki",
")",
"result",
"=",
"_ffi",
".",
"string",
"(",
"encoded",
")",
"_lib",
".",
"OPENSSL_free",
"(",
"encoded",
")",
"return",
"result"
] |
Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes`
|
[
"Generate",
"a",
"base64",
"encoded",
"representation",
"of",
"this",
"SPKI",
"object",
"."
] |
python
|
test
| 30.818182 |
mathiasertl/xmpp-backends
|
xmpp_backends/django/models.py
|
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/django/models.py#L39-L47
|
def set_password(self, raw_password):
"""Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
"""
if raw_password is None:
self.set_unusable_password()
else:
xmpp_backend.set_password(self.node, self.domain, raw_password)
|
[
"def",
"set_password",
"(",
"self",
",",
"raw_password",
")",
":",
"if",
"raw_password",
"is",
"None",
":",
"self",
".",
"set_unusable_password",
"(",
")",
"else",
":",
"xmpp_backend",
".",
"set_password",
"(",
"self",
".",
"node",
",",
"self",
".",
"domain",
",",
"raw_password",
")"
] |
Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
|
[
"Calls",
":",
"py",
":",
"func",
":",
"~xmpp_backends",
".",
"base",
".",
"XmppBackendBase",
".",
"set_password",
"for",
"the",
"user",
"."
] |
python
|
train
| 45.111111 |
zsimic/runez
|
src/runez/logsetup.py
|
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/logsetup.py#L383-L401
|
def enable_faulthandler(cls, signum=signal.SIGUSR1):
"""
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
"""
with cls._lock:
if not signum:
cls._disable_faulthandler()
return
if not cls.file_handler or faulthandler is None:
return
cls.faulthandler_signum = signum
dump_file = cls.file_handler.stream
faulthandler.enable(file=dump_file, all_threads=True)
faulthandler.register(signum, file=dump_file, all_threads=True, chain=False)
|
[
"def",
"enable_faulthandler",
"(",
"cls",
",",
"signum",
"=",
"signal",
".",
"SIGUSR1",
")",
":",
"with",
"cls",
".",
"_lock",
":",
"if",
"not",
"signum",
":",
"cls",
".",
"_disable_faulthandler",
"(",
")",
"return",
"if",
"not",
"cls",
".",
"file_handler",
"or",
"faulthandler",
"is",
"None",
":",
"return",
"cls",
".",
"faulthandler_signum",
"=",
"signum",
"dump_file",
"=",
"cls",
".",
"file_handler",
".",
"stream",
"faulthandler",
".",
"enable",
"(",
"file",
"=",
"dump_file",
",",
"all_threads",
"=",
"True",
")",
"faulthandler",
".",
"register",
"(",
"signum",
",",
"file",
"=",
"dump_file",
",",
"all_threads",
"=",
"True",
",",
"chain",
"=",
"False",
")"
] |
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
|
[
"Enable",
"dumping",
"thread",
"stack",
"traces",
"when",
"specified",
"signals",
"are",
"received",
"similar",
"to",
"java",
"s",
"handling",
"of",
"SIGQUIT"
] |
python
|
train
| 47.631579 |
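The runez ``enable_faulthandler`` record wraps the standard library's ``faulthandler`` module; a minimal standalone sketch of registering a signal for full thread stack dumps (POSIX only; the signal choice and self-signalling are illustrative):

import faulthandler
import os
import signal

# Dump all thread stacks to stderr whenever SIGUSR1 is received.
faulthandler.register(signal.SIGUSR1, all_threads=True, chain=False)

# Trigger one dump from this process as a demonstration.
os.kill(os.getpid(), signal.SIGUSR1)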
mdgoldberg/sportsref
|
sportsref/nfl/teams.py
|
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nfl/teams.py#L337-L349
|
def def_alignment(self, year):
"""Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
"""
scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None
|
[
"def",
"def_alignment",
"(",
"self",
",",
"year",
")",
":",
"scheme_text",
"=",
"self",
".",
"_year_info_pq",
"(",
"year",
",",
"'Defensive Alignment'",
")",
".",
"text",
"(",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'Defensive Alignment[:\\s]*(.+)\\s*'",
",",
"scheme_text",
",",
"re",
".",
"I",
")",
"if",
"m",
":",
"return",
"m",
".",
"group",
"(",
"1",
")",
"else",
":",
"return",
"None"
] |
Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
|
[
"Returns",
"the",
"name",
"of",
"the",
"defensive",
"alignment",
"the",
"team",
"ran",
"in",
"the",
"given",
"year",
"."
] |
python
|
test
| 36.692308 |
GPflow/GPflow
|
gpflow/training/natgrad_optimizer.py
|
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/natgrad_optimizer.py#L386-L398
|
def _inverse_lower_triangular(M):
"""
Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input.
"""
if M.get_shape().ndims != 3: # pragma: no cover
raise ValueError("Number of dimensions for input is required to be 3.")
D, N = tf.shape(M)[0], tf.shape(M)[1]
I_DNN = tf.eye(N, dtype=M.dtype)[None, :, :] * tf.ones((D, 1, 1), dtype=M.dtype)
return tf.matrix_triangular_solve(M, I_DNN)
|
[
"def",
"_inverse_lower_triangular",
"(",
"M",
")",
":",
"if",
"M",
".",
"get_shape",
"(",
")",
".",
"ndims",
"!=",
"3",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"\"Number of dimensions for input is required to be 3.\"",
")",
"D",
",",
"N",
"=",
"tf",
".",
"shape",
"(",
"M",
")",
"[",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"M",
")",
"[",
"1",
"]",
"I_DNN",
"=",
"tf",
".",
"eye",
"(",
"N",
",",
"dtype",
"=",
"M",
".",
"dtype",
")",
"[",
"None",
",",
":",
",",
":",
"]",
"*",
"tf",
".",
"ones",
"(",
"(",
"D",
",",
"1",
",",
"1",
")",
",",
"dtype",
"=",
"M",
".",
"dtype",
")",
"return",
"tf",
".",
"matrix_triangular_solve",
"(",
"M",
",",
"I_DNN",
")"
] |
Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input.
|
[
"Take",
"inverse",
"of",
"lower",
"triangular",
"(",
"e",
".",
"g",
".",
"Cholesky",
")",
"matrix",
".",
"This",
"function",
"broadcasts",
"over",
"the",
"first",
"index",
"."
] |
python
|
train
| 46.384615 |
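The GPflow helper above inverts a batch of lower-triangular (Cholesky) factors by solving L X = I with a triangular solve rather than forming a generic inverse; the same idea for a single matrix in NumPy/SciPy (values are illustrative):

import numpy as np
from scipy.linalg import solve_triangular

L = np.array([[2.0, 0.0],
              [1.0, 3.0]])                          # lower-triangular factor
L_inv = solve_triangular(L, np.eye(2), lower=True)  # solves L @ X = I
print(np.allclose(L @ L_inv, np.eye(2)))            # True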
benoitkugler/abstractDataLibrary
|
pyDLib/Core/groups.py
|
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L135-L143
|
def extend(self, collection):
"""Merges collections. Ensure uniqueness of ids"""
l_ids = set([a.Id for a in self])
for acces in collection:
if not acces.Id in l_ids:
list.append(self,acces)
info = collection.get_info(Id=acces.Id)
if info:
self.infos[acces.Id] = info
|
[
"def",
"extend",
"(",
"self",
",",
"collection",
")",
":",
"l_ids",
"=",
"set",
"(",
"[",
"a",
".",
"Id",
"for",
"a",
"in",
"self",
"]",
")",
"for",
"acces",
"in",
"collection",
":",
"if",
"not",
"acces",
".",
"Id",
"in",
"l_ids",
":",
"list",
".",
"append",
"(",
"self",
",",
"acces",
")",
"info",
"=",
"collection",
".",
"get_info",
"(",
"Id",
"=",
"acces",
".",
"Id",
")",
"if",
"info",
":",
"self",
".",
"infos",
"[",
"acces",
".",
"Id",
"]",
"=",
"info"
] |
Merges collections. Ensure uniqueness of ids
|
[
"Merges",
"collections",
".",
"Ensure",
"uniqueness",
"of",
"ids"
] |
python
|
train
| 40.222222 |
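The pyDLib ``extend`` record merges two collections while skipping items whose ``Id`` is already present; the same de-duplicating merge sketched with plain Python (illustrative, not the pyDLib classes):

def merge_unique(existing, incoming, key=lambda item: item["Id"]):
    # Append only items whose key is not already present in `existing`.
    seen = {key(item) for item in existing}
    for item in incoming:
        if key(item) not in seen:
            existing.append(item)
            seen.add(key(item))
    return existing

print(merge_unique([{"Id": 1}], [{"Id": 1}, {"Id": 2}]))  # [{'Id': 1}, {'Id': 2}]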
mwgielen/jackal
|
jackal/utils.py
|
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/utils.py#L85-L189
|
def draw_interface(objects, callback, callback_text):
"""
    Draws an ncurses interface. Based on the given object list, every object should have a "string" key; this is what is displayed on the screen, and callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
"""
screen = curses.initscr()
height, width = screen.getmaxyx()
curses.noecho()
curses.cbreak()
curses.start_color()
screen.keypad( 1 )
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.color_pair( 1 )
normalText = curses.A_NORMAL
screen.border( 0 )
curses.curs_set( 0 )
max_row = height - 15 # max number of rows
box = curses.newwin( max_row + 2, int(width - 2), 1, 1 )
box.box()
fmt = PartialFormatter()
row_num = len( objects )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row + 1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if (i == position):
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
while x != 27:
if x == curses.KEY_DOWN:
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_UP:
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
screen.erase()
if x == ord( "\n" ) and row_num != 0:
screen.erase()
screen.border( 0 )
service = objects[position -1]
text = fmt.format(callback_text, **service)
screen.addstr( max_row + 4, 3, text)
text = callback(service)
count = 0
for line in text:
screen.addstr( max_row + 5 + count, 3, line)
count += 1
box.erase()
screen.border( 0 )
box.border( 0 )
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
curses.endwin()
exit()
|
[
"def",
"draw_interface",
"(",
"objects",
",",
"callback",
",",
"callback_text",
")",
":",
"screen",
"=",
"curses",
".",
"initscr",
"(",
")",
"height",
",",
"width",
"=",
"screen",
".",
"getmaxyx",
"(",
")",
"curses",
".",
"noecho",
"(",
")",
"curses",
".",
"cbreak",
"(",
")",
"curses",
".",
"start_color",
"(",
")",
"screen",
".",
"keypad",
"(",
"1",
")",
"curses",
".",
"init_pair",
"(",
"1",
",",
"curses",
".",
"COLOR_BLACK",
",",
"curses",
".",
"COLOR_CYAN",
")",
"highlightText",
"=",
"curses",
".",
"color_pair",
"(",
"1",
")",
"normalText",
"=",
"curses",
".",
"A_NORMAL",
"screen",
".",
"border",
"(",
"0",
")",
"curses",
".",
"curs_set",
"(",
"0",
")",
"max_row",
"=",
"height",
"-",
"15",
"# max number of rows",
"box",
"=",
"curses",
".",
"newwin",
"(",
"max_row",
"+",
"2",
",",
"int",
"(",
"width",
"-",
"2",
")",
",",
"1",
",",
"1",
")",
"box",
".",
"box",
"(",
")",
"fmt",
"=",
"PartialFormatter",
"(",
")",
"row_num",
"=",
"len",
"(",
"objects",
")",
"pages",
"=",
"int",
"(",
"ceil",
"(",
"row_num",
"/",
"max_row",
")",
")",
"position",
"=",
"1",
"page",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"max_row",
"+",
"1",
")",
":",
"if",
"row_num",
"==",
"0",
":",
"box",
".",
"addstr",
"(",
"1",
",",
"1",
",",
"\"There aren't strings\"",
",",
"highlightText",
")",
"else",
":",
"if",
"(",
"i",
"==",
"position",
")",
":",
"box",
".",
"addstr",
"(",
"i",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"highlightText",
")",
"else",
":",
"box",
".",
"addstr",
"(",
"i",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"normalText",
")",
"if",
"i",
"==",
"row_num",
":",
"break",
"screen",
".",
"refresh",
"(",
")",
"box",
".",
"refresh",
"(",
")",
"x",
"=",
"screen",
".",
"getch",
"(",
")",
"while",
"x",
"!=",
"27",
":",
"if",
"x",
"==",
"curses",
".",
"KEY_DOWN",
":",
"if",
"page",
"==",
"1",
":",
"if",
"position",
"<",
"i",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"if",
"pages",
">",
"1",
":",
"page",
"=",
"page",
"+",
"1",
"position",
"=",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"elif",
"page",
"==",
"pages",
":",
"if",
"position",
"<",
"row_num",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"if",
"position",
"<",
"max_row",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
":",
"position",
"=",
"position",
"+",
"1",
"else",
":",
"page",
"=",
"page",
"+",
"1",
"position",
"=",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"if",
"x",
"==",
"curses",
".",
"KEY_UP",
":",
"if",
"page",
"==",
"1",
":",
"if",
"position",
">",
"1",
":",
"position",
"=",
"position",
"-",
"1",
"else",
":",
"if",
"position",
">",
"(",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"position",
"=",
"position",
"-",
"1",
"else",
":",
"page",
"=",
"page",
"-",
"1",
"position",
"=",
"max_row",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"screen",
".",
"erase",
"(",
")",
"if",
"x",
"==",
"ord",
"(",
"\"\\n\"",
")",
"and",
"row_num",
"!=",
"0",
":",
"screen",
".",
"erase",
"(",
")",
"screen",
".",
"border",
"(",
"0",
")",
"service",
"=",
"objects",
"[",
"position",
"-",
"1",
"]",
"text",
"=",
"fmt",
".",
"format",
"(",
"callback_text",
",",
"*",
"*",
"service",
")",
"screen",
".",
"addstr",
"(",
"max_row",
"+",
"4",
",",
"3",
",",
"text",
")",
"text",
"=",
"callback",
"(",
"service",
")",
"count",
"=",
"0",
"for",
"line",
"in",
"text",
":",
"screen",
".",
"addstr",
"(",
"max_row",
"+",
"5",
"+",
"count",
",",
"3",
",",
"line",
")",
"count",
"+=",
"1",
"box",
".",
"erase",
"(",
")",
"screen",
".",
"border",
"(",
"0",
")",
"box",
".",
"border",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"max_row",
"+",
"1",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"if",
"row_num",
"==",
"0",
":",
"box",
".",
"addstr",
"(",
"1",
",",
"1",
",",
"\"There aren't strings\"",
",",
"highlightText",
")",
"else",
":",
"if",
"(",
"i",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
"==",
"position",
"+",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
")",
":",
"box",
".",
"addstr",
"(",
"i",
"-",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"highlightText",
")",
"else",
":",
"box",
".",
"addstr",
"(",
"i",
"-",
"(",
"max_row",
"*",
"(",
"page",
"-",
"1",
")",
")",
",",
"2",
",",
"str",
"(",
"i",
")",
"+",
"\" - \"",
"+",
"objects",
"[",
"i",
"-",
"1",
"]",
"[",
"'string'",
"]",
",",
"normalText",
")",
"if",
"i",
"==",
"row_num",
":",
"break",
"screen",
".",
"refresh",
"(",
")",
"box",
".",
"refresh",
"(",
")",
"x",
"=",
"screen",
".",
"getch",
"(",
")",
"curses",
".",
"endwin",
"(",
")",
"exit",
"(",
")"
] |
Draws an ncurses interface. Based on the given object list, every object should have a "string" key; this is what is displayed on the screen, and callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
|
[
"Draws",
"a",
"ncurses",
"interface",
".",
"Based",
"on",
"the",
"given",
"object",
"list",
"every",
"object",
"should",
"have",
"a",
"string",
"key",
"this",
"is",
"whats",
"displayed",
"on",
"the",
"screen",
"callback",
"is",
"called",
"with",
"the",
"selected",
"object",
".",
"Rest",
"of",
"the",
"code",
"is",
"modified",
"from",
":",
"https",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"30834868"
] |
python
|
valid
| 35.304762 |
rochacbruno/flask_simplelogin
|
example/manage.py
|
https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/manage.py#L23-L41
|
def create_user(**data):
"""Creates user with encrypted password"""
if 'username' not in data or 'password' not in data:
raise ValueError('username and password are required.')
# Hash the user password
data['password'] = generate_password_hash(
data.pop('password'),
method='pbkdf2:sha256'
)
# Here you insert the `data` in your users database
# for this simple example we are recording in a json file
db_users = json.load(open('users.json'))
# add the new created user to json
db_users[data['username']] = data
# commit changes to database
json.dump(db_users, open('users.json', 'w'))
return data
|
[
"def",
"create_user",
"(",
"*",
"*",
"data",
")",
":",
"if",
"'username'",
"not",
"in",
"data",
"or",
"'password'",
"not",
"in",
"data",
":",
"raise",
"ValueError",
"(",
"'username and password are required.'",
")",
"# Hash the user password",
"data",
"[",
"'password'",
"]",
"=",
"generate_password_hash",
"(",
"data",
".",
"pop",
"(",
"'password'",
")",
",",
"method",
"=",
"'pbkdf2:sha256'",
")",
"# Here you insert the `data` in your users database",
"# for this simple example we are recording in a json file",
"db_users",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"'users.json'",
")",
")",
"# add the new created user to json",
"db_users",
"[",
"data",
"[",
"'username'",
"]",
"]",
"=",
"data",
"# commit changes to database",
"json",
".",
"dump",
"(",
"db_users",
",",
"open",
"(",
"'users.json'",
",",
"'w'",
")",
")",
"return",
"data"
] |
Creates user with encrypted password
|
[
"Creates",
"user",
"with",
"encrypted",
"password"
] |
python
|
train
| 34.578947 |
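In the flask_simplelogin ``create_user`` example above the password is hashed (not encrypted) with Werkzeug's ``generate_password_hash``; verification is later done with ``check_password_hash``. A small round-trip, assuming Werkzeug is installed (values are illustrative):

from werkzeug.security import check_password_hash, generate_password_hash

hashed = generate_password_hash("s3cret", method="pbkdf2:sha256")
print(check_password_hash(hashed, "s3cret"))  # True
print(check_password_hash(hashed, "wrong"))   # False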
edoburu/django-tag-parser
|
tag_parser/basetags.py
|
https://github.com/edoburu/django-tag-parser/blob/c24256cfdd0248434f2e3df3444ed9f945d4181f/tag_parser/basetags.py#L272-L299
|
def get_context(self, parent_context, data):
"""
Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context`
"""
if django.VERSION >= (1, 8):
new_context = parent_context.new(data)
else:
settings = {
'autoescape': parent_context.autoescape,
'current_app': parent_context.current_app,
'use_l10n': parent_context.use_l10n,
'use_tz': parent_context.use_tz,
}
new_context = Context(data, **settings)
# Pass CSRF token for same reasons as @register.inclusion_tag does.
csrf_token = parent_context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return new_context
|
[
"def",
"get_context",
"(",
"self",
",",
"parent_context",
",",
"data",
")",
":",
"if",
"django",
".",
"VERSION",
">=",
"(",
"1",
",",
"8",
")",
":",
"new_context",
"=",
"parent_context",
".",
"new",
"(",
"data",
")",
"else",
":",
"settings",
"=",
"{",
"'autoescape'",
":",
"parent_context",
".",
"autoescape",
",",
"'current_app'",
":",
"parent_context",
".",
"current_app",
",",
"'use_l10n'",
":",
"parent_context",
".",
"use_l10n",
",",
"'use_tz'",
":",
"parent_context",
".",
"use_tz",
",",
"}",
"new_context",
"=",
"Context",
"(",
"data",
",",
"*",
"*",
"settings",
")",
"# Pass CSRF token for same reasons as @register.inclusion_tag does.",
"csrf_token",
"=",
"parent_context",
".",
"get",
"(",
"'csrf_token'",
",",
"None",
")",
"if",
"csrf_token",
"is",
"not",
"None",
":",
"new_context",
"[",
"'csrf_token'",
"]",
"=",
"csrf_token",
"return",
"new_context"
] |
Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context`
|
[
"Wrap",
"the",
"context",
"data",
"in",
"a",
":",
"class",
":",
"~django",
".",
"template",
".",
"Context",
"object",
"."
] |
python
|
test
| 38.571429 |
jd/tenacity
|
tenacity/compat.py
|
https://github.com/jd/tenacity/blob/354c40b7dc8e728c438668100dd020b65c84dfc6/tenacity/compat.py#L120-L136
|
def stop_func_accept_retry_state(stop_func):
"""Wrap "stop" function to accept "retry_state" parameter."""
if not six.callable(stop_func):
return stop_func
if func_takes_retry_state(stop_func):
return stop_func
@_utils.wraps(stop_func)
def wrapped_stop_func(retry_state):
warn_about_non_retry_state_deprecation(
'stop', stop_func, stacklevel=4)
return stop_func(
retry_state.attempt_number,
retry_state.seconds_since_start,
)
return wrapped_stop_func
|
[
"def",
"stop_func_accept_retry_state",
"(",
"stop_func",
")",
":",
"if",
"not",
"six",
".",
"callable",
"(",
"stop_func",
")",
":",
"return",
"stop_func",
"if",
"func_takes_retry_state",
"(",
"stop_func",
")",
":",
"return",
"stop_func",
"@",
"_utils",
".",
"wraps",
"(",
"stop_func",
")",
"def",
"wrapped_stop_func",
"(",
"retry_state",
")",
":",
"warn_about_non_retry_state_deprecation",
"(",
"'stop'",
",",
"stop_func",
",",
"stacklevel",
"=",
"4",
")",
"return",
"stop_func",
"(",
"retry_state",
".",
"attempt_number",
",",
"retry_state",
".",
"seconds_since_start",
",",
")",
"return",
"wrapped_stop_func"
] |
Wrap "stop" function to accept "retry_state" parameter.
|
[
"Wrap",
"stop",
"function",
"to",
"accept",
"retry_state",
"parameter",
"."
] |
python
|
train
| 31.529412 |
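The tenacity compat shim above adapts old-style stop functions taking ``(attempt_number, seconds_since_start)`` to the newer single ``retry_state`` argument; a hedged usage sketch of the modern decorator API (delays and the failing function are illustrative):

from tenacity import retry, stop_after_attempt, wait_fixed

calls = {"n": 0}

@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.01))
def flaky():
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("try again")
    return "ok"

print(flaky(), calls["n"])  # ok 3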
Falkonry/falkonry-python-client
|
falkonryclient/service/http.py
|
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/http.py#L197-L215
|
def delete(self, url):
"""
To make a DELETE request to Falkonry API server
:param url: string
"""
response = requests.delete(
self.host + url,
headers={
'Authorization': 'Bearer ' + self.token,
'x-falkonry-source':self.sourceHeader
},
verify=False
)
if response.status_code == 204:
return None
elif response.status_code == 401:
raise Exception(json.dumps({'message':'Unauthorized Access'}))
else:
raise Exception(response.content)
|
[
"def",
"delete",
"(",
"self",
",",
"url",
")",
":",
"response",
"=",
"requests",
".",
"delete",
"(",
"self",
".",
"host",
"+",
"url",
",",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer '",
"+",
"self",
".",
"token",
",",
"'x-falkonry-source'",
":",
"self",
".",
"sourceHeader",
"}",
",",
"verify",
"=",
"False",
")",
"if",
"response",
".",
"status_code",
"==",
"204",
":",
"return",
"None",
"elif",
"response",
".",
"status_code",
"==",
"401",
":",
"raise",
"Exception",
"(",
"json",
".",
"dumps",
"(",
"{",
"'message'",
":",
"'Unauthorized Access'",
"}",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"response",
".",
"content",
")"
] |
To make a DELETE request to Falkonry API server
:param url: string
|
[
"To",
"make",
"a",
"DELETE",
"request",
"to",
"Falkonry",
"API",
"server",
":",
"param",
"url",
":",
"string"
] |
python
|
train
| 31.368421 |
angr/claripy
|
claripy/vsa/strided_interval.py
|
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/vsa/strided_interval.py#L507-L542
|
def _nsplit(self):
"""
Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals
"""
north_pole_left = self.max_int(self.bits - 1) # 01111...1
north_pole_right = 2 ** (self.bits - 1) # 1000...0
# Is `self` straddling the north pole?
straddling = False
if self.upper_bound >= north_pole_right:
if self.lower_bound > self.upper_bound:
# Yes it does!
straddling = True
elif self.lower_bound <= north_pole_left:
straddling = True
else:
if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left:
straddling = True
if straddling:
a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)
a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound,
upper_bound=a_upper_bound, uninitialized=self.uninitialized)
b_lower_bound = a_upper_bound + self.stride
b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound,
upper_bound=self.upper_bound, uninitialized=self.uninitialized)
return [ a, b ]
else:
return [ self.copy() ]
|
[
"def",
"_nsplit",
"(",
"self",
")",
":",
"north_pole_left",
"=",
"self",
".",
"max_int",
"(",
"self",
".",
"bits",
"-",
"1",
")",
"# 01111...1",
"north_pole_right",
"=",
"2",
"**",
"(",
"self",
".",
"bits",
"-",
"1",
")",
"# 1000...0",
"# Is `self` straddling the north pole?",
"straddling",
"=",
"False",
"if",
"self",
".",
"upper_bound",
">=",
"north_pole_right",
":",
"if",
"self",
".",
"lower_bound",
">",
"self",
".",
"upper_bound",
":",
"# Yes it does!",
"straddling",
"=",
"True",
"elif",
"self",
".",
"lower_bound",
"<=",
"north_pole_left",
":",
"straddling",
"=",
"True",
"else",
":",
"if",
"self",
".",
"lower_bound",
">",
"self",
".",
"upper_bound",
"and",
"self",
".",
"lower_bound",
"<=",
"north_pole_left",
":",
"straddling",
"=",
"True",
"if",
"straddling",
":",
"a_upper_bound",
"=",
"north_pole_left",
"-",
"(",
"(",
"north_pole_left",
"-",
"self",
".",
"lower_bound",
")",
"%",
"self",
".",
"stride",
")",
"a",
"=",
"StridedInterval",
"(",
"bits",
"=",
"self",
".",
"bits",
",",
"stride",
"=",
"self",
".",
"stride",
",",
"lower_bound",
"=",
"self",
".",
"lower_bound",
",",
"upper_bound",
"=",
"a_upper_bound",
",",
"uninitialized",
"=",
"self",
".",
"uninitialized",
")",
"b_lower_bound",
"=",
"a_upper_bound",
"+",
"self",
".",
"stride",
"b",
"=",
"StridedInterval",
"(",
"bits",
"=",
"self",
".",
"bits",
",",
"stride",
"=",
"self",
".",
"stride",
",",
"lower_bound",
"=",
"b_lower_bound",
",",
"upper_bound",
"=",
"self",
".",
"upper_bound",
",",
"uninitialized",
"=",
"self",
".",
"uninitialized",
")",
"return",
"[",
"a",
",",
"b",
"]",
"else",
":",
"return",
"[",
"self",
".",
"copy",
"(",
")",
"]"
] |
Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals
|
[
"Split",
"self",
"at",
"the",
"north",
"pole",
"which",
"is",
"the",
"same",
"as",
"in",
"signed",
"arithmetic",
"."
] |
python
|
train
| 38.277778 |
SmokinCaterpillar/pypet
|
pypet/pypetlogging.py
|
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/pypetlogging.py#L375-L393
|
def show_progress(self, n, total_runs):
"""Displays a progressbar"""
if self.report_progress:
percentage, logger_name, log_level = self.report_progress
if logger_name == 'print':
logger = 'print'
else:
logger = logging.getLogger(logger_name)
if n == -1:
# Compute the number of digits and avoid log10(0)
digits = int(math.log10(total_runs + 0.1)) + 1
self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs '
fmt_string = self._format_string % (n + 1, total_runs) + '%s'
reprint = log_level == 0
progressbar(n, total_runs, percentage_step=percentage,
logger=logger, log_level=log_level,
fmt_string=fmt_string, reprint=reprint)
|
[
"def",
"show_progress",
"(",
"self",
",",
"n",
",",
"total_runs",
")",
":",
"if",
"self",
".",
"report_progress",
":",
"percentage",
",",
"logger_name",
",",
"log_level",
"=",
"self",
".",
"report_progress",
"if",
"logger_name",
"==",
"'print'",
":",
"logger",
"=",
"'print'",
"else",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"logger_name",
")",
"if",
"n",
"==",
"-",
"1",
":",
"# Compute the number of digits and avoid log10(0)",
"digits",
"=",
"int",
"(",
"math",
".",
"log10",
"(",
"total_runs",
"+",
"0.1",
")",
")",
"+",
"1",
"self",
".",
"_format_string",
"=",
"'PROGRESS: Finished %'",
"+",
"'%d'",
"%",
"digits",
"+",
"'d/%d runs '",
"fmt_string",
"=",
"self",
".",
"_format_string",
"%",
"(",
"n",
"+",
"1",
",",
"total_runs",
")",
"+",
"'%s'",
"reprint",
"=",
"log_level",
"==",
"0",
"progressbar",
"(",
"n",
",",
"total_runs",
",",
"percentage_step",
"=",
"percentage",
",",
"logger",
"=",
"logger",
",",
"log_level",
"=",
"log_level",
",",
"fmt_string",
"=",
"fmt_string",
",",
"reprint",
"=",
"reprint",
")"
] |
Displays a progressbar
|
[
"Displays",
"a",
"progressbar"
] |
python
|
test
| 45.052632 |
senaite/senaite.core
|
bika/lims/content/batch.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/batch.py#L355-L362
|
def getAnalysisRequestsBrains(self, **kwargs):
"""Return all the Analysis Requests brains linked to the Batch
kargs are passed directly to the catalog.
"""
kwargs['getBatchUID'] = self.UID()
catalog = getToolByName(self, CATALOG_ANALYSIS_REQUEST_LISTING)
brains = catalog(kwargs)
return brains
|
[
"def",
"getAnalysisRequestsBrains",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'getBatchUID'",
"]",
"=",
"self",
".",
"UID",
"(",
")",
"catalog",
"=",
"getToolByName",
"(",
"self",
",",
"CATALOG_ANALYSIS_REQUEST_LISTING",
")",
"brains",
"=",
"catalog",
"(",
"kwargs",
")",
"return",
"brains"
] |
Return all the Analysis Requests brains linked to the Batch
kargs are passed directly to the catalog.
|
[
"Return",
"all",
"the",
"Analysis",
"Requests",
"brains",
"linked",
"to",
"the",
"Batch",
"kargs",
"are",
"passed",
"directly",
"to",
"the",
"catalog",
"."
] |
python
|
train
| 42.75 |
senaite/senaite.core
|
bika/lims/utils/analysis.py
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/utils/analysis.py#L303-L373
|
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
       formatted in decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
    analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string
"""
try:
result = float(result)
except ValueError:
return result
# continuing with 'nan' result will cause formatting to fail.
if math.isnan(result):
return result
# Scientific notation?
# Get the default precision for scientific notation
threshold = analysis.getExponentialFormatPrecision()
precision = analysis.getPrecision(result)
formatted = _format_decimal_or_sci(result, precision, threshold, sciformat)
return formatDecimalMark(formatted, decimalmark)
|
[
"def",
"format_numeric_result",
"(",
"analysis",
",",
"result",
",",
"decimalmark",
"=",
"'.'",
",",
"sciformat",
"=",
"1",
")",
":",
"try",
":",
"result",
"=",
"float",
"(",
"result",
")",
"except",
"ValueError",
":",
"return",
"result",
"# continuing with 'nan' result will cause formatting to fail.",
"if",
"math",
".",
"isnan",
"(",
"result",
")",
":",
"return",
"result",
"# Scientific notation?",
"# Get the default precision for scientific notation",
"threshold",
"=",
"analysis",
".",
"getExponentialFormatPrecision",
"(",
")",
"precision",
"=",
"analysis",
".",
"getPrecision",
"(",
"result",
")",
"formatted",
"=",
"_format_decimal_or_sci",
"(",
"result",
",",
"precision",
",",
"threshold",
",",
"sciformat",
")",
"return",
"formatDecimalMark",
"(",
"formatted",
",",
"decimalmark",
")"
] |
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
   formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string
|
[
"Returns",
"the",
"formatted",
"number",
"part",
"of",
"a",
"results",
"value",
".",
"This",
"is",
"responsible",
"for",
"deciding",
"the",
"precision",
"and",
"notation",
"of",
"numeric",
"values",
"in",
"accordance",
"to",
"the",
"uncertainty",
".",
"If",
"a",
"non",
"-",
"numeric",
"result",
"value",
"is",
"given",
"the",
"value",
"will",
"be",
"returned",
"unchanged",
"."
] |
python
|
train
| 42.521127 |
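A standalone sketch of the decimal-versus-scientific decision the docstring above describes. The `precision` and `threshold` arguments stand in for the values the analysis object would supply, and the helper is an illustration, not the real `_format_decimal_or_sci`.

def sketch_format(result, precision, threshold):
    # More integer digits than the threshold -> scientific notation;
    # otherwise round to the precision derived from the uncertainty.
    integer_digits = len(str(abs(int(result))))
    if integer_digits > threshold:
        return "%.*E" % (precision, result)
    return "%.*f" % (precision, result)

print(sketch_format(32092, 4, 4))  # 3.2092E+04, as in example (a)
print(sketch_format(5.234, 1, 4))  # 5.2, as in example (b)
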
inveniosoftware/invenio-github
|
invenio_github/api.py
|
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/api.py#L133-L185
|
def sync(self, hooks=True, async_hooks=True):
"""Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information.
"""
active_repos = {}
github_repos = {repo.id: repo for repo in self.api.repositories()
if repo.permissions['admin']}
for gh_repo_id, gh_repo in github_repos.items():
active_repos[gh_repo_id] = {
'id': gh_repo_id,
'full_name': gh_repo.full_name,
'description': gh_repo.description,
}
if hooks:
self._sync_hooks(list(active_repos.keys()),
asynchronous=async_hooks)
# Update changed names for repositories stored in DB
db_repos = Repository.query.filter(
Repository.user_id == self.user_id,
Repository.github_id.in_(github_repos.keys())
)
for repo in db_repos:
gh_repo = github_repos.get(repo.github_id)
if gh_repo and repo.name != gh_repo.full_name:
repo.name = gh_repo.full_name
db.session.add(repo)
# Remove ownership from repositories that the user has no longer
# 'admin' permissions, or have been deleted.
Repository.query.filter(
Repository.user_id == self.user_id,
~Repository.github_id.in_(github_repos.keys())
).update(dict(user_id=None, hook=None), synchronize_session=False)
# Update repos and last sync
self.account.extra_data.update(dict(
repos=active_repos,
last_sync=iso_utcnow(),
))
self.account.extra_data.changed()
db.session.add(self.account)
|
[
"def",
"sync",
"(",
"self",
",",
"hooks",
"=",
"True",
",",
"async_hooks",
"=",
"True",
")",
":",
"active_repos",
"=",
"{",
"}",
"github_repos",
"=",
"{",
"repo",
".",
"id",
":",
"repo",
"for",
"repo",
"in",
"self",
".",
"api",
".",
"repositories",
"(",
")",
"if",
"repo",
".",
"permissions",
"[",
"'admin'",
"]",
"}",
"for",
"gh_repo_id",
",",
"gh_repo",
"in",
"github_repos",
".",
"items",
"(",
")",
":",
"active_repos",
"[",
"gh_repo_id",
"]",
"=",
"{",
"'id'",
":",
"gh_repo_id",
",",
"'full_name'",
":",
"gh_repo",
".",
"full_name",
",",
"'description'",
":",
"gh_repo",
".",
"description",
",",
"}",
"if",
"hooks",
":",
"self",
".",
"_sync_hooks",
"(",
"list",
"(",
"active_repos",
".",
"keys",
"(",
")",
")",
",",
"asynchronous",
"=",
"async_hooks",
")",
"# Update changed names for repositories stored in DB",
"db_repos",
"=",
"Repository",
".",
"query",
".",
"filter",
"(",
"Repository",
".",
"user_id",
"==",
"self",
".",
"user_id",
",",
"Repository",
".",
"github_id",
".",
"in_",
"(",
"github_repos",
".",
"keys",
"(",
")",
")",
")",
"for",
"repo",
"in",
"db_repos",
":",
"gh_repo",
"=",
"github_repos",
".",
"get",
"(",
"repo",
".",
"github_id",
")",
"if",
"gh_repo",
"and",
"repo",
".",
"name",
"!=",
"gh_repo",
".",
"full_name",
":",
"repo",
".",
"name",
"=",
"gh_repo",
".",
"full_name",
"db",
".",
"session",
".",
"add",
"(",
"repo",
")",
"# Remove ownership from repositories that the user has no longer",
"# 'admin' permissions, or have been deleted.",
"Repository",
".",
"query",
".",
"filter",
"(",
"Repository",
".",
"user_id",
"==",
"self",
".",
"user_id",
",",
"~",
"Repository",
".",
"github_id",
".",
"in_",
"(",
"github_repos",
".",
"keys",
"(",
")",
")",
")",
".",
"update",
"(",
"dict",
"(",
"user_id",
"=",
"None",
",",
"hook",
"=",
"None",
")",
",",
"synchronize_session",
"=",
"False",
")",
"# Update repos and last sync",
"self",
".",
"account",
".",
"extra_data",
".",
"update",
"(",
"dict",
"(",
"repos",
"=",
"active_repos",
",",
"last_sync",
"=",
"iso_utcnow",
"(",
")",
",",
")",
")",
"self",
".",
"account",
".",
"extra_data",
".",
"changed",
"(",
")",
"db",
".",
"session",
".",
"add",
"(",
"self",
".",
"account",
")"
] |
Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information.
|
[
"Synchronize",
"user",
"repositories",
"."
] |
python
|
train
| 37.886792 |
HydraChain/hydrachain
|
hydrachain/examples/native/fungible/fungible_contract.py
|
https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/examples/native/fungible/fungible_contract.py#L49-L60
|
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS
|
[
"def",
"transfer",
"(",
"ctx",
",",
"_to",
"=",
"'address'",
",",
"_value",
"=",
"'uint256'",
",",
"returns",
"=",
"STATUS",
")",
":",
"log",
".",
"DEV",
"(",
"'In Fungible.transfer'",
")",
"if",
"ctx",
".",
"accounts",
"[",
"ctx",
".",
"msg_sender",
"]",
">=",
"_value",
":",
"ctx",
".",
"accounts",
"[",
"ctx",
".",
"msg_sender",
"]",
"-=",
"_value",
"ctx",
".",
"accounts",
"[",
"_to",
"]",
"+=",
"_value",
"ctx",
".",
"Transfer",
"(",
"ctx",
".",
"msg_sender",
",",
"_to",
",",
"_value",
")",
"return",
"OK",
"else",
":",
"return",
"INSUFFICIENTFUNDS"
] |
Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
|
[
"Standardized",
"Contract",
"API",
":",
"function",
"transfer",
"(",
"address",
"_to",
"uint256",
"_value",
")",
"returns",
"(",
"bool",
"_success",
")"
] |
python
|
test
| 41.25 |
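A plain-Python simulation of the balance bookkeeping in the contract above; the dictionary and status constants are illustrative stand-ins for the hydrachain execution context.

OK, INSUFFICIENTFUNDS = "OK", "INSUFFICIENTFUNDS"
accounts = {"alice": 100, "bob": 0}

def transfer(sender, to, value):
    # Mirror the contract logic: move funds only if the sender can cover them.
    if accounts[sender] >= value:
        accounts[sender] -= value
        accounts[to] += value
        return OK
    return INSUFFICIENTFUNDS

print(transfer("alice", "bob", 30), accounts)   # OK {'alice': 70, 'bob': 30}
print(transfer("alice", "bob", 500), accounts)  # INSUFFICIENTFUNDS, unchanged
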
elliterate/capybara.py
|
capybara/node/matchers.py
|
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/node/matchers.py#L784-L798
|
def has_unchecked_field(self, locator, **kwargs):
"""
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
"""
kwargs["checked"] = False
return self.has_selector("field", locator, **kwargs)
|
[
"def",
"has_unchecked_field",
"(",
"self",
",",
"locator",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"checked\"",
"]",
"=",
"False",
"return",
"self",
".",
"has_selector",
"(",
"\"field\"",
",",
"locator",
",",
"*",
"*",
"kwargs",
")"
] |
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
|
[
"Checks",
"if",
"the",
"page",
"or",
"current",
"node",
"has",
"a",
"radio",
"button",
"or",
"checkbox",
"with",
"the",
"given",
"label",
"value",
"or",
"id",
"that",
"is",
"currently",
"unchecked",
"."
] |
python
|
test
| 34.866667 |
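A hedged usage sketch for the matcher above. It assumes a configured capybara.py application and driver, and a page with a "Subscribe" checkbox; the route and locator are illustrative.

from capybara.dsl import page  # assumes capybara.app and a driver are configured

page.visit("/newsletter")
# True only when a checkbox or radio labelled "Subscribe" exists and is unchecked.
if page.has_unchecked_field("Subscribe"):
    page.check("Subscribe")
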
tyarkoni/pliers
|
pliers/diagnostics/diagnostics.py
|
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/diagnostics/diagnostics.py#L49-L60
|
def variance_inflation_factors(df):
'''
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
'''
corr = np.corrcoef(df, rowvar=0)
corr_inv = np.linalg.inv(corr)
vifs = np.diagonal(corr_inv)
return pd.Series(vifs, df.columns, name='VIF')
|
[
"def",
"variance_inflation_factors",
"(",
"df",
")",
":",
"corr",
"=",
"np",
".",
"corrcoef",
"(",
"df",
",",
"rowvar",
"=",
"0",
")",
"corr_inv",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"corr",
")",
"vifs",
"=",
"np",
".",
"diagonal",
"(",
"corr_inv",
")",
"return",
"pd",
".",
"Series",
"(",
"vifs",
",",
"df",
".",
"columns",
",",
"name",
"=",
"'VIF'",
")"
] |
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
|
[
"Computes",
"the",
"variance",
"inflation",
"factor",
"(",
"VIF",
")",
"for",
"each",
"column",
"in",
"the",
"df",
".",
"Returns",
"a",
"pandas",
"Series",
"of",
"VIFs"
] |
python
|
train
| 31.916667 |
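A runnable demonstration of the helper above on a small DataFrame with two nearly collinear columns and one independent column; only numpy and pandas are required, and the function body is repeated verbatim so the snippet is self-contained.

import numpy as np
import pandas as pd

def variance_inflation_factors(df):
    corr = np.corrcoef(df, rowvar=0)
    corr_inv = np.linalg.inv(corr)
    vifs = np.diagonal(corr_inv)
    return pd.Series(vifs, df.columns, name='VIF')

rng = np.random.default_rng(0)
x = rng.normal(size=200)
df = pd.DataFrame({
    'x': x,
    'x_noisy': x + rng.normal(scale=0.1, size=200),  # nearly collinear with x
    'z': rng.normal(size=200),                        # independent column
})
print(variance_inflation_factors(df))  # large VIFs for x/x_noisy, ~1 for z
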
log2timeline/plaso
|
plaso/parsers/systemd_journal.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/systemd_journal.py#L131-L164
|
def _ParseEntryArrayObject(self, file_object, file_offset):
"""Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
"""
entry_array_object_map = self._GetDataTypeMap(
'systemd_journal_entry_array_object')
try:
entry_array_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_array_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry array object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
entry_array_object.object_type))
if entry_array_object.object_flags != 0:
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
entry_array_object.object_flags))
return entry_array_object
|
[
"def",
"_ParseEntryArrayObject",
"(",
"self",
",",
"file_object",
",",
"file_offset",
")",
":",
"entry_array_object_map",
"=",
"self",
".",
"_GetDataTypeMap",
"(",
"'systemd_journal_entry_array_object'",
")",
"try",
":",
"entry_array_object",
",",
"_",
"=",
"self",
".",
"_ReadStructureFromFileObject",
"(",
"file_object",
",",
"file_offset",
",",
"entry_array_object_map",
")",
"except",
"(",
"ValueError",
",",
"errors",
".",
"ParseError",
")",
"as",
"exception",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"(",
"'Unable to parse entry array object at offset: 0x{0:08x} with error: '",
"'{1!s}'",
")",
".",
"format",
"(",
"file_offset",
",",
"exception",
")",
")",
"if",
"entry_array_object",
".",
"object_type",
"!=",
"self",
".",
"_OBJECT_TYPE_ENTRY_ARRAY",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"'Unsupported object type: {0:d}.'",
".",
"format",
"(",
"entry_array_object",
".",
"object_type",
")",
")",
"if",
"entry_array_object",
".",
"object_flags",
"!=",
"0",
":",
"raise",
"errors",
".",
"ParseError",
"(",
"'Unsupported object flags: 0x{0:02x}.'",
".",
"format",
"(",
"entry_array_object",
".",
"object_flags",
")",
")",
"return",
"entry_array_object"
] |
Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
|
[
"Parses",
"an",
"entry",
"array",
"object",
"."
] |
python
|
train
| 36.5 |
Crunch-io/crunch-cube
|
src/cr/cube/crunch_cube.py
|
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/crunch_cube.py#L1445-L1449
|
def weighted_n(self):
"""float count of returned rows adjusted for weighting."""
if not self.is_weighted:
return float(self.unweighted_n)
return float(sum(self._cube_dict["result"]["measures"]["count"]["data"]))
|
[
"def",
"weighted_n",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_weighted",
":",
"return",
"float",
"(",
"self",
".",
"unweighted_n",
")",
"return",
"float",
"(",
"sum",
"(",
"self",
".",
"_cube_dict",
"[",
"\"result\"",
"]",
"[",
"\"measures\"",
"]",
"[",
"\"count\"",
"]",
"[",
"\"data\"",
"]",
")",
")"
] |
float count of returned rows adjusted for weighting.
|
[
"float",
"count",
"of",
"returned",
"rows",
"adjusted",
"for",
"weighting",
"."
] |
python
|
train
| 48.6 |
ray-project/ray
|
python/ray/function_manager.py
|
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L455-L489
|
def get_execution_info(self, driver_id, function_descriptor):
"""Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
"""
if self._worker.load_code_from_local:
# Load function from local code.
# Currently, we don't support isolating code by drivers,
# thus always set driver ID to NIL here.
driver_id = ray.DriverID.nil()
if not function_descriptor.is_actor_method():
self._load_function_from_local(driver_id, function_descriptor)
else:
# Load function from GCS.
# Wait until the function to be executed has actually been
# registered on this worker. We will push warnings to the user if
# we spend too long in this loop.
# The driver function may not be found in sys.path. Try to load
# the function from GCS.
with profiling.profile("wait_for_function"):
self._wait_for_function(function_descriptor, driver_id)
try:
function_id = function_descriptor.function_id
info = self._function_execution_info[driver_id][function_id]
except KeyError as e:
message = ("Error occurs in get_execution_info: "
"driver_id: %s, function_descriptor: %s. Message: %s" %
(driver_id, function_descriptor, e))
raise KeyError(message)
return info
|
[
"def",
"get_execution_info",
"(",
"self",
",",
"driver_id",
",",
"function_descriptor",
")",
":",
"if",
"self",
".",
"_worker",
".",
"load_code_from_local",
":",
"# Load function from local code.",
"# Currently, we don't support isolating code by drivers,",
"# thus always set driver ID to NIL here.",
"driver_id",
"=",
"ray",
".",
"DriverID",
".",
"nil",
"(",
")",
"if",
"not",
"function_descriptor",
".",
"is_actor_method",
"(",
")",
":",
"self",
".",
"_load_function_from_local",
"(",
"driver_id",
",",
"function_descriptor",
")",
"else",
":",
"# Load function from GCS.",
"# Wait until the function to be executed has actually been",
"# registered on this worker. We will push warnings to the user if",
"# we spend too long in this loop.",
"# The driver function may not be found in sys.path. Try to load",
"# the function from GCS.",
"with",
"profiling",
".",
"profile",
"(",
"\"wait_for_function\"",
")",
":",
"self",
".",
"_wait_for_function",
"(",
"function_descriptor",
",",
"driver_id",
")",
"try",
":",
"function_id",
"=",
"function_descriptor",
".",
"function_id",
"info",
"=",
"self",
".",
"_function_execution_info",
"[",
"driver_id",
"]",
"[",
"function_id",
"]",
"except",
"KeyError",
"as",
"e",
":",
"message",
"=",
"(",
"\"Error occurs in get_execution_info: \"",
"\"driver_id: %s, function_descriptor: %s. Message: %s\"",
"%",
"(",
"driver_id",
",",
"function_descriptor",
",",
"e",
")",
")",
"raise",
"KeyError",
"(",
"message",
")",
"return",
"info"
] |
Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
|
[
"Get",
"the",
"FunctionExecutionInfo",
"of",
"a",
"remote",
"function",
"."
] |
python
|
train
| 46.914286 |
tarzanjw/python-mysql-binlog-to-blinker
|
mysqlbinlog2blinker/__init__.py
|
https://github.com/tarzanjw/python-mysql-binlog-to-blinker/blob/d61ab5962345377e142a225b16f731ab4196fc26/mysqlbinlog2blinker/__init__.py#L56-L90
|
def start_replication(mysql_settings,
binlog_pos_memory=(None, 2),
**kwargs):
""" Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
            If a tuple (str, float) is passed, it will be used to initialize the
            default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the filename
            is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
"""
if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
if not isinstance(binlog_pos_memory, (tuple, list)):
raise ValueError('Invalid binlog position memory: %s'
% binlog_pos_memory)
binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory)
mysql_settings.setdefault('connect_timeout', 5)
kwargs.setdefault('blocking', True)
kwargs.setdefault('resume_stream', True)
with binlog_pos_memory:
kwargs.setdefault('log_file', binlog_pos_memory.log_file)
kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
_logger.info('Start replication from %s with:\n%s'
% (mysql_settings, kwargs))
start_publishing(mysql_settings, **kwargs)
|
[
"def",
"start_replication",
"(",
"mysql_settings",
",",
"binlog_pos_memory",
"=",
"(",
"None",
",",
"2",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"binlog_pos_memory",
",",
"_bpm",
".",
"BaseBinlogPosMemory",
")",
":",
"if",
"not",
"isinstance",
"(",
"binlog_pos_memory",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid binlog position memory: %s'",
"%",
"binlog_pos_memory",
")",
"binlog_pos_memory",
"=",
"_bpm",
".",
"FileBasedBinlogPosMemory",
"(",
"*",
"binlog_pos_memory",
")",
"mysql_settings",
".",
"setdefault",
"(",
"'connect_timeout'",
",",
"5",
")",
"kwargs",
".",
"setdefault",
"(",
"'blocking'",
",",
"True",
")",
"kwargs",
".",
"setdefault",
"(",
"'resume_stream'",
",",
"True",
")",
"with",
"binlog_pos_memory",
":",
"kwargs",
".",
"setdefault",
"(",
"'log_file'",
",",
"binlog_pos_memory",
".",
"log_file",
")",
"kwargs",
".",
"setdefault",
"(",
"'log_pos'",
",",
"binlog_pos_memory",
".",
"log_pos",
")",
"_logger",
".",
"info",
"(",
"'Start replication from %s with:\\n%s'",
"%",
"(",
"mysql_settings",
",",
"kwargs",
")",
")",
"start_publishing",
"(",
"mysql_settings",
",",
"*",
"*",
"kwargs",
")"
] |
Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
        If a tuple (str, float) is passed, it will be used to initialize the
        default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the filename
        is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
|
[
"Start",
"replication",
"on",
"server",
"specified",
"by",
"*",
"mysql_settings",
"*"
] |
python
|
train
| 46.342857 |
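A standalone sketch of the defaulting pattern used above: caller-supplied keyword arguments win, and anything missing falls back to the remembered binlog position. The function and value names are illustrative, not mysqlbinlog2blinker APIs.

def build_stream_kwargs(stored_log_file, stored_log_pos, **kwargs):
    # setdefault only fills in values the caller did not supply explicitly.
    kwargs.setdefault('blocking', True)
    kwargs.setdefault('resume_stream', True)
    kwargs.setdefault('log_file', stored_log_file)
    kwargs.setdefault('log_pos', stored_log_pos)
    return kwargs

# Caller overrides win; everything else comes from the stored position.
print(build_stream_kwargs('mysql-bin.000042', 120, log_pos=4, blocking=False))
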
tableau/document-api-python
|
tableaudocumentapi/xfile.py
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L58-L74
|
def find_file_in_zip(zip_file):
'''Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension.
'''
candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'),
zip_file.namelist())
for filename in candidate_files:
with zip_file.open(filename) as xml_candidate:
try:
ET.parse(xml_candidate)
return filename
except ET.ParseError:
# That's not an XML file by gosh
pass
|
[
"def",
"find_file_in_zip",
"(",
"zip_file",
")",
":",
"candidate_files",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"in",
"(",
"'twb'",
",",
"'tds'",
")",
",",
"zip_file",
".",
"namelist",
"(",
")",
")",
"for",
"filename",
"in",
"candidate_files",
":",
"with",
"zip_file",
".",
"open",
"(",
"filename",
")",
"as",
"xml_candidate",
":",
"try",
":",
"ET",
".",
"parse",
"(",
"xml_candidate",
")",
"return",
"filename",
"except",
"ET",
".",
"ParseError",
":",
"# That's not an XML file by gosh",
"pass"
] |
Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension.
|
[
"Returns",
"the",
"twb",
"/",
"tds",
"file",
"from",
"a",
"Tableau",
"packaged",
"file",
"format",
".",
"Packaged",
"files",
"can",
"contain",
"cache",
"entries",
"which",
"are",
"also",
"valid",
"XML",
"so",
"only",
"look",
"for",
"files",
"with",
"a",
".",
"tds",
"or",
".",
"twb",
"extension",
"."
] |
python
|
train
| 37.235294 |
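A runnable round trip for the helper above: build an in-memory packaged file containing a non-XML cache entry and a .twb workbook, then locate the workbook. Only the standard library is needed; the function body is repeated so the snippet runs on its own.

import io
import xml.etree.ElementTree as ET
import zipfile

def find_file_in_zip(zip_file):
    candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'),
                             zip_file.namelist())
    for filename in candidate_files:
        with zip_file.open(filename) as xml_candidate:
            try:
                ET.parse(xml_candidate)
                return filename
            except ET.ParseError:
                pass

buffer = io.BytesIO()
with zipfile.ZipFile(buffer, 'w') as z:
    z.writestr('cache/thumbnail.png', b'\x89PNG not xml')
    z.writestr('report.twb', '<workbook version="10.0"></workbook>')

with zipfile.ZipFile(buffer) as z:
    print(find_file_in_zip(z))  # -> report.twb
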
awslabs/aws-sam-cli
|
samcli/local/lambdafn/runtime.py
|
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambdafn/runtime.py#L118-L149
|
def _configure_interrupt(self, function_name, timeout, container, is_debugging):
"""
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise
"""
def timer_handler():
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Function '%s' timed out after %d seconds", function_name, timeout)
self._container_manager.stop(container)
def signal_handler(sig, frame):
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Execution of function %s was interrupted", function_name)
self._container_manager.stop(container)
if is_debugging:
LOG.debug("Setting up SIGTERM interrupt handler")
signal.signal(signal.SIGTERM, signal_handler)
else:
# Start a timer, we'll use this to abort the function if it runs beyond the specified timeout
LOG.debug("Starting a timer for %s seconds for function '%s'", timeout, function_name)
timer = threading.Timer(timeout, timer_handler, ())
timer.start()
return timer
|
[
"def",
"_configure_interrupt",
"(",
"self",
",",
"function_name",
",",
"timeout",
",",
"container",
",",
"is_debugging",
")",
":",
"def",
"timer_handler",
"(",
")",
":",
"# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures",
"LOG",
".",
"info",
"(",
"\"Function '%s' timed out after %d seconds\"",
",",
"function_name",
",",
"timeout",
")",
"self",
".",
"_container_manager",
".",
"stop",
"(",
"container",
")",
"def",
"signal_handler",
"(",
"sig",
",",
"frame",
")",
":",
"# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures",
"LOG",
".",
"info",
"(",
"\"Execution of function %s was interrupted\"",
",",
"function_name",
")",
"self",
".",
"_container_manager",
".",
"stop",
"(",
"container",
")",
"if",
"is_debugging",
":",
"LOG",
".",
"debug",
"(",
"\"Setting up SIGTERM interrupt handler\"",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal_handler",
")",
"else",
":",
"# Start a timer, we'll use this to abort the function if it runs beyond the specified timeout",
"LOG",
".",
"debug",
"(",
"\"Starting a timer for %s seconds for function '%s'\"",
",",
"timeout",
",",
"function_name",
")",
"timer",
"=",
"threading",
".",
"Timer",
"(",
"timeout",
",",
"timer_handler",
",",
"(",
")",
")",
"timer",
".",
"start",
"(",
")",
"return",
"timer"
] |
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise
|
[
"When",
"a",
"Lambda",
"function",
"is",
"executing",
"we",
"setup",
"certain",
"interrupt",
"handlers",
"to",
"stop",
"the",
"execution",
".",
"Usually",
"we",
"setup",
"a",
"function",
"timeout",
"interrupt",
"to",
"kill",
"the",
"container",
"after",
"timeout",
"expires",
".",
"If",
"debugging",
"though",
"we",
"don",
"t",
"enforce",
"a",
"timeout",
".",
"But",
"we",
"setup",
"a",
"SIGINT",
"interrupt",
"to",
"catch",
"Ctrl",
"+",
"C",
"and",
"terminate",
"the",
"container",
"."
] |
python
|
train
| 57.375 |
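A runnable miniature of the timer branch above: start a threading.Timer that would "stop the container" once the timeout elapses, and cancel it when the simulated work finishes first. The stop function is a print stand-in for the container manager.

import threading
import time

def stop_container():
    print("timeout hit: stopping container")

timeout = 0.5
timer = threading.Timer(timeout, stop_container)
timer.start()

time.sleep(0.2)   # simulated function execution that finishes within the limit
timer.cancel()    # completed before the timeout, so nothing gets stopped
print("function returned before the timer fired")
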
rigetti/pyquil
|
pyquil/device.py
|
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/device.py#L255-L263
|
def fCPHASEs(self):
"""
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
"""
return {tuple(es.targets): es.fCPHASE for es in self.edges_specs}
|
[
"def",
"fCPHASEs",
"(",
"self",
")",
":",
"return",
"{",
"tuple",
"(",
"es",
".",
"targets",
")",
":",
"es",
".",
"fCPHASE",
"for",
"es",
"in",
"self",
".",
"edges_specs",
"}"
] |
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
|
[
"Get",
"a",
"dictionary",
"of",
"CPHASE",
"fidelities",
"(",
"normalized",
"to",
"unity",
")",
"from",
"the",
"specs",
"keyed",
"by",
"targets",
"(",
"qubit",
"-",
"qubit",
"pairs",
")",
"."
] |
python
|
train
| 39.777778 |
ThreatConnect-Inc/tcex
|
tcex/tcex.py
|
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L427-L433
|
def error_codes(self):
"""ThreatConnect error codes."""
if self._error_codes is None:
from .tcex_error_codes import TcExErrorCodes
self._error_codes = TcExErrorCodes()
return self._error_codes
|
[
"def",
"error_codes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_error_codes",
"is",
"None",
":",
"from",
".",
"tcex_error_codes",
"import",
"TcExErrorCodes",
"self",
".",
"_error_codes",
"=",
"TcExErrorCodes",
"(",
")",
"return",
"self",
".",
"_error_codes"
] |
ThreatConnect error codes.
|
[
"ThreatConnect",
"error",
"codes",
"."
] |
python
|
train
| 33.571429 |
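A standalone sketch of the lazy-initialization idiom used above: build the helper object only on first access and cache it on the instance. The ErrorCodes class below is a placeholder for the lazily imported TcExErrorCodes-style helper.

class ErrorCodes:
    # Placeholder for the helper that the real property imports lazily.
    def message(self, code):
        return {100: "API error", 200: "input error"}.get(code, "unknown")

class Toolbox:
    def __init__(self):
        self._error_codes = None

    @property
    def error_codes(self):
        # Construct the helper only when first requested, then reuse it.
        if self._error_codes is None:
            self._error_codes = ErrorCodes()
        return self._error_codes

tb = Toolbox()
print(tb.error_codes is tb.error_codes)  # True: created once, cached afterwards
print(tb.error_codes.message(100))       # API error
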
apache/airflow
|
airflow/contrib/operators/cassandra_to_gcs.py
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/cassandra_to_gcs.py#L258-L266
|
def convert_tuple_type(cls, name, value):
"""
    Converts a tuple to RECORD that contains n fields, each of which will be converted
    to its corresponding data type in bq and will be named 'field_<index>', where
    index is determined by the order of the tuple elements defined in cassandra.
"""
names = ['field_' + str(i) for i in range(len(value))]
values = [cls.convert_value(name, value) for name, value in zip(names, value)]
return cls.generate_data_dict(names, values)
|
[
"def",
"convert_tuple_type",
"(",
"cls",
",",
"name",
",",
"value",
")",
":",
"names",
"=",
"[",
"'field_'",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"value",
")",
")",
"]",
"values",
"=",
"[",
"cls",
".",
"convert_value",
"(",
"name",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"zip",
"(",
"names",
",",
"value",
")",
"]",
"return",
"cls",
".",
"generate_data_dict",
"(",
"names",
",",
"values",
")"
] |
Converts a tuple to RECORD that contains n fields, each of which will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
|
[
"Converts",
"a",
"tuple",
"to",
"RECORD",
"that",
"contains",
"n",
"fields",
"each",
"will",
"be",
"converted",
"to",
"its",
"corresponding",
"data",
"type",
"in",
"bq",
"and",
"will",
"be",
"named",
"field_<index",
">",
"where",
"index",
"is",
"determined",
"by",
"the",
"order",
"of",
"the",
"tuple",
"elements",
"defined",
"in",
"cassandra",
"."
] |
python
|
test
| 57 |
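A simplified standalone version of the tuple-to-RECORD naming scheme above; the per-element conversion is reduced to str so the example runs without the Cassandra/BigQuery machinery.

def convert_tuple_type(value, convert_value=str):
    # Name the fields positionally, convert each element, and zip into a dict.
    names = ['field_' + str(i) for i in range(len(value))]
    values = [convert_value(v) for v in value]
    return dict(zip(names, values))

print(convert_tuple_type((42, 'reads', 3.14)))
# {'field_0': '42', 'field_1': 'reads', 'field_2': '3.14'}
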
GNS3/gns3-server
|
gns3server/compute/vmware/__init__.py
|
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/vmware/__init__.py#L144-L164
|
def _check_vmware_player_requirements(self, player_version):
"""
Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version.
"""
player_version = int(player_version)
if player_version < 6:
raise VMwareError("Using VMware Player requires version 6 or above")
elif player_version == 6:
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
elif player_version == 7:
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
elif player_version >= 12:
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
self._host_type = "player"
|
[
"def",
"_check_vmware_player_requirements",
"(",
"self",
",",
"player_version",
")",
":",
"player_version",
"=",
"int",
"(",
"player_version",
")",
"if",
"player_version",
"<",
"6",
":",
"raise",
"VMwareError",
"(",
"\"Using VMware Player requires version 6 or above\"",
")",
"elif",
"player_version",
"==",
"6",
":",
"yield",
"from",
"self",
".",
"check_vmrun_version",
"(",
"minimum_required_version",
"=",
"\"1.13.0\"",
")",
"elif",
"player_version",
"==",
"7",
":",
"yield",
"from",
"self",
".",
"check_vmrun_version",
"(",
"minimum_required_version",
"=",
"\"1.14.0\"",
")",
"elif",
"player_version",
">=",
"12",
":",
"yield",
"from",
"self",
".",
"check_vmrun_version",
"(",
"minimum_required_version",
"=",
"\"1.15.0\"",
")",
"self",
".",
"_host_type",
"=",
"\"player\""
] |
Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version.
|
[
"Check",
"minimum",
"requirements",
"to",
"use",
"VMware",
"Player",
"."
] |
python
|
train
| 42 |
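A plain-function sketch of the version gate above, mapping a VMware Player major version to the minimum vmrun/VIX version it requires; the coroutine plumbing and the real check_vmrun_version call are left out.

def minimum_vmrun_for_player(player_version):
    player_version = int(player_version)
    if player_version < 6:
        raise ValueError("Using VMware Player requires version 6 or above")
    if player_version == 6:
        return "1.13.0"
    if player_version == 7:
        return "1.14.0"
    if player_version >= 12:
        return "1.15.0"
    return None  # versions 8-11 trigger no explicit check in the original

for v in (6, 7, 12):
    print(v, "->", minimum_vmrun_for_player(v))
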
ellmetha/django-machina
|
machina/apps/forum_moderation/views.py
|
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L454-L456
|
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
return self.disapprove(request, *args, **kwargs)
|
[
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"disapprove",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Handles POST requests.
|
[
"Handles",
"POST",
"requests",
"."
] |
python
|
train
| 45 |
noirbizarre/django.js
|
setup.py
|
https://github.com/noirbizarre/django.js/blob/65b267b04ffc0f969b9f8e2f8ce2f922397c8af1/setup.py#L22-L32
|
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = codecs.open(filename, encoding='utf-8').read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content
|
[
"def",
"rst",
"(",
"filename",
")",
":",
"content",
"=",
"codecs",
".",
"open",
"(",
"filename",
",",
"encoding",
"=",
"'utf-8'",
")",
".",
"read",
"(",
")",
"for",
"regex",
",",
"replacement",
"in",
"PYPI_RST_FILTERS",
":",
"content",
"=",
"re",
".",
"sub",
"(",
"regex",
",",
"replacement",
",",
"content",
")",
"return",
"content"
] |
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
|
[
"Load",
"rst",
"file",
"and",
"sanitize",
"it",
"for",
"PyPI",
".",
"Remove",
"unsupported",
"github",
"tags",
":",
"-",
"code",
"-",
"block",
"directive",
"-",
"travis",
"ci",
"build",
"badge"
] |
python
|
train
| 31.181818 |
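A runnable miniature of the sanitizer above, with a one-entry filter list standing in for PYPI_RST_FILTERS and an in-memory string replacing the file read.

import re

# Illustrative stand-in for PYPI_RST_FILTERS: downgrade code-block directives.
FILTERS = [
    (r'\.\. code-block:: \w+', '::'),
]

def sanitize_rst(content):
    for regex, replacement in FILTERS:
        content = re.sub(regex, replacement, content)
    return content

sample = "Usage\n-----\n\n.. code-block:: python\n\n    import django_js\n"
print(sanitize_rst(sample))
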
assamite/creamas
|
creamas/mp.py
|
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L692-L702
|
async def _get_smallest_env(self):
"""Get address of the slave environment manager with the smallest
number of agents.
"""
async def slave_task(mgr_addr):
r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
ret = await r_manager.get_agents(addr=True)
return mgr_addr, len(ret)
sizes = await create_tasks(slave_task, self.addrs, flatten=False)
return sorted(sizes, key=lambda x: x[1])[0][0]
|
[
"async",
"def",
"_get_smallest_env",
"(",
"self",
")",
":",
"async",
"def",
"slave_task",
"(",
"mgr_addr",
")",
":",
"r_manager",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"mgr_addr",
",",
"timeout",
"=",
"TIMEOUT",
")",
"ret",
"=",
"await",
"r_manager",
".",
"get_agents",
"(",
"addr",
"=",
"True",
")",
"return",
"mgr_addr",
",",
"len",
"(",
"ret",
")",
"sizes",
"=",
"await",
"create_tasks",
"(",
"slave_task",
",",
"self",
".",
"addrs",
",",
"flatten",
"=",
"False",
")",
"return",
"sorted",
"(",
"sizes",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]"
] |
Get address of the slave environment manager with the smallest
number of agents.
|
[
"Get",
"address",
"of",
"the",
"slave",
"environment",
"manager",
"with",
"the",
"smallest",
"number",
"of",
"agents",
"."
] |
python
|
train
| 43.090909 |
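A self-contained asyncio sketch of the "least loaded slave" selection above: query every address concurrently, then sort by the reported agent count. The AGENT_COUNTS table and the sleep stand in for the real manager connections.

import asyncio

AGENT_COUNTS = {"tcp://slave-a": 7, "tcp://slave-b": 3, "tcp://slave-c": 5}

async def slave_task(addr):
    await asyncio.sleep(0)  # stands in for the remote round trip
    return addr, AGENT_COUNTS[addr]

async def get_smallest_env(addrs):
    sizes = await asyncio.gather(*(slave_task(a) for a in addrs))
    return sorted(sizes, key=lambda x: x[1])[0][0]

print(asyncio.run(get_smallest_env(list(AGENT_COUNTS))))  # tcp://slave-b
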
nad2000/W3C-Validator
|
w3c_validator/validator.py
|
https://github.com/nad2000/W3C-Validator/blob/81eb35ef9c1fa87c82731b335c46f873e97a4dea/w3c_validator/validator.py#L69-L128
|
def main():
"""Parser the command line and run the validator."""
parser = argparse.ArgumentParser(
description="[v" + __version__ + "] " + __doc__,
prog="w3c_validator",
)
parser.add_argument(
"--log",
default="INFO",
help=("log level: DEBUG, INFO or INFO "
"(default: INFO)"))
parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__)
parser.add_argument(
"--verbose", help="increase output verbosity", action="store_true")
parser.add_argument(
"source", metavar="F", type=str, nargs="+", help="file or URL")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log))
LOGGER.info("Files to validate: \n {0}".format("\n ".join(args.source)))
LOGGER.info("Number of files: {0}".format(len(args.source)))
errors = 0
warnings = 0
for f in args.source:
LOGGER.info("validating: %s ..." % f)
retrys = 0
while retrys < 2:
result = validate(f, verbose=args.verbose)
if result:
break
time.sleep(2)
retrys += 1
LOGGER.info("retrying: %s ..." % f)
else:
LOGGER.info("failed: %s" % f)
errors += 1
continue
# import pdb; pdb.set_trace()
if f.endswith(".css"):
errorcount = result["cssvalidation"]["result"]["errorcount"]
warningcount = result["cssvalidation"]["result"]["warningcount"]
errors += errorcount
warnings += warningcount
if errorcount > 0:
LOGGER.info("errors: %d" % errorcount)
if warningcount > 0:
LOGGER.info("warnings: %d" % warningcount)
else:
for msg in result["messages"]:
print_msg(msg)
if msg["type"] == "error":
errors += 1
else:
warnings += 1
sys.exit(min(errors, 255))
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"[v\"",
"+",
"__version__",
"+",
"\"] \"",
"+",
"__doc__",
",",
"prog",
"=",
"\"w3c_validator\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--log\"",
",",
"default",
"=",
"\"INFO\"",
",",
"help",
"=",
"(",
"\"log level: DEBUG, INFO or INFO \"",
"\"(default: INFO)\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"%(prog)s \"",
"+",
"__version__",
")",
"parser",
".",
"add_argument",
"(",
"\"--verbose\"",
",",
"help",
"=",
"\"increase output verbosity\"",
",",
"action",
"=",
"\"store_true\"",
")",
"parser",
".",
"add_argument",
"(",
"\"source\"",
",",
"metavar",
"=",
"\"F\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"+\"",
",",
"help",
"=",
"\"file or URL\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"getattr",
"(",
"logging",
",",
"args",
".",
"log",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"Files to validate: \\n {0}\"",
".",
"format",
"(",
"\"\\n \"",
".",
"join",
"(",
"args",
".",
"source",
")",
")",
")",
"LOGGER",
".",
"info",
"(",
"\"Number of files: {0}\"",
".",
"format",
"(",
"len",
"(",
"args",
".",
"source",
")",
")",
")",
"errors",
"=",
"0",
"warnings",
"=",
"0",
"for",
"f",
"in",
"args",
".",
"source",
":",
"LOGGER",
".",
"info",
"(",
"\"validating: %s ...\"",
"%",
"f",
")",
"retrys",
"=",
"0",
"while",
"retrys",
"<",
"2",
":",
"result",
"=",
"validate",
"(",
"f",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"if",
"result",
":",
"break",
"time",
".",
"sleep",
"(",
"2",
")",
"retrys",
"+=",
"1",
"LOGGER",
".",
"info",
"(",
"\"retrying: %s ...\"",
"%",
"f",
")",
"else",
":",
"LOGGER",
".",
"info",
"(",
"\"failed: %s\"",
"%",
"f",
")",
"errors",
"+=",
"1",
"continue",
"# import pdb; pdb.set_trace()",
"if",
"f",
".",
"endswith",
"(",
"\".css\"",
")",
":",
"errorcount",
"=",
"result",
"[",
"\"cssvalidation\"",
"]",
"[",
"\"result\"",
"]",
"[",
"\"errorcount\"",
"]",
"warningcount",
"=",
"result",
"[",
"\"cssvalidation\"",
"]",
"[",
"\"result\"",
"]",
"[",
"\"warningcount\"",
"]",
"errors",
"+=",
"errorcount",
"warnings",
"+=",
"warningcount",
"if",
"errorcount",
">",
"0",
":",
"LOGGER",
".",
"info",
"(",
"\"errors: %d\"",
"%",
"errorcount",
")",
"if",
"warningcount",
">",
"0",
":",
"LOGGER",
".",
"info",
"(",
"\"warnings: %d\"",
"%",
"warningcount",
")",
"else",
":",
"for",
"msg",
"in",
"result",
"[",
"\"messages\"",
"]",
":",
"print_msg",
"(",
"msg",
")",
"if",
"msg",
"[",
"\"type\"",
"]",
"==",
"\"error\"",
":",
"errors",
"+=",
"1",
"else",
":",
"warnings",
"+=",
"1",
"sys",
".",
"exit",
"(",
"min",
"(",
"errors",
",",
"255",
")",
")"
] |
Parse the command line and run the validator.
|
[
"Parser",
"the",
"command",
"line",
"and",
"run",
"the",
"validator",
"."
] |
python
|
train
| 33.133333 |
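A compact, runnable illustration of the while/else retry pattern in the loop above: retry a flaky call a bounded number of times, and fall into the else branch only when every attempt fails. The flaky function is a stand-in for validate().

import time

attempts = {"n": 0}

def flaky_validate(target):
    # Succeeds on the second call, standing in for a transient validator error.
    attempts["n"] += 1
    return {"messages": []} if attempts["n"] >= 2 else None

retrys = 0
while retrys < 2:
    result = flaky_validate("index.html")
    if result:
        break
    time.sleep(0.1)
    retrys += 1
else:
    result = None
    print("failed after retries")

print("got result:", result)
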