Dataset schema (one record per function; ranges are min-max observed values):

    repo              string (7-55 chars)
    path              string (4-223 chars)
    url               string (87-315 chars)
    code              string (75-104k chars)
    code_tokens       list
    docstring         string (1-46.9k chars)
    docstring_tokens  list
    language          string (1 class)
    partition         string (3 values)
    avg_line_len      float64 (7.91-980)
StanfordBioinformatics/scgpm_seqresults_dnanexus
scgpm_seqresults_dnanexus/dnanexus_utils.py
https://github.com/StanfordBioinformatics/scgpm_seqresults_dnanexus/blob/2bdaae5ec5d38a07fec99e0c5379074a591d77b6/scgpm_seqresults_dnanexus/dnanexus_utils.py#L556-L591
def download_fastqs(self, dest_dir, barcode, overwrite=False):
    """
    Downloads all FASTQ files in the project that match the specified barcode,
    or, if a barcode isn't given, all FASTQ files, as in that case it is
    assumed that this is not a multiplexed experiment. Files are downloaded
    to the directory specified by dest_dir.

    Args:
        barcode: `str`. The barcode sequence used.
        dest_dir: `str`. The local directory in which the FASTQs will be downloaded.
        overwrite: `bool`. If True, then if the file to download already exists
            in dest_dir, the file will be downloaded again, overwriting it.
            If False, the file will not be downloaded again from DNAnexus.

    Returns:
        `dict`: The key is the barcode, and the value is a dict with integer
        keys of 1 for the forward reads file and 2 for the reverse reads file.
        If not paired-end, only the key 1 is present.

    Raises:
        `Exception`: The barcode is specified and fewer than or more than 2
        FASTQ files are found.
    """
    fastq_props = self.get_fastq_files_props(barcode=barcode)
    res = {}
    for f in fastq_props:
        props = fastq_props[f]
        read_num = int(props["read"])
        barcode = props["barcode"]
        if barcode not in res:
            res[barcode] = {}
        name = props["fastq_file_name"]
        filedest = os.path.abspath(os.path.join(dest_dir, name))
        res[barcode][read_num] = filedest
        if os.path.exists(filedest) and not overwrite:
            continue
        debug_logger.debug(
            "Downloading FASTQ file {name} from DNAnexus project {project} "
            "to {path}.".format(name=name, project=self.dx_project_name, path=filedest))
        dxpy.download_dxfile(f, filedest)
    return res
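A brief usage sketch (hedged: `utils` stands for an instance of the class this method belongs to, and the barcode is a made-up example):

utils_result = utils.download_fastqs(dest_dir="fastqs", barcode="ATCACG")
r1 = utils_result["ATCACG"][1]   # forward-reads file path
r2 = utils_result["ATCACG"][2]   # reverse-reads file path (paired-end only)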
[ "def", "download_fastqs", "(", "self", ",", "dest_dir", ",", "barcode", ",", "overwrite", "=", "False", ")", ":", "fastq_props", "=", "self", ".", "get_fastq_files_props", "(", "barcode", "=", "barcode", ")", "res", "=", "{", "}", "for", "f", "in", "fastq_props", ":", "props", "=", "fastq_props", "[", "f", "]", "read_num", "=", "int", "(", "props", "[", "\"read\"", "]", ")", "barcode", "=", "props", "[", "\"barcode\"", "]", "if", "barcode", "not", "in", "res", ":", "res", "[", "barcode", "]", "=", "{", "}", "name", "=", "props", "[", "\"fastq_file_name\"", "]", "filedest", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "dest_dir", ",", "name", ")", ")", "res", "[", "barcode", "]", "[", "read_num", "]", "=", "filedest", "if", "os", ".", "path", ".", "exists", "(", "filedest", ")", "and", "not", "overwrite", ":", "continue", "debug_logger", ".", "debug", "(", "\"Downloading FASTQ file {name} from DNAnexus project {project} to {path}.\"", ".", "format", "(", "name", "=", "name", ",", "project", "=", "self", ".", "dx_project_name", ",", "path", "=", "filedest", ")", ")", "dxpy", ".", "download_dxfile", "(", "f", ",", "filedest", ")", "return", "res" ]
Downloads all FASTQ files in the project that match the specified barcode, or if a barcode isn't given, all FASTQ files as in this case it is assumed that this is not a multiplexed experiment. Files are downloaded to the directory specified by dest_dir. Args: barcode: `str`. The barcode sequence used. dest_dir: `str`. The local directory in which the FASTQs will be downloaded. overwrite: `bool`. If True, then if the file to download already exists in dest_dir, the file will be downloaded again, overwriting it. If False, the file will not be downloaded again from DNAnexus. Returns: `dict`: The key is the barcode, and the value is a dict with integer keys of 1 for the forward reads file, and 2 for the reverse reads file. If not paired-end, Raises: `Exception`: The barcode is specified and less than or greater than 2 FASTQ files are found.
[ "Downloads", "all", "FASTQ", "files", "in", "the", "project", "that", "match", "the", "specified", "barcode", "or", "if", "a", "barcode", "isn", "t", "given", "all", "FASTQ", "files", "as", "in", "this", "case", "it", "is", "assumed", "that", "this", "is", "not", "a", "multiplexed", "experiment", ".", "Files", "are", "downloaded", "to", "the", "directory", "specified", "by", "dest_dir", ".", "Args", ":", "barcode", ":", "str", ".", "The", "barcode", "sequence", "used", ".", "dest_dir", ":", "str", ".", "The", "local", "directory", "in", "which", "the", "FASTQs", "will", "be", "downloaded", ".", "overwrite", ":", "bool", ".", "If", "True", "then", "if", "the", "file", "to", "download", "already", "exists", "in", "dest_dir", "the", "file", "will", "be", "downloaded", "again", "overwriting", "it", ".", "If", "False", "the", "file", "will", "not", "be", "downloaded", "again", "from", "DNAnexus", ".", "Returns", ":", "dict", ":", "The", "key", "is", "the", "barcode", "and", "the", "value", "is", "a", "dict", "with", "integer", "keys", "of", "1", "for", "the", "forward", "reads", "file", "and", "2", "for", "the", "reverse", "reads", "file", ".", "If", "not", "paired", "-", "end", "Raises", ":", "Exception", ":", "The", "barcode", "is", "specified", "and", "less", "than", "or", "greater", "than", "2", "FASTQ", "files", "are", "found", "." ]
language: python | partition: train | avg_line_len: 51.388889
MinchinWeb/minchin.text
minchin/text.py
https://github.com/MinchinWeb/minchin.text/blob/4d136039561892c3adab49fe81b2f246e5a1507b/minchin/text.py#L253-L263
def wait(sec):
    '''
    Prints a timer with the format 0:00 to the console, and then clears the
    line when the timer is done
    '''
    while sec > 0:
        sys.stdout.write('\r' + str(sec // 60).zfill(1) + ":" +
                         str(sec % 60).zfill(2) + ' ')
        sec -= 1
        time.sleep(1)
    sys.stdout.write('\r' + ' ' + '\r')
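A minimal usage sketch, assuming the package is importable as minchin.text (matching the path above):

from minchin.text import wait

wait(90)   # counts down 1:30, 1:29, ... 0:01 in place, then clears the line
print("done")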
[ "def", "wait", "(", "sec", ")", ":", "while", "sec", ">", "0", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "str", "(", "sec", "//", "60", ")", ".", "zfill", "(", "1", ")", "+", "\":\"", "+", "str", "(", "sec", "%", "60", ")", ".", "zfill", "(", "2", ")", "+", "' '", ")", "sec", "-=", "1", "time", ".", "sleep", "(", "1", ")", "sys", ".", "stdout", ".", "write", "(", "'\\r'", "+", "' '", "+", "'\\r'", ")" ]
Prints a timer with the format 0:00 to the console, and then clears the line when the timer is done
[ "Prints", "a", "timer", "with", "the", "format", "0", ":", "00", "to", "the", "console", "and", "then", "clears", "the", "line", "when", "the", "timer", "is", "done" ]
language: python | partition: test | avg_line_len: 32.818182
NuGrid/NuGridPy
nugridpy/mesa.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L3827-L3944
def abu_profiles(p, ifig=1, xlm=xlm, ylm=(-8, 0), show=False, abunds='All',
                 xaxis=xaxis_type, figsize1=(8, 8)):
    '''Four panels of abundance plots

    Parameters
    ----------
    p : instance
        mesa_profile instance
    xlm : tuple
        xlimits: mass_min, mass_max
    abunds : 'All' or list
        'All' plots many 'commonly used' isotopes up to Fe if they are in
        your mesa output; otherwise provide a list of lists of desired
        abundances
    show : boolean
        False for batch use, True for interactive use
    xaxis : character
        "Lagrangian": mass is the radial mass coordinate
        "Eulerian": radius is the radial coordinate, in Mm
    '''
    matplotlib.rc('figure', facecolor='white', figsize=figsize1)

    # create subplot structure
    f, ([ax1, ax2], [ax3, ax4]) = pl.subplots(2, 2, sharex=False,
                                              sharey=True, figsize=figsize1)

    # define 4 groups of elements, one for each of the 4 subplots
    all_isos = [['h1', 'he3', 'he4', 'li6', 'c12', 'c13', 'n13', 'n14', 'n15',
                 'o16', 'o17', 'o18', 'f19'],
                ['ne20', 'ne21', 'ne22', 'na22', 'na23', 'mg24', 'mg25',
                 'mg26', 'al26', 'al27', 'si28', 'si29', 'si30'],
                ['p31', 's32', 's33', 's34', 's36', 'cl35', 'cl37', 'ar36',
                 'ar38', 'ar40', 'k39', 'k40', 'k41'],
                ['ca40', 'ca42', 'ca48', 'sc45', 'ti46', 'ti48', 'ti50',
                 'v50', 'v51', 'cr52', 'cr54', 'mn55', 'fe56']]

    if abunds == 'All':
        abus = [[], [], [], []]
        j = 0
        for i, row in enumerate(all_isos):
            for iso in row:
                if iso in p.cols:
                    abus[i].append(iso)
                    j += 1
        abus1 = []
        abus2 = [[], [], [], []]
        for l in range(len(abus)):
            for k in range(len(abus[l])):
                abus1.append(abus[l][k])
        is_small_isos = False
        for i in range(len(abus)):
            if len(abus[i]) < 5:
                is_small_isos = True
                print("Missing isotopes from the default list. "
                      "Distributing the ones you have over the panels.")
        if is_small_isos:
            n = 4
            quo, rem = divmod(len(abus1), n)
            for i in range(len(abus2)):
                for k in range(i * quo, (i + 1) * quo + rem):
                    abus2[i].append(abus1[k])
            abus = abus2
    else:
        abus = abunds   # user-supplied list of lists

    ax = [ax1, ax2, ax3, ax4]
    mass = p.get('mass')                            # in units of Msun
    radius = p.get('radius') * ast.rsun_cm / 1.e8   # in units of Mm

    if xaxis == "Eulerian":
        xxx = radius
        if xlm[0] == 0 and xlm[1] == 0:
            indtop = 0
            indbot = len(mass) - 1
        else:
            indbot = np.where(radius >= xlm[0])[0][-1]
            indtop = np.where(radius < xlm[1])[0][0]
        xll = (radius[indbot], radius[indtop])
        xxlabel = "Radius (Mm)"
    elif xaxis == "Lagrangian":
        xxx = mass
        xll = xlm
        xxlabel = "$M / \\mathrm{M_{sun}}$"
    else:
        print("Error: don't understand xaxis choice, "
              "must be Lagrangian or Eulerian")

    for i in range(4):
        for thing in abus[i]:
            ind = abus[i].index(thing)
            ax[i].plot(xxx, np.log10(p.get(thing)),
                       ls=u.linestylecb(ind, a, b)[0],
                       marker=u.linestylecb(ind, a, b)[1],
                       color=u.linestylecb(ind, a, b)[2],
                       markevery=50, label=thing)
        # set x and y lims and labels
        ax[i].set_ylim(ylm)
        ax[i].set_xlim(xll)
        ax[i].legend(loc=1)
        ax[i].set_xlabel(xxlabel)
        if i % 2 == 0:
            ax[i].set_ylabel('log X')

    title_str = ("Abundance plot: " + 't =' + str(title_format % p.header_attr['star_age'])
                 + ' dt =' + str(title_format % p.header_attr['time_step'])
                 + ' model number = ' + str(int(p.header_attr['model_number'])))
    f.suptitle(title_str, fontsize=12)
    f.tight_layout()
    f.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.9,
                      wspace=0, hspace=0.1)
    f.savefig('abuprof' + str(int(p.header_attr['model_number'])).zfill(6) + '.png')
[ "def", "abu_profiles", "(", "p", ",", "ifig", "=", "1", ",", "xlm", "=", "xlm", ",", "ylm", "=", "(", "-", "8", ",", "0", ")", ",", "show", "=", "False", ",", "abunds", "=", "'All'", ",", "xaxis", "=", "xaxis_type", ",", "figsize1", "=", "(", "8", ",", "8", ")", ")", ":", "matplotlib", ".", "rc", "(", "'figure'", ",", "facecolor", "=", "'white'", ",", "figsize", "=", "figsize1", ")", "# create subplot structure", "f", ",", "(", "[", "ax1", ",", "ax2", "]", ",", "[", "ax3", ",", "ax4", "]", ")", "=", "pl", ".", "subplots", "(", "2", ",", "2", ",", "sharex", "=", "False", ",", "sharey", "=", "True", ",", "figsize", "=", "figsize1", ")", "# define 4 groups of elements, one for each of the 4 subplots ", "all_isos", "=", "[", "[", "'h1'", ",", "'he3'", ",", "'he4'", ",", "'li6'", ",", "'c12'", ",", "'c13'", ",", "'n13'", ",", "'n14'", ",", "'n15'", ",", "'o16'", ",", "'o17'", ",", "'o18'", ",", "'f19'", "]", ",", "[", "'ne20'", ",", "'ne21'", ",", "'ne22'", ",", "'na22'", ",", "'na23'", ",", "'mg24'", ",", "'mg25'", ",", "'mg26'", ",", "'al26'", ",", "'al27'", ",", "'si28'", ",", "'si29'", ",", "'si30'", "]", ",", "[", "'p31'", ",", "'s32'", ",", "'s33'", ",", "'s34'", ",", "'s36'", ",", "'cl35'", ",", "'cl37'", ",", "'ar36'", ",", "'ar38'", ",", "'ar40'", ",", "'k39'", ",", "'k40'", ",", "'k41'", "]", ",", "[", "'ca40'", ",", "'ca42'", ",", "'ca48'", ",", "'sc45'", ",", "'ti46'", ",", "'ti48'", ",", "'ti50'", ",", "'v50'", ",", "'v51'", ",", "'cr52'", ",", "'cr54'", ",", "'mn55'", ",", "'fe56'", "]", "]", "if", "abunds", "==", "'All'", ":", "abus", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "]", "j", "=", "0", "for", "i", ",", "row", "in", "enumerate", "(", "all_isos", ")", ":", "for", "iso", "in", "row", ":", "if", "iso", "in", "p", ".", "cols", ":", "abus", "[", "i", "]", ".", "append", "(", "iso", ")", "j", "+=", "1", "abus1", "=", "[", "]", "abus2", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "l", "in", "range", "(", "len", "(", "abus", ")", ")", ":", "for", "k", "in", "range", "(", "len", "(", "abus", "[", "l", "]", ")", ")", ":", "abus1", ".", "append", "(", "abus", "[", "l", "]", "[", "k", "]", ")", "is_small_isos", "=", "False", "for", "i", "in", "range", "(", "len", "(", "abus", ")", ")", ":", "if", "len", "(", "abus", "[", "i", "]", ")", "<", "5", ":", "is_small_isos", "=", "True", "print", "(", "\"Missing isotopes from the default list. 
Distributing the ones you have over the panels.\"", ")", "if", "is_small_isos", ":", "n", "=", "4", "quo", ",", "rem", "=", "divmod", "(", "len", "(", "abus1", ")", ",", "n", ")", "for", "i", "in", "range", "(", "len", "(", "abus2", ")", ")", ":", "for", "k", "in", "range", "(", "i", "*", "quo", ",", "(", "i", "+", "1", ")", "*", "quo", "+", "rem", ")", ":", "abus2", "[", "i", "]", ".", "append", "(", "abus1", "[", "k", "]", ")", "abus", "=", "abus2", "#print(abus) ", "else", ":", "abus", "=", "abus", "ax", "=", "[", "ax1", ",", "ax2", ",", "ax3", ",", "ax4", "]", "xxx", "=", "p", ".", "get", "(", "'radius'", ")", "if", "xaxis", "is", "\"Eulerian\"", "else", "p", ".", "get", "(", "'mass'", ")", "mass", "=", "p", ".", "get", "(", "'mass'", ")", "# in units of Msun", "radius", "=", "p", ".", "get", "(", "'radius'", ")", "*", "ast", ".", "rsun_cm", "/", "1.e8", "# in units of Mm", "if", "xaxis", "is", "\"Eulerian\"", ":", "xxx", "=", "radius", "if", "xlm", "[", "0", "]", "==", "0", "and", "xlm", "[", "1", "]", "==", "0", ":", "indtop", "=", "0", "indbot", "=", "len", "(", "mass", ")", "-", "1", "else", ":", "indbot", "=", "np", ".", "where", "(", "radius", ">=", "xlm", "[", "0", "]", ")", "[", "0", "]", "[", "-", "1", "]", "indtop", "=", "np", ".", "where", "(", "radius", "<", "xlm", "[", "1", "]", ")", "[", "0", "]", "[", "0", "]", "xll", "=", "(", "radius", "[", "indbot", "]", ",", "radius", "[", "indtop", "]", ")", "xxlabel", "=", "\"Radius (Mm)\"", "elif", "xaxis", "is", "\"Lagrangian\"", ":", "xxx", "=", "mass", "xll", "=", "xlm", "xxlabel", "=", "\"$M / \\mathrm{M_{sun}}$\"", "else", ":", "print", "(", "\"Error: don't understand xaxis choice, must be Lagrangian or Eulerian\"", ")", "for", "i", "in", "range", "(", "4", ")", ":", "for", "thing", "in", "abus", "[", "i", "]", ":", "ind", "=", "abus", "[", "i", "]", ".", "index", "(", "thing", ")", "ax", "[", "i", "]", ".", "plot", "(", "xxx", ",", "np", ".", "log10", "(", "p", ".", "get", "(", "thing", ")", ")", ",", "ls", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "0", "]", ",", "marker", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "1", "]", ",", "color", "=", "u", ".", "linestylecb", "(", "ind", ",", "a", ",", "b", ")", "[", "2", "]", ",", "markevery", "=", "50", ",", "label", "=", "thing", ")", "# set x and y lims and labels", "ax", "[", "i", "]", ".", "set_ylim", "(", "ylm", ")", "ax", "[", "i", "]", ".", "set_xlim", "(", "xll", ")", "ax", "[", "i", "]", ".", "legend", "(", "loc", "=", "1", ")", "ax", "[", "i", "]", ".", "set_xlabel", "(", "xxlabel", ")", "if", "i", "%", "2", "==", "0", ":", "ax", "[", "i", "]", ".", "set_ylabel", "(", "'log X'", ")", "# ax[i].set_aspect('equal')", "title_str", "=", "\"Abundance plot: \"", "+", "'t ='", "+", "str", "(", "title_format", "%", "p", ".", "header_attr", "[", "'star_age'", "]", ")", "+", "' dt ='", "+", "str", "(", "title_format", "%", "p", ".", "header_attr", "[", "'time_step'", "]", ")", "+", "'model number = '", "+", "str", "(", "int", "(", "p", ".", "header_attr", "[", "'model_number'", "]", ")", ")", "f", ".", "suptitle", "(", "title_str", ",", "fontsize", "=", "12", ")", "f", ".", "tight_layout", "(", ")", "f", ".", "subplots_adjust", "(", "left", "=", "0.1", ",", "bottom", "=", "0.1", ",", "right", "=", "0.95", ",", "top", "=", "0.9", ",", "wspace", "=", "0", ",", "hspace", "=", "0.1", ")", "f", ".", "savefig", "(", "'abuprof'", "+", "str", "(", "int", "(", "p", ".", "header_attr", "[", "'model_number'", "]", ")", ")", ".", 
"zfill", "(", "6", ")", "+", "'.png'", ")" ]
Four panels of abundance plots Parameters ---------- p : instance mesa_profile instance xlm : tuple xlimits: mass_min, mass_max abus : 'All' plots many 'commonly used' isotopes up to Fe if they are in your mesa output. otherwise provide a list of lists of desired abus show : Boolean False for batch use True for interactive use xaxis : character Lagrangian mass is radial mass coordinate Eulerian radius is radial coordinate, in Mm
[ "Four", "panels", "of", "abundance", "plots" ]
language: python | partition: train | avg_line_len: 34.228814
konstantint/matplotlib-venn
matplotlib_venn/_common.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_common.py#L92-L105
def prepare_venn_axes(ax, centers, radii):
    '''
    Sets properties of the axis object to suit venn plotting.
    I.e. hides ticks, makes proper xlim/ylim.
    '''
    ax.set_aspect('equal')
    ax.set_xticks([])
    ax.set_yticks([])
    min_x = min([centers[i][0] - radii[i] for i in range(len(radii))])
    max_x = max([centers[i][0] + radii[i] for i in range(len(radii))])
    min_y = min([centers[i][1] - radii[i] for i in range(len(radii))])
    max_y = max([centers[i][1] + radii[i] for i in range(len(radii))])
    ax.set_xlim([min_x - 0.1, max_x + 0.1])
    ax.set_ylim([min_y - 0.1, max_y + 0.1])
    ax.set_axis_off()
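A short usage sketch (hedged: this is a private helper inside matplotlib_venn, called directly here only for illustration; the two circles are made-up values):

import matplotlib.pyplot as plt
from matplotlib_venn._common import prepare_venn_axes

fig, ax = plt.subplots()
centers = [(0.0, 0.0), (0.7, 0.0)]   # two overlapping circles
radii = [0.5, 0.5]
prepare_venn_axes(ax, centers, radii)
# x-limits are now (-0.6, 1.3) and y-limits (-0.6, 0.6), with ticks hidden
plt.show()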
[ "def", "prepare_venn_axes", "(", "ax", ",", "centers", ",", "radii", ")", ":", "ax", ".", "set_aspect", "(", "'equal'", ")", "ax", ".", "set_xticks", "(", "[", "]", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "min_x", "=", "min", "(", "[", "centers", "[", "i", "]", "[", "0", "]", "-", "radii", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "radii", ")", ")", "]", ")", "max_x", "=", "max", "(", "[", "centers", "[", "i", "]", "[", "0", "]", "+", "radii", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "radii", ")", ")", "]", ")", "min_y", "=", "min", "(", "[", "centers", "[", "i", "]", "[", "1", "]", "-", "radii", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "radii", ")", ")", "]", ")", "max_y", "=", "max", "(", "[", "centers", "[", "i", "]", "[", "1", "]", "+", "radii", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "radii", ")", ")", "]", ")", "ax", ".", "set_xlim", "(", "[", "min_x", "-", "0.1", ",", "max_x", "+", "0.1", "]", ")", "ax", ".", "set_ylim", "(", "[", "min_y", "-", "0.1", ",", "max_y", "+", "0.1", "]", ")", "ax", ".", "set_axis_off", "(", ")" ]
Sets properties of the axis object to suit venn plotting. I.e. hides ticks, makes proper xlim/ylim.
[ "Sets", "properties", "of", "the", "axis", "object", "to", "suit", "venn", "plotting", ".", "I", ".", "e", ".", "hides", "ticks", "makes", "proper", "xlim", "/", "ylim", "." ]
language: python | partition: train | avg_line_len: 43.857143
pantsbuild/pants
src/python/pants/auth/basic_auth.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/auth/basic_auth.py#L52-L91
def authenticate(self, provider, creds=None, cookies=None):
    """Authenticate against the specified provider.

    :param str provider: Authorize against this provider.
    :param pants.auth.basic_auth.BasicAuthCreds creds: The creds to use.
        If unspecified, assumes that creds are set in the netrc file.
    :param pants.auth.cookies.Cookies cookies: Store the auth cookies in this
        instance. If unspecified, uses the global instance.
    :raises pants.auth.basic_auth.BasicAuthException: If auth fails due to
        misconfiguration or rejection by the server.
    """
    cookies = cookies or Cookies.global_instance()

    if not provider:
        raise BasicAuthException('No basic auth provider specified.')

    provider_config = self.get_options().providers.get(provider)
    if not provider_config:
        raise BasicAuthException('No config found for provider {}.'.format(provider))

    url = provider_config.get('url')
    if not url:
        raise BasicAuthException('No url found in config for provider {}.'.format(provider))

    if not self.get_options().allow_insecure_urls and not url.startswith('https://'):
        raise BasicAuthException('Auth url for provider {} is not secure: {}.'.format(provider, url))

    if creds:
        auth = requests.auth.HTTPBasicAuth(creds.username, creds.password)
    else:
        auth = None  # requests will use the netrc creds.

    response = requests.get(url, auth=auth)
    if response.status_code != requests.codes.ok:
        if response.status_code == requests.codes.unauthorized:
            parsed = www_authenticate.parse(response.headers.get('WWW-Authenticate', ''))
            if 'Basic' in parsed:
                raise Challenged(url, response.status_code, response.reason,
                                 parsed['Basic']['realm'])
        raise BasicAuthException(url, response.status_code, response.reason)

    cookies.update(response.cookies)
[ "def", "authenticate", "(", "self", ",", "provider", ",", "creds", "=", "None", ",", "cookies", "=", "None", ")", ":", "cookies", "=", "cookies", "or", "Cookies", ".", "global_instance", "(", ")", "if", "not", "provider", ":", "raise", "BasicAuthException", "(", "'No basic auth provider specified.'", ")", "provider_config", "=", "self", ".", "get_options", "(", ")", ".", "providers", ".", "get", "(", "provider", ")", "if", "not", "provider_config", ":", "raise", "BasicAuthException", "(", "'No config found for provider {}.'", ".", "format", "(", "provider", ")", ")", "url", "=", "provider_config", ".", "get", "(", "'url'", ")", "if", "not", "url", ":", "raise", "BasicAuthException", "(", "'No url found in config for provider {}.'", ".", "format", "(", "provider", ")", ")", "if", "not", "self", ".", "get_options", "(", ")", ".", "allow_insecure_urls", "and", "not", "url", ".", "startswith", "(", "'https://'", ")", ":", "raise", "BasicAuthException", "(", "'Auth url for provider {} is not secure: {}.'", ".", "format", "(", "provider", ",", "url", ")", ")", "if", "creds", ":", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "creds", ".", "username", ",", "creds", ".", "password", ")", "else", ":", "auth", "=", "None", "# requests will use the netrc creds.", "response", "=", "requests", ".", "get", "(", "url", ",", "auth", "=", "auth", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "if", "response", ".", "status_code", "==", "requests", ".", "codes", ".", "unauthorized", ":", "parsed", "=", "www_authenticate", ".", "parse", "(", "response", ".", "headers", ".", "get", "(", "'WWW-Authenticate'", ",", "''", ")", ")", "if", "'Basic'", "in", "parsed", ":", "raise", "Challenged", "(", "url", ",", "response", ".", "status_code", ",", "response", ".", "reason", ",", "parsed", "[", "'Basic'", "]", "[", "'realm'", "]", ")", "raise", "BasicAuthException", "(", "url", ",", "response", ".", "status_code", ",", "response", ".", "reason", ")", "cookies", ".", "update", "(", "response", ".", "cookies", ")" ]
Authenticate against the specified provider. :param str provider: Authorize against this provider. :param pants.auth.basic_auth.BasicAuthCreds creds: The creds to use. If unspecified, assumes that creds are set in the netrc file. :param pants.auth.cookies.Cookies cookies: Store the auth cookies in this instance. If unspecified, uses the global instance. :raises pants.auth.basic_auth.BasicAuthException: If auth fails due to misconfiguration or rejection by the server.
[ "Authenticate", "against", "the", "specified", "provider", "." ]
language: python | partition: train | avg_line_len: 45.725
thiagopbueno/pyrddl
pyrddl/parser.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L309-L316
def p_type_list(self, p):
    '''type_list : type_list type_def
                 | empty'''
    if p[1] is None:
        p[0] = []
    else:
        p[1].append(p[2])
        p[0] = p[1]
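For context, pyrddl's parser is built on PLY, where the docstring is the grammar rule and p is the parse stack. A hypothetical sibling rule (illustration only, not pyrddl's code) shows the same left-recursive list pattern: the empty production yields [], and each recursive step appends one parsed item.

def p_param_list(self, p):   # hypothetical rule, for illustration
    '''param_list : param_list param_def
                  | empty'''
    if p[1] is None:
        p[0] = []            # base case: empty production
    else:
        p[1].append(p[2])    # grow the list built so far
        p[0] = p[1]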
[ "def", "p_type_list", "(", "self", ",", "p", ")", ":", "if", "p", "[", "1", "]", "is", "None", ":", "p", "[", "0", "]", "=", "[", "]", "else", ":", "p", "[", "1", "]", ".", "append", "(", "p", "[", "2", "]", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
type_list : type_list type_def | empty
[ "type_list", ":", "type_list", "type_def", "|", "empty" ]
language: python | partition: train | avg_line_len: 25.875
ktbyers/netmiko
netmiko/base_connection.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/base_connection.py#L1572-L1658
def strip_ansi_escape_codes(self, string_buffer):
    """
    Remove any ANSI (VT100) ESC codes from the output

    http://en.wikipedia.org/wiki/ANSI_escape_code

    Note: this does not capture ALL possible ANSI Escape Codes, only the ones
    I have encountered.

    Current codes that are filtered:
    ESC = '\x1b' or chr(27); ESC is the escape character (^[ in caret notation)
    ESC[24;27H   Position cursor
    ESC[?25h     Show the cursor
    ESC[E        Next line (HP does ESC-E)
    ESC[K        Erase line from cursor to the end of line
    ESC[2K       Erase entire line
    ESC[1;24r    Enable scrolling from start to row end
    ESC[?6l      Reset mode screen with options 640 x 200 monochrome (graphics)
    ESC[?7l      Disable line wrapping
    ESC[2J       Code erase display
    ESC[00;32m   Color Green (30 to 37 are different colors); the more general
                 patterns are ESC[\d\d;\d\dm and ESC[\d\d;\d\d;\d\dm
    ESC[6n       Get cursor position

    HP ProCurve and Cisco SG300 require this (possible others).

    :param string_buffer: The string to be processed to remove ANSI escape codes
    :type string_buffer: str
    """  # noqa
    log.debug("In strip_ansi_escape_codes")
    log.debug("repr = {}".format(repr(string_buffer)))

    code_position_cursor = chr(27) + r"\[\d+;\d+H"
    code_show_cursor = chr(27) + r"\[\?25h"
    code_next_line = chr(27) + r"E"
    code_erase_line_end = chr(27) + r"\[K"
    code_erase_line = chr(27) + r"\[2K"
    code_erase_start_line = chr(27) + r"\[K"
    code_enable_scroll = chr(27) + r"\[\d+;\d+r"
    code_form_feed = chr(27) + r"\[1L"
    code_carriage_return = chr(27) + r"\[1M"
    code_disable_line_wrapping = chr(27) + r"\[\?7l"
    code_reset_mode_screen_options = chr(27) + r"\[\?\d+l"
    code_reset_graphics_mode = chr(27) + r"\[00m"
    code_erase_display = chr(27) + r"\[2J"
    code_graphics_mode = chr(27) + r"\[\d\d;\d\dm"
    code_graphics_mode2 = chr(27) + r"\[\d\d;\d\d;\d\dm"
    code_get_cursor_position = chr(27) + r"\[6n"
    code_cursor_position = chr(27) + r"\[m"
    code_erase_display2 = chr(27) + r"\[J"  # erase from cursor to end of display
    code_attrs_off = chr(27) + r"\[0m"      # bracket escaped so it matches literally
    code_reverse = chr(27) + r"\[7m"

    code_set = [
        code_position_cursor,
        code_show_cursor,
        code_erase_line,
        code_enable_scroll,
        code_erase_start_line,
        code_form_feed,
        code_carriage_return,
        code_disable_line_wrapping,
        code_erase_line_end,
        code_reset_mode_screen_options,
        code_reset_graphics_mode,
        code_erase_display,
        code_graphics_mode,
        code_graphics_mode2,
        code_get_cursor_position,
        code_cursor_position,
        code_erase_display2,
        code_attrs_off,
        code_reverse,
    ]

    output = string_buffer
    for ansi_esc_code in code_set:
        output = re.sub(ansi_esc_code, "", output)

    # CODE_NEXT_LINE must substitute with return
    output = re.sub(code_next_line, self.RETURN, output)

    log.debug("new_output = {0}".format(output))
    log.debug("repr = {0}".format(repr(output)))

    return output
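A quick standalone check of the core idea (hedged: this re-uses only the regex patterns above; in netmiko the method runs on a connection object and self.RETURN is that class's newline):

import re

ESC = chr(27)
raw = ESC + "[2J" + ESC + "[24;27H" + "switch#" + ESC + "[?25h"
for pattern in (ESC + r"\[2J", ESC + r"\[\d+;\d+H", ESC + r"\[\?25h"):
    raw = re.sub(pattern, "", raw)
print(raw)   # -> switch#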
[ "def", "strip_ansi_escape_codes", "(", "self", ",", "string_buffer", ")", ":", "# noqa", "log", ".", "debug", "(", "\"In strip_ansi_escape_codes\"", ")", "log", ".", "debug", "(", "\"repr = {}\"", ".", "format", "(", "repr", "(", "string_buffer", ")", ")", ")", "code_position_cursor", "=", "chr", "(", "27", ")", "+", "r\"\\[\\d+;\\d+H\"", "code_show_cursor", "=", "chr", "(", "27", ")", "+", "r\"\\[\\?25h\"", "code_next_line", "=", "chr", "(", "27", ")", "+", "r\"E\"", "code_erase_line_end", "=", "chr", "(", "27", ")", "+", "r\"\\[K\"", "code_erase_line", "=", "chr", "(", "27", ")", "+", "r\"\\[2K\"", "code_erase_start_line", "=", "chr", "(", "27", ")", "+", "r\"\\[K\"", "code_enable_scroll", "=", "chr", "(", "27", ")", "+", "r\"\\[\\d+;\\d+r\"", "code_form_feed", "=", "chr", "(", "27", ")", "+", "r\"\\[1L\"", "code_carriage_return", "=", "chr", "(", "27", ")", "+", "r\"\\[1M\"", "code_disable_line_wrapping", "=", "chr", "(", "27", ")", "+", "r\"\\[\\?7l\"", "code_reset_mode_screen_options", "=", "chr", "(", "27", ")", "+", "r\"\\[\\?\\d+l\"", "code_reset_graphics_mode", "=", "chr", "(", "27", ")", "+", "r\"\\[00m\"", "code_erase_display", "=", "chr", "(", "27", ")", "+", "r\"\\[2J\"", "code_graphics_mode", "=", "chr", "(", "27", ")", "+", "r\"\\[\\d\\d;\\d\\dm\"", "code_graphics_mode2", "=", "chr", "(", "27", ")", "+", "r\"\\[\\d\\d;\\d\\d;\\d\\dm\"", "code_get_cursor_position", "=", "chr", "(", "27", ")", "+", "r\"\\[6n\"", "code_cursor_position", "=", "chr", "(", "27", ")", "+", "r\"\\[m\"", "code_erase_display", "=", "chr", "(", "27", ")", "+", "r\"\\[J\"", "code_attrs_off", "=", "chr", "(", "27", ")", "+", "r\"[0m\"", "code_reverse", "=", "chr", "(", "27", ")", "+", "r\"[7m\"", "code_set", "=", "[", "code_position_cursor", ",", "code_show_cursor", ",", "code_erase_line", ",", "code_enable_scroll", ",", "code_erase_start_line", ",", "code_form_feed", ",", "code_carriage_return", ",", "code_disable_line_wrapping", ",", "code_erase_line_end", ",", "code_reset_mode_screen_options", ",", "code_reset_graphics_mode", ",", "code_erase_display", ",", "code_graphics_mode", ",", "code_graphics_mode2", ",", "code_get_cursor_position", ",", "code_cursor_position", ",", "code_erase_display", ",", "code_attrs_off", ",", "code_reverse", ",", "]", "output", "=", "string_buffer", "for", "ansi_esc_code", "in", "code_set", ":", "output", "=", "re", ".", "sub", "(", "ansi_esc_code", ",", "\"\"", ",", "output", ")", "# CODE_NEXT_LINE must substitute with return", "output", "=", "re", ".", "sub", "(", "code_next_line", ",", "self", ".", "RETURN", ",", "output", ")", "log", ".", "debug", "(", "\"new_output = {0}\"", ".", "format", "(", "output", ")", ")", "log", ".", "debug", "(", "\"repr = {0}\"", ".", "format", "(", "repr", "(", "output", ")", ")", ")", "return", "output" ]
Remove any ANSI (VT100) ESC codes from the output http://en.wikipedia.org/wiki/ANSI_escape_code Note: this does not capture ALL possible ANSI Escape Codes only the ones I have encountered Current codes that are filtered: ESC = '\x1b' or chr(27) ESC = is the escape character [^ in hex ('\x1b') ESC[24;27H Position cursor ESC[?25h Show the cursor ESC[E Next line (HP does ESC-E) ESC[K Erase line from cursor to the end of line ESC[2K Erase entire line ESC[1;24r Enable scrolling from start to row end ESC[?6l Reset mode screen with options 640 x 200 monochrome (graphics) ESC[?7l Disable line wrapping ESC[2J Code erase display ESC[00;32m Color Green (30 to 37 are different colors) more general pattern is ESC[\d\d;\d\dm and ESC[\d\d;\d\d;\d\dm ESC[6n Get cursor position HP ProCurve and Cisco SG300 require this (possible others). :param string_buffer: The string to be processed to remove ANSI escape codes :type string_buffer: str
[ "Remove", "any", "ANSI", "(", "VT100", ")", "ESC", "codes", "from", "the", "output" ]
language: python | partition: train | avg_line_len: 38.08046
brmscheiner/ideogram
ideogram/importAnalysis.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/importAnalysis.py#L51-L55
def getImportFromObjects(node):
    '''Returns a list of the object names referenced by an import-from node'''
    somenames = [x.asname for x in node.names if x.asname]
    othernames = [x.name for x in node.names if not x.asname]
    return somenames + othernames
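A quick demonstration with the standard-library ast module, whose ImportFrom nodes carry the name/asname pairs the function inspects (assuming getImportFromObjects above is in scope):

import ast

tree = ast.parse("from os.path import join as pjoin, basename")
node = tree.body[0]                  # the ImportFrom node
print(getImportFromObjects(node))    # ['pjoin', 'basename']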
[ "def", "getImportFromObjects", "(", "node", ")", ":", "somenames", "=", "[", "x", ".", "asname", "for", "x", "in", "node", ".", "names", "if", "x", ".", "asname", "]", "othernames", "=", "[", "x", ".", "name", "for", "x", "in", "node", ".", "names", "if", "not", "x", ".", "asname", "]", "return", "somenames", "+", "othernames" ]
Returns a list of objects referenced by import from node
[ "Returns", "a", "list", "of", "objects", "referenced", "by", "import", "from", "node" ]
language: python | partition: train | avg_line_len: 49.4
lordmauve/lepton
examples/games/bonk/controls.py
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/controls.py#L78-L83
def bind_key_name(self, function, object_name):
    """Bind a key to an object name"""
    for funcname, name in self.name_map.items():
        if funcname == function:
            self.name_map[funcname] = object_name
[ "def", "bind_key_name", "(", "self", ",", "function", ",", "object_name", ")", ":", "for", "funcname", ",", "name", "in", "self", ".", "name_map", ".", "items", "(", ")", ":", "if", "funcname", "==", "function", ":", "self", ".", "name_map", "[", "funcname", "]", "=", "object_name" ]
Bind a key to an object name
[ "Bind", "a", "key", "to", "an", "object", "name" ]
language: python | partition: train | avg_line_len: 42.5
fedora-infra/fedora-messaging
fedora_messaging/api.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/api.py#L124-L197
def consume(callback, bindings=None, queues=None):
    """
    Start a message consumer that executes the provided callback when messages
    are received.

    This API is blocking and will not return until the process receives a
    signal from the operating system.

    .. warning:: This API runs the callback in the IO loop thread. This means
        if your callback could run for a length of time near the heartbeat
        interval, which is likely on the order of 60 seconds, the broker will
        kill the TCP connection and the message will be re-delivered on
        start-up.

        For now, use the :func:`twisted_consume` API which runs the callback
        in a thread and continues to handle AMQP events while the callback
        runs if you have a long-running callback.

    The callback receives a single positional argument, the message:

    >>> from fedora_messaging import api
    >>> def my_callback(message):
    ...     print(message)
    >>> bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['#']}]
    >>> queues = {
    ...     "demo": {"durable": False, "auto_delete": True, "exclusive": True, "arguments": {}}
    ... }
    >>> api.consume(my_callback, bindings=bindings, queues=queues)

    If the bindings and queue arguments are not provided, they will be loaded
    from the configuration.

    For complete documentation on writing consumers, see the :ref:`consumers`
    documentation.

    Args:
        callback (callable): A callable object that accepts one positional
            argument, a :class:`Message`, or a class object that implements
            the ``__call__`` method. The class will be instantiated before use.
        bindings (dict or list of dict): Bindings to declare before consuming.
            This should be the same format as the :ref:`conf-bindings`
            configuration.
        queues (dict): The queue or queues to declare and consume from. This
            should be in the same format as the :ref:`conf-queues`
            configuration dictionary where each key is a queue name and each
            value is a dictionary of settings for that queue.

    Raises:
        fedora_messaging.exceptions.HaltConsumer: If the consumer requests
            that it be stopped.
        ValueError: If the provided callback is not a function or a class that
            implements __call__, if the bindings argument is not a dict or
            list of dicts with the proper keys, or if the queues argument
            isn't a dict with the proper keys.
    """
    if isinstance(bindings, dict):
        bindings = [bindings]

    if bindings is None:
        bindings = config.conf["bindings"]
    else:
        try:
            config.validate_bindings(bindings)
        except exceptions.ConfigurationException as e:
            raise ValueError(e.message)

    if queues is None:
        queues = config.conf["queues"]
    else:
        try:
            config.validate_queues(queues)
        except exceptions.ConfigurationException as e:
            raise ValueError(e.message)

    session = _session.ConsumerSession()
    session.consume(callback, bindings=bindings, queues=queues)
[ "def", "consume", "(", "callback", ",", "bindings", "=", "None", ",", "queues", "=", "None", ")", ":", "if", "isinstance", "(", "bindings", ",", "dict", ")", ":", "bindings", "=", "[", "bindings", "]", "if", "bindings", "is", "None", ":", "bindings", "=", "config", ".", "conf", "[", "\"bindings\"", "]", "else", ":", "try", ":", "config", ".", "validate_bindings", "(", "bindings", ")", "except", "exceptions", ".", "ConfigurationException", "as", "e", ":", "raise", "ValueError", "(", "e", ".", "message", ")", "if", "queues", "is", "None", ":", "queues", "=", "config", ".", "conf", "[", "\"queues\"", "]", "else", ":", "try", ":", "config", ".", "validate_queues", "(", "queues", ")", "except", "exceptions", ".", "ConfigurationException", "as", "e", ":", "raise", "ValueError", "(", "e", ".", "message", ")", "session", "=", "_session", ".", "ConsumerSession", "(", ")", "session", ".", "consume", "(", "callback", ",", "bindings", "=", "bindings", ",", "queues", "=", "queues", ")" ]
Start a message consumer that executes the provided callback when messages are received. This API is blocking and will not return until the process receives a signal from the operating system. .. warning:: This API is runs the callback in the IO loop thread. This means if your callback could run for a length of time near the heartbeat interval, which is likely on the order of 60 seconds, the broker will kill the TCP connection and the message will be re-delivered on start-up. For now, use the :func:`twisted_consume` API which runs the callback in a thread and continues to handle AMQP events while the callback runs if you have a long-running callback. The callback receives a single positional argument, the message: >>> from fedora_messaging import api >>> def my_callback(message): ... print(message) >>> bindings = [{'exchange': 'amq.topic', 'queue': 'demo', 'routing_keys': ['#']}] >>> queues = { ... "demo": {"durable": False, "auto_delete": True, "exclusive": True, "arguments": {}} ... } >>> api.consume(my_callback, bindings=bindings, queues=queues) If the bindings and queue arguments are not provided, they will be loaded from the configuration. For complete documentation on writing consumers, see the :ref:`consumers` documentation. Args: callback (callable): A callable object that accepts one positional argument, a :class:`Message` or a class object that implements the ``__call__`` method. The class will be instantiated before use. bindings (dict or list of dict): Bindings to declare before consuming. This should be the same format as the :ref:`conf-bindings` configuration. queues (dict): The queue or queues to declare and consume from. This should be in the same format as the :ref:`conf-queues` configuration dictionary where each key is a queue name and each value is a dictionary of settings for that queue. Raises: fedora_messaging.exceptions.HaltConsumer: If the consumer requests that it be stopped. ValueError: If the consumer provide callback that is not a class that implements __call__ and is not a function, if the bindings argument is not a dict or list of dicts with the proper keys, or if the queues argument isn't a dict with the proper keys.
[ "Start", "a", "message", "consumer", "that", "executes", "the", "provided", "callback", "when", "messages", "are", "received", "." ]
language: python | partition: train | avg_line_len: 42.081081
tcalmant/ipopo
pelix/misc/mqtt_client.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/mqtt_client.py#L415-L427
def __on_publish(self, client, userdata, mid):
    # pylint: disable=W0613
    """
    A message has been published by a server

    :param client: Client that received the message
    :param userdata: User data (unused)
    :param mid: Message ID
    """
    try:
        self.__in_flight[mid].set()
    except KeyError:
        pass
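The pattern here is worth spelling out: __in_flight maps a message ID to a waitable object with a set() method, for which a threading.Event fits. A minimal sketch under that assumption, outside the real pelix class (note a robust client must register the event before the ACK can arrive):

import threading

in_flight = {}

def on_publish(client, userdata, mid):
    # broker acknowledged message `mid`: wake whoever is waiting on it
    try:
        in_flight[mid].set()
    except KeyError:
        pass

# publisher side (sketch): create the event, publish, then wait
# in_flight[mid] = threading.Event()
# ... publish the message with that mid ...
# in_flight[mid].wait(timeout=10)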
[ "def", "__on_publish", "(", "self", ",", "client", ",", "userdata", ",", "mid", ")", ":", "# pylint: disable=W0613", "try", ":", "self", ".", "__in_flight", "[", "mid", "]", ".", "set", "(", ")", "except", "KeyError", ":", "pass" ]
A message has been published by a server :param client: Client that received the message :param userdata: User data (unused) :param mid: Message ID
[ "A", "message", "has", "been", "published", "by", "a", "server" ]
language: python | partition: train | avg_line_len: 28.153846
mdeous/fatbotslim
fatbotslim/irc/bot.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/irc/bot.py#L198-L205
def _connect(self):
    """
    Connects the bot to the server and identifies itself.
    """
    self.conn = self._create_connection()
    spawn(self.conn.connect)
    self.set_nick(self.nick)
    self.cmd(u'USER', u'{0} 3 * {1}'.format(self.nick, self.realname))
[ "def", "_connect", "(", "self", ")", ":", "self", ".", "conn", "=", "self", ".", "_create_connection", "(", ")", "spawn", "(", "self", ".", "conn", ".", "connect", ")", "self", ".", "set_nick", "(", "self", ".", "nick", ")", "self", ".", "cmd", "(", "u'USER'", ",", "u'{0} 3 * {1}'", ".", "format", "(", "self", ".", "nick", ",", "self", ".", "realname", ")", ")" ]
Connects the bot to the server and identifies itself.
[ "Connects", "the", "bot", "to", "the", "server", "and", "identifies", "itself", "." ]
language: python | partition: train | avg_line_len: 35.625
vertexproject/synapse
synapse/lib/version.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/version.py#L110-L124
def fmtVersion(*vsnparts):
    '''
    Join a string of parts together with a . separator.

    Args:
        *vsnparts: Version parts to join; each part is lowercased.

    Returns:
        str: The formatted version string.
    '''
    if len(vsnparts) < 1:
        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion',
                                mesg='Not enough version parts to form a version string with.',)
    ret = '.'.join([str(part).lower() for part in vsnparts])
    return ret
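A few example calls (hedged: the import path mirrors the file path above):

from synapse.lib.version import fmtVersion

fmtVersion(2, 45, 0)        # -> '2.45.0'
fmtVersion('1', 0, 'RC1')   # -> '1.0.rc1'  (parts are lowercased)
fmtVersion()                # raises s_exc.BadTypeValu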
[ "def", "fmtVersion", "(", "*", "vsnparts", ")", ":", "if", "len", "(", "vsnparts", ")", "<", "1", ":", "raise", "s_exc", ".", "BadTypeValu", "(", "valu", "=", "repr", "(", "vsnparts", ")", ",", "name", "=", "'fmtVersion'", ",", "mesg", "=", "'Not enough version parts to form a version string with.'", ",", ")", "ret", "=", "'.'", ".", "join", "(", "[", "str", "(", "part", ")", ".", "lower", "(", ")", "for", "part", "in", "vsnparts", "]", ")", "return", "ret" ]
Join a string of parts together with a . separator. Args: *vsnparts: Returns:
[ "Join", "a", "string", "of", "parts", "together", "with", "a", ".", "separator", "." ]
language: python | partition: train | avg_line_len: 26.666667
openeventdata/mordecai
mordecai/geoparse.py
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L819-L831
def ranker(self, X, meta):
    """
    Sort the place features list by the score of its relevance.
    """
    # total score is just a sum of each row
    total_score = X.sum(axis=1).transpose()
    total_score = np.squeeze(np.asarray(total_score))  # matrix to array
    ranks = total_score.argsort()
    ranks = ranks[::-1]
    # sort the list of dicts according to ranks
    sorted_meta = [meta[r] for r in ranks]
    sorted_X = X[ranks]
    return (sorted_X, sorted_meta)
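A worked example of the scoring arithmetic on a toy feature matrix (standalone, mirroring the method body rather than calling it):

import numpy as np

X = np.matrix([[1, 0],    # row sum 1
               [3, 2],    # row sum 5
               [2, 0]])   # row sum 2
meta = [{"place": "a"}, {"place": "b"}, {"place": "c"}]

total_score = np.squeeze(np.asarray(X.sum(axis=1).transpose()))
ranks = total_score.argsort()[::-1]        # [1, 2, 0]: highest score first
print([meta[r]["place"] for r in ranks])   # ['b', 'c', 'a']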
[ "def", "ranker", "(", "self", ",", "X", ",", "meta", ")", ":", "# total score is just a sum of each row", "total_score", "=", "X", ".", "sum", "(", "axis", "=", "1", ")", ".", "transpose", "(", ")", "total_score", "=", "np", ".", "squeeze", "(", "np", ".", "asarray", "(", "total_score", ")", ")", "# matrix to array", "ranks", "=", "total_score", ".", "argsort", "(", ")", "ranks", "=", "ranks", "[", ":", ":", "-", "1", "]", "# sort the list of dicts according to ranks", "sorted_meta", "=", "[", "meta", "[", "r", "]", "for", "r", "in", "ranks", "]", "sorted_X", "=", "X", "[", "ranks", "]", "return", "(", "sorted_X", ",", "sorted_meta", ")" ]
Sort the place features list by the score of its relevance.
[ "Sort", "the", "place", "features", "list", "by", "the", "score", "of", "its", "relevance", "." ]
language: python | partition: train | avg_line_len: 39.307692
tanghaibao/jcvi
jcvi/utils/taxonomy.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/taxonomy.py#L116-L132
def MRCA(list_of_taxids):
    """
    This gets the most recent common ancestor (MRCA) for a list of taxids

    >>> mylist = [3702, 3649, 3694, 3880]
    >>> MRCA(mylist)
    'rosids'
    """
    from ete2 import Tree

    t = TaxIDTree(list_of_taxids)
    t = Tree(str(t), format=8)
    ancestor = t.get_common_ancestor(*t.get_leaves())
    return ancestor.name
[ "def", "MRCA", "(", "list_of_taxids", ")", ":", "from", "ete2", "import", "Tree", "t", "=", "TaxIDTree", "(", "list_of_taxids", ")", "t", "=", "Tree", "(", "str", "(", "t", ")", ",", "format", "=", "8", ")", "ancestor", "=", "t", ".", "get_common_ancestor", "(", "*", "t", ".", "get_leaves", "(", ")", ")", "return", "ancestor", ".", "name" ]
This gets the most recent common ancester (MRCA) for a list of taxids >>> mylist = [3702, 3649, 3694, 3880] >>> MRCA(mylist) 'rosids'
[ "This", "gets", "the", "most", "recent", "common", "ancester", "(", "MRCA", ")", "for", "a", "list", "of", "taxids" ]
language: python | partition: train | avg_line_len: 20.588235
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L3565-L3574
def blacklistClient(self, clientName: str, reason: str = None, code: int = None):
    """
    Add the client specified by `clientName` to this node's blacklist
    """
    msg = "{} blacklisting client {}".format(self, clientName)
    if reason:
        msg += " for reason {}".format(reason)
    logger.display(msg)
    self.clientBlacklister.blacklist(clientName)
[ "def", "blacklistClient", "(", "self", ",", "clientName", ":", "str", ",", "reason", ":", "str", "=", "None", ",", "code", ":", "int", "=", "None", ")", ":", "msg", "=", "\"{} blacklisting client {}\"", ".", "format", "(", "self", ",", "clientName", ")", "if", "reason", ":", "msg", "+=", "\" for reason {}\"", ".", "format", "(", "reason", ")", "logger", ".", "display", "(", "msg", ")", "self", ".", "clientBlacklister", ".", "blacklist", "(", "clientName", ")" ]
Add the client specified by `clientName` to this node's blacklist
[ "Add", "the", "client", "specified", "by", "clientName", "to", "this", "node", "s", "blacklist" ]
language: python | partition: train | avg_line_len: 41.2
OLC-Bioinformatics/sipprverse
sixteenS/sixteenS.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteenS.py#L79-L102
def reporter(self):
    """
    Creates a report of the results
    """
    # Create the path in which the reports are stored
    make_path(self.reportpath)
    header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
    data = ''
    with open(os.path.join(self.reportpath, self.analysistype + '.csv'), 'w') as report:
        for sample in self.runmetadata.samples:
            data += sample.name + ','
            if sample[self.analysistype].results:
                if not sample[self.analysistype].multiple:
                    for name, identity in sample[self.analysistype].results.items():
                        if name == sample[self.analysistype].besthit[0]:
                            data += '{},{},{},{}\n'.format(
                                name, identity,
                                sample[self.analysistype].genera[name],
                                sample[self.analysistype].avgdepth[name])
                else:
                    data += '{},{},{},{}\n'.format(
                        'multiple', 'NA',
                        ';'.join(sample[self.analysistype].classification),
                        'NA')
            else:
                data += '\n'
        report.write(header)
        report.write(data)
[ "def", "reporter", "(", "self", ")", ":", "# Create the path in which the reports are stored", "make_path", "(", "self", ".", "reportpath", ")", "header", "=", "'Strain,Gene,PercentIdentity,Genus,FoldCoverage\\n'", "data", "=", "''", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "self", ".", "analysistype", "+", "'.csv'", ")", ",", "'w'", ")", "as", "report", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "data", "+=", "sample", ".", "name", "+", "','", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "results", ":", "if", "not", "sample", "[", "self", ".", "analysistype", "]", ".", "multiple", ":", "for", "name", ",", "identity", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "results", ".", "items", "(", ")", ":", "if", "name", "==", "sample", "[", "self", ".", "analysistype", "]", ".", "besthit", "[", "0", "]", ":", "data", "+=", "'{},{},{},{}\\n'", ".", "format", "(", "name", ",", "identity", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "genera", "[", "name", "]", ",", "sample", "[", "self", ".", "analysistype", "]", ".", "avgdepth", "[", "name", "]", ")", "else", ":", "data", "+=", "'{},{},{},{}\\n'", ".", "format", "(", "'multiple'", ",", "'NA'", ",", "';'", ".", "join", "(", "sample", "[", "self", ".", "analysistype", "]", ".", "classification", ")", ",", "'NA'", ")", "else", ":", "data", "+=", "'\\n'", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
Creates a report of the results
[ "Creates", "a", "report", "of", "the", "results" ]
language: python | partition: train | avg_line_len: 53.833333
amanusk/s-tui
s_tui/sturwid/bar_graph_vector.py
https://github.com/amanusk/s-tui/blob/5e89d15081e716024db28ec03b1e3a7710330951/s_tui/sturwid/bar_graph_vector.py#L112-L129
def get_label_scale(self, min_val, max_val, size):
    """Dynamically change the scale of the graph (y label)"""
    if size < self.SCALE_DENSITY:
        label_cnt = 1
    else:
        label_cnt = int(size / self.SCALE_DENSITY)
    try:
        if max_val >= 100:
            label = [int((min_val + i * (max_val - min_val) / label_cnt))
                     for i in range(label_cnt + 1)]
        else:
            label = [round((min_val + i * (max_val - min_val) / label_cnt), 1)
                     for i in range(label_cnt + 1)]
        return label
    except ZeroDivisionError:
        logging.debug("Side label creation divided by 0")
        return ""
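The arithmetic in a worked example: with label_cnt = 3 (e.g. size 15 and an assumed SCALE_DENSITY of 5; the real value is a class constant), a 0-50 range gets four evenly spaced one-decimal labels, since max_val < 100.

min_val, max_val, label_cnt = 0, 50, 3
print([round(min_val + i * (max_val - min_val) / label_cnt, 1)
       for i in range(label_cnt + 1)])   # [0.0, 16.7, 33.3, 50.0]
# with max_val >= 100 the same positions are truncated to ints instead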
[ "def", "get_label_scale", "(", "self", ",", "min_val", ",", "max_val", ",", "size", ")", ":", "if", "size", "<", "self", ".", "SCALE_DENSITY", ":", "label_cnt", "=", "1", "else", ":", "label_cnt", "=", "int", "(", "size", "/", "self", ".", "SCALE_DENSITY", ")", "try", ":", "if", "max_val", ">=", "100", ":", "label", "=", "[", "int", "(", "(", "min_val", "+", "i", "*", "(", "max_val", "-", "min_val", ")", "/", "label_cnt", ")", ")", "for", "i", "in", "range", "(", "label_cnt", "+", "1", ")", "]", "else", ":", "label", "=", "[", "round", "(", "(", "min_val", "+", "i", "*", "(", "max_val", "-", "min_val", ")", "/", "label_cnt", ")", ",", "1", ")", "for", "i", "in", "range", "(", "label_cnt", "+", "1", ")", "]", "return", "label", "except", "ZeroDivisionError", ":", "logging", ".", "debug", "(", "\"Side label creation divided by 0\"", ")", "return", "\"\"" ]
Dynamically change the scale of the graph (y lable)
[ "Dynamically", "change", "the", "scale", "of", "the", "graph", "(", "y", "lable", ")" ]
language: python | partition: train | avg_line_len: 41.222222
kelproject/pykube
pykube/http.py
https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L195-L230
def get_kwargs(self, **kwargs):
    """
    Creates a full URL to request based on arguments.

    :Parameters:
       - `kwargs`: All keyword arguments to build a kubernetes API endpoint
    """
    version = kwargs.pop("version", "v1")
    if version == "v1":
        base = kwargs.pop("base", "/api")
    elif "/" in version:
        base = kwargs.pop("base", "/apis")
    else:
        if "base" not in kwargs:
            raise TypeError("unknown API version; base kwarg must be specified.")
        base = kwargs.pop("base")
    bits = [base, version]
    # Overwrite (default) namespace from context if it was set
    if "namespace" in kwargs:
        n = kwargs.pop("namespace")
        if n is not None:
            if n:
                namespace = n
            else:
                namespace = self.config.namespace
            if namespace:
                bits.extend([
                    "namespaces",
                    namespace,
                ])
    url = kwargs.get("url", "")
    if url.startswith("/"):
        url = url[1:]
    bits.append(url)
    kwargs["url"] = self.url + posixpath.join(*bits)
    return kwargs
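Two example calls showing how the URL is assembled (hedged: `api` stands for the client instance this method belongs to, with `api.url` assumed to be "https://k8s.example.com"):

api.get_kwargs(version="v1", namespace="default", url="pods")
# -> {'url': 'https://k8s.example.com/api/v1/namespaces/default/pods'}

api.get_kwargs(version="apps/v1", url="deployments")
# -> {'url': 'https://k8s.example.com/apis/apps/v1/deployments'}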
[ "def", "get_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "version", "=", "kwargs", ".", "pop", "(", "\"version\"", ",", "\"v1\"", ")", "if", "version", "==", "\"v1\"", ":", "base", "=", "kwargs", ".", "pop", "(", "\"base\"", ",", "\"/api\"", ")", "elif", "\"/\"", "in", "version", ":", "base", "=", "kwargs", ".", "pop", "(", "\"base\"", ",", "\"/apis\"", ")", "else", ":", "if", "\"base\"", "not", "in", "kwargs", ":", "raise", "TypeError", "(", "\"unknown API version; base kwarg must be specified.\"", ")", "base", "=", "kwargs", ".", "pop", "(", "\"base\"", ")", "bits", "=", "[", "base", ",", "version", "]", "# Overwrite (default) namespace from context if it was set", "if", "\"namespace\"", "in", "kwargs", ":", "n", "=", "kwargs", ".", "pop", "(", "\"namespace\"", ")", "if", "n", "is", "not", "None", ":", "if", "n", ":", "namespace", "=", "n", "else", ":", "namespace", "=", "self", ".", "config", ".", "namespace", "if", "namespace", ":", "bits", ".", "extend", "(", "[", "\"namespaces\"", ",", "namespace", ",", "]", ")", "url", "=", "kwargs", ".", "get", "(", "\"url\"", ",", "\"\"", ")", "if", "url", ".", "startswith", "(", "\"/\"", ")", ":", "url", "=", "url", "[", "1", ":", "]", "bits", ".", "append", "(", "url", ")", "kwargs", "[", "\"url\"", "]", "=", "self", ".", "url", "+", "posixpath", ".", "join", "(", "*", "bits", ")", "return", "kwargs" ]
Creates a full URL to request based on arguments. :Parametes: - `kwargs`: All keyword arguments to build a kubernetes API endpoint
[ "Creates", "a", "full", "URL", "to", "request", "based", "on", "arguments", "." ]
language: python | partition: train | avg_line_len: 34.5
user-cont/conu
conu/backend/k8s/pod.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/pod.py#L239-L257
def get_from_string(cls, string_phase):
    """
    Convert string value obtained from k8s API to PodPhase enum value

    :param string_phase: str, phase value from Kubernetes API
    :return: PodPhase
    """
    if string_phase == 'Pending':
        return cls.PENDING
    elif string_phase == 'Running':
        return cls.RUNNING
    elif string_phase == 'Succeeded':
        return cls.SUCCEEDED
    elif string_phase == 'Failed':
        return cls.FAILED
    elif string_phase == 'Unknown':
        return cls.UNKNOWN
    return cls.UNKNOWN
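The if/elif chain is equivalent to a dict lookup with a default; a possible alternative sketch (not conu's actual code):

_PHASES = {
    'Pending': 'PENDING',
    'Running': 'RUNNING',
    'Succeeded': 'SUCCEEDED',
    'Failed': 'FAILED',
    'Unknown': 'UNKNOWN',
}

def get_from_string(cls, string_phase):
    # fall back to UNKNOWN for anything unrecognized, as above
    return getattr(cls, _PHASES.get(string_phase, 'UNKNOWN'))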
[ "def", "get_from_string", "(", "cls", ",", "string_phase", ")", ":", "if", "string_phase", "==", "'Pending'", ":", "return", "cls", ".", "PENDING", "elif", "string_phase", "==", "'Running'", ":", "return", "cls", ".", "RUNNING", "elif", "string_phase", "==", "'Succeeded'", ":", "return", "cls", ".", "SUCCEEDED", "elif", "string_phase", "==", "'Failed'", ":", "return", "cls", ".", "FAILED", "elif", "string_phase", "==", "'Unknown'", ":", "return", "cls", ".", "UNKNOWN", "return", "cls", ".", "UNKNOWN" ]
Convert string value obtained from k8s API to PodPhase enum value :param string_phase: str, phase value from Kubernetes API :return: PodPhase
[ "Convert", "string", "value", "obtained", "from", "k8s", "API", "to", "PodPhase", "enum", "value", ":", "param", "string_phase", ":", "str", "phase", "value", "from", "Kubernetes", "API", ":", "return", ":", "PodPhase" ]
language: python | partition: train | avg_line_len: 31.315789
NuGrid/NuGridPy
nugridpy/astronomy.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/astronomy.py#L333-L359
def period(A, M1, M2):
    """
    calculate binary period from separation.

    Parameters
    ----------
    A : float
        separation A Rsun.
    M1, M2 : float
        M in Msun.

    Returns
    -------
    p
        period in days.
    """
    A *= rsun_cm
    print(A)
    velocity = np.sqrt(grav_const * msun_g * (M1 + M2) / A)
    print(old_div(velocity, 1.e5))
    p = 2. * np.pi * A / velocity
    p /= (60 * 60 * 24.)
    return p
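A sanity check of the formula with the Earth-Sun system, where A is roughly 215 Rsun (1 AU) and the period should come out near 365 days (hedged: the import path mirrors the file path above; the function also prints the separation in cm and the velocity in km/s):

from nugridpy.astronomy import period

p = period(215.0, 1.0, 3.0e-6)   # M2 = one Earth mass in Msun
print(p)                         # ~365 days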
[ "def", "period", "(", "A", ",", "M1", ",", "M2", ")", ":", "A", "*=", "rsun_cm", "print", "(", "A", ")", "velocity", "=", "np", ".", "sqrt", "(", "grav_const", "*", "msun_g", "*", "(", "M1", "+", "M2", ")", "/", "A", ")", "print", "(", "old_div", "(", "velocity", ",", "1.e5", ")", ")", "p", "=", "2.", "*", "np", ".", "pi", "*", "A", "/", "velocity", "p", "/=", "(", "60", "*", "60", "*", "24.", ")", "return", "p" ]
calculate binary period from separation. Parameters ---------- A : float separation A Rsun. M1, M2 : float M in Msun. Returns ------- p period in days.
[ "calculate", "binary", "period", "from", "separation", "." ]
language: python | partition: train | avg_line_len: 15.296296
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L169-L222
def proximal_translation(prox_factory, y):
    r"""Calculate the proximal of the translated function F(x - y).

    Parameters
    ----------
    prox_factory : callable
        A factory function that, when called with a step size, returns the
        proximal operator of ``F``.
    y : Element in domain of ``F``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    Given a functional :math:`F`, this is calculated according to the rule

    .. math::
        \mathrm{prox}_{\sigma F(\cdot - y)}(x) =
        y + \mathrm{prox}_{\sigma F}(x - y)

    where :math:`y` is the translation, and :math:`\sigma` is the step size.

    For reference on the identity used, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods
    in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L,
    Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse
    problems in science and engineering, Springer, 2011.
    """
    def translation_prox_factory(sigma):
        """Create proximal for the translation with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``s * F( . - y)`` where ``s`` is the
            step size
        """
        return (ConstantOperator(y) + prox_factory(sigma) *
                (IdentityOperator(y.space) - ConstantOperator(y)))

    return translation_prox_factory
[ "def", "proximal_translation", "(", "prox_factory", ",", "y", ")", ":", "def", "translation_prox_factory", "(", "sigma", ")", ":", "\"\"\"Create proximal for the translation with a given sigma.\n\n Parameters\n ----------\n sigma : positive float\n Step size parameter\n\n Returns\n -------\n proximal : `Operator`\n The proximal operator of ``s * F( . - y)`` where ``s`` is the\n step size\n \"\"\"", "return", "(", "ConstantOperator", "(", "y", ")", "+", "prox_factory", "(", "sigma", ")", "*", "(", "IdentityOperator", "(", "y", ".", "space", ")", "-", "ConstantOperator", "(", "y", ")", ")", ")", "return", "translation_prox_factory" ]
r"""Calculate the proximal of the translated function F(x - y). Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F``. y : Element in domain of ``F``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Given a functional :math:`F`, this is calculated according to the rule .. math:: \mathrm{prox}_{\sigma F( \cdot - y)}(x) = y + \mathrm{prox}_{\sigma F}(x - y) where :math:`y` is the translation, and :math:`\sigma` is the step size. For reference on the identity used, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011.
[ "r", "Calculate", "the", "proximal", "of", "the", "translated", "function", "F", "(", "x", "-", "y", ")", "." ]
python
train
29.5
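A plain-NumPy sketch of the translation identity itself, with L1 soft-thresholding standing in for prox_F (the odl operator algebra is replaced by arrays; all names here are illustrative):

import numpy as np

def prox_l1(sigma):
    # proximal of sigma * ||.||_1 is soft-thresholding
    return lambda x: np.sign(x) * np.maximum(np.abs(x) - sigma, 0.0)

def prox_translated(prox_factory, y):
    # prox of sigma * F(. - y): shift by y, apply prox_F, shift back
    return lambda sigma: (lambda x: y + prox_factory(sigma)(x - y))

y = np.array([1.0, -2.0, 0.5])
x = np.array([2.0, 0.0, 0.0])
print(prox_translated(prox_l1, y)(0.3)(x))  # [ 1.7 -0.3  0.3]
print(y + prox_l1(0.3)(x - y))              # identical by construction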
jreese/ent
ent/ent.py
https://github.com/jreese/ent/blob/65f7c6498536c551ee1fdb43c3c429f24aa0f755/ent/ent.py#L133-L176
def merge(cls, *args, **kwargs): """Create a new Ent from one or more existing Ents. Keys in the later Ent objects will overwrite the keys of the previous Ents. Later keys of different type than in earlier Ents will be bravely ignored. The following keyword arguments are recognized: newkeys: boolean value to determine whether keys from later Ents should be included if they do not exist in earlier Ents. ignore: list of strings of key names that should not be overridden by later Ent keys. """ newkeys = bool(kwargs.get('newkeys', False)) ignore = kwargs.get('ignore', list()) if len(args) < 1: raise ValueError('no ents given to Ent.merge()') elif not all(isinstance(s, Ent) for s in args): raise ValueError('all positional arguments to Ent.merge() must ' 'be instances of Ent') ent = args[0] data = cls.load(ent) for ent in args[1:]: for key, value in ent.__dict__.items(): if key in ignore: continue if key in data.__dict__: v1 = data.__dict__[key] if type(value) == type(v1): if isinstance(v1, Ent): data.__dict__[key] = cls.merge(v1, value, **kwargs) else: data.__dict__[key] = cls.load(value) elif newkeys: data.__dict__[key] = value return data
[ "def", "merge", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "newkeys", "=", "bool", "(", "kwargs", ".", "get", "(", "'newkeys'", ",", "False", ")", ")", "ignore", "=", "kwargs", ".", "get", "(", "'ignore'", ",", "list", "(", ")", ")", "if", "len", "(", "args", ")", "<", "1", ":", "raise", "ValueError", "(", "'no ents given to Ent.merge()'", ")", "elif", "not", "all", "(", "isinstance", "(", "s", ",", "Ent", ")", "for", "s", "in", "args", ")", ":", "raise", "ValueError", "(", "'all positional arguments to Ent.merge() must '", "'be instances of Ent'", ")", "ent", "=", "args", "[", "0", "]", "data", "=", "cls", ".", "load", "(", "ent", ")", "for", "ent", "in", "args", "[", "1", ":", "]", ":", "for", "key", ",", "value", "in", "ent", ".", "__dict__", ".", "items", "(", ")", ":", "if", "key", "in", "ignore", ":", "continue", "if", "key", "in", "data", ".", "__dict__", ":", "v1", "=", "data", ".", "__dict__", "[", "key", "]", "if", "type", "(", "value", ")", "==", "type", "(", "v1", ")", ":", "if", "isinstance", "(", "v1", ",", "Ent", ")", ":", "data", ".", "__dict__", "[", "key", "]", "=", "cls", ".", "merge", "(", "v1", ",", "value", ",", "*", "*", "kwargs", ")", "else", ":", "data", ".", "__dict__", "[", "key", "]", "=", "cls", ".", "load", "(", "value", ")", "elif", "newkeys", ":", "data", ".", "__dict__", "[", "key", "]", "=", "value", "return", "data" ]
Create a new Ent from one or more existing Ents. Keys in the later Ent objects will overwrite the keys of the previous Ents. Later keys of different type than in earlier Ents will be bravely ignored. The following keyword arguments are recognized: newkeys: boolean value to determine whether keys from later Ents should be included if they do not exist in earlier Ents. ignore: list of strings of key names that should not be overridden by later Ent keys.
[ "Create", "a", "new", "Ent", "from", "one", "or", "more", "existing", "Ents", ".", "Keys", "in", "the", "later", "Ent", "objects", "will", "overwrite", "the", "keys", "of", "the", "previous", "Ents", ".", "Later", "keys", "of", "different", "type", "than", "in", "earlier", "Ents", "will", "be", "bravely", "ignored", "." ]
python
train
35.5
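A hypothetical usage sketch, assuming Ent.load accepts plain dicts as the cls.load calls above suggest:

a = Ent.load({'host': 'localhost', 'port': 80, 'debug': False})
b = Ent.load({'port': '8080', 'debug': True, 'retries': 3})

merged = Ent.merge(a, b, newkeys=True, ignore=['host'])
# merged.port stays 80 (the str vs int type mismatch is bravely ignored),
# merged.debug becomes True, and merged.retries appears because newkeys=True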
marshmallow-code/marshmallow
src/marshmallow/schema.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/schema.py#L579-L682
def _deserialize( self, data, fields_dict, error_store, many=False, partial=False, unknown=RAISE, dict_class=dict, index_errors=True, index=None, ): """Deserialize ``data`` based on the schema defined by ``fields_dict``. :param dict data: The data to deserialize. :param dict fields_dict: Mapping of field names to :class:`Field` objects. :param ErrorStore error_store: Structure to store errors. :param bool many: Set to `True` if ``data`` should be deserialized as a collection. :param bool|tuple partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. :param type dict_class: Dictionary class used to construct the output. :param bool index_errors: Whether to store the index of invalid items in ``self.errors`` when ``many=True``. :param int index: Index of the item being serialized (for storing errors) if serializing a collection, otherwise `None`. :return: A dictionary of the deserialized data. """ index = index if index_errors else None if many: if not is_collection(data): error_store.store_error([self.error_messages['type']], index=index) ret = [] else: self._pending = True ret = [ self._deserialize( d, fields_dict, error_store, many=False, partial=partial, unknown=unknown, dict_class=dict_class, index=idx, index_errors=index_errors, ) for idx, d in enumerate(data) ] self._pending = False return ret ret = dict_class() # Check data is a dict if not isinstance(data, Mapping): error_store.store_error([self.error_messages['type']], index=index) else: partial_is_collection = is_collection(partial) for attr_name, field_obj in iteritems(fields_dict): if field_obj.dump_only: continue field_name = attr_name if field_obj.data_key: field_name = field_obj.data_key raw_value = data.get(field_name, missing) if raw_value is missing: # Ignore missing field if we're allowed to. if ( partial is True or (partial_is_collection and attr_name in partial) ): continue d_kwargs = {} if isinstance(field_obj, Nested): # Allow partial loading of nested schemas. if partial_is_collection: prefix = field_name + '.' len_prefix = len(prefix) sub_partial = [f[len_prefix:] for f in partial if f.startswith(prefix)] else: sub_partial = partial d_kwargs['partial'] = sub_partial getter = lambda val: field_obj.deserialize( val, field_name, data, **d_kwargs ) value = self._call_and_store( getter_func=getter, data=raw_value, field_name=field_name, error_store=error_store, index=index, ) if value is not missing: key = fields_dict[attr_name].attribute or attr_name set_value(ret, key, value) if unknown != EXCLUDE: fields = { field_obj.data_key or field_name for field_name, field_obj in fields_dict.items() if not field_obj.dump_only } for key in set(data) - fields: value = data[key] if unknown == INCLUDE: set_value(ret, key, value) elif unknown == RAISE: error_store.store_error( [self.error_messages['unknown']], key, (index if index_errors else None), ) return ret
[ "def", "_deserialize", "(", "self", ",", "data", ",", "fields_dict", ",", "error_store", ",", "many", "=", "False", ",", "partial", "=", "False", ",", "unknown", "=", "RAISE", ",", "dict_class", "=", "dict", ",", "index_errors", "=", "True", ",", "index", "=", "None", ",", ")", ":", "index", "=", "index", "if", "index_errors", "else", "None", "if", "many", ":", "if", "not", "is_collection", "(", "data", ")", ":", "error_store", ".", "store_error", "(", "[", "self", ".", "error_messages", "[", "'type'", "]", "]", ",", "index", "=", "index", ")", "ret", "=", "[", "]", "else", ":", "self", ".", "_pending", "=", "True", "ret", "=", "[", "self", ".", "_deserialize", "(", "d", ",", "fields_dict", ",", "error_store", ",", "many", "=", "False", ",", "partial", "=", "partial", ",", "unknown", "=", "unknown", ",", "dict_class", "=", "dict_class", ",", "index", "=", "idx", ",", "index_errors", "=", "index_errors", ",", ")", "for", "idx", ",", "d", "in", "enumerate", "(", "data", ")", "]", "self", ".", "_pending", "=", "False", "return", "ret", "ret", "=", "dict_class", "(", ")", "# Check data is a dict", "if", "not", "isinstance", "(", "data", ",", "Mapping", ")", ":", "error_store", ".", "store_error", "(", "[", "self", ".", "error_messages", "[", "'type'", "]", "]", ",", "index", "=", "index", ")", "else", ":", "partial_is_collection", "=", "is_collection", "(", "partial", ")", "for", "attr_name", ",", "field_obj", "in", "iteritems", "(", "fields_dict", ")", ":", "if", "field_obj", ".", "dump_only", ":", "continue", "field_name", "=", "attr_name", "if", "field_obj", ".", "data_key", ":", "field_name", "=", "field_obj", ".", "data_key", "raw_value", "=", "data", ".", "get", "(", "field_name", ",", "missing", ")", "if", "raw_value", "is", "missing", ":", "# Ignore missing field if we're allowed to.", "if", "(", "partial", "is", "True", "or", "(", "partial_is_collection", "and", "attr_name", "in", "partial", ")", ")", ":", "continue", "d_kwargs", "=", "{", "}", "if", "isinstance", "(", "field_obj", ",", "Nested", ")", ":", "# Allow partial loading of nested schemas.", "if", "partial_is_collection", ":", "prefix", "=", "field_name", "+", "'.'", "len_prefix", "=", "len", "(", "prefix", ")", "sub_partial", "=", "[", "f", "[", "len_prefix", ":", "]", "for", "f", "in", "partial", "if", "f", ".", "startswith", "(", "prefix", ")", "]", "else", ":", "sub_partial", "=", "partial", "d_kwargs", "[", "'partial'", "]", "=", "sub_partial", "getter", "=", "lambda", "val", ":", "field_obj", ".", "deserialize", "(", "val", ",", "field_name", ",", "data", ",", "*", "*", "d_kwargs", ")", "value", "=", "self", ".", "_call_and_store", "(", "getter_func", "=", "getter", ",", "data", "=", "raw_value", ",", "field_name", "=", "field_name", ",", "error_store", "=", "error_store", ",", "index", "=", "index", ",", ")", "if", "value", "is", "not", "missing", ":", "key", "=", "fields_dict", "[", "attr_name", "]", ".", "attribute", "or", "attr_name", "set_value", "(", "ret", ",", "key", ",", "value", ")", "if", "unknown", "!=", "EXCLUDE", ":", "fields", "=", "{", "field_obj", ".", "data_key", "or", "field_name", "for", "field_name", ",", "field_obj", "in", "fields_dict", ".", "items", "(", ")", "if", "not", "field_obj", ".", "dump_only", "}", "for", "key", "in", "set", "(", "data", ")", "-", "fields", ":", "value", "=", "data", "[", "key", "]", "if", "unknown", "==", "INCLUDE", ":", "set_value", "(", "ret", ",", "key", ",", "value", ")", "elif", "unknown", "==", "RAISE", ":", "error_store", ".", "store_error", "(", "[", "self", 
".", "error_messages", "[", "'unknown'", "]", "]", ",", "key", ",", "(", "index", "if", "index_errors", "else", "None", ")", ",", ")", "return", "ret" ]
Deserialize ``data`` based on the schema defined by ``fields_dict``. :param dict data: The data to deserialize. :param dict fields_dict: Mapping of field names to :class:`Field` objects. :param ErrorStore error_store: Structure to store errors. :param bool many: Set to `True` if ``data`` should be deserialized as a collection. :param bool|tuple partial: Whether to ignore missing fields and not require any fields declared. Propagates down to ``Nested`` fields as well. If its value is an iterable, only missing fields listed in that iterable will be ignored. Use dot delimiters to specify nested fields. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. :param type dict_class: Dictionary class used to construct the output. :param bool index_errors: Whether to store the index of invalid items in ``self.errors`` when ``many=True``. :param int index: Index of the item being serialized (for storing errors) if serializing a collection, otherwise `None`. :return: A dictionary of the deserialized data.
[ "Deserialize", "data", "based", "on", "the", "schema", "defined", "by", "fields_dict", "." ]
python
train
45.932692
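The public entry point for this machinery is Schema.load; a short sketch of the partial propagation and unknown handling implemented above:

from marshmallow import Schema, fields, EXCLUDE

class ArtistSchema(Schema):
    name = fields.Str(required=True)

class AlbumSchema(Schema):
    title = fields.Str(required=True)
    artist = fields.Nested(ArtistSchema, required=True)

schema = AlbumSchema()
# dot-delimited partial names propagate into Nested fields (the sub_partial logic)
print(schema.load({'artist': {}}, partial=('title', 'artist.name')))
# {'artist': {}}
# unknown keys are silently dropped instead of raising when unknown=EXCLUDE
print(schema.load({'title': 'X', 'artist': {'name': 'Y'}, 'extra': 1}, unknown=EXCLUDE))
# {'title': 'X', 'artist': {'name': 'Y'}}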
hydraplatform/hydra-base
hydra_base/lib/template.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L1724-L1753
def validate_attr(resource_attr_id, scenario_id, template_id=None): """ Check that a resource attribute satisfies the requirements of all the types of the resource. """ rs = db.DBSession.query(ResourceScenario).\ filter(ResourceScenario.resource_attr_id==resource_attr_id, ResourceScenario.scenario_id==scenario_id).options( joinedload_all("resourceattr")).options( joinedload_all("dataset") ).one() error = None try: _do_validate_resourcescenario(rs, template_id) except HydraError as e: error = JSONObject(dict( ref_key = rs.resourceattr.ref_key, ref_id = rs.resourceattr.get_resource_id(), ref_name = rs.resourceattr.get_resource().get_name(), resource_attr_id = rs.resource_attr_id, attr_id = rs.resourceattr.attr.id, attr_name = rs.resourceattr.attr.name, dataset_id = rs.dataset_id, scenario_id=scenario_id, template_id=template_id, error_text=e.args[0])) return error
[ "def", "validate_attr", "(", "resource_attr_id", ",", "scenario_id", ",", "template_id", "=", "None", ")", ":", "rs", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceScenario", ")", ".", "filter", "(", "ResourceScenario", ".", "resource_attr_id", "==", "resource_attr_id", ",", "ResourceScenario", ".", "scenario_id", "==", "scenario_id", ")", ".", "options", "(", "joinedload_all", "(", "\"resourceattr\"", ")", ")", ".", "options", "(", "joinedload_all", "(", "\"dataset\"", ")", ")", ".", "one", "(", ")", "error", "=", "None", "try", ":", "_do_validate_resourcescenario", "(", "rs", ",", "template_id", ")", "except", "HydraError", "as", "e", ":", "error", "=", "JSONObject", "(", "dict", "(", "ref_key", "=", "rs", ".", "resourceattr", ".", "ref_key", ",", "ref_id", "=", "rs", ".", "resourceattr", ".", "get_resource_id", "(", ")", ",", "ref_name", "=", "rs", ".", "resourceattr", ".", "get_resource", "(", ")", ".", "get_name", "(", ")", ",", "resource_attr_id", "=", "rs", ".", "resource_attr_id", ",", "attr_id", "=", "rs", ".", "resourceattr", ".", "attr", ".", "id", ",", "attr_name", "=", "rs", ".", "resourceattr", ".", "attr", ".", "name", ",", "dataset_id", "=", "rs", ".", "dataset_id", ",", "scenario_id", "=", "scenario_id", ",", "template_id", "=", "template_id", ",", "error_text", "=", "e", ".", "args", "[", "0", "]", ")", ")", "return", "error" ]
Check that a resource attribute satisfies the requirements of all the types of the resource.
[ "Check", "that", "a", "resource", "attribute", "satisfies", "the", "requirements", "of", "all", "the", "types", "of", "the", "resource", "." ]
python
train
38.566667
gebn/nibble
nibble/decorators.py
https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/decorators.py#L43-L62
def python_2_format_compatible(method): """ Handles bytestring and unicode inputs for the `__format__()` method in Python 2. This function has no effect in Python 3. :param method: The `__format__()` method to wrap. :return: The wrapped method. """ if six.PY3: return method def wrapper(self, format_spec): formatted = method(self, format_spec) if isinstance(format_spec, str): # bytestring return formatted.encode('utf-8') # unicode return formatted return wrapper
[ "def", "python_2_format_compatible", "(", "method", ")", ":", "if", "six", ".", "PY3", ":", "return", "method", "def", "wrapper", "(", "self", ",", "format_spec", ")", ":", "formatted", "=", "method", "(", "self", ",", "format_spec", ")", "if", "isinstance", "(", "format_spec", ",", "str", ")", ":", "# bytestring", "return", "formatted", ".", "encode", "(", "'utf-8'", ")", "# unicode", "return", "formatted", "return", "wrapper" ]
Handles bytestring and unicode inputs for the `__format__()` method in Python 2. This function has no effect in Python 3. :param method: The `__format__()` method to wrap. :return: The wrapped method.
[ "Handles", "bytestring", "and", "unicode", "inputs", "for", "the", "__format__", "()", "method", "in", "Python", "2", ".", "This", "function", "has", "no", "effect", "in", "Python", "3", "." ]
python
train
27.45
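An illustrative application to a class's __format__ (the Distance class here is made up):

from nibble.decorators import python_2_format_compatible

class Distance(object):
    def __init__(self, metres):
        self.metres = metres

    @python_2_format_compatible
    def __format__(self, format_spec):
        return u'{0} m'.format(self.metres)

# On Python 2, '{0}'.format(...) passes a bytestring spec and gets bytes back;
# on Python 3 the decorator is a no-op.
print('{0}'.format(Distance(5)))  # 5 m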
GuyAllard/markov_clustering
markov_clustering/mcl.py
https://github.com/GuyAllard/markov_clustering/blob/28787cf64ef06bf024ff915246008c767ea830cf/markov_clustering/mcl.py#L56-L79
def add_self_loops(matrix, loop_value): """ Add self-loops to the matrix by setting the diagonal to loop_value :param matrix: The matrix to add loops to :param loop_value: Value to use for self-loops :returns: The matrix with self-loops """ shape = matrix.shape assert shape[0] == shape[1], "Error, matrix is not square" if isspmatrix(matrix): new_matrix = matrix.todok() else: new_matrix = matrix.copy() for i in range(shape[0]): new_matrix[i, i] = loop_value if isspmatrix(matrix): return new_matrix.tocsc() return new_matrix
[ "def", "add_self_loops", "(", "matrix", ",", "loop_value", ")", ":", "shape", "=", "matrix", ".", "shape", "assert", "shape", "[", "0", "]", "==", "shape", "[", "1", "]", ",", "\"Error, matrix is not square\"", "if", "isspmatrix", "(", "matrix", ")", ":", "new_matrix", "=", "matrix", ".", "todok", "(", ")", "else", ":", "new_matrix", "=", "matrix", ".", "copy", "(", ")", "for", "i", "in", "range", "(", "shape", "[", "0", "]", ")", ":", "new_matrix", "[", "i", ",", "i", "]", "=", "loop_value", "if", "isspmatrix", "(", "matrix", ")", ":", "return", "new_matrix", ".", "tocsc", "(", ")", "return", "new_matrix" ]
Add self-loops to the matrix by setting the diagonal to loop_value :param matrix: The matrix to add loops to :param loop_value: Value to use for self-loops :returns: The matrix with self-loops
[ "Add", "self", "-", "loops", "to", "the", "matrix", "by", "setting", "the", "diagonal", "to", "loop_value", ":", "param", "matrix", ":", "The", "matrix", "to", "add", "loops", "to", ":", "param", "loop_value", ":", "Value", "to", "use", "for", "self", "-", "loops", ":", "returns", ":", "The", "matrix", "with", "self", "-", "loops" ]
python
train
25.041667
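A quick sketch on a 2x2 sparse matrix (the import path is taken from the file shown above):

import numpy as np
from scipy.sparse import csr_matrix
from markov_clustering.mcl import add_self_loops

matrix = csr_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
looped = add_self_loops(matrix, 1)
print(looped.toarray())  # diagonal is now 1; sparse input comes back as CSC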
niklasf/python-chess
chess/engine.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/engine.py#L2158-L2180
async def popen_uci(command: Union[str, List[str]], *, setpgrp: bool = False, loop=None, **popen_args: Any) -> Tuple[asyncio.SubprocessTransport, UciProtocol]: """ Spawns and initializes a UCI engine. :param command: Path of the engine executable, or a list including the path and arguments. :param setpgrp: Open the engine process in a new process group. This will stop signals (such as keyboard interrupts) from propagating from the parent process. Defaults to ``False``. :param popen_args: Additional arguments for `popen <https://docs.python.org/3/library/subprocess.html#popen-constructor>`_. Do not set ``stdin``, ``stdout``, ``bufsize`` or ``universal_newlines``. Returns a subprocess transport and engine protocol pair. """ transport, protocol = await UciProtocol.popen(command, setpgrp=setpgrp, loop=loop, **popen_args) try: await protocol.initialize() except: transport.close() raise return transport, protocol
[ "async", "def", "popen_uci", "(", "command", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ",", "*", ",", "setpgrp", ":", "bool", "=", "False", ",", "loop", "=", "None", ",", "*", "*", "popen_args", ":", "Any", ")", "->", "Tuple", "[", "asyncio", ".", "SubprocessTransport", ",", "UciProtocol", "]", ":", "transport", ",", "protocol", "=", "await", "UciProtocol", ".", "popen", "(", "command", ",", "setpgrp", "=", "setpgrp", ",", "loop", "=", "loop", ",", "*", "*", "popen_args", ")", "try", ":", "await", "protocol", ".", "initialize", "(", ")", "except", ":", "transport", ".", "close", "(", ")", "raise", "return", "transport", ",", "protocol" ]
Spawns and initializes a UCI engine. :param command: Path of the engine executable, or a list including the path and arguments. :param setpgrp: Open the engine process in a new process group. This will stop signals (such as keyboard interrupts) from propagating from the parent process. Defaults to ``False``. :param popen_args: Additional arguments for `popen <https://docs.python.org/3/library/subprocess.html#popen-constructor>`_. Do not set ``stdin``, ``stdout``, ``bufsize`` or ``universal_newlines``. Returns a subprocess transport and engine protocol pair.
[ "Spawns", "and", "initializes", "a", "UCI", "engine", "." ]
python
train
44.217391
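A minimal driver, assuming a UCI engine binary such as stockfish is available on the PATH:

import asyncio
import chess
import chess.engine

async def main():
    transport, engine = await chess.engine.popen_uci('stockfish')
    result = await engine.play(chess.Board(), chess.engine.Limit(time=0.1))
    print(result.move)
    await engine.quit()

asyncio.run(main())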
ronaldguillen/wave
wave/serializers.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/serializers.py#L1448-L1458
def get_default_field_names(self, declared_fields, model_info): """ Return the default list of field names that will be used if the `Meta.fields` option is not specified. """ return ( [self.url_field_name] + list(declared_fields.keys()) + list(model_info.fields.keys()) + list(model_info.forward_relations.keys()) )
[ "def", "get_default_field_names", "(", "self", ",", "declared_fields", ",", "model_info", ")", ":", "return", "(", "[", "self", ".", "url_field_name", "]", "+", "list", "(", "declared_fields", ".", "keys", "(", ")", ")", "+", "list", "(", "model_info", ".", "fields", ".", "keys", "(", ")", ")", "+", "list", "(", "model_info", ".", "forward_relations", ".", "keys", "(", ")", ")", ")" ]
Return the default list of field names that will be used if the `Meta.fields` option is not specified.
[ "Return", "the", "default", "list", "of", "field", "names", "that", "will", "be", "used", "if", "the", "Meta", ".", "fields", "option", "is", "not", "specified", "." ]
python
train
36.454545
Netflix-Skunkworks/cloudaux
cloudaux/orchestration/aws/elb.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/orchestration/aws/elb.py#L10-L57
def _reformat_policy(policy): """ Policies returned from boto3 are massive, ugly, and difficult to read. This method flattens and reformats the policy. :param policy: Result from invoking describe_load_balancer_policies(...) :return: Returns a tuple containing policy_name and the reformatted policy dict. """ policy_name = policy['PolicyName'] ret = {} ret['type'] = policy['PolicyTypeName'] attrs = policy['PolicyAttributeDescriptions'] if ret['type'] != 'SSLNegotiationPolicyType': return policy_name, ret attributes = dict() for attr in attrs: attributes[attr['AttributeName']] = attr['AttributeValue'] ret['protocols'] = dict() ret['protocols']['sslv2'] = bool(attributes.get('Protocol-SSLv2')) ret['protocols']['sslv3'] = bool(attributes.get('Protocol-SSLv3')) ret['protocols']['tlsv1'] = bool(attributes.get('Protocol-TLSv1')) ret['protocols']['tlsv1_1'] = bool(attributes.get('Protocol-TLSv1.1')) ret['protocols']['tlsv1_2'] = bool(attributes.get('Protocol-TLSv1.2')) ret['server_defined_cipher_order'] = bool(attributes.get('Server-Defined-Cipher-Order')) ret['reference_security_policy'] = attributes.get('Reference-Security-Policy', None) non_ciphers = [ 'Server-Defined-Cipher-Order', 'Protocol-SSLv2', 'Protocol-SSLv3', 'Protocol-TLSv1', 'Protocol-TLSv1.1', 'Protocol-TLSv1.2', 'Reference-Security-Policy' ] ciphers = [] for cipher in attributes: if attributes[cipher] == 'true' and cipher not in non_ciphers: ciphers.append(cipher) ciphers.sort() ret['supported_ciphers'] = ciphers return policy_name, ret
[ "def", "_reformat_policy", "(", "policy", ")", ":", "policy_name", "=", "policy", "[", "'PolicyName'", "]", "ret", "=", "{", "}", "ret", "[", "'type'", "]", "=", "policy", "[", "'PolicyTypeName'", "]", "attrs", "=", "policy", "[", "'PolicyAttributeDescriptions'", "]", "if", "ret", "[", "'type'", "]", "!=", "'SSLNegotiationPolicyType'", ":", "return", "policy_name", ",", "ret", "attributes", "=", "dict", "(", ")", "for", "attr", "in", "attrs", ":", "attributes", "[", "attr", "[", "'AttributeName'", "]", "]", "=", "attr", "[", "'AttributeValue'", "]", "ret", "[", "'protocols'", "]", "=", "dict", "(", ")", "ret", "[", "'protocols'", "]", "[", "'sslv2'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Protocol-SSLv2'", ")", ")", "ret", "[", "'protocols'", "]", "[", "'sslv3'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Protocol-SSLv3'", ")", ")", "ret", "[", "'protocols'", "]", "[", "'tlsv1'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Protocol-TLSv1'", ")", ")", "ret", "[", "'protocols'", "]", "[", "'tlsv1_1'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Protocol-TLSv1.1'", ")", ")", "ret", "[", "'protocols'", "]", "[", "'tlsv1_2'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Protocol-TLSv1.2'", ")", ")", "ret", "[", "'server_defined_cipher_order'", "]", "=", "bool", "(", "attributes", ".", "get", "(", "'Server-Defined-Cipher-Order'", ")", ")", "ret", "[", "'reference_security_policy'", "]", "=", "attributes", ".", "get", "(", "'Reference-Security-Policy'", ",", "None", ")", "non_ciphers", "=", "[", "'Server-Defined-Cipher-Order'", ",", "'Protocol-SSLv2'", ",", "'Protocol-SSLv3'", ",", "'Protocol-TLSv1'", ",", "'Protocol-TLSv1.1'", ",", "'Protocol-TLSv1.2'", ",", "'Reference-Security-Policy'", "]", "ciphers", "=", "[", "]", "for", "cipher", "in", "attributes", ":", "if", "attributes", "[", "cipher", "]", "==", "'true'", "and", "cipher", "not", "in", "non_ciphers", ":", "ciphers", ".", "append", "(", "cipher", ")", "ciphers", ".", "sort", "(", ")", "ret", "[", "'supported_ciphers'", "]", "=", "ciphers", "return", "policy_name", ",", "ret" ]
Policies returned from boto3 are massive, ugly, and difficult to read. This method flattens and reformats the policy. :param policy: Result from invoking describe_load_balancer_policies(...) :return: Returns a tuple containing policy_name and the reformatted policy dict.
[ "Policies", "returned", "from", "boto3", "are", "massive", "ugly", "and", "difficult", "to", "read", ".", "This", "method", "flattens", "and", "reformats", "the", "policy", "." ]
python
valid
35.083333
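Fed a trimmed-down describe_load_balancer_policies(...) entry (the attribute values below are invented), the flattening looks like this:

from cloudaux.orchestration.aws.elb import _reformat_policy

policy = {
    'PolicyName': 'ELBSecurityPolicy-Example',
    'PolicyTypeName': 'SSLNegotiationPolicyType',
    'PolicyAttributeDescriptions': [
        {'AttributeName': 'Protocol-TLSv1.2', 'AttributeValue': 'true'},
        {'AttributeName': 'Server-Defined-Cipher-Order', 'AttributeValue': 'true'},
        {'AttributeName': 'AES128-GCM-SHA256', 'AttributeValue': 'true'},
    ],
}
name, flat = _reformat_policy(policy)
print(name)                          # ELBSecurityPolicy-Example
print(flat['protocols']['tlsv1_2'])  # True
print(flat['supported_ciphers'])     # ['AES128-GCM-SHA256']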
chaoss/grimoirelab-elk
grimoire_elk/enriched/crates.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/crates.py#L99-L125
def get_rich_events(self, item): """ In the events there are some common fields with the crate. The name of the field must be the same in the crate and in the downloads event so we can filter using it in crate and event at the same time. * Fields that don't change: the field does not change with the events in a crate so the value is always the same in the events of a crate. * Fields that change: the value of the field changes with events """ if "version_downloads_data" not in item['data']: return [] # To get values from the task eitem = self.get_rich_item(item) for sample in item['data']["version_downloads_data"]["version_downloads"]: event = deepcopy(eitem) event['download_sample_id'] = sample['id'] event['sample_date'] = sample['date'] sample_date = parser.parse(event['sample_date']) event['sample_version'] = sample['version'] event['sample_downloads'] = sample['downloads'] event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event")) yield event
[ "def", "get_rich_events", "(", "self", ",", "item", ")", ":", "if", "\"version_downloads_data\"", "not", "in", "item", "[", "'data'", "]", ":", "return", "[", "]", "# To get values from the task", "eitem", "=", "self", ".", "get_rich_item", "(", "item", ")", "for", "sample", "in", "item", "[", "'data'", "]", "[", "\"version_downloads_data\"", "]", "[", "\"version_downloads\"", "]", ":", "event", "=", "deepcopy", "(", "eitem", ")", "event", "[", "'download_sample_id'", "]", "=", "sample", "[", "'id'", "]", "event", "[", "'sample_date'", "]", "=", "sample", "[", "'date'", "]", "sample_date", "=", "parser", ".", "parse", "(", "event", "[", "'sample_date'", "]", ")", "event", "[", "'sample_version'", "]", "=", "sample", "[", "'version'", "]", "event", "[", "'sample_downloads'", "]", "=", "sample", "[", "'downloads'", "]", "event", ".", "update", "(", "self", ".", "get_grimoire_fields", "(", "sample_date", ".", "isoformat", "(", ")", ",", "\"downloads_event\"", ")", ")", "yield", "event" ]
In the events there are some common fields with the crate. The name of the field must be the same in the crate and in the downloads event so we can filter using it in crate and event at the same time. * Fields that don't change: the field does not change with the events in a crate so the value is always the same in the events of a crate. * Fields that change: the value of the field changes with events
[ "In", "the", "events", "there", "are", "some", "common", "fields", "with", "the", "crate", ".", "The", "name", "of", "the", "field", "must", "be", "the", "same", "in", "the", "crate", "and", "in", "the", "downloads", "event", "so", "we", "can", "filter", "using", "it", "in", "crate", "and", "event", "at", "the", "same", "time", "." ]
python
train
43.259259
byt3bl33d3r/CrackMapExec
cme/protocols/smb/database.py
https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/protocols/smb/database.py#L346-L354
def is_user_valid(self, userID): """ Check if this User ID is valid. """ cur = self.conn.cursor() cur.execute('SELECT * FROM users WHERE id=? LIMIT 1', [userID]) results = cur.fetchall() cur.close() return len(results) > 0
[ "def", "is_user_valid", "(", "self", ",", "userID", ")", ":", "cur", "=", "self", ".", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'SELECT * FROM users WHERE id=? LIMIT 1'", ",", "[", "userID", "]", ")", "results", "=", "cur", ".", "fetchall", "(", ")", "cur", ".", "close", "(", ")", "return", "len", "(", "results", ")", ">", "0" ]
Check if this User ID is valid.
[ "Check", "if", "this", "User", "ID", "is", "valid", "." ]
python
train
30.888889
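The underlying pattern, reproduced standalone with sqlite3 (the users schema here is a guess):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE users (id INTEGER PRIMARY KEY, username TEXT)')
conn.execute("INSERT INTO users VALUES (1, 'admin')")

cur = conn.cursor()
cur.execute('SELECT * FROM users WHERE id=? LIMIT 1', [1])
results = cur.fetchall()
cur.close()
print(len(results) > 0)  # True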
marcomusy/vtkplotter
vtkplotter/analysis.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/analysis.py#L1590-L1623
def splitByConnectivity(actor, maxdepth=100): """ Split a mesh by connectivity and order the pieces by increasing area. :param int maxdepth: only consider this number of mesh parts. .. hint:: |splitmesh| |splitmesh.py|_ """ actor.addIDs() pd = actor.polydata() cf = vtk.vtkConnectivityFilter() cf.SetInputData(pd) cf.SetExtractionModeToAllRegions() cf.ColorRegionsOn() cf.Update() cpd = cf.GetOutput() a = Actor(cpd) alist = [] for t in range(max(a.scalars("RegionId")) - 1): if t == maxdepth: break suba = a.clone().threshold("RegionId", t - 0.1, t + 0.1) area = suba.area() alist.append([suba, area]) alist.sort(key=lambda x: x[1]) alist.reverse() blist = [] for i, l in enumerate(alist): l[0].color(i + 1) l[0].mapper.ScalarVisibilityOff() blist.append(l[0]) return blist
[ "def", "splitByConnectivity", "(", "actor", ",", "maxdepth", "=", "100", ")", ":", "actor", ".", "addIDs", "(", ")", "pd", "=", "actor", ".", "polydata", "(", ")", "cf", "=", "vtk", ".", "vtkConnectivityFilter", "(", ")", "cf", ".", "SetInputData", "(", "pd", ")", "cf", ".", "SetExtractionModeToAllRegions", "(", ")", "cf", ".", "ColorRegionsOn", "(", ")", "cf", ".", "Update", "(", ")", "cpd", "=", "cf", ".", "GetOutput", "(", ")", "a", "=", "Actor", "(", "cpd", ")", "alist", "=", "[", "]", "for", "t", "in", "range", "(", "max", "(", "a", ".", "scalars", "(", "\"RegionId\"", ")", ")", "-", "1", ")", ":", "if", "t", "==", "maxdepth", ":", "break", "suba", "=", "a", ".", "clone", "(", ")", ".", "threshold", "(", "\"RegionId\"", ",", "t", "-", "0.1", ",", "t", "+", "0.1", ")", "area", "=", "suba", ".", "area", "(", ")", "alist", ".", "append", "(", "[", "suba", ",", "area", "]", ")", "alist", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "alist", ".", "reverse", "(", ")", "blist", "=", "[", "]", "for", "i", ",", "l", "in", "enumerate", "(", "alist", ")", ":", "l", "[", "0", "]", ".", "color", "(", "i", "+", "1", ")", "l", "[", "0", "]", ".", "mapper", ".", "ScalarVisibilityOff", "(", ")", "blist", ".", "append", "(", "l", "[", "0", "]", ")", "return", "blist" ]
Split a mesh by connectivity and order the pieces by increasing area. :param int maxdepth: only consider this number of mesh parts. .. hint:: |splitmesh| |splitmesh.py|_
[ "Split", "a", "mesh", "by", "connectivity", "and", "order", "the", "pieces", "by", "increasing", "area", "." ]
python
train
26.382353
NLeSC/noodles
noodles/patterns/functional_patterns.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/patterns/functional_patterns.py#L43-L54
def filter(pred: Callable, xs: Iterable): """ Applies a predicate to a list returning a :py:class:`PromisedObject` containing the values satisfying the predicate. :param pred: predicate function. :param xs: iterable object. :returns: :py:class:`PromisedObject` """ generator = (x for x in xs if pred(x)) return gather(*generator)
[ "def", "filter", "(", "pred", ":", "Callable", ",", "xs", ":", "Iterable", ")", ":", "generator", "=", "(", "x", "for", "x", "in", "xs", "if", "pred", "(", "x", ")", ")", "return", "gather", "(", "*", "generator", ")" ]
Applies a predicate to a list returning a :py:class:`PromisedObject` containing the values satisfying the predicate. :param pred: predicate function. :param xs: iterable object. :returns: :py:class:`PromisedObject`
[ "Applies", "a", "predicate", "to", "a", "list", "returning", "a", ":", "py", ":", "class", ":", "PromisedObject", "containing", "the", "values", "satisfying", "the", "predicate", "." ]
python
train
29.666667
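A sketch of evaluating the resulting promise, assuming noodles' single-threaded runner run_single and the module path shown above:

from noodles import run_single
from noodles.patterns.functional_patterns import filter as p_filter

promise = p_filter(lambda x: x % 2 == 0, range(10))
print(run_single(promise))  # [0, 2, 4, 6, 8]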
Azure/msrestazure-for-python
msrestazure/polling/arm_polling.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/polling/arm_polling.py#L368-L386
def initialize(self, client, initial_response, deserialization_callback): """Set the initial status of this LRO. :param initial_response: The initial response of the poller :raises: CloudError if initial status is incorrect LRO state """ self._client = client self._response = initial_response self._operation = LongRunningOperation(initial_response, deserialization_callback, self._lro_options) try: self._operation.set_initial_status(initial_response) except BadStatus: self._operation.status = 'Failed' raise CloudError(initial_response) except BadResponse as err: self._operation.status = 'Failed' raise CloudError(initial_response, str(err)) except OperationFailed: raise CloudError(initial_response)
[ "def", "initialize", "(", "self", ",", "client", ",", "initial_response", ",", "deserialization_callback", ")", ":", "self", ".", "_client", "=", "client", "self", ".", "_response", "=", "initial_response", "self", ".", "_operation", "=", "LongRunningOperation", "(", "initial_response", ",", "deserialization_callback", ",", "self", ".", "_lro_options", ")", "try", ":", "self", ".", "_operation", ".", "set_initial_status", "(", "initial_response", ")", "except", "BadStatus", ":", "self", ".", "_operation", ".", "status", "=", "'Failed'", "raise", "CloudError", "(", "initial_response", ")", "except", "BadResponse", "as", "err", ":", "self", ".", "_operation", ".", "status", "=", "'Failed'", "raise", "CloudError", "(", "initial_response", ",", "str", "(", "err", ")", ")", "except", "OperationFailed", ":", "raise", "CloudError", "(", "initial_response", ")" ]
Set the initial status of this LRO. :param initial_response: The initial response of the poller :raises: CloudError if initial status is incorrect LRO state
[ "Set", "the", "initial", "status", "of", "this", "LRO", "." ]
python
train
44.631579
Qiskit/qiskit-terra
qiskit/dagcircuit/dagcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L476-L493
def _map_condition(self, wire_map, condition): """Use the wire_map dict to change the condition tuple's creg name. Args: wire_map (dict): a map from wires to wires condition (tuple): (ClassicalRegister,int) Returns: tuple(ClassicalRegister,int): new condition """ if condition is None: new_condition = None else: # Map the register name, using fact that registers must not be # fragmented by the wire_map (this must have been checked # elsewhere) bit0 = (condition[0], 0) new_condition = (wire_map.get(bit0, bit0)[0], condition[1]) return new_condition
[ "def", "_map_condition", "(", "self", ",", "wire_map", ",", "condition", ")", ":", "if", "condition", "is", "None", ":", "new_condition", "=", "None", "else", ":", "# Map the register name, using fact that registers must not be", "# fragmented by the wire_map (this must have been checked", "# elsewhere)", "bit0", "=", "(", "condition", "[", "0", "]", ",", "0", ")", "new_condition", "=", "(", "wire_map", ".", "get", "(", "bit0", ",", "bit0", ")", "[", "0", "]", ",", "condition", "[", "1", "]", ")", "return", "new_condition" ]
Use the wire_map dict to change the condition tuple's creg name. Args: wire_map (dict): a map from wires to wires condition (tuple): (ClassicalRegister,int) Returns: tuple(ClassicalRegister,int): new condition
[ "Use", "the", "wire_map", "dict", "to", "change", "the", "condition", "tuple", "s", "creg", "name", "." ]
python
test
38.888889
bcbio/bcbio-nextgen
bcbio/rnaseq/featureCounts.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/featureCounts.py#L88-L100
def _strand_flag(data): """ 0: unstranded 1: stranded 2: reverse stranded """ strand_flag = {"unstranded": "0", "firststrand": "2", "secondstrand": "1"} stranded = dd.get_strandedness(data) assert stranded in strand_flag, ("%s is not a valid strandedness value. " "Valid values are 'firststrand', 'secondstrand', " "and 'unstranded'") return strand_flag[stranded]
[ "def", "_strand_flag", "(", "data", ")", ":", "strand_flag", "=", "{", "\"unstranded\"", ":", "\"0\"", ",", "\"firststrand\"", ":", "\"2\"", ",", "\"secondstrand\"", ":", "\"1\"", "}", "stranded", "=", "dd", ".", "get_strandedness", "(", "data", ")", "assert", "stranded", "in", "strand_flag", ",", "(", "\"%s is not a valid strandedness value. \"", "\"Valid values are 'firststrand', 'secondstrand', \"", "\"and 'unstranded'\"", ")", "return", "strand_flag", "[", "stranded", "]" ]
0: unstranded 1: stranded 2: reverse stranded
[ "0", ":", "unstranded", "1", ":", "stranded", "2", ":", "reverse", "stranded" ]
python
train
37.769231
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L4726-L4739
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc): """ Rank-k operation on Hermitian matrix. """ status = _libcublas.cublasZherk_v2(handle, _CUBLAS_FILL_MODE[uplo], _CUBLAS_OP[trans], n, k, ctypes.byref(ctypes.c_double(alpha)), int(A), lda, ctypes.byref(ctypes.c_double(beta)), int(C), ldc) cublasCheckStatus(status)
[ "def", "cublasZherk", "(", "handle", ",", "uplo", ",", "trans", ",", "n", ",", "k", ",", "alpha", ",", "A", ",", "lda", ",", "beta", ",", "C", ",", "ldc", ")", ":", "status", "=", "_libcublas", ".", "cublasZherk_v2", "(", "handle", ",", "_CUBLAS_FILL_MODE", "[", "uplo", "]", ",", "_CUBLAS_OP", "[", "trans", "]", ",", "n", ",", "k", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_double", "(", "alpha", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_double", "(", "beta", ")", ")", ",", "int", "(", "C", ")", ",", "ldc", ")", "cublasCheckStatus", "(", "status", ")" ]
Rank-k operation on Hermitian matrix.
[ "Rank", "-", "k", "operation", "on", "Hermitian", "matrix", "." ]
python
train
41.928571
mailund/statusbar
statusbar/__init__.py
https://github.com/mailund/statusbar/blob/e42ac88cdaae281d47318dd8dcf156bfff2a7b2a/statusbar/__init__.py#L132-L143
def add_progress(self, count, symbol='#', color=None, on_color=None, attrs=None): """Add a section of progress to the progressbar. The progress is captured by "count" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the "symbol" character and the foreground and background colours and display style determined by the "fg", "bg" and "style" parameters. For these, use the colorama package to set up the formatting. """ self._progress.add_progress(count, symbol, color, on_color, attrs)
[ "def", "add_progress", "(", "self", ",", "count", ",", "symbol", "=", "'#'", ",", "color", "=", "None", ",", "on_color", "=", "None", ",", "attrs", "=", "None", ")", ":", "self", ".", "_progress", ".", "add_progress", "(", "count", ",", "symbol", ",", "color", ",", "on_color", ",", "attrs", ")" ]
Add a section of progress to the progressbar. The progress is captured by "count" and displayed as a fraction of the statusbar width proportional to this count over the total progress displayed. The progress will be displayed using the "symbol" character and the foreground and background colours and display style determined by the "fg", "bg" and "style" parameters. For these, use the colorama package to set up the formatting.
[ "Add", "a", "section", "of", "progress", "to", "the", "progressbar", "." ]
python
train
56
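Typical use, assuming the StatusBar front-end from this package and its format_status renderer:

from statusbar import StatusBar

sb = StatusBar('Test')
sb.add_progress(10, '#', color='green')
sb.add_progress(5, '-', color='red')
print(sb.format_status())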
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1432-L1446
def simxPackInts(intList): ''' Please have a look at the function description/documentation in the V-REP user manual ''' if sys.version_info[0] == 3: s=bytes() for i in range(len(intList)): s=s+struct.pack('<i',intList[i]) s=bytearray(s) else: s='' for i in range(len(intList)): s+=struct.pack('<i',intList[i]) return s
[ "def", "simxPackInts", "(", "intList", ")", ":", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "s", "=", "bytes", "(", ")", "for", "i", "in", "range", "(", "len", "(", "intList", ")", ")", ":", "s", "=", "s", "+", "struct", ".", "pack", "(", "'<i'", ",", "intList", "[", "i", "]", ")", "s", "=", "bytearray", "(", "s", ")", "else", ":", "s", "=", "''", "for", "i", "in", "range", "(", "len", "(", "intList", ")", ")", ":", "s", "+=", "struct", ".", "pack", "(", "'<i'", ",", "intList", "[", "i", "]", ")", "return", "s" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
26.533333
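Round-tripping the packed bytes (module path from the URL above):

import struct
from pypot.vrep.remoteApiBindings import vrep

packed = vrep.simxPackInts([1, 2, 3])
print(struct.unpack('<3i', bytes(packed)))  # (1, 2, 3)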
gwastro/pycbc
pycbc/filter/matchedfilter.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/matchedfilter.py#L1122-L1169
def get_cutoff_indices(flow, fhigh, df, N): """ Gets the indices of a frequency series at which to stop an overlap calculation. Parameters ---------- flow: float The frequency (in Hz) of the lower index. fhigh: float The frequency (in Hz) of the upper index. df: float The frequency step (in Hz) of the frequency series. N: int The number of points in the **time** series. Can be odd or even. Returns ------- kmin: int kmax: int """ if flow: kmin = int(flow / df) if kmin < 0: err_msg = "Start frequency cannot be negative. " err_msg += "Supplied value and kmin {} and {}".format(flow, kmin) raise ValueError(err_msg) else: kmin = 1 if fhigh: kmax = int(fhigh / df ) if kmax > int((N + 1)/2.): kmax = int((N + 1)/2.) else: # int() truncates towards 0, so this is # equivalent to the floor of the float kmax = int((N + 1)/2.) if kmax <= kmin: err_msg = "Kmax cannot be less than or equal to kmin. " err_msg += "Provided values of frequencies (min,max) were " err_msg += "{} and {} ".format(flow, fhigh) err_msg += "corresponding to (kmin, kmax) of " err_msg += "{} and {}.".format(kmin, kmax) raise ValueError(err_msg) return kmin,kmax
[ "def", "get_cutoff_indices", "(", "flow", ",", "fhigh", ",", "df", ",", "N", ")", ":", "if", "flow", ":", "kmin", "=", "int", "(", "flow", "/", "df", ")", "if", "kmin", "<", "0", ":", "err_msg", "=", "\"Start frequency cannot be negative. \"", "err_msg", "+=", "\"Supplied value and kmin {} and {}\"", ".", "format", "(", "flow", ",", "kmin", ")", "raise", "ValueError", "(", "err_msg", ")", "else", ":", "kmin", "=", "1", "if", "fhigh", ":", "kmax", "=", "int", "(", "fhigh", "/", "df", ")", "if", "kmax", ">", "int", "(", "(", "N", "+", "1", ")", "/", "2.", ")", ":", "kmax", "=", "int", "(", "(", "N", "+", "1", ")", "/", "2.", ")", "else", ":", "# int() truncates towards 0, so this is", "# equivalent to the floor of the float", "kmax", "=", "int", "(", "(", "N", "+", "1", ")", "/", "2.", ")", "if", "kmax", "<=", "kmin", ":", "err_msg", "=", "\"Kmax cannot be less than or equal to kmin. \"", "err_msg", "+=", "\"Provided values of frequencies (min,max) were \"", "err_msg", "+=", "\"{} and {} \"", ".", "format", "(", "flow", ",", "fhigh", ")", "err_msg", "+=", "\"corresponding to (kmin, kmax) of \"", "err_msg", "+=", "\"{} and {}.\"", ".", "format", "(", "kmin", ",", "kmax", ")", "raise", "ValueError", "(", "err_msg", ")", "return", "kmin", ",", "kmax" ]
Gets the indices of a frequency series at which to stop an overlap calculation. Parameters ---------- flow: float The frequency (in Hz) of the lower index. fhigh: float The frequency (in Hz) of the upper index. df: float The frequency step (in Hz) of the frequency series. N: int The number of points in the **time** series. Can be odd or even. Returns ------- kmin: int kmax: int
[ "Gets", "the", "indices", "of", "a", "frequency", "series", "at", "which", "to", "stop", "an", "overlap", "calculation", "." ]
python
train
28.541667
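For example, with a frequency step of df = 0.25 Hz and N = 4096 time samples:

from pycbc.filter.matchedfilter import get_cutoff_indices

kmin, kmax = get_cutoff_indices(20.0, 512.0, 0.25, 4096)
print(kmin, kmax)  # 80 2048 -- kmax is capped at int((N + 1) / 2.)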
quantopian/pgcontents
pgcontents/query.py
https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/query.py#L454-L508
def rename_directory(db, user_id, old_api_path, new_api_path): """ Rename a directory. """ old_db_path = from_api_dirname(old_api_path) new_db_path = from_api_dirname(new_api_path) if old_db_path == '/': raise RenameRoot('Renaming the root directory is not permitted.') # Overwriting existing directories is disallowed. if _dir_exists(db, user_id, new_db_path): raise DirectoryExists(new_api_path) # Set this foreign key constraint to deferred so it's not violated # when we run the first statement to update the name of the directory. db.execute('SET CONSTRAINTS ' 'pgcontents.directories_parent_user_id_fkey DEFERRED') # Update name column for the directory that's being renamed db.execute( directories.update().where( and_( directories.c.user_id == user_id, directories.c.name == old_db_path, ) ).values( name=new_db_path, ) ) # Update the name and parent_name of any descendant directories. Do # this in a single statement so the non-deferrable check constraint # is satisfied. db.execute( directories.update().where( and_( directories.c.user_id == user_id, directories.c.name.startswith(old_db_path), directories.c.parent_name.startswith(old_db_path), ) ).values( name=func.concat( new_db_path, func.right(directories.c.name, -func.length(old_db_path)) ), parent_name=func.concat( new_db_path, func.right( directories.c.parent_name, -func.length(old_db_path) ) ), ) )
[ "def", "rename_directory", "(", "db", ",", "user_id", ",", "old_api_path", ",", "new_api_path", ")", ":", "old_db_path", "=", "from_api_dirname", "(", "old_api_path", ")", "new_db_path", "=", "from_api_dirname", "(", "new_api_path", ")", "if", "old_db_path", "==", "'/'", ":", "raise", "RenameRoot", "(", "'Renaming the root directory is not permitted.'", ")", "# Overwriting existing directories is disallowed.", "if", "_dir_exists", "(", "db", ",", "user_id", ",", "new_db_path", ")", ":", "raise", "DirectoryExists", "(", "new_api_path", ")", "# Set this foreign key constraint to deferred so it's not violated", "# when we run the first statement to update the name of the directory.", "db", ".", "execute", "(", "'SET CONSTRAINTS '", "'pgcontents.directories_parent_user_id_fkey DEFERRED'", ")", "# Update name column for the directory that's being renamed", "db", ".", "execute", "(", "directories", ".", "update", "(", ")", ".", "where", "(", "and_", "(", "directories", ".", "c", ".", "user_id", "==", "user_id", ",", "directories", ".", "c", ".", "name", "==", "old_db_path", ",", ")", ")", ".", "values", "(", "name", "=", "new_db_path", ",", ")", ")", "# Update the name and parent_name of any descendant directories. Do", "# this in a single statement so the non-deferrable check constraint", "# is satisfied.", "db", ".", "execute", "(", "directories", ".", "update", "(", ")", ".", "where", "(", "and_", "(", "directories", ".", "c", ".", "user_id", "==", "user_id", ",", "directories", ".", "c", ".", "name", ".", "startswith", "(", "old_db_path", ")", ",", "directories", ".", "c", ".", "parent_name", ".", "startswith", "(", "old_db_path", ")", ",", ")", ")", ".", "values", "(", "name", "=", "func", ".", "concat", "(", "new_db_path", ",", "func", ".", "right", "(", "directories", ".", "c", ".", "name", ",", "-", "func", ".", "length", "(", "old_db_path", ")", ")", ")", ",", "parent_name", "=", "func", ".", "concat", "(", "new_db_path", ",", "func", ".", "right", "(", "directories", ".", "c", ".", "parent_name", ",", "-", "func", ".", "length", "(", "old_db_path", ")", ")", ")", ",", ")", ")" ]
Rename a directory.
[ "Rename", "a", "directory", "." ]
python
test
32.490909
Basic-Components/msgpack-rpc-protocol
python/pymprpc/mixins/encoder_decoder_mixin.py
https://github.com/Basic-Components/msgpack-rpc-protocol/blob/7983ace5d5cfd7214df6803f9b1de458df5fe3b1/python/pymprpc/mixins/encoder_decoder_mixin.py#L66-L91
def decoder(self, response: bytes): """Decode the response from bytes. Checks whether debug mode is in use and whether the data is compressed, then decodes the byte string into a Python-dict response accordingly. Parameters: response (bytes): - the byte-string encoding of the response Return: (Dict[str, Any]): - the response as a Python dict """ response = response[:-(len(self.SEPARATOR))] if self.compreser is not None: response = self.compreser.decompress(response) if self.debug is True: response = json.loads(response.decode('utf-8')) else: response = msgpack.unpackb(response, encoding='utf-8') version = response.get("MPRPC") if version and version == self.VERSION: return response else: raise ProtocolException("Wrong Protocol")
[ "def", "decoder", "(", "self", ",", "response", ":", "bytes", ")", ":", "response", "=", "response", "[", ":", "-", "(", "len", "(", "self", ".", "SEPARATOR", ")", ")", "]", "if", "self", ".", "compreser", "is", "not", "None", ":", "response", "=", "self", ".", "compreser", ".", "decompress", "(", "response", ")", "if", "self", ".", "debug", "is", "True", ":", "response", "=", "json", ".", "loads", "(", "response", ".", "decode", "(", "'utf-8'", ")", ")", "else", ":", "response", "=", "msgpack", ".", "unpackb", "(", "response", ",", "encoding", "=", "'utf-8'", ")", "version", "=", "response", ".", "get", "(", "\"MPRPC\"", ")", "if", "version", "and", "version", "==", "self", ".", "VERSION", ":", "return", "response", "else", ":", "raise", "ProtocolException", "(", "\"Wrong Protocol\"", ")" ]
Decode the response from bytes. Checks whether debug mode is in use and whether the data is compressed, then decodes the byte string into a Python-dict response accordingly. Parameters: response (bytes): - the byte-string encoding of the response Return: (Dict[str, Any]): - the response as a Python dict
[ "Decode", "the", "response", "from", "bytes", "." ]
python
train
28.5
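A standalone round-trip of the wire format this decoder expects (the separator and version string below are placeholders):

import msgpack

VERSION = '0.5'
SEPARATOR = b'##END##'  # placeholder; the real value comes from the mixin

msg = {'MPRPC': VERSION, 'CODE': 200, 'MESSAGE': 'ok'}
wire = msgpack.packb(msg, use_bin_type=True) + SEPARATOR

body = wire[:-len(SEPARATOR)]
decoded = msgpack.unpackb(body, raw=False)  # raw=False replaces encoding='utf-8' in newer msgpack
assert decoded['MPRPC'] == VERSION
print(decoded)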
david-caro/python-autosemver
autosemver/__init__.py
https://github.com/david-caro/python-autosemver/blob/3bc0adb70c33e4bd3623ae4c1944d5ee37f4303d/autosemver/__init__.py#L140-L144
def distutils_old_autosemver_case(metadata, attr, value): """DEPRECATED""" metadata = distutils_default_case(metadata, attr, value) create_changelog(bugtracker_url=getattr(metadata, 'bugtracker_url', '')) return metadata
[ "def", "distutils_old_autosemver_case", "(", "metadata", ",", "attr", ",", "value", ")", ":", "metadata", "=", "distutils_default_case", "(", "metadata", ",", "attr", ",", "value", ")", "create_changelog", "(", "bugtracker_url", "=", "getattr", "(", "metadata", ",", "'bugtracker_url'", ",", "''", ")", ")", "return", "metadata" ]
DEPRECATED
[ "DEPRECATED" ]
python
train
46.4
spacetelescope/synphot_refactor
synphot/units.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L143-L225
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
[ "def", "convert_flux", "(", "wavelengths", ",", "fluxes", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "fluxes", ",", "u", ".", "Quantity", ")", ":", "fluxes", "=", "fluxes", "*", "PHOTLAM", "out_flux_unit", "=", "validate_unit", "(", "out_flux_unit", ")", "out_flux_unit_name", "=", "out_flux_unit", ".", "to_string", "(", ")", "in_flux_unit_name", "=", "fluxes", ".", "unit", ".", "to_string", "(", ")", "# No conversion necessary", "if", "in_flux_unit_name", "==", "out_flux_unit_name", ":", "return", "fluxes", "in_flux_type", "=", "fluxes", ".", "unit", ".", "physical_type", "out_flux_type", "=", "out_flux_unit", ".", "physical_type", "# Wavelengths must Quantity", "if", "not", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "wavelengths", "=", "wavelengths", "*", "u", ".", "AA", "eqv", "=", "u", ".", "spectral_density", "(", "wavelengths", ")", "# Use built-in astropy equivalencies", "try", ":", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "# Use PHOTLAM as in-between unit", "except", "u", ".", "UnitConversionError", ":", "# Convert input unit to PHOTLAM", "if", "fluxes", ".", "unit", "==", "PHOTLAM", ":", "flux_photlam", "=", "fluxes", "elif", "in_flux_type", "!=", "'unknown'", ":", "flux_photlam", "=", "fluxes", ".", "to", "(", "PHOTLAM", ",", "eqv", ")", "else", ":", "flux_photlam", "=", "_convert_flux", "(", "wavelengths", ",", "fluxes", ",", "PHOTLAM", ",", "*", "*", "kwargs", ")", "# Convert PHOTLAM to output unit", "if", "out_flux_unit", "==", "PHOTLAM", ":", "out_flux", "=", "flux_photlam", "elif", "out_flux_type", "!=", "'unknown'", ":", "out_flux", "=", "flux_photlam", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "else", ":", "out_flux", "=", "_convert_flux", "(", "wavelengths", ",", "flux_photlam", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", "return", "out_flux" ]
Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed.
[ "Perform", "conversion", "for", ":", "ref", ":", "supported", "flux", "units", "<synphot", "-", "flux", "-", "units", ">", "." ]
python
train
31.542169
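A short note on the record above: the fast path in convert_flux is astropy's own spectral_density equivalency, which ties flux-density units together at the given wavelengths. Below is a minimal sketch of that path using plain astropy units; PHOTLAM itself is a synphot unit, so an FLAM-to-Jansky conversion stands in here.

import astropy.units as u
import numpy as np

wave = np.array([4000.0, 5500.0, 7000.0]) * u.AA
flam = np.array([1e-15, 2e-15, 3e-15]) * (u.erg / u.s / u.cm**2 / u.AA)

# The equivalency is wavelength-dependent, which is why convert_flux
# needs wavelengths even for seemingly simple unit conversions.
fnu = flam.to(u.Jy, equivalencies=u.spectral_density(wave))
print(fnu)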
ransford/sllurp
sllurp/log.py
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/log.py#L9-L24
def init_logging(debug=False, logfile=None): """Initialize logging.""" loglevel = logging.DEBUG if debug else logging.INFO logformat = '%(asctime)s %(name)s: %(levelname)s: %(message)s' formatter = logging.Formatter(logformat) stderr = logging.StreamHandler() stderr.setFormatter(formatter) root = logging.getLogger() root.setLevel(loglevel) root.handlers = [stderr] if logfile: fhandler = logging.FileHandler(logfile) fhandler.setFormatter(formatter) root.addHandler(fhandler)
[ "def", "init_logging", "(", "debug", "=", "False", ",", "logfile", "=", "None", ")", ":", "loglevel", "=", "logging", ".", "DEBUG", "if", "debug", "else", "logging", ".", "INFO", "logformat", "=", "'%(asctime)s %(name)s: %(levelname)s: %(message)s'", "formatter", "=", "logging", ".", "Formatter", "(", "logformat", ")", "stderr", "=", "logging", ".", "StreamHandler", "(", ")", "stderr", ".", "setFormatter", "(", "formatter", ")", "root", "=", "logging", ".", "getLogger", "(", ")", "root", ".", "setLevel", "(", "loglevel", ")", "root", ".", "handlers", "=", "[", "stderr", "]", "if", "logfile", ":", "fhandler", "=", "logging", ".", "FileHandler", "(", "logfile", ")", "fhandler", ".", "setFormatter", "(", "formatter", ")", "root", ".", "addHandler", "(", "fhandler", ")" ]
Initialize logging.
[ "Initialize", "logging", "." ]
python
train
33
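Usage sketch for the record above (assumes sllurp is installed; the import path follows the record's sllurp/log.py). Because the function assigns root.handlers outright before adding the optional file handler, repeated calls reset rather than stack handlers.

import logging
from sllurp.log import init_logging

init_logging(debug=True, logfile='reader.log')
init_logging(debug=True, logfile='reader.log')  # safe: handlers are reset, not appended

logging.getLogger('sllurp').debug('DEBUG records now reach stderr and reader.log')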
SBRG/ssbio
ssbio/utils.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L26-L46
def is_ipynb(): """Return True if the module is running in IPython kernel, False if in IPython shell or other Python shell. Copied from: http://stackoverflow.com/a/37661854/1592810 There are other methods there too >>> is_ipynb() False """ try: shell = get_ipython().__class__.__name__ if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole? return True elif shell == 'TerminalInteractiveShell': # Terminal running IPython? return False else: return False # Other type (?) except NameError: return False
[ "def", "is_ipynb", "(", ")", ":", "try", ":", "shell", "=", "get_ipython", "(", ")", ".", "__class__", ".", "__name__", "if", "shell", "==", "'ZMQInteractiveShell'", ":", "# Jupyter notebook or qtconsole?", "return", "True", "elif", "shell", "==", "'TerminalInteractiveShell'", ":", "# Terminal running IPython?", "return", "False", "else", ":", "return", "False", "# Other type (?)", "except", "NameError", ":", "return", "False" ]
Return True if the module is running in IPython kernel, False if in IPython shell or other Python shell. Copied from: http://stackoverflow.com/a/37661854/1592810 There are other methods there too >>> is_ipynb() False
[ "Return", "True", "if", "the", "module", "is", "running", "in", "IPython", "kernel", "False", "if", "in", "IPython", "shell", "or", "other", "Python", "shell", "." ]
python
train
29.190476
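One plausible use of the record above, sketched here as an assumption rather than the library's documented intent: branch on the frontend to pick an appropriate progress bar.

from ssbio.utils import is_ipynb

# Hypothetical use case: frontend-appropriate progress bars.
if is_ipynb():
    from tqdm.notebook import tqdm  # rich widget inside Jupyter
else:
    from tqdm import tqdm           # plain text everywhere else

for _ in tqdm(range(3)):
    pass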
django-danceschool/django-danceschool
danceschool/financial/helpers.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/financial/helpers.py#L280-L508
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None): ''' For each StaffMember-related Repeated Expense Rule, look for EventStaffMember instances in the designated time window that do not already have expenses associated with them. For hourly rental expenses, then generate new expenses that are associated with this rule. For non-hourly expenses, generate new expenses based on the non-overlapping intervals of days, weeks or months for which there is not already an ExpenseItem associated with the rule in question. ''' # This is used repeatedly, so it is put at the top submissionUser = getattr(request, 'user', None) # Return the number of new expense items created generate_count = 0 # First, construct the set of rules that need to be checked for affiliated events rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \ Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False)) if rule: rule_filters = rule_filters & Q(id=rule.id) rulesToCheck = RepeatedExpenseRule.objects.filter( rule_filters).distinct().order_by( '-staffmemberwageinfo__category', '-staffdefaultwage__category' ) # These are the filters placed on Events that overlap the window in which # expenses are being generated. event_timefilters = Q() if datetimeTuple and len(datetimeTuple) == 2: timelist = list(datetimeTuple) timelist.sort() event_timefilters = event_timefilters & ( Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1]) ) if event: event_timefilters = event_timefilters & Q(event__id=event.id) # Now, we loop through the set of rules that need to be applied, then loop # through the Events in the window in question that involved the staff # member indicated by the rule. for rule in rulesToCheck: staffMember = getattr(rule, 'staffMember', None) staffCategory = getattr(rule, 'category', None) # No need to continue if expenses are not to be generated if ( (not staffMember and not staffCategory) or ( not staffMember and not getConstant('financial__autoGenerateFromStaffCategoryDefaults') ) ): continue # For construction of expense descriptions replacements = { 'type': _('Staff'), 'to': _('payment to'), 'for': _('for'), } # This is the generic category for all Event staff, but it may be overridden below expense_category = getConstant('financial__otherStaffExpenseCat') if staffCategory: if staffMember: # This staff member in this category eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory) elif getConstant('financial__autoGenerateFromStaffCategoryDefaults'): # Any staff member who does not already have a rule specified this category eventstaff_filter = ( Q(category=staffCategory) & ~Q(staffMember__expenserules__category=staffCategory) ) replacements['type'] = staffCategory.name # For standard categories of staff, map the EventStaffCategory to # an ExpenseCategory using the stored constants. Otherwise, the # ExpenseCategory is a generic one. 
if staffCategory == getConstant('general__eventStaffCategoryAssistant'): expense_category = getConstant('financial__assistantClassInstructionExpenseCat') elif staffCategory in [ getConstant('general__eventStaffCategoryInstructor'), getConstant('general__eventStaffCategorySubstitute') ]: expense_category = getConstant('financial__classInstructionExpenseCat') else: # We don't want to generate duplicate expenses when there is both a category-limited # rule and a non-limited rule for the same person, so we have to construct the list # of categories that are to be excluded if no category is specified by this rule. coveredCategories = list(staffMember.expenserules.filter( category__isnull=False).values_list('category__id', flat=True)) eventstaff_filter = Q(staffMember=staffMember) & ~Q(category__id__in=coveredCategories) if rule.advanceDays is not None: if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end: event_timefilters = event_timefilters & Q( event__endTime__lte=timezone.now() + timedelta(days=rule.advanceDays) ) elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start: event_timefilters = event_timefilters & Q( event__startTime__lte=timezone.now() + timedelta(days=rule.advanceDays) ) if rule.priorDays is not None: if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end: event_timefilters = event_timefilters & Q( event__endTime__gte=timezone.now() - timedelta(days=rule.priorDays) ) elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start: event_timefilters = event_timefilters & Q( event__startTime__gte=timezone.now() - timedelta(days=rule.priorDays) ) if rule.startDate: event_timefilters = event_timefilters & Q(event__startTime__gte=timezone.now().replace( year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day, hour=0, minute=0, second=0, microsecond=0, )) if rule.endDate: event_timefilters = event_timefilters & Q(event__startTime__lte=timezone.now().replace( year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day, hour=0, minute=0, second=0, microsecond=0, )) # Loop through EventStaffMembers for which there are not already # directly allocated expenses under this rule, and create new # ExpenseItems for them depending on whether the rule requires hourly # expenses or non-hourly ones to be generated. staffers = EventStaffMember.objects.filter(eventstaff_filter & event_timefilters).exclude( Q(event__expenseitem__expenseRule=rule)).distinct() if rule.applyRateRule == rule.RateRuleChoices.hourly: for staffer in staffers: # Hourly expenses are always generated without checking for # overlapping windows, because the periods over which hourly # expenses are defined are disjoint. However, hourly expenses # are allocated directly to events, so we just need to create # expenses for any events that do not already have an Expense # Item generate under this rule. replacements['event'] = staffer.event.name replacements['name'] = staffer.staffMember.fullName replacements['dates'] = staffer.event.startTime.strftime('%Y-%m-%d') if ( staffer.event.startTime.strftime('%Y-%m-%d') != staffer.event.endTime.strftime('%Y-%m-%d') ): replacements['dates'] += ' %s %s' % ( _('to'), staffer.event.endTime.strftime('%Y-%m-%d') ) # Find or create the TransactionParty associated with the staff member. 
staffer_party = TransactionParty.objects.get_or_create( staffMember=staffer.staffMember, defaults={ 'name': staffer.staffMember.fullName, 'user': getattr(staffer.staffMember, 'userAccount', None) } )[0] params = { 'event': staffer.event, 'category': expense_category, 'expenseRule': rule, 'description': '%(type)s %(to)s %(name)s %(for)s: %(event)s, %(dates)s' % \ replacements, 'submissionUser': submissionUser, 'hours': staffer.netHours, 'wageRate': rule.rentalRate, 'total': staffer.netHours * rule.rentalRate, 'accrualDate': staffer.event.startTime, 'payTo': staffer_party, } ExpenseItem.objects.create(**params) generate_count += 1 else: # Non-hourly expenses are generated by constructing the time # intervals in which the occurrence occurs, and removing from that # interval any intervals in which an expense has already been # generated under this rule (so, for example, monthly rentals will # now show up multiple times). So, we just need to construct the set # of intervals for which to construct expenses. We first need to # split the set of EventStaffMember objects by StaffMember (in case # this rule is not person-specific) and then run this provedure # separated by StaffMember. members = StaffMember.objects.filter(eventstaffmember__in=staffers) for member in members: events = [x.event for x in staffers.filter(staffMember=member)] # Find or create the TransactionParty associated with the staff member. staffer_party = TransactionParty.objects.get_or_create( staffMember=member, defaults={ 'name': member.fullName, 'user': getattr(member, 'userAccount', None) } )[0] intervals = [ (x.localStartTime, x.localEndTime) for x in EventOccurrence.objects.filter(event__in=events) ] remaining_intervals = rule.getWindowsAndTotals(intervals) for startTime, endTime, total, description in remaining_intervals: replacements['when'] = description replacements['name'] = member.fullName params = { 'category': expense_category, 'expenseRule': rule, 'periodStart': startTime, 'periodEnd': endTime, 'description': '%(type)s %(to)s %(name)s %(for)s %(when)s' % replacements, 'submissionUser': submissionUser, 'total': total, 'accrualDate': startTime, 'payTo': staffer_party, } ExpenseItem.objects.create(**params) generate_count += 1 rulesToCheck.update(lastRun=timezone.now()) return generate_count
[ "def", "createExpenseItemsForEvents", "(", "request", "=", "None", ",", "datetimeTuple", "=", "None", ",", "rule", "=", "None", ",", "event", "=", "None", ")", ":", "# This is used repeatedly, so it is put at the top\r", "submissionUser", "=", "getattr", "(", "request", ",", "'user'", ",", "None", ")", "# Return the number of new expense items created\r", "generate_count", "=", "0", "# First, construct the set of rules that need to be checked for affiliated events\r", "rule_filters", "=", "Q", "(", "disabled", "=", "False", ")", "&", "Q", "(", "rentalRate__gt", "=", "0", ")", "&", "Q", "(", "Q", "(", "staffmemberwageinfo__isnull", "=", "False", ")", "|", "Q", "(", "staffdefaultwage__isnull", "=", "False", ")", ")", "if", "rule", ":", "rule_filters", "=", "rule_filters", "&", "Q", "(", "id", "=", "rule", ".", "id", ")", "rulesToCheck", "=", "RepeatedExpenseRule", ".", "objects", ".", "filter", "(", "rule_filters", ")", ".", "distinct", "(", ")", ".", "order_by", "(", "'-staffmemberwageinfo__category'", ",", "'-staffdefaultwage__category'", ")", "# These are the filters placed on Events that overlap the window in which\r", "# expenses are being generated.\r", "event_timefilters", "=", "Q", "(", ")", "if", "datetimeTuple", "and", "len", "(", "datetimeTuple", ")", "==", "2", ":", "timelist", "=", "list", "(", "datetimeTuple", ")", "timelist", ".", "sort", "(", ")", "event_timefilters", "=", "event_timefilters", "&", "(", "Q", "(", "event__startTime__gte", "=", "timelist", "[", "0", "]", ")", "&", "Q", "(", "event__startTime__lte", "=", "timelist", "[", "1", "]", ")", ")", "if", "event", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__id", "=", "event", ".", "id", ")", "# Now, we loop through the set of rules that need to be applied, then loop\r", "# through the Events in the window in question that involved the staff\r", "# member indicated by the rule.\r", "for", "rule", "in", "rulesToCheck", ":", "staffMember", "=", "getattr", "(", "rule", ",", "'staffMember'", ",", "None", ")", "staffCategory", "=", "getattr", "(", "rule", ",", "'category'", ",", "None", ")", "# No need to continue if expenses are not to be generated\r", "if", "(", "(", "not", "staffMember", "and", "not", "staffCategory", ")", "or", "(", "not", "staffMember", "and", "not", "getConstant", "(", "'financial__autoGenerateFromStaffCategoryDefaults'", ")", ")", ")", ":", "continue", "# For construction of expense descriptions\r", "replacements", "=", "{", "'type'", ":", "_", "(", "'Staff'", ")", ",", "'to'", ":", "_", "(", "'payment to'", ")", ",", "'for'", ":", "_", "(", "'for'", ")", ",", "}", "# This is the generic category for all Event staff, but it may be overridden below\r", "expense_category", "=", "getConstant", "(", "'financial__otherStaffExpenseCat'", ")", "if", "staffCategory", ":", "if", "staffMember", ":", "# This staff member in this category\r", "eventstaff_filter", "=", "Q", "(", "staffMember", "=", "staffMember", ")", "&", "Q", "(", "category", "=", "staffCategory", ")", "elif", "getConstant", "(", "'financial__autoGenerateFromStaffCategoryDefaults'", ")", ":", "# Any staff member who does not already have a rule specified this category\r", "eventstaff_filter", "=", "(", "Q", "(", "category", "=", "staffCategory", ")", "&", "~", "Q", "(", "staffMember__expenserules__category", "=", "staffCategory", ")", ")", "replacements", "[", "'type'", "]", "=", "staffCategory", ".", "name", "# For standard categories of staff, map the EventStaffCategory to\r", "# an ExpenseCategory using the 
stored constants. Otherwise, the\r", "# ExpenseCategory is a generic one.\r", "if", "staffCategory", "==", "getConstant", "(", "'general__eventStaffCategoryAssistant'", ")", ":", "expense_category", "=", "getConstant", "(", "'financial__assistantClassInstructionExpenseCat'", ")", "elif", "staffCategory", "in", "[", "getConstant", "(", "'general__eventStaffCategoryInstructor'", ")", ",", "getConstant", "(", "'general__eventStaffCategorySubstitute'", ")", "]", ":", "expense_category", "=", "getConstant", "(", "'financial__classInstructionExpenseCat'", ")", "else", ":", "# We don't want to generate duplicate expenses when there is both a category-limited\r", "# rule and a non-limited rule for the same person, so we have to construct the list\r", "# of categories that are to be excluded if no category is specified by this rule.\r", "coveredCategories", "=", "list", "(", "staffMember", ".", "expenserules", ".", "filter", "(", "category__isnull", "=", "False", ")", ".", "values_list", "(", "'category__id'", ",", "flat", "=", "True", ")", ")", "eventstaff_filter", "=", "Q", "(", "staffMember", "=", "staffMember", ")", "&", "~", "Q", "(", "category__id__in", "=", "coveredCategories", ")", "if", "rule", ".", "advanceDays", "is", "not", "None", ":", "if", "rule", ".", "advanceDaysReference", "==", "RepeatedExpenseRule", ".", "MilestoneChoices", ".", "end", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__endTime__lte", "=", "timezone", ".", "now", "(", ")", "+", "timedelta", "(", "days", "=", "rule", ".", "advanceDays", ")", ")", "elif", "rule", ".", "advanceDaysReference", "==", "RepeatedExpenseRule", ".", "MilestoneChoices", ".", "start", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__startTime__lte", "=", "timezone", ".", "now", "(", ")", "+", "timedelta", "(", "days", "=", "rule", ".", "advanceDays", ")", ")", "if", "rule", ".", "priorDays", "is", "not", "None", ":", "if", "rule", ".", "priorDaysReference", "==", "RepeatedExpenseRule", ".", "MilestoneChoices", ".", "end", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__endTime__gte", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "rule", ".", "priorDays", ")", ")", "elif", "rule", ".", "priorDaysReference", "==", "RepeatedExpenseRule", ".", "MilestoneChoices", ".", "start", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__startTime__gte", "=", "timezone", ".", "now", "(", ")", "-", "timedelta", "(", "days", "=", "rule", ".", "priorDays", ")", ")", "if", "rule", ".", "startDate", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__startTime__gte", "=", "timezone", ".", "now", "(", ")", ".", "replace", "(", "year", "=", "rule", ".", "startDate", ".", "year", ",", "month", "=", "rule", ".", "startDate", ".", "month", ",", "day", "=", "rule", ".", "startDate", ".", "day", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ",", ")", ")", "if", "rule", ".", "endDate", ":", "event_timefilters", "=", "event_timefilters", "&", "Q", "(", "event__startTime__lte", "=", "timezone", ".", "now", "(", ")", ".", "replace", "(", "year", "=", "rule", ".", "endDate", ".", "year", ",", "month", "=", "rule", ".", "endDate", ".", "month", ",", "day", "=", "rule", ".", "endDate", ".", "day", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ",", ")", ")", "# Loop through EventStaffMembers for which there are 
not already\r", "# directly allocated expenses under this rule, and create new\r", "# ExpenseItems for them depending on whether the rule requires hourly\r", "# expenses or non-hourly ones to be generated.\r", "staffers", "=", "EventStaffMember", ".", "objects", ".", "filter", "(", "eventstaff_filter", "&", "event_timefilters", ")", ".", "exclude", "(", "Q", "(", "event__expenseitem__expenseRule", "=", "rule", ")", ")", ".", "distinct", "(", ")", "if", "rule", ".", "applyRateRule", "==", "rule", ".", "RateRuleChoices", ".", "hourly", ":", "for", "staffer", "in", "staffers", ":", "# Hourly expenses are always generated without checking for\r", "# overlapping windows, because the periods over which hourly\r", "# expenses are defined are disjoint. However, hourly expenses\r", "# are allocated directly to events, so we just need to create\r", "# expenses for any events that do not already have an Expense\r", "# Item generate under this rule.\r", "replacements", "[", "'event'", "]", "=", "staffer", ".", "event", ".", "name", "replacements", "[", "'name'", "]", "=", "staffer", ".", "staffMember", ".", "fullName", "replacements", "[", "'dates'", "]", "=", "staffer", ".", "event", ".", "startTime", ".", "strftime", "(", "'%Y-%m-%d'", ")", "if", "(", "staffer", ".", "event", ".", "startTime", ".", "strftime", "(", "'%Y-%m-%d'", ")", "!=", "staffer", ".", "event", ".", "endTime", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", ":", "replacements", "[", "'dates'", "]", "+=", "' %s %s'", "%", "(", "_", "(", "'to'", ")", ",", "staffer", ".", "event", ".", "endTime", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", "# Find or create the TransactionParty associated with the staff member.\r", "staffer_party", "=", "TransactionParty", ".", "objects", ".", "get_or_create", "(", "staffMember", "=", "staffer", ".", "staffMember", ",", "defaults", "=", "{", "'name'", ":", "staffer", ".", "staffMember", ".", "fullName", ",", "'user'", ":", "getattr", "(", "staffer", ".", "staffMember", ",", "'userAccount'", ",", "None", ")", "}", ")", "[", "0", "]", "params", "=", "{", "'event'", ":", "staffer", ".", "event", ",", "'category'", ":", "expense_category", ",", "'expenseRule'", ":", "rule", ",", "'description'", ":", "'%(type)s %(to)s %(name)s %(for)s: %(event)s, %(dates)s'", "%", "replacements", ",", "'submissionUser'", ":", "submissionUser", ",", "'hours'", ":", "staffer", ".", "netHours", ",", "'wageRate'", ":", "rule", ".", "rentalRate", ",", "'total'", ":", "staffer", ".", "netHours", "*", "rule", ".", "rentalRate", ",", "'accrualDate'", ":", "staffer", ".", "event", ".", "startTime", ",", "'payTo'", ":", "staffer_party", ",", "}", "ExpenseItem", ".", "objects", ".", "create", "(", "*", "*", "params", ")", "generate_count", "+=", "1", "else", ":", "# Non-hourly expenses are generated by constructing the time\r", "# intervals in which the occurrence occurs, and removing from that\r", "# interval any intervals in which an expense has already been\r", "# generated under this rule (so, for example, monthly rentals will\r", "# now show up multiple times). So, we just need to construct the set\r", "# of intervals for which to construct expenses. 
We first need to\r", "# split the set of EventStaffMember objects by StaffMember (in case\r", "# this rule is not person-specific) and then run this provedure\r", "# separated by StaffMember.\r", "members", "=", "StaffMember", ".", "objects", ".", "filter", "(", "eventstaffmember__in", "=", "staffers", ")", "for", "member", "in", "members", ":", "events", "=", "[", "x", ".", "event", "for", "x", "in", "staffers", ".", "filter", "(", "staffMember", "=", "member", ")", "]", "# Find or create the TransactionParty associated with the staff member.\r", "staffer_party", "=", "TransactionParty", ".", "objects", ".", "get_or_create", "(", "staffMember", "=", "member", ",", "defaults", "=", "{", "'name'", ":", "member", ".", "fullName", ",", "'user'", ":", "getattr", "(", "member", ",", "'userAccount'", ",", "None", ")", "}", ")", "[", "0", "]", "intervals", "=", "[", "(", "x", ".", "localStartTime", ",", "x", ".", "localEndTime", ")", "for", "x", "in", "EventOccurrence", ".", "objects", ".", "filter", "(", "event__in", "=", "events", ")", "]", "remaining_intervals", "=", "rule", ".", "getWindowsAndTotals", "(", "intervals", ")", "for", "startTime", ",", "endTime", ",", "total", ",", "description", "in", "remaining_intervals", ":", "replacements", "[", "'when'", "]", "=", "description", "replacements", "[", "'name'", "]", "=", "member", ".", "fullName", "params", "=", "{", "'category'", ":", "expense_category", ",", "'expenseRule'", ":", "rule", ",", "'periodStart'", ":", "startTime", ",", "'periodEnd'", ":", "endTime", ",", "'description'", ":", "'%(type)s %(to)s %(name)s %(for)s %(when)s'", "%", "replacements", ",", "'submissionUser'", ":", "submissionUser", ",", "'total'", ":", "total", ",", "'accrualDate'", ":", "startTime", ",", "'payTo'", ":", "staffer_party", ",", "}", "ExpenseItem", ".", "objects", ".", "create", "(", "*", "*", "params", ")", "generate_count", "+=", "1", "rulesToCheck", ".", "update", "(", "lastRun", "=", "timezone", ".", "now", "(", ")", ")", "return", "generate_count" ]
For each StaffMember-related Repeated Expense Rule, look for EventStaffMember instances in the designated time window that do not already have expenses associated with them. For hourly rental expenses, then generate new expenses that are associated with this rule. For non-hourly expenses, generate new expenses based on the non-overlapping intervals of days, weeks or months for which there is not already an ExpenseItem associated with the rule in question.
[ "For", "each", "StaffMember", "-", "related", "Repeated", "Expense", "Rule", "look", "for", "EventStaffMember", "instances", "in", "the", "designated", "time", "window", "that", "do", "not", "already", "have", "expenses", "associated", "with", "them", ".", "For", "hourly", "rental", "expenses", "then", "generate", "new", "expenses", "that", "are", "associated", "with", "this", "rule", ".", "For", "non", "-", "hourly", "expenses", "generate", "new", "expenses", "based", "on", "the", "non", "-", "overlapping", "intervals", "of", "days", "weeks", "or", "months", "for", "which", "there", "is", "not", "already", "an", "ExpenseItem", "associated", "with", "the", "rule", "in", "question", "." ]
python
train
49.733624
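The load-bearing idiom in the record above is incremental composition of Django Q objects: start from an empty Q() and AND in each constraint that applies, then exclude events that already carry an ExpenseItem for the rule. A minimal sketch of that pattern follows; field names mirror the code above, and the final query is shown commented because it needs a configured Django project.

from datetime import datetime, timedelta

from django.db.models import Q

window = (datetime(2019, 1, 1), datetime(2019, 2, 1))  # hypothetical window
advance_days = 30

event_timefilters = Q()  # an empty Q() is a no-op under &
event_timefilters &= (Q(event__startTime__gte=window[0]) &
                      Q(event__startTime__lte=window[1]))
event_timefilters &= Q(
    event__endTime__lte=datetime.utcnow() + timedelta(days=advance_days))

# With a configured project, the de-duplicating query is then:
# EventStaffMember.objects.filter(eventstaff_filter & event_timefilters).exclude(
#     event__expenseitem__expenseRule=rule).distinct()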
usc-isi-i2/dig-sandpaper
digsandpaper/coarse/postprocess/similarity_score_rerank_component.py
https://github.com/usc-isi-i2/dig-sandpaper/blob/c7a905ceec28ad0cc9e7da7ede2fd3d2fc93c3d6/digsandpaper/coarse/postprocess/similarity_score_rerank_component.py#L85-L110
def add_highlights_docs(docs): """ "highlight": { "knowledge_graph.title.value": [ "Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?" ] } """ if not isinstance(docs, list): docs = [docs] for doc in docs: if 'matched_sentence' in doc['_source']: matched_sentences = doc['_source']['matched_sentence'] for sentence in matched_sentences: # also add matched sentence to knowledge graph doc['_source']['knowledge_graph']['matched_sentence'] = [{'key': sentence, 'value': sentence}] paragraph = SimilarityScoreRerank.get_description(doc) if paragraph: high_para = SimilarityScoreRerank.create_highlighted_sentences(matched_sentences, paragraph) if high_para: if 'highlight' not in doc: doc['highlight'] = dict() doc['highlight']['knowledge_graph.description.value'] = [high_para] return docs
[ "def", "add_highlights_docs", "(", "docs", ")", ":", "if", "not", "isinstance", "(", "docs", ",", "list", ")", ":", "docs", "=", "[", "docs", "]", "for", "doc", "in", "docs", ":", "if", "'matched_sentence'", "in", "doc", "[", "'_source'", "]", ":", "matched_sentences", "=", "doc", "[", "'_source'", "]", "[", "'matched_sentence'", "]", "for", "sentence", "in", "matched_sentences", ":", "# also add matched sentence to knowledge graph", "doc", "[", "'_source'", "]", "[", "'knowledge_graph'", "]", "[", "'matched_sentence'", "]", "=", "[", "{", "'key'", ":", "sentence", ",", "'value'", ":", "sentence", "}", "]", "paragraph", "=", "SimilarityScoreRerank", ".", "get_description", "(", "doc", ")", "if", "paragraph", ":", "high_para", "=", "SimilarityScoreRerank", ".", "create_highlighted_sentences", "(", "matched_sentences", ",", "paragraph", ")", "if", "high_para", ":", "if", "'highlight'", "not", "in", "doc", ":", "doc", "[", "'highlight'", "]", "=", "dict", "(", ")", "doc", "[", "'highlight'", "]", "[", "'knowledge_graph.description.value'", "]", "=", "[", "high_para", "]", "return", "docs" ]
"highlight": { "knowledge_graph.title.value": [ "Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?" ] }
[ "highlight", ":", "{", "knowledge_graph", ".", "title", ".", "value", ":", "[", "Before", "1", "January", "2018", "will", "<em", ">", "South<", "/", "em", ">", "<em", ">", "Korea<", "/", "em", ">", "file", "a", "World", "Trade", "Organization", "dispute", "against", "the", "United", "States", "related", "to", "solar", "panels?", "]", "}" ]
python
train
45.923077
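The doc-shape transformation in the record above is easier to see on a concrete record. A self-contained sketch of what add_highlights_docs does per document; the highlighted paragraph is a stand-in for what create_highlighted_sentences would return.

doc = {
    '_source': {
        'matched_sentence': ['South Korea may file a WTO dispute.'],
        'knowledge_graph': {},
    },
}

sentence = doc['_source']['matched_sentence'][0]
# Mirror the matched sentence into the knowledge graph...
doc['_source']['knowledge_graph']['matched_sentence'] = [
    {'key': sentence, 'value': sentence}]
# ...and attach the highlighted description paragraph, creating the
# 'highlight' dict on first use.
doc.setdefault('highlight', {})['knowledge_graph.description.value'] = [
    '<em>South Korea</em> may file a WTO dispute.']
print(doc['highlight'])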
foremast/foremast
src/foremast/securitygroup/create_securitygroup.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/securitygroup/create_securitygroup.py#L195-L200
def resolve_self_references(self, rules): """Resolves `$self` references to actual application name in security group rules.""" with suppress(KeyError): rule = rules.pop('$self') rules[self.app_name] = rule return rules
[ "def", "resolve_self_references", "(", "self", ",", "rules", ")", ":", "with", "suppress", "(", "KeyError", ")", ":", "rule", "=", "rules", ".", "pop", "(", "'$self'", ")", "rules", "[", "self", ".", "app_name", "]", "=", "rule", "return", "rules" ]
Resolves `$self` references to actual application name in security group rules.
[ "Resolves", "$self", "references", "to", "actual", "application", "name", "in", "security", "group", "rules", "." ]
python
train
43.666667
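The whole trick in the record above is contextlib.suppress around dict.pop: if the '$self' placeholder is absent, the block is a silent no-op. Runnable sketch with a hypothetical app name and rule payload:

from contextlib import suppress

rules = {'$self': [{'start_port': 8080, 'end_port': 8080, 'protocol': 'tcp'}]}
app_name = 'myapp'  # hypothetical

with suppress(KeyError):
    rules[app_name] = rules.pop('$self')

print(rules)  # {'myapp': [{'start_port': 8080, 'end_port': 8080, 'protocol': 'tcp'}]}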
ProjetPP/PPP-datamodel-Python
ppp_datamodel/nodes/abstractnode.py
https://github.com/ProjetPP/PPP-datamodel-Python/blob/0c7958fb4df75468fd3137240a5065925c239776/ppp_datamodel/nodes/abstractnode.py#L41-L46
def fold(self, predicate): """Takes a predicate and applies it to each node starting from the leaves and making the return value propagate.""" childs = {x:y.fold(predicate) for (x,y) in self._attributes.items() if isinstance(y, SerializableTypedAttributesHolder)} return predicate(self, childs)
[ "def", "fold", "(", "self", ",", "predicate", ")", ":", "childs", "=", "{", "x", ":", "y", ".", "fold", "(", "predicate", ")", "for", "(", "x", ",", "y", ")", "in", "self", ".", "_attributes", ".", "items", "(", ")", "if", "isinstance", "(", "y", ",", "SerializableTypedAttributesHolder", ")", "}", "return", "predicate", "(", "self", ",", "childs", ")" ]
Takes a predicate and applies it to each node starting from the leaves and making the return value propagate.
[ "Takes", "a", "predicate", "and", "applies", "it", "to", "each", "node", "starting", "from", "the", "leaves", "and", "making", "the", "return", "value", "propagate", "." ]
python
train
56.5
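A self-contained toy version of the record above shows the traversal order: the predicate fires on the leaves first, and each parent receives its children's results in childs. Node here is a hypothetical stand-in for SerializableTypedAttributesHolder.

class Node:
    def __init__(self, **attrs):
        self._attributes = attrs

    def fold(self, predicate):
        childs = {k: v.fold(predicate)
                  for (k, v) in self._attributes.items()
                  if isinstance(v, Node)}
        return predicate(self, childs)

tree = Node(left=Node(), right=Node(mid=Node()))
# Count the nodes: 1 for self plus whatever the children folded to.
print(tree.fold(lambda node, childs: 1 + sum(childs.values())))  # 4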
angr/angr
angr/analyses/backward_slice.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/backward_slice.py#L97-L124
def dbg_repr(self, max_display=10): """ Debugging output of this slice. :param max_display: The maximum number of SimRun slices to show. :return: A string representation. """ s = repr(self) + "\n" if len(self.chosen_statements) > max_display: s += "%d SimRuns in program slice, displaying %d.\n" % (len(self.chosen_statements), max_display) else: s += "%d SimRuns in program slice.\n" % len(self.chosen_statements) # Pretty-print the first `max_display` basic blocks if max_display is None: # Output all run_addrs = sorted(self.chosen_statements.keys()) else: # Only output the first "max_display" ones run_addrs = sorted(self.chosen_statements.keys())[ : max_display] for run_addr in run_addrs: s += self.dbg_repr_run(run_addr) + "\n" return s
[ "def", "dbg_repr", "(", "self", ",", "max_display", "=", "10", ")", ":", "s", "=", "repr", "(", "self", ")", "+", "\"\\n\"", "if", "len", "(", "self", ".", "chosen_statements", ")", ">", "max_display", ":", "s", "+=", "\"%d SimRuns in program slice, displaying %d.\\n\"", "%", "(", "len", "(", "self", ".", "chosen_statements", ")", ",", "max_display", ")", "else", ":", "s", "+=", "\"%d SimRuns in program slice.\\n\"", "%", "len", "(", "self", ".", "chosen_statements", ")", "# Pretty-print the first `max_display` basic blocks", "if", "max_display", "is", "None", ":", "# Output all", "run_addrs", "=", "sorted", "(", "self", ".", "chosen_statements", ".", "keys", "(", ")", ")", "else", ":", "# Only output the first \"max_display\" ones", "run_addrs", "=", "sorted", "(", "self", ".", "chosen_statements", ".", "keys", "(", ")", ")", "[", ":", "max_display", "]", "for", "run_addr", "in", "run_addrs", ":", "s", "+=", "self", ".", "dbg_repr_run", "(", "run_addr", ")", "+", "\"\\n\"", "return", "s" ]
Debugging output of this slice. :param max_display: The maximum number of SimRun slices to show. :return: A string representation.
[ "Debugging", "output", "of", "this", "slice", "." ]
python
train
32.964286
collectiveacuity/labPack
labpack/databases/couchbase.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/couchbase.py#L695-L725
def exists(self, doc_id, rev_id=''): ''' a method to determine if document exists :param doc_id: string with id of document in bucket :param rev_id: [optional] string with revision id of document in bucket :return: boolean indicating existence of document ''' title = '%s.exists' % self.__class__.__name__ # validate inputs input_fields = { 'doc_id': doc_id, 'rev_id': rev_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # send request and construct response url = self.bucket_url + '/%s' % doc_id params = None if rev_id: params = { 'rev': rev_id } response = requests.get(url, params=params) if not 'error' in response.json(): return True return False
[ "def", "exists", "(", "self", ",", "doc_id", ",", "rev_id", "=", "''", ")", ":", "title", "=", "'%s.exists'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'doc_id'", ":", "doc_id", ",", "'rev_id'", ":", "rev_id", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# send request and construct response", "url", "=", "self", ".", "bucket_url", "+", "'/%s'", "%", "doc_id", "params", "=", "None", "if", "rev_id", ":", "params", "=", "{", "'rev'", ":", "rev_id", "}", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "not", "'error'", "in", "response", ".", "json", "(", ")", ":", "return", "True", "return", "False" ]
a method to determine if document exists :param doc_id: string with id of document in bucket :param rev_id: [optional] string with revision id of document in bucket :return: boolean indicating existence of document
[ "a", "method", "to", "determine", "if", "document", "exists", ":", "param", "doc_id", ":", "string", "with", "id", "of", "document", "in", "bucket", ":", "param", "rev_id", ":", "[", "optional", "]", "string", "with", "revision", "id", "of", "document", "in", "bucket", ":", "return", ":", "boolean", "indicating", "existence", "of", "document" ]
python
train
32.290323
odlgroup/odl
odl/space/npy_tensors.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L2234-L2256
def inner(self, x1, x2): """Return the weighted inner product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `NumpyTensor` Tensors whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided tensors. """ if self.exponent != 2.0: raise NotImplementedError('no inner product defined for ' 'exponent != 2 (got {})' ''.format(self.exponent)) else: inner = self.const * _inner_default(x1, x2) if x1.space.field is None: return inner else: return x1.space.field.element(inner)
[ "def", "inner", "(", "self", ",", "x1", ",", "x2", ")", ":", "if", "self", ".", "exponent", "!=", "2.0", ":", "raise", "NotImplementedError", "(", "'no inner product defined for '", "'exponent != 2 (got {})'", "''", ".", "format", "(", "self", ".", "exponent", ")", ")", "else", ":", "inner", "=", "self", ".", "const", "*", "_inner_default", "(", "x1", ",", "x2", ")", "if", "x1", ".", "space", ".", "field", "is", "None", ":", "return", "inner", "else", ":", "return", "x1", ".", "space", ".", "field", ".", "element", "(", "inner", ")" ]
Return the weighted inner product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `NumpyTensor` Tensors whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided tensors.
[ "Return", "the", "weighted", "inner", "product", "of", "x1", "and", "x2", "." ]
python
train
33.478261
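In the record above the weighting is a single constant c, so the implemented product is <x1, x2>_c = c * <x1, x2> and the induced norm scales by sqrt(c). A NumPy sketch of the same arithmetic; the conjugation convention of _inner_default is assumed.

import numpy as np

const = 0.5  # hypothetical weighting constant
x1 = np.array([1.0, 2.0, 3.0])
x2 = np.array([4.0, 5.0, 6.0])

inner = const * np.vdot(x2, x1)  # c * <x1, x2>
print(inner)                     # 0.5 * 32.0 = 16.0
print(np.sqrt(const) * np.linalg.norm(x1))  # induced weighted norm of x1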
jaraco/jaraco.windows
jaraco/windows/filesystem/__init__.py
https://github.com/jaraco/jaraco.windows/blob/51811efed50b46ad08daa25408a1cc806bc8d519/jaraco/windows/filesystem/__init__.py#L48-L54
def _is_target_a_directory(link, rel_target): """ If creating a symlink from link to a target, determine if target is a directory (relative to dirname(link)). """ target = os.path.join(os.path.dirname(link), rel_target) return os.path.isdir(target)
[ "def", "_is_target_a_directory", "(", "link", ",", "rel_target", ")", ":", "target", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "link", ")", ",", "rel_target", ")", "return", "os", ".", "path", ".", "isdir", "(", "target", ")" ]
If creating a symlink from link to a target, determine if target is a directory (relative to dirname(link)).
[ "If", "creating", "a", "symlink", "from", "link", "to", "a", "target", "determine", "if", "target", "is", "a", "directory", "(", "relative", "to", "dirname", "(", "link", "))", "." ]
python
train
35.428571
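The point of the record above is that the relative target resolves against the link's directory, not the process CWD. A quick illustration with hypothetical paths:

import os

link = os.path.join('project', 'docs', 'latest')  # hypothetical symlink path
rel_target = os.path.join('..', 'build', 'html')

target = os.path.join(os.path.dirname(link), rel_target)
print(target)                 # project/docs/../build/html
print(os.path.isdir(target))  # True only if project/build/html exists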
cloudflare/sqlalchemy-clickhouse
connector.py
https://github.com/cloudflare/sqlalchemy-clickhouse/blob/fc46142445d4510566f6412964df2fb9d2f4bd2e/connector.py#L350-L361
def _process_response(self, response): """ Update the internal state with the data from the response """ assert self._state == self._STATE_RUNNING, "Should be running if processing response" cols = None data = [] for r in response: if not cols: cols = [(f, r._fields[f].db_type) for f in r._fields] data.append([getattr(r, f) for f in r._fields]) self._data = data self._columns = cols self._state = self._STATE_FINISHED
[ "def", "_process_response", "(", "self", ",", "response", ")", ":", "assert", "self", ".", "_state", "==", "self", ".", "_STATE_RUNNING", ",", "\"Should be running if processing response\"", "cols", "=", "None", "data", "=", "[", "]", "for", "r", "in", "response", ":", "if", "not", "cols", ":", "cols", "=", "[", "(", "f", ",", "r", ".", "_fields", "[", "f", "]", ".", "db_type", ")", "for", "f", "in", "r", ".", "_fields", "]", "data", ".", "append", "(", "[", "getattr", "(", "r", ",", "f", ")", "for", "f", "in", "r", ".", "_fields", "]", ")", "self", ".", "_data", "=", "data", "self", ".", "_columns", "=", "cols", "self", ".", "_state", "=", "self", ".", "_STATE_FINISHED" ]
Update the internal state with the data from the response
[ "Update", "the", "internal", "state", "with", "the", "data", "from", "the", "response" ]
python
train
42.75
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L1178-L1199
def if_stmt(self, if_loc, test, if_colon_loc, body, elifs, else_opt): """if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]""" stmt = ast.If(orelse=[], else_loc=None, else_colon_loc=None) if else_opt: stmt.else_loc, stmt.else_colon_loc, stmt.orelse = else_opt for elif_ in reversed(elifs): stmt.keyword_loc, stmt.test, stmt.if_colon_loc, stmt.body = elif_ stmt.loc = stmt.keyword_loc.join(stmt.body[-1].loc) if stmt.orelse: stmt.loc = stmt.loc.join(stmt.orelse[-1].loc) stmt = ast.If(orelse=[stmt], else_loc=None, else_colon_loc=None) stmt.keyword_loc, stmt.test, stmt.if_colon_loc, stmt.body = \ if_loc, test, if_colon_loc, body stmt.loc = stmt.keyword_loc.join(stmt.body[-1].loc) if stmt.orelse: stmt.loc = stmt.loc.join(stmt.orelse[-1].loc) return stmt
[ "def", "if_stmt", "(", "self", ",", "if_loc", ",", "test", ",", "if_colon_loc", ",", "body", ",", "elifs", ",", "else_opt", ")", ":", "stmt", "=", "ast", ".", "If", "(", "orelse", "=", "[", "]", ",", "else_loc", "=", "None", ",", "else_colon_loc", "=", "None", ")", "if", "else_opt", ":", "stmt", ".", "else_loc", ",", "stmt", ".", "else_colon_loc", ",", "stmt", ".", "orelse", "=", "else_opt", "for", "elif_", "in", "reversed", "(", "elifs", ")", ":", "stmt", ".", "keyword_loc", ",", "stmt", ".", "test", ",", "stmt", ".", "if_colon_loc", ",", "stmt", ".", "body", "=", "elif_", "stmt", ".", "loc", "=", "stmt", ".", "keyword_loc", ".", "join", "(", "stmt", ".", "body", "[", "-", "1", "]", ".", "loc", ")", "if", "stmt", ".", "orelse", ":", "stmt", ".", "loc", "=", "stmt", ".", "loc", ".", "join", "(", "stmt", ".", "orelse", "[", "-", "1", "]", ".", "loc", ")", "stmt", "=", "ast", ".", "If", "(", "orelse", "=", "[", "stmt", "]", ",", "else_loc", "=", "None", ",", "else_colon_loc", "=", "None", ")", "stmt", ".", "keyword_loc", ",", "stmt", ".", "test", ",", "stmt", ".", "if_colon_loc", ",", "stmt", ".", "body", "=", "if_loc", ",", "test", ",", "if_colon_loc", ",", "body", "stmt", ".", "loc", "=", "stmt", ".", "keyword_loc", ".", "join", "(", "stmt", ".", "body", "[", "-", "1", "]", ".", "loc", ")", "if", "stmt", ".", "orelse", ":", "stmt", ".", "loc", "=", "stmt", ".", "loc", ".", "join", "(", "stmt", ".", "orelse", "[", "-", "1", "]", ".", "loc", ")", "return", "stmt" ]
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
[ "if_stmt", ":", "if", "test", ":", "suite", "(", "elif", "test", ":", "suite", ")", "*", "[", "else", ":", "suite", "]" ]
python
valid
44.136364
polysquare/cmake-ast
cmakeast/ast.py
https://github.com/polysquare/cmake-ast/blob/431a32d595d76f1f8f993eb6ddcc79effbadff9d/cmakeast/ast.py#L624-L629
def maybe_start_recording(tokens, index):
    """Return a new _RSTCommentBlockRecorder when it's time to record."""
    if tokens[index].type == TokenType.BeginRSTComment:
        return _RSTCommentBlockRecorder(index, tokens[index].line)

    return None
[ "def", "maybe_start_recording", "(", "tokens", ",", "index", ")", ":", "if", "tokens", "[", "index", "]", ".", "type", "==", "TokenType", ".", "BeginRSTComment", ":", "return", "_RSTCommentBlockRecorder", "(", "index", ",", "tokens", "[", "index", "]", ".", "line", ")", "return", "None" ]
Return a new _RSTCommentBlockRecorder when it's time to record.
[ "Return", "a", "new", "_RSTCommentBlockRecorder", "when", "its", "time", "to", "record", "." ]
python
train
44.166667
petrjasek/eve-elastic
eve_elastic/elastic.py
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L613-L615
def find_one_raw(self, resource, _id): """Find document by id.""" return self._find_by_id(resource=resource, _id=_id)
[ "def", "find_one_raw", "(", "self", ",", "resource", ",", "_id", ")", ":", "return", "self", ".", "_find_by_id", "(", "resource", "=", "resource", ",", "_id", "=", "_id", ")" ]
Find document by id.
[ "Find", "document", "by", "id", "." ]
python
train
43.666667
src-d/modelforge
modelforge/gcs_backend.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/gcs_backend.py#L152-L155
def fetch_model(self, source: str, file: Union[str, BinaryIO], chunk_size: int=DEFAULT_DOWNLOAD_CHUNK_SIZE) -> None: """Download the model from GCS.""" download_http(source, file, self._log, chunk_size)
[ "def", "fetch_model", "(", "self", ",", "source", ":", "str", ",", "file", ":", "Union", "[", "str", ",", "BinaryIO", "]", ",", "chunk_size", ":", "int", "=", "DEFAULT_DOWNLOAD_CHUNK_SIZE", ")", "->", "None", ":", "download_http", "(", "source", ",", "file", ",", "self", ".", "_log", ",", "chunk_size", ")" ]
Download the model from GCS.
[ "Download", "the", "model", "from", "GCS", "." ]
python
train
58.75
DataONEorg/d1_python
lib_common/src/d1_common/wrap/access_policy.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/wrap/access_policy.py#L596-L603
def _norm_perm_list_from_perm_dict(self, perm_dict): """Return a minimal, ordered, hashable list of subjects and permissions.""" high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict) return [ [k, list(sorted(high_perm_dict[k]))] for k in ORDERED_PERM_LIST if high_perm_dict.get(k, False) ]
[ "def", "_norm_perm_list_from_perm_dict", "(", "self", ",", "perm_dict", ")", ":", "high_perm_dict", "=", "self", ".", "_highest_perm_dict_from_perm_dict", "(", "perm_dict", ")", "return", "[", "[", "k", ",", "list", "(", "sorted", "(", "high_perm_dict", "[", "k", "]", ")", ")", "]", "for", "k", "in", "ORDERED_PERM_LIST", "if", "high_perm_dict", ".", "get", "(", "k", ",", "False", ")", "]" ]
Return a minimal, ordered, hashable list of subjects and permissions.
[ "Return", "a", "minimal", "ordered", "hashable", "list", "of", "subjects", "and", "permissions", "." ]
python
train
45.375
ladybug-tools/ladybug
ladybug/sunpath.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/sunpath.py#L798-L819
def _calculate_sun_vector(self): """Calculate sun vector for this sun.""" z_axis = Vector3(0., 0., -1.) x_axis = Vector3(1., 0., 0.) north_vector = Vector3(0., 1., 0.) # rotate north vector based on azimuth, altitude, and north _sun_vector = north_vector \ .rotate_around(x_axis, self.altitude_in_radians) \ .rotate_around(z_axis, self.azimuth_in_radians) \ .rotate_around(z_axis, math.radians(-1 * self.north_angle)) _sun_vector.normalize() try: _sun_vector.flip() except AttributeError: # euclid3 _sun_vector = Vector3(-1 * _sun_vector.x, -1 * _sun_vector.y, -1 * _sun_vector.z) self._sun_vector = _sun_vector
[ "def", "_calculate_sun_vector", "(", "self", ")", ":", "z_axis", "=", "Vector3", "(", "0.", ",", "0.", ",", "-", "1.", ")", "x_axis", "=", "Vector3", "(", "1.", ",", "0.", ",", "0.", ")", "north_vector", "=", "Vector3", "(", "0.", ",", "1.", ",", "0.", ")", "# rotate north vector based on azimuth, altitude, and north", "_sun_vector", "=", "north_vector", ".", "rotate_around", "(", "x_axis", ",", "self", ".", "altitude_in_radians", ")", ".", "rotate_around", "(", "z_axis", ",", "self", ".", "azimuth_in_radians", ")", ".", "rotate_around", "(", "z_axis", ",", "math", ".", "radians", "(", "-", "1", "*", "self", ".", "north_angle", ")", ")", "_sun_vector", ".", "normalize", "(", ")", "try", ":", "_sun_vector", ".", "flip", "(", ")", "except", "AttributeError", ":", "# euclid3", "_sun_vector", "=", "Vector3", "(", "-", "1", "*", "_sun_vector", ".", "x", ",", "-", "1", "*", "_sun_vector", ".", "y", ",", "-", "1", "*", "_sun_vector", ".", "z", ")", "self", ".", "_sun_vector", "=", "_sun_vector" ]
Calculate sun vector for this sun.
[ "Calculate", "sun", "vector", "for", "this", "sun", "." ]
python
train
36.954545
satellogic/telluric
telluric/collections.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/collections.py#L524-L542
def open(cls, filename, crs=None):
    """Creates a FileCollection from a file on disk.

    Parameters
    ----------
    filename : str
        Path of the file to read.
    crs : CRS
        overrides the crs of the collection, this function will not reproject

    """
    with fiona.Env():
        with fiona.open(filename, 'r') as source:
            original_crs = CRS(source.crs)
            schema = source.schema
            length = len(source)

    crs = crs or original_crs
    ret_val = cls(filename, crs, schema, length)
    return ret_val
[ "def", "open", "(", "cls", ",", "filename", ",", "crs", "=", "None", ")", ":", "with", "fiona", ".", "Env", "(", ")", ":", "with", "fiona", ".", "open", "(", "filename", ",", "'r'", ")", "as", "source", ":", "original_crs", "=", "CRS", "(", "source", ".", "crs", ")", "schema", "=", "source", ".", "schema", "length", "=", "len", "(", "source", ")", "crs", "=", "crs", "or", "original_crs", "ret_val", "=", "cls", "(", "filename", ",", "crs", ",", "schema", ",", "length", ")", "return", "ret_val" ]
Creates a FileCollection from a file on disk.

Parameters
----------
filename : str
    Path of the file to read.
crs : CRS
    overrides the crs of the collection, this function will not reproject
[ "Creates", "a", "FileCollection", "from", "a", "file", "in", "disk", "." ]
python
train
31.526316
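Usage sketch for the record above (needs fiona installed and an existing vector file at the hypothetical path): the same open-read-close pattern pulls only crs, schema and feature count up front, deferring feature reads.

import fiona

path = 'countries.shp'  # hypothetical file on disk

with fiona.Env():
    with fiona.open(path, 'r') as source:
        meta = {'crs': source.crs,
                'schema': source.schema,
                'length': len(source)}
print(meta)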
BlockHub/blockhubdpostools
dpostools/legacy.py
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/legacy.py#L177-L212
def payout(address): """returns all received transactions between the address and registered delegate accounts ORDER by timestamp ASC.""" qry = DbCursor().execute_and_fetchall(""" SELECT DISTINCT transactions."id", transactions."amount", transactions."timestamp", transactions."recipientId", transactions."senderId", transactions."rawasset", transactions."type", transactions."fee" FROM transactions, delegates WHERE transactions."senderId" IN ( SELECT transactions."senderId" FROM transactions, delegates WHERE transactions."id" = delegates."transactionId" ) AND transactions."recipientId" = '{}' ORDER BY transactions."timestamp" ASC""".format(address)) Transaction = namedtuple( 'transaction', 'id amount timestamp recipientId senderId rawasset type fee') named_transactions = [] for i in qry: tx_id = Transaction( id=i[0], amount=i[1], timestamp=i[2], recipientId=i[3], senderId=i[4], rawasset=i[5], type=i[6], fee=i[7], ) named_transactions.append(tx_id) return named_transactions
[ "def", "payout", "(", "address", ")", ":", "qry", "=", "DbCursor", "(", ")", ".", "execute_and_fetchall", "(", "\"\"\"\n SELECT DISTINCT transactions.\"id\", transactions.\"amount\",\n transactions.\"timestamp\", transactions.\"recipientId\",\n transactions.\"senderId\", transactions.\"rawasset\",\n transactions.\"type\", transactions.\"fee\"\n FROM transactions, delegates\n WHERE transactions.\"senderId\" IN (\n SELECT transactions.\"senderId\" \n FROM transactions, delegates \n WHERE transactions.\"id\" = delegates.\"transactionId\"\n )\n AND transactions.\"recipientId\" = '{}'\n ORDER BY transactions.\"timestamp\" ASC\"\"\"", ".", "format", "(", "address", ")", ")", "Transaction", "=", "namedtuple", "(", "'transaction'", ",", "'id amount timestamp recipientId senderId rawasset type fee'", ")", "named_transactions", "=", "[", "]", "for", "i", "in", "qry", ":", "tx_id", "=", "Transaction", "(", "id", "=", "i", "[", "0", "]", ",", "amount", "=", "i", "[", "1", "]", ",", "timestamp", "=", "i", "[", "2", "]", ",", "recipientId", "=", "i", "[", "3", "]", ",", "senderId", "=", "i", "[", "4", "]", ",", "rawasset", "=", "i", "[", "5", "]", ",", "type", "=", "i", "[", "6", "]", ",", "fee", "=", "i", "[", "7", "]", ",", ")", "named_transactions", ".", "append", "(", "tx_id", ")", "return", "named_transactions" ]
returns all received transactions between the address and registered delegate accounts ORDER by timestamp ASC.
[ "returns", "all", "received", "transactions", "between", "the", "address", "and", "registered", "delegate", "accounts", "ORDER", "by", "timestamp", "ASC", "." ]
python
valid
39.388889
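The rows-to-namedtuple step in the record above stands alone. Since the namedtuple's field order matches the SELECT column order, the keyword construction can also be written as a plain star-unpack; the rows below are stand-ins for the database result.

from collections import namedtuple

Transaction = namedtuple(
    'transaction',
    'id amount timestamp recipientId senderId rawasset type fee')

rows = [  # stand-in for DbCursor().execute_and_fetchall(...)
    ('tx-1', 200000000, 45000000, 'AR7...', 'AR9...', '{}', 0, 10000000),
]

named_transactions = [Transaction(*row) for row in rows]
print(named_transactions[0].amount, named_transactions[0].recipientId)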
UCBerkeleySETI/blimpy
blimpy/utils.py
https://github.com/UCBerkeleySETI/blimpy/blob/b8822d3e3e911944370d84371a91fa0c29e9772e/blimpy/utils.py#L87-L130
def unpack_2to8(data):
    """ Promote 2-bit unsigned data into 8-bit unsigned data.

    Args:
        data: Numpy array with dtype == uint8

    Notes:
        DATA MUST BE LOADED as np.array() with dtype='uint8'.

        This works with some clever shifting and AND / OR operations.
        Data is LOADED as 8-bit, then promoted to 32-bits:
        /ABCD EFGH/ (8 bits of data)
        /0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)

        Once promoted, we can do some shifting, AND and OR operations:
        /0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
        /0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
        /0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
        /0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
        /0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
        /0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)

        Then we change the view of the data to interpret it as 4x8 bit:
        [000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)

        The converted bits are then mapped to values in the range [-40, 40]
        according to a lookup chart.

        The mapping is based on specifications in the breakthrough docs:
        https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md

    """
    two_eight_lookup = {0: 40, 1: 12, 2: -12, 3: -40}
    tmp = data.astype(np.uint32)
    tmp = (tmp | (tmp << 12)) & 0xF000F
    tmp = (tmp | (tmp << 6)) & 0x3030303
    tmp = tmp.byteswap()
    tmp = tmp.view('uint8')
    mapped = np.array(tmp, dtype=np.int8)
    for k, v in two_eight_lookup.items():
        mapped[tmp == k] = v
    return mapped
[ "def", "unpack_2to8", "(", "data", ")", ":", "two_eight_lookup", "=", "{", "0", ":", "40", ",", "1", ":", "12", ",", "2", ":", "-", "12", ",", "3", ":", "-", "40", "}", "tmp", "=", "data", ".", "astype", "(", "np", ".", "uint32", ")", "tmp", "=", "(", "tmp", "|", "(", "tmp", "<<", "12", ")", ")", "&", "0xF000F", "tmp", "=", "(", "tmp", "|", "(", "tmp", "<<", "6", ")", ")", "&", "0x3030303", "tmp", "=", "tmp", ".", "byteswap", "(", ")", "tmp", "=", "tmp", ".", "view", "(", "'uint8'", ")", "mapped", "=", "np", ".", "array", "(", "tmp", ",", "dtype", "=", "np", ".", "int8", ")", "for", "k", ",", "v", "in", "two_eight_lookup", ".", "items", "(", ")", ":", "mapped", "[", "tmp", "==", "k", "]", "=", "v", "return", "mapped" ]
Promote 2-bit unsigned data into 8-bit unsigned data.

Args:
    data: Numpy array with dtype == uint8

Notes:
    DATA MUST BE LOADED as np.array() with dtype='uint8'.

    This works with some clever shifting and AND / OR operations.
    Data is LOADED as 8-bit, then promoted to 32-bits:
    /ABCD EFGH/ (8 bits of data)
    /0000 0000/0000 0000/0000 0000/ABCD EFGH/ (8 bits of data as a 32-bit word)

    Once promoted, we can do some shifting, AND and OR operations:
    /0000 0000/0000 ABCD/EFGH 0000/0000 0000/ (shifted << 12)
    /0000 0000/0000 ABCD/EFGH 0000/ABCD EFGH/ (bitwise OR of previous two lines)
    /0000 0000/0000 ABCD/0000 0000/0000 EFGH/ (bitwise AND with mask 0xF000F)
    /0000 00AB/CD00 0000/0000 00EF/GH00 0000/ (prev. line shifted << 6)
    /0000 00AB/CD00 ABCD/0000 00EF/GH00 EFGH/ (bitwise OR of previous two lines)
    /0000 00AB/0000 00CD/0000 00EF/0000 00GH/ (bitwise AND with 0x3030303)

    Then we change the view of the data to interpret it as 4x8 bit:
    [000000AB, 000000CD, 000000EF, 000000GH] (change view from 32-bit to 4x8-bit)

    The converted bits are then mapped to values in the range [-40, 40]
    according to a lookup chart.

    The mapping is based on specifications in the breakthrough docs:
    https://github.com/UCBerkeleySETI/breakthrough/blob/master/doc/RAW-File-Format.md
[ "Promote", "2", "-", "bit", "unisgned", "data", "into", "8", "-", "bit", "unsigned", "data", "." ]
python
test
41.590909
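The shift-and-mask pipeline above is easiest to verify on one byte. For the packed byte 0b00011011 (the 2-bit samples 0, 1, 2, 3) the three steps below land each sample in its own byte, exactly as the docstring diagrams:

import numpy as np

data = np.array([0b00011011], dtype=np.uint8)  # samples 0b00, 0b01, 0b10, 0b11

tmp = data.astype(np.uint32)
tmp = (tmp | (tmp << 12)) & 0xF000F    # -> 0x0001000B
tmp = (tmp | (tmp << 6)) & 0x3030303   # -> 0x00010203
out = tmp.byteswap().view('uint8')
print(out)  # [0 1 2 3], which the lookup then maps to [40, 12, -12, -40]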
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L386-L391
def transform(self, m): """Replace point by its transformation with matrix-like m.""" if len(m) != 6: raise ValueError("bad sequ. length") self.x, self.y = TOOLS._transform_point(self, m) return self
[ "def", "transform", "(", "self", ",", "m", ")", ":", "if", "len", "(", "m", ")", "!=", "6", ":", "raise", "ValueError", "(", "\"bad sequ. length\"", ")", "self", ".", "x", ",", "self", ".", "y", "=", "TOOLS", ".", "_transform_point", "(", "self", ",", "m", ")", "return", "self" ]
Replace point by its transformation with matrix-like m.
[ "Replace", "point", "by", "its", "transformation", "with", "matrix", "-", "like", "m", "." ]
python
train
39.666667
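In the record above, TOOLS._transform_point applies the standard PDF matrix convention: for m = (a, b, c, d, e, f), x' = x*a + y*c + e and y' = x*b + y*d + f. A pure-Python check of that arithmetic:

def transform_point(x, y, m):
    a, b, c, d, e, f = m
    return x * a + y * c + e, x * b + y * d + f

# Scale by 2 around the origin, then translate by (5, 5):
print(transform_point(1.0, 2.0, (2, 0, 0, 2, 5, 5)))  # (7.0, 9.0)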
tensorpack/tensorpack
examples/FasterRCNN/model_rpn.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_rpn.py#L104-L153
def generate_rpn_proposals(boxes, scores, img_shape, pre_nms_topk, post_nms_topk=None): """ Sample RPN proposals by the following steps: 1. Pick top k1 by scores 2. NMS them 3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output. Args: boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already scores: n float, the logits img_shape: [h, w] pre_nms_topk, post_nms_topk (int): See above. Returns: boxes: kx4 float scores: k logits """ assert boxes.shape.ndims == 2, boxes.shape if post_nms_topk is None: post_nms_topk = pre_nms_topk topk = tf.minimum(pre_nms_topk, tf.size(scores)) topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False) topk_boxes = tf.gather(boxes, topk_indices) topk_boxes = clip_boxes(topk_boxes, img_shape) topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2)) topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1) # nx1x2 each wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1) valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1) # n, topk_valid_boxes_x1y1x2y2 = tf.boolean_mask(topk_boxes_x1y1x2y2, valid) topk_valid_scores = tf.boolean_mask(topk_scores, valid) # TODO not needed topk_valid_boxes_y1x1y2x2 = tf.reshape( tf.reverse(topk_valid_boxes_x1y1x2y2, axis=[2]), (-1, 4), name='nms_input_boxes') nms_indices = tf.image.non_max_suppression( topk_valid_boxes_y1x1y2x2, topk_valid_scores, max_output_size=post_nms_topk, iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH) topk_valid_boxes = tf.reshape(topk_valid_boxes_x1y1x2y2, (-1, 4)) proposal_boxes = tf.gather(topk_valid_boxes, nms_indices) proposal_scores = tf.gather(topk_valid_scores, nms_indices) tf.sigmoid(proposal_scores, name='probs') # for visualization return tf.stop_gradient(proposal_boxes, name='boxes'), tf.stop_gradient(proposal_scores, name='scores')
[ "def", "generate_rpn_proposals", "(", "boxes", ",", "scores", ",", "img_shape", ",", "pre_nms_topk", ",", "post_nms_topk", "=", "None", ")", ":", "assert", "boxes", ".", "shape", ".", "ndims", "==", "2", ",", "boxes", ".", "shape", "if", "post_nms_topk", "is", "None", ":", "post_nms_topk", "=", "pre_nms_topk", "topk", "=", "tf", ".", "minimum", "(", "pre_nms_topk", ",", "tf", ".", "size", "(", "scores", ")", ")", "topk_scores", ",", "topk_indices", "=", "tf", ".", "nn", ".", "top_k", "(", "scores", ",", "k", "=", "topk", ",", "sorted", "=", "False", ")", "topk_boxes", "=", "tf", ".", "gather", "(", "boxes", ",", "topk_indices", ")", "topk_boxes", "=", "clip_boxes", "(", "topk_boxes", ",", "img_shape", ")", "topk_boxes_x1y1x2y2", "=", "tf", ".", "reshape", "(", "topk_boxes", ",", "(", "-", "1", ",", "2", ",", "2", ")", ")", "topk_boxes_x1y1", ",", "topk_boxes_x2y2", "=", "tf", ".", "split", "(", "topk_boxes_x1y1x2y2", ",", "2", ",", "axis", "=", "1", ")", "# nx1x2 each", "wbhb", "=", "tf", ".", "squeeze", "(", "topk_boxes_x2y2", "-", "topk_boxes_x1y1", ",", "axis", "=", "1", ")", "valid", "=", "tf", ".", "reduce_all", "(", "wbhb", ">", "cfg", ".", "RPN", ".", "MIN_SIZE", ",", "axis", "=", "1", ")", "# n,", "topk_valid_boxes_x1y1x2y2", "=", "tf", ".", "boolean_mask", "(", "topk_boxes_x1y1x2y2", ",", "valid", ")", "topk_valid_scores", "=", "tf", ".", "boolean_mask", "(", "topk_scores", ",", "valid", ")", "# TODO not needed", "topk_valid_boxes_y1x1y2x2", "=", "tf", ".", "reshape", "(", "tf", ".", "reverse", "(", "topk_valid_boxes_x1y1x2y2", ",", "axis", "=", "[", "2", "]", ")", ",", "(", "-", "1", ",", "4", ")", ",", "name", "=", "'nms_input_boxes'", ")", "nms_indices", "=", "tf", ".", "image", ".", "non_max_suppression", "(", "topk_valid_boxes_y1x1y2x2", ",", "topk_valid_scores", ",", "max_output_size", "=", "post_nms_topk", ",", "iou_threshold", "=", "cfg", ".", "RPN", ".", "PROPOSAL_NMS_THRESH", ")", "topk_valid_boxes", "=", "tf", ".", "reshape", "(", "topk_valid_boxes_x1y1x2y2", ",", "(", "-", "1", ",", "4", ")", ")", "proposal_boxes", "=", "tf", ".", "gather", "(", "topk_valid_boxes", ",", "nms_indices", ")", "proposal_scores", "=", "tf", ".", "gather", "(", "topk_valid_scores", ",", "nms_indices", ")", "tf", ".", "sigmoid", "(", "proposal_scores", ",", "name", "=", "'probs'", ")", "# for visualization", "return", "tf", ".", "stop_gradient", "(", "proposal_boxes", ",", "name", "=", "'boxes'", ")", ",", "tf", ".", "stop_gradient", "(", "proposal_scores", ",", "name", "=", "'scores'", ")" ]
Sample RPN proposals by the following steps:
1. Pick top k1 by scores
2. NMS them
3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output.

Args:
    boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already
    scores: n float, the logits
    img_shape: [h, w]
    pre_nms_topk, post_nms_topk (int): See above.

Returns:
    boxes: kx4 float
    scores: k logits
[ "Sample", "RPN", "proposals", "by", "the", "following", "steps", ":", "1", ".", "Pick", "top", "k1", "by", "scores", "2", ".", "NMS", "them", "3", ".", "Pick", "top", "k2", "by", "scores", ".", "Default", "k2", "==", "k1", "i", ".", "e", ".", "does", "not", "filter", "the", "NMS", "output", "." ]
python
train
40.68
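The record's pipeline is top-k selection, box clipping, a minimum-size filter, then non-max suppression. The greedy suppression step that tf.image.non_max_suppression performs can be sketched in plain NumPy — an illustration under the usual x1, y1, x2, y2 box convention, not tensorpack or TF code:

import numpy as np

def box_area(b):
    return (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])

def nms(boxes, scores, iou_threshold, max_output_size):
    # greedy: keep the best-scoring box, drop boxes overlapping it too much
    order = np.argsort(scores)[::-1]
    keep = []
    while order.size > 0 and len(keep) < max_output_size:
        i, rest = order[0], order[1:]
        keep.append(i)
        xy1 = np.maximum(boxes[i, :2], boxes[rest, :2])
        xy2 = np.minimum(boxes[i, 2:], boxes[rest, 2:])
        inter = np.clip(xy2 - xy1, 0, None).prod(axis=1)
        iou = inter / (box_area(boxes[i]) + box_area(boxes[rest]) - inter)
        order = rest[iou <= iou_threshold]
    return np.array(keep, dtype=np.int64)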
NoviceLive/intellicoder
intellicoder/utils.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L57-L62
def run_program(program, *args):
    """Wrap subprocess.check_output to make life easier."""
    real_args = [program]
    real_args.extend(args)
    logging.debug(_('check_output arguments: %s'), real_args)
    check_output(real_args, universal_newlines=True)
[ "def", "run_program", "(", "program", ",", "*", "args", ")", ":", "real_args", "=", "[", "program", "]", "real_args", ".", "extend", "(", "args", ")", "logging", ".", "debug", "(", "_", "(", "'check_output arguments: %s'", ")", ",", "real_args", ")", "check_output", "(", "real_args", ",", "universal_newlines", "=", "True", ")" ]
Wrap subprocess.check_output to make life easier.
[ "Wrap", "subprocess", ".", "check_output", "to", "make", "life", "easier", "." ]
python
train
42.5
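As written, the wrapper discards check_output's return value, so callers never see the command's output. A variant that hands it back, with the gettext-style _() indirection dropped — both departures from the record, not fixes shipped by the source repo — might look like:

import logging
from subprocess import check_output

def run_program(program, *args):
    # same wrapper, but return the captured stdout to the caller
    real_args = [program] + list(args)
    logging.debug('check_output arguments: %s', real_args)
    return check_output(real_args, universal_newlines=True)

print(run_program('echo', 'hello'), end='')  # prints: hello (POSIX systems)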
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L4515-L4522
def prepend(self, frame_p):
    """
    Push frame to the front of the message, i.e. before all other frames.
    Message takes ownership of frame, will destroy it when message is sent.
    Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not
    nullify the caller's frame reference.
    """
    return lib.zmsg_prepend(self._as_parameter_, byref(zframe_p.from_param(frame_p)))
[ "def", "prepend", "(", "self", ",", "frame_p", ")", ":", "return", "lib", ".", "zmsg_prepend", "(", "self", ".", "_as_parameter_", ",", "byref", "(", "zframe_p", ".", "from_param", "(", "frame_p", ")", ")", ")" ]
Push frame to the front of the message, i.e. before all other frames. Message takes ownership of frame, will destroy it when message is sent. Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not nullify the caller's frame reference.
[ "Push", "frame", "to", "the", "front", "of", "the", "message", "i", ".", "e", ".", "before", "all", "other", "frames", ".", "Message", "takes", "ownership", "of", "frame", "will", "destroy", "it", "when", "message", "is", "sent", ".", "Returns", "0", "on", "success", "-", "1", "on", "error", ".", "Deprecates", "zmsg_push", "which", "did", "not", "nullify", "the", "caller", "s", "frame", "reference", "." ]
python
train
49.125
pantsbuild/pants
src/python/pants/goal/goal.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/goal.py#L222-L237
def uninstall_task(self, name):
    """Removes the named task from this goal.

    Allows external plugins to modify the execution plan. Use with caution.

    Note: Does not relax a serialization requirement that originated
    from the uninstalled task's install() call.

    :API: public
    """
    if name in self._task_type_by_name:
        self._task_type_by_name[name].options_scope = None
        del self._task_type_by_name[name]
        self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
    else:
        raise GoalError('Cannot uninstall unknown task: {0}'.format(name))
[ "def", "uninstall_task", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_task_type_by_name", ":", "self", ".", "_task_type_by_name", "[", "name", "]", ".", "options_scope", "=", "None", "del", "self", ".", "_task_type_by_name", "[", "name", "]", "self", ".", "_ordered_task_names", "=", "[", "x", "for", "x", "in", "self", ".", "_ordered_task_names", "if", "x", "!=", "name", "]", "else", ":", "raise", "GoalError", "(", "'Cannot uninstall unknown task: {0}'", ".", "format", "(", "name", ")", ")" ]
Removes the named task from this goal.

Allows external plugins to modify the execution plan. Use with caution.

Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.

:API: public
[ "Removes", "the", "named", "task", "from", "this", "goal", "." ]
python
train
36.6875
onnx/onnxmltools
onnxmltools/utils/main.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/utils/main.py#L57-L75
def set_model_domain(model, domain):
    """
    Sets the domain on the ONNX model.

    :param model: instance of an ONNX model
    :param domain: string containing the domain name of the model

    Example:

    ::
        from onnxmltools.utils import set_model_domain
        onnx_model = load_model("SqueezeNet.onnx")
        set_model_domain(onnx_model, "com.acme")
    """
    if model is None or not isinstance(model, onnx_proto.ModelProto):
        raise ValueError("Model is not a valid ONNX model.")
    if not convert_utils.is_string_type(domain):
        raise ValueError("Domain must be a string type.")
    model.domain = domain
[ "def", "set_model_domain", "(", "model", ",", "domain", ")", ":", "if", "model", "is", "None", "or", "not", "isinstance", "(", "model", ",", "onnx_proto", ".", "ModelProto", ")", ":", "raise", "ValueError", "(", "\"Model is not a valid ONNX model.\"", ")", "if", "not", "convert_utils", ".", "is_string_type", "(", "domain", ")", ":", "raise", "ValueError", "(", "\"Domain must be a string type.\"", ")", "model", ".", "domain", "=", "domain" ]
Sets the domain on the ONNX model.

:param model: instance of an ONNX model
:param domain: string containing the domain name of the model

Example:

::
    from onnxmltools.utils import set_model_domain
    onnx_model = load_model("SqueezeNet.onnx")
    set_model_domain(onnx_model, "com.acme")
[ "Sets", "the", "domain", "on", "the", "ONNX", "model", "." ]
python
train
32.894737
gem/oq-engine
openquake/baselib/hdf5.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/hdf5.py#L376-L385
def set_nbytes(self, key, nbytes=None):
    """
    Set the `nbytes` attribute on the HDF5 object identified by `key`.
    """
    obj = super().__getitem__(key)
    if nbytes is not None:  # size set from outside
        obj.attrs['nbytes'] = nbytes
    else:  # recursively determine the size of the datagroup
        obj.attrs['nbytes'] = nbytes = ByteCounter.get_nbytes(obj)
    return nbytes
[ "def", "set_nbytes", "(", "self", ",", "key", ",", "nbytes", "=", "None", ")", ":", "obj", "=", "super", "(", ")", ".", "__getitem__", "(", "key", ")", "if", "nbytes", "is", "not", "None", ":", "# size set from outside", "obj", ".", "attrs", "[", "'nbytes'", "]", "=", "nbytes", "else", ":", "# recursively determine the size of the datagroup", "obj", ".", "attrs", "[", "'nbytes'", "]", "=", "nbytes", "=", "ByteCounter", ".", "get_nbytes", "(", "obj", ")", "return", "nbytes" ]
Set the `nbytes` attribute on the HDF5 object identified by `key`.
[ "Set", "the", "nbytes", "attribute", "on", "the", "HDF5", "object", "identified", "by", "key", "." ]
python
train
42.3
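ByteCounter.get_nbytes is referenced but not defined in this record. For a datagroup, "recursively determine the size" plausibly means summing the storage of every dataset beneath it; a self-contained h5py sketch of such a helper (an assumption about ByteCounter's behaviour, not its actual code):

import h5py

def get_nbytes(obj):
    # datasets report their own size; groups sum over their children
    if isinstance(obj, h5py.Dataset):
        return obj.size * obj.dtype.itemsize
    return sum(get_nbytes(obj[name]) for name in obj)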
ScienceLogic/amiuploader
amiimporter/AWSUtilities.py
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/AWSUtilities.py#L233-L253
def run_ec2_import(self, config_file_location, description, region='us-east-1'):
    """
    Runs the command to import an uploaded vmdk to aws ec2
    :param config_file_location: config file of import param location
    :param description: description to attach to the import task
    :return: the import task id for the given ami
    """
    import_cmd = "aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'" \
                 " --disk-containers file://{}"\
        .format(description, self.aws_project, region, config_file_location)
    try:
        res = subprocess.check_output(shlex.split(import_cmd), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print "Error importing to ec2"
        print "output: {}".format(e.output)
        sys.exit(5)
    print "got res: {}".format(res)
    res_json = json.loads(res)
    task_running, import_id = self.check_task_status_and_id(res_json)
    return import_id
[ "def", "run_ec2_import", "(", "self", ",", "config_file_location", ",", "description", ",", "region", "=", "'us-east-1'", ")", ":", "import_cmd", "=", "\"aws ec2 import-image --description '{}' --profile '{}' --region '{}' --output 'json'\"", "\" --disk-containers file://{}\"", ".", "format", "(", "description", ",", "self", ".", "aws_project", ",", "region", ",", "config_file_location", ")", "try", ":", "res", "=", "subprocess", ".", "check_output", "(", "shlex", ".", "split", "(", "import_cmd", ")", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "print", "\"Error importing to ec2\"", "print", "\"output: {}\"", ".", "format", "(", "e", ".", "output", ")", "sys", ".", "exit", "(", "5", ")", "print", "\"got res: {}\"", ".", "format", "(", "res", ")", "res_json", "=", "json", ".", "loads", "(", "res", ")", "task_running", ",", "import_id", "=", "self", ".", "check_task_status_and_id", "(", "res_json", ")", "return", "import_id" ]
Runs the command to import an uploaded vmdk to aws ec2
:param config_file_location: config file of import param location
:param description: description to attach to the import task
:return: the import task id for the given ami
[ "Runs", "the", "command", "to", "import", "an", "uploaded", "vmdk", "to", "aws", "ec2", ":", "param", "config_file_location", ":", "config", "file", "of", "import", "param", "location", ":", "param", "description", ":", "description", "to", "attach", "to", "the", "import", "task", ":", "return", ":", "the", "import", "task", "id", "for", "the", "given", "ami" ]
python
train
49.285714
jplusplus/statscraper
statscraper/base_scraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/base_scraper.py#L88-L140
def append(self, val):
    """Connect any new results to the resultset.

    This is where all the heavy lifting is done for creating results:
     - We add a datatype here, so that each result can handle
       validation etc independently. This is so that scraper authors
       don't need to worry about creating and passing around datatype objects.
     - As the scraper author yields result objects, we append them to
       a resultset.
     - This is also where we normalize dialects.
    """
    val.resultset = self
    val.dataset = self.dataset

    # Check result dimensions against available dimensions for this dataset
    if val.dataset:
        dataset_dimensions = self.dataset.dimensions
        for k, v in val.raw_dimensions.items():
            if k not in dataset_dimensions:
                d = Dimension(k)
            else:
                d = dataset_dimensions[k]

            # Normalize if we have a datatype and a foreign dialect
            normalized_value = unicode(v)
            if d.dialect and d.datatype:
                if d.dialect in d.datatype.dialects:
                    for av in d.allowed_values:
                        # Not all allowed_value have all dialects
                        if unicode(v) in av.dialects.get(d.dialect, []):
                            normalized_value = av.value
                            # Use first match
                            # We do not support multiple matches
                            # This is by design.
                            break

            # Create DimensionValue object
            if isinstance(v, DimensionValue):
                dim = v
                v.value = normalized_value
            else:
                if k in dataset_dimensions:
                    dim = DimensionValue(normalized_value, d)
                else:
                    dim = DimensionValue(normalized_value, Dimension())

            val.dimensionvalues.append(dim)

        # Add last list of dimension values to the ResultSet
        # They will usually be the same for each result
        self.dimensionvalues = val.dimensionvalues

    super(ResultSet, self).append(val)
[ "def", "append", "(", "self", ",", "val", ")", ":", "val", ".", "resultset", "=", "self", "val", ".", "dataset", "=", "self", ".", "dataset", "# Check result dimensions against available dimensions for this dataset", "if", "val", ".", "dataset", ":", "dataset_dimensions", "=", "self", ".", "dataset", ".", "dimensions", "for", "k", ",", "v", "in", "val", ".", "raw_dimensions", ".", "items", "(", ")", ":", "if", "k", "not", "in", "dataset_dimensions", ":", "d", "=", "Dimension", "(", "k", ")", "else", ":", "d", "=", "dataset_dimensions", "[", "k", "]", "# Normalize if we have a datatype and a foreign dialect", "normalized_value", "=", "unicode", "(", "v", ")", "if", "d", ".", "dialect", "and", "d", ".", "datatype", ":", "if", "d", ".", "dialect", "in", "d", ".", "datatype", ".", "dialects", ":", "for", "av", "in", "d", ".", "allowed_values", ":", "# Not all allowed_value have all dialects", "if", "unicode", "(", "v", ")", "in", "av", ".", "dialects", ".", "get", "(", "d", ".", "dialect", ",", "[", "]", ")", ":", "normalized_value", "=", "av", ".", "value", "# Use first match", "# We do not support multiple matches", "# This is by design.", "break", "# Create DimensionValue object", "if", "isinstance", "(", "v", ",", "DimensionValue", ")", ":", "dim", "=", "v", "v", ".", "value", "=", "normalized_value", "else", ":", "if", "k", "in", "dataset_dimensions", ":", "dim", "=", "DimensionValue", "(", "normalized_value", ",", "d", ")", "else", ":", "dim", "=", "DimensionValue", "(", "normalized_value", ",", "Dimension", "(", ")", ")", "val", ".", "dimensionvalues", ".", "append", "(", "dim", ")", "# Add last list of dimension values to the ResultSet", "# They will usually be the same for each result", "self", ".", "dimensionvalues", "=", "val", ".", "dimensionvalues", "super", "(", "ResultSet", ",", "self", ")", ".", "append", "(", "val", ")" ]
Connect any new results to the resultset.

This is where all the heavy lifting is done for creating results:
 - We add a datatype here, so that each result can handle
   validation etc independently. This is so that scraper authors
   don't need to worry about creating and passing around datatype objects.
 - As the scraper author yields result objects, we append them to
   a resultset.
 - This is also where we normalize dialects.
[ "Connect", "any", "new", "results", "to", "the", "resultset", "." ]
python
train
43.018868
praekelt/django-moderator
moderator/templatetags/moderator_inclusion_tags.py
https://github.com/praekelt/django-moderator/blob/72f1d5259128ff5a1a0341d4a573bfd561ba4665/moderator/templatetags/moderator_inclusion_tags.py#L8-L22
def report_comment_abuse(context, obj):
    """
    Checks whether a user can report abuse (has not liked comment previously)
    or has reported abuse previously and renders appropriate response.

    If requesting user is part of the 'Moderators' group a vote equal to
    ABUSE_CUTOFF setting will be made, thereby immediately marking the
    comment as abusive.
    """
    context.update({
        'content_obj': obj,
        'vote': -1,
        'content_type': "-".join((obj._meta.app_label, obj._meta.module_name)),
    })
    return context
[ "def", "report_comment_abuse", "(", "context", ",", "obj", ")", ":", "context", ".", "update", "(", "{", "'content_obj'", ":", "obj", ",", "'vote'", ":", "-", "1", ",", "'content_type'", ":", "\"-\"", ".", "join", "(", "(", "obj", ".", "_meta", ".", "app_label", ",", "obj", ".", "_meta", ".", "module_name", ")", ")", ",", "}", ")", "return", "context" ]
Checks whether a user can report abuse (has not liked comment previously) or has reported abuse previously and renders appropriate response. If requesting user is part of the 'Moderators' group a vote equal to ABUSE_CUTOFF setting will be made, thereby immediately marking the comment as abusive.
[ "Checks", "whether", "a", "user", "can", "report", "abuse", "(", "has", "not", "liked", "comment", "previously", ")", "or", "has", "reported", "abuse", "previously", "and", "renders", "appropriate", "response", "." ]
python
train
35.6
inveniosoftware/invenio-github
invenio_github/api.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/api.py#L204-L222
def sync_repo_hook(self, repo_id):
    """Sync a GitHub repo's hook with the locally stored repo."""
    # Get the hook that we may have set in the past
    gh_repo = self.api.repository_with_id(repo_id)
    hooks = (hook.id for hook in gh_repo.hooks()
             if hook.config.get('url', '') == self.webhook_url)
    hook_id = next(hooks, None)

    # If hook on GitHub exists, get or create corresponding db object and
    # enable the hook. Otherwise remove the old hook information.
    if hook_id:
        Repository.enable(user_id=self.user_id, github_id=gh_repo.id,
                          name=gh_repo.full_name, hook=hook_id)
    else:
        Repository.disable(user_id=self.user_id, github_id=gh_repo.id,
                           name=gh_repo.full_name)
[ "def", "sync_repo_hook", "(", "self", ",", "repo_id", ")", ":", "# Get the hook that we may have set in the past", "gh_repo", "=", "self", ".", "api", ".", "repository_with_id", "(", "repo_id", ")", "hooks", "=", "(", "hook", ".", "id", "for", "hook", "in", "gh_repo", ".", "hooks", "(", ")", "if", "hook", ".", "config", ".", "get", "(", "'url'", ",", "''", ")", "==", "self", ".", "webhook_url", ")", "hook_id", "=", "next", "(", "hooks", ",", "None", ")", "# If hook on GitHub exists, get or create corresponding db object and", "# enable the hook. Otherwise remove the old hook information.", "if", "hook_id", ":", "Repository", ".", "enable", "(", "user_id", "=", "self", ".", "user_id", ",", "github_id", "=", "gh_repo", ".", "id", ",", "name", "=", "gh_repo", ".", "full_name", ",", "hook", "=", "hook_id", ")", "else", ":", "Repository", ".", "disable", "(", "user_id", "=", "self", ".", "user_id", ",", "github_id", "=", "gh_repo", ".", "id", ",", "name", "=", "gh_repo", ".", "full_name", ")" ]
Sync a GitHub repo's hook with the locally stored repo.
[ "Sync", "a", "GitHub", "repo", "s", "hook", "with", "the", "locally", "stored", "repo", "." ]
python
train
47.368421
abw333/dominoes
dominoes/search.py
https://github.com/abw333/dominoes/blob/ea9f532c9b834117a5c07d214711515872f7537e/dominoes/search.py#L40-L81
def alphabeta(game, alpha_beta=(-float('inf'), float('inf')),
              player=dominoes.players.identity):
    '''
    Runs minimax search with alpha-beta pruning on the provided game.

    :param Game game: game to search
    :param tuple alpha_beta: a tuple of two floats that indicate the initial
                             values of alpha and beta, respectively. The
                             default is (-inf, inf).
    :param callable player: player used to sort moves to be explored.
                            Ordering better moves first may significantly
                            reduce the amount of moves that need to be
                            explored. The identity player is the default.
    '''
    # base case - game is over
    if game.result is not None:
        return [], game.result.points

    if game.turn % 2:
        # minimizing player
        best_value = float('inf')
        op = operator.lt
        update = lambda ab, v: (ab[0], min(ab[1], v))
    else:
        # maximizing player
        best_value = -float('inf')
        op = operator.gt
        update = lambda ab, v: (max(ab[0], v), ab[1])

    # recursive case - game is not over
    for move, new_game in make_moves(game, player):
        moves, value = alphabeta(new_game, alpha_beta, player)

        if op(value, best_value):
            best_value = value
            best_moves = moves
            best_moves.insert(0, move)

        alpha_beta = update(alpha_beta, best_value)
        if alpha_beta[1] <= alpha_beta[0]:
            # alpha-beta cutoff
            break

    return best_moves, best_value
[ "def", "alphabeta", "(", "game", ",", "alpha_beta", "=", "(", "-", "float", "(", "'inf'", ")", ",", "float", "(", "'inf'", ")", ")", ",", "player", "=", "dominoes", ".", "players", ".", "identity", ")", ":", "# base case - game is over", "if", "game", ".", "result", "is", "not", "None", ":", "return", "[", "]", ",", "game", ".", "result", ".", "points", "if", "game", ".", "turn", "%", "2", ":", "# minimizing player", "best_value", "=", "float", "(", "'inf'", ")", "op", "=", "operator", ".", "lt", "update", "=", "lambda", "ab", ",", "v", ":", "(", "ab", "[", "0", "]", ",", "min", "(", "ab", "[", "1", "]", ",", "v", ")", ")", "else", ":", "# maximizing player", "best_value", "=", "-", "float", "(", "'inf'", ")", "op", "=", "operator", ".", "gt", "update", "=", "lambda", "ab", ",", "v", ":", "(", "max", "(", "ab", "[", "0", "]", ",", "v", ")", ",", "ab", "[", "1", "]", ")", "# recursive case - game is not over", "for", "move", ",", "new_game", "in", "make_moves", "(", "game", ",", "player", ")", ":", "moves", ",", "value", "=", "alphabeta", "(", "new_game", ",", "alpha_beta", ",", "player", ")", "if", "op", "(", "value", ",", "best_value", ")", ":", "best_value", "=", "value", "best_moves", "=", "moves", "best_moves", ".", "insert", "(", "0", ",", "move", ")", "alpha_beta", "=", "update", "(", "alpha_beta", ",", "best_value", ")", "if", "alpha_beta", "[", "1", "]", "<=", "alpha_beta", "[", "0", "]", ":", "# alpha-beta cutoff", "break", "return", "best_moves", ",", "best_value" ]
Runs minimax search with alpha-beta pruning on the provided game.

:param Game game: game to search
:param tuple alpha_beta: a tuple of two floats that indicate the initial
                         values of alpha and beta, respectively. The
                         default is (-inf, inf).
:param callable player: player used to sort moves to be explored.
                        Ordering better moves first may significantly
                        reduce the amount of moves that need to be
                        explored. The identity player is the default.
[ "Runs", "minimax", "search", "with", "alpha", "-", "beta", "pruning", "on", "the", "provided", "game", "." ]
python
train
37.833333
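Stripped of the dominoes-specific Game and make_moves machinery, the same pruning scheme can be shown on a toy game tree — nested lists for internal nodes and numbers for leaves, purely illustrative:

def alphabeta(node, maximizing, alpha=float('-inf'), beta=float('inf')):
    if isinstance(node, (int, float)):  # leaf: static value
        return node
    if maximizing:
        value = float('-inf')
        for child in node:
            value = max(value, alphabeta(child, False, alpha, beta))
            alpha = max(alpha, value)
            if beta <= alpha:
                break  # beta cutoff: the minimizer will never allow this line
        return value
    value = float('inf')
    for child in node:
        value = min(value, alphabeta(child, True, alpha, beta))
        beta = min(beta, value)
        if beta <= alpha:
            break  # alpha cutoff
    return value

print(alphabeta([[3, 5], [2, [9, 1]]], True))  # -> 3; the [9, 1] subtree is pruned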
rosenbrockc/fortpy
fortpy/parsers/docstring.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/parsers/docstring.py#L478-L486
def _parse_docline(self, line, container):
    """Parses a single line of code following a docblock to see if it
    is a valid code element that can be decorated. If so, return the name
    of the code element."""
    match = self.RE_DECOR.match(line)
    if match is not None:
        return "{}.{}".format(container.name, match.group("name"))
    else:
        return container.name
[ "def", "_parse_docline", "(", "self", ",", "line", ",", "container", ")", ":", "match", "=", "self", ".", "RE_DECOR", ".", "match", "(", "line", ")", "if", "match", "is", "not", "None", ":", "return", "\"{}.{}\"", ".", "format", "(", "container", ".", "name", ",", "match", ".", "group", "(", "\"name\"", ")", ")", "else", ":", "return", "container", ".", "name" ]
Parses a single line of code following a docblock to see if it
is a valid code element that can be decorated. If so, return the name
of the code element.
[ "Parses", "a", "single", "line", "of", "code", "following", "a", "docblock", "to", "see", "if", "it", "as", "a", "valid", "code", "element", "that", "can", "be", "decorated", ".", "If", "so", "return", "the", "name", "of", "the", "code", "element", "." ]
python
train
45.444444
frc1418/tbapy
tbapy/main.py
https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L120-L139
def team_events(self, team, year=None, simple=False, keys=False):
    """
    Get team events a team has participated in.

    :param team: Team to get events for.
    :param year: Year to get events from.
    :param simple: Get only vital data.
    :param keys: Get just the keys of the events. Set to True if you only
        need the keys of each event and not their full data.
    :return: List of strings or Teams
    """
    if year:
        if keys:
            return self._get('team/%s/events/%s/keys' % (self.team_key(team), year))
        else:
            return [Event(raw) for raw in self._get('team/%s/events/%s%s' % (self.team_key(team), year, '/simple' if simple else ''))]
    else:
        if keys:
            return self._get('team/%s/events/keys' % self.team_key(team))
        else:
            return [Event(raw) for raw in self._get('team/%s/events%s' % (self.team_key(team), '/simple' if simple else ''))]
[ "def", "team_events", "(", "self", ",", "team", ",", "year", "=", "None", ",", "simple", "=", "False", ",", "keys", "=", "False", ")", ":", "if", "year", ":", "if", "keys", ":", "return", "self", ".", "_get", "(", "'team/%s/events/%s/keys'", "%", "(", "self", ".", "team_key", "(", "team", ")", ",", "year", ")", ")", "else", ":", "return", "[", "Event", "(", "raw", ")", "for", "raw", "in", "self", ".", "_get", "(", "'team/%s/events/%s%s'", "%", "(", "self", ".", "team_key", "(", "team", ")", ",", "year", ",", "'/simple'", "if", "simple", "else", "''", ")", ")", "]", "else", ":", "if", "keys", ":", "return", "self", ".", "_get", "(", "'team/%s/events/keys'", "%", "self", ".", "team_key", "(", "team", ")", ")", "else", ":", "return", "[", "Event", "(", "raw", ")", "for", "raw", "in", "self", ".", "_get", "(", "'team/%s/events%s'", "%", "(", "self", ".", "team_key", "(", "team", ")", ",", "'/simple'", "if", "simple", "else", "''", ")", ")", "]" ]
Get team events a team has participated in.

:param team: Team to get events for.
:param year: Year to get events from.
:param simple: Get only vital data.
:param keys: Get just the keys of the events. Set to True if you only need
    the keys of each event and not their full data.
:return: List of strings or Teams
[ "Get", "team", "events", "a", "team", "has", "participated", "in", "." ]
python
train
48.8
fm4d/KickassAPI
KickassAPI.py
https://github.com/fm4d/KickassAPI/blob/6ecc6846dcec0d6f6e493bf776031aa92d55604f/KickassAPI.py#L60-L65
def lookup(self):
    """
    Prints name, author, size and age
    """
    print "%s by %s, size: %s, uploaded %s ago" % (self.name, self.author,
                                                   self.size, self.age)
[ "def", "lookup", "(", "self", ")", ":", "print", "\"%s by %s, size: %s, uploaded %s ago\"", "%", "(", "self", ".", "name", ",", "self", ".", "author", ",", "self", ".", "size", ",", "self", ".", "age", ")" ]
Prints name, author, size and age
[ "Prints", "name", "author", "size", "and", "age" ]
python
train
38.833333
CalebBell/fluids
fluids/particle_size_distribution.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/particle_size_distribution.py#L851-L887
def pdf_Gates_Gaudin_Schuhman_basis_integral(d, d_characteristic, m, n):
    r'''Calculates the integral of the multiplication of d^n by the Gates,
    Gaudin and Schuhman (GGS) model given a particle diameter `d`,
    characteristic (maximum) particle diameter `d_characteristic`, and
    exponent `m`.

    .. math::
        \int d^n\cdot q(d)\; dd = \frac{m}{m+n} d^n
        \left(\frac{d}{d_{characteristic}}\right)^m

    Parameters
    ----------
    d : float
        Specified particle diameter, [m]
    d_characteristic : float
        Characteristic particle diameter; in this model, it is the largest
        particle size diameter in the distribution, [m]
    m : float
        Particle size distribution exponent, [-]
    n : int
        Exponent of the multiplied n, [-]

    Returns
    -------
    pdf_basis_integral : float
        Integral of GGS pdf multiplied by d^n, [-]

    Notes
    -----
    This integral does not have any numerical issues as `d` approaches 0.

    Examples
    --------
    >>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3)
    -10136984887.543015
    '''
    return m/(m+n)*d**n*(d/d_characteristic)**m
[ "def", "pdf_Gates_Gaudin_Schuhman_basis_integral", "(", "d", ",", "d_characteristic", ",", "m", ",", "n", ")", ":", "return", "m", "/", "(", "m", "+", "n", ")", "*", "d", "**", "n", "*", "(", "d", "/", "d_characteristic", ")", "**", "m" ]
r'''Calculates the integral of the multiplication of d^n by the Gates,
Gaudin and Schuhman (GGS) model given a particle diameter `d`,
characteristic (maximum) particle diameter `d_characteristic`, and
exponent `m`.

.. math::
    \int d^n\cdot q(d)\; dd = \frac{m}{m+n} d^n
    \left(\frac{d}{d_{characteristic}}\right)^m

Parameters
----------
d : float
    Specified particle diameter, [m]
d_characteristic : float
    Characteristic particle diameter; in this model, it is the largest
    particle size diameter in the distribution, [m]
m : float
    Particle size distribution exponent, [-]
n : int
    Exponent of the multiplied n, [-]

Returns
-------
pdf_basis_integral : float
    Integral of GGS pdf multiplied by d^n, [-]

Notes
-----
This integral does not have any numerical issues as `d` approaches 0.

Examples
--------
>>> pdf_Gates_Gaudin_Schuhman_basis_integral(d=2E-4, d_characteristic=1E-3, m=2.3, n=-3)
-10136984887.543015
[ "r", "Calculates", "the", "integral", "of", "the", "multiplication", "of", "d^n", "by", "the", "Gates", "Gaudin", "and", "Schuhman", "(", "GGS", ")", "model", "given", "a", "particle", "diameter", "d", "characteristic", "(", "maximum", ")", "particle", "diameter", "d_characteristic", "and", "exponent", "m", ".", "..", "math", "::", "\\", "int", "d^n", "\\", "cdot", "q", "(", "d", ")", "\\", ";", "dd", "=", "\\", "frac", "{", "m", "}", "{", "m", "+", "n", "}", "d^n", "\\", "left", "(", "\\", "frac", "{", "d", "}", "{", "d_", "{", "characteristic", "}}", "\\", "right", ")", "^m", "Parameters", "----------", "d", ":", "float", "Specified", "particle", "diameter", "[", "m", "]", "d_characteristic", ":", "float", "Characteristic", "particle", "diameter", ";", "in", "this", "model", "it", "is", "the", "largest", "particle", "size", "diameter", "in", "the", "distribution", "[", "m", "]", "m", ":", "float", "Particle", "size", "distribution", "exponent", "[", "-", "]", "n", ":", "int", "Exponent", "of", "the", "multiplied", "n", "[", "-", "]" ]
python
train
31.864865
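The closed form can be spot-checked against numerical quadrature of d^n * q(d), taking the GGS density to be q(d) = m * d**(m - 1) / d_characteristic**m on (0, d_characteristic] — that density is inferred from the model name, not stated in the record:

from scipy.integrate import quad

def ggs_basis_integral(d, d_characteristic, m, n):
    return m/(m + n)*d**n*(d/d_characteristic)**m

dc, m, n = 1e-3, 2.3, 2  # a positive n keeps the integrand finite at 0
numeric, _ = quad(lambda d: d**n * m * d**(m - 1) / dc**m, 0, 2e-4)
closed = ggs_basis_integral(2e-4, dc, m, n)  # antiderivative vanishes at d=0
print(numeric, closed)  # the two agree to quadrature precision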
Clinical-Genomics/scout
scout/adapter/mongo/clinvar.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/clinvar.py#L34-L68
def delete_submission(self, submission_id):
    """Deletes a Clinvar submission object, along with all associated
    clinvar objects (variants and casedata)

    Args:
        submission_id(str): the ID of the submission to be deleted

    Returns:
        deleted_objects(int): the number of associated objects removed (variants and/or casedata)
        deleted_submissions(int): 1 if it's deleted, 0 if something went wrong
    """
    LOG.info("Deleting clinvar submission %s", submission_id)
    submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)})
    submission_variants = submission_obj.get('variant_data')
    submission_casedata = submission_obj.get('case_data')

    submission_objects = []

    if submission_variants and submission_casedata:
        submission_objects = submission_variants + submission_casedata
    elif submission_variants:
        submission_objects = submission_variants
    elif submission_casedata:
        submission_objects = submission_casedata

    # Delete all variants and casedata objects associated with this submission
    result = self.clinvar_collection.delete_many({'_id': {"$in": submission_objects}})
    deleted_objects = result.deleted_count

    # Delete the submission itself
    result = self.clinvar_submission_collection.delete_one({'_id': ObjectId(submission_id)})
    deleted_submissions = result.deleted_count

    #return deleted_count, deleted_submissions
    return deleted_objects, deleted_submissions
[ "def", "delete_submission", "(", "self", ",", "submission_id", ")", ":", "LOG", ".", "info", "(", "\"Deleting clinvar submission %s\"", ",", "submission_id", ")", "submission_obj", "=", "self", ".", "clinvar_submission_collection", ".", "find_one", "(", "{", "'_id'", ":", "ObjectId", "(", "submission_id", ")", "}", ")", "submission_variants", "=", "submission_obj", ".", "get", "(", "'variant_data'", ")", "submission_casedata", "=", "submission_obj", ".", "get", "(", "'case_data'", ")", "submission_objects", "=", "[", "]", "if", "submission_variants", "and", "submission_casedata", ":", "submission_objects", "=", "submission_variants", "+", "submission_casedata", "elif", "submission_variants", ":", "submission_objects", "=", "submission_variants", "elif", "submission_casedata", ":", "submission_objects", "=", "submission_casedata", "# Delete all variants and casedata objects associated with this submission", "result", "=", "self", ".", "clinvar_collection", ".", "delete_many", "(", "{", "'_id'", ":", "{", "\"$in\"", ":", "submission_objects", "}", "}", ")", "deleted_objects", "=", "result", ".", "deleted_count", "# Delete the submission itself", "result", "=", "self", ".", "clinvar_submission_collection", ".", "delete_one", "(", "{", "'_id'", ":", "ObjectId", "(", "submission_id", ")", "}", ")", "deleted_submissions", "=", "result", ".", "deleted_count", "#return deleted_count, deleted_submissions", "return", "deleted_objects", ",", "deleted_submissions" ]
Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata)

Args:
    submission_id(str): the ID of the submission to be deleted

Returns:
    deleted_objects(int): the number of associated objects removed (variants and/or casedata)
    deleted_submissions(int): 1 if it's deleted, 0 if something went wrong
[ "Deletes", "a", "Clinvar", "submission", "object", "along", "with", "all", "associated", "clinvar", "objects", "(", "variants", "and", "casedata", ")" ]
python
test
45.628571
kylejusticemagnuson/pyti
pyti/bollinger_bands.py
https://github.com/kylejusticemagnuson/pyti/blob/2f78430dfd60a0d20f4e7fc0cb4588c03107c4b2/pyti/bollinger_bands.py#L102-L117
def percent_bandwidth(data, period, std=2.0):
    """
    Percent Bandwidth.

    Formula:
    %_bw = (data() - l_bb()) / bb_range()
    """
    catch_errors.check_for_period_error(data, period)

    period = int(period)
    percent_bandwidth = ((np.array(data) -
                          lower_bollinger_band(data, period, std)) /
                         bb_range(data, period, std))

    return percent_bandwidth
[ "def", "percent_bandwidth", "(", "data", ",", "period", ",", "std", "=", "2.0", ")", ":", "catch_errors", ".", "check_for_period_error", "(", "data", ",", "period", ")", "period", "=", "int", "(", "period", ")", "percent_bandwidth", "=", "(", "(", "np", ".", "array", "(", "data", ")", "-", "lower_bollinger_band", "(", "data", ",", "period", ",", "std", ")", ")", "/", "bb_range", "(", "data", ",", "period", ",", "std", ")", ")", "return", "percent_bandwidth" ]
Percent Bandwidth.

Formula:
%_bw = (data() - l_bb()) / bb_range()
[ "Percent", "Bandwidth", "." ]
python
train
26.5
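pyti's lower_bollinger_band and bb_range helpers aren't shown in this record; a self-contained NumPy sketch of the same %B quantity, using a trailing rolling mean and standard deviation (the window alignment is an assumption, not pyti's exact convention):

import numpy as np

def percent_b(data, period, num_std=2.0):
    data = np.asarray(data, dtype=float)
    # trailing windows; output aligns with data[period - 1:]
    windows = np.lib.stride_tricks.sliding_window_view(data, period)
    mid, sigma = windows.mean(axis=1), windows.std(axis=1)
    lower, upper = mid - num_std * sigma, mid + num_std * sigma
    return (data[period - 1:] - lower) / (upper - lower)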
dpmcmlxxvi/pixelscan
pixelscan/pixelscan.py
https://github.com/dpmcmlxxvi/pixelscan/blob/d641207b13a8fc5bf7ac9964b982971652bb0a7e/pixelscan/pixelscan.py#L186-L193
def next(self):
    """Next point in iteration
    """
    if self.count < len(self.reservoir):
        self.count += 1
        return self.reservoir[self.count-1]
    raise StopIteration("Reservoir exhausted")
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "count", "<", "len", "(", "self", ".", "reservoir", ")", ":", "self", ".", "count", "+=", "1", "return", "self", ".", "reservoir", "[", "self", ".", "count", "-", "1", "]", "raise", "StopIteration", "(", "\"Reservoir exhausted\"", ")" ]
Next point in iteration
[ "Next", "point", "in", "iteration" ]
python
train
28.5
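The record only shows the iterator plumbing; filling self.reservoir is the interesting part. The classic reservoir-sampling algorithm the class name suggests — a guess at intent, since pixelscan's actual fill logic isn't shown here — is:

import random

def reservoir_sample(iterable, k):
    # keep the first k items, then replace a random slot with probability k/i
    sample = []
    for i, item in enumerate(iterable, 1):
        if i <= k:
            sample.append(item)
        else:
            j = random.randrange(i)  # uniform in [0, i)
            if j < k:
                sample[j] = item
    return sample

print(reservoir_sample(range(1000), 5))  # five items, each kept with prob 5/1000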
rackerlabs/simpl
simpl/git.py
https://github.com/rackerlabs/simpl/blob/60ed3336a931cd6a7a7246e60f26165d9dc7c99c/simpl/git.py#L517-L546
def init(cls, repo_dir=None, temp=False, initial_commit=False):
    """Run `git init` in the repo_dir.

    Defaults to current working directory if repo_dir is not supplied.

    If 'temp' is True, a temporary directory will be created for you
    and the repository will be initialized. The tempdir is scheduled
    for deletion (when the process exits) through an exit function
    registered with the atexit module. If 'temp' is True, repo_dir is
    ignored.
    """
    if temp:
        suffix = '.temp_simpl_GitRepo'
        repo_dir = create_tempdir(suffix=suffix, delete=True)
    else:
        repo_dir = repo_dir or os.getcwd()

    git_init(repo_dir)
    instance = cls(repo_dir)
    # NOTE(larsbutler): If we wanted to be defensive about this and favor
    # compatibility over elegance, we could just automatically add a
    # `git commit` (empty, no message) after every `git init`. I would
    # recommend doing this in the :class:`GitRepo` class, not in the
    # module-level util functions. Adding an extra commit shouldn't cause
    # any problems.
    if initial_commit:
        # unknown revision, needs a commit to run most commands
        instance.commit(message='Initial commit', amend=False, stage=False)
    return instance
[ "def", "init", "(", "cls", ",", "repo_dir", "=", "None", ",", "temp", "=", "False", ",", "initial_commit", "=", "False", ")", ":", "if", "temp", ":", "suffix", "=", "'.temp_simpl_GitRepo'", "repo_dir", "=", "create_tempdir", "(", "suffix", "=", "suffix", ",", "delete", "=", "True", ")", "else", ":", "repo_dir", "=", "repo_dir", "or", "os", ".", "getcwd", "(", ")", "git_init", "(", "repo_dir", ")", "instance", "=", "cls", "(", "repo_dir", ")", "# NOTE(larsbutler): If we wanted to be defensive about this and favor", "# compatibility over elegance, we could just automatically add a", "# `git commit` (empty, no message) after every `git init`. I would", "# recommend doing this in the :class:`GitRepo` class, not in the", "# module-level util functions. Adding an extra commit shouldn't cause", "# any problems.", "if", "initial_commit", ":", "# unknown revision, needs a commit to run most commands", "instance", ".", "commit", "(", "message", "=", "'Initial commit'", ",", "amend", "=", "False", ",", "stage", "=", "False", ")", "return", "instance" ]
Run `git init` in the repo_dir. Defaults to current working directory if repo_dir is not supplied. If 'temp' is True, a temporary directory will be created for you and the repository will be initialized. The tempdir is scheduled for deletion (when the process exits) through an exit function registered with the atexit module. If 'temp' is True, repo_dir is ignored.
[ "Run", "git", "init", "in", "the", "repo_dir", "." ]
python
train
44.633333
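create_tempdir is referenced but not shown. Given the docstring — a temporary directory whose deletion is registered with the atexit module — a plausible implementation (an assumption, not simpl's actual helper) is:

import atexit
import shutil
import tempfile

def create_tempdir(suffix='', delete=True):
    path = tempfile.mkdtemp(suffix=suffix)
    if delete:
        # remove the directory when the interpreter exits
        atexit.register(shutil.rmtree, path, ignore_errors=True)
    return path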
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1246-L1269
def get_asset_composition_design_session(self):
    """Gets the session for creating asset compositions.

    return: (osid.repository.AssetCompositionDesignSession) - an
            AssetCompositionDesignSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_composition_design() is false
    compliance: optional - This method must be implemented if
                supports_asset_composition_design() is true.
    """
    if not self.supports_asset_composition_design():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed('import error')
    try:
        session = sessions.AssetCompositionDesignSession(proxy=self._proxy,
                                                         runtime=self._runtime)
    except AttributeError:
        raise OperationFailed('attribute error')
    return session
[ "def", "get_asset_composition_design_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_asset_composition_design", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "OperationFailed", "(", "'import error'", ")", "try", ":", "session", "=", "sessions", ".", "AssetCompositionDesignSession", "(", "proxy", "=", "self", ".", "_proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "OperationFailed", "(", "'attribute error'", ")", "return", "session" ]
Gets the session for creating asset compositions.

return: (osid.repository.AssetCompositionDesignSession) - an
        AssetCompositionDesignSession
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - supports_asset_composition_design() is false
compliance: optional - This method must be implemented if
            supports_asset_composition_design() is true.
[ "Gets", "the", "session", "for", "creating", "asset", "compositions", "." ]
python
train
41.666667
tensorflow/hub
tensorflow_hub/module.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/module.py#L341-L349
def variables(self):
    """Returns the list of all tf.Variables created by module instantiation."""
    result = []
    for _, value in sorted(self.variable_map.items()):
        if isinstance(value, list):
            result.extend(value)
        else:
            result.append(value)
    return result
[ "def", "variables", "(", "self", ")", ":", "result", "=", "[", "]", "for", "_", ",", "value", "in", "sorted", "(", "self", ".", "variable_map", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "result", ".", "extend", "(", "value", ")", "else", ":", "result", ".", "append", "(", "value", ")", "return", "result" ]
Returns the list of all tf.Variables created by module instantiation.
[ "Returns", "the", "list", "of", "all", "tf", ".", "Variables", "created", "by", "module", "instantiation", "." ]
python
train
31.666667
ryanvarley/ExoData
exodata/example.py
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/example.py#L85-L113
def genExamplePlanet(binaryLetter=''):
    """ Creates a fake planet with some defaults

    :param `binaryLetter`: host star is part of a binary with letter binaryLetter
    :return:
    """
    planetPar = PlanetParameters()
    planetPar.addParam('discoverymethod', 'transit')
    planetPar.addParam('discoveryyear', '2001')
    planetPar.addParam('eccentricity', '0.09')
    planetPar.addParam('inclination', '89.2')
    planetPar.addParam('lastupdate', '12/12/08')
    planetPar.addParam('mass', '3.9')
    planetPar.addParam('name', 'Example Star {0}{1} b'.format(ac._ExampleSystemCount, binaryLetter))
    planetPar.addParam('period', '111.2')
    planetPar.addParam('radius', '0.92')
    planetPar.addParam('semimajoraxis', '0.449')
    planetPar.addParam('temperature', '339.6')
    planetPar.addParam('transittime', '2454876.344')
    planetPar.addParam('separation', '330', {'unit': 'AU'})

    examplePlanet = Planet(planetPar.params)
    examplePlanet.flags.addFlag('Fake')

    exampleStar = genExampleStar(binaryLetter=binaryLetter)
    exampleStar._addChild(examplePlanet)
    examplePlanet.parent = exampleStar

    return examplePlanet
[ "def", "genExamplePlanet", "(", "binaryLetter", "=", "''", ")", ":", "planetPar", "=", "PlanetParameters", "(", ")", "planetPar", ".", "addParam", "(", "'discoverymethod'", ",", "'transit'", ")", "planetPar", ".", "addParam", "(", "'discoveryyear'", ",", "'2001'", ")", "planetPar", ".", "addParam", "(", "'eccentricity'", ",", "'0.09'", ")", "planetPar", ".", "addParam", "(", "'inclination'", ",", "'89.2'", ")", "planetPar", ".", "addParam", "(", "'lastupdate'", ",", "'12/12/08'", ")", "planetPar", ".", "addParam", "(", "'mass'", ",", "'3.9'", ")", "planetPar", ".", "addParam", "(", "'name'", ",", "'Example Star {0}{1} b'", ".", "format", "(", "ac", ".", "_ExampleSystemCount", ",", "binaryLetter", ")", ")", "planetPar", ".", "addParam", "(", "'period'", ",", "'111.2'", ")", "planetPar", ".", "addParam", "(", "'radius'", ",", "'0.92'", ")", "planetPar", ".", "addParam", "(", "'semimajoraxis'", ",", "'0.449'", ")", "planetPar", ".", "addParam", "(", "'temperature'", ",", "'339.6'", ")", "planetPar", ".", "addParam", "(", "'transittime'", ",", "'2454876.344'", ")", "planetPar", ".", "addParam", "(", "'separation'", ",", "'330'", ",", "{", "'unit'", ":", "'AU'", "}", ")", "examplePlanet", "=", "Planet", "(", "planetPar", ".", "params", ")", "examplePlanet", ".", "flags", ".", "addFlag", "(", "'Fake'", ")", "exampleStar", "=", "genExampleStar", "(", "binaryLetter", "=", "binaryLetter", ")", "exampleStar", ".", "_addChild", "(", "examplePlanet", ")", "examplePlanet", ".", "parent", "=", "exampleStar", "return", "examplePlanet" ]
Creates a fake planet with some defaults

:param `binaryLetter`: host star is part of a binary with letter binaryLetter
:return:
[ "Creates", "a", "fake", "planet", "with", "some", "defaults", ":", "param", "binaryLetter", ":", "host", "star", "is", "part", "of", "a", "binary", "with", "letter", "binaryletter", ":", "return", ":" ]
python
train
38.793103
Josef-Friedrich/phrydy
phrydy/mediafile.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L933-L936
def deserialize(self, apic_frame):
    """Convert APIC frame into Image."""
    return Image(data=apic_frame.data, desc=apic_frame.desc,
                 type=apic_frame.type)
[ "def", "deserialize", "(", "self", ",", "apic_frame", ")", ":", "return", "Image", "(", "data", "=", "apic_frame", ".", "data", ",", "desc", "=", "apic_frame", ".", "desc", ",", "type", "=", "apic_frame", ".", "type", ")" ]
Convert APIC frame into Image.
[ "Convert", "APIC", "frame", "into", "Image", "." ]
python
train
46
rhattersley/pyepsg
pyepsg.py
https://github.com/rhattersley/pyepsg/blob/0ddd79287f8d42483c8f70ce50aba1cc6b5d780a/pyepsg.py#L186-L218
def domain_of_validity(self):
    """
    Return the domain of validity for this CRS as:
        (west, east, south, north).

    For example::

        >>> print(get(21781).domain_of_validity())
        [5.96, 10.49, 45.82, 47.81]
    """
    # TODO: Generalise interface to return a polygon? (Can we find
    # something that uses a polygon instead?)
    domain = self.element.find(GML_NS + 'domainOfValidity')
    domain_href = domain.attrib[XLINK_NS + 'href']
    url = '{prefix}{code}.gml?download'.format(prefix=EPSG_IO_URL,
                                               code=domain_href)
    xml = requests.get(url).content
    gml = ET.fromstring(xml)

    def extract_bound(tag):
        ns = '{http://www.isotc211.org/2005/gmd}'
        xpath = './/{ns}EX_GeographicBoundingBox/{ns}{tag}/'.format(ns=ns, tag=tag)
        bound = gml.find(xpath)
        return float(bound.text)

    tags = ('westBoundLongitude', 'eastBoundLongitude',
            'southBoundLatitude', 'northBoundLatitude')
    bounds = [extract_bound(tag) for tag in tags]
    return bounds
[ "def", "domain_of_validity", "(", "self", ")", ":", "# TODO: Generalise interface to return a polygon? (Can we find", "# something that uses a polygon instead?)", "domain", "=", "self", ".", "element", ".", "find", "(", "GML_NS", "+", "'domainOfValidity'", ")", "domain_href", "=", "domain", ".", "attrib", "[", "XLINK_NS", "+", "'href'", "]", "url", "=", "'{prefix}{code}.gml?download'", ".", "format", "(", "prefix", "=", "EPSG_IO_URL", ",", "code", "=", "domain_href", ")", "xml", "=", "requests", ".", "get", "(", "url", ")", ".", "content", "gml", "=", "ET", ".", "fromstring", "(", "xml", ")", "def", "extract_bound", "(", "tag", ")", ":", "ns", "=", "'{http://www.isotc211.org/2005/gmd}'", "xpath", "=", "'.//{ns}EX_GeographicBoundingBox/{ns}{tag}/'", ".", "format", "(", "ns", "=", "ns", ",", "tag", "=", "tag", ")", "bound", "=", "gml", ".", "find", "(", "xpath", ")", "return", "float", "(", "bound", ".", "text", ")", "tags", "=", "(", "'westBoundLongitude'", ",", "'eastBoundLongitude'", ",", "'southBoundLatitude'", ",", "'northBoundLatitude'", ")", "bounds", "=", "[", "extract_bound", "(", "tag", ")", "for", "tag", "in", "tags", "]", "return", "bounds" ]
Return the domain of validity for this CRS as:
    (west, east, south, north).

For example::

    >>> print(get(21781).domain_of_validity())
    [5.96, 10.49, 45.82, 47.81]
[ "Return", "the", "domain", "of", "validity", "for", "this", "CRS", "as", ":", "(", "west", "east", "south", "north", ")", "." ]
python
train
35.272727
lvieirajr/mongorest
mongorest/collection.py
https://github.com/lvieirajr/mongorest/blob/00f4487ded33254434bc51ff09d48c7a936bd465/mongorest/collection.py#L367-L372
def documents(cls, filter=None, **kwargs):
    """
    Returns a list of Documents if any document is filtered
    """
    documents = [cls(document) for document in cls.find(filter, **kwargs)]
    return [document for document in documents if document.document]
[ "def", "documents", "(", "cls", ",", "filter", "=", "None", ",", "*", "*", "kwargs", ")", ":", "documents", "=", "[", "cls", "(", "document", ")", "for", "document", "in", "cls", ".", "find", "(", "filter", ",", "*", "*", "kwargs", ")", "]", "return", "[", "document", "for", "document", "in", "documents", "if", "document", ".", "document", "]" ]
Returns a list of Documents if any document is filtered
[ "Returns", "a", "list", "of", "Documents", "if", "any", "document", "is", "filtered" ]
python
train
46.166667
brechtm/rinohtype
src/rinoh/number.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/number.py#L70-L76
def romanize(number):
    """Convert `number` to a Roman numeral."""
    roman = []
    for numeral, value in NUMERALS:
        times, number = divmod(number, value)
        roman.append(times * numeral)
    return ''.join(roman)
[ "def", "romanize", "(", "number", ")", ":", "roman", "=", "[", "]", "for", "numeral", ",", "value", "in", "NUMERALS", ":", "times", ",", "number", "=", "divmod", "(", "number", ",", "value", ")", "roman", ".", "append", "(", "times", "*", "numeral", ")", "return", "''", ".", "join", "(", "roman", ")" ]
Convert `number` to a Roman numeral.
[ "Convert", "number", "to", "a", "Roman", "numeral", "." ]
python
train
31.857143
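NUMERALS is referenced but not defined in this record; the standard greedy value table that makes the function runnable (ordering matters — larger values first; rinohtype's actual table may differ in spelling, but any greedy table of this shape works) is:

NUMERALS = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
            ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
            ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))

def romanize(number):
    # greedy: take as many of each numeral as fits, largest value first
    roman = []
    for numeral, value in NUMERALS:
        times, number = divmod(number, value)
        roman.append(times * numeral)
    return ''.join(roman)

print(romanize(1994))  # -> MCMXCIV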
widdowquinn/pyani
pyani/pyani_tools.py
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L51-L55
def add_coverage(self, qname, sname, qcover, scover=None):
    """Add percentage coverage values to self.alignment_coverage."""
    self.alignment_coverage.loc[qname, sname] = qcover
    if scover:
        self.alignment_coverage.loc[sname, qname] = scover
[ "def", "add_coverage", "(", "self", ",", "qname", ",", "sname", ",", "qcover", ",", "scover", "=", "None", ")", ":", "self", ".", "alignment_coverage", ".", "loc", "[", "qname", ",", "sname", "]", "=", "qcover", "if", "scover", ":", "self", ".", "alignment_coverage", ".", "loc", "[", "sname", ",", "qname", "]", "=", "scover" ]
Add percentage coverage values to self.alignment_coverage.
[ "Add", "percentage", "coverage", "values", "to", "self", ".", "alignment_coverage", "." ]
python
train
53.6
inonit/drf-haystack
drf_haystack/mixins.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L107-L114
def get_facet_objects_serializer(self, *args, **kwargs):
    """
    Return the serializer instance which should be used for
    serializing faceted objects.
    """
    facet_objects_serializer_class = self.get_facet_objects_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    return facet_objects_serializer_class(*args, **kwargs)
[ "def", "get_facet_objects_serializer", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "facet_objects_serializer_class", "=", "self", ".", "get_facet_objects_serializer_class", "(", ")", "kwargs", "[", "\"context\"", "]", "=", "self", ".", "get_serializer_context", "(", ")", "return", "facet_objects_serializer_class", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return the serializer instance which should be used for serializing faceted objects.
[ "Return", "the", "serializer", "instance", "which", "should", "be", "used", "for", "serializing", "faceted", "objects", "." ]
python
train
47.25
ashmastaflash/kal-wrapper
kalibrate/fn.py
https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L140-L154
def determine_band_channel(kal_out):
    """Return band, channel, target frequency from kal output."""
    band = ""
    channel = ""
    tgt_freq = ""
    while band == "":
        for line in kal_out.splitlines():
            if "Using " in line and " channel " in line:
                band = str(line.split()[1])
                channel = str(line.split()[3])
                tgt_freq = str(line.split()[4]).replace("(", "").replace(")", "")
        if band == "":
            band = None
    return(band, channel, tgt_freq)
[ "def", "determine_band_channel", "(", "kal_out", ")", ":", "band", "=", "\"\"", "channel", "=", "\"\"", "tgt_freq", "=", "\"\"", "while", "band", "==", "\"\"", ":", "for", "line", "in", "kal_out", ".", "splitlines", "(", ")", ":", "if", "\"Using \"", "in", "line", "and", "\" channel \"", "in", "line", ":", "band", "=", "str", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "channel", "=", "str", "(", "line", ".", "split", "(", ")", "[", "3", "]", ")", "tgt_freq", "=", "str", "(", "line", ".", "split", "(", ")", "[", "4", "]", ")", ".", "replace", "(", "\"(\"", ",", "\"\"", ")", ".", "replace", "(", "\")\"", ",", "\"\"", ")", "if", "band", "==", "\"\"", ":", "band", "=", "None", "return", "(", "band", ",", "channel", ",", "tgt_freq", ")" ]
Return band, channel, target frequency from kal output.
[ "Return", "band", "channel", "target", "frequency", "from", "kal", "output", "." ]
python
train
35.666667
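The parsing indexes into a whitespace-split line, which implies kal output shaped roughly like "Using GSM-900 channel 20 (940.0MHz)". The sample line below is an inference from those indices, not captured kal output:

line = "Using GSM-900 channel 20 (940.0MHz)"  # inferred shape
parts = line.split()
# split() indices: [1] -> band, [3] -> channel, [4] -> "(940.0MHz)" -> strip parens
print(parts[1], parts[3], parts[4].replace("(", "").replace(")", ""))
# -> GSM-900 20 940.0MHz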
swistakm/graceful
src/graceful/authentication.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L331-L365
def try_storage(self, identifier, req, resp, resource, uri_kwargs):
    """Try to find user in configured user storage object.

    Args:
        identifier: User identifier.

    Returns:
        user object.
    """
    if identifier is None:
        user = None
    # note: if user_storage is defined, always use it in order to
    #       authenticate user.
    elif self.user_storage is not None:
        user = self.user_storage.get_user(
            self, identifier, req, resp, resource, uri_kwargs
        )
    # note: some authentication middleware classes may not require
    #       to be initialized with their own user_storage. In such
    #       case this will always authenticate with "synthetic user"
    #       if there is a valid identity.
    elif self.user_storage is None and not self.only_with_storage:
        user = {
            'identified_with': self,
            'identifier': identifier
        }
    else:  # pragma: nocover
        # note: this should not happen if the base class is properly
        #       initialized. Still, user can skip super().__init__() call.
        user = None

    return user
[ "def", "try_storage", "(", "self", ",", "identifier", ",", "req", ",", "resp", ",", "resource", ",", "uri_kwargs", ")", ":", "if", "identifier", "is", "None", ":", "user", "=", "None", "# note: if user_storage is defined, always use it in order to", "# authenticate user.", "elif", "self", ".", "user_storage", "is", "not", "None", ":", "user", "=", "self", ".", "user_storage", ".", "get_user", "(", "self", ",", "identifier", ",", "req", ",", "resp", ",", "resource", ",", "uri_kwargs", ")", "# note: some authentication middleware classes may not require", "# to be initialized with their own user_storage. In such", "# case this will always authenticate with \"syntetic user\"", "# if there is a valid indentity.", "elif", "self", ".", "user_storage", "is", "None", "and", "not", "self", ".", "only_with_storage", ":", "user", "=", "{", "'identified_with'", ":", "self", ",", "'identifier'", ":", "identifier", "}", "else", ":", "# pragma: nocover", "# note: this should not happen if the base class is properly", "# initialized. Still, user can skip super().__init__() call.", "user", "=", "None", "return", "user" ]
Try to find user in configured user storage object.

Args:
    identifier: User identifier.

Returns:
    user object.
[ "Try", "to", "find", "user", "in", "configured", "user", "storage", "object", "." ]
python
train
34.857143
inspirehep/inspire-schemas
inspire_schemas/builders/authors.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/authors.py#L399-L436
def add_acquisition_source(
    self,
    method,
    submission_number=None,
    internal_uid=None,
    email=None,
    orcid=None,
    source=None,
    datetime=None,
):
    """Add acquisition source.

    :type submission_number: integer

    :type email: string

    :type source: string

    :param method: method of acquisition for the suggested document
    :type method: string

    :param orcid: orcid of the user that is creating the record
    :type orcid: string

    :param internal_uid: id of the user that is creating the record
    :type internal_uid: string

    :param datetime: UTC datetime in ISO 8601 format
    :type datetime: string
    """
    acquisition_source = self._sourced_dict(source)

    acquisition_source['submission_number'] = str(submission_number)
    for key in ('datetime', 'email', 'method', 'orcid', 'internal_uid'):
        if locals()[key] is not None:
            acquisition_source[key] = locals()[key]

    self.obj['acquisition_source'] = acquisition_source
[ "def", "add_acquisition_source", "(", "self", ",", "method", ",", "submission_number", "=", "None", ",", "internal_uid", "=", "None", ",", "email", "=", "None", ",", "orcid", "=", "None", ",", "source", "=", "None", ",", "datetime", "=", "None", ",", ")", ":", "acquisition_source", "=", "self", ".", "_sourced_dict", "(", "source", ")", "acquisition_source", "[", "'submission_number'", "]", "=", "str", "(", "submission_number", ")", "for", "key", "in", "(", "'datetime'", ",", "'email'", ",", "'method'", ",", "'orcid'", ",", "'internal_uid'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "acquisition_source", "[", "key", "]", "=", "locals", "(", ")", "[", "key", "]", "self", ".", "obj", "[", "'acquisition_source'", "]", "=", "acquisition_source" ]
Add acquisition source.

    :type submission_number: integer
    :type email: string
    :type source: string

    :param method: method of acquisition for the suggested document
    :type method: string

    :param orcid: orcid of the user that is creating the record
    :type orcid: string

    :param internal_uid: id of the user that is creating the record
    :type internal_uid: string

    :param datetime: UTC datetime in ISO 8601 format
    :type datetime: string
[ "Add", "acquisition", "source", "." ]
python
train
28.447368
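A hedged sketch of the locals()-based field filtering used above. AuthorBuilderStub and its _sourced_dict are simplified stand-ins for illustration, not the real inspire_schemas builder:

class AuthorBuilderStub:
    def __init__(self):
        self.obj = {}

    def _sourced_dict(self, source):
        # The real builder also records a default source; this stub
        # only keeps an explicitly supplied one.
        return {'source': source} if source else {}

    def add_acquisition_source(self, method, submission_number=None,
                               internal_uid=None, email=None, orcid=None,
                               source=None, datetime=None):
        acquisition_source = self._sourced_dict(source)
        acquisition_source['submission_number'] = str(submission_number)
        # locals() still holds the argument bindings at this point, so
        # each named parameter can be copied over only when it is set.
        for key in ('datetime', 'email', 'method', 'orcid', 'internal_uid'):
            if locals()[key] is not None:
                acquisition_source[key] = locals()[key]
        self.obj['acquisition_source'] = acquisition_source

builder = AuthorBuilderStub()
builder.add_acquisition_source('submitter', submission_number=42,
                               email='jdoe@example.org')
print(builder.obj['acquisition_source'])
# {'submission_number': '42', 'email': 'jdoe@example.org', 'method': 'submitter'}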
kajala/django-jutil
jutil/dates.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/dates.py#L151-L163
def last_year(today: datetime=None, tz=None):
    """
    Returns last year begin (inclusive) and end (exclusive).
    :param today: Some date (defaults current datetime)
    :param tz: Timezone (defaults pytz UTC)
    :return: begin (inclusive), end (exclusive)
    """
    if today is None:
        today = datetime.utcnow()
    end = datetime(day=1, month=1, year=today.year)
    end_incl = end - timedelta(seconds=1)
    begin = datetime(day=1, month=1, year=end_incl.year)
    return localize_time_range(begin, end, tz)
[ "def", "last_year", "(", "today", ":", "datetime", "=", "None", ",", "tz", "=", "None", ")", ":", "if", "today", "is", "None", ":", "today", "=", "datetime", ".", "utcnow", "(", ")", "end", "=", "datetime", "(", "day", "=", "1", ",", "month", "=", "1", ",", "year", "=", "today", ".", "year", ")", "end_incl", "=", "end", "-", "timedelta", "(", "seconds", "=", "1", ")", "begin", "=", "datetime", "(", "day", "=", "1", ",", "month", "=", "1", ",", "year", "=", "end_incl", ".", "year", ")", "return", "localize_time_range", "(", "begin", ",", "end", ",", "tz", ")" ]
Returns last year begin (inclusive) and end (exclusive).
    :param today: Some date (defaults current datetime)
    :param tz: Timezone (defaults pytz UTC)
    :return: begin (inclusive), end (exclusive)
[ "Returns", "last", "year", "begin", "(", "inclusive", ")", "and", "end", "(", "exclusive", ")", ".", ":", "param", "today", ":", "Some", "date", "(", "defaults", "current", "datetime", ")", ":", "param", "tz", ":", "Timezone", "(", "defaults", "pytz", "UTC", ")", ":", "return", ":", "begin", "(", "inclusive", ")", "end", "(", "exclusive", ")" ]
python
train
39.384615
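The boundary arithmetic in last_year can be replayed by hand. This sketch traces it for a fixed date and skips the localize_time_range step (a jutil helper, not reproduced here) so it stays self-contained:

from datetime import datetime, timedelta

today = datetime(2019, 6, 15)
end = datetime(day=1, month=1, year=today.year)        # 2019-01-01 00:00:00, exclusive end
end_incl = end - timedelta(seconds=1)                  # 2018-12-31 23:59:59, lands in last year
begin = datetime(day=1, month=1, year=end_incl.year)   # 2018-01-01 00:00:00, inclusive begin
print(begin, end)  # 2018-01-01 00:00:00 2019-01-01 00:00:00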
SoCo/SoCo
dev_tools/analyse_ws.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L177-L180
def to_file_mode(self):
    """ Write all the messages to files """
    for message_no in range(len(self.messages)):
        self.__to_file(message_no)
[ "def", "to_file_mode", "(", "self", ")", ":", "for", "message_no", "in", "range", "(", "len", "(", "self", ".", "messages", ")", ")", ":", "self", ".", "__to_file", "(", "message_no", ")" ]
Write all the messages to files
[ "Write", "all", "the", "messages", "to", "files" ]
python
train
40
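A rough stand-in for the loop in to_file_mode: __to_file is a private helper of analyse_ws.py, so this sketch writes numbered dumps itself, with hypothetical file names.

import os
import tempfile

messages = [b'GET /status', b'POST /play']
outdir = tempfile.mkdtemp()
for message_no in range(len(messages)):
    # One file per captured message, keyed by its index.
    path = os.path.join(outdir, '{}.txt'.format(message_no))
    with open(path, 'wb') as fh:
        fh.write(messages[message_no])
print(sorted(os.listdir(outdir)))  # ['0.txt', '1.txt']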
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L769-L772
def p_example_multiline(self, p):
    """example_field : ID EQ NL INDENT ex_map NL DEDENT"""
    p[0] = AstExampleField(
        self.path, p.lineno(1), p.lexpos(1), p[1], p[5])
[ "def", "p_example_multiline", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstExampleField", "(", "self", ".", "path", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ",", "p", "[", "1", "]", ",", "p", "[", "5", "]", ")" ]
example_field : ID EQ NL INDENT ex_map NL DEDENT
[ "example_field", ":", "ID", "EQ", "NL", "INDENT", "ex_map", "NL", "DEDENT" ]
python
train
46.5
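p_example_multiline follows PLY's yacc convention: the function's docstring is the grammar production, and p[n] indexes the matched symbols (here p[1] is the ID token and p[5] the ex_map). A tiny self-contained demo of the same convention, assuming the third-party ply package is installed; the grammar itself is made up for illustration:

import ply.lex as lex
import ply.yacc as yacc

tokens = ('ID', 'EQ', 'NUMBER')
t_ID = r'[a-zA-Z_]\w*'
t_EQ = r'='
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_assignment(p):
    """assignment : ID EQ NUMBER"""
    # p[1] is the ID, p[3] the NUMBER -- analogous to how the stone
    # rule above builds an AstExampleField from p[1] and p[5].
    p[0] = (p[1], p[3])

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('answer = 42'))  # ('answer', 42)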