Dataset columns (name, dtype, observed range of values):

  repo               string    lengths 7 to 55
  path               string    lengths 4 to 223
  url                string    lengths 87 to 315
  code               string    lengths 75 to 104k
  code_tokens        list
  docstring          string    lengths 1 to 46.9k
  docstring_tokens   list
  language           string    1 distinct value (python)
  partition          string    3 distinct values (train / valid / test)
  avg_line_len       float64   7.91 to 980
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/property.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/property.py#L542-L551
def take(attributes, properties):
    """Returns a property set which include all properties in 'properties' that have any of 'attributes'."""
    assert is_iterable_typed(attributes, basestring)
    assert is_iterable_typed(properties, basestring)
    result = []
    for e in properties:
        if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
            result.append(e)
    return result
[ "def", "take", "(", "attributes", ",", "properties", ")", ":", "assert", "is_iterable_typed", "(", "attributes", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "result", "=", "[", "]", "for", "e", "in", "properties", ":", "if", "b2", ".", "util", ".", "set", ".", "intersection", "(", "attributes", ",", "feature", ".", "attributes", "(", "get_grist", "(", "e", ")", ")", ")", ":", "result", ".", "append", "(", "e", ")", "return", "result" ]
Returns a property set which include all properties in 'properties' that have any of 'attributes'.
[ "Returns", "a", "property", "set", "which", "include", "all", "properties", "in", "properties", "that", "have", "any", "of", "attributes", "." ]
python
train
41.4
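The `take` record above filters Boost.Build properties by the attributes of their feature, where the feature is the bracketed "grist" prefix (e.g. '<toolset>' in '<toolset>gcc'). A minimal dependency-free sketch of the same filter; `feature_attributes` is a hypothetical stand-in for b2's `feature.attributes()`, and the grist parsing assumes well-formed '<feature>value' strings:

    def take(attributes, properties, feature_attributes):
        """Keep properties whose feature carries any of the requested attributes."""
        result = []
        for prop in properties:
            grist = prop[:prop.index('>') + 1]   # '<toolset>gcc' -> '<toolset>'
            if set(attributes) & set(feature_attributes(grist)):
                result.append(prop)
        return result

    # e.g. take(['propagated'], ['<toolset>gcc'], lambda g: ['propagated'])
    # -> ['<toolset>gcc']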
divio/django-filer
filer/utils/zip.py
https://github.com/divio/django-filer/blob/946629087943d41eff290f07bfdf240b8853dd88/filer/utils/zip.py#L9-L27
def unzip(file_obj):
    """
    Take a path to a zipfile and checks if it is a valid zip file
    and returns...
    """
    files = []
    # TODO: implement try-except here
    zip = ZipFile(file_obj)
    bad_file = zip.testzip()
    if bad_file:
        raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
    infolist = zip.infolist()
    for zipinfo in infolist:
        if zipinfo.filename.startswith('__'):  # do not process meta files
            continue
        file_obj = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo))
        files.append((file_obj, zipinfo.filename))
    zip.close()
    return files
[ "def", "unzip", "(", "file_obj", ")", ":", "files", "=", "[", "]", "# TODO: implement try-except here", "zip", "=", "ZipFile", "(", "file_obj", ")", "bad_file", "=", "zip", ".", "testzip", "(", ")", "if", "bad_file", ":", "raise", "Exception", "(", "'\"%s\" in the .zip archive is corrupt.'", "%", "bad_file", ")", "infolist", "=", "zip", ".", "infolist", "(", ")", "for", "zipinfo", "in", "infolist", ":", "if", "zipinfo", ".", "filename", ".", "startswith", "(", "'__'", ")", ":", "# do not process meta files", "continue", "file_obj", "=", "SimpleUploadedFile", "(", "name", "=", "zipinfo", ".", "filename", ",", "content", "=", "zip", ".", "read", "(", "zipinfo", ")", ")", "files", ".", "append", "(", "(", "file_obj", ",", "zipinfo", ".", "filename", ")", ")", "zip", ".", "close", "(", ")", "return", "files" ]
Take a path to a zipfile and checks if it is a valid zip file and returns...
[ "Take", "a", "path", "to", "a", "zipfile", "and", "checks", "if", "it", "is", "a", "valid", "zip", "file", "and", "returns", "..." ]
python
train
33.263158
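The `unzip` record above carries a "# TODO: implement try-except here". A standalone sketch of what that error handling could look like, using only the standard library; plain (filename, bytes) tuples stand in for Django's SimpleUploadedFile, and the BadZipFile wrapping is an assumption, not django-filer's actual fix:

    from zipfile import BadZipFile, ZipFile

    def unzip_safe(file_obj):
        """Return (filename, content) pairs for regular archive members."""
        try:
            archive = ZipFile(file_obj)
        except BadZipFile as exc:
            raise ValueError('not a valid .zip archive: %s' % exc)
        bad_file = archive.testzip()
        if bad_file:
            raise ValueError('"%s" in the .zip archive is corrupt.' % bad_file)
        files = []
        for zipinfo in archive.infolist():
            if zipinfo.filename.startswith('__'):   # skip meta files such as __MACOSX
                continue
            files.append((zipinfo.filename, archive.read(zipinfo)))
        archive.close()
        return files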
spyder-ide/spyder
spyder/preferences/shortcuts.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L142-L152
def keyPressEvent(self, event):
    """Qt Override."""
    key = event.key()
    if key in [Qt.Key_Up]:
        self._parent.previous_row()
    elif key in [Qt.Key_Down]:
        self._parent.next_row()
    elif key in [Qt.Key_Enter, Qt.Key_Return]:
        self._parent.show_editor()
    else:
        super(ShortcutFinder, self).keyPressEvent(event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "key", "=", "event", ".", "key", "(", ")", "if", "key", "in", "[", "Qt", ".", "Key_Up", "]", ":", "self", ".", "_parent", ".", "previous_row", "(", ")", "elif", "key", "in", "[", "Qt", ".", "Key_Down", "]", ":", "self", ".", "_parent", ".", "next_row", "(", ")", "elif", "key", "in", "[", "Qt", ".", "Key_Enter", ",", "Qt", ".", "Key_Return", "]", ":", "self", ".", "_parent", ".", "show_editor", "(", ")", "else", ":", "super", "(", "ShortcutFinder", ",", "self", ")", ".", "keyPressEvent", "(", "event", ")" ]
Qt Override.
[ "Qt", "Override", "." ]
python
train
35.545455
MartinThoma/mpu
mpu/string.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/string.py#L19-L53
def is_email(potential_email_address):
    """
    Check if potential_email_address is a valid e-mail address.

    Please note that this function has no false-negatives but many
    false-positives. So if it returns that the input is not a valid
    e-mail adress, it certainly isn't. If it returns True, it might still
    be invalid. For example, the domain could not be registered.

    Parameters
    ----------
    potential_email_address : str

    Returns
    -------
    is_email : bool

    Examples
    --------
    >>> is_email('')
    False
    >>> is_email('[email protected]')
    True
    >>> is_email('[email protected]')
    True
    >>> is_email('Martin Thoma <[email protected]>')
    False
    >>> is_email('info@martin-thoma')
    False
    """
    context, mail = parseaddr(potential_email_address)
    first_condition = len(context) == 0 and len(mail) != 0
    dot_after_at = ('@' in potential_email_address and
                    '.' in potential_email_address.split('@')[1])
    return first_condition and dot_after_at
[ "def", "is_email", "(", "potential_email_address", ")", ":", "context", ",", "mail", "=", "parseaddr", "(", "potential_email_address", ")", "first_condition", "=", "len", "(", "context", ")", "==", "0", "and", "len", "(", "mail", ")", "!=", "0", "dot_after_at", "=", "(", "'@'", "in", "potential_email_address", "and", "'.'", "in", "potential_email_address", ".", "split", "(", "'@'", ")", "[", "1", "]", ")", "return", "first_condition", "and", "dot_after_at" ]
Check if potential_email_address is a valid e-mail address.

Please note that this function has no false-negatives but many
false-positives. So if it returns that the input is not a valid
e-mail adress, it certainly isn't. If it returns True, it might still
be invalid. For example, the domain could not be registered.

Parameters
----------
potential_email_address : str

Returns
-------
is_email : bool

Examples
--------
>>> is_email('')
False
>>> is_email('[email protected]')
True
>>> is_email('[email protected]')
True
>>> is_email('Martin Thoma <[email protected]>')
False
>>> is_email('info@martin-thoma')
False
[ "Check", "if", "potential_email_address", "is", "a", "valid", "e", "-", "mail", "address", "." ]
python
train
29.285714
MolSSI-BSE/basis_set_exchange
basis_set_exchange/printing.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/printing.py#L66-L87
def electron_shell_str(shell, shellidx=None):
    '''Return a string representing the data for an electron shell

    If shellidx (index of the shell) is not None, it will also be printed
    '''
    am = shell['angular_momentum']
    amchar = lut.amint_to_char(am)
    amchar = amchar.upper()

    shellidx_str = ''
    if shellidx is not None:
        shellidx_str = 'Index {} '.format(shellidx)

    exponents = shell['exponents']
    coefficients = shell['coefficients']
    ncol = len(coefficients) + 1

    point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
    s = "Shell: {}Region: {}: AM: {}\n".format(shellidx_str, shell['region'], amchar)
    s += "Function: {}\n".format(shell['function_type'])
    s += write_matrix([exponents, *coefficients], point_places)
    return s
[ "def", "electron_shell_str", "(", "shell", ",", "shellidx", "=", "None", ")", ":", "am", "=", "shell", "[", "'angular_momentum'", "]", "amchar", "=", "lut", ".", "amint_to_char", "(", "am", ")", "amchar", "=", "amchar", ".", "upper", "(", ")", "shellidx_str", "=", "''", "if", "shellidx", "is", "not", "None", ":", "shellidx_str", "=", "'Index {} '", ".", "format", "(", "shellidx", ")", "exponents", "=", "shell", "[", "'exponents'", "]", "coefficients", "=", "shell", "[", "'coefficients'", "]", "ncol", "=", "len", "(", "coefficients", ")", "+", "1", "point_places", "=", "[", "8", "*", "i", "+", "15", "*", "(", "i", "-", "1", ")", "for", "i", "in", "range", "(", "1", ",", "ncol", "+", "1", ")", "]", "s", "=", "\"Shell: {}Region: {}: AM: {}\\n\"", ".", "format", "(", "shellidx_str", ",", "shell", "[", "'region'", "]", ",", "amchar", ")", "s", "+=", "\"Function: {}\\n\"", ".", "format", "(", "shell", "[", "'function_type'", "]", ")", "s", "+=", "write_matrix", "(", "[", "exponents", ",", "*", "coefficients", "]", ",", "point_places", ")", "return", "s" ]
Return a string representing the data for an electron shell If shellidx (index of the shell) is not None, it will also be printed
[ "Return", "a", "string", "representing", "the", "data", "for", "an", "electron", "shell" ]
python
train
35.318182
bububa/pyTOP
pyTOP/simba.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/simba.py#L86-L94
def get(self, campaign_id, nick=None):
    '''xxxxx.xxxxx.campaign.area.get
    ===================================
    Get the targeting area settings of a promotion campaign'''
    request = TOPRequest('xxxxx.xxxxx.campaign.area.get')
    request['campaign_id'] = campaign_id
    if nick != None:
        request['nick'] = nick
    self.create(self.execute(request), fields=['success', 'result', 'success', 'result_code', 'result_message'], models={'result': CampaignArea})
    return self.result
[ "def", "get", "(", "self", ",", "campaign_id", ",", "nick", "=", "None", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.campaign.area.get'", ")", "request", "[", "'campaign_id'", "]", "=", "campaign_id", "if", "nick", "!=", "None", ":", "request", "[", "'nick'", "]", "=", "nick", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ",", "fields", "=", "[", "'success'", ",", "'result'", ",", "'success'", ",", "'result_code'", ",", "'result_message'", "]", ",", "models", "=", "{", "'result'", ":", "CampaignArea", "}", ")", "return", "self", ".", "result" ]
xxxxx.xxxxx.campaign.area.get =================================== Get the targeting area settings of a promotion campaign
[ "xxxxx", ".", "xxxxx", ".", "campaign", ".", "area", ".", "get", "===================================", "取得一个推广计划的投放地域设置" ]
python
train
51.888889
ewels/MultiQC
multiqc/modules/slamdunk/slamdunk.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/slamdunk/slamdunk.py#L435-L472
def slamdunkOverallRatesPlot (self):
    """ Generate the overall rates plot """

    pconfig = {
        'id': 'overallratesplot',
        'title': 'Slamdunk: Overall conversion rates in reads',
        'cpswitch': False,
        'cpswitch_c_active': False,
        'ylab': 'Number of reads',
        'stacking': 'normal',
        'tt_decimals': 2,
        'tt_suffix': '%',
        'tt_percentages': False,
        'hide_zero_cats': False,
        'data_labels': [
            "Plus Strand +",
            "Minus Strand -",
        ]
    }

    cats = [OrderedDict(), OrderedDict()]
    keys = [
        ['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'],
        ['A>G', 'A>T', 'A>C', 'T>A', 'T>G', 'T>C', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G']
    ]
    for i, k in enumerate(keys):
        for j, v in enumerate(k):
            cats[i][v] = {'color': self.plot_cols[j]}

    self.add_section (
        name = 'Conversion rates per read',
        anchor = 'slamdunk_overall_rates',
        description = """This plot shows the individual conversion rates over all reads.
        It shows these conversion rates strand-specific: This means for a properly labeled
        sample you would see a T&gt;C excess on the plus-strand and an A&gt;G excess on the minus strand
        (see the <a href="http://t-neumann.github.io/slamdunk/docs.html#rates" target="_blank">slamdunk docs</a>).""",
        plot = bargraph.plot([self.rates_data_plus, self.rates_data_minus], cats, pconfig)
    )
[ "def", "slamdunkOverallRatesPlot", "(", "self", ")", ":", "pconfig", "=", "{", "'id'", ":", "'overallratesplot'", ",", "'title'", ":", "'Slamdunk: Overall conversion rates in reads'", ",", "'cpswitch'", ":", "False", ",", "'cpswitch_c_active'", ":", "False", ",", "'ylab'", ":", "'Number of reads'", ",", "'stacking'", ":", "'normal'", ",", "'tt_decimals'", ":", "2", ",", "'tt_suffix'", ":", "'%'", ",", "'tt_percentages'", ":", "False", ",", "'hide_zero_cats'", ":", "False", ",", "'data_labels'", ":", "[", "\"Plus Strand +\"", ",", "\"Minus Strand -\"", ",", "]", "}", "cats", "=", "[", "OrderedDict", "(", ")", ",", "OrderedDict", "(", ")", "]", "keys", "=", "[", "[", "'T>C'", ",", "'A>T'", ",", "'A>G'", ",", "'A>C'", ",", "'T>A'", ",", "'T>G'", ",", "'G>A'", ",", "'G>T'", ",", "'G>C'", ",", "'C>A'", ",", "'C>T'", ",", "'C>G'", "]", ",", "[", "'A>G'", ",", "'A>T'", ",", "'A>C'", ",", "'T>A'", ",", "'T>G'", ",", "'T>C'", ",", "'G>A'", ",", "'G>T'", ",", "'G>C'", ",", "'C>A'", ",", "'C>T'", ",", "'C>G'", "]", "]", "for", "i", ",", "k", "in", "enumerate", "(", "keys", ")", ":", "for", "j", ",", "v", "in", "enumerate", "(", "k", ")", ":", "cats", "[", "i", "]", "[", "v", "]", "=", "{", "'color'", ":", "self", ".", "plot_cols", "[", "j", "]", "}", "self", ".", "add_section", "(", "name", "=", "'Conversion rates per read'", ",", "anchor", "=", "'slamdunk_overall_rates'", ",", "description", "=", "\"\"\"This plot shows the individual conversion rates over all reads.\n It shows these conversion rates strand-specific: This means for a properly labeled\n sample you would see a T&gt;C excess on the plus-strand and an A&gt;G excess on the minus strand\n (see the <a href=\"http://t-neumann.github.io/slamdunk/docs.html#rates\" target=\"_blank\">slamdunk docs</a>).\"\"\"", ",", "plot", "=", "bargraph", ".", "plot", "(", "[", "self", ".", "rates_data_plus", ",", "self", ".", "rates_data_minus", "]", ",", "cats", ",", "pconfig", ")", ")" ]
Generate the overall rates plot
[ "Generate", "the", "overall", "rates", "plot" ]
python
train
43.368421
Sanji-IO/sanji
sanji/core.py
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/core.py#L186-L195
def _resolve_responses(self):
    """
    _resolve_responses
    """
    while True:
        message = self.res_queue.get()
        if message is None:
            _logger.debug("_resolve_responses thread is terminated")
            return
        self.__resolve_responses(message)
[ "def", "_resolve_responses", "(", "self", ")", ":", "while", "True", ":", "message", "=", "self", ".", "res_queue", ".", "get", "(", ")", "if", "message", "is", "None", ":", "_logger", ".", "debug", "(", "\"_resolve_responses thread is terminated\"", ")", "return", "self", ".", "__resolve_responses", "(", "message", ")" ]
_resolve_responses
[ "_resolve_responses" ]
python
train
30.8
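The `_resolve_responses` record above is a sentinel-terminated worker loop: pushing None onto the queue tells the thread to exit. A self-contained sketch of the pattern with the standard library:

    import queue
    import threading

    def worker(q, handle):
        while True:
            message = q.get()
            if message is None:   # sentinel: terminate the worker
                return
            handle(message)

    q = queue.Queue()
    t = threading.Thread(target=worker, args=(q, print))
    t.start()
    q.put('hello')
    q.put(None)   # ask the worker to exit
    t.join()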
bukun/TorCMS
ext_script/autocrud/func_gen_html.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/ext_script/autocrud/func_gen_html.py#L152-L166
def gen_select_list(sig_dic):
    '''
    For generating List view HTML file for SELECT.
    for each item.
    '''
    view_jushi = '''<span class="label label-primary" style="margin-right:10px">'''
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''{{% if '{0}' in postinfo.extinfo and postinfo.extinfo["{0}"][0] == "{1}" %}}
    {2} {{% end %}}'''.format(sig_dic['en'], key, dic_tmp[key])
        view_jushi += tmp_str
    view_jushi += '''</span>'''
    return view_jushi
[ "def", "gen_select_list", "(", "sig_dic", ")", ":", "view_jushi", "=", "'''<span class=\"label label-primary\" style=\"margin-right:10px\">'''", "dic_tmp", "=", "sig_dic", "[", "'dic'", "]", "for", "key", "in", "dic_tmp", ".", "keys", "(", ")", ":", "tmp_str", "=", "'''{{% if '{0}' in postinfo.extinfo and postinfo.extinfo[\"{0}\"][0] == \"{1}\" %}}\n {2} {{% end %}}'''", ".", "format", "(", "sig_dic", "[", "'en'", "]", ",", "key", ",", "dic_tmp", "[", "key", "]", ")", "view_jushi", "+=", "tmp_str", "view_jushi", "+=", "'''</span>'''", "return", "view_jushi" ]
For generating List view HTML file for SELECT. for each item.
[ "For", "generating", "List", "view", "HTML", "file", "for", "SELECT", ".", "for", "each", "item", "." ]
python
train
33.2
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L3393-L3403
def create(self, create_missing=None):
    """Do extra work to fetch a complete set of attributes for this entity.

    For more information, see `Bugzilla #1235377
    <https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.

    """
    return HostGroup(
        self._server_config,
        id=self.create_json(create_missing)['id'],
    ).read()
[ "def", "create", "(", "self", ",", "create_missing", "=", "None", ")", ":", "return", "HostGroup", "(", "self", ".", "_server_config", ",", "id", "=", "self", ".", "create_json", "(", "create_missing", ")", "[", "'id'", "]", ",", ")", ".", "read", "(", ")" ]
Do extra work to fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1235377 <https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.
[ "Do", "extra", "work", "to", "fetch", "a", "complete", "set", "of", "attributes", "for", "this", "entity", "." ]
python
train
33.727273
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2241-L2252
def _convert(self, format):
    """Return a new Image instance with the given format.

    Returns self if the format is already the same.

    """
    if self.format == format:
        return self
    else:
        image = Image(self.pil_image)
        image._format = format
        return image
[ "def", "_convert", "(", "self", ",", "format", ")", ":", "if", "self", ".", "format", "==", "format", ":", "return", "self", "else", ":", "image", "=", "Image", "(", "self", ".", "pil_image", ")", "image", ".", "_format", "=", "format", "return", "image" ]
Return a new Image instance with the given format. Returns self if the format is already the same.
[ "Return", "a", "new", "Image", "instance", "with", "the", "given", "format", "." ]
python
train
26.833333
IdentityPython/pyop
src/pyop/authz_state.py
https://github.com/IdentityPython/pyop/blob/7b1385964f079c39752fce5f2dbcf458b8a92e56/src/pyop/authz_state.py#L190-L208
def create_refresh_token(self, access_token_value):
    # type: (str) -> str
    """
    Creates an refresh token bound to the specified access token.
    """
    if access_token_value not in self.access_tokens:
        raise InvalidAccessToken('{} unknown'.format(access_token_value))

    if not self.refresh_token_lifetime:
        logger.debug('no refresh token issued for for access_token=%s', access_token_value)
        return None

    refresh_token = rand_str()
    authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime}
    self.refresh_tokens[refresh_token] = authz_info

    logger.debug('issued refresh_token=%s expiring=%d for access_token=%s',
                 refresh_token, authz_info['exp'], access_token_value)
    return refresh_token
[ "def", "create_refresh_token", "(", "self", ",", "access_token_value", ")", ":", "# type: (str) -> str", "if", "access_token_value", "not", "in", "self", ".", "access_tokens", ":", "raise", "InvalidAccessToken", "(", "'{} unknown'", ".", "format", "(", "access_token_value", ")", ")", "if", "not", "self", ".", "refresh_token_lifetime", ":", "logger", ".", "debug", "(", "'no refresh token issued for for access_token=%s'", ",", "access_token_value", ")", "return", "None", "refresh_token", "=", "rand_str", "(", ")", "authz_info", "=", "{", "'access_token'", ":", "access_token_value", ",", "'exp'", ":", "int", "(", "time", ".", "time", "(", ")", ")", "+", "self", ".", "refresh_token_lifetime", "}", "self", ".", "refresh_tokens", "[", "refresh_token", "]", "=", "authz_info", "logger", ".", "debug", "(", "'issued refresh_token=%s expiring=%d for access_token=%s'", ",", "refresh_token", ",", "authz_info", "[", "'exp'", "]", ",", "access_token_value", ")", "return", "refresh_token" ]
Creates an refresh token bound to the specified access token.
[ "Creates", "an", "refresh", "token", "bound", "to", "the", "specified", "access", "token", "." ]
python
train
44.526316
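The `create_refresh_token` record above stores {'access_token': ..., 'exp': ...} under the new token. A sketch of the matching lookup side; this helper is an assumption for illustration, not part of pyop's shown API:

    import time

    def lookup_refresh_token(refresh_tokens, refresh_token):
        """Return the access token bound to refresh_token, if still valid."""
        authz_info = refresh_tokens.get(refresh_token)
        if authz_info is None:
            raise KeyError('unknown refresh token')
        if authz_info['exp'] < int(time.time()):
            raise ValueError('refresh token expired')
        return authz_info['access_token']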
bububa/pyTOP
pyTOP/user.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/user.py#L209-L218
def get(self, nicks=[], fields=[]):
    '''taobao.users.get
    Get information for multiple users'''
    request = TOPRequest('taobao.users.get')
    request['nicks'] = ','.join(nicks)
    if not fields:
        user = User()
        fields = user.fields
    request['fields'] = ','.join(fields)
    self.create(self.execute(request))
    return self.users
[ "def", "get", "(", "self", ",", "nicks", "=", "[", "]", ",", "fields", "=", "[", "]", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.users.get'", ")", "request", "[", "'nicks'", "]", "=", "','", ".", "join", "(", "nicks", ")", "if", "not", "fields", ":", "user", "=", "User", "(", ")", "fields", "=", "user", ".", "fields", "request", "[", "'fields'", "]", "=", "','", ".", "join", "(", "fields", ")", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ")", "return", "self", ".", "users" ]
taobao.users.get Get information for multiple users
[ "taobao", ".", "users", ".", "get", "获取多个用户信息" ]
python
train
35.4
mulkieran/justbases
src/justbases/_display.py
https://github.com/mulkieran/justbases/blob/dd52ff4b3d11609f54b2673599ee4eeb20f9734f/src/justbases/_display.py#L156-L192
def xform(self, left, right, repeating, base, sign):
    """
    Return prefixes for tuple.

    :param str left: left of the radix
    :param str right: right of the radix
    :param str repeating: repeating part
    :param int base: the base in which value is displayed
    :param int sign: -1, 0, 1 as appropriate
    :returns: the number string
    :rtype: str
    """
    # pylint: disable=too-many-arguments
    base_prefix = ''
    if self.CONFIG.use_prefix:
        if base == 8:
            base_prefix = '0'
        elif base == 16:
            base_prefix = '0x'
        else:
            base_prefix = ''

    base_subscript = str(base) if self.CONFIG.use_subscript else ''

    result = {
        'sign': '-' if sign == -1 else '',
        'base_prefix': base_prefix,
        'left': left,
        'radix': '.' if (right != "" or repeating != "") else "",
        'right': right,
        'repeating': ("(%s)" % repeating) if repeating != "" else "",
        'base_separator': '' if base_subscript == '' else '_',
        'base_subscript': base_subscript
    }

    return self._FMT_STR % result
[ "def", "xform", "(", "self", ",", "left", ",", "right", ",", "repeating", ",", "base", ",", "sign", ")", ":", "# pylint: disable=too-many-arguments", "base_prefix", "=", "''", "if", "self", ".", "CONFIG", ".", "use_prefix", ":", "if", "base", "==", "8", ":", "base_prefix", "=", "'0'", "elif", "base", "==", "16", ":", "base_prefix", "=", "'0x'", "else", ":", "base_prefix", "=", "''", "base_subscript", "=", "str", "(", "base", ")", "if", "self", ".", "CONFIG", ".", "use_subscript", "else", "''", "result", "=", "{", "'sign'", ":", "'-'", "if", "sign", "==", "-", "1", "else", "''", ",", "'base_prefix'", ":", "base_prefix", ",", "'left'", ":", "left", ",", "'radix'", ":", "'.'", "if", "(", "right", "!=", "\"\"", "or", "repeating", "!=", "\"\"", ")", "else", "\"\"", ",", "'right'", ":", "right", ",", "'repeating'", ":", "(", "\"(%s)\"", "%", "repeating", ")", "if", "repeating", "!=", "\"\"", "else", "\"\"", ",", "'base_separator'", ":", "''", "if", "base_subscript", "==", "''", "else", "'_'", ",", "'base_subscript'", ":", "base_subscript", "}", "return", "self", ".", "_FMT_STR", "%", "result" ]
Return prefixes for tuple.

:param str left: left of the radix
:param str right: right of the radix
:param str repeating: repeating part
:param int base: the base in which value is displayed
:param int sign: -1, 0, 1 as appropriate
:returns: the number string
:rtype: str
[ "Return", "prefixes", "for", "tuple", "." ]
python
train
32.324324
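The `xform` record above fills a dict of display fragments and joins them with the class attribute `_FMT_STR`, which is not shown in the record. Assuming it simply concatenates the fields in display order (an assumption), the pieces combine like this:

    # assumed format string, not justbases' actual class attribute
    _FMT_STR = ('%(sign)s%(base_prefix)s%(left)s%(radix)s%(right)s'
                '%(repeating)s%(base_separator)s%(base_subscript)s')

    result = {
        'sign': '-', 'base_prefix': '0x', 'left': '1A', 'radix': '.',
        'right': '8', 'repeating': '', 'base_separator': '_',
        'base_subscript': '16',
    }
    print(_FMT_STR % result)   # -0x1A.8_16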
klen/pylama
pylama/libs/inirama.py
https://github.com/klen/pylama/blob/f436ccc6b55b33381a295ded753e467953cf4379/pylama/libs/inirama.py#L316-L334
def write(self, f):
    """ Write namespace as INI file.

    :param f: File object or path to file.

    """
    if isinstance(f, str):
        f = io.open(f, 'w', encoding='utf-8')

    if not hasattr(f, 'read'):
        raise AttributeError("Wrong type of file: {0}".format(type(f)))

    NS_LOGGER.info('Write to `{0}`'.format(f.name))
    for section in self.sections.keys():
        f.write('[{0}]\n'.format(section))
        for k, v in self[section].items():
            f.write('{0:15}= {1}\n'.format(k, v))
        f.write('\n')
    f.close()
[ "def", "write", "(", "self", ",", "f", ")", ":", "if", "isinstance", "(", "f", ",", "str", ")", ":", "f", "=", "io", ".", "open", "(", "f", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "if", "not", "hasattr", "(", "f", ",", "'read'", ")", ":", "raise", "AttributeError", "(", "\"Wrong type of file: {0}\"", ".", "format", "(", "type", "(", "f", ")", ")", ")", "NS_LOGGER", ".", "info", "(", "'Write to `{0}`'", ".", "format", "(", "f", ".", "name", ")", ")", "for", "section", "in", "self", ".", "sections", ".", "keys", "(", ")", ":", "f", ".", "write", "(", "'[{0}]\\n'", ".", "format", "(", "section", ")", ")", "for", "k", ",", "v", "in", "self", "[", "section", "]", ".", "items", "(", ")", ":", "f", ".", "write", "(", "'{0:15}= {1}\\n'", ".", "format", "(", "k", ",", "v", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "f", ".", "close", "(", ")" ]
Write namespace as INI file.

:param f: File object or path to file.
[ "Write", "namespace", "as", "INI", "file", "." ]
python
train
31.052632
amzn/ion-python
amazon/ion/reader_binary.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_binary.py#L838-L870
def raw_reader(queue=None):
    """Returns a raw binary reader co-routine.

    Args:
        queue (Optional[BufferQueue]): The buffer read data for parsing,
            if ``None`` a new one will be created.

    Yields:
        IonEvent: parse events, will have an event type of ``INCOMPLETE``
            if data is needed in the middle of a value or ``STREAM_END``
            if there is no data **and** the parser is not in the middle
            of parsing a value.

            Receives :class:`DataEvent`, with :class:`ReadEventType` of
            ``NEXT`` or ``SKIP`` to iterate over values, or ``DATA`` if
            the last event was a ``INCOMPLETE`` or ``STREAM_END`` event
            type.

            ``SKIP`` is only allowed within a container. A reader is
            *in* a container when the ``CONTAINER_START`` event type is
            encountered and *not in* a container when the
            ``CONTAINER_END`` event type for that container is
            encountered.
    """
    if queue is None:
        queue = BufferQueue()
    ctx = _HandlerContext(
        position=0,
        limit=None,
        queue=queue,
        field_name=None,
        annotations=None,
        depth=0,
        whence=None
    )
    return reader_trampoline(_container_handler(None, ctx))
[ "def", "raw_reader", "(", "queue", "=", "None", ")", ":", "if", "queue", "is", "None", ":", "queue", "=", "BufferQueue", "(", ")", "ctx", "=", "_HandlerContext", "(", "position", "=", "0", ",", "limit", "=", "None", ",", "queue", "=", "queue", ",", "field_name", "=", "None", ",", "annotations", "=", "None", ",", "depth", "=", "0", ",", "whence", "=", "None", ")", "return", "reader_trampoline", "(", "_container_handler", "(", "None", ",", "ctx", ")", ")" ]
Returns a raw binary reader co-routine.

Args:
    queue (Optional[BufferQueue]): The buffer read data for parsing,
        if ``None`` a new one will be created.

Yields:
    IonEvent: parse events, will have an event type of ``INCOMPLETE``
        if data is needed in the middle of a value or ``STREAM_END``
        if there is no data **and** the parser is not in the middle of
        parsing a value.

        Receives :class:`DataEvent`, with :class:`ReadEventType` of
        ``NEXT`` or ``SKIP`` to iterate over values, or ``DATA`` if the
        last event was a ``INCOMPLETE`` or ``STREAM_END`` event type.

        ``SKIP`` is only allowed within a container. A reader is *in* a
        container when the ``CONTAINER_START`` event type is
        encountered and *not in* a container when the ``CONTAINER_END``
        event type for that container is encountered.
[ "Returns", "a", "raw", "binary", "reader", "co", "-", "routine", "." ]
python
train
36.939394
shaunduncan/giphypop
giphypop.py
https://github.com/shaunduncan/giphypop/blob/21e7f51c4f000ae24be3805b7eeec52bcce3d390/giphypop.py#L256-L268
def _fetch(self, endpoint_name, **params):
    """
    Wrapper for making an api request from giphy
    """
    params['api_key'] = self.api_key

    resp = requests.get(self._endpoint(endpoint_name), params=params)
    resp.raise_for_status()

    data = resp.json()
    self._check_or_raise(data.get('meta', {}))

    return data
[ "def", "_fetch", "(", "self", ",", "endpoint_name", ",", "*", "*", "params", ")", ":", "params", "[", "'api_key'", "]", "=", "self", ".", "api_key", "resp", "=", "requests", ".", "get", "(", "self", ".", "_endpoint", "(", "endpoint_name", ")", ",", "params", "=", "params", ")", "resp", ".", "raise_for_status", "(", ")", "data", "=", "resp", ".", "json", "(", ")", "self", ".", "_check_or_raise", "(", "data", ".", "get", "(", "'meta'", ",", "{", "}", ")", ")", "return", "data" ]
Wrapper for making an api request from giphy
[ "Wrapper", "for", "making", "an", "api", "request", "from", "giphy" ]
python
test
27.307692
StackStorm/pybind
pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/brocade_mpls_rpc/__init__.py#L2802-L2823
def _set_clear_mpls_auto_bandwidth_statistics_lsp(self, v, load=False):
    """
    Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """clear_mpls_auto_bandwidth_statistics_lsp must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name="clear-mpls-auto-bandwidth-statistics-lsp", rest_name="clear-mpls-auto-bandwidth-statistics-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
        })

    self.__clear_mpls_auto_bandwidth_statistics_lsp = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_clear_mpls_auto_bandwidth_statistics_lsp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "clear_mpls_auto_bandwidth_statistics_lsp", ".", "clear_mpls_auto_bandwidth_statistics_lsp", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"clear-mpls-auto-bandwidth-statistics-lsp\"", ",", "rest_name", "=", "\"clear-mpls-auto-bandwidth-statistics-lsp\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'clearMplsAutoBandwidthStatistics'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"clear_mpls_auto_bandwidth_statistics_lsp must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=clear_mpls_auto_bandwidth_statistics_lsp.clear_mpls_auto_bandwidth_statistics_lsp, is_leaf=True, yang_name=\"clear-mpls-auto-bandwidth-statistics-lsp\", rest_name=\"clear-mpls-auto-bandwidth-statistics-lsp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsAutoBandwidthStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__clear_mpls_auto_bandwidth_statistics_lsp", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for clear_mpls_auto_bandwidth_statistics_lsp, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_auto_bandwidth_statistics_lsp (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_clear_mpls_auto_bandwidth_statistics_lsp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_clear_mpls_auto_bandwidth_statistics_lsp() directly.
[ "Setter", "method", "for", "clear_mpls_auto_bandwidth_statistics_lsp", "mapped", "from", "YANG", "variable", "/", "brocade_mpls_rpc", "/", "clear_mpls_auto_bandwidth_statistics_lsp", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_clear_mpls_auto_bandwidth_statistics_lsp", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_clear_mpls_auto_bandwidth_statistics_lsp", "()", "directly", "." ]
python
train
92.318182
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L4714-L4725
def libvlc_media_list_index_of_item(p_ml, p_md):
    '''Find index position of List media instance in media list.
    Warning: the function will return the first matched position.
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance.
    @return: position of media instance or -1 if media not found.
    '''
    f = _Cfunctions.get('libvlc_media_list_index_of_item', None) or \
        _Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
                   ctypes.c_int, MediaList, Media)
    return f(p_ml, p_md)
[ "def", "libvlc_media_list_index_of_item", "(", "p_ml", ",", "p_md", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_index_of_item'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_index_of_item'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "MediaList", ",", "Media", ")", "return", "f", "(", "p_ml", ",", "p_md", ")" ]
Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{libvlc_media_list_lock} should be held upon entering this function.
@param p_ml: a media list instance.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
[ "Find", "index", "position", "of", "List", "media", "instance", "in", "media", "list", ".", "Warning", ":", "the", "function", "will", "return", "the", "first", "matched", "position", ".", "The", "L", "{", "libvlc_media_list_lock", "}", "should", "be", "held", "upon", "entering", "this", "function", "." ]
python
train
51.25
timknip/pycsg
csg/geom.py
https://github.com/timknip/pycsg/blob/b8f9710fd15c38dcc275d56a2108f604af38dcc8/csg/geom.py#L64-L66
def times(self, a):
    """ Multiply. """
    return Vector(self.x*a, self.y*a, self.z*a)
[ "def", "times", "(", "self", ",", "a", ")", ":", "return", "Vector", "(", "self", ".", "x", "*", "a", ",", "self", ".", "y", "*", "a", ",", "self", ".", "z", "*", "a", ")" ]
Multiply.
[ "Multiply", "." ]
python
train
31.666667
econ-ark/HARK
HARK/utilities.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L959-L978
def calcWeightedAvg(data, weights):
    '''
    Generates a weighted average of simulated data.  The Nth row of data is averaged
    and then weighted by the Nth element of weights in an aggregate average.

    Parameters
    ----------
    data : numpy.array
        An array of data with N rows of J floats
    weights : numpy.array
        A length N array of weights for the N rows of data.

    Returns
    -------
    weighted_sum : float
        The weighted sum of the data.
    '''
    data_avg = np.mean(data, axis=1)
    weighted_sum = np.dot(data_avg, weights)
    return weighted_sum
[ "def", "calcWeightedAvg", "(", "data", ",", "weights", ")", ":", "data_avg", "=", "np", ".", "mean", "(", "data", ",", "axis", "=", "1", ")", "weighted_sum", "=", "np", ".", "dot", "(", "data_avg", ",", "weights", ")", "return", "weighted_sum" ]
Generates a weighted average of simulated data.  The Nth row of data is averaged
and then weighted by the Nth element of weights in an aggregate average.

Parameters
----------
data : numpy.array
    An array of data with N rows of J floats
weights : numpy.array
    A length N array of weights for the N rows of data.

Returns
-------
weighted_sum : float
    The weighted sum of the data.
[ "Generates", "a", "weighted", "average", "of", "simulated", "data", ".", "The", "Nth", "row", "of", "data", "is", "averaged", "and", "then", "weighted", "by", "the", "Nth", "element", "of", "weights", "in", "an", "aggregate", "average", "." ]
python
train
28.7
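A tiny worked example of `calcWeightedAvg` from the record above (numpy assumed installed and the function in scope): row means are taken first, then dotted with the weights:

    import numpy as np

    data = np.array([[1.0, 3.0],    # row mean 2.0
                     [2.0, 6.0]])   # row mean 4.0
    weights = np.array([0.25, 0.75])
    # 2.0 * 0.25 + 4.0 * 0.75 = 3.5
    print(calcWeightedAvg(data, weights))   # 3.5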
limodou/uliweb
uliweb/utils/generic.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L3008-L3022
def _make_like(self, column, format, value):
    """
    make like condition
    :param column: column object
    :param format: '%_' '_%' '%_%'
    :param value: column value
    :return: condition object
    """
    c = []
    if format.startswith('%'):
        c.append('%')
    c.append(value)
    if format.endswith('%'):
        c.append('%')
    return column.like(''.join(c))
[ "def", "_make_like", "(", "self", ",", "column", ",", "format", ",", "value", ")", ":", "c", "=", "[", "]", "if", "format", ".", "startswith", "(", "'%'", ")", ":", "c", ".", "append", "(", "'%'", ")", "c", ".", "append", "(", "value", ")", "if", "format", ".", "endswith", "(", "'%'", ")", ":", "c", ".", "append", "(", "'%'", ")", "return", "column", ".", "like", "(", "''", ".", "join", "(", "c", ")", ")" ]
make like condition

:param column: column object
:param format: '%_' '_%' '%_%'
:param value: column value
:return: condition object
[ "make", "like", "condition", ":", "param", "column", ":", "column", "object", ":", "param", "format", ":", "%_", "_%", "%_%", ":", "param", "value", ":", "column", "value", ":", "return", ":", "condition", "object" ]
python
train
29.266667
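The format markers in `_make_like` above map to the usual SQL LIKE idioms. A pure-Python sketch of just the pattern-building step (the column object is left out):

    def make_like_pattern(format, value):
        """Build the LIKE pattern the same way _make_like does."""
        c = []
        if format.startswith('%'):
            c.append('%')
        c.append(value)
        if format.endswith('%'):
            c.append('%')
        return ''.join(c)

    assert make_like_pattern('%_%', 'abc') == '%abc%'   # contains
    assert make_like_pattern('_%', 'abc') == 'abc%'     # starts with
    assert make_like_pattern('%_', 'abc') == '%abc'     # ends with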
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py#L736-L752
def build(self, **kw):
    """Actually build the node.

    This is called by the Taskmaster after it's decided that the Node
    is out-of-date and must be rebuilt, and after the prepare()
    method has gotten everything, uh, prepared.

    This method is called from multiple threads in a parallel build,
    so only do thread safe stuff here.  Do thread unsafe stuff in
    built().
    """
    try:
        self.get_executor()(self, **kw)
    except SCons.Errors.BuildError as e:
        e.node = self
        raise
[ "def", "build", "(", "self", ",", "*", "*", "kw", ")", ":", "try", ":", "self", ".", "get_executor", "(", ")", "(", "self", ",", "*", "*", "kw", ")", "except", "SCons", ".", "Errors", ".", "BuildError", "as", "e", ":", "e", ".", "node", "=", "self", "raise" ]
Actually build the node. This is called by the Taskmaster after it's decided that the Node is out-of-date and must be rebuilt, and after the prepare() method has gotten everything, uh, prepared. This method is called from multiple threads in a parallel build, so only do thread safe stuff here. Do thread unsafe stuff in built().
[ "Actually", "build", "the", "node", "." ]
python
train
32.705882
atztogo/phonopy
phonopy/structure/spglib.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/spglib.py#L844-L877
def niggli_reduce(lattice, eps=1e-5):
    """Run Niggli reduction

    Args:
        lattice: Lattice parameters in the form of
            [[a_x, a_y, a_z],
             [b_x, b_y, b_z],
             [c_x, c_y, c_z]]
        eps:
            float: Tolerance to check if difference of norms of two basis
            vectors is close to zero or not and if two basis vectors are
            orthogonal by the value of dot product being close to zero or
            not. The detail is shown at https://atztogo.github.io/niggli/.

    Returns:
        if the Niggli reduction succeeded:
            Reduced lattice parameters are given as a numpy 'double' array:
            [[a_x, a_y, a_z],
             [b_x, b_y, b_z],
             [c_x, c_y, c_z]]
        otherwise None is returned.
    """
    _set_no_error()

    niggli_lattice = np.array(np.transpose(lattice), dtype='double', order='C')
    result = spg.niggli_reduce(niggli_lattice, float(eps))
    _set_error_message()
    if result == 0:
        return None
    else:
        return np.array(np.transpose(niggli_lattice), dtype='double', order='C')
[ "def", "niggli_reduce", "(", "lattice", ",", "eps", "=", "1e-5", ")", ":", "_set_no_error", "(", ")", "niggli_lattice", "=", "np", ".", "array", "(", "np", ".", "transpose", "(", "lattice", ")", ",", "dtype", "=", "'double'", ",", "order", "=", "'C'", ")", "result", "=", "spg", ".", "niggli_reduce", "(", "niggli_lattice", ",", "float", "(", "eps", ")", ")", "_set_error_message", "(", ")", "if", "result", "==", "0", ":", "return", "None", "else", ":", "return", "np", ".", "array", "(", "np", ".", "transpose", "(", "niggli_lattice", ")", ",", "dtype", "=", "'double'", ",", "order", "=", "'C'", ")" ]
Run Niggli reduction

Args:
    lattice: Lattice parameters in the form of
        [[a_x, a_y, a_z],
         [b_x, b_y, b_z],
         [c_x, c_y, c_z]]
    eps:
        float: Tolerance to check if difference of norms of two basis
        vectors is close to zero or not and if two basis vectors are
        orthogonal by the value of dot product being close to zero or
        not. The detail is shown at https://atztogo.github.io/niggli/.

Returns:
    if the Niggli reduction succeeded:
        Reduced lattice parameters are given as a numpy 'double' array:
        [[a_x, a_y, a_z],
         [b_x, b_y, b_z],
         [c_x, c_y, c_z]]
    otherwise None is returned.
[ "Run", "Niggli", "reduction" ]
python
train
33.558824
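A possible call of `niggli_reduce` from the record above; this assumes spglib's compiled extension is importable, and the lattice values are made up for illustration:

    import numpy as np

    lattice = [[4.0, 0.0, 0.0],
               [10.0, 4.0, 0.0],   # b has a large component along a, so not reduced
               [0.0, 0.0, 3.0]]
    reduced = niggli_reduce(lattice)
    if reduced is None:
        print('Niggli reduction failed')
    else:
        print(np.round(reduced, 5))   # shorter, more nearly orthogonal basis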
msmbuilder/msmbuilder
msmbuilder/tpt/committor.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/tpt/committor.py#L87-L147
def conditional_committors(source, sink, waypoint, msm):
    """
    Computes the conditional committors :math:`q^{ABC^+}` which are is the
    probability of starting in one state and visiting state B before A while
    also visiting state C at some point.

    Note that in the notation of Dickson et. al. this computes
    :math:`h_c(A,B)`, with ``sources = A``, ``sinks = B``, ``waypoint = C``

    Parameters
    ----------
    waypoint : int
        The index of the intermediate state
    source : int
        The index of the source state
    sink : int
        The index of the sink state
    msm : msmbuilder.MarkovStateModel
        MSM to analyze.

    Returns
    -------
    cond_committors : np.ndarray
        Conditional committors, i.e. the probability of visiting
        a waypoint when on a path between source and sink.

    See Also
    --------
    msmbuilder.tpt.fraction_visited : function
        Calculate the fraction of visits to a waypoint from a given
        source to a sink.
    msmbuilder.tpt.hub_scores : function
        Compute the 'hub score', the weighted fraction of visits for an
        entire network.

    Notes
    -----
    Employs dense linear algebra,
    memory use scales as N^2,
    and cycle use scales as N^3

    References
    ----------
    .. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
    """
    # typecheck
    for data in [source, sink, waypoint]:
        if not isinstance(data, int):
            raise ValueError("source, sink, and waypoint must be integers.")

    if (source == waypoint) or (sink == waypoint) or (sink == source):
        raise ValueError('source, sink, waypoint must all be disjoint!')

    if hasattr(msm, 'all_transmats_'):
        cond_committors = np.zeros(msm.all_transmats_.shape[:2])
        for i, tprob in enumerate(msm.all_transmats_):
            cond_committors[i, :] = _conditional_committors(source, sink,
                                                            waypoint, tprob)
        return np.median(cond_committors, axis=0)

    return _conditional_committors(source, sink, waypoint, msm.transmat_)
[ "def", "conditional_committors", "(", "source", ",", "sink", ",", "waypoint", ",", "msm", ")", ":", "# typecheck", "for", "data", "in", "[", "source", ",", "sink", ",", "waypoint", "]", ":", "if", "not", "isinstance", "(", "data", ",", "int", ")", ":", "raise", "ValueError", "(", "\"source, sink, and waypoint must be integers.\"", ")", "if", "(", "source", "==", "waypoint", ")", "or", "(", "sink", "==", "waypoint", ")", "or", "(", "sink", "==", "source", ")", ":", "raise", "ValueError", "(", "'source, sink, waypoint must all be disjoint!'", ")", "if", "hasattr", "(", "msm", ",", "'all_transmats_'", ")", ":", "cond_committors", "=", "np", ".", "zeros", "(", "msm", ".", "all_transmats_", ".", "shape", "[", ":", "2", "]", ")", "for", "i", ",", "tprob", "in", "enumerate", "(", "msm", ".", "all_transmats_", ")", ":", "cond_committors", "[", "i", ",", ":", "]", "=", "_conditional_committors", "(", "source", ",", "sink", ",", "waypoint", ",", "tprob", ")", "return", "np", ".", "median", "(", "cond_committors", ",", "axis", "=", "0", ")", "return", "_conditional_committors", "(", "source", ",", "sink", ",", "waypoint", ",", "msm", ".", "transmat_", ")" ]
Computes the conditional committors :math:`q^{ABC^+}` which are is the
probability of starting in one state and visiting state B before A while
also visiting state C at some point.

Note that in the notation of Dickson et. al. this computes
:math:`h_c(A,B)`, with ``sources = A``, ``sinks = B``, ``waypoint = C``

Parameters
----------
waypoint : int
    The index of the intermediate state
source : int
    The index of the source state
sink : int
    The index of the sink state
msm : msmbuilder.MarkovStateModel
    MSM to analyze.

Returns
-------
cond_committors : np.ndarray
    Conditional committors, i.e. the probability of visiting
    a waypoint when on a path between source and sink.

See Also
--------
msmbuilder.tpt.fraction_visited : function
    Calculate the fraction of visits to a waypoint from a given
    source to a sink.
msmbuilder.tpt.hub_scores : function
    Compute the 'hub score', the weighted fraction of visits for an
    entire network.

Notes
-----
Employs dense linear algebra,
memory use scales as N^2,
and cycle use scales as N^3

References
----------
.. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
[ "Computes", "the", "conditional", "committors", ":", "math", ":", "q^", "{", "ABC^", "+", "}", "which", "are", "is", "the", "probability", "of", "starting", "in", "one", "state", "and", "visiting", "state", "B", "before", "A", "while", "also", "visiting", "state", "C", "at", "some", "point", "." ]
python
train
33.901639
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/bits.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/bits.py#L208-L232
def btc_witness_script_deserialize(_script):
    """
    Given a hex-encoded serialized witness script,
    turn it into a witness stack
    (i.e. an array of Nones, ints, and strings)
    """
    script = None
    if isinstance(_script, str) and re.match('^[0-9a-fA-F]*$', _script):
        # convert from hex to bin, safely
        script = binascii.unhexlify(_script)
    else:
        script = _script[:]

    # pointer to byte offset in _script (as an array due to Python scoping rules)
    ptr = [0]
    witness_stack_len = read_var_int(ptr, script)
    witness_stack = []
    for _ in xrange(0, witness_stack_len):
        stack_item = read_var_string(ptr, script)
        witness_stack.append(stack_item)

    return witness_stack
[ "def", "btc_witness_script_deserialize", "(", "_script", ")", ":", "script", "=", "None", "if", "isinstance", "(", "_script", ",", "str", ")", "and", "re", ".", "match", "(", "'^[0-9a-fA-F]*$'", ",", "_script", ")", ":", "# convert from hex to bin, safely", "script", "=", "binascii", ".", "unhexlify", "(", "_script", ")", "else", ":", "script", "=", "_script", "[", ":", "]", "# pointer to byte offset in _script (as an array due to Python scoping rules)", "ptr", "=", "[", "0", "]", "witness_stack_len", "=", "read_var_int", "(", "ptr", ",", "script", ")", "witness_stack", "=", "[", "]", "for", "_", "in", "xrange", "(", "0", ",", "witness_stack_len", ")", ":", "stack_item", "=", "read_var_string", "(", "ptr", ",", "script", ")", "witness_stack", ".", "append", "(", "stack_item", ")", "return", "witness_stack" ]
Given a hex-encoded serialized witness script, turn it into a witness stack (i.e. an array of Nones, ints, and strings)
[ "Given", "a", "hex", "-", "encoded", "serialized", "witness", "script", "turn", "it", "into", "a", "witness", "stack", "(", "i", ".", "e", ".", "an", "array", "of", "Nones", "ints", "and", "strings", ")" ]
python
train
28.56
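`read_var_int` and `read_var_string` are module helpers not shown in the record above. A Python 3 sketch of the standard Bitcoin CompactSize encoding they are assumed to implement, keeping the same ptr-as-one-element-list convention:

    def read_var_int(ptr, data):
        """Decode a Bitcoin CompactSize integer at data[ptr[0]], advancing ptr."""
        first = data[ptr[0]]
        ptr[0] += 1
        if first < 0xfd:
            return first
        size = {0xfd: 2, 0xfe: 4, 0xff: 8}[first]
        value = int.from_bytes(data[ptr[0]:ptr[0] + size], 'little')
        ptr[0] += size
        return value

    def read_var_string(ptr, data):
        """Read a CompactSize length prefix followed by that many raw bytes."""
        length = read_var_int(ptr, data)
        item = data[ptr[0]:ptr[0] + length]
        ptr[0] += length
        return item

    stack_bytes = bytes.fromhex('0201ab02cdef')   # 2 items: 0xab and 0xcdef
    ptr = [0]
    n = read_var_int(ptr, stack_bytes)
    print([read_var_string(ptr, stack_bytes).hex() for _ in range(n)])   # ['ab', 'cdef']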
marrabld/planarradpy
gui/gui_mainLayout.py
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/gui/gui_mainLayout.py#L863-L871
def mouse_move(self, event):
    """
    The following gets back coordinates of the mouse on the canvas.
    """
    if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
        self.posX = event.xdata
        self.posY = event.ydata
        self.graphic_target(self.posX, self.posY)
[ "def", "mouse_move", "(", "self", ",", "event", ")", ":", "if", "(", "self", ".", "ui", ".", "tabWidget", ".", "currentIndex", "(", ")", "==", "TabWidget", ".", "NORMAL_MODE", ")", ":", "self", ".", "posX", "=", "event", ".", "xdata", "self", ".", "posY", "=", "event", ".", "ydata", "self", ".", "graphic_target", "(", "self", ".", "posX", ",", "self", ".", "posY", ")" ]
The following gets back coordinates of the mouse on the canvas.
[ "The", "following", "gets", "back", "coordinates", "of", "the", "mouse", "on", "the", "canvas", "." ]
python
test
35
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/query.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L939-L970
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
    """Parse a query response protobuf to a document snapshot.

    Args:
        response_pb (google.cloud.proto.firestore.v1beta1.\
            firestore_pb2.RunQueryResponse): A
        collection (~.firestore_v1beta1.collection.CollectionReference): A
            reference to the collection that initiated the query.
        expected_prefix (str): The expected prefix for fully-qualified
            document names returned in the query results. This can be computed
            directly from ``collection`` via :meth:`_parent_info`.

    Returns:
        Optional[~.firestore.document.DocumentSnapshot]: A
        snapshot of the data returned in the query. If ``response_pb.document``
        is not set, the snapshot will be :data:`None`.
    """
    if not response_pb.HasField("document"):
        return None

    document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
    reference = collection.document(document_id)
    data = _helpers.decode_dict(response_pb.document.fields, collection._client)
    snapshot = document.DocumentSnapshot(
        reference,
        data,
        exists=True,
        read_time=response_pb.read_time,
        create_time=response_pb.document.create_time,
        update_time=response_pb.document.update_time,
    )
    return snapshot
[ "def", "_query_response_to_snapshot", "(", "response_pb", ",", "collection", ",", "expected_prefix", ")", ":", "if", "not", "response_pb", ".", "HasField", "(", "\"document\"", ")", ":", "return", "None", "document_id", "=", "_helpers", ".", "get_doc_id", "(", "response_pb", ".", "document", ",", "expected_prefix", ")", "reference", "=", "collection", ".", "document", "(", "document_id", ")", "data", "=", "_helpers", ".", "decode_dict", "(", "response_pb", ".", "document", ".", "fields", ",", "collection", ".", "_client", ")", "snapshot", "=", "document", ".", "DocumentSnapshot", "(", "reference", ",", "data", ",", "exists", "=", "True", ",", "read_time", "=", "response_pb", ".", "read_time", ",", "create_time", "=", "response_pb", ".", "document", ".", "create_time", ",", "update_time", "=", "response_pb", ".", "document", ".", "update_time", ",", ")", "return", "snapshot" ]
Parse a query response protobuf to a document snapshot.

Args:
    response_pb (google.cloud.proto.firestore.v1beta1.\
        firestore_pb2.RunQueryResponse): A
    collection (~.firestore_v1beta1.collection.CollectionReference): A
        reference to the collection that initiated the query.
    expected_prefix (str): The expected prefix for fully-qualified
        document names returned in the query results. This can be computed
        directly from ``collection`` via :meth:`_parent_info`.

Returns:
    Optional[~.firestore.document.DocumentSnapshot]: A
    snapshot of the data returned in the query. If ``response_pb.document``
    is not set, the snapshot will be :data:`None`.
[ "Parse", "a", "query", "response", "protobuf", "to", "a", "document", "snapshot", "." ]
python
train
41.9375
JdeRobot/base
src/drivers/drone/pose3d.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/drone/pose3d.py#L135-L148
def __callback (self, odom):
    '''
    Callback function to receive and save Pose3d.

    @param odom: ROS Odometry received

    @type odom: Odometry
    '''
    pose = odometry2Pose3D(odom)

    self.lock.acquire()
    self.data = pose
    self.lock.release()
[ "def", "__callback", "(", "self", ",", "odom", ")", ":", "pose", "=", "odometry2Pose3D", "(", "odom", ")", "self", ".", "lock", ".", "acquire", "(", ")", "self", ".", "data", "=", "pose", "self", ".", "lock", ".", "release", "(", ")" ]
Callback function to receive and save Pose3d.

@param odom: ROS Odometry received
@type odom: Odometry
[ "Callback", "function", "to", "receive", "and", "save", "Pose3d", "." ]
python
train
21.142857
cos-archives/modular-odm
modularodm/cache.py
https://github.com/cos-archives/modular-odm/blob/8a34891892b8af69b21fdc46701c91763a5c1cf9/modularodm/cache.py#L3-L21
def set_nested(data, value, *keys):
    """Assign to a nested dictionary.

    :param dict data: Dictionary to mutate
    :param value: Value to set
    :param list *keys: List of nested keys

    >>> data = {}
    >>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
    >>> data
    {'k0': {'k1': {'k2': 'hi'}}}

    """
    if len(keys) == 1:
        data[keys[0]] = value
    else:
        if keys[0] not in data:
            data[keys[0]] = {}
        set_nested(data[keys[0]], value, *keys[1:])
[ "def", "set_nested", "(", "data", ",", "value", ",", "*", "keys", ")", ":", "if", "len", "(", "keys", ")", "==", "1", ":", "data", "[", "keys", "[", "0", "]", "]", "=", "value", "else", ":", "if", "keys", "[", "0", "]", "not", "in", "data", ":", "data", "[", "keys", "[", "0", "]", "]", "=", "{", "}", "set_nested", "(", "data", "[", "keys", "[", "0", "]", "]", ",", "value", ",", "*", "keys", "[", "1", ":", "]", ")" ]
Assign to a nested dictionary.

:param dict data: Dictionary to mutate
:param value: Value to set
:param list *keys: List of nested keys

>>> data = {}
>>> set_nested(data, 'hi', 'k0', 'k1', 'k2')
>>> data
{'k0': {'k1': {'k2': 'hi'}}}
[ "Assign", "to", "a", "nested", "dictionary", "." ]
python
valid
24.947368
area4lib/area4
area4/util.py
https://github.com/area4lib/area4/blob/7f71b58d6b44b1a61284a8a01f26afd3138b9b17/area4/util.py#L13-L29
def get_raw_file():
    """
    Get the raw divider file in a string array.

    :return: the array
    :rtype: str
    """
    with open("{0}/dividers.txt".format(
        os.path.abspath(
            os.path.dirname(__file__)
        )
    ), mode="r") as file_handler:
        lines = file_handler.readlines()
        lines[35] = str(
            random.randint(0, 999999999999)
        )
        return lines
[ "def", "get_raw_file", "(", ")", ":", "with", "open", "(", "\"{0}/dividers.txt\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ")", ",", "mode", "=", "\"r\"", ")", "as", "file_handler", ":", "lines", "=", "file_handler", ".", "readlines", "(", ")", "lines", "[", "35", "]", "=", "str", "(", "random", ".", "randint", "(", "0", ",", "999999999999", ")", ")", "return", "lines" ]
Get the raw divider file in a string array.

:return: the array
:rtype: str
[ "Get", "the", "raw", "divider", "file", "in", "a", "string", "array", "." ]
python
train
23.294118
google/transitfeed
misc/import_ch_zurich.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/misc/import_ch_zurich.py#L163-L190
def DemangleName(self, name):
    "Applies some simple heuristics to split names into (city, name)."

    # Handle special cases where our heuristcs doesn't work.
    # Example:"Triemli" --> ("Triemli", "Zurich").
    if name in SPECIAL_NAMES:
        return SPECIAL_NAMES[name]

    # Expand abbreviations.
    for abbrev, expanded in [('str.', 'strasse'),
                             ('Schiffst.', 'Schiffstation')]:
        suffix_pos = name.rfind(abbrev)
        if suffix_pos > 0:
            name = name[:suffix_pos] + expanded
    #end for

    names = name.split(", ", 1)
    if len(names) == 2:
        if names[1] in POI_TERMS:
            nam = u'%s %s' % (names[0], names[1])
        else:
            nam = names[1]
        city = names[0]
    else:
        # "Zurich Enge": First word of station name designates the city
        nam = names[0]
        city = nam.split(' ')[0]
    return (nam, SPECIAL_CITIES.get(city, city))
[ "def", "DemangleName", "(", "self", ",", "name", ")", ":", "# Handle special cases where our heuristcs doesn't work.", "# Example:\"Triemli\" --> (\"Triemli\", \"Zurich\").", "if", "name", "in", "SPECIAL_NAMES", ":", "return", "SPECIAL_NAMES", "[", "name", "]", "# Expand abbreviations.", "for", "abbrev", ",", "expanded", "in", "[", "(", "'str.'", ",", "'strasse'", ")", ",", "(", "'Schiffst.'", ",", "'Schiffstation'", ")", "]", ":", "suffix_pos", "=", "name", ".", "rfind", "(", "abbrev", ")", "if", "suffix_pos", ">", "0", ":", "name", "=", "name", "[", ":", "suffix_pos", "]", "+", "expanded", "#end for", "names", "=", "name", ".", "split", "(", "\", \"", ",", "1", ")", "if", "len", "(", "names", ")", "==", "2", ":", "if", "names", "[", "1", "]", "in", "POI_TERMS", ":", "nam", "=", "u'%s %s'", "%", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", "else", ":", "nam", "=", "names", "[", "1", "]", "city", "=", "names", "[", "0", "]", "else", ":", "# \"Zurich Enge\": First word of station name designates the city", "nam", "=", "names", "[", "0", "]", "city", "=", "nam", ".", "split", "(", "' '", ")", "[", "0", "]", "return", "(", "nam", ",", "SPECIAL_CITIES", ".", "get", "(", "city", ",", "city", ")", ")" ]
Applies some simple heuristics to split names into (city, name).
[ "Applies", "some", "simple", "heuristics", "to", "split", "names", "into", "(", "city", "name", ")", "." ]
python
train
31.535714
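A hedged, standalone sketch of the same heuristic: the three lookup tables below are tiny stand-ins for the module-level constants in import_ch_zurich.py, not their real contents.

# Minimal standalone sketch; the lookup tables are illustrative stubs.
SPECIAL_NAMES = {'Triemli': ('Triemli', 'Zurich')}
POI_TERMS = {'Bahnhof'}
SPECIAL_CITIES = {'Zuerich': 'Zurich'}

def demangle_name(name):
    if name in SPECIAL_NAMES:
        return SPECIAL_NAMES[name]
    # Expand abbreviations, cutting the name at the abbreviation.
    for abbrev, expanded in [('str.', 'strasse'), ('Schiffst.', 'Schiffstation')]:
        suffix_pos = name.rfind(abbrev)
        if suffix_pos > 0:
            name = name[:suffix_pos] + expanded
    names = name.split(', ', 1)
    if len(names) == 2:
        nam = u'%s %s' % (names[0], names[1]) if names[1] in POI_TERMS else names[1]
        city = names[0]
    else:
        nam = names[0]
        city = nam.split(' ')[0]   # first word doubles as the city
    return (nam, SPECIAL_CITIES.get(city, city))

print(demangle_name('Zuerich, Rennweg'))  # ('Rennweg', 'Zurich')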
kgori/treeCl
treeCl/distance_matrix.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/distance_matrix.py#L147-L156
def normalise_rows(matrix): """ Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged """ lengths = np.apply_along_axis(np.linalg.norm, 1, matrix) if not (lengths > 0).all(): # raise ValueError('Cannot normalise 0 length vector to length 1') # print(matrix) lengths[lengths == 0] = 1 return matrix / lengths[:, np.newaxis]
[ "def", "normalise_rows", "(", "matrix", ")", ":", "lengths", "=", "np", ".", "apply_along_axis", "(", "np", ".", "linalg", ".", "norm", ",", "1", ",", "matrix", ")", "if", "not", "(", "lengths", ">", "0", ")", ".", "all", "(", ")", ":", "# raise ValueError('Cannot normalise 0 length vector to length 1')", "# print(matrix)", "lengths", "[", "lengths", "==", "0", "]", "=", "1", "return", "matrix", "/", "lengths", "[", ":", ",", "np", ".", "newaxis", "]" ]
Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged
[ "Scales", "all", "rows", "to", "length", "1", ".", "Fails", "when", "row", "is", "0", "-", "length", "so", "it", "leaves", "these", "unchanged" ]
python
train
39
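A minimal NumPy check of the zero-row guard, with the function copied from the entry and made-up data: the zero row passes through untouched instead of becoming NaN.

import numpy as np

def normalise_rows(matrix):
    # Row-wise L2 norms; zero-length rows get a dummy norm of 1 so the
    # division leaves them unchanged instead of producing NaNs.
    lengths = np.apply_along_axis(np.linalg.norm, 1, matrix)
    lengths[lengths == 0] = 1
    return matrix / lengths[:, np.newaxis]

m = np.array([[3.0, 4.0], [0.0, 0.0]])
print(normalise_rows(m))
# [[0.6 0.8]
#  [0.  0. ]]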
zalando/patroni
patroni/postgresql.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/postgresql.py#L892-L961
def start(self, timeout=None, task=None, block_callbacks=False, role=None): """Start PostgreSQL Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion or failure. :returns: True if start was initiated and postmaster ports are open, False if start failed""" # make sure we close all connections established against # the former node, otherwise, we might get a stalled one # after kill -9, which would report incorrect data to # patroni. self.close_connection() if self.is_running(): logger.error('Cannot start PostgreSQL because one is already running.') self.set_state('starting') return True if not block_callbacks: self.__cb_pending = ACTION_ON_START self.set_role(role or self.get_postgres_role_from_data_directory()) self.set_state('starting') self._pending_restart = False configuration = self._server_parameters if self.role == 'master' else self._build_effective_configuration() self._write_postgresql_conf(configuration) self.resolve_connection_addresses() self._replace_pg_hba() self._replace_pg_ident() options = ['--{0}={1}'.format(p, configuration[p]) for p in self.CMDLINE_OPTIONS if p in configuration and p != 'wal_keep_segments'] with self._cancellable_lock: if self._is_cancelled: return False with task or null_context(): if task and task.is_cancelled: logger.info("PostgreSQL start cancelled.") return False self._postmaster_proc = PostmasterProcess.start(self._pgcommand('postgres'), self._data_dir, self._postgresql_conf, options) if task: task.complete(self._postmaster_proc) start_timeout = timeout if not start_timeout: try: start_timeout = float(self.config.get('pg_ctl_timeout', 60)) except ValueError: start_timeout = 60 # We want postmaster to open ports before we continue if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout): return False ret = self.wait_for_startup(start_timeout) if ret is not None: return ret elif timeout is not None: return False else: return None
[ "def", "start", "(", "self", ",", "timeout", "=", "None", ",", "task", "=", "None", ",", "block_callbacks", "=", "False", ",", "role", "=", "None", ")", ":", "# make sure we close all connections established against", "# the former node, otherwise, we might get a stalled one", "# after kill -9, which would report incorrect data to", "# patroni.", "self", ".", "close_connection", "(", ")", "if", "self", ".", "is_running", "(", ")", ":", "logger", ".", "error", "(", "'Cannot start PostgreSQL because one is already running.'", ")", "self", ".", "set_state", "(", "'starting'", ")", "return", "True", "if", "not", "block_callbacks", ":", "self", ".", "__cb_pending", "=", "ACTION_ON_START", "self", ".", "set_role", "(", "role", "or", "self", ".", "get_postgres_role_from_data_directory", "(", ")", ")", "self", ".", "set_state", "(", "'starting'", ")", "self", ".", "_pending_restart", "=", "False", "configuration", "=", "self", ".", "_server_parameters", "if", "self", ".", "role", "==", "'master'", "else", "self", ".", "_build_effective_configuration", "(", ")", "self", ".", "_write_postgresql_conf", "(", "configuration", ")", "self", ".", "resolve_connection_addresses", "(", ")", "self", ".", "_replace_pg_hba", "(", ")", "self", ".", "_replace_pg_ident", "(", ")", "options", "=", "[", "'--{0}={1}'", ".", "format", "(", "p", ",", "configuration", "[", "p", "]", ")", "for", "p", "in", "self", ".", "CMDLINE_OPTIONS", "if", "p", "in", "configuration", "and", "p", "!=", "'wal_keep_segments'", "]", "with", "self", ".", "_cancellable_lock", ":", "if", "self", ".", "_is_cancelled", ":", "return", "False", "with", "task", "or", "null_context", "(", ")", ":", "if", "task", "and", "task", ".", "is_cancelled", ":", "logger", ".", "info", "(", "\"PostgreSQL start cancelled.\"", ")", "return", "False", "self", ".", "_postmaster_proc", "=", "PostmasterProcess", ".", "start", "(", "self", ".", "_pgcommand", "(", "'postgres'", ")", ",", "self", ".", "_data_dir", ",", "self", ".", "_postgresql_conf", ",", "options", ")", "if", "task", ":", "task", ".", "complete", "(", "self", ".", "_postmaster_proc", ")", "start_timeout", "=", "timeout", "if", "not", "start_timeout", ":", "try", ":", "start_timeout", "=", "float", "(", "self", ".", "config", ".", "get", "(", "'pg_ctl_timeout'", ",", "60", ")", ")", "except", "ValueError", ":", "start_timeout", "=", "60", "# We want postmaster to open ports before we continue", "if", "not", "self", ".", "_postmaster_proc", "or", "not", "self", ".", "wait_for_port_open", "(", "self", ".", "_postmaster_proc", ",", "start_timeout", ")", ":", "return", "False", "ret", "=", "self", ".", "wait_for_startup", "(", "start_timeout", ")", "if", "ret", "is", "not", "None", ":", "return", "ret", "elif", "timeout", "is", "not", "None", ":", "return", "False", "else", ":", "return", "None" ]
Start PostgreSQL Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion or failure. :returns: True if start was initiated and postmaster ports are open, False if start failed
[ "Start", "PostgreSQL" ]
python
train
37.714286
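One detail worth isolating from the long method above is how the server command line is assembled. A toy reconstruction of just that list comprehension; the CMDLINE_OPTIONS tuple and configuration values here are invented for illustration.

# Toy reconstruction of the option-building step; names and values are made up.
CMDLINE_OPTIONS = ('max_connections', 'port', 'wal_keep_segments')
configuration = {'max_connections': 100, 'port': 5432, 'wal_keep_segments': 8}

options = ['--{0}={1}'.format(p, configuration[p])
           for p in CMDLINE_OPTIONS
           if p in configuration and p != 'wal_keep_segments']
print(options)  # ['--max_connections=100', '--port=5432']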
bernardopires/django-tenant-schemas
tenant_schemas/utils.py
https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/utils.py#L53-L61
def clean_tenant_url(url_string): """ Removes the TENANT_TOKEN from a particular string """ if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'): if (settings.PUBLIC_SCHEMA_URLCONF and url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)): url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):] return url_string
[ "def", "clean_tenant_url", "(", "url_string", ")", ":", "if", "hasattr", "(", "settings", ",", "'PUBLIC_SCHEMA_URLCONF'", ")", ":", "if", "(", "settings", ".", "PUBLIC_SCHEMA_URLCONF", "and", "url_string", ".", "startswith", "(", "settings", ".", "PUBLIC_SCHEMA_URLCONF", ")", ")", ":", "url_string", "=", "url_string", "[", "len", "(", "settings", ".", "PUBLIC_SCHEMA_URLCONF", ")", ":", "]", "return", "url_string" ]
Removes the TENANT_TOKEN from a particular string
[ "Removes", "the", "TENANT_TOKEN", "from", "a", "particular", "string" ]
python
train
40.111111
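A standalone sketch: in django-tenant-schemas the function reads Django's settings module, which is faked here with a small namespace object.

from types import SimpleNamespace

# Stand-in for django.conf.settings, for demonstration only.
settings = SimpleNamespace(PUBLIC_SCHEMA_URLCONF='/public')

def clean_tenant_url(url_string):
    if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
        if (settings.PUBLIC_SCHEMA_URLCONF
                and url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
            url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
    return url_string

print(clean_tenant_url('/public/customers/'))  # '/customers/'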
DataBiosphere/dsub
dsub/providers/google_v2_operations.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_v2_operations.py#L167-L179
def get_last_update(op): """Return the most recent timestamp in the operation.""" last_update = get_end_time(op) if not last_update: last_event = get_last_event(op) if last_event: last_update = last_event['timestamp'] if not last_update: last_update = get_create_time(op) return last_update
[ "def", "get_last_update", "(", "op", ")", ":", "last_update", "=", "get_end_time", "(", "op", ")", "if", "not", "last_update", ":", "last_event", "=", "get_last_event", "(", "op", ")", "if", "last_event", ":", "last_update", "=", "last_event", "[", "'timestamp'", "]", "if", "not", "last_update", ":", "last_update", "=", "get_create_time", "(", "op", ")", "return", "last_update" ]
Return the most recent timestamp in the operation.
[ "Return", "the", "most", "recent", "timestamp", "in", "the", "operation", "." ]
python
valid
23.769231
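A sketch of the fallback order with dict-backed stubs; the real get_end_time / get_last_event / get_create_time helpers read fields of a Pipelines API operation, which these stand-ins only imitate.

# Stub accessors: pull keys from a plain dict to show the fallback order.
def get_end_time(op):
    return op.get('end_time')

def get_last_event(op):
    events = op.get('events') or []
    return events[0] if events else None

def get_create_time(op):
    return op.get('create_time')

def get_last_update(op):
    last_update = get_end_time(op)
    if not last_update:
        last_event = get_last_event(op)
        if last_event:
            last_update = last_event['timestamp']
    if not last_update:
        last_update = get_create_time(op)
    return last_update

running = {'create_time': 't0', 'events': [{'timestamp': 't1'}]}
print(get_last_update(running))  # 't1' -- no end time yet, newest event wins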
erdc/RAPIDpy
RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/inflow/CreateInflowFileFromGriddedRunoff.py#L52-L86
def read_in_weight_table(self, in_weight_table): """ Read in weight table """ print("Reading the weight table...") with open_csv(in_weight_table, "r") as csvfile: reader = csv.reader(csvfile) header_row = next(reader) # check number of columns in the weight table if len(header_row) < len(self.header_wt): raise Exception(self.error_messages[4]) # check header if header_row[1:len(self.header_wt)] != self.header_wt[1:]: raise Exception(self.error_messages[5]) self.dict_list = \ np.loadtxt( in_weight_table, delimiter=",", usecols=(0, 1, 2, 3, 4), skiprows=1, dtype={ 'names': (self.header_wt[0], self.header_wt[1], self.header_wt[2], self.header_wt[3], self.header_wt[4]), 'formats': ('i8', 'f8', 'i8', 'i8', 'i8') }, ) self.count = self.dict_list.shape[0] self.size_stream_id = \ len(np.unique(np.array(self.dict_list[self.header_wt[0]], dtype=np.int32)))
[ "def", "read_in_weight_table", "(", "self", ",", "in_weight_table", ")", ":", "print", "(", "\"Reading the weight table...\"", ")", "with", "open_csv", "(", "in_weight_table", ",", "\"r\"", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ")", "header_row", "=", "next", "(", "reader", ")", "# check number of columns in the weight table\r", "if", "len", "(", "header_row", ")", "<", "len", "(", "self", ".", "header_wt", ")", ":", "raise", "Exception", "(", "self", ".", "error_messages", "[", "4", "]", ")", "# check header\r", "if", "header_row", "[", "1", ":", "len", "(", "self", ".", "header_wt", ")", "]", "!=", "self", ".", "header_wt", "[", "1", ":", "]", ":", "raise", "Exception", "(", "self", ".", "error_messages", "[", "5", "]", ")", "self", ".", "dict_list", "=", "np", ".", "loadtxt", "(", "in_weight_table", ",", "delimiter", "=", "\",\"", ",", "usecols", "=", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ")", ",", "skiprows", "=", "1", ",", "dtype", "=", "{", "'names'", ":", "(", "self", ".", "header_wt", "[", "0", "]", ",", "self", ".", "header_wt", "[", "1", "]", ",", "self", ".", "header_wt", "[", "2", "]", ",", "self", ".", "header_wt", "[", "3", "]", ",", "self", ".", "header_wt", "[", "4", "]", ")", ",", "'formats'", ":", "(", "'i8'", ",", "'f8'", ",", "'i8'", ",", "'i8'", ",", "'i8'", ")", "}", ",", ")", "self", ".", "count", "=", "self", ".", "dict_list", ".", "shape", "[", "0", "]", "self", ".", "size_stream_id", "=", "len", "(", "np", ".", "unique", "(", "np", ".", "array", "(", "self", ".", "dict_list", "[", "self", ".", "header_wt", "[", "0", "]", "]", ",", "dtype", "=", "np", ".", "int32", ")", ")", ")" ]
Read in weight table
[ "Read", "in", "weight", "table" ]
python
train
38.714286
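The interesting piece above is np.loadtxt with a structured dtype. A minimal self-contained demo on an in-memory CSV; the column names and values are made up, not RAPIDpy's real weight-table header.

import io
import numpy as np

# Illustrative CSV; header names and values are invented.
csv_text = u"streamID,area_sqm,lon_index,lat_index,npoints\n1,10.5,3,4,2\n1,2.5,3,5,2\n2,7.0,8,9,1\n"
table = np.loadtxt(io.StringIO(csv_text), delimiter=",", skiprows=1,
                   usecols=(0, 1, 2, 3, 4),
                   dtype={'names': ('streamID', 'area_sqm', 'lon_index',
                                    'lat_index', 'npoints'),
                          'formats': ('i8', 'f8', 'i8', 'i8', 'i8')})
print(table.shape[0])                     # 3 rows in the table
print(len(np.unique(table['streamID'])))  # 2 unique stream ids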
ericmjl/polcart
polcart/polcart.py
https://github.com/ericmjl/polcart/blob/1d003987f269c14884726205f871dd91de8610ce/polcart/polcart.py#L5-L20
def to_cartesian(r, theta, theta_units="radians"): """ Converts polar r, theta to cartesian x, y. """ assert theta_units in ['radians', 'degrees'],\ "kwarg theta_units must specified in radians or degrees" # Convert to radians if theta_units == "degrees": theta = to_radians(theta) theta = to_proper_radians(theta) x = r * cos(theta) y = r * sin(theta) return x, y
[ "def", "to_cartesian", "(", "r", ",", "theta", ",", "theta_units", "=", "\"radians\"", ")", ":", "assert", "theta_units", "in", "[", "'radians'", ",", "'degrees'", "]", ",", "\"kwarg theta_units must specified in radians or degrees\"", "# Convert to radians", "if", "theta_units", "==", "\"degrees\"", ":", "theta", "=", "to_radians", "(", "theta", ")", "theta", "=", "to_proper_radians", "(", "theta", ")", "x", "=", "r", "*", "cos", "(", "theta", ")", "y", "=", "r", "*", "sin", "(", "theta", ")", "return", "x", ",", "y" ]
Converts polar r, theta to cartesian x, y.
[ "Converts", "polar", "r", "theta", "to", "cartesian", "x", "y", "." ]
python
train
25.5
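A quick check with degrees input. to_radians and to_proper_radians live elsewhere in polcart, so simple stand-ins are substituted here; the wrapping convention (modulo 2*pi) is an assumption of this sketch.

from math import cos, sin, radians, pi

# Simple stand-ins for polcart's own helpers.
def to_radians(theta):
    return radians(theta)

def to_proper_radians(theta):
    return theta % (2 * pi)

def to_cartesian(r, theta, theta_units="radians"):
    assert theta_units in ['radians', 'degrees']
    if theta_units == "degrees":
        theta = to_radians(theta)
    theta = to_proper_radians(theta)
    return r * cos(theta), r * sin(theta)

print(to_cartesian(2, 90, theta_units="degrees"))  # approximately (0.0, 2.0)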
log2timeline/plaso
plaso/storage/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/interface.py#L1737-L1745
def SetStorageProfiler(self, storage_profiler): """Sets the storage profiler. Args: storage_profiler (StorageProfiler): storage profiler. """ self._storage_profiler = storage_profiler if self._storage_file: self._storage_file.SetStorageProfiler(storage_profiler)
[ "def", "SetStorageProfiler", "(", "self", ",", "storage_profiler", ")", ":", "self", ".", "_storage_profiler", "=", "storage_profiler", "if", "self", ".", "_storage_file", ":", "self", ".", "_storage_file", ".", "SetStorageProfiler", "(", "storage_profiler", ")" ]
Sets the storage profiler. Args: storage_profiler (StorageProfiler): storage profiler.
[ "Sets", "the", "storage", "profiler", "." ]
python
train
31.888889
nickoala/telepot
telepot/aio/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/aio/__init__.py#L398-L401
async def unpinChatMessage(self, chat_id): """ See: https://core.telegram.org/bots/api#unpinchatmessage """ p = _strip(locals()) return await self._api_request('unpinChatMessage', _rectify(p))
[ "async", "def", "unpinChatMessage", "(", "self", ",", "chat_id", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ")", "return", "await", "self", ".", "_api_request", "(", "'unpinChatMessage'", ",", "_rectify", "(", "p", ")", ")" ]
See: https://core.telegram.org/bots/api#unpinchatmessage
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#unpinchatmessage" ]
python
train
53.25
globus/globus-cli
globus_cli/commands/bookmark/rename.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/bookmark/rename.py#L13-L23
def bookmark_rename(bookmark_id_or_name, new_bookmark_name): """ Executor for `globus bookmark rename` """ client = get_client() bookmark_id = resolve_id_or_name(client, bookmark_id_or_name)["id"] submit_data = {"name": new_bookmark_name} res = client.update_bookmark(bookmark_id, submit_data) formatted_print(res, simple_text="Success")
[ "def", "bookmark_rename", "(", "bookmark_id_or_name", ",", "new_bookmark_name", ")", ":", "client", "=", "get_client", "(", ")", "bookmark_id", "=", "resolve_id_or_name", "(", "client", ",", "bookmark_id_or_name", ")", "[", "\"id\"", "]", "submit_data", "=", "{", "\"name\"", ":", "new_bookmark_name", "}", "res", "=", "client", ".", "update_bookmark", "(", "bookmark_id", ",", "submit_data", ")", "formatted_print", "(", "res", ",", "simple_text", "=", "\"Success\"", ")" ]
Executor for `globus bookmark rename`
[ "Executor", "for", "globus", "bookmark", "rename" ]
python
train
32.818182
F5Networks/f5-common-python
f5sdk_plugins/fixtures.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5sdk_plugins/fixtures.py#L116-L119
def peer(opt_peer, opt_username, opt_password, scope="module"): '''peer bigip fixture''' p = BigIP(opt_peer, opt_username, opt_password) return p
[ "def", "peer", "(", "opt_peer", ",", "opt_username", ",", "opt_password", ",", "scope", "=", "\"module\"", ")", ":", "p", "=", "BigIP", "(", "opt_peer", ",", "opt_username", ",", "opt_password", ")", "return", "p" ]
peer bigip fixture
[ "peer", "bigip", "fixture" ]
python
train
38.5
frmdstryr/enamlx
enamlx/core/middleware.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/middleware.py#L65-L95
def convert_enamldef_def_to_func(token_stream): """ A token stream processor which processes all enaml declarative functions to allow using `def` instead of `func`. It does this by transforming DEF tokens to NAME within enamldef blocks and then changing the token value to `func`. Notes ------ Use this at your own risk! This was a feature intentionally dismissed by the author of enaml because declarative func's are not the same as python functions. """ in_enamldef = False depth = 0 for tok in token_stream: if tok.type == 'ENAMLDEF': in_enamldef = True elif tok.type == 'INDENT': depth += 1 elif in_enamldef and tok.type == 'DEF': # Since functions are not allowed on the RHS we can # transform the token type to a NAME so it's picked up by the # parser as a decl_funcdef instead of funcdef tok.type = 'NAME' tok.value = 'func' elif tok.type == 'DEDENT': depth -= 1 if depth == 0: in_enamldef = False yield tok
[ "def", "convert_enamldef_def_to_func", "(", "token_stream", ")", ":", "in_enamldef", "=", "False", "depth", "=", "0", "for", "tok", "in", "token_stream", ":", "if", "tok", ".", "type", "==", "'ENAMLDEF'", ":", "in_enamldef", "=", "True", "elif", "tok", ".", "type", "==", "'INDENT'", ":", "depth", "+=", "1", "elif", "in_enamldef", "and", "tok", ".", "type", "==", "'DEF'", ":", "# Since functions are not allowed on the RHS we can", "# transform the token type to a NAME so it's picked up by the", "# parser as a decl_funcdef instead of funcdef", "tok", ".", "type", "=", "'NAME'", "tok", ".", "value", "=", "'func'", "elif", "tok", ".", "type", "==", "'DEDENT'", ":", "depth", "-=", "1", "if", "depth", "==", "0", ":", "in_enamldef", "=", "False", "yield", "tok" ]
A token stream processor which processes all enaml declarative functions to allow using `def` instead of `func`. It does this by transforming DEF tokens to NAME within enamldef blocks and then changing the token value to `func`. Notes ------ Use this at your own risk! This was a feature intentionally dismissed by the author of enaml because declarative func's are not the same as python functions.
[ "A", "token", "stream", "processor", "which", "processes", "all", "enaml", "declarative", "functions", "to", "allow", "using", "def", "instead", "of", "func", ".", "It", "does", "this", "by", "transforming", "DEF", "tokens", "to", "NAME", "within", "enamldef", "blocks", "and", "then", "changing", "the", "token", "value", "to", "func", ".", "Notes", "------", "Use", "this", "at", "your", "own", "risk!", "This", "was", "a", "feature", "intentionally", "dismissed", "by", "the", "author", "of", "enaml", "because", "declarative", "func", "s", "are", "not", "the", "same", "as", "python", "functions", "." ]
python
train
35.806452
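The depth-tracking pattern above, demonstrated on a fake token stream; this Token class only mimics the two attributes the middleware touches and is not enaml's real lexer token.

# Minimal token stand-in for the demo.
class Token(object):
    def __init__(self, type_, value=None):
        self.type, self.value = type_, value

def rename_defs(token_stream):
    in_block, depth = False, 0
    for tok in token_stream:
        if tok.type == 'ENAMLDEF':
            in_block = True
        elif tok.type == 'INDENT':
            depth += 1
        elif in_block and tok.type == 'DEF':
            tok.type, tok.value = 'NAME', 'func'   # rewrite in place
        elif tok.type == 'DEDENT':
            depth -= 1
            if depth == 0:
                in_block = False
        yield tok

stream = [Token('ENAMLDEF'), Token('INDENT'), Token('DEF', 'def'),
          Token('DEDENT'), Token('DEF', 'def')]
print([(t.type, t.value) for t in rename_defs(stream)])
# [('ENAMLDEF', None), ('INDENT', None), ('NAME', 'func'),
#  ('DEDENT', None), ('DEF', 'def')]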
devopshq/youtrack
youtrack/connection.py
https://github.com/devopshq/youtrack/blob/c4ec19aca253ae30ac8eee7976a2f330e480a73b/youtrack/connection.py#L702-L707
def create_versions(self, project_id, versions): """ Accepts result of getVersions() """ for v in versions: self.create_version(project_id, v)
[ "def", "create_versions", "(", "self", ",", "project_id", ",", "versions", ")", ":", "for", "v", "in", "versions", ":", "self", ".", "create_version", "(", "project_id", ",", "v", ")" ]
Accepts result of getVersions()
[ "Accepts", "result", "of", "getVersions", "()" ]
python
train
29
gijzelaerr/python-snap7
snap7/client.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/client.py#L99-L106
def get_cpu_info(self): """ Retrieves CPU info from client """ info = snap7.snap7types.S7CpuInfo() result = self.library.Cli_GetCpuInfo(self.pointer, byref(info)) check_error(result, context="client") return info
[ "def", "get_cpu_info", "(", "self", ")", ":", "info", "=", "snap7", ".", "snap7types", ".", "S7CpuInfo", "(", ")", "result", "=", "self", ".", "library", ".", "Cli_GetCpuInfo", "(", "self", ".", "pointer", ",", "byref", "(", "info", ")", ")", "check_error", "(", "result", ",", "context", "=", "\"client\"", ")", "return", "info" ]
Retrieves CPU info from client
[ "Retrieves", "CPU", "info", "from", "client" ]
python
train
32.625
mikejarrett/pipcheck
pipcheck/checker.py
https://github.com/mikejarrett/pipcheck/blob/2ff47b9fd8914e1764c6e659ef39b77c1b1a12ad/pipcheck/checker.py#L83-L102
def write_updates_to_csv(self, updates): """ Given a list of updates, write the updates out to the provided CSV file. Args: updates (list): List of Update objects. """ with open(self._csv_file_name, 'w') as csvfile: csvwriter = self.csv_writer(csvfile) csvwriter.writerow(CSV_COLUMN_HEADERS) for update in updates: row = [ update.name, update.current_version, update.new_version, update.prelease, ] csvwriter.writerow(row)
[ "def", "write_updates_to_csv", "(", "self", ",", "updates", ")", ":", "with", "open", "(", "self", ".", "_csv_file_name", ",", "'w'", ")", "as", "csvfile", ":", "csvwriter", "=", "self", ".", "csv_writer", "(", "csvfile", ")", "csvwriter", ".", "writerow", "(", "CSV_COLUMN_HEADERS", ")", "for", "update", "in", "updates", ":", "row", "=", "[", "update", ".", "name", ",", "update", ".", "current_version", ",", "update", ".", "new_version", ",", "update", ".", "prelease", ",", "]", "csvwriter", ".", "writerow", "(", "row", ")" ]
Given a list of updates, write the updates out to the provided CSV file. Args: updates (list): List of Update objects.
[ "Given", "a", "list", "of", "updates", "write", "the", "updates", "out", "to", "the", "provided", "CSV", "file", "." ]
python
train
31.45
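The writer only needs objects exposing four attributes. A self-contained demo with a namedtuple standing in for pipcheck's Update class; the header constant here is invented, and note the attribute really is spelled `prelease` in the entry above.

import csv
from collections import namedtuple

# Stand-ins for pipcheck's Update class and header constant.
Update = namedtuple('Update', 'name current_version new_version prelease')
CSV_COLUMN_HEADERS = ['Package', 'Current Version', 'Upgradable To', 'Prerelease']

updates = [Update('requests', '2.0.0', '2.31.0', False)]
with open('updates.csv', 'w') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(CSV_COLUMN_HEADERS)
    for update in updates:
        csvwriter.writerow([update.name, update.current_version,
                            update.new_version, update.prelease])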
matousc89/padasip
padasip/filters/ap.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/ap.py#L205-L227
def adapt(self, d, x):
    """
    Adapt weights according to one desired value and its input.

    **Args:**

    * `d` : desired value (float)

    * `x` : input array (1-dimensional array)
    """
    # create input matrix and target vector
    self.x_mem[:,1:] = self.x_mem[:,:-1]
    self.x_mem[:,0] = x
    self.d_mem[1:] = self.d_mem[:-1]
    self.d_mem[0] = d
    # estimate output and error
    self.y_mem = np.dot(self.x_mem.T, self.w)
    self.e_mem = self.d_mem - self.y_mem
    # update
    dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
    dw_part2 = np.linalg.solve(dw_part1, self.ide)
    dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
    self.w += self.mu * dw
[ "def", "adapt", "(", "self", ",", "d", ",", "x", ")", ":", "# create input matrix and target vector", "self", ".", "x_mem", "[", ":", ",", "1", ":", "]", "=", "self", ".", "x_mem", "[", ":", ",", ":", "-", "1", "]", "self", ".", "x_mem", "[", ":", ",", "0", "]", "=", "x", "self", ".", "d_mem", "[", "1", ":", "]", "=", "self", ".", "d_mem", "[", ":", "-", "1", "]", "self", ".", "d_mem", "[", "0", "]", "=", "d", "# estimate output and error", "self", ".", "y_mem", "=", "np", ".", "dot", "(", "self", ".", "x_mem", ".", "T", ",", "self", ".", "w", ")", "self", ".", "e_mem", "=", "self", ".", "d_mem", "-", "self", ".", "y_mem", "# update", "dw_part1", "=", "np", ".", "dot", "(", "self", ".", "x_mem", ".", "T", ",", "self", ".", "x_mem", ")", "+", "self", ".", "ide_eps", "dw_part2", "=", "np", ".", "linalg", ".", "solve", "(", "dw_part1", ",", "self", ".", "ide", ")", "dw", "=", "np", ".", "dot", "(", "self", ".", "x_mem", ",", "np", ".", "dot", "(", "dw_part2", ",", "self", ".", "e_mem", ")", ")", "self", ".", "w", "+=", "self", ".", "mu", "*", "dw" ]
Adapt weights according to one desired value and its input. **Args:** * `d` : desired value (float) * `x` : input array (1-dimensional array)
[ "Adapt", "weights", "according", "one", "desired", "value", "and", "its", "input", "." ]
python
train
32.565217
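The heart of the update is a regularized normal-equation solve. A tiny NumPy sketch of just that step, with arbitrary sizes and random data standing in for the filter's buffers.

import numpy as np

# Solve (X^T X + eps*I) P = I, then use P to form the weight change.
n, order = 4, 3                      # filter length, projection order
X = np.random.randn(n, order)        # x_mem: last `order` input vectors
e = np.random.randn(order)           # e_mem: corresponding errors
eps = 1e-3

dw_part1 = X.T.dot(X) + eps * np.eye(order)
dw_part2 = np.linalg.solve(dw_part1, np.eye(order))   # regularized inverse
dw = X.dot(dw_part2.dot(e))
print(dw.shape)   # (4,) -- one correction per filter weight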
spyder-ide/spyder
spyder/utils/programs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/programs.py#L486-L515
def is_python_interpreter(filename):
    """Evaluate whether a file is a python interpreter or not."""
    real_filename = os.path.realpath(filename)  # To follow symlink if existent
    if (not osp.isfile(real_filename) or
        not is_python_interpreter_valid_name(filename)):
        return False
    elif is_pythonw(filename):
        if os.name == 'nt':
            # pythonw is a binary on Windows
            if not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        elif sys.platform == 'darwin':
            # pythonw is a text file in Anaconda but a binary in
            # the system
            if is_anaconda() and encoding.is_text_file(real_filename):
                return True
            elif not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        else:
            # There's no pythonw in other systems
            return False
    elif encoding.is_text_file(real_filename):
        # At this point we can't have a text file
        return False
    else:
        return check_python_help(filename)
[ "def", "is_python_interpreter", "(", "filename", ")", ":", "real_filename", "=", "os", ".", "path", ".", "realpath", "(", "filename", ")", "# To follow symlink if existent\r", "if", "(", "not", "osp", ".", "isfile", "(", "real_filename", ")", "or", "not", "is_python_interpreter_valid_name", "(", "filename", ")", ")", ":", "return", "False", "elif", "is_pythonw", "(", "filename", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# pythonw is a binary on Windows\r", "if", "not", "encoding", ".", "is_text_file", "(", "real_filename", ")", ":", "return", "True", "else", ":", "return", "False", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "# pythonw is a text file in Anaconda but a binary in\r", "# the system\r", "if", "is_anaconda", "(", ")", "and", "encoding", ".", "is_text_file", "(", "real_filename", ")", ":", "return", "True", "elif", "not", "encoding", ".", "is_text_file", "(", "real_filename", ")", ":", "return", "True", "else", ":", "return", "False", "else", ":", "# There's no pythonw in other systems\r", "return", "False", "elif", "encoding", ".", "is_text_file", "(", "real_filename", ")", ":", "# At this point we can't have a text file\r", "return", "False", "else", ":", "return", "check_python_help", "(", "filename", ")" ]
Evaluate whether a file is a python interpreter or not.
[ "Evaluate", "wether", "a", "file", "is", "a", "python", "interpreter", "or", "not", "." ]
python
train
38.633333
apache/incubator-mxnet
python/mxnet/optimizer/optimizer.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/optimizer/optimizer.py#L1701-L1710
def get_states(self, dump_optimizer=False): """Gets updater states. Parameters ---------- dump_optimizer : bool, default False Whether to also save the optimizer itself. This would also save optimizer information such as learning rate and weight decay schedules. """ return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
[ "def", "get_states", "(", "self", ",", "dump_optimizer", "=", "False", ")", ":", "return", "pickle", ".", "dumps", "(", "(", "self", ".", "states", ",", "self", ".", "optimizer", ")", "if", "dump_optimizer", "else", "self", ".", "states", ")" ]
Gets updater states. Parameters ---------- dump_optimizer : bool, default False Whether to also save the optimizer itself. This would also save optimizer information such as learning rate and weight decay schedules.
[ "Gets", "updater", "states", "." ]
python
train
41.6
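A sketch of the two serialization modes the flag selects; `states` and `optimizer` here are plain-dict placeholders for the updater's real attributes.

import pickle

# Placeholders for the updater's real attributes.
states = {'weight0': [0.1, 0.2]}
optimizer = {'learning_rate': 0.01, 'wd': 1e-4}

blob = pickle.dumps((states, optimizer))   # dump_optimizer=True: states + optimizer
states_only = pickle.dumps(states)         # dump_optimizer=False: states alone
restored_states, restored_opt = pickle.loads(blob)
print(restored_opt['learning_rate'])       # 0.01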
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1939-L1958
def mv_normal_like(x, mu, tau): R""" Multivariate normal log-likelihood .. math:: f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\} :Parameters: - `x` : (n,k) - `mu` : (k) Location parameter sequence. - `Tau` : (k,k) Positive definite precision matrix. .. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like` """ # TODO: Vectorize in Fortran if len(np.shape(x)) > 1: return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x]) else: return flib.prec_mvnorm(x, mu, tau)
[ "def", "mv_normal_like", "(", "x", ",", "mu", ",", "tau", ")", ":", "# TODO: Vectorize in Fortran", "if", "len", "(", "np", ".", "shape", "(", "x", ")", ")", ">", "1", ":", "return", "np", ".", "sum", "(", "[", "flib", ".", "prec_mvnorm", "(", "r", ",", "mu", ",", "tau", ")", "for", "r", "in", "x", "]", ")", "else", ":", "return", "flib", ".", "prec_mvnorm", "(", "x", ",", "mu", ",", "tau", ")" ]
R""" Multivariate normal log-likelihood .. math:: f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\} :Parameters: - `x` : (n,k) - `mu` : (k) Location parameter sequence. - `Tau` : (k,k) Positive definite precision matrix. .. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
[ "R", "Multivariate", "normal", "log", "-", "likelihood" ]
python
train
30
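A cross-check of the precision parameterization, assuming SciPy is available: SciPy's multivariate normal wants a covariance, i.e. the inverse of tau, whereas flib works with the precision matrix directly.

import numpy as np
from scipy import stats

mu = np.array([0.0, 1.0])
tau = np.array([[2.0, 0.3], [0.3, 1.0]])    # positive definite precision
x = np.array([0.5, 0.5])

# Covariance is the inverse of the precision matrix.
logp = stats.multivariate_normal.logpdf(x, mean=mu, cov=np.linalg.inv(tau))
print(logp)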
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/ingest_visibilities/recv/async_recv.py#L50-L114
def process_buffer(self, i_block, receive_buffer): """Blocking function to process the received heaps. This is run in an executor. """ self._log.info("Worker thread processing block %i", i_block) time_overall0 = time.time() time_unpack = 0.0 time_write = 0.0 for i_heap, heap in enumerate(receive_buffer.result()): # Skip and log any incomplete heaps. if isinstance(heap, spead2.recv.IncompleteHeap): self._log.info("Dropped incomplete heap %i", heap.cnt + 1) continue # Update the item group from this heap. items = self._item_group.update(heap) # Get the time and channel indices from the heap index. i_chan = i_heap // self._num_buffer_times i_time = i_heap % self._num_buffer_times if 'correlator_output_data' in items: vis_data = items['correlator_output_data'].value['VIS'] if self._block is None: num_baselines = vis_data.shape[0] num_pols = vis_data[0].shape[0] self._block = numpy.zeros((self._num_buffer_times, self._num_streams, num_baselines), dtype=('c8', num_pols)) self._block[:, :, :] = 0 # To make the copies faster. # Unpack data from the heap into the block to be processed. time_unpack0 = time.time() self._block[i_time, i_chan, :] = vis_data time_unpack += time.time() - time_unpack0 # Check the data for debugging! val = self._block[i_time, i_chan, -1][-1].real self._log.debug("Data: %.3f", val) if self._block is not None: # Process the buffered data here. if self._config['process_data']: pass # Write the buffered data to storage. if self._config['write_data']: time_write0 = time.time() with open(self._config['filename'], 'ab') as f: # Don't use pickle, it's really slow (even protocol 4)! numpy.save(f, self._block, allow_pickle=False) time_write += time.time() - time_write0 # Report time taken. time_overall = time.time() - time_overall0 self._log.info("Total processing time: %.1f ms", 1000 * time_overall) self._log.info("Unpack was %.1f %%", 100 * time_unpack / time_overall) self._log.info("Write was %.1f %%", 100 * time_write / time_overall) if time_unpack != 0.0: self._log.info("Memory speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_unpack) if time_write != 0.0: self._log.info("Write speed %.1f MB/s", (self._block.nbytes * 1e-6) / time_write)
[ "def", "process_buffer", "(", "self", ",", "i_block", ",", "receive_buffer", ")", ":", "self", ".", "_log", ".", "info", "(", "\"Worker thread processing block %i\"", ",", "i_block", ")", "time_overall0", "=", "time", ".", "time", "(", ")", "time_unpack", "=", "0.0", "time_write", "=", "0.0", "for", "i_heap", ",", "heap", "in", "enumerate", "(", "receive_buffer", ".", "result", "(", ")", ")", ":", "# Skip and log any incomplete heaps.", "if", "isinstance", "(", "heap", ",", "spead2", ".", "recv", ".", "IncompleteHeap", ")", ":", "self", ".", "_log", ".", "info", "(", "\"Dropped incomplete heap %i\"", ",", "heap", ".", "cnt", "+", "1", ")", "continue", "# Update the item group from this heap.", "items", "=", "self", ".", "_item_group", ".", "update", "(", "heap", ")", "# Get the time and channel indices from the heap index.", "i_chan", "=", "i_heap", "//", "self", ".", "_num_buffer_times", "i_time", "=", "i_heap", "%", "self", ".", "_num_buffer_times", "if", "'correlator_output_data'", "in", "items", ":", "vis_data", "=", "items", "[", "'correlator_output_data'", "]", ".", "value", "[", "'VIS'", "]", "if", "self", ".", "_block", "is", "None", ":", "num_baselines", "=", "vis_data", ".", "shape", "[", "0", "]", "num_pols", "=", "vis_data", "[", "0", "]", ".", "shape", "[", "0", "]", "self", ".", "_block", "=", "numpy", ".", "zeros", "(", "(", "self", ".", "_num_buffer_times", ",", "self", ".", "_num_streams", ",", "num_baselines", ")", ",", "dtype", "=", "(", "'c8'", ",", "num_pols", ")", ")", "self", ".", "_block", "[", ":", ",", ":", ",", ":", "]", "=", "0", "# To make the copies faster.", "# Unpack data from the heap into the block to be processed.", "time_unpack0", "=", "time", ".", "time", "(", ")", "self", ".", "_block", "[", "i_time", ",", "i_chan", ",", ":", "]", "=", "vis_data", "time_unpack", "+=", "time", ".", "time", "(", ")", "-", "time_unpack0", "# Check the data for debugging!", "val", "=", "self", ".", "_block", "[", "i_time", ",", "i_chan", ",", "-", "1", "]", "[", "-", "1", "]", ".", "real", "self", ".", "_log", ".", "debug", "(", "\"Data: %.3f\"", ",", "val", ")", "if", "self", ".", "_block", "is", "not", "None", ":", "# Process the buffered data here.", "if", "self", ".", "_config", "[", "'process_data'", "]", ":", "pass", "# Write the buffered data to storage.", "if", "self", ".", "_config", "[", "'write_data'", "]", ":", "time_write0", "=", "time", ".", "time", "(", ")", "with", "open", "(", "self", ".", "_config", "[", "'filename'", "]", ",", "'ab'", ")", "as", "f", ":", "# Don't use pickle, it's really slow (even protocol 4)!", "numpy", ".", "save", "(", "f", ",", "self", ".", "_block", ",", "allow_pickle", "=", "False", ")", "time_write", "+=", "time", ".", "time", "(", ")", "-", "time_write0", "# Report time taken.", "time_overall", "=", "time", ".", "time", "(", ")", "-", "time_overall0", "self", ".", "_log", ".", "info", "(", "\"Total processing time: %.1f ms\"", ",", "1000", "*", "time_overall", ")", "self", ".", "_log", ".", "info", "(", "\"Unpack was %.1f %%\"", ",", "100", "*", "time_unpack", "/", "time_overall", ")", "self", ".", "_log", ".", "info", "(", "\"Write was %.1f %%\"", ",", "100", "*", "time_write", "/", "time_overall", ")", "if", "time_unpack", "!=", "0.0", ":", "self", ".", "_log", ".", "info", "(", "\"Memory speed %.1f MB/s\"", ",", "(", "self", ".", "_block", ".", "nbytes", "*", "1e-6", ")", "/", "time_unpack", ")", "if", "time_write", "!=", "0.0", ":", "self", ".", "_log", ".", "info", "(", "\"Write speed %.1f MB/s\"", ",", "(", "self", ".", 
"_block", ".", "nbytes", "*", "1e-6", ")", "/", "time_write", ")" ]
Blocking function to process the received heaps. This is run in an executor.
[ "Blocking", "function", "to", "process", "the", "received", "heaps", ".", "This", "is", "run", "in", "an", "executor", "." ]
python
train
45.907692
gunthercox/ChatterBot
chatterbot/parsing.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/parsing.py#L580-L636
def date_from_relative_week_year(base_date, time, dow, ordinal=1): """ Converts relative day to time Eg. this tuesday, last tuesday """ # If there is an ordinal (next 3 weeks) => return a start and end range # Reset date to start of the day relative_date = datetime(base_date.year, base_date.month, base_date.day) ord = convert_string_to_number(ordinal) if dow in year_variations: if time == 'this' or time == 'coming': return datetime(relative_date.year, 1, 1) elif time == 'last' or time == 'previous': return datetime(relative_date.year - 1, relative_date.month, 1) elif time == 'next' or time == 'following': return relative_date + timedelta(ord * 365) elif time == 'end of the': return datetime(relative_date.year, 12, 31) elif dow in month_variations: if time == 'this': return datetime(relative_date.year, relative_date.month, relative_date.day) elif time == 'last' or time == 'previous': return datetime(relative_date.year, relative_date.month - 1, relative_date.day) elif time == 'next' or time == 'following': if relative_date.month + ord >= 12: month = relative_date.month - 1 + ord year = relative_date.year + month // 12 month = month % 12 + 1 day = min(relative_date.day, calendar.monthrange(year, month)[1]) return datetime(year, month, day) else: return datetime(relative_date.year, relative_date.month + ord, relative_date.day) elif time == 'end of the': return datetime( relative_date.year, relative_date.month, calendar.monthrange(relative_date.year, relative_date.month)[1] ) elif dow in week_variations: if time == 'this': return relative_date - timedelta(days=relative_date.weekday()) elif time == 'last' or time == 'previous': return relative_date - timedelta(weeks=1) elif time == 'next' or time == 'following': return relative_date + timedelta(weeks=ord) elif time == 'end of the': day_of_week = base_date.weekday() return day_of_week + timedelta(days=6 - relative_date.weekday()) elif dow in day_variations: if time == 'this': return relative_date elif time == 'last' or time == 'previous': return relative_date - timedelta(days=1) elif time == 'next' or time == 'following': return relative_date + timedelta(days=ord) elif time == 'end of the': return datetime(relative_date.year, relative_date.month, relative_date.day, 23, 59, 59)
[ "def", "date_from_relative_week_year", "(", "base_date", ",", "time", ",", "dow", ",", "ordinal", "=", "1", ")", ":", "# If there is an ordinal (next 3 weeks) => return a start and end range", "# Reset date to start of the day", "relative_date", "=", "datetime", "(", "base_date", ".", "year", ",", "base_date", ".", "month", ",", "base_date", ".", "day", ")", "ord", "=", "convert_string_to_number", "(", "ordinal", ")", "if", "dow", "in", "year_variations", ":", "if", "time", "==", "'this'", "or", "time", "==", "'coming'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "1", ",", "1", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "datetime", "(", "relative_date", ".", "year", "-", "1", ",", "relative_date", ".", "month", ",", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "ord", "*", "365", ")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "12", ",", "31", ")", "elif", "dow", "in", "month_variations", ":", "if", "time", "==", "'this'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", "-", "1", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "if", "relative_date", ".", "month", "+", "ord", ">=", "12", ":", "month", "=", "relative_date", ".", "month", "-", "1", "+", "ord", "year", "=", "relative_date", ".", "year", "+", "month", "//", "12", "month", "=", "month", "%", "12", "+", "1", "day", "=", "min", "(", "relative_date", ".", "day", ",", "calendar", ".", "monthrange", "(", "year", ",", "month", ")", "[", "1", "]", ")", "return", "datetime", "(", "year", ",", "month", ",", "day", ")", "else", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", "+", "ord", ",", "relative_date", ".", "day", ")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "calendar", ".", "monthrange", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ")", "[", "1", "]", ")", "elif", "dow", "in", "week_variations", ":", "if", "time", "==", "'this'", ":", "return", "relative_date", "-", "timedelta", "(", "days", "=", "relative_date", ".", "weekday", "(", ")", ")", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "relative_date", "-", "timedelta", "(", "weeks", "=", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "weeks", "=", "ord", ")", "elif", "time", "==", "'end of the'", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "return", "day_of_week", "+", "timedelta", "(", "days", "=", "6", "-", "relative_date", ".", "weekday", "(", ")", ")", "elif", "dow", "in", "day_variations", ":", "if", "time", "==", "'this'", ":", "return", "relative_date", "elif", "time", "==", "'last'", "or", "time", "==", "'previous'", ":", "return", "relative_date", "-", "timedelta", "(", "days", "=", "1", ")", "elif", "time", "==", "'next'", "or", "time", "==", "'following'", ":", "return", "relative_date", "+", "timedelta", "(", "days", "=", "ord", 
")", "elif", "time", "==", "'end of the'", ":", "return", "datetime", "(", "relative_date", ".", "year", ",", "relative_date", ".", "month", ",", "relative_date", ".", "day", ",", "23", ",", "59", ",", "59", ")" ]
Converts relative day to time Eg. this tuesday, last tuesday
[ "Converts", "relative", "day", "to", "time", "Eg", ".", "this", "tuesday", "last", "tuesday" ]
python
train
48.263158
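The subtle branch above is the month rollover. Here is the same arithmetic isolated into a helper (the name add_months is invented for this sketch), showing how calendar.monthrange clamps the day.

import calendar
from datetime import datetime

# Month-rollover arithmetic from the 'next <n> months' branch, isolated:
# clamp the day so e.g. Jan 31 + 1 month lands on Feb 28/29.
def add_months(base, n):
    month = base.month - 1 + n
    year = base.year + month // 12
    month = month % 12 + 1
    day = min(base.day, calendar.monthrange(year, month)[1])
    return datetime(year, month, day)

print(add_months(datetime(2015, 1, 31), 1))   # 2015-02-28 00:00:00
print(add_months(datetime(2015, 11, 15), 3))  # 2016-02-15 00:00:00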
mitsei/dlkit
dlkit/json_/utilities.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/utilities.py#L158-L176
def convert_dict_to_datetime(obj_map): """converts dictionary representations of datetime back to datetime obj""" converted_map = {} for key, value in obj_map.items(): if isinstance(value, dict) and 'tzinfo' in value.keys(): converted_map[key] = datetime.datetime(**value) elif isinstance(value, dict): converted_map[key] = convert_dict_to_datetime(value) elif isinstance(value, list): updated_list = [] for internal_item in value: if isinstance(internal_item, dict): updated_list.append(convert_dict_to_datetime(internal_item)) else: updated_list.append(internal_item) converted_map[key] = updated_list else: converted_map[key] = value return converted_map
[ "def", "convert_dict_to_datetime", "(", "obj_map", ")", ":", "converted_map", "=", "{", "}", "for", "key", ",", "value", "in", "obj_map", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", "and", "'tzinfo'", "in", "value", ".", "keys", "(", ")", ":", "converted_map", "[", "key", "]", "=", "datetime", ".", "datetime", "(", "*", "*", "value", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "converted_map", "[", "key", "]", "=", "convert_dict_to_datetime", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "updated_list", "=", "[", "]", "for", "internal_item", "in", "value", ":", "if", "isinstance", "(", "internal_item", ",", "dict", ")", ":", "updated_list", ".", "append", "(", "convert_dict_to_datetime", "(", "internal_item", ")", ")", "else", ":", "updated_list", ".", "append", "(", "internal_item", ")", "converted_map", "[", "key", "]", "=", "updated_list", "else", ":", "converted_map", "[", "key", "]", "=", "value", "return", "converted_map" ]
converts dictionary representations of datetime back to datetime obj
[ "converts", "dictionary", "representations", "of", "datetime", "back", "to", "datetime", "obj" ]
python
train
43.736842
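A round-trip demo, with the function copied and the list branch compacted into a comprehension: any mapping carrying a 'tzinfo' key is treated as datetime keyword arguments.

import datetime

def convert_dict_to_datetime(obj_map):
    converted_map = {}
    for key, value in obj_map.items():
        if isinstance(value, dict) and 'tzinfo' in value.keys():
            # A 'tzinfo' key marks the dict as serialized datetime kwargs.
            converted_map[key] = datetime.datetime(**value)
        elif isinstance(value, dict):
            converted_map[key] = convert_dict_to_datetime(value)
        elif isinstance(value, list):
            converted_map[key] = [convert_dict_to_datetime(v) if isinstance(v, dict) else v
                                  for v in value]
        else:
            converted_map[key] = value
    return converted_map

record = {'startDate': {'year': 2019, 'month': 4, 'day': 1,
                        'hour': 12, 'minute': 0, 'second': 0,
                        'microsecond': 0, 'tzinfo': None}}
print(convert_dict_to_datetime(record))
# {'startDate': datetime.datetime(2019, 4, 1, 12, 0)}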
mitsei/dlkit
dlkit/json_/authorization/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/objects.py#L137-L149
def get_trust_id(self): """Gets the ``Trust`` ``Id`` for this authorization. return: (osid.id.Id) - the trust ``Id`` raise: IllegalState - ``has_trust()`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.Resource.get_avatar_id_template if not bool(self._my_map['trustId']): raise errors.IllegalState('this Authorization has no trust') else: return Id(self._my_map['trustId'])
[ "def", "get_trust_id", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_id_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'trustId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Authorization has no trust'", ")", "else", ":", "return", "Id", "(", "self", ".", "_my_map", "[", "'trustId'", "]", ")" ]
Gets the ``Trust`` ``Id`` for this authorization. return: (osid.id.Id) - the trust ``Id`` raise: IllegalState - ``has_trust()`` is ``false`` *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Trust", "Id", "for", "this", "authorization", "." ]
python
train
40.692308
holgern/pyedflib
pyedflib/edfwriter.py
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L552-L566
def setTransducer(self, edfsignal, transducer): """ Sets the transducer of signal edfsignal :param edfsignal: int :param transducer: str Notes ----- This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['transducer'] = transducer self.update_header()
[ "def", "setTransducer", "(", "self", ",", "edfsignal", ",", "transducer", ")", ":", "if", "(", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ")", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "[", "'transducer'", "]", "=", "transducer", "self", ".", "update_header", "(", ")" ]
Sets the transducer of signal edfsignal :param edfsignal: int :param transducer: str Notes ----- This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
[ "Sets", "the", "transducer", "of", "signal", "edfsignal" ]
python
train
36.2
log2timeline/dfvfs
dfvfs/helpers/volume_scanner.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/helpers/volume_scanner.py#L230-L270
def _NormalizedVolumeIdentifiers( self, volume_system, volume_identifiers, prefix='v'): """Normalizes volume identifiers. Args: volume_system (VolumeSystem): volume system. volume_identifiers (list[int|str]): allowed volume identifiers, formatted as an integer or string with prefix. prefix (Optional[str]): volume identifier prefix. Returns: list[str]: volume identifiers with prefix. Raises: ScannerError: if the volume identifier is not supported or no volume could be found that corresponds with the identifier. """ normalized_volume_identifiers = [] for volume_identifier in volume_identifiers: if isinstance(volume_identifier, int): volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier) elif not volume_identifier.startswith(prefix): try: volume_identifier = int(volume_identifier, 10) volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier) except (TypeError, ValueError): pass try: volume = volume_system.GetVolumeByIdentifier(volume_identifier) except KeyError: volume = None if not volume: raise errors.ScannerError( 'Volume missing for identifier: {0:s}.'.format(volume_identifier)) normalized_volume_identifiers.append(volume_identifier) return normalized_volume_identifiers
[ "def", "_NormalizedVolumeIdentifiers", "(", "self", ",", "volume_system", ",", "volume_identifiers", ",", "prefix", "=", "'v'", ")", ":", "normalized_volume_identifiers", "=", "[", "]", "for", "volume_identifier", "in", "volume_identifiers", ":", "if", "isinstance", "(", "volume_identifier", ",", "int", ")", ":", "volume_identifier", "=", "'{0:s}{1:d}'", ".", "format", "(", "prefix", ",", "volume_identifier", ")", "elif", "not", "volume_identifier", ".", "startswith", "(", "prefix", ")", ":", "try", ":", "volume_identifier", "=", "int", "(", "volume_identifier", ",", "10", ")", "volume_identifier", "=", "'{0:s}{1:d}'", ".", "format", "(", "prefix", ",", "volume_identifier", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "try", ":", "volume", "=", "volume_system", ".", "GetVolumeByIdentifier", "(", "volume_identifier", ")", "except", "KeyError", ":", "volume", "=", "None", "if", "not", "volume", ":", "raise", "errors", ".", "ScannerError", "(", "'Volume missing for identifier: {0:s}.'", ".", "format", "(", "volume_identifier", ")", ")", "normalized_volume_identifiers", ".", "append", "(", "volume_identifier", ")", "return", "normalized_volume_identifiers" ]
Normalizes volume identifiers. Args: volume_system (VolumeSystem): volume system. volume_identifiers (list[int|str]): allowed volume identifiers, formatted as an integer or string with prefix. prefix (Optional[str]): volume identifier prefix. Returns: list[str]: volume identifiers with prefix. Raises: ScannerError: if the volume identifier is not supported or no volume could be found that corresponds with the identifier.
[ "Normalizes", "volume", "identifiers", "." ]
python
train
33.95122
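Just the identifier-normalization half, without the volume-system lookup, to show how ints and bare digit strings both end up prefixed; the function name is shortened for this sketch.

# Ints and bare digit strings both become 'v<N>'; anything else passes through.
def normalize(volume_identifiers, prefix='v'):
    out = []
    for volume_identifier in volume_identifiers:
        if isinstance(volume_identifier, int):
            volume_identifier = '{0:s}{1:d}'.format(prefix, volume_identifier)
        elif not volume_identifier.startswith(prefix):
            try:
                volume_identifier = '{0:s}{1:d}'.format(
                    prefix, int(volume_identifier, 10))
            except (TypeError, ValueError):
                pass
        out.append(volume_identifier)
    return out

print(normalize([1, '2', 'v3', 'apfs1'], prefix='v'))  # ['v1', 'v2', 'v3', 'apfs1']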
Fantomas42/django-blog-zinnia
zinnia/sitemaps.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L50-L59
def items(self):
    """
    Get a queryset, cache infos for standardized
    access to them later, then compute the maximum
    of entries to define the priority of each item.
    """
    queryset = self.get_queryset()
    self.cache_infos(queryset)
    self.set_max_entries()
    return queryset
[ "def", "items", "(", "self", ")", ":", "queryset", "=", "self", ".", "get_queryset", "(", ")", "self", ".", "cache_infos", "(", "queryset", ")", "self", ".", "set_max_entries", "(", ")", "return", "queryset" ]
Get a queryset, cache infos for standardized access to them later, then compute the maximum of entries to define the priority of each item.
[ "Get", "a", "queryset", "cache", "infos", "for", "standardized", "access", "to", "them", "later", "then", "compute", "the", "maximum", "of", "entries", "to", "define", "the", "priority", "of", "each", "items", "." ]
python
train
32.4
IndicoDataSolutions/IndicoIo-python
indicoio/docx/docx_extraction.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/docx/docx_extraction.py#L6-L25
def docx_extraction(docx, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given a .docx file, returns the raw text associated with the given .docx file. The .docx file may be provided as base64 encoded data or as a filepath. Example usage: .. code-block:: python >>> from indicoio import docx_extraction >>> results = docx_extraction(docx_file) :param docx: The docx to be analyzed. :type docx: str or list of strs :rtype: dict or list of dicts """ docx = docx_preprocess(docx, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version} results = api_handler(docx, cloud=cloud, api="docxextraction", url_params=url_params, **kwargs) return results
[ "def", "docx_extraction", "(", "docx", ",", "cloud", "=", "None", ",", "batch", "=", "False", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "docx", "=", "docx_preprocess", "(", "docx", ",", "batch", "=", "batch", ")", "url_params", "=", "{", "\"batch\"", ":", "batch", ",", "\"api_key\"", ":", "api_key", ",", "\"version\"", ":", "version", "}", "results", "=", "api_handler", "(", "docx", ",", "cloud", "=", "cloud", ",", "api", "=", "\"docxextraction\"", ",", "url_params", "=", "url_params", ",", "*", "*", "kwargs", ")", "return", "results" ]
Given a .docx file, returns the raw text associated with the given .docx file. The .docx file may be provided as base64 encoded data or as a filepath. Example usage: .. code-block:: python >>> from indicoio import docx_extraction >>> results = docx_extraction(docx_file) :param docx: The docx to be analyzed. :type docx: str or list of strs :rtype: dict or list of dicts
[ "Given", "a", ".", "docx", "file", "returns", "the", "raw", "text", "associated", "with", "the", "given", ".", "docx", "file", ".", "The", ".", "docx", "file", "may", "be", "provided", "as", "base64", "encoded", "data", "or", "as", "a", "filepath", "." ]
python
train
37.1
horazont/aioxmpp
aioxmpp/roster/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/roster/service.py#L122-L131
def from_xso_item(cls, xso_item): """ Create a :class:`Item` with the :attr:`jid` set to the :attr:`.xso.Item.jid` obtained from `xso_item`. Then update that instance with `xso_item` using :meth:`update_from_xso_item` and return it. """ item = cls(xso_item.jid) item.update_from_xso_item(xso_item) return item
[ "def", "from_xso_item", "(", "cls", ",", "xso_item", ")", ":", "item", "=", "cls", "(", "xso_item", ".", "jid", ")", "item", ".", "update_from_xso_item", "(", "xso_item", ")", "return", "item" ]
Create a :class:`Item` with the :attr:`jid` set to the :attr:`.xso.Item.jid` obtained from `xso_item`. Then update that instance with `xso_item` using :meth:`update_from_xso_item` and return it.
[ "Create", "a", ":", "class", ":", "Item", "with", "the", ":", "attr", ":", "jid", "set", "to", "the", ":", "attr", ":", ".", "xso", ".", "Item", ".", "jid", "obtained", "from", "xso_item", ".", "Then", "update", "that", "instance", "with", "xso_item", "using", ":", "meth", ":", "update_from_xso_item", "and", "return", "it", "." ]
python
train
37.2
googleapis/google-cloud-python
datastore/google/cloud/datastore/transaction.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/transaction.py#L246-L262
def put(self, entity): """Adds an entity to be committed. Ensures the transaction is not marked readonly. Please see documentation at :meth:`~google.cloud.datastore.batch.Batch.put` :type entity: :class:`~google.cloud.datastore.entity.Entity` :param entity: the entity to be saved. :raises: :class:`RuntimeError` if the transaction is marked ReadOnly """ if self._options.HasField("read_only"): raise RuntimeError("Transaction is read only") else: super(Transaction, self).put(entity)
[ "def", "put", "(", "self", ",", "entity", ")", ":", "if", "self", ".", "_options", ".", "HasField", "(", "\"read_only\"", ")", ":", "raise", "RuntimeError", "(", "\"Transaction is read only\"", ")", "else", ":", "super", "(", "Transaction", ",", "self", ")", ".", "put", "(", "entity", ")" ]
Adds an entity to be committed. Ensures the transaction is not marked readonly. Please see documentation at :meth:`~google.cloud.datastore.batch.Batch.put` :type entity: :class:`~google.cloud.datastore.entity.Entity` :param entity: the entity to be saved. :raises: :class:`RuntimeError` if the transaction is marked ReadOnly
[ "Adds", "an", "entity", "to", "be", "committed", "." ]
python
train
34.823529
gem/oq-engine
openquake/hazardlib/contexts.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/contexts.py#L323-L368
def disaggregate(self, sitecol, ruptures, iml4, truncnorm, epsilons, monitor=Monitor()): """ Disaggregate (separate) PoE in different contributions. :param sitecol: a SiteCollection with N sites :param ruptures: an iterator over ruptures with the same TRT :param iml4: a 4d array of IMLs of shape (N, R, M, P) :param truncnorm: an instance of scipy.stats.truncnorm :param epsilons: the epsilon bins :param monitor: a Monitor instance :returns: an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats """ acc = AccumDict(accum=[]) ctx_mon = monitor('disagg_contexts', measuremem=False) pne_mon = monitor('disaggregate_pne', measuremem=False) clo_mon = monitor('get_closest', measuremem=False) for rupture in ruptures: with ctx_mon: orig_dctx = DistancesContext( (param, get_distances(rupture, sitecol, param)) for param in self.REQUIRES_DISTANCES) self.add_rup_params(rupture) with clo_mon: # this is faster than computing orig_dctx closest_points = rupture.surface.get_closest_points(sitecol) cache = {} for rlz, gsim in self.gsim_by_rlzi.items(): dctx = orig_dctx.roundup(gsim.minimum_distance) for m, imt in enumerate(iml4.imts): for p, poe in enumerate(iml4.poes_disagg): iml = tuple(iml4.array[:, rlz, m, p]) try: pne = cache[gsim, imt, iml] except KeyError: with pne_mon: pne = gsim.disaggregate_pne( rupture, sitecol, dctx, imt, iml, truncnorm, epsilons) cache[gsim, imt, iml] = pne acc[poe, str(imt), rlz].append(pne) acc['mags'].append(rupture.mag) acc['dists'].append(getattr(dctx, self.filter_distance)) acc['lons'].append(closest_points.lons) acc['lats'].append(closest_points.lats) return acc
[ "def", "disaggregate", "(", "self", ",", "sitecol", ",", "ruptures", ",", "iml4", ",", "truncnorm", ",", "epsilons", ",", "monitor", "=", "Monitor", "(", ")", ")", ":", "acc", "=", "AccumDict", "(", "accum", "=", "[", "]", ")", "ctx_mon", "=", "monitor", "(", "'disagg_contexts'", ",", "measuremem", "=", "False", ")", "pne_mon", "=", "monitor", "(", "'disaggregate_pne'", ",", "measuremem", "=", "False", ")", "clo_mon", "=", "monitor", "(", "'get_closest'", ",", "measuremem", "=", "False", ")", "for", "rupture", "in", "ruptures", ":", "with", "ctx_mon", ":", "orig_dctx", "=", "DistancesContext", "(", "(", "param", ",", "get_distances", "(", "rupture", ",", "sitecol", ",", "param", ")", ")", "for", "param", "in", "self", ".", "REQUIRES_DISTANCES", ")", "self", ".", "add_rup_params", "(", "rupture", ")", "with", "clo_mon", ":", "# this is faster than computing orig_dctx", "closest_points", "=", "rupture", ".", "surface", ".", "get_closest_points", "(", "sitecol", ")", "cache", "=", "{", "}", "for", "rlz", ",", "gsim", "in", "self", ".", "gsim_by_rlzi", ".", "items", "(", ")", ":", "dctx", "=", "orig_dctx", ".", "roundup", "(", "gsim", ".", "minimum_distance", ")", "for", "m", ",", "imt", "in", "enumerate", "(", "iml4", ".", "imts", ")", ":", "for", "p", ",", "poe", "in", "enumerate", "(", "iml4", ".", "poes_disagg", ")", ":", "iml", "=", "tuple", "(", "iml4", ".", "array", "[", ":", ",", "rlz", ",", "m", ",", "p", "]", ")", "try", ":", "pne", "=", "cache", "[", "gsim", ",", "imt", ",", "iml", "]", "except", "KeyError", ":", "with", "pne_mon", ":", "pne", "=", "gsim", ".", "disaggregate_pne", "(", "rupture", ",", "sitecol", ",", "dctx", ",", "imt", ",", "iml", ",", "truncnorm", ",", "epsilons", ")", "cache", "[", "gsim", ",", "imt", ",", "iml", "]", "=", "pne", "acc", "[", "poe", ",", "str", "(", "imt", ")", ",", "rlz", "]", ".", "append", "(", "pne", ")", "acc", "[", "'mags'", "]", ".", "append", "(", "rupture", ".", "mag", ")", "acc", "[", "'dists'", "]", ".", "append", "(", "getattr", "(", "dctx", ",", "self", ".", "filter_distance", ")", ")", "acc", "[", "'lons'", "]", ".", "append", "(", "closest_points", ".", "lons", ")", "acc", "[", "'lats'", "]", ".", "append", "(", "closest_points", ".", "lats", ")", "return", "acc" ]
Disaggregate (separate) PoE in different contributions. :param sitecol: a SiteCollection with N sites :param ruptures: an iterator over ruptures with the same TRT :param iml4: a 4d array of IMLs of shape (N, R, M, P) :param truncnorm: an instance of scipy.stats.truncnorm :param epsilons: the epsilon bins :param monitor: a Monitor instance :returns: an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats
[ "Disaggregate", "(", "separate", ")", "PoE", "in", "different", "contributions", "." ]
python
train
49.086957
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L60-L83
def format_time(x): """Formats date values This function formats :class:`datetime.datetime` and :class:`datetime.timedelta` objects (and the corresponding numpy objects) using the :func:`xarray.core.formatting.format_timestamp` and the :func:`xarray.core.formatting.format_timedelta` functions. Parameters ---------- x: object The value to format. If not a time object, the value is returned Returns ------- str or `x` Either the formatted time object or the initial `x`""" if isinstance(x, (datetime64, datetime)): return format_timestamp(x) elif isinstance(x, (timedelta64, timedelta)): return format_timedelta(x) elif isinstance(x, ndarray): return list(x) if x.ndim else x[()] return x
[ "def", "format_time", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "(", "datetime64", ",", "datetime", ")", ")", ":", "return", "format_timestamp", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "(", "timedelta64", ",", "timedelta", ")", ")", ":", "return", "format_timedelta", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "ndarray", ")", ":", "return", "list", "(", "x", ")", "if", "x", ".", "ndim", "else", "x", "[", "(", ")", "]", "return", "x" ]
Formats date values This function formats :class:`datetime.datetime` and :class:`datetime.timedelta` objects (and the corresponding numpy objects) using the :func:`xarray.core.formatting.format_timestamp` and the :func:`xarray.core.formatting.format_timedelta` functions. Parameters ---------- x: object The value to format. If not a time object, the value is returned Returns ------- str or `x` Either the formatted time object or the initial `x`
[ "Formats", "date", "values" ]
python
train
32.041667
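A minimal behavior sketch for format_time, assuming it is importable from psyplot.plotter as the path above suggests; the sample values are illustrative.

from datetime import datetime
import numpy as np
from psyplot.plotter import format_time  # assumed import location

format_time(datetime(2020, 1, 1))    # formatted timestamp string
format_time(np.timedelta64(3, 'h'))  # formatted timedelta string
format_time(np.array([1, 2, 3]))     # [1, 2, 3] -- a non-scalar array becomes a list
format_time('not a time')            # non-time input is returned unchanged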
niklasf/python-chess
chess/__init__.py
https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/__init__.py#L1237-L1253
def copy(self: BaseBoardT) -> BaseBoardT: """Creates a copy of the board.""" board = type(self)(None) board.pawns = self.pawns board.knights = self.knights board.bishops = self.bishops board.rooks = self.rooks board.queens = self.queens board.kings = self.kings board.occupied_co[WHITE] = self.occupied_co[WHITE] board.occupied_co[BLACK] = self.occupied_co[BLACK] board.occupied = self.occupied board.promoted = self.promoted return board
[ "def", "copy", "(", "self", ":", "BaseBoardT", ")", "->", "BaseBoardT", ":", "board", "=", "type", "(", "self", ")", "(", "None", ")", "board", ".", "pawns", "=", "self", ".", "pawns", "board", ".", "knights", "=", "self", ".", "knights", "board", ".", "bishops", "=", "self", ".", "bishops", "board", ".", "rooks", "=", "self", ".", "rooks", "board", ".", "queens", "=", "self", ".", "queens", "board", ".", "kings", "=", "self", ".", "kings", "board", ".", "occupied_co", "[", "WHITE", "]", "=", "self", ".", "occupied_co", "[", "WHITE", "]", "board", ".", "occupied_co", "[", "BLACK", "]", "=", "self", ".", "occupied_co", "[", "BLACK", "]", "board", ".", "occupied", "=", "self", ".", "occupied", "board", ".", "promoted", "=", "self", ".", "promoted", "return", "board" ]
Creates a copy of the board.
[ "Creates", "a", "copy", "of", "the", "board", "." ]
python
train
31.117647
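A quick sketch of the copy semantics with python-chess (chess.Board builds on this BaseBoard.copy behavior); the move played is arbitrary.

import chess

board = chess.Board()
clone = board.copy()
clone.push(chess.Move.from_uci('e2e4'))
assert board.fen() != clone.fen()  # mutating the copy leaves the original intact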
google/grr
grr/server/grr_response_server/export.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export.py#L817-L823
def Convert(self, metadata, config, token=None): """Converts DNSClientConfiguration to ExportedDNSClientConfiguration.""" result = ExportedDNSClientConfiguration( metadata=metadata, dns_servers=" ".join(config.dns_server), dns_suffixes=" ".join(config.dns_suffix)) yield result
[ "def", "Convert", "(", "self", ",", "metadata", ",", "config", ",", "token", "=", "None", ")", ":", "result", "=", "ExportedDNSClientConfiguration", "(", "metadata", "=", "metadata", ",", "dns_servers", "=", "\" \"", ".", "join", "(", "config", ".", "dns_server", ")", ",", "dns_suffixes", "=", "\" \"", ".", "join", "(", "config", ".", "dns_suffix", ")", ")", "yield", "result" ]
Converts DNSClientConfiguration to ExportedDNSClientConfiguration.
[ "Converts", "DNSClientConfiguration", "to", "ExportedDNSClientConfiguration", "." ]
python
train
43.857143
BenDoan/perform
perform.py
https://github.com/BenDoan/perform/blob/3434c5c68fb7661d74f03404c71bb5fbebe1900f/perform.py#L128-L140
def get_programs(): """Returns a generator that yields the available executable programs :returns: a generator that yields the programs available after a refresh_listing() :rtype: generator """ os.environ['PATH'] += os.pathsep + os.getcwd() for p in os.environ['PATH'].split(os.pathsep): if path.isdir(p): for f in os.listdir(p): if _is_executable(path.join(p, f)): yield f
[ "def", "get_programs", "(", ")", ":", "os", ".", "environ", "[", "'PATH'", "]", "+=", "os", ".", "pathsep", "+", "os", ".", "getcwd", "(", ")", "for", "p", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "if", "path", ".", "isdir", "(", "p", ")", ":", "for", "f", "in", "os", ".", "listdir", "(", "p", ")", ":", "if", "_is_executable", "(", "path", ".", "join", "(", "p", ",", "f", ")", ")", ":", "yield", "f" ]
Returns a generator that yields the available executable programs :returns: a generator that yields the programs available after a refresh_listing() :rtype: generator
[ "Returns", "a", "generator", "that", "yields", "the", "available", "executable", "programs" ]
python
train
35.384615
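A hedged usage sketch for get_programs; it assumes the perform module above is importable, and the output depends entirely on the local PATH.

from itertools import islice
from perform import get_programs  # assumed import

for name in islice(get_programs(), 5):  # first five executables found on PATH
    print(name)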
saltstack/salt
salt/utils/path.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/path.py#L41-L73
def islink(path): ''' Equivalent to os.path.islink() ''' if six.PY3 or not salt.utils.platform.is_windows(): return os.path.islink(path) if not HAS_WIN32FILE: log.error('Cannot check if %s is a link, missing required modules', path) if not _is_reparse_point(path): return False # check that it is a symlink reparse point (in case it is something else, # like a mount point) reparse_data = _get_reparse_data(path) # sanity check - this should not happen if not reparse_data: # not a reparse point return False # REPARSE_DATA_BUFFER structure - see # http://msdn.microsoft.com/en-us/library/ff552012.aspx # parse the structure header to work out which type of reparse point this is header_parser = struct.Struct('L') ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size]) # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx if not ReparseTag & 0xA000FFFF == 0xA000000C: return False else: return True
[ "def", "islink", "(", "path", ")", ":", "if", "six", ".", "PY3", "or", "not", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "return", "os", ".", "path", ".", "islink", "(", "path", ")", "if", "not", "HAS_WIN32FILE", ":", "log", ".", "error", "(", "'Cannot check if %s is a link, missing required modules'", ",", "path", ")", "if", "not", "_is_reparse_point", "(", "path", ")", ":", "return", "False", "# check that it is a symlink reparse point (in case it is something else,", "# like a mount point)", "reparse_data", "=", "_get_reparse_data", "(", "path", ")", "# sanity check - this should not happen", "if", "not", "reparse_data", ":", "# not a reparse point", "return", "False", "# REPARSE_DATA_BUFFER structure - see", "# http://msdn.microsoft.com/en-us/library/ff552012.aspx", "# parse the structure header to work out which type of reparse point this is", "header_parser", "=", "struct", ".", "Struct", "(", "'L'", ")", "ReparseTag", ",", "=", "header_parser", ".", "unpack", "(", "reparse_data", "[", ":", "header_parser", ".", "size", "]", ")", "# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx", "if", "not", "ReparseTag", "&", "0xA000FFFF", "==", "0xA000000C", ":", "return", "False", "else", ":", "return", "True" ]
Equivalent to os.path.islink()
[ "Equivalent", "to", "os", ".", "path", ".", "islink", "()" ]
python
train
31.484848
cdgriffith/Reusables
reusables/cli.py
https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/cli.py#L204-L231
def cp(src, dst, overwrite=False): """ Copy files to a new location. :param src: list (or string) of paths of files to copy :param dst: file or folder to copy item(s) to :param overwrite: If the file already exists, should I overwrite it? """ if not isinstance(src, list): src = [src] dst = os.path.expanduser(dst) dst_folder = os.path.isdir(dst) if len(src) > 1 and not dst_folder: raise OSError("Cannot copy multiple items to same file") for item in src: source = os.path.expanduser(item) destination = (dst if not dst_folder else os.path.join(dst, os.path.basename(source))) if not overwrite and os.path.exists(destination): _logger.warning("Not replacing {0} with {1}, overwrite not enabled" "".format(destination, source)) continue shutil.copy(source, destination)
[ "def", "cp", "(", "src", ",", "dst", ",", "overwrite", "=", "False", ")", ":", "if", "not", "isinstance", "(", "src", ",", "list", ")", ":", "src", "=", "[", "src", "]", "dst", "=", "os", ".", "path", ".", "expanduser", "(", "dst", ")", "dst_folder", "=", "os", ".", "path", ".", "isdir", "(", "dst", ")", "if", "len", "(", "src", ")", ">", "1", "and", "not", "dst_folder", ":", "raise", "OSError", "(", "\"Cannot copy multiple items to same file\"", ")", "for", "item", "in", "src", ":", "source", "=", "os", ".", "path", ".", "expanduser", "(", "item", ")", "destination", "=", "(", "dst", "if", "not", "dst_folder", "else", "os", ".", "path", ".", "join", "(", "dst", ",", "os", ".", "path", ".", "basename", "(", "source", ")", ")", ")", "if", "not", "overwrite", "and", "os", ".", "path", ".", "exists", "(", "destination", ")", ":", "_logger", ".", "warning", "(", "\"Not replacing {0} with {1}, overwrite not enabled\"", "\"\"", ".", "format", "(", "destination", ",", "source", ")", ")", "continue", "shutil", ".", "copy", "(", "source", ",", "destination", ")" ]
Copy files to a new location. :param src: list (or string) of paths of files to copy :param dst: file or folder to copy item(s) to :param overwrite: If the file already exists, should I overwrite it?
[ "Copy", "files", "to", "a", "new", "location", "." ]
python
train
32.642857
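An illustrative sketch of the cp helper above; the import location is assumed and all paths are hypothetical.

from reusables.cli import cp  # assumed import path

cp(['a.txt', 'b.txt'], '/tmp/backup')           # multiple sources require a folder target
cp('a.txt', '/tmp/a_copy.txt', overwrite=True)  # replaces an existing file; without
                                                # overwrite=True it only logs a warning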
5monkeys/django-enumfield
django_enumfield/enum.py
https://github.com/5monkeys/django-enumfield/blob/6cf20c0fba013d39960af0f4d2c9a3b399955eb3/django_enumfield/enum.py#L69-L77
def choices(cls, blank=False): """ Choices for Enum :return: List of tuples (<value>, <human-readable value>) :rtype: list """ choices = sorted([(key, value) for key, value in cls.values.items()], key=lambda x: x[0]) if blank: choices.insert(0, ('', Enum.Value('', None, '', cls))) return choices
[ "def", "choices", "(", "cls", ",", "blank", "=", "False", ")", ":", "choices", "=", "sorted", "(", "[", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "cls", ".", "values", ".", "items", "(", ")", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "if", "blank", ":", "choices", ".", "insert", "(", "0", ",", "(", "''", ",", "Enum", ".", "Value", "(", "''", ",", "None", ",", "''", ",", "cls", ")", ")", ")", "return", "choices" ]
Choices for Enum :return: List of tuples (<value>, <human-readable value>) :rtype: list
[ "Choices", "for", "Enum", ":", "return", ":", "List", "of", "tuples", "(", "<value", ">", "<human", "-", "readable", "value", ">", ")", ":", "rtype", ":", "list" ]
python
train
39.555556
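A minimal sketch of choices() with a django-enumfield Enum subclass; the class and member names are invented.

from django_enumfield import enum

class BeerStyle(enum.Enum):
    LAGER = 0
    STOUT = 1

BeerStyle.choices()            # [(0, <LAGER Value>), (1, <STOUT Value>)], sorted by value
BeerStyle.choices(blank=True)  # prepends ('', <empty Value>) for optional form fields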
swilson/aqualogic
aqualogic/core.py
https://github.com/swilson/aqualogic/blob/b6e904363efc4f64c70aae127d040079587ecbc6/aqualogic/core.py#L392-L402
def states(self): """Returns a list containing the enabled states.""" state_list = [] for state in States: if state.value & self._states != 0: state_list.append(state) if (self._flashing_states & States.FILTER) != 0: state_list.append(States.FILTER_LOW_SPEED) return state_list
[ "def", "states", "(", "self", ")", ":", "state_list", "=", "[", "]", "for", "state", "in", "States", ":", "if", "state", ".", "value", "&", "self", ".", "_states", "!=", "0", ":", "state_list", ".", "append", "(", "state", ")", "if", "(", "self", ".", "_flashing_states", "&", "States", ".", "FILTER", ")", "!=", "0", ":", "state_list", ".", "append", "(", "States", ".", "FILTER_LOW_SPEED", ")", "return", "state_list" ]
Returns a list containing the enabled states.
[ "Returns", "a", "list", "containing", "the", "enabled", "states", "." ]
python
train
31.636364
fracpete/python-weka-wrapper
python/weka/core/tokenizers.py
https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/tokenizers.py#L42-L52
def next(self): """ Reads the next dataset row. :return: the next row :rtype: Instance """ if not self.__has_more(): raise StopIteration() else: return javabridge.get_env().get_string(self.__next())
[ "def", "next", "(", "self", ")", ":", "if", "not", "self", ".", "__has_more", "(", ")", ":", "raise", "StopIteration", "(", ")", "else", ":", "return", "javabridge", ".", "get_env", "(", ")", ".", "get_string", "(", "self", ".", "__next", "(", ")", ")" ]
Reads the next dataset row. :return: the next row :rtype: Instance
[ "Reads", "the", "next", "dataset", "row", "." ]
python
train
24.454545
anthill/koala
koala/excellib.py
https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/excellib.py#L970-L995
def xirr(values, dates, guess=0): """ Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the excel function XIRR(). Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d :param values: the payments of which at least one has to be negative. :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019). :param guess: an initial guess which is required by Excel but isn't used by this function. :return: a float being the IRR. """ if isinstance(values, Range): values = values.values if isinstance(dates, Range): dates = dates.values if guess is not None and guess != 0: raise ValueError('guess value for excellib.irr() is %s and not 0' % guess) else: try: return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0) except RuntimeError: # Failed to converge? return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10)
[ "def", "xirr", "(", "values", ",", "dates", ",", "guess", "=", "0", ")", ":", "if", "isinstance", "(", "values", ",", "Range", ")", ":", "values", "=", "values", ".", "values", "if", "isinstance", "(", "dates", ",", "Range", ")", ":", "dates", "=", "dates", ".", "values", "if", "guess", "is", "not", "None", "and", "guess", "!=", "0", ":", "raise", "ValueError", "(", "'guess value for excellib.irr() is %s and not 0'", "%", "guess", ")", "else", ":", "try", ":", "return", "scipy", ".", "optimize", ".", "newton", "(", "lambda", "r", ":", "xnpv", "(", "r", ",", "values", ",", "dates", ",", "lim_rate", "=", "False", ")", ",", "0.0", ")", "except", "RuntimeError", ":", "# Failed to converge?", "return", "scipy", ".", "optimize", ".", "brentq", "(", "lambda", "r", ":", "xnpv", "(", "r", ",", "values", ",", "dates", ",", "lim_rate", "=", "False", ")", ",", "-", "1.0", ",", "1e10", ")" ]
Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the excel function XIRR(). Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d :param values: the payments of which at least one has to be negative. :param dates: the dates as excel dates (e.g. 43571 for 16/04/2019). :param guess: an initial guess which is required by Excel but isn't used by this function. :return: a float being the IRR.
[ "Function", "to", "calculate", "the", "internal", "rate", "of", "return", "(", "IRR", ")", "using", "payments", "and", "non", "-", "periodic", "dates", ".", "It", "resembles", "the", "excel", "function", "XIRR", "()", "." ]
python
train
41.807692
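A worked sketch of xirr with invented cash flows: -1000 on Excel day 43466 (2019-01-01) and +1100 on day 43831 (2020-01-01), exactly one year apart, so the solver should return roughly 10%.

rate = xirr([-1000.0, 1100.0], [43466, 43831])
# rate is approximately 0.10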
pmorissette/ffn
ffn/core.py
https://github.com/pmorissette/ffn/blob/ef09f28b858b7ffcd2627ce6a4dc618183a6bc8a/ffn/core.py#L1058-L1070
def to_price_index(returns, start=100): """ Returns a price index given a series of returns. Args: * returns: Expects a return series * start (number): Starting level Assumes arithmetic returns. Formula is: cumprod (1+r) """ return (returns.replace(to_replace=np.nan, value=0) + 1).cumprod() * start
[ "def", "to_price_index", "(", "returns", ",", "start", "=", "100", ")", ":", "return", "(", "returns", ".", "replace", "(", "to_replace", "=", "np", ".", "nan", ",", "value", "=", "0", ")", "+", "1", ")", ".", "cumprod", "(", ")", "*", "start" ]
Returns a price index given a series of returns. Args: * returns: Expects a return series * start (number): Starting level Assumes arithmetic returns. Formula is: cumprod (1+r)
[ "Returns", "a", "price", "index", "given", "a", "series", "of", "returns", "." ]
python
train
25.692308
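A short sketch of to_price_index with pandas, which ffn builds on; note that NaN returns are replaced with 0 before compounding. The import location is assumed from the path above.

import numpy as np
import pandas as pd
from ffn.core import to_price_index  # assumed import

returns = pd.Series([0.01, -0.02, np.nan, 0.03])
to_price_index(returns, start=100)
# 101.0 -> 98.98 -> 98.98 -> 101.9494, i.e. cumprod(1 + r) scaled by 100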
inveniosoftware-attic/invenio-comments
invenio_comments/api.py
https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/api.py#L1217-L1238
def get_user_subscription_to_discussion(recID, uid): """ Returns the type of subscription for the given user to this discussion. This does not check authorizations (e.g. if user was subscribed, but is suddenly no longer authorized). :param recID: record ID :param uid: user id :return: - 0 if user is not subscribed to discussion - 1 if user is subscribed, and is allowed to unsubscribe - 2 if user is subscribed, but cannot unsubscribe """ user_email = User.query.get(uid).email (emails1, emails2) = get_users_subscribed_to_discussion( recID, check_authorizations=False) if user_email in emails1: return 1 elif user_email in emails2: return 2 else: return 0
[ "def", "get_user_subscription_to_discussion", "(", "recID", ",", "uid", ")", ":", "user_email", "=", "User", ".", "query", ".", "get", "(", "uid", ")", ".", "email", "(", "emails1", ",", "emails2", ")", "=", "get_users_subscribed_to_discussion", "(", "recID", ",", "check_authorizations", "=", "False", ")", "if", "user_email", "in", "emails1", ":", "return", "1", "elif", "user_email", "in", "emails2", ":", "return", "2", "else", ":", "return", "0" ]
Returns the type of subscription for the given user to this discussion. This does not check authorizations (e.g. if user was subscribed, but is suddenly no longer authorized). :param recID: record ID :param uid: user id :return: - 0 if user is not subscribed to discussion - 1 if user is subscribed, and is allowed to unsubscribe - 2 if user is subscribed, but cannot unsubscribe
[ "Returns", "the", "type", "of", "subscription", "for", "the", "given", "user", "to", "this", "discussion", ".", "This", "does", "not", "check", "authorizations", "(", "e", ".", "g", ".", "if", "user", "was", "subscribed", "but", "is", "suddenly", "no", "longer", "authorized", ")", "." ]
python
train
35
inveniosoftware/invenio-admin
invenio_admin/filters.py
https://github.com/inveniosoftware/invenio-admin/blob/b5ff8f7de66d1d6b67efc9f81ff094eb2428f969/invenio_admin/filters.py#L43-L45
def conv_uuid(self, column, name, **kwargs): """Convert UUID filter.""" return [f(column, name, **kwargs) for f in self.uuid_filters]
[ "def", "conv_uuid", "(", "self", ",", "column", ",", "name", ",", "*", "*", "kwargs", ")", ":", "return", "[", "f", "(", "column", ",", "name", ",", "*", "*", "kwargs", ")", "for", "f", "in", "self", ".", "uuid_filters", "]" ]
Convert UUID filter.
[ "Convert", "UUID", "filter", "." ]
python
train
49
jonbretman/jinja-to-js
jinja_to_js/__init__.py
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L270-L283
def _get_depencency_var_name(self, dependency): """ Returns the variable name assigned to the given dependency or None if the dependency has not yet been registered. Args: dependency (str): The dependency that needs to be imported. Returns: str or None """ for dep_path, var_name in self.dependencies: if dep_path == dependency: return var_name
[ "def", "_get_depencency_var_name", "(", "self", ",", "dependency", ")", ":", "for", "dep_path", ",", "var_name", "in", "self", ".", "dependencies", ":", "if", "dep_path", "==", "dependency", ":", "return", "var_name" ]
Returns the variable name assigned to the given dependency or None if the dependency has not yet been registered. Args: dependency (str): The dependency that needs to be imported. Returns: str or None
[ "Returns", "the", "variable", "name", "assigned", "to", "the", "given", "dependency", "or", "None", "if", "the", "dependency", "has", "not", "yet", "been", "registered", "." ]
python
train
31.571429
gabstopper/smc-python
smc/base/collection.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/collection.py#L116-L126
def get(self, index): """ Get the element by index. If index is out of bounds for the internal list, None is returned. Indexes cannot be negative. :param int index: retrieve element by positive index in list :rtype: SubElement or None """ if self and (index <= len(self) -1): return self._result_cache[index]
[ "def", "get", "(", "self", ",", "index", ")", ":", "if", "self", "and", "(", "index", "<=", "len", "(", "self", ")", "-", "1", ")", ":", "return", "self", ".", "_result_cache", "[", "index", "]" ]
Get the element by index. If index is out of bounds for the internal list, None is returned. Indexes cannot be negative. :param int index: retrieve element by positive index in list :rtype: SubElement or None
[ "Get", "the", "element", "by", "index", ".", "If", "index", "is", "out", "of", "bounds", "for", "the", "internal", "list", "None", "is", "returned", ".", "Indexes", "cannot", "be", "negative", ".", ":", "param", "int", "index", ":", "retrieve", "element", "by", "positive", "index", "in", "list", ":", "rtype", ":", "SubElement", "or", "None" ]
python
train
34.818182
mattjj/pyslds
pyslds/states.py
https://github.com/mattjj/pyslds/blob/c505c2bd05a5549d450b518f02493b68ed12e590/pyslds/states.py#L853-L874
def heldout_log_likelihood(self, test_mask=None): """ Compute the log likelihood of the masked data given the latent discrete and continuous states. """ if test_mask is None: # If a test mask is not supplied, use the negation of this object's mask if self.mask is None: return 0 else: test_mask = ~self.mask xs = np.hstack((self.gaussian_states, self.inputs)) if self.single_emission: return self.emission_distns[0].\ log_likelihood((xs, self.data), mask=test_mask).sum() else: hll = 0 z = self.stateseq for idx, ed in enumerate(self.emission_distns): hll += ed.log_likelihood((xs[z == idx], self.data[z == idx]), mask=test_mask[z == idx]).sum() return hll
[ "def", "heldout_log_likelihood", "(", "self", ",", "test_mask", "=", "None", ")", ":", "if", "test_mask", "is", "None", ":", "# If a test mask is not supplied, use the negation of this object's mask", "if", "self", ".", "mask", "is", "None", ":", "return", "0", "else", ":", "test_mask", "=", "~", "self", ".", "mask", "xs", "=", "np", ".", "hstack", "(", "(", "self", ".", "gaussian_states", ",", "self", ".", "inputs", ")", ")", "if", "self", ".", "single_emission", ":", "return", "self", ".", "emission_distns", "[", "0", "]", ".", "log_likelihood", "(", "(", "xs", ",", "self", ".", "data", ")", ",", "mask", "=", "test_mask", ")", ".", "sum", "(", ")", "else", ":", "hll", "=", "0", "z", "=", "self", ".", "stateseq", "for", "idx", ",", "ed", "in", "enumerate", "(", "self", ".", "emission_distns", ")", ":", "hll", "+=", "ed", ".", "log_likelihood", "(", "(", "xs", "[", "z", "==", "idx", "]", ",", "self", ".", "data", "[", "z", "==", "idx", "]", ")", ",", "mask", "=", "test_mask", "[", "z", "==", "idx", "]", ")", ".", "sum", "(", ")", "return", "hll" ]
Compute the log likelihood of the masked data given the latent discrete and continuous states.
[ "Compute", "the", "log", "likelihood", "of", "the", "masked", "data", "given", "the", "latent", "discrete", "and", "continuous", "states", "." ]
python
train
39.909091
gem/oq-engine
openquake/calculators/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/views.py#L51-L91
def form(value): """ Format numbers in a nice way. >>> form(0) '0' >>> form(0.0) '0.0' >>> form(0.0001) '1.000E-04' >>> form(1003.4) '1,003' >>> form(103.4) '103' >>> form(9.3) '9.30000' >>> form(-1.2) '-1.2' """ if isinstance(value, FLOAT + INT): if value <= 0: return str(value) elif value < .001: return '%.3E' % value elif value < 10 and isinstance(value, FLOAT): return '%.5f' % value elif value > 1000: return '{:,d}'.format(int(round(value))) elif numpy.isnan(value): return 'NaN' else: # in the range 10-1000 return str(int(value)) elif isinstance(value, bytes): return decode(value) elif isinstance(value, str): return value elif isinstance(value, numpy.object_): return str(value) elif hasattr(value, '__len__') and len(value) > 1: return ' '.join(map(form, value)) return str(value)
[ "def", "form", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "FLOAT", "+", "INT", ")", ":", "if", "value", "<=", "0", ":", "return", "str", "(", "value", ")", "elif", "value", "<", ".001", ":", "return", "'%.3E'", "%", "value", "elif", "value", "<", "10", "and", "isinstance", "(", "value", ",", "FLOAT", ")", ":", "return", "'%.5f'", "%", "value", "elif", "value", ">", "1000", ":", "return", "'{:,d}'", ".", "format", "(", "int", "(", "round", "(", "value", ")", ")", ")", "elif", "numpy", ".", "isnan", "(", "value", ")", ":", "return", "'NaN'", "else", ":", "# in the range 10-1000", "return", "str", "(", "int", "(", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "decode", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "numpy", ".", "object_", ")", ":", "return", "str", "(", "value", ")", "elif", "hasattr", "(", "value", ",", "'__len__'", ")", "and", "len", "(", "value", ")", ">", "1", ":", "return", "' '", ".", "join", "(", "map", "(", "form", ",", "value", ")", ")", "return", "str", "(", "value", ")" ]
Format numbers in a nice way. >>> form(0) '0' >>> form(0.0) '0.0' >>> form(0.0001) '1.000E-04' >>> form(1003.4) '1,003' >>> form(103.4) '103' >>> form(9.3) '9.30000' >>> form(-1.2) '-1.2'
[ "Format", "numbers", "in", "a", "nice", "way", "." ]
python
train
24.365854
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L37-L49
def police_priority_map_conform_map_pri1_conform(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer") name_key = ET.SubElement(police_priority_map, "name") name_key.text = kwargs.pop('name') conform = ET.SubElement(police_priority_map, "conform") map_pri1_conform = ET.SubElement(conform, "map-pri1-conform") map_pri1_conform.text = kwargs.pop('map_pri1_conform') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "police_priority_map_conform_map_pri1_conform", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-policer\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "conform", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"conform\"", ")", "map_pri1_conform", "=", "ET", ".", "SubElement", "(", "conform", ",", "\"map-pri1-conform\"", ")", "map_pri1_conform", ".", "text", "=", "kwargs", ".", "pop", "(", "'map_pri1_conform'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
49.846154
frigg/frigg-worker
frigg_worker/deployments.py
https://github.com/frigg/frigg-worker/blob/8c215cd8f5a27ff9f5a4fedafe93d2ef0fbca86c/frigg_worker/deployments.py#L86-L96
def load_preset(self): """ Loads preset if it is specified in the .frigg.yml """ if 'preset' in self.settings.preview: with open(os.path.join(os.path.dirname(__file__), 'presets.yaml')) as f: presets = yaml.load(f.read()) if self.settings.preview['preset'] in presets: self.preset = presets[self.settings.preview['preset']] return self.preset
[ "def", "load_preset", "(", "self", ")", ":", "if", "'preset'", "in", "self", ".", "settings", ".", "preview", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'presets.yaml'", ")", ")", "as", "f", ":", "presets", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", "if", "self", ".", "settings", ".", "preview", "[", "'preset'", "]", "in", "presets", ":", "self", ".", "preset", "=", "presets", "[", "self", ".", "settings", ".", "preview", "[", "'preset'", "]", "]", "return", "self", ".", "preset" ]
Loads preset if it is specified in the .frigg.yml
[ "Loads", "preset", "if", "it", "is", "specified", "in", "the", ".", "frigg", ".", "yml" ]
python
train
39.363636
saltstack/salt
salt/utils/win_update.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_update.py#L169-L242
def summary(self): ''' Create a dictionary with a summary of the updates in the collection. Returns: dict: Summary of the contents of the collection .. code-block:: cfg Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } Code Example: .. code-block:: python import salt.utils.win_update updates = salt.utils.win_update.Updates() updates.summary() ''' # https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx if self.count() == 0: return 'Nothing to return' # Build a dictionary containing a summary of updates available results = {'Total': 0, 'Available': 0, 'Downloaded': 0, 'Installed': 0, 'Categories': {}, 'Severity': {}} for update in self.updates: # Count the total number of updates available results['Total'] += 1 # Updates available for download if not salt.utils.data.is_true(update.IsDownloaded) \ and not salt.utils.data.is_true(update.IsInstalled): results['Available'] += 1 # Updates downloaded awaiting install if salt.utils.data.is_true(update.IsDownloaded) \ and not salt.utils.data.is_true(update.IsInstalled): results['Downloaded'] += 1 # Updates installed if salt.utils.data.is_true(update.IsInstalled): results['Installed'] += 1 # Add Categories and increment total for each one # The sum will be more than the total because each update can have # multiple categories for category in update.Categories: if category.Name in results['Categories']: results['Categories'][category.Name] += 1 else: results['Categories'][category.Name] = 1 # Add Severity Summary if update.MsrcSeverity: if update.MsrcSeverity in results['Severity']: results['Severity'][update.MsrcSeverity] += 1 else: results['Severity'][update.MsrcSeverity] = 1 return results
[ "def", "summary", "(", "self", ")", ":", "# https://msdn.microsoft.com/en-us/library/windows/desktop/aa386099(v=vs.85).aspx", "if", "self", ".", "count", "(", ")", "==", "0", ":", "return", "'Nothing to return'", "# Build a dictionary containing a summary of updates available", "results", "=", "{", "'Total'", ":", "0", ",", "'Available'", ":", "0", ",", "'Downloaded'", ":", "0", ",", "'Installed'", ":", "0", ",", "'Categories'", ":", "{", "}", ",", "'Severity'", ":", "{", "}", "}", "for", "update", "in", "self", ".", "updates", ":", "# Count the total number of updates available", "results", "[", "'Total'", "]", "+=", "1", "# Updates available for download", "if", "not", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "update", ".", "IsDownloaded", ")", "and", "not", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "update", ".", "IsInstalled", ")", ":", "results", "[", "'Available'", "]", "+=", "1", "# Updates downloaded awaiting install", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "update", ".", "IsDownloaded", ")", "and", "not", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "update", ".", "IsInstalled", ")", ":", "results", "[", "'Downloaded'", "]", "+=", "1", "# Updates installed", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "update", ".", "IsInstalled", ")", ":", "results", "[", "'Installed'", "]", "+=", "1", "# Add Categories and increment total for each one", "# The sum will be more than the total because each update can have", "# multiple categories", "for", "category", "in", "update", ".", "Categories", ":", "if", "category", ".", "Name", "in", "results", "[", "'Categories'", "]", ":", "results", "[", "'Categories'", "]", "[", "category", ".", "Name", "]", "+=", "1", "else", ":", "results", "[", "'Categories'", "]", "[", "category", ".", "Name", "]", "=", "1", "# Add Severity Summary", "if", "update", ".", "MsrcSeverity", ":", "if", "update", ".", "MsrcSeverity", "in", "results", "[", "'Severity'", "]", ":", "results", "[", "'Severity'", "]", "[", "update", ".", "MsrcSeverity", "]", "+=", "1", "else", ":", "results", "[", "'Severity'", "]", "[", "update", ".", "MsrcSeverity", "]", "=", "1", "return", "results" ]
Create a dictionary with a summary of the updates in the collection. Returns: dict: Summary of the contents of the collection .. code-block:: cfg Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } Code Example: .. code-block:: python import salt.utils.win_update updates = salt.utils.win_update.Updates() updates.summary()
[ "Create", "a", "dictionary", "with", "a", "summary", "of", "the", "updates", "in", "the", "collection", "." ]
python
train
37.040541
dls-controls/pymalcolm
malcolm/core/notifier.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/notifier.py#L217-L246
def handle_subscribe(self, request, path): # type: (Subscribe, List[str]) -> CallbackResponses """Add to the list of requests to notify, and notify the initial value of the data held Args: request (Subscribe): The subscribe request path (list): The relative path from ourself Returns: list: [(callback, Response)] that need to be called """ ret = [] if path: # Recurse down name = path[0] if name not in self.children: self.children[name] = NotifierNode( getattr(self.data, name, None), self) ret += self.children[name].handle_subscribe(request, path[1:]) else: # This is for us serialized = serialize_object(self.data) if request.delta: self.delta_requests.append(request) ret.append(request.delta_response([[[], serialized]])) else: self.update_requests.append(request) ret.append(request.update_response(serialized)) return ret
[ "def", "handle_subscribe", "(", "self", ",", "request", ",", "path", ")", ":", "# type: (Subscribe, List[str]) -> CallbackResponses", "ret", "=", "[", "]", "if", "path", ":", "# Recurse down", "name", "=", "path", "[", "0", "]", "if", "name", "not", "in", "self", ".", "children", ":", "self", ".", "children", "[", "name", "]", "=", "NotifierNode", "(", "getattr", "(", "self", ".", "data", ",", "name", ",", "None", ")", ",", "self", ")", "ret", "+=", "self", ".", "children", "[", "name", "]", ".", "handle_subscribe", "(", "request", ",", "path", "[", "1", ":", "]", ")", "else", ":", "# This is for us", "serialized", "=", "serialize_object", "(", "self", ".", "data", ")", "if", "request", ".", "delta", ":", "self", ".", "delta_requests", ".", "append", "(", "request", ")", "ret", ".", "append", "(", "request", ".", "delta_response", "(", "[", "[", "[", "]", ",", "serialized", "]", "]", ")", ")", "else", ":", "self", ".", "update_requests", ".", "append", "(", "request", ")", "ret", ".", "append", "(", "request", ".", "update_response", "(", "serialized", ")", ")", "return", "ret" ]
Add to the list of requests to notify, and notify the initial value of the data held Args: request (Subscribe): The subscribe request path (list): The relative path from ourself Returns: list: [(callback, Response)] that need to be called
[ "Add", "to", "the", "list", "of", "requests", "to", "notify", "and", "notify", "the", "initial", "value", "of", "the", "data", "held" ]
python
train
37.133333
squaresLab/BugZoo
bugzoo/client/dockerm.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/client/dockerm.py#L18-L28
def has_image(self, name: str) -> bool: """ Determines whether the server has a Docker image with a given name. """ path = "docker/images/{}".format(name) r = self.__api.head(path) if r.status_code == 204: return True elif r.status_code == 404: return False self.__api.handle_erroneous_response(r)
[ "def", "has_image", "(", "self", ",", "name", ":", "str", ")", "->", "bool", ":", "path", "=", "\"docker/images/{}\"", ".", "format", "(", "name", ")", "r", "=", "self", ".", "__api", ".", "head", "(", "path", ")", "if", "r", ".", "status_code", "==", "204", ":", "return", "True", "elif", "r", ".", "status_code", "==", "404", ":", "return", "False", "self", ".", "__api", ".", "handle_erroneous_response", "(", "r", ")" ]
Determines whether the server has a Docker image with a given name.
[ "Determines", "whether", "the", "server", "has", "a", "Docker", "image", "with", "a", "given", "name", "." ]
python
train
34.090909
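A hedged sketch of has_image; the client object and image name are assumptions (it requires a running BugZoo server), and only the status-code mapping comes from the record above.

if client.docker.has_image('squareslab/manybugs:example'):  # hypothetical names
    print('image already present on the server')
# HTTP 204 -> True, 404 -> False; any other status is passed to the error handler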
log2timeline/dfvfs
dfvfs/file_io/raw_file_io.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/raw_file_io.py#L33-L41
def _Close(self): """Closes the file-like object.""" # pylint: disable=protected-access super(RawFile, self)._Close() for file_object in self._file_objects: file_object.close() self._file_objects = []
[ "def", "_Close", "(", "self", ")", ":", "# pylint: disable=protected-access", "super", "(", "RawFile", ",", "self", ")", ".", "_Close", "(", ")", "for", "file_object", "in", "self", ".", "_file_objects", ":", "file_object", ".", "close", "(", ")", "self", ".", "_file_objects", "=", "[", "]" ]
Closes the file-like object.
[ "Closes", "the", "file", "-", "like", "object", "." ]
python
train
24.444444
notifiers/notifiers
notifiers/utils/schema/formats.py
https://github.com/notifiers/notifiers/blob/6dd8aafff86935dbb4763db9c56f9cdd7fc08b65/notifiers/utils/schema/formats.py#L57-L61
def is_valid_port(instance: int): """Validates data is a valid port""" if not isinstance(instance, (int, str)): return True return int(instance) in range(65535)
[ "def", "is_valid_port", "(", "instance", ":", "int", ")", ":", "if", "not", "isinstance", "(", "instance", ",", "(", "int", ",", "str", ")", ")", ":", "return", "True", "return", "int", "(", "instance", ")", "in", "range", "(", "65535", ")" ]
Validates data is a valid port
[ "Validates", "data", "is", "a", "valid", "port" ]
python
train
35.2
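A behavior sketch that follows the branches above literally; note the permissive edge cases (non int/str inputs validate as True, and range(65535) covers 0-65534 only).

is_valid_port(8080)   # True
is_valid_port('22')   # True  -- numeric strings are cast with int()
is_valid_port(70000)  # False -- outside range(65535)
is_valid_port(None)   # True  -- non int/str inputs pass the early return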
aio-libs/aiohttp-devtools
aiohttp_devtools/start/template/app/settings.py
https://github.com/aio-libs/aiohttp-devtools/blob/e9ea6feb43558e6e64595ea0ea5613f226cba81f/aiohttp_devtools/start/template/app/settings.py#L48-L76
def substitute_environ(self): """ Substitute environment variables into settings. """ for attr_name in dir(self): if attr_name.startswith('_') or attr_name.upper() != attr_name: continue orig_value = getattr(self, attr_name) is_required = isinstance(orig_value, Required) orig_type = orig_value.v_type if is_required else type(orig_value) env_var_name = self._ENV_PREFIX + attr_name env_var = os.getenv(env_var_name, None) if env_var is not None: if issubclass(orig_type, bool): env_var = env_var.upper() in ('1', 'TRUE') elif issubclass(orig_type, int): env_var = int(env_var) elif issubclass(orig_type, Path): env_var = Path(env_var) elif issubclass(orig_type, bytes): env_var = env_var.encode() # could do floats here and lists etc via json setattr(self, attr_name, env_var) elif is_required and attr_name not in self._custom_settings: raise RuntimeError('The required environment variable "{0}" is currently not set, ' 'you\'ll need to run `source activate.settings.sh` ' 'or you can set that single environment variable with ' '`export {0}="<value>"`'.format(env_var_name))
[ "def", "substitute_environ", "(", "self", ")", ":", "for", "attr_name", "in", "dir", "(", "self", ")", ":", "if", "attr_name", ".", "startswith", "(", "'_'", ")", "or", "attr_name", ".", "upper", "(", ")", "!=", "attr_name", ":", "continue", "orig_value", "=", "getattr", "(", "self", ",", "attr_name", ")", "is_required", "=", "isinstance", "(", "orig_value", ",", "Required", ")", "orig_type", "=", "orig_value", ".", "v_type", "if", "is_required", "else", "type", "(", "orig_value", ")", "env_var_name", "=", "self", ".", "_ENV_PREFIX", "+", "attr_name", "env_var", "=", "os", ".", "getenv", "(", "env_var_name", ",", "None", ")", "if", "env_var", "is", "not", "None", ":", "if", "issubclass", "(", "orig_type", ",", "bool", ")", ":", "env_var", "=", "env_var", ".", "upper", "(", ")", "in", "(", "'1'", ",", "'TRUE'", ")", "elif", "issubclass", "(", "orig_type", ",", "int", ")", ":", "env_var", "=", "int", "(", "env_var", ")", "elif", "issubclass", "(", "orig_type", ",", "Path", ")", ":", "env_var", "=", "Path", "(", "env_var", ")", "elif", "issubclass", "(", "orig_type", ",", "bytes", ")", ":", "env_var", "=", "env_var", ".", "encode", "(", ")", "# could do floats here and lists etc via json", "setattr", "(", "self", ",", "attr_name", ",", "env_var", ")", "elif", "is_required", "and", "attr_name", "not", "in", "self", ".", "_custom_settings", ":", "raise", "RuntimeError", "(", "'The required environment variable \"{0}\" is currently not set, '", "'you\\'ll need to run `source activate.settings.sh` '", "'or you can set that single environment variable with '", "'`export {0}=\"<value>\"`'", ".", "format", "(", "env_var_name", ")", ")" ]
Substitute environment variables into settings.
[ "Substitute", "environment", "variables", "into", "settings", "." ]
python
train
51.448276
calmjs/calmjs
src/calmjs/toolchain.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L412-L483
def toolchain_spec_prepare_loaderplugins( toolchain, spec, loaderplugin_read_key, handler_sourcepath_key, loaderplugin_sourcepath_map_key=LOADERPLUGIN_SOURCEPATH_MAPS): """ A standard helper function for combining the filtered (e.g. using ``spec_update_sourcepath_filter_loaderplugins``) loaderplugin sourcepath mappings back into one that is usable with the standard ``toolchain_spec_compile_entries`` function. Arguments: toolchain The toolchain spec The spec loaderplugin_read_key The read_key associated with the loaderplugin process as set up for the Toolchain that implemented this. If the toolchain has this in its compile_entries: ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink') The loaderplugin_read_key it must use will be 'plugsrc'. handler_sourcepath_key All found handlers will have their handler_sourcepath method be invoked, and the combined results will be a dict stored in the spec under that key. loaderplugin_sourcepath_map_key It must be the same key to the value produced by ``spec_update_sourcepath_filter_loaderplugins`` """ # ensure the registry is applied to the spec registry = spec_update_loaderplugin_registry( spec, default=toolchain.loaderplugin_registry) # this one is named like so for the compile entry method plugin_sourcepath = dict_setget_dict( spec, loaderplugin_read_key + '_sourcepath') # the key is supplied by the toolchain that might make use of this if handler_sourcepath_key: handler_sourcepath = dict_setget_dict(spec, handler_sourcepath_key) else: # provide a null value for this. handler_sourcepath = {} for key, value in spec.get(loaderplugin_sourcepath_map_key, {}).items(): handler = registry.get(key) if handler: # assume handler will do the job. logger.debug("found handler for '%s' loader plugin", key) plugin_sourcepath.update(value) logger.debug( "plugin_sourcepath updated with %d keys", len(value)) # TODO figure out how to address the case where the actual # JavaScript module for the handling wasn't found. handler_sourcepath.update( handler.generate_handler_sourcepath(toolchain, spec, value)) else: logger.warning( "loaderplugin handler for '%s' not found in loaderplugin " "registry '%s'; as arguments associated with loader plugins " "are specific, processing is disabled for this group; the " "sources referenced by the following names will not be " "compiled into the build target: %s", key, registry.registry_name, sorted(value.keys()), )
[ "def", "toolchain_spec_prepare_loaderplugins", "(", "toolchain", ",", "spec", ",", "loaderplugin_read_key", ",", "handler_sourcepath_key", ",", "loaderplugin_sourcepath_map_key", "=", "LOADERPLUGIN_SOURCEPATH_MAPS", ")", ":", "# ensure the registry is applied to the spec", "registry", "=", "spec_update_loaderplugin_registry", "(", "spec", ",", "default", "=", "toolchain", ".", "loaderplugin_registry", ")", "# this one is named like so for the compile entry method", "plugin_sourcepath", "=", "dict_setget_dict", "(", "spec", ",", "loaderplugin_read_key", "+", "'_sourcepath'", ")", "# the key is supplied by the toolchain that might make use of this", "if", "handler_sourcepath_key", ":", "handler_sourcepath", "=", "dict_setget_dict", "(", "spec", ",", "handler_sourcepath_key", ")", "else", ":", "# provide a null value for this.", "handler_sourcepath", "=", "{", "}", "for", "key", ",", "value", "in", "spec", ".", "get", "(", "loaderplugin_sourcepath_map_key", ",", "{", "}", ")", ".", "items", "(", ")", ":", "handler", "=", "registry", ".", "get", "(", "key", ")", "if", "handler", ":", "# assume handler will do the job.", "logger", ".", "debug", "(", "\"found handler for '%s' loader plugin\"", ",", "key", ")", "plugin_sourcepath", ".", "update", "(", "value", ")", "logger", ".", "debug", "(", "\"plugin_sourcepath updated with %d keys\"", ",", "len", "(", "value", ")", ")", "# TODO figure out how to address the case where the actual", "# JavaScript module for the handling wasn't found.", "handler_sourcepath", ".", "update", "(", "handler", ".", "generate_handler_sourcepath", "(", "toolchain", ",", "spec", ",", "value", ")", ")", "else", ":", "logger", ".", "warning", "(", "\"loaderplugin handler for '%s' not found in loaderplugin \"", "\"registry '%s'; as arguments associated with loader plugins \"", "\"are specific, processing is disabled for this group; the \"", "\"sources referenced by the following names will not be \"", "\"compiled into the build target: %s\"", ",", "key", ",", "registry", ".", "registry_name", ",", "sorted", "(", "value", ".", "keys", "(", ")", ")", ",", ")" ]
A standard helper function for combining the filtered (e.g. using ``spec_update_sourcepath_filter_loaderplugins``) loaderplugin sourcepath mappings back into one that is usable with the standard ``toolchain_spec_compile_entries`` function. Arguments: toolchain The toolchain spec The spec loaderplugin_read_key The read_key associated with the loaderplugin process as set up for the Toolchain that implemented this. If the toolchain has this in its compile_entries: ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink') The loaderplugin_read_key it must use will be 'plugsrc'. handler_sourcepath_key All found handlers will have their handler_sourcepath method be invoked, and the combined results will be a dict stored in the spec under that key. loaderplugin_sourcepath_map_key It must be the same key to the value produced by ``spec_update_sourcepath_filter_loaderplugins``
[ "A", "standard", "helper", "function", "for", "combining", "the", "filtered", "(", "e", ".", "g", ".", "using", "spec_update_sourcepath_filter_loaderplugins", ")", "loaderplugin", "sourcepath", "mappings", "back", "into", "one", "that", "is", "usable", "with", "the", "standard", "toolchain_spec_compile_entries", "function", "." ]
python
train
39.902778
gwpy/gwpy
gwpy/signal/qtransform.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/qtransform.py#L147-L155
def _iter_qs(self): """Iterate over the Q values """ # work out how many Qs we need cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.) nplanes = int(max(ceil(cumum / self.deltam), 1)) dq = cumum / nplanes # pylint: disable=invalid-name for i in xrange(nplanes): yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5))
[ "def", "_iter_qs", "(", "self", ")", ":", "# work out how many Qs we need", "cumum", "=", "log", "(", "self", ".", "qrange", "[", "1", "]", "/", "self", ".", "qrange", "[", "0", "]", ")", "/", "2", "**", "(", "1", "/", "2.", ")", "nplanes", "=", "int", "(", "max", "(", "ceil", "(", "cumum", "/", "self", ".", "deltam", ")", ",", "1", ")", ")", "dq", "=", "cumum", "/", "nplanes", "# pylint: disable=invalid-name", "for", "i", "in", "xrange", "(", "nplanes", ")", ":", "yield", "self", ".", "qrange", "[", "0", "]", "*", "exp", "(", "2", "**", "(", "1", "/", "2.", ")", "*", "dq", "*", "(", "i", "+", ".5", ")", ")" ]
Iterate over the Q values
[ "Iterate", "over", "the", "Q", "values" ]
python
train
42.444444
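A standalone sketch of the Q spacing that _iter_qs implements; qrange and the mismatch step deltam are illustrative values, and the yielded Qs are logarithmically spaced.

from math import ceil, exp, log

qrange, deltam = (4, 64), 0.2  # hypothetical attribute values
cumum = log(qrange[1] / qrange[0]) / 2 ** 0.5
nplanes = int(max(ceil(cumum / deltam), 1))
dq = cumum / nplanes
qs = [qrange[0] * exp(2 ** 0.5 * dq * (i + 0.5)) for i in range(nplanes)]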
thespacedoctor/rockAtlas
rockAtlas/positions/orbfitPositions.py
https://github.com/thespacedoctor/rockAtlas/blob/062ecaa95ab547efda535aa33165944f13c621de/rockAtlas/positions/orbfitPositions.py#L84-L119
def get(self, singleExposure=False): """ *get the orbfitPositions object* **Key Arguments:** - ``singleExposure`` -- only execute for a single exposure (useful for debugging) **Return:** - None **Usage:** See class docstring """ self.log.info('starting the ``get`` method') if singleExposure: batchSize = 1 else: batchSize = int(self.settings["orbfit"]["batch size"]) exposureCount = 1 while exposureCount > 0: expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions( batchSize=batchSize) if exposureCount: orbfitPositions = self._get_orbfit_positions( expsoureObjects, astorbString) self._add_orbfit_eph_to_database( orbfitPositions, expsoureObjects) if singleExposure: exposureCount = 0 self.log.info('completed the ``get`` method') return None
[ "def", "get", "(", "self", ",", "singleExposure", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``get`` method'", ")", "if", "singleExposure", ":", "batchSize", "=", "1", "else", ":", "batchSize", "=", "int", "(", "self", ".", "settings", "[", "\"orbfit\"", "]", "[", "\"batch size\"", "]", ")", "exposureCount", "=", "1", "while", "exposureCount", ">", "0", ":", "expsoureObjects", ",", "astorbString", ",", "exposureCount", "=", "self", ".", "_get_exposures_requiring_orbfit_positions", "(", "batchSize", "=", "batchSize", ")", "if", "exposureCount", ":", "orbfitPositions", "=", "self", ".", "_get_orbfit_positions", "(", "expsoureObjects", ",", "astorbString", ")", "self", ".", "_add_orbfit_eph_to_database", "(", "orbfitPositions", ",", "expsoureObjects", ")", "if", "singleExposure", ":", "exposureCount", "=", "0", "self", ".", "log", ".", "info", "(", "'completed the ``get`` method'", ")", "return", "None" ]
*get the orbfitPositions object* **Key Arguments:** - ``singleExposure`` -- only execute for a single exposure (useful for debugging) **Return:** - None **Usage:** See class docstring
[ "*", "get", "the", "orbfitPositions", "object", "*" ]
python
train
29.777778
flatangle/flatlib
flatlib/ephem/eph.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/ephem/eph.py#L68-L72
def getFixedStar(ID, jd): """ Returns a fixed star. """ star = swe.sweFixedStar(ID, jd) _signInfo(star) return star
[ "def", "getFixedStar", "(", "ID", ",", "jd", ")", ":", "star", "=", "swe", ".", "sweFixedStar", "(", "ID", ",", "jd", ")", "_signInfo", "(", "star", ")", "return", "star" ]
Returns a fixed star.
[ "Returns", "a", "fixed", "star", "." ]
python
train
25.4
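A hypothetical sketch of getFixedStar; the star ID string and Julian day are invented, and per the code above the returned object has already been annotated with sign information by _signInfo.

star = getFixedStar('Regulus', 2458849.5)  # illustrative ID and jd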
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1011-L1023
def _calc_lm_step(self, damped_JTJ, grad, subblock=None): """Calculates a Levenberg-Marquardt step w/o acceleration""" delta0, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5*grad, rcond=self.min_eigval) if self._fresh_JTJ: CLOG.debug('%d degenerate of %d total directions' % ( delta0.size-rank, delta0.size)) if subblock is not None: delta = np.zeros(self.J.shape[0]) delta[subblock] = delta0 else: delta = delta0.copy() return delta
[ "def", "_calc_lm_step", "(", "self", ",", "damped_JTJ", ",", "grad", ",", "subblock", "=", "None", ")", ":", "delta0", ",", "res", ",", "rank", ",", "s", "=", "np", ".", "linalg", ".", "lstsq", "(", "damped_JTJ", ",", "-", "0.5", "*", "grad", ",", "rcond", "=", "self", ".", "min_eigval", ")", "if", "self", ".", "_fresh_JTJ", ":", "CLOG", ".", "debug", "(", "'%d degenerate of %d total directions'", "%", "(", "delta0", ".", "size", "-", "rank", ",", "delta0", ".", "size", ")", ")", "if", "subblock", "is", "not", "None", ":", "delta", "=", "np", ".", "zeros", "(", "self", ".", "J", ".", "shape", "[", "0", "]", ")", "delta", "[", "subblock", "]", "=", "delta0", "else", ":", "delta", "=", "delta0", ".", "copy", "(", ")", "return", "delta" ]
Calculates a Levenberg-Marquardt step w/o acceleration
[ "Calculates", "a", "Levenberg", "-", "Marquard", "step", "w", "/", "o", "acceleration" ]
python
valid
42.538462
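A standalone illustration of the damped least-squares solve inside `_calc_lm_step`, detached from the class so it runs on its own; the toy Jacobian, residuals, and damping value are arbitrary.

import numpy as np

J = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 2.0]])  # toy Jacobian
r = np.array([0.5, -0.2, 0.1])                       # toy residuals
grad = 2.0 * J.T.dot(r)                              # gradient of the sum-of-squares cost
damping = 1e-3
damped_JTJ = J.T.dot(J) + damping * np.eye(2)

# Same convention as above: solve (J^T J + damping*I) delta = -0.5 * grad.
delta, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5 * grad, rcond=None)
print("LM step:", delta, "rank:", rank)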
yfpeng/bioc
bioc/biocjson/decoder.py
https://github.com/yfpeng/bioc/blob/47ddaa010960d9ba673aefe068e7bbaf39f0fff4/bioc/biocjson/decoder.py#L37-L44
def parse_relation(obj: dict) -> BioCRelation: """Deserialize a dict obj to a BioCRelation object""" rel = BioCRelation() rel.id = obj['id'] rel.infons = obj['infons'] for node in obj['nodes']: rel.add_node(BioCNode(node['refid'], node['role'])) return rel
[ "def", "parse_relation", "(", "obj", ":", "dict", ")", "->", "BioCRelation", ":", "rel", "=", "BioCRelation", "(", ")", "rel", ".", "id", "=", "obj", "[", "'id'", "]", "rel", ".", "infons", "=", "obj", "[", "'infons'", "]", "for", "node", "in", "obj", "[", "'nodes'", "]", ":", "rel", ".", "add_node", "(", "BioCNode", "(", "node", "[", "'refid'", "]", ",", "node", "[", "'role'", "]", ")", ")", "return", "rel" ]
Deserialize a dict obj to a BioCRelation object
[ "Deserialize", "a", "dict", "obj", "to", "a", "BioCRelation", "object" ]
python
train
36
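A round-trip sketch for `parse_relation`; the dict layout mirrors the keys the decoder reads above, and the attribute access on the result assumes the usual `BioCNode` fields (`refid`, `role`).

from bioc.biocjson.decoder import parse_relation  # import path taken from this record

obj = {
    'id': 'R1',
    'infons': {'type': 'causes'},
    'nodes': [{'refid': 'T1', 'role': 'cause'},
              {'refid': 'T2', 'role': 'effect'}],
}
rel = parse_relation(obj)
print(rel.id, [(n.refid, n.role) for n in rel.nodes])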
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/buildtasks_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/buildtasks_api.py#L269-L293
def cancel_bbuild(self, build_execution_configuration_id, **kwargs): """ Cancel the build execution defined with given executionConfigurationId. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs) else: (data) = self.cancel_bbuild_with_http_info(build_execution_configuration_id, **kwargs) return data
[ "def", "cancel_bbuild", "(", "self", ",", "build_execution_configuration_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "cancel_bbuild_with_http_info", "(", "build_execution_configuration_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "cancel_bbuild_with_http_info", "(", "build_execution_configuration_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Cancel the build execution defined with given executionConfigurationId. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.cancel_bbuild(build_execution_configuration_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int build_execution_configuration_id: Build Execution Configuration ID. See org.jboss.pnc.spi.executor.BuildExecutionConfiguration. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Cancel", "the", "build", "execution", "defined", "with", "given", "executionConfigurationId", ".", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "cancel_bbuild", "(", "build_execution_configuration_id", "callback", "=", "callback_function", ")" ]
python
train
50.04
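A hedged sketch of the two call styles the docstring describes; the `BuildtasksApi` class name is inferred from this record's module path, client configuration and authentication are omitted, and `42` is a placeholder ID.

from pnc_cli.swagger_client.apis.buildtasks_api import BuildtasksApi  # inferred path

def on_done(response):
    print('cancel finished:', response)

api = BuildtasksApi()                              # assumes default construction
api.cancel_bbuild(42)                              # synchronous: blocks until done
thread = api.cancel_bbuild(42, callback=on_done)   # asynchronous: returns the request thread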
andrenarchy/krypy
krypy/utils.py
https://github.com/andrenarchy/krypy/blob/4883ec9a61d64ea56489e15c35cc40f0633ab2f1/krypy/utils.py#L533-L558
def apply(self, a, return_Ya=False): r"""Apply the projection to an array. The computation is carried out without explicitly forming the matrix corresponding to the projection (which would be an array with ``shape==(N,N)``). See also :py:meth:`_apply`. """ # is projection the zero operator? if self.V.shape[1] == 0: Pa = numpy.zeros(a.shape) if return_Ya: return Pa, numpy.zeros((0, a.shape[1])) return Pa if return_Ya: x, Ya = self._apply(a, return_Ya=return_Ya) else: x = self._apply(a) for i in range(self.iterations-1): z = a - x w = self._apply(z) x = x + w if return_Ya: return x, Ya return x
[ "def", "apply", "(", "self", ",", "a", ",", "return_Ya", "=", "False", ")", ":", "# is projection the zero operator?", "if", "self", ".", "V", ".", "shape", "[", "1", "]", "==", "0", ":", "Pa", "=", "numpy", ".", "zeros", "(", "a", ".", "shape", ")", "if", "return_Ya", ":", "return", "Pa", ",", "numpy", ".", "zeros", "(", "(", "0", ",", "a", ".", "shape", "[", "1", "]", ")", ")", "return", "Pa", "if", "return_Ya", ":", "x", ",", "Ya", "=", "self", ".", "_apply", "(", "a", ",", "return_Ya", "=", "return_Ya", ")", "else", ":", "x", "=", "self", ".", "_apply", "(", "a", ")", "for", "i", "in", "range", "(", "self", ".", "iterations", "-", "1", ")", ":", "z", "=", "a", "-", "x", "w", "=", "self", ".", "_apply", "(", "z", ")", "x", "=", "x", "+", "w", "if", "return_Ya", ":", "return", "x", ",", "Ya", "return", "x" ]
r"""Apply the projection to an array. The computation is carried out without explicitly forming the matrix corresponding to the projection (which would be an array with ``shape==(N,N)``). See also :py:meth:`_apply`.
[ "r", "Apply", "the", "projection", "to", "an", "array", "." ]
python
train
30.923077
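A self-contained analogue of `apply` for an orthogonal projection, showing the refinement loop x <- x + P(a - x) without ever forming the N-by-N projection matrix; `V` here is an arbitrary orthonormal basis, not taken from the source.

import numpy as np

rng = np.random.default_rng(0)
V, _ = np.linalg.qr(rng.random((6, 2)))  # orthonormal basis of the subspace

def project(y):
    # Apply P = V V^T without building the 6x6 matrix.
    return V.dot(V.T.dot(y))

a = rng.random(6)
x = project(a)
for _ in range(2):            # plays the role of self.iterations - 1
    x = x + project(a - x)    # refinement step; a no-op for an exact projection
print(np.linalg.norm(project(a) - x))  # ~0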
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_restserver.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_restserver.py#L27-L35
def mpstatus_to_json(status): '''Translate MPStatus into a JSON string''' msg_keys = list(status.msgs.keys()) data = '{' for key in msg_keys[:-1]: data += mavlink_to_json(status.msgs[key]) + ',' data += mavlink_to_json(status.msgs[msg_keys[-1]]) data += '}' return data
[ "def", "mpstatus_to_json", "(", "status", ")", ":", "msg_keys", "=", "list", "(", "status", ".", "msgs", ".", "keys", "(", ")", ")", "data", "=", "'{'", "for", "key", "in", "msg_keys", "[", ":", "-", "1", "]", ":", "data", "+=", "mavlink_to_json", "(", "status", ".", "msgs", "[", "key", "]", ")", "+", "','", "data", "+=", "mavlink_to_json", "(", "status", ".", "msgs", "[", "msg_keys", "[", "-", "1", "]", "]", ")", "data", "+=", "'}'", "return", "data" ]
Translate MPStatus into a JSON string
[ "Translate", "MPStatus", "in", "json", "string" ]
python
train
32.555556
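A standalone sketch of the string-assembly pattern in `mpstatus_to_json`, with `mavlink_to_json` replaced by a trivial formatter; note that, as written above, the function assumes `status.msgs` is non-empty, since it indexes `msg_keys[-1]`.

msgs = {'HEARTBEAT': 1, 'SYS_STATUS': 2}  # toy message store

def entry_to_json(key, value):
    return '"%s": %s' % (key, value)  # stand-in for mavlink_to_json

msg_keys = list(msgs.keys())
data = '{'
for key in msg_keys[:-1]:
    data += entry_to_json(key, msgs[key]) + ','
data += entry_to_json(msg_keys[-1], msgs[msg_keys[-1]])  # IndexError on an empty dict
data += '}'
print(data)  # {"HEARTBEAT": 1,"SYS_STATUS": 2}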
nfcpy/nfcpy
src/nfc/clf/pn533.py
https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/clf/pn533.py#L360-L366
def send_rsp_recv_cmd(self, target, data, timeout): """While operating as *target* send response *data* to the remote device and return new command data if received within *timeout* seconds. """ return super(Device, self).send_rsp_recv_cmd(target, data, timeout)
[ "def", "send_rsp_recv_cmd", "(", "self", ",", "target", ",", "data", ",", "timeout", ")", ":", "return", "super", "(", "Device", ",", "self", ")", ".", "send_rsp_recv_cmd", "(", "target", ",", "data", ",", "timeout", ")" ]
While operating as *target* send response *data* to the remote device and return new command data if received within *timeout* seconds.
[ "While", "operating", "as", "*", "target", "*", "send", "response", "*", "data", "*", "to", "the", "remote", "device", "and", "return", "new", "command", "data", "if", "received", "within", "*", "timeout", "*", "seconds", "." ]
python
train
42.428571
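A contract-only sketch for `send_rsp_recv_cmd`: the real call needs an activated PN533 target from nfcpy's listen flow, so a stub stands in for the device here and only the documented signature is exercised; the frame bytes are placeholders.

class StubDevice:
    """Mimics only the signature documented above; no NFC hardware involved."""

    def send_rsp_recv_cmd(self, target, data, timeout):
        print('sent %r as target, waiting %.1fs for the next command'
              % (bytes(data), timeout))
        return bytearray(b'\x00')  # pretend the initiator sent one command byte

device = StubDevice()
command = device.send_rsp_recv_cmd(target=None, data=b'\xD5\x01', timeout=1.0)
print('received:', command)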
manns/pyspread
pyspread/src/gui/_grid_cell_editor.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid_cell_editor.py#L194-L212
def EndEdit(self, row, col, grid, oldVal=None): """ End editing the cell. This function must check if the current value of the editing control is valid and different from the original value (available as oldval in its string form.) If it has not changed then simply return None, otherwise return the value in its string form. *Must Override* """ # Mirror our changes onto the main_window's code bar self._tc.Unbind(wx.EVT_KEY_UP) self.ApplyEdit(row, col, grid) del self._col del self._row del self._grid
[ "def", "EndEdit", "(", "self", ",", "row", ",", "col", ",", "grid", ",", "oldVal", "=", "None", ")", ":", "# Mirror our changes onto the main_window's code bar", "self", ".", "_tc", ".", "Unbind", "(", "wx", ".", "EVT_KEY_UP", ")", "self", ".", "ApplyEdit", "(", "row", ",", "col", ",", "grid", ")", "del", "self", ".", "_col", "del", "self", ".", "_row", "del", "self", ".", "_grid" ]
End editing the cell. This function must check if the current value of the editing control is valid and different from the original value (available as oldval in its string form.) If it has not changed then simply return None, otherwise return the value in its string form. *Must Override*
[ "End", "editing", "the", "cell", ".", "This", "function", "must", "check", "if", "the", "current", "value", "of", "the", "editing", "control", "is", "valid", "and", "different", "from", "the", "original", "value", "(", "available", "as", "oldval", "in", "its", "string", "form", ".", ")", "If", "it", "has", "not", "changed", "then", "simply", "return", "None", "otherwise", "return", "the", "value", "in", "its", "string", "form", ".", "*", "Must", "Override", "*" ]
python
train
31.736842
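A minimal, wx-free illustration of the return convention the `EndEdit` docstring describes (return None when the value is unchanged, otherwise return the new value in string form); the concrete override above instead applies the edit directly and returns nothing.

def end_edit(new_val, old_val):
    # Convention from the wx docs quoted above, detached from the grid machinery.
    if str(new_val) == str(old_val):
        return None            # unchanged: the grid keeps the old value
    return str(new_val)        # changed: hand back the string form

print(end_edit('42', '42'))    # -> None
print(end_edit('43', '42'))    # -> '43'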
minio/minio-py
minio/api.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/api.py#L423-L437
def get_bucket_notification(self, bucket_name): """ Get notifications configured for the given bucket. :param bucket_name: Bucket name. """ is_valid_bucket_name(bucket_name) response = self._url_open( "GET", bucket_name=bucket_name, query={"notification": ""}, ) data = response.data.decode('utf-8') return parse_get_bucket_notification(data)
[ "def", "get_bucket_notification", "(", "self", ",", "bucket_name", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "response", "=", "self", ".", "_url_open", "(", "\"GET\"", ",", "bucket_name", "=", "bucket_name", ",", "query", "=", "{", "\"notification\"", ":", "\"\"", "}", ",", ")", "data", "=", "response", ".", "data", ".", "decode", "(", "'utf-8'", ")", "return", "parse_get_bucket_notification", "(", "data", ")" ]
Get notifications configured for the given bucket. :param bucket_name: Bucket name.
[ "Get", "notifications", "configured", "for", "the", "given", "bucket", "." ]
python
train
29.2
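A hedged usage sketch for `get_bucket_notification`; the endpoint, credentials, and bucket name are placeholders to replace with real values.

from minio import Minio

client = Minio('play.min.io',                 # placeholder endpoint
               access_key='YOUR-ACCESSKEY',   # placeholder credentials
               secret_key='YOUR-SECRETKEY')
config = client.get_bucket_notification('my-bucket')  # parsed notification config
print(config)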
globality-corp/microcosm-flask
microcosm_flask/swagger/naming.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/swagger/naming.py#L11-L26
def operation_name(operation, ns): """ Convert an operation, obj(s) pair into a swagger operation id. For compatibility with Bravado, we want to use underscores instead of dots and verb-friendly names. Example: foo.retrieve => client.foo.retrieve() foo.search_for.bar => client.foo.search_for_bars() """ verb = operation.value.name if ns.object_: return "{}_{}".format(verb, pluralize(ns.object_name)) else: return verb
[ "def", "operation_name", "(", "operation", ",", "ns", ")", ":", "verb", "=", "operation", ".", "value", ".", "name", "if", "ns", ".", "object_", ":", "return", "\"{}_{}\"", ".", "format", "(", "verb", ",", "pluralize", "(", "ns", ".", "object_name", ")", ")", "else", ":", "return", "verb" ]
Convert an operation, obj(s) pair into a swagger operation id. For compatibility with Bravado, we want to use underscores instead of dots and verb-friendly names. Example: foo.retrieve => client.foo.retrieve() foo.search_for.bar => client.foo.search_for_bars()
[ "Convert", "an", "operation", "obj", "(", "s", ")", "pair", "into", "a", "swagger", "operation", "id", "." ]
python
train
29.8125
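A standalone sketch of the verb/noun naming rule above; the naive `pluralize` below is a toy stand-in for the real inflection helper the module imports.

def pluralize(word):
    return word + 's'  # toy pluralizer, illustration only

def make_operation_id(verb, object_name=None):
    # Mirrors the rule above: "<verb>_<plural noun>" when an object is present.
    return '{}_{}'.format(verb, pluralize(object_name)) if object_name else verb

print(make_operation_id('search_for', 'bar'))  # -> search_for_bars
print(make_operation_id('retrieve'))           # -> retrieve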
DarkEnergySurvey/ugali
ugali/scratch/simulation/survey_selection_function.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/simulation/survey_selection_function.py#L34-L40
def angToPix(nside, lon, lat, nest=False): """ Input (lon, lat) in degrees instead of (theta, phi) in radians """ theta = np.radians(90. - lat) phi = np.radians(lon) return hp.ang2pix(nside, theta, phi, nest=nest)
[ "def", "angToPix", "(", "nside", ",", "lon", ",", "lat", ",", "nest", "=", "False", ")", ":", "theta", "=", "np", ".", "radians", "(", "90.", "-", "lat", ")", "phi", "=", "np", ".", "radians", "(", "lon", ")", "return", "hp", ".", "ang2pix", "(", "nside", ",", "theta", ",", "phi", ",", "nest", "=", "nest", ")" ]
Input (lon, lat) in degrees instead of (theta, phi) in radians
[ "Input", "(", "lon", "lat", ")", "in", "degrees", "instead", "of", "(", "theta", "phi", ")", "in", "radians" ]
python
train
33
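A direct usage sketch for `angToPix`; it needs healpy and numpy installed, and the example coordinates are arbitrary degrees, not values from the source.

import healpy as hp
import numpy as np

def angToPix(nside, lon, lat, nest=False):
    theta = np.radians(90. - lat)  # colatitude in radians
    phi = np.radians(lon)
    return hp.ang2pix(nside, theta, phi, nest=nest)

print(angToPix(64, lon=266.4, lat=-28.9))  # HEALPix pixel for one (lon, lat) in degrees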