Unnamed: 0
int64 0
10k
| repository_name
stringlengths 7
54
| func_path_in_repository
stringlengths 5
223
| func_name
stringlengths 1
134
| whole_func_string
stringlengths 100
30.3k
| language
stringclasses 1
value | func_code_string
stringlengths 100
30.3k
| func_code_tokens
stringlengths 138
33.2k
| func_documentation_string
stringlengths 1
15k
| func_documentation_tokens
stringlengths 5
5.14k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
|
---|---|---|---|---|---|---|---|---|---|---|---|
1,900 | shawnsilva/steamwebapi | steamwebapi/api.py | ISteamUser.get_player_bans | def get_player_bans(self, steamIDS, format=None):
"""Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | python | def get_player_bans(self, steamIDS, format=None):
"""Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | ['def', 'get_player_bans', '(', 'self', ',', 'steamIDS', ',', 'format', '=', 'None', ')', ':', 'parameters', '=', '{', "'steamids'", ':', 'steamIDS', '}', 'if', 'format', 'is', 'not', 'None', ':', 'parameters', '[', "'format'", ']', '=', 'format', 'url', '=', 'self', '.', 'create_request_url', '(', 'self', '.', 'interface', ',', "'GetPlayerBans'", ',', '1', ',', 'parameters', ')', 'data', '=', 'self', '.', 'retrieve_request', '(', 'url', ')', 'return', 'self', '.', 'return_data', '(', 'data', ',', 'format', '=', 'format', ')'] | Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf) | ['Request', 'the', 'communities', 'a', 'steam', 'id', 'is', 'banned', 'in', '.'] | train | https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/api.py#L122-L135 |
1,901 | mailgun/expiringdict | expiringdict/__init__.py | ExpiringDict.values | def values(self):
""" Return a copy of the dictionary's list of values.
See the note for dict.items(). """
r = []
for key in self._safe_keys():
try:
r.append(self[key])
except KeyError:
pass
return r | python | def values(self):
""" Return a copy of the dictionary's list of values.
See the note for dict.items(). """
r = []
for key in self._safe_keys():
try:
r.append(self[key])
except KeyError:
pass
return r | ['def', 'values', '(', 'self', ')', ':', 'r', '=', '[', ']', 'for', 'key', 'in', 'self', '.', '_safe_keys', '(', ')', ':', 'try', ':', 'r', '.', 'append', '(', 'self', '[', 'key', ']', ')', 'except', 'KeyError', ':', 'pass', 'return', 'r'] | Return a copy of the dictionary's list of values.
See the note for dict.items(). | ['Return', 'a', 'copy', 'of', 'the', 'dictionary', 's', 'list', 'of', 'values', '.', 'See', 'the', 'note', 'for', 'dict', '.', 'items', '()', '.'] | train | https://github.com/mailgun/expiringdict/blob/750048022cde40d35721253a88fbaa2df1781e94/expiringdict/__init__.py#L129-L138 |
1,902 | dlecocq/nsq-py | nsq/response.py | Error.exception | def exception(self):
'''Return an instance of the corresponding exception'''
code, _, message = self.data.partition(' ')
return self.find(code)(message) | python | def exception(self):
'''Return an instance of the corresponding exception'''
code, _, message = self.data.partition(' ')
return self.find(code)(message) | ['def', 'exception', '(', 'self', ')', ':', 'code', ',', '_', ',', 'message', '=', 'self', '.', 'data', '.', 'partition', '(', "' '", ')', 'return', 'self', '.', 'find', '(', 'code', ')', '(', 'message', ')'] | Return an instance of the corresponding exception | ['Return', 'an', 'instance', 'of', 'the', 'corresponding', 'exception'] | train | https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/response.py#L147-L150 |
1,903 | leonidessaguisagjr/pseudol10nutil | pseudol10nutil/pseudol10nutil.py | PseudoL10nUtil.pseudolocalize | def pseudolocalize(self, s):
"""
Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned.
"""
if not s: # If the string is empty or None
return u""
if not isinstance(s, six.text_type):
raise TypeError("String to pseudo-localize must be of type '{0}'.".format(six.text_type.__name__))
# If no transforms are defined, return the string as-is.
if not self.transforms:
return s
fmt_spec = re.compile(
r"""(
{.*?} # https://docs.python.org/3/library/string.html#formatstrings
|
%(?:\(\w+?\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
)""", re.VERBOSE)
# If we don't find any format specifiers in the input string, just munge the entire string at once.
if not fmt_spec.search(s):
result = s
for munge in self.transforms:
result = munge(result)
# If there are format specifiers, we do transliterations on the sections of the string that are not format
# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.
else:
substrings = fmt_spec.split(s)
for munge in self.transforms:
if munge in transforms._transliterations:
for idx in range(len(substrings)):
if not fmt_spec.match(substrings[idx]):
substrings[idx] = munge(substrings[idx])
else:
continue
else:
continue
result = u"".join(substrings)
for munge in self.transforms:
if munge not in transforms._transliterations:
result = munge(result)
return result | python | def pseudolocalize(self, s):
"""
Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned.
"""
if not s: # If the string is empty or None
return u""
if not isinstance(s, six.text_type):
raise TypeError("String to pseudo-localize must be of type '{0}'.".format(six.text_type.__name__))
# If no transforms are defined, return the string as-is.
if not self.transforms:
return s
fmt_spec = re.compile(
r"""(
{.*?} # https://docs.python.org/3/library/string.html#formatstrings
|
%(?:\(\w+?\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
)""", re.VERBOSE)
# If we don't find any format specifiers in the input string, just munge the entire string at once.
if not fmt_spec.search(s):
result = s
for munge in self.transforms:
result = munge(result)
# If there are format specifiers, we do transliterations on the sections of the string that are not format
# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.
else:
substrings = fmt_spec.split(s)
for munge in self.transforms:
if munge in transforms._transliterations:
for idx in range(len(substrings)):
if not fmt_spec.match(substrings[idx]):
substrings[idx] = munge(substrings[idx])
else:
continue
else:
continue
result = u"".join(substrings)
for munge in self.transforms:
if munge not in transforms._transliterations:
result = munge(result)
return result | ['def', 'pseudolocalize', '(', 'self', ',', 's', ')', ':', 'if', 'not', 's', ':', '# If the string is empty or None', 'return', 'u""', 'if', 'not', 'isinstance', '(', 's', ',', 'six', '.', 'text_type', ')', ':', 'raise', 'TypeError', '(', '"String to pseudo-localize must be of type \'{0}\'."', '.', 'format', '(', 'six', '.', 'text_type', '.', '__name__', ')', ')', '# If no transforms are defined, return the string as-is.', 'if', 'not', 'self', '.', 'transforms', ':', 'return', 's', 'fmt_spec', '=', 're', '.', 'compile', '(', 'r"""(\n {.*?} # https://docs.python.org/3/library/string.html#formatstrings\n |\n %(?:\\(\\w+?\\))?.*?[acdeEfFgGiorsuxX%] # https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting\n )"""', ',', 're', '.', 'VERBOSE', ')', "# If we don't find any format specifiers in the input string, just munge the entire string at once.", 'if', 'not', 'fmt_spec', '.', 'search', '(', 's', ')', ':', 'result', '=', 's', 'for', 'munge', 'in', 'self', '.', 'transforms', ':', 'result', '=', 'munge', '(', 'result', ')', '# If there are format specifiers, we do transliterations on the sections of the string that are not format', '# specifiers, then do any other munging (padding the length, adding brackets) on the entire string.', 'else', ':', 'substrings', '=', 'fmt_spec', '.', 'split', '(', 's', ')', 'for', 'munge', 'in', 'self', '.', 'transforms', ':', 'if', 'munge', 'in', 'transforms', '.', '_transliterations', ':', 'for', 'idx', 'in', 'range', '(', 'len', '(', 'substrings', ')', ')', ':', 'if', 'not', 'fmt_spec', '.', 'match', '(', 'substrings', '[', 'idx', ']', ')', ':', 'substrings', '[', 'idx', ']', '=', 'munge', '(', 'substrings', '[', 'idx', ']', ')', 'else', ':', 'continue', 'else', ':', 'continue', 'result', '=', 'u""', '.', 'join', '(', 'substrings', ')', 'for', 'munge', 'in', 'self', '.', 'transforms', ':', 'if', 'munge', 'not', 'in', 'transforms', '.', '_transliterations', ':', 'result', '=', 'munge', '(', 'result', ')', 'return', 'result'] | Performs pseudo-localization on a string. The specific transforms to be
applied to the string is defined in the transforms field of the object.
:param s: String to pseudo-localize.
:returns: Copy of the string s with the transforms applied. If the input
string is an empty string or None, an empty string is returned. | ['Performs', 'pseudo', '-', 'localization', 'on', 'a', 'string', '.', 'The', 'specific', 'transforms', 'to', 'be', 'applied', 'to', 'the', 'string', 'is', 'defined', 'in', 'the', 'transforms', 'field', 'of', 'the', 'object', '.'] | train | https://github.com/leonidessaguisagjr/pseudol10nutil/blob/39cb0ae8cc5c1df5690816a18472e0366a49ab8d/pseudol10nutil/pseudol10nutil.py#L33-L77 |
1,904 | spacetelescope/stsci.tools | lib/stsci/tools/wcsutil.py | WCSObject.createReferenceWCS | def createReferenceWCS(self,refname,overwrite=yes):
""" Write out the values of the WCS keywords to the NEW
specified image 'fitsname'.
"""
hdu = self.createWcsHDU()
# If refname already exists, delete it to make way for new file
if os.path.exists(refname):
if overwrite==yes:
# Remove previous version and re-create with new header
os.remove(refname)
hdu.writeto(refname)
else:
# Append header to existing file
wcs_append = True
oldhdu = fits.open(refname, mode='append')
for e in oldhdu:
if 'extname' in e.header and e.header['extname'] == 'WCS':
wcs_append = False
if wcs_append == True:
oldhdu.append(hdu)
oldhdu.close()
del oldhdu
else:
# No previous file, so generate new one from scratch
hdu.writeto(refname)
# Clean up
del hdu | python | def createReferenceWCS(self,refname,overwrite=yes):
""" Write out the values of the WCS keywords to the NEW
specified image 'fitsname'.
"""
hdu = self.createWcsHDU()
# If refname already exists, delete it to make way for new file
if os.path.exists(refname):
if overwrite==yes:
# Remove previous version and re-create with new header
os.remove(refname)
hdu.writeto(refname)
else:
# Append header to existing file
wcs_append = True
oldhdu = fits.open(refname, mode='append')
for e in oldhdu:
if 'extname' in e.header and e.header['extname'] == 'WCS':
wcs_append = False
if wcs_append == True:
oldhdu.append(hdu)
oldhdu.close()
del oldhdu
else:
# No previous file, so generate new one from scratch
hdu.writeto(refname)
# Clean up
del hdu | ['def', 'createReferenceWCS', '(', 'self', ',', 'refname', ',', 'overwrite', '=', 'yes', ')', ':', 'hdu', '=', 'self', '.', 'createWcsHDU', '(', ')', '# If refname already exists, delete it to make way for new file', 'if', 'os', '.', 'path', '.', 'exists', '(', 'refname', ')', ':', 'if', 'overwrite', '==', 'yes', ':', '# Remove previous version and re-create with new header', 'os', '.', 'remove', '(', 'refname', ')', 'hdu', '.', 'writeto', '(', 'refname', ')', 'else', ':', '# Append header to existing file', 'wcs_append', '=', 'True', 'oldhdu', '=', 'fits', '.', 'open', '(', 'refname', ',', 'mode', '=', "'append'", ')', 'for', 'e', 'in', 'oldhdu', ':', 'if', "'extname'", 'in', 'e', '.', 'header', 'and', 'e', '.', 'header', '[', "'extname'", ']', '==', "'WCS'", ':', 'wcs_append', '=', 'False', 'if', 'wcs_append', '==', 'True', ':', 'oldhdu', '.', 'append', '(', 'hdu', ')', 'oldhdu', '.', 'close', '(', ')', 'del', 'oldhdu', 'else', ':', '# No previous file, so generate new one from scratch', 'hdu', '.', 'writeto', '(', 'refname', ')', '# Clean up', 'del', 'hdu'] | Write out the values of the WCS keywords to the NEW
specified image 'fitsname'. | ['Write', 'out', 'the', 'values', 'of', 'the', 'WCS', 'keywords', 'to', 'the', 'NEW', 'specified', 'image', 'fitsname', '.'] | train | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/wcsutil.py#L1025-L1053 |
1,905 | kensho-technologies/graphql-compiler | graphql_compiler/compiler/sql_context_helpers.py | get_node_at_path | def get_node_at_path(query_path, context):
"""Return the SqlNode associated with the query path."""
if query_path not in context.query_path_to_node:
raise AssertionError(
u'Unable to find SqlNode for query path {} with context {}.'.format(
query_path, context))
node = context.query_path_to_node[query_path]
return node | python | def get_node_at_path(query_path, context):
"""Return the SqlNode associated with the query path."""
if query_path not in context.query_path_to_node:
raise AssertionError(
u'Unable to find SqlNode for query path {} with context {}.'.format(
query_path, context))
node = context.query_path_to_node[query_path]
return node | ['def', 'get_node_at_path', '(', 'query_path', ',', 'context', ')', ':', 'if', 'query_path', 'not', 'in', 'context', '.', 'query_path_to_node', ':', 'raise', 'AssertionError', '(', "u'Unable to find SqlNode for query path {} with context {}.'", '.', 'format', '(', 'query_path', ',', 'context', ')', ')', 'node', '=', 'context', '.', 'query_path_to_node', '[', 'query_path', ']', 'return', 'node'] | Return the SqlNode associated with the query path. | ['Return', 'the', 'SqlNode', 'associated', 'with', 'the', 'query', 'path', '.'] | train | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/sql_context_helpers.py#L27-L34 |
1,906 | materialsproject/pymatgen | pymatgen/analysis/molecule_matcher.py | InchiMolAtomMapper._group_centroid | def _group_centroid(mol, ilabels, group_atoms):
"""
Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z)
"""
c1x, c1y, c1z = 0.0, 0.0, 0.0
for i in group_atoms:
orig_idx = ilabels[i-1]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return c1x, c1y, c1z | python | def _group_centroid(mol, ilabels, group_atoms):
"""
Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z)
"""
c1x, c1y, c1z = 0.0, 0.0, 0.0
for i in group_atoms:
orig_idx = ilabels[i-1]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return c1x, c1y, c1z | ['def', '_group_centroid', '(', 'mol', ',', 'ilabels', ',', 'group_atoms', ')', ':', 'c1x', ',', 'c1y', ',', 'c1z', '=', '0.0', ',', '0.0', ',', '0.0', 'for', 'i', 'in', 'group_atoms', ':', 'orig_idx', '=', 'ilabels', '[', 'i', '-', '1', ']', 'oa1', '=', 'mol', '.', 'GetAtom', '(', 'orig_idx', ')', 'c1x', '+=', 'float', '(', 'oa1', '.', 'x', '(', ')', ')', 'c1y', '+=', 'float', '(', 'oa1', '.', 'y', '(', ')', ')', 'c1z', '+=', 'float', '(', 'oa1', '.', 'z', '(', ')', ')', 'num_atoms', '=', 'len', '(', 'group_atoms', ')', 'c1x', '/=', 'num_atoms', 'c1y', '/=', 'num_atoms', 'c1z', '/=', 'num_atoms', 'return', 'c1x', ',', 'c1y', ',', 'c1z'] | Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z) | ['Calculate', 'the', 'centroids', 'of', 'a', 'group', 'atoms', 'indexed', 'by', 'the', 'labels', 'of', 'inchi'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/molecule_matcher.py#L237-L259 |
1,907 | MacHu-GWU/sqlalchemy_mate-project | sqlalchemy_mate/credential.py | EngineCreator.create_mysql | def create_mysql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql), **kwargs
) | python | def create_mysql(self, **kwargs):
"""
:rtype: Engine
"""
return self._ce(
self._ccs(self.DialectAndDriver.mysql), **kwargs
) | ['def', 'create_mysql', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_ce', '(', 'self', '.', '_ccs', '(', 'self', '.', 'DialectAndDriver', '.', 'mysql', ')', ',', '*', '*', 'kwargs', ')'] | :rtype: Engine | [':', 'rtype', ':', 'Engine'] | train | https://github.com/MacHu-GWU/sqlalchemy_mate-project/blob/946754744c8870f083fd7b4339fca15d1d6128b2/sqlalchemy_mate/credential.py#L362-L368 |
1,908 | mwouts/jupytext | jupytext/cell_metadata.py | json_options_to_metadata | def json_options_to_metadata(options, add_brackets=True):
"""Read metadata from its json representation"""
try:
options = loads('{' + options + '}' if add_brackets else options)
return options
except ValueError:
return {} | python | def json_options_to_metadata(options, add_brackets=True):
"""Read metadata from its json representation"""
try:
options = loads('{' + options + '}' if add_brackets else options)
return options
except ValueError:
return {} | ['def', 'json_options_to_metadata', '(', 'options', ',', 'add_brackets', '=', 'True', ')', ':', 'try', ':', 'options', '=', 'loads', '(', "'{'", '+', 'options', '+', "'}'", 'if', 'add_brackets', 'else', 'options', ')', 'return', 'options', 'except', 'ValueError', ':', 'return', '{', '}'] | Read metadata from its json representation | ['Read', 'metadata', 'from', 'its', 'json', 'representation'] | train | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_metadata.py#L333-L339 |
1,909 | tjcsl/ion | intranet/apps/announcements/views.py | add_announcement_view | def add_announcement_view(request):
"""Add an announcement."""
if request.method == "POST":
form = AnnouncementForm(request.POST)
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
# SAFE HTML
obj.content = safe_html(obj.content)
obj.save()
announcement_posted_hook(request, obj)
messages.success(request, "Successfully added announcement.")
return redirect("index")
else:
messages.error(request, "Error adding announcement")
else:
form = AnnouncementForm()
return render(request, "announcements/add_modify.html", {"form": form, "action": "add"}) | python | def add_announcement_view(request):
"""Add an announcement."""
if request.method == "POST":
form = AnnouncementForm(request.POST)
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
# SAFE HTML
obj.content = safe_html(obj.content)
obj.save()
announcement_posted_hook(request, obj)
messages.success(request, "Successfully added announcement.")
return redirect("index")
else:
messages.error(request, "Error adding announcement")
else:
form = AnnouncementForm()
return render(request, "announcements/add_modify.html", {"form": form, "action": "add"}) | ['def', 'add_announcement_view', '(', 'request', ')', ':', 'if', 'request', '.', 'method', '==', '"POST"', ':', 'form', '=', 'AnnouncementForm', '(', 'request', '.', 'POST', ')', 'logger', '.', 'debug', '(', 'form', ')', 'if', 'form', '.', 'is_valid', '(', ')', ':', 'obj', '=', 'form', '.', 'save', '(', ')', 'obj', '.', 'user', '=', 'request', '.', 'user', '# SAFE HTML', 'obj', '.', 'content', '=', 'safe_html', '(', 'obj', '.', 'content', ')', 'obj', '.', 'save', '(', ')', 'announcement_posted_hook', '(', 'request', ',', 'obj', ')', 'messages', '.', 'success', '(', 'request', ',', '"Successfully added announcement."', ')', 'return', 'redirect', '(', '"index"', ')', 'else', ':', 'messages', '.', 'error', '(', 'request', ',', '"Error adding announcement"', ')', 'else', ':', 'form', '=', 'AnnouncementForm', '(', ')', 'return', 'render', '(', 'request', ',', '"announcements/add_modify.html"', ',', '{', '"form"', ':', 'form', ',', '"action"', ':', '"add"', '}', ')'] | Add an announcement. | ['Add', 'an', 'announcement', '.'] | train | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/announcements/views.py#L268-L286 |
1,910 | Capitains/MyCapytain | MyCapytain/resources/texts/local/capitains/cts.py | CapitainsCtsPassage.next | def next(self):
""" Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
"""
if self.nextId is not None:
return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId) | python | def next(self):
""" Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
"""
if self.nextId is not None:
return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId) | ['def', 'next', '(', 'self', ')', ':', 'if', 'self', '.', 'nextId', 'is', 'not', 'None', ':', 'return', 'super', '(', 'CapitainsCtsPassage', ',', 'self', ')', '.', 'getTextualNode', '(', 'subreference', '=', 'self', '.', 'nextId', ')'] | Next CapitainsCtsPassage (Interactive CapitainsCtsPassage) | ['Next', 'CapitainsCtsPassage', '(', 'Interactive', 'CapitainsCtsPassage', ')'] | train | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/texts/local/capitains/cts.py#L672-L676 |
1,911 | sepandhaghighi/art | art/art.py | tsave | def tsave(
text,
font=DEFAULT_FONT,
filename="art",
chr_ignore=True,
print_status=True):
r"""
Save ascii art (support \n).
:param text: input text
:param font: input font
:type font:str
:type text:str
:param filename: output file name
:type filename:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:param print_status : save message print flag
:type print_status:bool
:return: None
"""
try:
if isinstance(text, str) is False:
raise Exception(TEXT_TYPE_ERROR)
files_list = os.listdir(os.getcwd())
extension = ".txt"
splitted_filename = filename.split(".")
name = splitted_filename[0]
if len(splitted_filename) > 1:
extension = "." + splitted_filename[1]
index = 2
test_name = name
while(True):
if test_name + extension in files_list:
test_name = name + str(index)
index = index + 1
else:
break
if font.lower() in TEST_FILTERED_FONTS:
file = codecs.open(test_name + extension, "w", encoding='utf-8')
else:
file = open(test_name + extension, "w")
result = text2art(text, font=font, chr_ignore=chr_ignore)
file.write(result)
file.close()
if print_status:
print("Saved! \nFilename: " + test_name + extension)
return {"Status": True, "Message": "OK"}
except Exception as e:
return {"Status": False, "Message": str(e)} | python | def tsave(
text,
font=DEFAULT_FONT,
filename="art",
chr_ignore=True,
print_status=True):
r"""
Save ascii art (support \n).
:param text: input text
:param font: input font
:type font:str
:type text:str
:param filename: output file name
:type filename:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:param print_status : save message print flag
:type print_status:bool
:return: None
"""
try:
if isinstance(text, str) is False:
raise Exception(TEXT_TYPE_ERROR)
files_list = os.listdir(os.getcwd())
extension = ".txt"
splitted_filename = filename.split(".")
name = splitted_filename[0]
if len(splitted_filename) > 1:
extension = "." + splitted_filename[1]
index = 2
test_name = name
while(True):
if test_name + extension in files_list:
test_name = name + str(index)
index = index + 1
else:
break
if font.lower() in TEST_FILTERED_FONTS:
file = codecs.open(test_name + extension, "w", encoding='utf-8')
else:
file = open(test_name + extension, "w")
result = text2art(text, font=font, chr_ignore=chr_ignore)
file.write(result)
file.close()
if print_status:
print("Saved! \nFilename: " + test_name + extension)
return {"Status": True, "Message": "OK"}
except Exception as e:
return {"Status": False, "Message": str(e)} | ['def', 'tsave', '(', 'text', ',', 'font', '=', 'DEFAULT_FONT', ',', 'filename', '=', '"art"', ',', 'chr_ignore', '=', 'True', ',', 'print_status', '=', 'True', ')', ':', 'try', ':', 'if', 'isinstance', '(', 'text', ',', 'str', ')', 'is', 'False', ':', 'raise', 'Exception', '(', 'TEXT_TYPE_ERROR', ')', 'files_list', '=', 'os', '.', 'listdir', '(', 'os', '.', 'getcwd', '(', ')', ')', 'extension', '=', '".txt"', 'splitted_filename', '=', 'filename', '.', 'split', '(', '"."', ')', 'name', '=', 'splitted_filename', '[', '0', ']', 'if', 'len', '(', 'splitted_filename', ')', '>', '1', ':', 'extension', '=', '"."', '+', 'splitted_filename', '[', '1', ']', 'index', '=', '2', 'test_name', '=', 'name', 'while', '(', 'True', ')', ':', 'if', 'test_name', '+', 'extension', 'in', 'files_list', ':', 'test_name', '=', 'name', '+', 'str', '(', 'index', ')', 'index', '=', 'index', '+', '1', 'else', ':', 'break', 'if', 'font', '.', 'lower', '(', ')', 'in', 'TEST_FILTERED_FONTS', ':', 'file', '=', 'codecs', '.', 'open', '(', 'test_name', '+', 'extension', ',', '"w"', ',', 'encoding', '=', "'utf-8'", ')', 'else', ':', 'file', '=', 'open', '(', 'test_name', '+', 'extension', ',', '"w"', ')', 'result', '=', 'text2art', '(', 'text', ',', 'font', '=', 'font', ',', 'chr_ignore', '=', 'chr_ignore', ')', 'file', '.', 'write', '(', 'result', ')', 'file', '.', 'close', '(', ')', 'if', 'print_status', ':', 'print', '(', '"Saved! \\nFilename: "', '+', 'test_name', '+', 'extension', ')', 'return', '{', '"Status"', ':', 'True', ',', '"Message"', ':', '"OK"', '}', 'except', 'Exception', 'as', 'e', ':', 'return', '{', '"Status"', ':', 'False', ',', '"Message"', ':', 'str', '(', 'e', ')', '}'] | r"""
Save ascii art (support \n).
:param text: input text
:param font: input font
:type font:str
:type text:str
:param filename: output file name
:type filename:str
:param chr_ignore: ignore not supported character
:type chr_ignore:bool
:param print_status : save message print flag
:type print_status:bool
:return: None | ['r', 'Save', 'ascii', 'art', '(', 'support', '\\', 'n', ')', '.'] | train | https://github.com/sepandhaghighi/art/blob/c5b0409de76464b0714c377f8fca17716f3a9482/art/art.py#L197-L246 |
1,912 | twilio/twilio-python | twilio/rest/authy/v1/service/entity/factor/__init__.py | FactorPage.get_instance | def get_instance(self, payload):
"""
Build an instance of FactorInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.authy.v1.service.entity.factor.FactorInstance
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorInstance
"""
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
) | python | def get_instance(self, payload):
"""
Build an instance of FactorInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.authy.v1.service.entity.factor.FactorInstance
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorInstance
"""
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
) | ['def', 'get_instance', '(', 'self', ',', 'payload', ')', ':', 'return', 'FactorInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'service_sid', '=', 'self', '.', '_solution', '[', "'service_sid'", ']', ',', 'identity', '=', 'self', '.', '_solution', '[', "'identity'", ']', ',', ')'] | Build an instance of FactorInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.authy.v1.service.entity.factor.FactorInstance
:rtype: twilio.rest.authy.v1.service.entity.factor.FactorInstance | ['Build', 'an', 'instance', 'of', 'FactorInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/authy/v1/service/entity/factor/__init__.py#L211-L225 |
1,913 | SpamScope/mail-parser | mailparser/utils.py | fingerprints | def fingerprints(data):
"""
This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
"""
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512) | python | def fingerprints(data):
"""
This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
"""
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512) | ['def', 'fingerprints', '(', 'data', ')', ':', 'Hashes', '=', 'namedtuple', '(', "'Hashes'", ',', '"md5 sha1 sha256 sha512"', ')', 'if', 'six', '.', 'PY2', ':', 'if', 'not', 'isinstance', '(', 'data', ',', 'str', ')', ':', 'data', '=', 'data', '.', 'encode', '(', '"utf-8"', ')', 'elif', 'six', '.', 'PY3', ':', 'if', 'not', 'isinstance', '(', 'data', ',', 'bytes', ')', ':', 'data', '=', 'data', '.', 'encode', '(', '"utf-8"', ')', '# md5', 'md5', '=', 'hashlib', '.', 'md5', '(', ')', 'md5', '.', 'update', '(', 'data', ')', 'md5', '=', 'md5', '.', 'hexdigest', '(', ')', '# sha1', 'sha1', '=', 'hashlib', '.', 'sha1', '(', ')', 'sha1', '.', 'update', '(', 'data', ')', 'sha1', '=', 'sha1', '.', 'hexdigest', '(', ')', '# sha256', 'sha256', '=', 'hashlib', '.', 'sha256', '(', ')', 'sha256', '.', 'update', '(', 'data', ')', 'sha256', '=', 'sha256', '.', 'hexdigest', '(', ')', '# sha512', 'sha512', '=', 'hashlib', '.', 'sha512', '(', ')', 'sha512', '.', 'update', '(', 'data', ')', 'sha512', '=', 'sha512', '.', 'hexdigest', '(', ')', 'return', 'Hashes', '(', 'md5', ',', 'sha1', ',', 'sha256', ',', 'sha512', ')'] | This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512 | ['This', 'function', 'return', 'the', 'fingerprints', 'of', 'data', '.'] | train | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L161-L201 |
1,914 | edx/edx-val | edxval/models.py | ListField.get_prep_value | def get_prep_value(self, value):
"""
Converts a list to its json representation to store in database as text.
"""
if value and not isinstance(value, list):
raise ValidationError(u'ListField value {} is not a list.'.format(value))
return json.dumps(self.validate_list(value) or []) | python | def get_prep_value(self, value):
"""
Converts a list to its json representation to store in database as text.
"""
if value and not isinstance(value, list):
raise ValidationError(u'ListField value {} is not a list.'.format(value))
return json.dumps(self.validate_list(value) or []) | ['def', 'get_prep_value', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'and', 'not', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'raise', 'ValidationError', '(', "u'ListField value {} is not a list.'", '.', 'format', '(', 'value', ')', ')', 'return', 'json', '.', 'dumps', '(', 'self', '.', 'validate_list', '(', 'value', ')', 'or', '[', ']', ')'] | Converts a list to its json representation to store in database as text. | ['Converts', 'a', 'list', 'to', 'its', 'json', 'representation', 'to', 'store', 'in', 'database', 'as', 'text', '.'] | train | https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/models.py#L233-L239 |
1,915 | PyHDI/Pyverilog | pyverilog/vparser/parser.py | VerilogParser.p_generate_named_block | def p_generate_named_block(self, p):
'generate_block : BEGIN COLON ID generate_items END'
p[0] = Block(p[4], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | python | def p_generate_named_block(self, p):
'generate_block : BEGIN COLON ID generate_items END'
p[0] = Block(p[4], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | ['def', 'p_generate_named_block', '(', 'self', ',', 'p', ')', ':', 'p', '[', '0', ']', '=', 'Block', '(', 'p', '[', '4', ']', ',', 'p', '[', '3', ']', ',', 'lineno', '=', 'p', '.', 'lineno', '(', '1', ')', ')', 'p', '.', 'set_lineno', '(', '0', ',', 'p', '.', 'lineno', '(', '1', ')', ')'] | generate_block : BEGIN COLON ID generate_items END | ['generate_block', ':', 'BEGIN', 'COLON', 'ID', 'generate_items', 'END'] | train | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1938-L1941 |
1,916 | pypa/pipenv | pipenv/vendor/distlib/database.py | InstalledDistribution.exports | def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result | python | def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result | ['def', 'exports', '(', 'self', ')', ':', 'result', '=', '{', '}', 'r', '=', 'self', '.', 'get_distinfo_resource', '(', 'EXPORTS_FILENAME', ')', 'if', 'r', ':', 'result', '=', 'self', '.', 'read_exports', '(', ')', 'return', 'result'] | Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name. | ['Return', 'the', 'information', 'exported', 'by', 'this', 'distribution', '.', ':', 'return', ':', 'A', 'dictionary', 'of', 'exports', 'mapping', 'an', 'export', 'category', 'to', 'a', 'dict', 'of', ':', 'class', ':', 'ExportEntry', 'instances', 'describing', 'the', 'individual', 'export', 'entries', 'and', 'keyed', 'by', 'name', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L604-L615 |
1,917 | TkTech/Jawa | jawa/attributes/code.py | CodeAttribute.disassemble | def disassemble(self, *, transforms=None) -> Iterator[Instruction]:
"""
Disassembles this method, yielding an iterable of
:class:`~jawa.util.bytecode.Instruction` objects.
"""
if transforms is None:
if self.cf.classloader:
transforms = self.cf.classloader.bytecode_transforms
else:
transforms = []
transforms = [self._bind_transform(t) for t in transforms]
with io.BytesIO(self._code) as code:
ins_iter = iter(lambda: read_instruction(code, code.tell()), None)
for ins in ins_iter:
for transform in transforms:
ins = transform(ins)
yield ins | python | def disassemble(self, *, transforms=None) -> Iterator[Instruction]:
"""
Disassembles this method, yielding an iterable of
:class:`~jawa.util.bytecode.Instruction` objects.
"""
if transforms is None:
if self.cf.classloader:
transforms = self.cf.classloader.bytecode_transforms
else:
transforms = []
transforms = [self._bind_transform(t) for t in transforms]
with io.BytesIO(self._code) as code:
ins_iter = iter(lambda: read_instruction(code, code.tell()), None)
for ins in ins_iter:
for transform in transforms:
ins = transform(ins)
yield ins | ['def', 'disassemble', '(', 'self', ',', '*', ',', 'transforms', '=', 'None', ')', '->', 'Iterator', '[', 'Instruction', ']', ':', 'if', 'transforms', 'is', 'None', ':', 'if', 'self', '.', 'cf', '.', 'classloader', ':', 'transforms', '=', 'self', '.', 'cf', '.', 'classloader', '.', 'bytecode_transforms', 'else', ':', 'transforms', '=', '[', ']', 'transforms', '=', '[', 'self', '.', '_bind_transform', '(', 't', ')', 'for', 't', 'in', 'transforms', ']', 'with', 'io', '.', 'BytesIO', '(', 'self', '.', '_code', ')', 'as', 'code', ':', 'ins_iter', '=', 'iter', '(', 'lambda', ':', 'read_instruction', '(', 'code', ',', 'code', '.', 'tell', '(', ')', ')', ',', 'None', ')', 'for', 'ins', 'in', 'ins_iter', ':', 'for', 'transform', 'in', 'transforms', ':', 'ins', '=', 'transform', '(', 'ins', ')', 'yield', 'ins'] | Disassembles this method, yielding an iterable of
:class:`~jawa.util.bytecode.Instruction` objects. | ['Disassembles', 'this', 'method', 'yielding', 'an', 'iterable', 'of', ':', 'class', ':', '~jawa', '.', 'util', '.', 'bytecode', '.', 'Instruction', 'objects', '.'] | train | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/attributes/code.py#L123-L141 |
1,918 | ejeschke/ginga | ginga/Bindings.py | BindingMapper.mode_key_up | def mode_key_up(self, viewer, keyname):
"""This method is called when a key is pressed in a mode and was
not handled by some other handler with precedence, such as a
subcanvas.
"""
# Is this a mode key?
if keyname not in self.mode_map:
# <== no
return False
bnch = self.mode_map[keyname]
if self._kbdmode == bnch.name:
# <-- the current mode key is being released
if bnch.type == 'held':
if self._button == 0:
# if no button is being held, then reset mode
self.reset_mode(viewer)
else:
self._delayed_reset = True
return True
return False | python | def mode_key_up(self, viewer, keyname):
"""This method is called when a key is pressed in a mode and was
not handled by some other handler with precedence, such as a
subcanvas.
"""
# Is this a mode key?
if keyname not in self.mode_map:
# <== no
return False
bnch = self.mode_map[keyname]
if self._kbdmode == bnch.name:
# <-- the current mode key is being released
if bnch.type == 'held':
if self._button == 0:
# if no button is being held, then reset mode
self.reset_mode(viewer)
else:
self._delayed_reset = True
return True
return False | ['def', 'mode_key_up', '(', 'self', ',', 'viewer', ',', 'keyname', ')', ':', '# Is this a mode key?', 'if', 'keyname', 'not', 'in', 'self', '.', 'mode_map', ':', '# <== no', 'return', 'False', 'bnch', '=', 'self', '.', 'mode_map', '[', 'keyname', ']', 'if', 'self', '.', '_kbdmode', '==', 'bnch', '.', 'name', ':', '# <-- the current mode key is being released', 'if', 'bnch', '.', 'type', '==', "'held'", ':', 'if', 'self', '.', '_button', '==', '0', ':', '# if no button is being held, then reset mode', 'self', '.', 'reset_mode', '(', 'viewer', ')', 'else', ':', 'self', '.', '_delayed_reset', '=', 'True', 'return', 'True', 'return', 'False'] | This method is called when a key is pressed in a mode and was
not handled by some other handler with precedence, such as a
subcanvas. | ['This', 'method', 'is', 'called', 'when', 'a', 'key', 'is', 'pressed', 'in', 'a', 'mode', 'and', 'was', 'not', 'handled', 'by', 'some', 'other', 'handler', 'with', 'precedence', 'such', 'as', 'a', 'subcanvas', '.'] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L2706-L2727 |
1,919 | roaet/eh | eh/mdv/tabulate.py | _main | def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:f:",
["help", "header", "output", "sep=", "float=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = "g"
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out) | python | def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:f:",
["help", "header", "output", "sep=", "float=", "format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = "g"
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out) | ['def', '_main', '(', ')', ':', 'import', 'getopt', 'import', 'sys', 'import', 'textwrap', 'usage', '=', 'textwrap', '.', 'dedent', '(', '_main', '.', '__doc__', ')', 'try', ':', 'opts', ',', 'args', '=', 'getopt', '.', 'getopt', '(', 'sys', '.', 'argv', '[', '1', ':', ']', ',', '"h1o:s:F:f:"', ',', '[', '"help"', ',', '"header"', ',', '"output"', ',', '"sep="', ',', '"float="', ',', '"format="', ']', ')', 'except', 'getopt', '.', 'GetoptError', 'as', 'e', ':', 'print', '(', 'e', ')', 'print', '(', 'usage', ')', 'sys', '.', 'exit', '(', '2', ')', 'headers', '=', '[', ']', 'floatfmt', '=', '"g"', 'tablefmt', '=', '"simple"', 'sep', '=', 'r"\\s+"', 'outfile', '=', '"-"', 'for', 'opt', ',', 'value', 'in', 'opts', ':', 'if', 'opt', 'in', '[', '"-1"', ',', '"--header"', ']', ':', 'headers', '=', '"firstrow"', 'elif', 'opt', 'in', '[', '"-o"', ',', '"--output"', ']', ':', 'outfile', '=', 'value', 'elif', 'opt', 'in', '[', '"-F"', ',', '"--float"', ']', ':', 'floatfmt', '=', 'value', 'elif', 'opt', 'in', '[', '"-f"', ',', '"--format"', ']', ':', 'if', 'value', 'not', 'in', 'tabulate_formats', ':', 'print', '(', '"%s is not a supported table format"', '%', 'value', ')', 'print', '(', 'usage', ')', 'sys', '.', 'exit', '(', '3', ')', 'tablefmt', '=', 'value', 'elif', 'opt', 'in', '[', '"-s"', ',', '"--sep"', ']', ':', 'sep', '=', 'value', 'elif', 'opt', 'in', '[', '"-h"', ',', '"--help"', ']', ':', 'print', '(', 'usage', ')', 'sys', '.', 'exit', '(', '0', ')', 'files', '=', '[', 'sys', '.', 'stdin', ']', 'if', 'not', 'args', 'else', 'args', 'with', '(', 'sys', '.', 'stdout', 'if', 'outfile', '==', '"-"', 'else', 'open', '(', 'outfile', ',', '"w"', ')', ')', 'as', 'out', ':', 'for', 'f', 'in', 'files', ':', 'if', 'f', '==', '"-"', ':', 'f', '=', 'sys', '.', 'stdin', 'if', '_is_file', '(', 'f', ')', ':', '_pprint_file', '(', 'f', ',', 'headers', '=', 'headers', ',', 'tablefmt', '=', 'tablefmt', ',', 'sep', '=', 'sep', ',', 'floatfmt', '=', 'floatfmt', ',', 'file', '=', 'out', ')', 'else', ':', 'with', 'open', '(', 'f', ')', 'as', 'fobj', ':', '_pprint_file', '(', 'fobj', ',', 'headers', '=', 'headers', ',', 'tablefmt', '=', 'tablefmt', ',', 'sep', '=', 'sep', ',', 'floatfmt', '=', 'floatfmt', ',', 'file', '=', 'out', ')'] | \
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple) | ['\\', 'Usage', ':', 'tabulate', '[', 'options', ']', '[', 'FILE', '...', ']'] | train | https://github.com/roaet/eh/blob/9370864a9f1d65bb0f822d0aea83f1169c98f3bd/eh/mdv/tabulate.py#L1056-L1124 |
1,920 | blockstack/virtualchain | virtualchain/lib/indexer.py | get_index_range | def get_index_range(blockchain_name, blockchain_client, impl, working_dir):
"""
Get the range of block numbers that we need to fetch from the blockchain.
Requires virtualchain to have been configured with setup_virtualchain() if impl=None
Return None, None if we fail to connect to the blockchain
"""
start_block = config.get_first_block_id(impl)
try:
current_block = get_blockchain_height(blockchain_name, blockchain_client)
except Exception, e:
log.exception(e)
return None, None
saved_block = StateEngine.get_lastblock(impl, working_dir)
if saved_block is None:
saved_block = 0
elif saved_block == current_block:
start_block = saved_block
elif saved_block < current_block:
start_block = saved_block + 1
return start_block, current_block | python | def get_index_range(blockchain_name, blockchain_client, impl, working_dir):
"""
Get the range of block numbers that we need to fetch from the blockchain.
Requires virtualchain to have been configured with setup_virtualchain() if impl=None
Return None, None if we fail to connect to the blockchain
"""
start_block = config.get_first_block_id(impl)
try:
current_block = get_blockchain_height(blockchain_name, blockchain_client)
except Exception, e:
log.exception(e)
return None, None
saved_block = StateEngine.get_lastblock(impl, working_dir)
if saved_block is None:
saved_block = 0
elif saved_block == current_block:
start_block = saved_block
elif saved_block < current_block:
start_block = saved_block + 1
return start_block, current_block | ['def', 'get_index_range', '(', 'blockchain_name', ',', 'blockchain_client', ',', 'impl', ',', 'working_dir', ')', ':', 'start_block', '=', 'config', '.', 'get_first_block_id', '(', 'impl', ')', 'try', ':', 'current_block', '=', 'get_blockchain_height', '(', 'blockchain_name', ',', 'blockchain_client', ')', 'except', 'Exception', ',', 'e', ':', 'log', '.', 'exception', '(', 'e', ')', 'return', 'None', ',', 'None', 'saved_block', '=', 'StateEngine', '.', 'get_lastblock', '(', 'impl', ',', 'working_dir', ')', 'if', 'saved_block', 'is', 'None', ':', 'saved_block', '=', '0', 'elif', 'saved_block', '==', 'current_block', ':', 'start_block', '=', 'saved_block', 'elif', 'saved_block', '<', 'current_block', ':', 'start_block', '=', 'saved_block', '+', '1', 'return', 'start_block', ',', 'current_block'] | Get the range of block numbers that we need to fetch from the blockchain.
Requires virtualchain to have been configured with setup_virtualchain() if impl=None
Return None, None if we fail to connect to the blockchain | ['Get', 'the', 'range', 'of', 'block', 'numbers', 'that', 'we', 'need', 'to', 'fetch', 'from', 'the', 'blockchain', '.', 'Requires', 'virtualchain', 'to', 'have', 'been', 'configured', 'with', 'setup_virtualchain', '()', 'if', 'impl', '=', 'None', 'Return', 'None', 'None', 'if', 'we', 'fail', 'to', 'connect', 'to', 'the', 'blockchain'] | train | https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/indexer.py#L1617-L1641 |
1,921 | google/prettytensor | prettytensor/recurrent_networks.py | RecurrentRunner.run | def run(self, fetch_list, feed_dict=None, sess=None):
"""Runs the graph with the provided feeds and fetches.
This function wraps sess.Run(), but takes care of state saving and
restoring by feeding in states and storing the new state values.
Args:
fetch_list: A list of requested output tensors.
feed_dict: A dictionary of feeds - see Session.Run(). Optional.
sess: The Tensorflow session to run. Can be None.
Returns:
The requested tensors as numpy arrays.
Raises:
ValueError: If the default graph during object construction was
different from the current default graph.
"""
if tf.get_default_graph() != self._graph:
raise ValueError('The current default graph is different from the graph'
' used at construction time of RecurrentRunner.')
if feed_dict is None:
all_feeds_dict = {}
else:
all_feeds_dict = dict(feed_dict)
all_feeds_dict.update(self._state_feeds)
all_fetches_list = list(fetch_list)
all_fetches_list += self._state_fetches
sess = sess or tf.get_default_session()
# Run the compute graph.
fetches = sess.run(all_fetches_list, all_feeds_dict)
# Update the feeds for the next time step.
states = fetches[len(fetch_list):]
for i, s in enumerate(states):
self._state_feeds[self._state_feed_names[i]] = s
return fetches[:len(fetch_list)] | python | def run(self, fetch_list, feed_dict=None, sess=None):
"""Runs the graph with the provided feeds and fetches.
This function wraps sess.Run(), but takes care of state saving and
restoring by feeding in states and storing the new state values.
Args:
fetch_list: A list of requested output tensors.
feed_dict: A dictionary of feeds - see Session.Run(). Optional.
sess: The Tensorflow session to run. Can be None.
Returns:
The requested tensors as numpy arrays.
Raises:
ValueError: If the default graph during object construction was
different from the current default graph.
"""
if tf.get_default_graph() != self._graph:
raise ValueError('The current default graph is different from the graph'
' used at construction time of RecurrentRunner.')
if feed_dict is None:
all_feeds_dict = {}
else:
all_feeds_dict = dict(feed_dict)
all_feeds_dict.update(self._state_feeds)
all_fetches_list = list(fetch_list)
all_fetches_list += self._state_fetches
sess = sess or tf.get_default_session()
# Run the compute graph.
fetches = sess.run(all_fetches_list, all_feeds_dict)
# Update the feeds for the next time step.
states = fetches[len(fetch_list):]
for i, s in enumerate(states):
self._state_feeds[self._state_feed_names[i]] = s
return fetches[:len(fetch_list)] | ['def', 'run', '(', 'self', ',', 'fetch_list', ',', 'feed_dict', '=', 'None', ',', 'sess', '=', 'None', ')', ':', 'if', 'tf', '.', 'get_default_graph', '(', ')', '!=', 'self', '.', '_graph', ':', 'raise', 'ValueError', '(', "'The current default graph is different from the graph'", "' used at construction time of RecurrentRunner.'", ')', 'if', 'feed_dict', 'is', 'None', ':', 'all_feeds_dict', '=', '{', '}', 'else', ':', 'all_feeds_dict', '=', 'dict', '(', 'feed_dict', ')', 'all_feeds_dict', '.', 'update', '(', 'self', '.', '_state_feeds', ')', 'all_fetches_list', '=', 'list', '(', 'fetch_list', ')', 'all_fetches_list', '+=', 'self', '.', '_state_fetches', 'sess', '=', 'sess', 'or', 'tf', '.', 'get_default_session', '(', ')', '# Run the compute graph.', 'fetches', '=', 'sess', '.', 'run', '(', 'all_fetches_list', ',', 'all_feeds_dict', ')', '# Update the feeds for the next time step.', 'states', '=', 'fetches', '[', 'len', '(', 'fetch_list', ')', ':', ']', 'for', 'i', ',', 's', 'in', 'enumerate', '(', 'states', ')', ':', 'self', '.', '_state_feeds', '[', 'self', '.', '_state_feed_names', '[', 'i', ']', ']', '=', 's', 'return', 'fetches', '[', ':', 'len', '(', 'fetch_list', ')', ']'] | Runs the graph with the provided feeds and fetches.
This function wraps sess.Run(), but takes care of state saving and
restoring by feeding in states and storing the new state values.
Args:
fetch_list: A list of requested output tensors.
feed_dict: A dictionary of feeds - see Session.Run(). Optional.
sess: The Tensorflow session to run. Can be None.
Returns:
The requested tensors as numpy arrays.
Raises:
ValueError: If the default graph during object construction was
different from the current default graph. | ['Runs', 'the', 'graph', 'with', 'the', 'provided', 'feeds', 'and', 'fetches', '.'] | train | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/recurrent_networks.py#L599-L634 |
1,922 | tensorflow/tensor2tensor | tensor2tensor/models/revnet.py | revnet_cifar_base | def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams | python | def revnet_cifar_base():
"""Tiny hparams suitable for CIFAR/etc."""
hparams = revnet_base()
hparams.num_channels_init_block = 32
hparams.first_batch_norm = [False, True, True]
hparams.init_stride = 1
hparams.init_kernel_size = 3
hparams.init_maxpool = False
hparams.strides = [1, 2, 2]
hparams.batch_size = 128
hparams.weight_decay = 1e-4
hparams.learning_rate = 0.1
hparams.learning_rate_cosine_cycle_steps = 5000
return hparams | ['def', 'revnet_cifar_base', '(', ')', ':', 'hparams', '=', 'revnet_base', '(', ')', 'hparams', '.', 'num_channels_init_block', '=', '32', 'hparams', '.', 'first_batch_norm', '=', '[', 'False', ',', 'True', ',', 'True', ']', 'hparams', '.', 'init_stride', '=', '1', 'hparams', '.', 'init_kernel_size', '=', '3', 'hparams', '.', 'init_maxpool', '=', 'False', 'hparams', '.', 'strides', '=', '[', '1', ',', '2', ',', '2', ']', 'hparams', '.', 'batch_size', '=', '128', 'hparams', '.', 'weight_decay', '=', '1e-4', 'hparams', '.', 'learning_rate', '=', '0.1', 'hparams', '.', 'learning_rate_cosine_cycle_steps', '=', '5000', 'return', 'hparams'] | Tiny hparams suitable for CIFAR/etc. | ['Tiny', 'hparams', 'suitable', 'for', 'CIFAR', '/', 'etc', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L386-L400 |
1,923 | openid/python-openid | openid/oidutil.py | importElementTree | def importElementTree(module_names=None):
"""Find a working ElementTree implementation, trying the standard
places that such a thing might show up.
>>> ElementTree = importElementTree()
@param module_names: The names of modules to try to use as
ElementTree. Defaults to C{L{elementtree_modules}}
@returns: An ElementTree module
"""
if module_names is None:
module_names = elementtree_modules
for mod_name in module_names:
try:
ElementTree = __import__(mod_name, None, None, ['unused'])
except ImportError:
pass
else:
# Make sure it can actually parse XML
try:
ElementTree.XML('<unused/>')
except (SystemExit, MemoryError, AssertionError):
raise
except:
logging.exception('Not using ElementTree library %r because it failed to '
'parse a trivial document: %s' % mod_name)
else:
return ElementTree
else:
raise ImportError('No ElementTree library found. '
'You may need to install one. '
'Tried importing %r' % (module_names,)
) | python | def importElementTree(module_names=None):
"""Find a working ElementTree implementation, trying the standard
places that such a thing might show up.
>>> ElementTree = importElementTree()
@param module_names: The names of modules to try to use as
ElementTree. Defaults to C{L{elementtree_modules}}
@returns: An ElementTree module
"""
if module_names is None:
module_names = elementtree_modules
for mod_name in module_names:
try:
ElementTree = __import__(mod_name, None, None, ['unused'])
except ImportError:
pass
else:
# Make sure it can actually parse XML
try:
ElementTree.XML('<unused/>')
except (SystemExit, MemoryError, AssertionError):
raise
except:
logging.exception('Not using ElementTree library %r because it failed to '
'parse a trivial document: %s' % mod_name)
else:
return ElementTree
else:
raise ImportError('No ElementTree library found. '
'You may need to install one. '
'Tried importing %r' % (module_names,)
) | ['def', 'importElementTree', '(', 'module_names', '=', 'None', ')', ':', 'if', 'module_names', 'is', 'None', ':', 'module_names', '=', 'elementtree_modules', 'for', 'mod_name', 'in', 'module_names', ':', 'try', ':', 'ElementTree', '=', '__import__', '(', 'mod_name', ',', 'None', ',', 'None', ',', '[', "'unused'", ']', ')', 'except', 'ImportError', ':', 'pass', 'else', ':', '# Make sure it can actually parse XML', 'try', ':', 'ElementTree', '.', 'XML', '(', "'<unused/>'", ')', 'except', '(', 'SystemExit', ',', 'MemoryError', ',', 'AssertionError', ')', ':', 'raise', 'except', ':', 'logging', '.', 'exception', '(', "'Not using ElementTree library %r because it failed to '", "'parse a trivial document: %s'", '%', 'mod_name', ')', 'else', ':', 'return', 'ElementTree', 'else', ':', 'raise', 'ImportError', '(', "'No ElementTree library found. '", "'You may need to install one. '", "'Tried importing %r'", '%', '(', 'module_names', ',', ')', ')'] | Find a working ElementTree implementation, trying the standard
places that such a thing might show up.
>>> ElementTree = importElementTree()
@param module_names: The names of modules to try to use as
ElementTree. Defaults to C{L{elementtree_modules}}
@returns: An ElementTree module | ['Find', 'a', 'working', 'ElementTree', 'implementation', 'trying', 'the', 'standard', 'places', 'that', 'such', 'a', 'thing', 'might', 'show', 'up', '.'] | train | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/oidutil.py#L55-L89 |
1,924 | antiboredom/videogrep | videogrep/tools/getyoutubecc.py | getyoutubecc._parseXml | def _parseXml(self,cc):
""" INPUT: XML file with captions
OUTPUT: parsed object like:
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
"""
htmlpar = HTMLParser.HTMLParser()
cc = cc.split("</text>") # ['<text start="2997.929">So, it will\nhas time', '<text start="3000.929">blah', ..]
captions = []
for line in cc:
if re.search('text', line):
time = re.search(r'start="(\d+)(?:\.(\d+)){0,1}', line).groups() # ('2997','929')
time = ( int(time[0]), int(0 if not time[1] else time[1]) )
#convert seconds and millisec to int
text = re.search(r'">(.*)', line, re.DOTALL).group(1) # extract text i.e. 'So, it will\nhas time'
textlines = [ htmlpar.unescape(htmlpar.unescape( unicode(lineunparsed,"utf-8") )) for lineunparsed in text.split('\n') ]
#unscape chars like & or '
ntime = {'hours':time[0]/3600,"min":time[0]%3600/60,"sec":time[0]%3600%60,"msec":time[1]}
captions.append({'time':ntime,'textlines':textlines})
return captions | python | def _parseXml(self,cc):
""" INPUT: XML file with captions
OUTPUT: parsed object like:
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
"""
htmlpar = HTMLParser.HTMLParser()
cc = cc.split("</text>") # ['<text start="2997.929">So, it will\nhas time', '<text start="3000.929">blah', ..]
captions = []
for line in cc:
if re.search('text', line):
time = re.search(r'start="(\d+)(?:\.(\d+)){0,1}', line).groups() # ('2997','929')
time = ( int(time[0]), int(0 if not time[1] else time[1]) )
#convert seconds and millisec to int
text = re.search(r'">(.*)', line, re.DOTALL).group(1) # extract text i.e. 'So, it will\nhas time'
textlines = [ htmlpar.unescape(htmlpar.unescape( unicode(lineunparsed,"utf-8") )) for lineunparsed in text.split('\n') ]
#unscape chars like & or '
ntime = {'hours':time[0]/3600,"min":time[0]%3600/60,"sec":time[0]%3600%60,"msec":time[1]}
captions.append({'time':ntime,'textlines':textlines})
return captions | ['def', '_parseXml', '(', 'self', ',', 'cc', ')', ':', 'htmlpar', '=', 'HTMLParser', '.', 'HTMLParser', '(', ')', 'cc', '=', 'cc', '.', 'split', '(', '"</text>"', ')', '# [\'<text start="2997.929">So, it will\\nhas time\', \'<text start="3000.929">blah\', ..]', 'captions', '=', '[', ']', 'for', 'line', 'in', 'cc', ':', 'if', 're', '.', 'search', '(', "'text'", ',', 'line', ')', ':', 'time', '=', 're', '.', 'search', '(', 'r\'start="(\\d+)(?:\\.(\\d+)){0,1}\'', ',', 'line', ')', '.', 'groups', '(', ')', "# ('2997','929')", 'time', '=', '(', 'int', '(', 'time', '[', '0', ']', ')', ',', 'int', '(', '0', 'if', 'not', 'time', '[', '1', ']', 'else', 'time', '[', '1', ']', ')', ')', '#convert seconds and millisec to int', 'text', '=', 're', '.', 'search', '(', 'r\'">(.*)\'', ',', 'line', ',', 're', '.', 'DOTALL', ')', '.', 'group', '(', '1', ')', "# extract text i.e. 'So, it will\\nhas time'", 'textlines', '=', '[', 'htmlpar', '.', 'unescape', '(', 'htmlpar', '.', 'unescape', '(', 'unicode', '(', 'lineunparsed', ',', '"utf-8"', ')', ')', ')', 'for', 'lineunparsed', 'in', 'text', '.', 'split', '(', "'\\n'", ')', ']', '#unscape chars like & or '', 'ntime', '=', '{', "'hours'", ':', 'time', '[', '0', ']', '/', '3600', ',', '"min"', ':', 'time', '[', '0', ']', '%', '3600', '/', '60', ',', '"sec"', ':', 'time', '[', '0', ']', '%', '3600', '%', '60', ',', '"msec"', ':', 'time', '[', '1', ']', '}', 'captions', '.', 'append', '(', '{', "'time'", ':', 'ntime', ',', "'textlines'", ':', 'textlines', '}', ')', 'return', 'captions'] | INPUT: XML file with captions
OUTPUT: parsed object like:
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }] | ['INPUT', ':', 'XML', 'file', 'with', 'captions', 'OUTPUT', ':', 'parsed', 'object', 'like', ':', '[', '{', 'texlines', ':', '[', 'u', 'So', 'I', 'm', 'going', 'to', 'rewrite', 'this', 'in', 'a', 'more', 'concise', 'form', 'as', ']', 'time', ':', '{', 'hours', ':', '1', 'min', ':', '2', 'sec', ':', '44', 'msec', ':', '232', '}', '}', ']'] | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/tools/getyoutubecc.py#L57-L76 |
1,925 | pescadores/pescador | pescador/maps.py | keras_tuples | def keras_tuples(stream, inputs=None, outputs=None):
"""Reformat data objects as keras-compatible tuples.
For more detail: https://keras.io/models/model/#fit
Parameters
----------
stream : iterable
Stream of data objects.
inputs : string or iterable of strings, None
Keys to use for ordered input data.
If not specified, returns `None` in its place.
outputs : string or iterable of strings, default=None
Keys to use for ordered output data.
If not specified, returns `None` in its place.
Yields
------
x : np.ndarray, list of np.ndarray, or None
If `inputs` is a string, `x` is a single np.ndarray.
If `inputs` is an iterable of strings, `x` is a list of np.ndarrays.
If `inputs` is a null type, `x` is None.
y : np.ndarray, list of np.ndarray, or None
If `outputs` is a string, `y` is a single np.ndarray.
If `outputs` is an iterable of strings, `y` is a list of np.ndarrays.
If `outputs` is a null type, `y` is None.
Raises
------
DataError
If the stream contains items that are not data-like.
"""
flatten_inputs, flatten_outputs = False, False
if inputs and isinstance(inputs, six.string_types):
inputs = [inputs]
flatten_inputs = True
if outputs and isinstance(outputs, six.string_types):
outputs = [outputs]
flatten_outputs = True
inputs, outputs = (inputs or []), (outputs or [])
if not inputs + outputs:
raise PescadorError('At least one key must be given for '
'`inputs` or `outputs`')
for data in stream:
try:
x = list(data[key] for key in inputs) or None
if len(inputs) == 1 and flatten_inputs:
x = x[0]
y = list(data[key] for key in outputs) or None
if len(outputs) == 1 and flatten_outputs:
y = y[0]
yield (x, y)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) | python | def keras_tuples(stream, inputs=None, outputs=None):
"""Reformat data objects as keras-compatible tuples.
For more detail: https://keras.io/models/model/#fit
Parameters
----------
stream : iterable
Stream of data objects.
inputs : string or iterable of strings, None
Keys to use for ordered input data.
If not specified, returns `None` in its place.
outputs : string or iterable of strings, default=None
Keys to use for ordered output data.
If not specified, returns `None` in its place.
Yields
------
x : np.ndarray, list of np.ndarray, or None
If `inputs` is a string, `x` is a single np.ndarray.
If `inputs` is an iterable of strings, `x` is a list of np.ndarrays.
If `inputs` is a null type, `x` is None.
y : np.ndarray, list of np.ndarray, or None
If `outputs` is a string, `y` is a single np.ndarray.
If `outputs` is an iterable of strings, `y` is a list of np.ndarrays.
If `outputs` is a null type, `y` is None.
Raises
------
DataError
If the stream contains items that are not data-like.
"""
flatten_inputs, flatten_outputs = False, False
if inputs and isinstance(inputs, six.string_types):
inputs = [inputs]
flatten_inputs = True
if outputs and isinstance(outputs, six.string_types):
outputs = [outputs]
flatten_outputs = True
inputs, outputs = (inputs or []), (outputs or [])
if not inputs + outputs:
raise PescadorError('At least one key must be given for '
'`inputs` or `outputs`')
for data in stream:
try:
x = list(data[key] for key in inputs) or None
if len(inputs) == 1 and flatten_inputs:
x = x[0]
y = list(data[key] for key in outputs) or None
if len(outputs) == 1 and flatten_outputs:
y = y[0]
yield (x, y)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) | ['def', 'keras_tuples', '(', 'stream', ',', 'inputs', '=', 'None', ',', 'outputs', '=', 'None', ')', ':', 'flatten_inputs', ',', 'flatten_outputs', '=', 'False', ',', 'False', 'if', 'inputs', 'and', 'isinstance', '(', 'inputs', ',', 'six', '.', 'string_types', ')', ':', 'inputs', '=', '[', 'inputs', ']', 'flatten_inputs', '=', 'True', 'if', 'outputs', 'and', 'isinstance', '(', 'outputs', ',', 'six', '.', 'string_types', ')', ':', 'outputs', '=', '[', 'outputs', ']', 'flatten_outputs', '=', 'True', 'inputs', ',', 'outputs', '=', '(', 'inputs', 'or', '[', ']', ')', ',', '(', 'outputs', 'or', '[', ']', ')', 'if', 'not', 'inputs', '+', 'outputs', ':', 'raise', 'PescadorError', '(', "'At least one key must be given for '", "'`inputs` or `outputs`'", ')', 'for', 'data', 'in', 'stream', ':', 'try', ':', 'x', '=', 'list', '(', 'data', '[', 'key', ']', 'for', 'key', 'in', 'inputs', ')', 'or', 'None', 'if', 'len', '(', 'inputs', ')', '==', '1', 'and', 'flatten_inputs', ':', 'x', '=', 'x', '[', '0', ']', 'y', '=', 'list', '(', 'data', '[', 'key', ']', 'for', 'key', 'in', 'outputs', ')', 'or', 'None', 'if', 'len', '(', 'outputs', ')', '==', '1', 'and', 'flatten_outputs', ':', 'y', '=', 'y', '[', '0', ']', 'yield', '(', 'x', ',', 'y', ')', 'except', 'TypeError', ':', 'raise', 'DataError', '(', '"Malformed data stream: {}"', '.', 'format', '(', 'data', ')', ')'] | Reformat data objects as keras-compatible tuples.
For more detail: https://keras.io/models/model/#fit
Parameters
----------
stream : iterable
Stream of data objects.
inputs : string or iterable of strings, None
Keys to use for ordered input data.
If not specified, returns `None` in its place.
outputs : string or iterable of strings, default=None
Keys to use for ordered output data.
If not specified, returns `None` in its place.
Yields
------
x : np.ndarray, list of np.ndarray, or None
If `inputs` is a string, `x` is a single np.ndarray.
If `inputs` is an iterable of strings, `x` is a list of np.ndarrays.
If `inputs` is a null type, `x` is None.
y : np.ndarray, list of np.ndarray, or None
If `outputs` is a string, `y` is a single np.ndarray.
If `outputs` is an iterable of strings, `y` is a list of np.ndarrays.
If `outputs` is a null type, `y` is None.
Raises
------
DataError
If the stream contains items that are not data-like. | ['Reformat', 'data', 'objects', 'as', 'keras', '-', 'compatible', 'tuples', '.'] | train | https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/pescador/maps.py#L120-L179 |
1,926 | hovren/crisp | crisp/calibration.py | AutoCalibrator.calibrate | def calibrate(self, max_tracks=MAX_OPTIMIZATION_TRACKS, max_eval=MAX_OPTIMIZATION_FEV, norm_c=DEFAULT_NORM_C):
"""Perform calibration
Parameters
----------------------
max_eval : int
Maximum number of function evaluations
Returns
---------------------
dict
Optimization result
Raises
-----------------------
CalibrationError
If calibration fails
"""
x0 = np.array([self.parameter[param] for param in PARAM_ORDER])
available_tracks = np.sum([len(s.inliers) for s in self.slices])
if available_tracks < max_tracks:
warnings.warn("Could not use the requested {} tracks, since only {} were available in the slice data.".format(max_tracks, available_tracks))
max_tracks = available_tracks
# Get subset of available tracks such that all slices are still used
slice_sample_idxs = videoslice.fill_sampling(self.slices, max_tracks)
func_args = (self.slices, slice_sample_idxs, self.video.camera_model, self.gyro, norm_c)
self.slice_sample_idxs = slice_sample_idxs
logger.debug("Starting optimization on {:d} slices and {:d} tracks".format(len(self.slices), max_tracks))
start_time = time.time()
# TODO: Check what values of ftol and xtol are required for good results. The current setting is probably pessimistic.
leastsq_result = scipy.optimize.leastsq(optimization_func, x0, args=func_args, full_output=True, ftol=1e-10, xtol=1e-10, maxfev=max_eval)
elapsed = time.time() - start_time
x, covx, infodict, mesg, ier = leastsq_result
self.__debug_leastsq = leastsq_result
logger.debug("Optimization completed in {:.1f} seconds and {:d} function evaluations. ier={}, mesg='{}'".format(elapsed, infodict['nfev'], ier, mesg))
if ier in (1,2,3,4):
for pname, val in zip(PARAM_ORDER, x):
self.params['calibrated'][pname] = val
return self.parameter
else:
raise CalibrationError(mesg) | python | def calibrate(self, max_tracks=MAX_OPTIMIZATION_TRACKS, max_eval=MAX_OPTIMIZATION_FEV, norm_c=DEFAULT_NORM_C):
"""Perform calibration
Parameters
----------------------
max_eval : int
Maximum number of function evaluations
Returns
---------------------
dict
Optimization result
Raises
-----------------------
CalibrationError
If calibration fails
"""
x0 = np.array([self.parameter[param] for param in PARAM_ORDER])
available_tracks = np.sum([len(s.inliers) for s in self.slices])
if available_tracks < max_tracks:
warnings.warn("Could not use the requested {} tracks, since only {} were available in the slice data.".format(max_tracks, available_tracks))
max_tracks = available_tracks
# Get subset of available tracks such that all slices are still used
slice_sample_idxs = videoslice.fill_sampling(self.slices, max_tracks)
func_args = (self.slices, slice_sample_idxs, self.video.camera_model, self.gyro, norm_c)
self.slice_sample_idxs = slice_sample_idxs
logger.debug("Starting optimization on {:d} slices and {:d} tracks".format(len(self.slices), max_tracks))
start_time = time.time()
# TODO: Check what values of ftol and xtol are required for good results. The current setting is probably pessimistic.
leastsq_result = scipy.optimize.leastsq(optimization_func, x0, args=func_args, full_output=True, ftol=1e-10, xtol=1e-10, maxfev=max_eval)
elapsed = time.time() - start_time
x, covx, infodict, mesg, ier = leastsq_result
self.__debug_leastsq = leastsq_result
logger.debug("Optimization completed in {:.1f} seconds and {:d} function evaluations. ier={}, mesg='{}'".format(elapsed, infodict['nfev'], ier, mesg))
if ier in (1,2,3,4):
for pname, val in zip(PARAM_ORDER, x):
self.params['calibrated'][pname] = val
return self.parameter
else:
raise CalibrationError(mesg) | ['def', 'calibrate', '(', 'self', ',', 'max_tracks', '=', 'MAX_OPTIMIZATION_TRACKS', ',', 'max_eval', '=', 'MAX_OPTIMIZATION_FEV', ',', 'norm_c', '=', 'DEFAULT_NORM_C', ')', ':', 'x0', '=', 'np', '.', 'array', '(', '[', 'self', '.', 'parameter', '[', 'param', ']', 'for', 'param', 'in', 'PARAM_ORDER', ']', ')', 'available_tracks', '=', 'np', '.', 'sum', '(', '[', 'len', '(', 's', '.', 'inliers', ')', 'for', 's', 'in', 'self', '.', 'slices', ']', ')', 'if', 'available_tracks', '<', 'max_tracks', ':', 'warnings', '.', 'warn', '(', '"Could not use the requested {} tracks, since only {} were available in the slice data."', '.', 'format', '(', 'max_tracks', ',', 'available_tracks', ')', ')', 'max_tracks', '=', 'available_tracks', '# Get subset of available tracks such that all slices are still used', 'slice_sample_idxs', '=', 'videoslice', '.', 'fill_sampling', '(', 'self', '.', 'slices', ',', 'max_tracks', ')', 'func_args', '=', '(', 'self', '.', 'slices', ',', 'slice_sample_idxs', ',', 'self', '.', 'video', '.', 'camera_model', ',', 'self', '.', 'gyro', ',', 'norm_c', ')', 'self', '.', 'slice_sample_idxs', '=', 'slice_sample_idxs', 'logger', '.', 'debug', '(', '"Starting optimization on {:d} slices and {:d} tracks"', '.', 'format', '(', 'len', '(', 'self', '.', 'slices', ')', ',', 'max_tracks', ')', ')', 'start_time', '=', 'time', '.', 'time', '(', ')', '# TODO: Check what values of ftol and xtol are required for good results. The current setting is probably pessimistic.', 'leastsq_result', '=', 'scipy', '.', 'optimize', '.', 'leastsq', '(', 'optimization_func', ',', 'x0', ',', 'args', '=', 'func_args', ',', 'full_output', '=', 'True', ',', 'ftol', '=', '1e-10', ',', 'xtol', '=', '1e-10', ',', 'maxfev', '=', 'max_eval', ')', 'elapsed', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'x', ',', 'covx', ',', 'infodict', ',', 'mesg', ',', 'ier', '=', 'leastsq_result', 'self', '.', '__debug_leastsq', '=', 'leastsq_result', 'logger', '.', 'debug', '(', '"Optimization completed in {:.1f} seconds and {:d} function evaluations. ier={}, mesg=\'{}\'"', '.', 'format', '(', 'elapsed', ',', 'infodict', '[', "'nfev'", ']', ',', 'ier', ',', 'mesg', ')', ')', 'if', 'ier', 'in', '(', '1', ',', '2', ',', '3', ',', '4', ')', ':', 'for', 'pname', ',', 'val', 'in', 'zip', '(', 'PARAM_ORDER', ',', 'x', ')', ':', 'self', '.', 'params', '[', "'calibrated'", ']', '[', 'pname', ']', '=', 'val', 'return', 'self', '.', 'parameter', 'else', ':', 'raise', 'CalibrationError', '(', 'mesg', ')'] | Perform calibration
Parameters
----------------------
max_eval : int
Maximum number of function evaluations
Returns
---------------------
dict
Optimization result
Raises
-----------------------
CalibrationError
If calibration fails | ['Perform', 'calibration'] | train | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L167-L209 |
1,927 | saltstack/salt | salt/modules/smf_service.py | start | def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
__salt__['cmd.retcode'](clear_cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return False | python | def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
__salt__['cmd.retcode'](clear_cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return False | ['def', 'start', '(', 'name', ')', ':', 'cmd', '=', "'/usr/sbin/svcadm enable -s -t {0}'", '.', 'format', '(', 'name', ')', 'retcode', '=', '__salt__', '[', "'cmd.retcode'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ')', 'if', 'not', 'retcode', ':', 'return', 'True', 'if', 'retcode', '==', '3', ':', '# Return code 3 means there was a problem with the service', "# A common case is being in the 'maintenance' state", '# Attempt a clear and try one more time', 'clear_cmd', '=', "'/usr/sbin/svcadm clear {0}'", '.', 'format', '(', 'name', ')', '__salt__', '[', "'cmd.retcode'", ']', '(', 'clear_cmd', ',', 'python_shell', '=', 'False', ')', 'return', 'not', '__salt__', '[', "'cmd.retcode'", ']', '(', 'cmd', ',', 'python_shell', '=', 'False', ')', 'return', 'False'] | Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name> | ['Start', 'the', 'specified', 'service'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smf_service.py#L155-L176 |
1,928 | bsolomon1124/pyfinance | pyfinance/returns.py | TSeries.drawdown_end | def drawdown_end(self, return_date=False):
"""The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
end = self.drawdown_idx().idxmin()
if return_date:
return end.date()
return end | python | def drawdown_end(self, return_date=False):
"""The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp
"""
end = self.drawdown_idx().idxmin()
if return_date:
return end.date()
return end | ['def', 'drawdown_end', '(', 'self', ',', 'return_date', '=', 'False', ')', ':', 'end', '=', 'self', '.', 'drawdown_idx', '(', ')', '.', 'idxmin', '(', ')', 'if', 'return_date', ':', 'return', 'end', '.', 'date', '(', ')', 'return', 'end'] | The date of the drawdown trough.
Date at which the drawdown was most negative.
Parameters
----------
return_date : bool, default False
If True, return a `datetime.date` object.
If False, return a Pandas Timestamp object.
Returns
-------
datetime.date or pandas._libs.tslib.Timestamp | ['The', 'date', 'of', 'the', 'drawdown', 'trough', '.'] | train | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L396-L415 |
1,929 | jmbhughes/suvi-trainer | suvitrainer/gui.py | App.onclick | def onclick(self, event):
"""
Draw contours on the data for a click in the thematic map
:param event: mouse click on thematic map preview
"""
if event.inaxes == self.previewax:
y, x = int(event.xdata), int(event.ydata)
label = self.selection_array[x, y]
contiguous_regions = scipy.ndimage.label(self.selection_array == label)[0]
this_region = contiguous_regions == (contiguous_regions[x, y])
# remove the boundaries so any region touching the edge isn't drawn odd
this_region[0, :] = 0
this_region[:, 0] = 0
this_region[this_region.shape[0]-1, :] = 0
this_region[:, this_region.shape[1]-1] = 0
# convert the region mask into just a true/false array of its boundary pixels
edges = binary_erosion(this_region) ^ this_region
# convert the boundary pixels into a path, moving around instead of just where
x, y = np.where(edges)
coords = np.dstack([x, y])[0]
path = [coords[0]]
coords = coords[1:]
while len(coords):
dist = np.sum(np.abs(path[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
if dist[neighbor_index] < 5:
path.append(coords[neighbor_index].copy())
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
else:
break
path = np.array(path)
clips = []
while len(coords) > 5:
dist = np.sum(np.abs(path[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
clip = [coords[neighbor_index].copy()]
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
while len(coords):
dist = np.sum(np.abs(clip[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
if dist[neighbor_index] < 5:
clip.append(coords[neighbor_index].copy())
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
else:
break
clips.append(np.array(clip))
# draw the continguous on the selection area
self.region_patches.append(PatchCollection(
[Polygon(np.dstack([path[:, 1], path[:, 0]])[0], False,
fill=False, facecolor=None,
edgecolor="black", alpha=1, lw=2.5)] +
[Polygon(np.dstack([clip[:, 1], clip[:, 0]])[0], False,
fill=False, facecolor=None,
edgecolor="black", alpha=1, lw=2.0) for clip in clips],
match_original=True))
self.imageax.add_collection(self.region_patches[-1])
self.fig.canvas.draw_idle() | python | def onclick(self, event):
"""
Draw contours on the data for a click in the thematic map
:param event: mouse click on thematic map preview
"""
if event.inaxes == self.previewax:
y, x = int(event.xdata), int(event.ydata)
label = self.selection_array[x, y]
contiguous_regions = scipy.ndimage.label(self.selection_array == label)[0]
this_region = contiguous_regions == (contiguous_regions[x, y])
# remove the boundaries so any region touching the edge isn't drawn odd
this_region[0, :] = 0
this_region[:, 0] = 0
this_region[this_region.shape[0]-1, :] = 0
this_region[:, this_region.shape[1]-1] = 0
# convert the region mask into just a true/false array of its boundary pixels
edges = binary_erosion(this_region) ^ this_region
# convert the boundary pixels into a path, moving around instead of just where
x, y = np.where(edges)
coords = np.dstack([x, y])[0]
path = [coords[0]]
coords = coords[1:]
while len(coords):
dist = np.sum(np.abs(path[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
if dist[neighbor_index] < 5:
path.append(coords[neighbor_index].copy())
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
else:
break
path = np.array(path)
clips = []
while len(coords) > 5:
dist = np.sum(np.abs(path[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
clip = [coords[neighbor_index].copy()]
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
while len(coords):
dist = np.sum(np.abs(clip[-1] - coords), axis=1)
neighbor_index = np.argmin(dist)
if dist[neighbor_index] < 5:
clip.append(coords[neighbor_index].copy())
coords[neighbor_index:-1] = coords[neighbor_index + 1:]
coords = coords[:-1]
else:
break
clips.append(np.array(clip))
# draw the continguous on the selection area
self.region_patches.append(PatchCollection(
[Polygon(np.dstack([path[:, 1], path[:, 0]])[0], False,
fill=False, facecolor=None,
edgecolor="black", alpha=1, lw=2.5)] +
[Polygon(np.dstack([clip[:, 1], clip[:, 0]])[0], False,
fill=False, facecolor=None,
edgecolor="black", alpha=1, lw=2.0) for clip in clips],
match_original=True))
self.imageax.add_collection(self.region_patches[-1])
self.fig.canvas.draw_idle() | ['def', 'onclick', '(', 'self', ',', 'event', ')', ':', 'if', 'event', '.', 'inaxes', '==', 'self', '.', 'previewax', ':', 'y', ',', 'x', '=', 'int', '(', 'event', '.', 'xdata', ')', ',', 'int', '(', 'event', '.', 'ydata', ')', 'label', '=', 'self', '.', 'selection_array', '[', 'x', ',', 'y', ']', 'contiguous_regions', '=', 'scipy', '.', 'ndimage', '.', 'label', '(', 'self', '.', 'selection_array', '==', 'label', ')', '[', '0', ']', 'this_region', '=', 'contiguous_regions', '==', '(', 'contiguous_regions', '[', 'x', ',', 'y', ']', ')', "# remove the boundaries so any region touching the edge isn't drawn odd", 'this_region', '[', '0', ',', ':', ']', '=', '0', 'this_region', '[', ':', ',', '0', ']', '=', '0', 'this_region', '[', 'this_region', '.', 'shape', '[', '0', ']', '-', '1', ',', ':', ']', '=', '0', 'this_region', '[', ':', ',', 'this_region', '.', 'shape', '[', '1', ']', '-', '1', ']', '=', '0', '# convert the region mask into just a true/false array of its boundary pixels', 'edges', '=', 'binary_erosion', '(', 'this_region', ')', '^', 'this_region', '# convert the boundary pixels into a path, moving around instead of just where', 'x', ',', 'y', '=', 'np', '.', 'where', '(', 'edges', ')', 'coords', '=', 'np', '.', 'dstack', '(', '[', 'x', ',', 'y', ']', ')', '[', '0', ']', 'path', '=', '[', 'coords', '[', '0', ']', ']', 'coords', '=', 'coords', '[', '1', ':', ']', 'while', 'len', '(', 'coords', ')', ':', 'dist', '=', 'np', '.', 'sum', '(', 'np', '.', 'abs', '(', 'path', '[', '-', '1', ']', '-', 'coords', ')', ',', 'axis', '=', '1', ')', 'neighbor_index', '=', 'np', '.', 'argmin', '(', 'dist', ')', 'if', 'dist', '[', 'neighbor_index', ']', '<', '5', ':', 'path', '.', 'append', '(', 'coords', '[', 'neighbor_index', ']', '.', 'copy', '(', ')', ')', 'coords', '[', 'neighbor_index', ':', '-', '1', ']', '=', 'coords', '[', 'neighbor_index', '+', '1', ':', ']', 'coords', '=', 'coords', '[', ':', '-', '1', ']', 'else', ':', 'break', 'path', '=', 'np', '.', 'array', '(', 'path', ')', 'clips', '=', '[', ']', 'while', 'len', '(', 'coords', ')', '>', '5', ':', 'dist', '=', 'np', '.', 'sum', '(', 'np', '.', 'abs', '(', 'path', '[', '-', '1', ']', '-', 'coords', ')', ',', 'axis', '=', '1', ')', 'neighbor_index', '=', 'np', '.', 'argmin', '(', 'dist', ')', 'clip', '=', '[', 'coords', '[', 'neighbor_index', ']', '.', 'copy', '(', ')', ']', 'coords', '[', 'neighbor_index', ':', '-', '1', ']', '=', 'coords', '[', 'neighbor_index', '+', '1', ':', ']', 'coords', '=', 'coords', '[', ':', '-', '1', ']', 'while', 'len', '(', 'coords', ')', ':', 'dist', '=', 'np', '.', 'sum', '(', 'np', '.', 'abs', '(', 'clip', '[', '-', '1', ']', '-', 'coords', ')', ',', 'axis', '=', '1', ')', 'neighbor_index', '=', 'np', '.', 'argmin', '(', 'dist', ')', 'if', 'dist', '[', 'neighbor_index', ']', '<', '5', ':', 'clip', '.', 'append', '(', 'coords', '[', 'neighbor_index', ']', '.', 'copy', '(', ')', ')', 'coords', '[', 'neighbor_index', ':', '-', '1', ']', '=', 'coords', '[', 'neighbor_index', '+', '1', ':', ']', 'coords', '=', 'coords', '[', ':', '-', '1', ']', 'else', ':', 'break', 'clips', '.', 'append', '(', 'np', '.', 'array', '(', 'clip', ')', ')', '# draw the continguous on the selection area', 'self', '.', 'region_patches', '.', 'append', '(', 'PatchCollection', '(', '[', 'Polygon', '(', 'np', '.', 'dstack', '(', '[', 'path', '[', ':', ',', '1', ']', ',', 'path', '[', ':', ',', '0', ']', ']', ')', '[', '0', ']', ',', 'False', ',', 'fill', '=', 'False', ',', 'facecolor', '=', 'None', ',', 'edgecolor', '=', '"black"', ',', 'alpha', '=', '1', ',', 'lw', '=', '2.5', ')', ']', '+', '[', 'Polygon', '(', 'np', '.', 'dstack', '(', '[', 'clip', '[', ':', ',', '1', ']', ',', 'clip', '[', ':', ',', '0', ']', ']', ')', '[', '0', ']', ',', 'False', ',', 'fill', '=', 'False', ',', 'facecolor', '=', 'None', ',', 'edgecolor', '=', '"black"', ',', 'alpha', '=', '1', ',', 'lw', '=', '2.0', ')', 'for', 'clip', 'in', 'clips', ']', ',', 'match_original', '=', 'True', ')', ')', 'self', '.', 'imageax', '.', 'add_collection', '(', 'self', '.', 'region_patches', '[', '-', '1', ']', ')', 'self', '.', 'fig', '.', 'canvas', '.', 'draw_idle', '(', ')'] | Draw contours on the data for a click in the thematic map
:param event: mouse click on thematic map preview | ['Draw', 'contours', 'on', 'the', 'data', 'for', 'a', 'click', 'in', 'the', 'thematic', 'map', ':', 'param', 'event', ':', 'mouse', 'click', 'on', 'thematic', 'map', 'preview'] | train | https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/gui.py#L378-L445 |
1,930 | delph-in/pydelphin | delphin/interfaces/ace.py | parse_from_iterable | def parse_from_iterable(grm, data, **kwargs):
"""
Parse each sentence in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): the sentences to parse
**kwargs: additional keyword arguments to pass to the AceParser
Yields:
:class:`~delphin.interfaces.ParseResponse`
Example:
>>> sentences = ['Dogs bark.', 'It rained']
>>> responses = list(ace.parse_from_iterable('erg.dat', sentences))
NOTE: parsed 2 / 2 sentences, avg 723k, time 0.01026s
"""
with AceParser(grm, **kwargs) as parser:
for datum in data:
yield parser.interact(datum) | python | def parse_from_iterable(grm, data, **kwargs):
"""
Parse each sentence in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): the sentences to parse
**kwargs: additional keyword arguments to pass to the AceParser
Yields:
:class:`~delphin.interfaces.ParseResponse`
Example:
>>> sentences = ['Dogs bark.', 'It rained']
>>> responses = list(ace.parse_from_iterable('erg.dat', sentences))
NOTE: parsed 2 / 2 sentences, avg 723k, time 0.01026s
"""
with AceParser(grm, **kwargs) as parser:
for datum in data:
yield parser.interact(datum) | ['def', 'parse_from_iterable', '(', 'grm', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'with', 'AceParser', '(', 'grm', ',', '*', '*', 'kwargs', ')', 'as', 'parser', ':', 'for', 'datum', 'in', 'data', ':', 'yield', 'parser', '.', 'interact', '(', 'datum', ')'] | Parse each sentence in *data* with ACE using grammar *grm*.
Args:
grm (str): path to a compiled grammar image
data (iterable): the sentences to parse
**kwargs: additional keyword arguments to pass to the AceParser
Yields:
:class:`~delphin.interfaces.ParseResponse`
Example:
>>> sentences = ['Dogs bark.', 'It rained']
>>> responses = list(ace.parse_from_iterable('erg.dat', sentences))
NOTE: parsed 2 / 2 sentences, avg 723k, time 0.01026s | ['Parse', 'each', 'sentence', 'in', '*', 'data', '*', 'with', 'ACE', 'using', 'grammar', '*', 'grm', '*', '.'] | train | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/interfaces/ace.py#L468-L485 |
1,931 | grycap/RADL | radl/radl_parse.py | RADLParser.p_feature_contains | def p_feature_contains(self, t):
"""feature_contains : VAR CONTAINS LPAREN features RPAREN"""
t[0] = Feature(t[1], t[2], Features(t[4]), line=t.lineno(1)) | python | def p_feature_contains(self, t):
"""feature_contains : VAR CONTAINS LPAREN features RPAREN"""
t[0] = Feature(t[1], t[2], Features(t[4]), line=t.lineno(1)) | ['def', 'p_feature_contains', '(', 'self', ',', 't', ')', ':', 't', '[', '0', ']', '=', 'Feature', '(', 't', '[', '1', ']', ',', 't', '[', '2', ']', ',', 'Features', '(', 't', '[', '4', ']', ')', ',', 'line', '=', 't', '.', 'lineno', '(', '1', ')', ')'] | feature_contains : VAR CONTAINS LPAREN features RPAREN | ['feature_contains', ':', 'VAR', 'CONTAINS', 'LPAREN', 'features', 'RPAREN'] | train | https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl_parse.py#L335-L338 |
1,932 | ryanvarley/ExoData | exodata/database.py | OECDatabase.transitingPlanets | def transitingPlanets(self):
""" Returns a list of transiting planet objects
"""
transitingPlanets = []
for planet in self.planets:
try:
if planet.isTransiting:
transitingPlanets.append(planet)
except KeyError: # No 'discoverymethod' tag - this also filters Solar System planets
pass
return transitingPlanets | python | def transitingPlanets(self):
""" Returns a list of transiting planet objects
"""
transitingPlanets = []
for planet in self.planets:
try:
if planet.isTransiting:
transitingPlanets.append(planet)
except KeyError: # No 'discoverymethod' tag - this also filters Solar System planets
pass
return transitingPlanets | ['def', 'transitingPlanets', '(', 'self', ')', ':', 'transitingPlanets', '=', '[', ']', 'for', 'planet', 'in', 'self', '.', 'planets', ':', 'try', ':', 'if', 'planet', '.', 'isTransiting', ':', 'transitingPlanets', '.', 'append', '(', 'planet', ')', 'except', 'KeyError', ':', "# No 'discoverymethod' tag - this also filters Solar System planets", 'pass', 'return', 'transitingPlanets'] | Returns a list of transiting planet objects | ['Returns', 'a', 'list', 'of', 'transiting', 'planet', 'objects'] | train | https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/database.py#L68-L81 |
1,933 | DistrictDataLabs/yellowbrick | yellowbrick/cluster/icdm.py | InterclusterDistance.draw | def draw(self):
"""
Draw the embedded centers with their sizes on the visualization.
"""
# Compute the sizes of the markers from their score
sizes = self._get_cluster_sizes()
# Draw the scatter plots with associated sizes on the graph
self.ax.scatter(
self.embedded_centers_[:,0], self.embedded_centers_[:,1],
s=sizes, c=self.facecolor, edgecolor=self.edgecolor, linewidth=1,
)
# Annotate the clusters with their labels
for i, pt in enumerate(self.embedded_centers_):
self.ax.text(
s=str(i), x=pt[0], y=pt[1], va="center", ha="center", fontweight="bold"
)
# Ensure the current axes is always the main residuals axes
plt.sca(self.ax)
return self.ax | python | def draw(self):
"""
Draw the embedded centers with their sizes on the visualization.
"""
# Compute the sizes of the markers from their score
sizes = self._get_cluster_sizes()
# Draw the scatter plots with associated sizes on the graph
self.ax.scatter(
self.embedded_centers_[:,0], self.embedded_centers_[:,1],
s=sizes, c=self.facecolor, edgecolor=self.edgecolor, linewidth=1,
)
# Annotate the clusters with their labels
for i, pt in enumerate(self.embedded_centers_):
self.ax.text(
s=str(i), x=pt[0], y=pt[1], va="center", ha="center", fontweight="bold"
)
# Ensure the current axes is always the main residuals axes
plt.sca(self.ax)
return self.ax | ['def', 'draw', '(', 'self', ')', ':', '# Compute the sizes of the markers from their score', 'sizes', '=', 'self', '.', '_get_cluster_sizes', '(', ')', '# Draw the scatter plots with associated sizes on the graph', 'self', '.', 'ax', '.', 'scatter', '(', 'self', '.', 'embedded_centers_', '[', ':', ',', '0', ']', ',', 'self', '.', 'embedded_centers_', '[', ':', ',', '1', ']', ',', 's', '=', 'sizes', ',', 'c', '=', 'self', '.', 'facecolor', ',', 'edgecolor', '=', 'self', '.', 'edgecolor', ',', 'linewidth', '=', '1', ',', ')', '# Annotate the clusters with their labels', 'for', 'i', ',', 'pt', 'in', 'enumerate', '(', 'self', '.', 'embedded_centers_', ')', ':', 'self', '.', 'ax', '.', 'text', '(', 's', '=', 'str', '(', 'i', ')', ',', 'x', '=', 'pt', '[', '0', ']', ',', 'y', '=', 'pt', '[', '1', ']', ',', 'va', '=', '"center"', ',', 'ha', '=', '"center"', ',', 'fontweight', '=', '"bold"', ')', '# Ensure the current axes is always the main residuals axes', 'plt', '.', 'sca', '(', 'self', '.', 'ax', ')', 'return', 'self', '.', 'ax'] | Draw the embedded centers with their sizes on the visualization. | ['Draw', 'the', 'embedded', 'centers', 'with', 'their', 'sizes', 'on', 'the', 'visualization', '.'] | train | https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/cluster/icdm.py#L270-L291 |
1,934 | bunq/sdk_python | bunq/sdk/json/adapters.py | ShareDetailAdapter.serialize | def serialize(cls, share_detail):
"""
:type share_detail: object_.ShareDetail
:rtype: dict
"""
return {
cls._FIELD_PAYMENT: converter.serialize(
share_detail._payment_field_for_request),
cls._FIELD_READ_ONLY: converter.serialize(
share_detail._read_only_field_for_request),
cls._FIELD_DRAFT_PAYMENT: converter.serialize(
share_detail._draft_payment
),
} | python | def serialize(cls, share_detail):
"""
:type share_detail: object_.ShareDetail
:rtype: dict
"""
return {
cls._FIELD_PAYMENT: converter.serialize(
share_detail._payment_field_for_request),
cls._FIELD_READ_ONLY: converter.serialize(
share_detail._read_only_field_for_request),
cls._FIELD_DRAFT_PAYMENT: converter.serialize(
share_detail._draft_payment
),
} | ['def', 'serialize', '(', 'cls', ',', 'share_detail', ')', ':', 'return', '{', 'cls', '.', '_FIELD_PAYMENT', ':', 'converter', '.', 'serialize', '(', 'share_detail', '.', '_payment_field_for_request', ')', ',', 'cls', '.', '_FIELD_READ_ONLY', ':', 'converter', '.', 'serialize', '(', 'share_detail', '.', '_read_only_field_for_request', ')', ',', 'cls', '.', '_FIELD_DRAFT_PAYMENT', ':', 'converter', '.', 'serialize', '(', 'share_detail', '.', '_draft_payment', ')', ',', '}'] | :type share_detail: object_.ShareDetail
:rtype: dict | [':', 'type', 'share_detail', ':', 'object_', '.', 'ShareDetail'] | train | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/adapters.py#L501-L516 |
1,935 | aio-libs/yarl | yarl/__init__.py | URL.port | def port(self):
"""Port part of URL, with scheme-based fallback.
None for relative URLs or URLs without explicit port and
scheme without default port substitution.
"""
return self._val.port or DEFAULT_PORTS.get(self._val.scheme) | python | def port(self):
"""Port part of URL, with scheme-based fallback.
None for relative URLs or URLs without explicit port and
scheme without default port substitution.
"""
return self._val.port or DEFAULT_PORTS.get(self._val.scheme) | ['def', 'port', '(', 'self', ')', ':', 'return', 'self', '.', '_val', '.', 'port', 'or', 'DEFAULT_PORTS', '.', 'get', '(', 'self', '.', '_val', '.', 'scheme', ')'] | Port part of URL, with scheme-based fallback.
None for relative URLs or URLs without explicit port and
scheme without default port substitution. | ['Port', 'part', 'of', 'URL', 'with', 'scheme', '-', 'based', 'fallback', '.'] | train | https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L456-L463 |
1,936 | pypa/setuptools | setuptools/msvc.py | EnvironmentInfo._build_paths | def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = safe_env.get(name, '').split(os.pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return os.pathsep.join(unique_paths) | python | def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = safe_env.get(name, '').split(os.pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return os.pathsep.join(unique_paths) | ['def', '_build_paths', '(', 'self', ',', 'name', ',', 'spec_path_lists', ',', 'exists', ')', ':', '# flatten spec_path_lists', 'spec_paths', '=', 'itertools', '.', 'chain', '.', 'from_iterable', '(', 'spec_path_lists', ')', 'env_paths', '=', 'safe_env', '.', 'get', '(', 'name', ',', "''", ')', '.', 'split', '(', 'os', '.', 'pathsep', ')', 'paths', '=', 'itertools', '.', 'chain', '(', 'spec_paths', ',', 'env_paths', ')', 'extant_paths', '=', 'list', '(', 'filter', '(', 'os', '.', 'path', '.', 'isdir', ',', 'paths', ')', ')', 'if', 'exists', 'else', 'paths', 'if', 'not', 'extant_paths', ':', 'msg', '=', '"%s environment variable is empty"', '%', 'name', '.', 'upper', '(', ')', 'raise', 'distutils', '.', 'errors', '.', 'DistutilsPlatformError', '(', 'msg', ')', 'unique_paths', '=', 'self', '.', '_unique_everseen', '(', 'extant_paths', ')', 'return', 'os', '.', 'pathsep', '.', 'join', '(', 'unique_paths', ')'] | Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved. | ['Given', 'an', 'environment', 'variable', 'name', 'and', 'specified', 'paths', 'return', 'a', 'pathsep', '-', 'separated', 'string', 'of', 'paths', 'containing', 'unique', 'extant', 'directories', 'from', 'those', 'paths', 'and', 'from', 'the', 'environment', 'variable', '.', 'Raise', 'an', 'error', 'if', 'no', 'paths', 'are', 'resolved', '.'] | train | https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L1261-L1278 |
1,937 | smdabdoub/phylotoast | bin/diversity.py | print_KruskalWallisH | def print_KruskalWallisH(div_calc):
"""
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
each group must have at least 5 measurements.
"""
calc = defaultdict(list)
try:
for k1, v1 in div_calc.iteritems():
for k2, v2 in v1.iteritems():
calc[k1].append(v2)
except:
return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
"significance testing."
h, p = stats.kruskal(*calc.values())
print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
print "p-value: {}".format(p) | python | def print_KruskalWallisH(div_calc):
"""
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
each group must have at least 5 measurements.
"""
calc = defaultdict(list)
try:
for k1, v1 in div_calc.iteritems():
for k2, v2 in v1.iteritems():
calc[k1].append(v2)
except:
return "Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "\
"significance testing."
h, p = stats.kruskal(*calc.values())
print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(str(len(div_calc)), h)
print "p-value: {}".format(p) | ['def', 'print_KruskalWallisH', '(', 'div_calc', ')', ':', 'calc', '=', 'defaultdict', '(', 'list', ')', 'try', ':', 'for', 'k1', ',', 'v1', 'in', 'div_calc', '.', 'iteritems', '(', ')', ':', 'for', 'k2', ',', 'v2', 'in', 'v1', '.', 'iteritems', '(', ')', ':', 'calc', '[', 'k1', ']', '.', 'append', '(', 'v2', ')', 'except', ':', 'return', '"Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "', '"significance testing."', 'h', ',', 'p', '=', 'stats', '.', 'kruskal', '(', '*', 'calc', '.', 'values', '(', ')', ')', 'print', '"\\nKruskal-Wallis H-test statistic for {} groups: {}"', '.', 'format', '(', 'str', '(', 'len', '(', 'div_calc', ')', ')', ',', 'h', ')', 'print', '"p-value: {}"', '.', 'format', '(', 'p', ')'] | Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that
each group must have at least 5 measurements. | ['Compute', 'the', 'Kruskal', '-', 'Wallis', 'H', '-', 'test', 'for', 'independent', 'samples', '.', 'A', 'typical', 'rule', 'is', 'that', 'each', 'group', 'must', 'have', 'at', 'least', '5', 'measurements', '.'] | train | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L69-L84 |
1,938 | Rapptz/discord.py | discord/message.py | Message.raw_channel_mentions | def raw_channel_mentions(self):
"""A property that returns an array of channel IDs matched with
the syntax of <#channel_id> in the message content.
"""
return [int(x) for x in re.findall(r'<#([0-9]+)>', self.content)] | python | def raw_channel_mentions(self):
"""A property that returns an array of channel IDs matched with
the syntax of <#channel_id> in the message content.
"""
return [int(x) for x in re.findall(r'<#([0-9]+)>', self.content)] | ['def', 'raw_channel_mentions', '(', 'self', ')', ':', 'return', '[', 'int', '(', 'x', ')', 'for', 'x', 'in', 're', '.', 'findall', '(', "r'<#([0-9]+)>'", ',', 'self', '.', 'content', ')', ']'] | A property that returns an array of channel IDs matched with
the syntax of <#channel_id> in the message content. | ['A', 'property', 'that', 'returns', 'an', 'array', 'of', 'channel', 'IDs', 'matched', 'with', 'the', 'syntax', 'of', '<#channel_id', '>', 'in', 'the', 'message', 'content', '.'] | train | https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/message.py#L380-L384 |
1,939 | BD2KOnFHIR/fhirtordf | fhirtordf/fhir/fhirmetavoc.py | FHIRMetaVocEntry._to_str | def _to_str(uri: URIRef) -> str:
"""
Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation
Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference
:param uri: URI to convert
:return: tag name
"""
local_name = str(uri).replace(str(FHIR), '')
return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name | python | def _to_str(uri: URIRef) -> str:
"""
Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation
Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference
:param uri: URI to convert
:return: tag name
"""
local_name = str(uri).replace(str(FHIR), '')
return local_name.rsplit('.', 1)[1] if '.' in local_name else local_name | ['def', '_to_str', '(', 'uri', ':', 'URIRef', ')', '->', 'str', ':', 'local_name', '=', 'str', '(', 'uri', ')', '.', 'replace', '(', 'str', '(', 'FHIR', ')', ',', "''", ')', 'return', 'local_name', '.', 'rsplit', '(', "'.'", ',', '1', ')', '[', '1', ']', 'if', "'.'", 'in', 'local_name', 'else', 'local_name'] | Convert a FHIR style URI into a tag name to be used to retrieve data from a JSON representation
Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference
:param uri: URI to convert
:return: tag name | ['Convert', 'a', 'FHIR', 'style', 'URI', 'into', 'a', 'tag', 'name', 'to', 'be', 'used', 'to', 'retrieve', 'data', 'from', 'a', 'JSON', 'representation', 'Example', ':', 'http', ':', '//', 'hl7', '.', 'org', '/', 'fhir', '/', 'Provenance', '.', 'agent', '.', 'whoReference', '--', '>', 'whoReference', ':', 'param', 'uri', ':', 'URI', 'to', 'convert', ':', 'return', ':', 'tag', 'name'] | train | https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/fhirmetavoc.py#L60-L68 |
1,940 | log2timeline/dfvfs | dfvfs/file_io/vshadow_file_io.py | VShadowFile.read | def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._vshadow_store.read(size) | python | def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._vshadow_store.read(size) | ['def', 'read', '(', 'self', ',', 'size', '=', 'None', ')', ':', 'if', 'not', 'self', '.', '_is_open', ':', 'raise', 'IOError', '(', "'Not opened.'", ')', 'return', 'self', '.', '_vshadow_store', '.', 'read', '(', 'size', ')'] | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | ['Reads', 'a', 'byte', 'string', 'from', 'the', 'file', '-', 'like', 'object', 'at', 'the', 'current', 'offset', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/vshadow_file_io.py#L78-L98 |
1,941 | undertherain/pycontextfree | setup_boilerplate.py | parse_requirements | def parse_requirements(
requirements_path: str = 'requirements.txt') -> t.List[str]:
"""Read contents of requirements.txt file and return data from its relevant lines.
Only non-empty and non-comment lines are relevant.
"""
requirements = []
with HERE.joinpath(requirements_path).open() as reqs_file:
for requirement in [line.strip() for line in reqs_file.read().splitlines()]:
if not requirement or requirement.startswith('#'):
continue
requirements.append(requirement)
return requirements | python | def parse_requirements(
requirements_path: str = 'requirements.txt') -> t.List[str]:
"""Read contents of requirements.txt file and return data from its relevant lines.
Only non-empty and non-comment lines are relevant.
"""
requirements = []
with HERE.joinpath(requirements_path).open() as reqs_file:
for requirement in [line.strip() for line in reqs_file.read().splitlines()]:
if not requirement or requirement.startswith('#'):
continue
requirements.append(requirement)
return requirements | ['def', 'parse_requirements', '(', 'requirements_path', ':', 'str', '=', "'requirements.txt'", ')', '->', 't', '.', 'List', '[', 'str', ']', ':', 'requirements', '=', '[', ']', 'with', 'HERE', '.', 'joinpath', '(', 'requirements_path', ')', '.', 'open', '(', ')', 'as', 'reqs_file', ':', 'for', 'requirement', 'in', '[', 'line', '.', 'strip', '(', ')', 'for', 'line', 'in', 'reqs_file', '.', 'read', '(', ')', '.', 'splitlines', '(', ')', ']', ':', 'if', 'not', 'requirement', 'or', 'requirement', '.', 'startswith', '(', "'#'", ')', ':', 'continue', 'requirements', '.', 'append', '(', 'requirement', ')', 'return', 'requirements'] | Read contents of requirements.txt file and return data from its relevant lines.
Only non-empty and non-comment lines are relevant. | ['Read', 'contents', 'of', 'requirements', '.', 'txt', 'file', 'and', 'return', 'data', 'from', 'its', 'relevant', 'lines', '.'] | train | https://github.com/undertherain/pycontextfree/blob/91505e978f6034863747c98d919ac11b029b1ac3/setup_boilerplate.py#L66-L78 |
1,942 | tensorflow/tensor2tensor | tensor2tensor/models/image_transformer_2d.py | img2img_transformer2d_n31 | def img2img_transformer2d_n31():
"""Set of hyperparameters."""
hparams = img2img_transformer2d_base()
hparams.batch_size = 1
hparams.num_encoder_layers = 6
hparams.num_decoder_layers = 12
hparams.num_heads = 8
hparams.query_shape = (16, 32)
hparams.memory_flange = (16, 32)
return hparams | python | def img2img_transformer2d_n31():
"""Set of hyperparameters."""
hparams = img2img_transformer2d_base()
hparams.batch_size = 1
hparams.num_encoder_layers = 6
hparams.num_decoder_layers = 12
hparams.num_heads = 8
hparams.query_shape = (16, 32)
hparams.memory_flange = (16, 32)
return hparams | ['def', 'img2img_transformer2d_n31', '(', ')', ':', 'hparams', '=', 'img2img_transformer2d_base', '(', ')', 'hparams', '.', 'batch_size', '=', '1', 'hparams', '.', 'num_encoder_layers', '=', '6', 'hparams', '.', 'num_decoder_layers', '=', '12', 'hparams', '.', 'num_heads', '=', '8', 'hparams', '.', 'query_shape', '=', '(', '16', ',', '32', ')', 'hparams', '.', 'memory_flange', '=', '(', '16', ',', '32', ')', 'return', 'hparams'] | Set of hyperparameters. | ['Set', 'of', 'hyperparameters', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer_2d.py#L829-L838 |
1,943 | thespacedoctor/tastic | tastic/tastic.py | baseClass.to_string | def to_string(
self,
indentLevel=1,
title=True,
tags=None,
projects=None,
tasks=None,
notes=None):
"""*convert this taskpaper object to a string*
**Key Arguments:**
- ``indentLevel`` -- the level of the indent for this object. Default *1*.
- ``title`` -- print the title of the taskpaper object alongside the contents. Default *True*
- ``tags`` -- replace tags with these tags. Default *None*
- ``projects`` -- replace projects with these projects, pass empty list to delete all projects. Default *None*
- ``tasks`` -- replace tasks with these ones, pass empty list to delete all tasks. Default *None*
- ``notes`` -- replace notes with these ones, pass empty list to delete all notes. Default *None*
**Return:**
- ``objectString`` -- the taskpaper object as a string
**Usage:**
If we have the *archive* project from a taskpaper document, we can convert it to a string using:
.. code-block:: python
print archiveProject.to_string()
.. code-block:: text
Archive:
- and a third task @done(2016-09-04) @project(parent project / child-project)
- and a forth task @done(2016-09-04) @project(parent project / child-project)
- fill the kettle @done(2016-09-04) @project(parent project / make coffee)
- boil the kettle @done(2016-09-04) @project(parent project / make coffee)
"""
indent = indentLevel * "\t"
objectString = ""
if title:
try:
# NONE DOCUMENT OBJECTS
objectString += self.title
except:
pass
try:
if tags:
tagString = (" @").join(tags)
else:
tagString = (" @").join(self.tags)
if len(tagString):
objectString += " @" + tagString
except:
pass
try:
if not notes:
notes = self.notes
for n in notes:
if len(n.title.strip()):
if not self.parent and len(objectString) == 0:
objectString += indent + n.title.strip() + n.content
else:
objectString += "\n" + indent + n.title.strip() + n.content
except:
pass
try:
if not tasks:
tasks = self.tasks
for t in tasks:
objectString += "\n" + indent + t.to_string(indentLevel + 1)
except:
pass
try:
if not projects:
projects = self.projects
for p in projects:
objectString += "\n" + indent + p.to_string(indentLevel + 1)
except:
pass
try:
objectString += "\n" + indent + self.searches
except:
pass
return objectString.strip() | python | def to_string(
self,
indentLevel=1,
title=True,
tags=None,
projects=None,
tasks=None,
notes=None):
"""*convert this taskpaper object to a string*
**Key Arguments:**
- ``indentLevel`` -- the level of the indent for this object. Default *1*.
- ``title`` -- print the title of the taskpaper object alongside the contents. Default *True*
- ``tags`` -- replace tags with these tags. Default *None*
- ``projects`` -- replace projects with these projects, pass empty list to delete all projects. Default *None*
- ``tasks`` -- replace tasks with these ones, pass empty list to delete all tasks. Default *None*
- ``notes`` -- replace notes with these ones, pass empty list to delete all notes. Default *None*
**Return:**
- ``objectString`` -- the taskpaper object as a string
**Usage:**
If we have the *archive* project from a taskpaper document, we can convert it to a string using:
.. code-block:: python
print archiveProject.to_string()
.. code-block:: text
Archive:
- and a third task @done(2016-09-04) @project(parent project / child-project)
- and a forth task @done(2016-09-04) @project(parent project / child-project)
- fill the kettle @done(2016-09-04) @project(parent project / make coffee)
- boil the kettle @done(2016-09-04) @project(parent project / make coffee)
"""
indent = indentLevel * "\t"
objectString = ""
if title:
try:
# NONE DOCUMENT OBJECTS
objectString += self.title
except:
pass
try:
if tags:
tagString = (" @").join(tags)
else:
tagString = (" @").join(self.tags)
if len(tagString):
objectString += " @" + tagString
except:
pass
try:
if not notes:
notes = self.notes
for n in notes:
if len(n.title.strip()):
if not self.parent and len(objectString) == 0:
objectString += indent + n.title.strip() + n.content
else:
objectString += "\n" + indent + n.title.strip() + n.content
except:
pass
try:
if not tasks:
tasks = self.tasks
for t in tasks:
objectString += "\n" + indent + t.to_string(indentLevel + 1)
except:
pass
try:
if not projects:
projects = self.projects
for p in projects:
objectString += "\n" + indent + p.to_string(indentLevel + 1)
except:
pass
try:
objectString += "\n" + indent + self.searches
except:
pass
return objectString.strip() | ['def', 'to_string', '(', 'self', ',', 'indentLevel', '=', '1', ',', 'title', '=', 'True', ',', 'tags', '=', 'None', ',', 'projects', '=', 'None', ',', 'tasks', '=', 'None', ',', 'notes', '=', 'None', ')', ':', 'indent', '=', 'indentLevel', '*', '"\\t"', 'objectString', '=', '""', 'if', 'title', ':', 'try', ':', '# NONE DOCUMENT OBJECTS', 'objectString', '+=', 'self', '.', 'title', 'except', ':', 'pass', 'try', ':', 'if', 'tags', ':', 'tagString', '=', '(', '" @"', ')', '.', 'join', '(', 'tags', ')', 'else', ':', 'tagString', '=', '(', '" @"', ')', '.', 'join', '(', 'self', '.', 'tags', ')', 'if', 'len', '(', 'tagString', ')', ':', 'objectString', '+=', '" @"', '+', 'tagString', 'except', ':', 'pass', 'try', ':', 'if', 'not', 'notes', ':', 'notes', '=', 'self', '.', 'notes', 'for', 'n', 'in', 'notes', ':', 'if', 'len', '(', 'n', '.', 'title', '.', 'strip', '(', ')', ')', ':', 'if', 'not', 'self', '.', 'parent', 'and', 'len', '(', 'objectString', ')', '==', '0', ':', 'objectString', '+=', 'indent', '+', 'n', '.', 'title', '.', 'strip', '(', ')', '+', 'n', '.', 'content', 'else', ':', 'objectString', '+=', '"\\n"', '+', 'indent', '+', 'n', '.', 'title', '.', 'strip', '(', ')', '+', 'n', '.', 'content', 'except', ':', 'pass', 'try', ':', 'if', 'not', 'tasks', ':', 'tasks', '=', 'self', '.', 'tasks', 'for', 't', 'in', 'tasks', ':', 'objectString', '+=', '"\\n"', '+', 'indent', '+', 't', '.', 'to_string', '(', 'indentLevel', '+', '1', ')', 'except', ':', 'pass', 'try', ':', 'if', 'not', 'projects', ':', 'projects', '=', 'self', '.', 'projects', 'for', 'p', 'in', 'projects', ':', 'objectString', '+=', '"\\n"', '+', 'indent', '+', 'p', '.', 'to_string', '(', 'indentLevel', '+', '1', ')', 'except', ':', 'pass', 'try', ':', 'objectString', '+=', '"\\n"', '+', 'indent', '+', 'self', '.', 'searches', 'except', ':', 'pass', 'return', 'objectString', '.', 'strip', '(', ')'] | *convert this taskpaper object to a string*
**Key Arguments:**
- ``indentLevel`` -- the level of the indent for this object. Default *1*.
- ``title`` -- print the title of the taskpaper object alongside the contents. Default *True*
- ``tags`` -- replace tags with these tags. Default *None*
- ``projects`` -- replace projects with these projects, pass empty list to delete all projects. Default *None*
- ``tasks`` -- replace tasks with these ones, pass empty list to delete all tasks. Default *None*
- ``notes`` -- replace notes with these ones, pass empty list to delete all notes. Default *None*
**Return:**
- ``objectString`` -- the taskpaper object as a string
**Usage:**
If we have the *archive* project from a taskpaper document, we can convert it to a string using:
.. code-block:: python
print archiveProject.to_string()
.. code-block:: text
Archive:
- and a third task @done(2016-09-04) @project(parent project / child-project)
- and a forth task @done(2016-09-04) @project(parent project / child-project)
- fill the kettle @done(2016-09-04) @project(parent project / make coffee)
- boil the kettle @done(2016-09-04) @project(parent project / make coffee) | ['*', 'convert', 'this', 'taskpaper', 'object', 'to', 'a', 'string', '*'] | train | https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/tastic.py#L315-L406 |
1,944 | apple/turicreate | src/unity/python/turicreate/toolkits/_internal_utils.py | _validate_data | def _validate_data(dataset, target, features=None, validation_set='auto'):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError(
"Invalid feature %s: Feature names must be of type str" % x)
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set | python | def _validate_data(dataset, target, features=None, validation_set='auto'):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
if not all([isinstance(x, str) for x in features]):
raise TypeError(
"Invalid feature %s: Feature names must be of type str" % x)
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set | ['def', '_validate_data', '(', 'dataset', ',', 'target', ',', 'features', '=', 'None', ',', 'validation_set', '=', "'auto'", ')', ':', '_raise_error_if_not_sframe', '(', 'dataset', ',', '"training dataset"', ')', '# Determine columns to keep', 'if', 'features', 'is', 'None', ':', 'features', '=', '[', 'feat', 'for', 'feat', 'in', 'dataset', '.', 'column_names', '(', ')', 'if', 'feat', '!=', 'target', ']', 'if', 'not', 'hasattr', '(', 'features', ',', "'__iter__'", ')', ':', 'raise', 'TypeError', '(', '"Input \'features\' must be a list."', ')', 'if', 'not', 'all', '(', '[', 'isinstance', '(', 'x', ',', 'str', ')', 'for', 'x', 'in', 'features', ']', ')', ':', 'raise', 'TypeError', '(', '"Invalid feature %s: Feature names must be of type str"', '%', 'x', ')', '# Check validation_set argument', 'if', 'isinstance', '(', 'validation_set', ',', 'str', ')', ':', "# Only string value allowed is 'auto'", 'if', 'validation_set', '!=', "'auto'", ':', 'raise', 'TypeError', '(', "'Unrecognized value for validation_set.'", ')', 'elif', 'isinstance', '(', 'validation_set', ',', '_SFrame', ')', ':', '# Attempt to append the two datasets together to check schema', 'validation_set', '.', 'head', '(', ')', '.', 'append', '(', 'dataset', '.', 'head', '(', ')', ')', '# Reduce validation set to requested columns', 'validation_set', '=', '_toolkits_select_columns', '(', 'validation_set', ',', 'features', '+', '[', 'target', ']', ')', 'elif', 'not', 'validation_set', 'is', 'None', ':', 'raise', 'TypeError', '(', '"validation_set must be either \'auto\', None, or an "', '"SFrame matching the training data."', ')', '# Reduce training set to requested columns', 'dataset', '=', '_toolkits_select_columns', '(', 'dataset', ',', 'features', '+', '[', 'target', ']', ')', 'return', 'dataset', ',', 'validation_set'] | Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception. | ['Validate', 'and', 'canonicalize', 'training', 'and', 'validation', 'data', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_internal_utils.py#L563-L625 |
1,945 | datadotworld/data.world-py | datadotworld/client/_swagger/apis/insights_api.py | InsightsApi.get_insights_for_project | def get_insights_for_project(self, project_owner, project_id, **kwargs):
"""
Get insights for project.
Get insights for a project.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_insights_for_project(project_owner, project_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
:param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
:param str limit: Maximum number of items to include in a page of results.
:param str next: Token from previous result page to be used when requesting a subsequent page.
:return: PaginatedInsightResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs)
else:
(data) = self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs)
return data | python | def get_insights_for_project(self, project_owner, project_id, **kwargs):
"""
Get insights for project.
Get insights for a project.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_insights_for_project(project_owner, project_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
:param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
:param str limit: Maximum number of items to include in a page of results.
:param str next: Token from previous result page to be used when requesting a subsequent page.
:return: PaginatedInsightResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs)
else:
(data) = self.get_insights_for_project_with_http_info(project_owner, project_id, **kwargs)
return data | ['def', 'get_insights_for_project', '(', 'self', ',', 'project_owner', ',', 'project_id', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'callback'", ')', ':', 'return', 'self', '.', 'get_insights_for_project_with_http_info', '(', 'project_owner', ',', 'project_id', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'get_insights_for_project_with_http_info', '(', 'project_owner', ',', 'project_id', ',', '*', '*', 'kwargs', ')', 'return', 'data'] | Get insights for project.
Get insights for a project.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_insights_for_project(project_owner, project_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
:param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
:param str limit: Maximum number of items to include in a page of results.
:param str next: Token from previous result page to be used when requesting a subsequent page.
:return: PaginatedInsightResults
If the method is called asynchronously,
returns the request thread. | ['Get', 'insights', 'for', 'project', '.', 'Get', 'insights', 'for', 'a', 'project', '.', 'This', 'method', 'makes', 'a', 'synchronous', 'HTTP', 'request', 'by', 'default', '.', 'To', 'make', 'an', 'asynchronous', 'HTTP', 'request', 'please', 'define', 'a', 'callback', 'function', 'to', 'be', 'invoked', 'when', 'receiving', 'the', 'response', '.', '>>>', 'def', 'callback_function', '(', 'response', ')', ':', '>>>', 'pprint', '(', 'response', ')', '>>>', '>>>', 'thread', '=', 'api', '.', 'get_insights_for_project', '(', 'project_owner', 'project_id', 'callback', '=', 'callback_function', ')'] | train | https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/_swagger/apis/insights_api.py#L404-L431 |
1,946 | talkincode/txradius | txradius/radius/packet.py | Packet.DecodePacket | def DecodePacket(self, packet):
"""Initialize the object from raw packet data. Decode a packet as
received from the network and decode it.
:param packet: raw packet
:type packet: string"""
try:
(self.code, self.id, length, self.authenticator) = \
struct.unpack('!BBH16s', packet[0:20])
except struct.error:
raise PacketError('Packet header is corrupt')
if len(packet) != length:
raise PacketError('Packet has invalid length')
if length > 8192:
raise PacketError('Packet length is too long (%d)' % length)
self.clear()
packet = packet[20:]
while packet:
try:
(key, attrlen) = struct.unpack('!BB', packet[0:2])
except struct.error:
raise PacketError('Attribute header is corrupt')
if attrlen < 2:
raise PacketError(
'Attribute length is too small (%d)' % attrlen)
value = packet[2:attrlen]
if key == 26:
# 26 is the Vendor-Specific attribute
(vendor, subattrs) = self._PktDecodeVendorAttribute(value)
if vendor is None:
self.setdefault(key, []).append(value)
else:
for (k, v) in subattrs:
self.setdefault((vendor, k), []).append(v)
else:
self.setdefault(key, []).append(value)
packet = packet[attrlen:] | python | def DecodePacket(self, packet):
"""Initialize the object from raw packet data. Decode a packet as
received from the network and decode it.
:param packet: raw packet
:type packet: string"""
try:
(self.code, self.id, length, self.authenticator) = \
struct.unpack('!BBH16s', packet[0:20])
except struct.error:
raise PacketError('Packet header is corrupt')
if len(packet) != length:
raise PacketError('Packet has invalid length')
if length > 8192:
raise PacketError('Packet length is too long (%d)' % length)
self.clear()
packet = packet[20:]
while packet:
try:
(key, attrlen) = struct.unpack('!BB', packet[0:2])
except struct.error:
raise PacketError('Attribute header is corrupt')
if attrlen < 2:
raise PacketError(
'Attribute length is too small (%d)' % attrlen)
value = packet[2:attrlen]
if key == 26:
# 26 is the Vendor-Specific attribute
(vendor, subattrs) = self._PktDecodeVendorAttribute(value)
if vendor is None:
self.setdefault(key, []).append(value)
else:
for (k, v) in subattrs:
self.setdefault((vendor, k), []).append(v)
else:
self.setdefault(key, []).append(value)
packet = packet[attrlen:] | ['def', 'DecodePacket', '(', 'self', ',', 'packet', ')', ':', 'try', ':', '(', 'self', '.', 'code', ',', 'self', '.', 'id', ',', 'length', ',', 'self', '.', 'authenticator', ')', '=', 'struct', '.', 'unpack', '(', "'!BBH16s'", ',', 'packet', '[', '0', ':', '20', ']', ')', 'except', 'struct', '.', 'error', ':', 'raise', 'PacketError', '(', "'Packet header is corrupt'", ')', 'if', 'len', '(', 'packet', ')', '!=', 'length', ':', 'raise', 'PacketError', '(', "'Packet has invalid length'", ')', 'if', 'length', '>', '8192', ':', 'raise', 'PacketError', '(', "'Packet length is too long (%d)'", '%', 'length', ')', 'self', '.', 'clear', '(', ')', 'packet', '=', 'packet', '[', '20', ':', ']', 'while', 'packet', ':', 'try', ':', '(', 'key', ',', 'attrlen', ')', '=', 'struct', '.', 'unpack', '(', "'!BB'", ',', 'packet', '[', '0', ':', '2', ']', ')', 'except', 'struct', '.', 'error', ':', 'raise', 'PacketError', '(', "'Attribute header is corrupt'", ')', 'if', 'attrlen', '<', '2', ':', 'raise', 'PacketError', '(', "'Attribute length is too small (%d)'", '%', 'attrlen', ')', 'value', '=', 'packet', '[', '2', ':', 'attrlen', ']', 'if', 'key', '==', '26', ':', '# 26 is the Vendor-Specific attribute', '(', 'vendor', ',', 'subattrs', ')', '=', 'self', '.', '_PktDecodeVendorAttribute', '(', 'value', ')', 'if', 'vendor', 'is', 'None', ':', 'self', '.', 'setdefault', '(', 'key', ',', '[', ']', ')', '.', 'append', '(', 'value', ')', 'else', ':', 'for', '(', 'k', ',', 'v', ')', 'in', 'subattrs', ':', 'self', '.', 'setdefault', '(', '(', 'vendor', ',', 'k', ')', ',', '[', ']', ')', '.', 'append', '(', 'v', ')', 'else', ':', 'self', '.', 'setdefault', '(', 'key', ',', '[', ']', ')', '.', 'append', '(', 'value', ')', 'packet', '=', 'packet', '[', 'attrlen', ':', ']'] | Initialize the object from raw packet data. Decode a packet as
received from the network and decode it.
:param packet: raw packet
:type packet: string | ['Initialize', 'the', 'object', 'from', 'raw', 'packet', 'data', '.', 'Decode', 'a', 'packet', 'as', 'received', 'from', 'the', 'network', 'and', 'decode', 'it', '.'] | train | https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L308-L350 |
1,947 | SecurityInnovation/PGPy | pgpy/pgp.py | PGPKey.revoker | def revoker(self, revoker, **prefs):
"""
Generate a signature that specifies another key as being valid for revoking this key.
:param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
:type revoker: :py:obj:`PGPKey`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoker`.
:keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
this has no other effect.
:type sensitive: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm, hash_algo, self.fingerprint.keyid)
# signature options that only make sense when adding a revocation key
sensitive = prefs.pop('sensitive', False)
keyclass = RevocationKeyClass.Normal | (RevocationKeyClass.Sensitive if sensitive else 0x00)
sig._signature.subpackets.addnew('RevocationKey',
hashed=True,
algorithm=revoker.key_algorithm,
fingerprint=revoker.fingerprint,
keyclass=keyclass)
# revocation keys should really not be revocable themselves
prefs['revocable'] = False
return self._sign(self, sig, **prefs) | python | def revoker(self, revoker, **prefs):
"""
Generate a signature that specifies another key as being valid for revoking this key.
:param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
:type revoker: :py:obj:`PGPKey`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoker`.
:keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
this has no other effect.
:type sensitive: ``bool``
"""
hash_algo = prefs.pop('hash', None)
sig = PGPSignature.new(SignatureType.DirectlyOnKey, self.key_algorithm, hash_algo, self.fingerprint.keyid)
# signature options that only make sense when adding a revocation key
sensitive = prefs.pop('sensitive', False)
keyclass = RevocationKeyClass.Normal | (RevocationKeyClass.Sensitive if sensitive else 0x00)
sig._signature.subpackets.addnew('RevocationKey',
hashed=True,
algorithm=revoker.key_algorithm,
fingerprint=revoker.fingerprint,
keyclass=keyclass)
# revocation keys should really not be revocable themselves
prefs['revocable'] = False
return self._sign(self, sig, **prefs) | ['def', 'revoker', '(', 'self', ',', 'revoker', ',', '*', '*', 'prefs', ')', ':', 'hash_algo', '=', 'prefs', '.', 'pop', '(', "'hash'", ',', 'None', ')', 'sig', '=', 'PGPSignature', '.', 'new', '(', 'SignatureType', '.', 'DirectlyOnKey', ',', 'self', '.', 'key_algorithm', ',', 'hash_algo', ',', 'self', '.', 'fingerprint', '.', 'keyid', ')', '# signature options that only make sense when adding a revocation key', 'sensitive', '=', 'prefs', '.', 'pop', '(', "'sensitive'", ',', 'False', ')', 'keyclass', '=', 'RevocationKeyClass', '.', 'Normal', '|', '(', 'RevocationKeyClass', '.', 'Sensitive', 'if', 'sensitive', 'else', '0x00', ')', 'sig', '.', '_signature', '.', 'subpackets', '.', 'addnew', '(', "'RevocationKey'", ',', 'hashed', '=', 'True', ',', 'algorithm', '=', 'revoker', '.', 'key_algorithm', ',', 'fingerprint', '=', 'revoker', '.', 'fingerprint', ',', 'keyclass', '=', 'keyclass', ')', '# revocation keys should really not be revocable themselves', 'prefs', '[', "'revocable'", ']', '=', 'False', 'return', 'self', '.', '_sign', '(', 'self', ',', 'sig', ',', '*', '*', 'prefs', ')'] | Generate a signature that specifies another key as being valid for revoking this key.
:param revoker: The :py:obj:`PGPKey` to specify as a valid revocation key.
:type revoker: :py:obj:`PGPKey`
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.revoker`.
:keyword sensitive: If ``True``, this sets the sensitive flag on the RevocationKey subpacket. Currently,
this has no other effect.
:type sensitive: ``bool`` | ['Generate', 'a', 'signature', 'that', 'specifies', 'another', 'key', 'as', 'being', 'valid', 'for', 'revoking', 'this', 'key', '.'] | train | https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L2015-L2048 |
1,948 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py | brocade_xstp_ext.get_stp_mst_detail_output_msti_port_oper_bpdu_guard | def get_stp_mst_detail_output_msti_port_oper_bpdu_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard")
oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_stp_mst_detail_output_msti_port_oper_bpdu_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard")
oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_stp_mst_detail_output_msti_port_oper_bpdu_guard', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_stp_mst_detail', '=', 'ET', '.', 'Element', '(', '"get_stp_mst_detail"', ')', 'config', '=', 'get_stp_mst_detail', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_stp_mst_detail', ',', '"output"', ')', 'msti', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"msti"', ')', 'instance_id_key', '=', 'ET', '.', 'SubElement', '(', 'msti', ',', '"instance-id"', ')', 'instance_id_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'instance_id'", ')', 'port', '=', 'ET', '.', 'SubElement', '(', 'msti', ',', '"port"', ')', 'oper_bpdu_guard', '=', 'ET', '.', 'SubElement', '(', 'port', ',', '"oper-bpdu-guard"', ')', 'oper_bpdu_guard', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'oper_bpdu_guard'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4946-L4961 |
1,949 | apple/turicreate | src/unity/python/turicreate/toolkits/recommender/util.py | _Recommender.evaluate_rmse | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | python | def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | ['def', 'evaluate_rmse', '(', 'self', ',', 'dataset', ',', 'target', ')', ':', 'assert', 'target', 'in', 'dataset', '.', 'column_names', '(', ')', ',', "'Provided dataset must contain a target column with the same \\\n name as the target used during training.'", 'y', '=', 'dataset', '[', 'target', ']', 'yhat', '=', 'self', '.', 'predict', '(', 'dataset', ')', 'user_column', '=', 'self', '.', 'user_id', 'item_column', '=', 'self', '.', 'item_id', 'assert', 'user_column', 'in', 'dataset', '.', 'column_names', '(', ')', 'and', 'item_column', 'in', 'dataset', '.', 'column_names', '(', ')', ',', "'Provided data set must have a column pertaining to user ids and \\\n item ids, similar to what we had during training.'", 'result', '=', 'dataset', '[', '[', 'user_column', ',', 'item_column', ']', ']', 'result', '[', "'sq_error'", ']', '=', '(', 'y', '-', 'yhat', ')', '*', '(', 'y', '-', 'yhat', ')', 'rmse_by_user', '=', 'result', '.', 'groupby', '(', 'user_column', ',', '{', "'rmse'", ':', '_turicreate', '.', 'aggregate', '.', 'AVG', '(', "'sq_error'", ')', ',', "'count'", ':', '_turicreate', '.', 'aggregate', '.', 'COUNT', '}', ')', 'rmse_by_user', '[', "'rmse'", ']', '=', 'rmse_by_user', '[', "'rmse'", ']', '.', 'apply', '(', 'lambda', 'x', ':', 'x', '**', '.5', ')', 'rmse_by_item', '=', 'result', '.', 'groupby', '(', 'item_column', ',', '{', "'rmse'", ':', '_turicreate', '.', 'aggregate', '.', 'AVG', '(', "'sq_error'", ')', ',', "'count'", ':', '_turicreate', '.', 'aggregate', '.', 'COUNT', '}', ')', 'rmse_by_item', '[', "'rmse'", ']', '=', 'rmse_by_item', '[', "'rmse'", ']', '.', 'apply', '(', 'lambda', 'x', ':', 'x', '**', '.5', ')', 'overall_rmse', '=', 'result', '[', "'sq_error'", ']', '.', 'mean', '(', ')', '**', '.5', 'return', '{', "'rmse_by_user'", ':', 'rmse_by_user', ',', "'rmse_by_item'", ':', 'rmse_by_item', ',', "'rmse_overall'", ':', 'overall_rmse', '}'] | Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse | ['Evaluate', 'the', 'prediction', 'error', 'for', 'each', 'user', '-', 'item', 'pair', 'in', 'the', 'given', 'data', 'set', '.'] | train | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1576-L1635 |
1,950 | jonathf/chaospy | chaospy/poly/constructor/identifier.py | identify_core | def identify_core(core):
"""Identify the polynomial argument."""
for datatype, identifier in {
int: _identify_scaler,
numpy.int8: _identify_scaler,
numpy.int16: _identify_scaler,
numpy.int32: _identify_scaler,
numpy.int64: _identify_scaler,
float: _identify_scaler,
numpy.float16: _identify_scaler,
numpy.float32: _identify_scaler,
numpy.float64: _identify_scaler,
chaospy.poly.base.Poly: _identify_poly,
dict: _identify_dict,
numpy.ndarray: _identify_iterable,
list: _identify_iterable,
tuple: _identify_iterable,
}.items():
if isinstance(core, datatype):
return identifier(core)
raise TypeError(
"Poly arg: 'core' is not a valid type " + repr(core)) | python | def identify_core(core):
"""Identify the polynomial argument."""
for datatype, identifier in {
int: _identify_scaler,
numpy.int8: _identify_scaler,
numpy.int16: _identify_scaler,
numpy.int32: _identify_scaler,
numpy.int64: _identify_scaler,
float: _identify_scaler,
numpy.float16: _identify_scaler,
numpy.float32: _identify_scaler,
numpy.float64: _identify_scaler,
chaospy.poly.base.Poly: _identify_poly,
dict: _identify_dict,
numpy.ndarray: _identify_iterable,
list: _identify_iterable,
tuple: _identify_iterable,
}.items():
if isinstance(core, datatype):
return identifier(core)
raise TypeError(
"Poly arg: 'core' is not a valid type " + repr(core)) | ['def', 'identify_core', '(', 'core', ')', ':', 'for', 'datatype', ',', 'identifier', 'in', '{', 'int', ':', '_identify_scaler', ',', 'numpy', '.', 'int8', ':', '_identify_scaler', ',', 'numpy', '.', 'int16', ':', '_identify_scaler', ',', 'numpy', '.', 'int32', ':', '_identify_scaler', ',', 'numpy', '.', 'int64', ':', '_identify_scaler', ',', 'float', ':', '_identify_scaler', ',', 'numpy', '.', 'float16', ':', '_identify_scaler', ',', 'numpy', '.', 'float32', ':', '_identify_scaler', ',', 'numpy', '.', 'float64', ':', '_identify_scaler', ',', 'chaospy', '.', 'poly', '.', 'base', '.', 'Poly', ':', '_identify_poly', ',', 'dict', ':', '_identify_dict', ',', 'numpy', '.', 'ndarray', ':', '_identify_iterable', ',', 'list', ':', '_identify_iterable', ',', 'tuple', ':', '_identify_iterable', ',', '}', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'core', ',', 'datatype', ')', ':', 'return', 'identifier', '(', 'core', ')', 'raise', 'TypeError', '(', '"Poly arg: \'core\' is not a valid type "', '+', 'repr', '(', 'core', ')', ')'] | Identify the polynomial argument. | ['Identify', 'the', 'polynomial', 'argument', '.'] | train | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/constructor/identifier.py#L11-L33 |
1,951 | openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._create_list_of_array_controllers | def _create_list_of_array_controllers(self):
"""Creates the list of Array Controller URIs.
:raises: IloCommandNotSupportedError if the ArrayControllers
doesnt have member "Member".
:returns list of ArrayControllers.
"""
headers, array_uri, array_settings = (
self._get_array_controller_resource())
array_uri_links = []
if ('links' in array_settings and
'Member' in array_settings['links']):
array_uri_links = array_settings['links']['Member']
else:
msg = ('"links/Member" section in ArrayControllers'
' does not exist')
raise exception.IloCommandNotSupportedError(msg)
return array_uri_links | python | def _create_list_of_array_controllers(self):
"""Creates the list of Array Controller URIs.
:raises: IloCommandNotSupportedError if the ArrayControllers
doesnt have member "Member".
:returns list of ArrayControllers.
"""
headers, array_uri, array_settings = (
self._get_array_controller_resource())
array_uri_links = []
if ('links' in array_settings and
'Member' in array_settings['links']):
array_uri_links = array_settings['links']['Member']
else:
msg = ('"links/Member" section in ArrayControllers'
' does not exist')
raise exception.IloCommandNotSupportedError(msg)
return array_uri_links | ['def', '_create_list_of_array_controllers', '(', 'self', ')', ':', 'headers', ',', 'array_uri', ',', 'array_settings', '=', '(', 'self', '.', '_get_array_controller_resource', '(', ')', ')', 'array_uri_links', '=', '[', ']', 'if', '(', "'links'", 'in', 'array_settings', 'and', "'Member'", 'in', 'array_settings', '[', "'links'", ']', ')', ':', 'array_uri_links', '=', 'array_settings', '[', "'links'", ']', '[', "'Member'", ']', 'else', ':', 'msg', '=', '(', '\'"links/Member" section in ArrayControllers\'', "' does not exist'", ')', 'raise', 'exception', '.', 'IloCommandNotSupportedError', '(', 'msg', ')', 'return', 'array_uri_links'] | Creates the list of Array Controller URIs.
:raises: IloCommandNotSupportedError if the ArrayControllers
doesnt have member "Member".
:returns list of ArrayControllers. | ['Creates', 'the', 'list', 'of', 'Array', 'Controller', 'URIs', '.'] | train | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L331-L348 |
1,952 | ask/carrot | carrot/backends/pikachu.py | SyncBackend.queue_declare | def queue_declare(self, queue, durable, exclusive, auto_delete,
warn_if_exists=False, arguments=None):
"""Declare a named queue."""
return self.channel.queue_declare(queue=queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments) | python | def queue_declare(self, queue, durable, exclusive, auto_delete,
warn_if_exists=False, arguments=None):
"""Declare a named queue."""
return self.channel.queue_declare(queue=queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments) | ['def', 'queue_declare', '(', 'self', ',', 'queue', ',', 'durable', ',', 'exclusive', ',', 'auto_delete', ',', 'warn_if_exists', '=', 'False', ',', 'arguments', '=', 'None', ')', ':', 'return', 'self', '.', 'channel', '.', 'queue_declare', '(', 'queue', '=', 'queue', ',', 'durable', '=', 'durable', ',', 'exclusive', '=', 'exclusive', ',', 'auto_delete', '=', 'auto_delete', ',', 'arguments', '=', 'arguments', ')'] | Declare a named queue. | ['Declare', 'a', 'named', 'queue', '.'] | train | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/pikachu.py#L87-L95 |
1,953 | mhe/pynrrd | nrrd/parsers.py | parse_number_list | def parse_number_list(x, dtype=None):
"""Parse NRRD number list from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD number list
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking for fractional numbers. If
found, then the string will be converted to :class:`float`, otherwise :class:`int`. Default is to automatically
determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
"""
# Always convert to float and then perform truncation to integer if necessary
number_list = np.array([float(x) for x in x.split()])
if dtype is None:
number_list_trunc = number_list.astype(int)
# If there is no difference between the truncated number list and the number list, then that means that the
# number list was all integers and we can just return that
if np.all((number_list - number_list_trunc) == 0):
number_list = number_list_trunc
elif dtype == int:
number_list = number_list.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return number_list | python | def parse_number_list(x, dtype=None):
"""Parse NRRD number list from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD number list
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking for fractional numbers. If
found, then the string will be converted to :class:`float`, otherwise :class:`int`. Default is to automatically
determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
"""
# Always convert to float and then perform truncation to integer if necessary
number_list = np.array([float(x) for x in x.split()])
if dtype is None:
number_list_trunc = number_list.astype(int)
# If there is no difference between the truncated number list and the number list, then that means that the
# number list was all integers and we can just return that
if np.all((number_list - number_list_trunc) == 0):
number_list = number_list_trunc
elif dtype == int:
number_list = number_list.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return number_list | ['def', 'parse_number_list', '(', 'x', ',', 'dtype', '=', 'None', ')', ':', '# Always convert to float and then perform truncation to integer if necessary', 'number_list', '=', 'np', '.', 'array', '(', '[', 'float', '(', 'x', ')', 'for', 'x', 'in', 'x', '.', 'split', '(', ')', ']', ')', 'if', 'dtype', 'is', 'None', ':', 'number_list_trunc', '=', 'number_list', '.', 'astype', '(', 'int', ')', '# If there is no difference between the truncated number list and the number list, then that means that the', '# number list was all integers and we can just return that', 'if', 'np', '.', 'all', '(', '(', 'number_list', '-', 'number_list_trunc', ')', '==', '0', ')', ':', 'number_list', '=', 'number_list_trunc', 'elif', 'dtype', '==', 'int', ':', 'number_list', '=', 'number_list', '.', 'astype', '(', 'int', ')', 'elif', 'dtype', '!=', 'float', ':', 'raise', 'NRRDError', '(', "'dtype should be None for automatic type detection, float or int'", ')', 'return', 'number_list'] | Parse NRRD number list from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD number list
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking for fractional numbers. If
found, then the string will be converted to :class:`float`, otherwise :class:`int`. Default is to automatically
determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string | ['Parse', 'NRRD', 'number', 'list', 'from', 'string', 'into', '(', 'N', ')', ':', 'class', ':', 'numpy', '.', 'ndarray', '.'] | train | https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L168-L204 |
1,954 | althonos/fs.sshfs | fs/sshfs/sshfs.py | SSHFS._exec_command | def _exec_command(self, cmd):
"""Run a command on the remote SSH server.
Returns:
bytes: the output of the command, if it didn't fail
None: if the error pipe of the command was not empty
"""
_, out, err = self._client.exec_command(cmd, timeout=self._timeout)
return out.read().strip() if not err.read().strip() else None | python | def _exec_command(self, cmd):
"""Run a command on the remote SSH server.
Returns:
bytes: the output of the command, if it didn't fail
None: if the error pipe of the command was not empty
"""
_, out, err = self._client.exec_command(cmd, timeout=self._timeout)
return out.read().strip() if not err.read().strip() else None | ['def', '_exec_command', '(', 'self', ',', 'cmd', ')', ':', '_', ',', 'out', ',', 'err', '=', 'self', '.', '_client', '.', 'exec_command', '(', 'cmd', ',', 'timeout', '=', 'self', '.', '_timeout', ')', 'return', 'out', '.', 'read', '(', ')', '.', 'strip', '(', ')', 'if', 'not', 'err', '.', 'read', '(', ')', '.', 'strip', '(', ')', 'else', 'None'] | Run a command on the remote SSH server.
Returns:
bytes: the output of the command, if it didn't fail
None: if the error pipe of the command was not empty | ['Run', 'a', 'command', 'on', 'the', 'remote', 'SSH', 'server', '.'] | train | https://github.com/althonos/fs.sshfs/blob/773cbdb6bceac5e00cf5785b6ffad6dc4574d29c/fs/sshfs/sshfs.py#L311-L319 |
1,955 | Azure/blobxfer | blobxfer/operations/md5.py | compute_md5_for_file_asbase64 | def compute_md5_for_file_asbase64(
filename, pagealign=False, start=None, end=None, blocksize=65536):
# type: (str, bool, int, int, int) -> str
"""Compute MD5 hash for file and encode as Base64
:param str filename: file to compute MD5 for
:param bool pagealign: page align data
:param int start: file start offset
:param int end: file end offset
:param int blocksize: block size
:rtype: str
:return: MD5 for file encoded as Base64
"""
hasher = blobxfer.util.new_md5_hasher()
with open(filename, 'rb') as filedesc:
if start is not None:
filedesc.seek(start)
curr = start
else:
curr = 0
while True:
if end is not None and curr + blocksize > end:
blocksize = end - curr
if blocksize == 0:
break
buf = filedesc.read(blocksize)
if not buf:
break
buflen = len(buf)
if pagealign and buflen < blocksize:
aligned = blobxfer.util.page_align_content_length(buflen)
if aligned != buflen:
buf = buf.ljust(aligned, b'\0')
hasher.update(buf)
curr += blocksize
return blobxfer.util.base64_encode_as_string(hasher.digest()) | python | def compute_md5_for_file_asbase64(
filename, pagealign=False, start=None, end=None, blocksize=65536):
# type: (str, bool, int, int, int) -> str
"""Compute MD5 hash for file and encode as Base64
:param str filename: file to compute MD5 for
:param bool pagealign: page align data
:param int start: file start offset
:param int end: file end offset
:param int blocksize: block size
:rtype: str
:return: MD5 for file encoded as Base64
"""
hasher = blobxfer.util.new_md5_hasher()
with open(filename, 'rb') as filedesc:
if start is not None:
filedesc.seek(start)
curr = start
else:
curr = 0
while True:
if end is not None and curr + blocksize > end:
blocksize = end - curr
if blocksize == 0:
break
buf = filedesc.read(blocksize)
if not buf:
break
buflen = len(buf)
if pagealign and buflen < blocksize:
aligned = blobxfer.util.page_align_content_length(buflen)
if aligned != buflen:
buf = buf.ljust(aligned, b'\0')
hasher.update(buf)
curr += blocksize
return blobxfer.util.base64_encode_as_string(hasher.digest()) | ['def', 'compute_md5_for_file_asbase64', '(', 'filename', ',', 'pagealign', '=', 'False', ',', 'start', '=', 'None', ',', 'end', '=', 'None', ',', 'blocksize', '=', '65536', ')', ':', '# type: (str, bool, int, int, int) -> str', 'hasher', '=', 'blobxfer', '.', 'util', '.', 'new_md5_hasher', '(', ')', 'with', 'open', '(', 'filename', ',', "'rb'", ')', 'as', 'filedesc', ':', 'if', 'start', 'is', 'not', 'None', ':', 'filedesc', '.', 'seek', '(', 'start', ')', 'curr', '=', 'start', 'else', ':', 'curr', '=', '0', 'while', 'True', ':', 'if', 'end', 'is', 'not', 'None', 'and', 'curr', '+', 'blocksize', '>', 'end', ':', 'blocksize', '=', 'end', '-', 'curr', 'if', 'blocksize', '==', '0', ':', 'break', 'buf', '=', 'filedesc', '.', 'read', '(', 'blocksize', ')', 'if', 'not', 'buf', ':', 'break', 'buflen', '=', 'len', '(', 'buf', ')', 'if', 'pagealign', 'and', 'buflen', '<', 'blocksize', ':', 'aligned', '=', 'blobxfer', '.', 'util', '.', 'page_align_content_length', '(', 'buflen', ')', 'if', 'aligned', '!=', 'buflen', ':', 'buf', '=', 'buf', '.', 'ljust', '(', 'aligned', ',', "b'\\0'", ')', 'hasher', '.', 'update', '(', 'buf', ')', 'curr', '+=', 'blocksize', 'return', 'blobxfer', '.', 'util', '.', 'base64_encode_as_string', '(', 'hasher', '.', 'digest', '(', ')', ')'] | Compute MD5 hash for file and encode as Base64
:param str filename: file to compute MD5 for
:param bool pagealign: page align data
:param int start: file start offset
:param int end: file end offset
:param int blocksize: block size
:rtype: str
:return: MD5 for file encoded as Base64 | ['Compute', 'MD5', 'hash', 'for', 'file', 'and', 'encode', 'as', 'Base64', ':', 'param', 'str', 'filename', ':', 'file', 'to', 'compute', 'MD5', 'for', ':', 'param', 'bool', 'pagealign', ':', 'page', 'align', 'data', ':', 'param', 'int', 'start', ':', 'file', 'start', 'offset', ':', 'param', 'int', 'end', ':', 'file', 'end', 'offset', ':', 'param', 'int', 'blocksize', ':', 'block', 'size', ':', 'rtype', ':', 'str', ':', 'return', ':', 'MD5', 'for', 'file', 'encoded', 'as', 'Base64'] | train | https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/md5.py#L50-L84 |
1,956 | grampajoe/pymosh | pymosh/riff.py | RiffIndexList.find_all | def find_all(self, header, list_type=None):
"""Find all direct children with header and optional list type."""
found = []
for chunk in self:
if chunk.header == header and (not list_type or (header in
list_headers and chunk.type == list_type)):
found.append(chunk)
return found | python | def find_all(self, header, list_type=None):
"""Find all direct children with header and optional list type."""
found = []
for chunk in self:
if chunk.header == header and (not list_type or (header in
list_headers and chunk.type == list_type)):
found.append(chunk)
return found | ['def', 'find_all', '(', 'self', ',', 'header', ',', 'list_type', '=', 'None', ')', ':', 'found', '=', '[', ']', 'for', 'chunk', 'in', 'self', ':', 'if', 'chunk', '.', 'header', '==', 'header', 'and', '(', 'not', 'list_type', 'or', '(', 'header', 'in', 'list_headers', 'and', 'chunk', '.', 'type', '==', 'list_type', ')', ')', ':', 'found', '.', 'append', '(', 'chunk', ')', 'return', 'found'] | Find all direct children with header and optional list type. | ['Find', 'all', 'direct', 'children', 'with', 'header', 'and', 'optional', 'list', 'type', '.'] | train | https://github.com/grampajoe/pymosh/blob/2a17a0271fda939528edc31572940d3b676f8a47/pymosh/riff.py#L117-L124 |
1,957 | hustlzp/permission | permission/permission.py | Rule.run | def run(self):
"""Run self.rules_list.
Return True if one rule channel has been passed.
Otherwise return False and the deny() method of the last failed rule.
"""
failed_result = None
for rule in self.rules_list:
for check, deny in rule:
if not check():
failed_result = (False, deny)
break
else:
return (True, None)
return failed_result | python | def run(self):
"""Run self.rules_list.
Return True if one rule channel has been passed.
Otherwise return False and the deny() method of the last failed rule.
"""
failed_result = None
for rule in self.rules_list:
for check, deny in rule:
if not check():
failed_result = (False, deny)
break
else:
return (True, None)
return failed_result | ['def', 'run', '(', 'self', ')', ':', 'failed_result', '=', 'None', 'for', 'rule', 'in', 'self', '.', 'rules_list', ':', 'for', 'check', ',', 'deny', 'in', 'rule', ':', 'if', 'not', 'check', '(', ')', ':', 'failed_result', '=', '(', 'False', ',', 'deny', ')', 'break', 'else', ':', 'return', '(', 'True', ',', 'None', ')', 'return', 'failed_result'] | Run self.rules_list.
Return True if one rule channel has been passed.
Otherwise return False and the deny() method of the last failed rule. | ['Run', 'self', '.', 'rules_list', '.'] | train | https://github.com/hustlzp/permission/blob/302a02a775c4cd53f7588ff9c4ce1ca49a0d40bf/permission/permission.py#L98-L112 |
1,958 | StanfordVL/robosuite | robosuite/devices/spacemouse.py | SpaceMouse.run | def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state() | python | def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state() | ['def', 'run', '(', 'self', ')', ':', 't_last_click', '=', '-', '1', 'while', 'True', ':', 'd', '=', 'self', '.', 'device', '.', 'read', '(', '13', ')', 'if', 'd', 'is', 'not', 'None', 'and', 'self', '.', '_enabled', ':', 'if', 'd', '[', '0', ']', '==', '1', ':', '## readings from 6-DoF sensor', 'self', '.', 'y', '=', 'convert', '(', 'd', '[', '1', ']', ',', 'd', '[', '2', ']', ')', 'self', '.', 'x', '=', 'convert', '(', 'd', '[', '3', ']', ',', 'd', '[', '4', ']', ')', 'self', '.', 'z', '=', 'convert', '(', 'd', '[', '5', ']', ',', 'd', '[', '6', ']', ')', '*', '-', '1.0', 'self', '.', 'roll', '=', 'convert', '(', 'd', '[', '7', ']', ',', 'd', '[', '8', ']', ')', 'self', '.', 'pitch', '=', 'convert', '(', 'd', '[', '9', ']', ',', 'd', '[', '10', ']', ')', 'self', '.', 'yaw', '=', 'convert', '(', 'd', '[', '11', ']', ',', 'd', '[', '12', ']', ')', 'self', '.', '_control', '=', '[', 'self', '.', 'x', ',', 'self', '.', 'y', ',', 'self', '.', 'z', ',', 'self', '.', 'roll', ',', 'self', '.', 'pitch', ',', 'self', '.', 'yaw', ',', ']', 'elif', 'd', '[', '0', ']', '==', '3', ':', '## readings from the side buttons', '# press left button', 'if', 'd', '[', '1', ']', '==', '1', ':', 't_click', '=', 'time', '.', 'time', '(', ')', 'elapsed_time', '=', 't_click', '-', 't_last_click', 't_last_click', '=', 't_click', 'self', '.', 'single_click_and_hold', '=', 'True', '# release left button', 'if', 'd', '[', '1', ']', '==', '0', ':', 'self', '.', 'single_click_and_hold', '=', 'False', '# right button is for reset', 'if', 'd', '[', '1', ']', '==', '2', ':', 'self', '.', '_reset_state', '=', '1', 'self', '.', '_enabled', '=', 'False', 'self', '.', '_reset_internal_state', '(', ')'] | Listener method that keeps pulling new messages. | ['Listener', 'method', 'that', 'keeps', 'pulling', 'new', 'messages', '.'] | train | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/devices/spacemouse.py#L155-L199 |
1,959 | SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py | DockerSwarmClient._parse_resources | def _parse_resources(resource_values: dict, resource_name: str) -> dict:
"""Parse resources key.
Args:
resource_values (dict): resource configurations values
resource_name (string): Resource name
Returns:
dict, resources specification
"""
# Initialising empty dictionary
resources = {}
for r_values in resource_values[resource_name]:
if 'limits' in r_values:
for r_key, r_value in \
resource_values[resource_name][r_values].items():
if 'cpu' in r_key:
cpu_value = float(r_value) * 10 ** 9
cpu_key = r_key[:3] + '_limit'
resources[cpu_key] = int(cpu_value)
if 'mem' in r_key:
mem_value = re.sub('M', '', r_value)
mem_key = r_key[:3] + '_limit'
resources[mem_key] = int(mem_value) * 1048576
resources_spec = docker.types.Resources(**resources)
return resources_spec | python | def _parse_resources(resource_values: dict, resource_name: str) -> dict:
"""Parse resources key.
Args:
resource_values (dict): resource configurations values
resource_name (string): Resource name
Returns:
dict, resources specification
"""
# Initialising empty dictionary
resources = {}
for r_values in resource_values[resource_name]:
if 'limits' in r_values:
for r_key, r_value in \
resource_values[resource_name][r_values].items():
if 'cpu' in r_key:
cpu_value = float(r_value) * 10 ** 9
cpu_key = r_key[:3] + '_limit'
resources[cpu_key] = int(cpu_value)
if 'mem' in r_key:
mem_value = re.sub('M', '', r_value)
mem_key = r_key[:3] + '_limit'
resources[mem_key] = int(mem_value) * 1048576
resources_spec = docker.types.Resources(**resources)
return resources_spec | ['def', '_parse_resources', '(', 'resource_values', ':', 'dict', ',', 'resource_name', ':', 'str', ')', '->', 'dict', ':', '# Initialising empty dictionary', 'resources', '=', '{', '}', 'for', 'r_values', 'in', 'resource_values', '[', 'resource_name', ']', ':', 'if', "'limits'", 'in', 'r_values', ':', 'for', 'r_key', ',', 'r_value', 'in', 'resource_values', '[', 'resource_name', ']', '[', 'r_values', ']', '.', 'items', '(', ')', ':', 'if', "'cpu'", 'in', 'r_key', ':', 'cpu_value', '=', 'float', '(', 'r_value', ')', '*', '10', '**', '9', 'cpu_key', '=', 'r_key', '[', ':', '3', ']', '+', "'_limit'", 'resources', '[', 'cpu_key', ']', '=', 'int', '(', 'cpu_value', ')', 'if', "'mem'", 'in', 'r_key', ':', 'mem_value', '=', 're', '.', 'sub', '(', "'M'", ',', "''", ',', 'r_value', ')', 'mem_key', '=', 'r_key', '[', ':', '3', ']', '+', "'_limit'", 'resources', '[', 'mem_key', ']', '=', 'int', '(', 'mem_value', ')', '*', '1048576', 'resources_spec', '=', 'docker', '.', 'types', '.', 'Resources', '(', '*', '*', 'resources', ')', 'return', 'resources_spec'] | Parse resources key.
Args:
resource_values (dict): resource configurations values
resource_name (string): Resource name
Returns:
dict, resources specification | ['Parse', 'resources', 'key', '.'] | train | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L617-L645 |
1,960 | sibirrer/lenstronomy | lenstronomy/Util/util.py | get_axes | def get_axes(x, y):
"""
computes the axis x and y of a given 2d grid
:param x:
:param y:
:return:
"""
n=int(np.sqrt(len(x)))
if n**2 != len(x):
raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(x)))
x_image = x.reshape(n,n)
y_image = y.reshape(n,n)
x_axes = x_image[0,:]
y_axes = y_image[:,0]
return x_axes, y_axes | python | def get_axes(x, y):
"""
computes the axis x and y of a given 2d grid
:param x:
:param y:
:return:
"""
n=int(np.sqrt(len(x)))
if n**2 != len(x):
raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(x)))
x_image = x.reshape(n,n)
y_image = y.reshape(n,n)
x_axes = x_image[0,:]
y_axes = y_image[:,0]
return x_axes, y_axes | ['def', 'get_axes', '(', 'x', ',', 'y', ')', ':', 'n', '=', 'int', '(', 'np', '.', 'sqrt', '(', 'len', '(', 'x', ')', ')', ')', 'if', 'n', '**', '2', '!=', 'len', '(', 'x', ')', ':', 'raise', 'ValueError', '(', '"lenght of input array given as %s is not square of integer number!"', '%', '(', 'len', '(', 'x', ')', ')', ')', 'x_image', '=', 'x', '.', 'reshape', '(', 'n', ',', 'n', ')', 'y_image', '=', 'y', '.', 'reshape', '(', 'n', ',', 'n', ')', 'x_axes', '=', 'x_image', '[', '0', ',', ':', ']', 'y_axes', '=', 'y_image', '[', ':', ',', '0', ']', 'return', 'x_axes', ',', 'y_axes'] | computes the axis x and y of a given 2d grid
:param x:
:param y:
:return: | ['computes', 'the', 'axis', 'x', 'and', 'y', 'of', 'a', 'given', '2d', 'grid', ':', 'param', 'x', ':', ':', 'param', 'y', ':', ':', 'return', ':'] | train | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/util.py#L227-L241 |
1,961 | inveniosoftware/invenio-github | invenio_github/handlers.py | disconnect | def disconnect(remote):
"""Disconnect callback handler for GitHub."""
# User must be authenticated
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
external_method = 'github'
external_ids = [i.id for i in current_user.external_identifiers
if i.method == external_method]
if external_ids:
oauth_unlink_external_id(dict(id=external_ids[0],
method=external_method))
user_id = int(current_user.get_id())
token = RemoteToken.get(user_id, remote.consumer_key)
if token:
extra_data = token.remote_account.extra_data
# Delete the token that we issued for GitHub to deliver webhooks
webhook_token_id = extra_data.get('tokens', {}).get('webhook')
ProviderToken.query.filter_by(id=webhook_token_id).delete()
# Disable GitHub webhooks from our side
db_repos = Repository.query.filter_by(user_id=user_id).all()
# Keep repositories with hooks to pass to the celery task later on
repos_with_hooks = [(r.github_id, r.hook) for r in db_repos if r.hook]
for repo in db_repos:
try:
Repository.disable(user_id=user_id,
github_id=repo.github_id,
name=repo.name)
except NoResultFound:
# If the repository doesn't exist, no action is necessary
pass
db.session.commit()
# Send Celery task for webhooks removal and token revocation
disconnect_github.delay(token.access_token, repos_with_hooks)
# Delete the RemoteAccount (along with the associated RemoteToken)
token.remote_account.delete()
return redirect(url_for('invenio_oauthclient_settings.index')) | python | def disconnect(remote):
"""Disconnect callback handler for GitHub."""
# User must be authenticated
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
external_method = 'github'
external_ids = [i.id for i in current_user.external_identifiers
if i.method == external_method]
if external_ids:
oauth_unlink_external_id(dict(id=external_ids[0],
method=external_method))
user_id = int(current_user.get_id())
token = RemoteToken.get(user_id, remote.consumer_key)
if token:
extra_data = token.remote_account.extra_data
# Delete the token that we issued for GitHub to deliver webhooks
webhook_token_id = extra_data.get('tokens', {}).get('webhook')
ProviderToken.query.filter_by(id=webhook_token_id).delete()
# Disable GitHub webhooks from our side
db_repos = Repository.query.filter_by(user_id=user_id).all()
# Keep repositories with hooks to pass to the celery task later on
repos_with_hooks = [(r.github_id, r.hook) for r in db_repos if r.hook]
for repo in db_repos:
try:
Repository.disable(user_id=user_id,
github_id=repo.github_id,
name=repo.name)
except NoResultFound:
# If the repository doesn't exist, no action is necessary
pass
db.session.commit()
# Send Celery task for webhooks removal and token revocation
disconnect_github.delay(token.access_token, repos_with_hooks)
# Delete the RemoteAccount (along with the associated RemoteToken)
token.remote_account.delete()
return redirect(url_for('invenio_oauthclient_settings.index')) | ['def', 'disconnect', '(', 'remote', ')', ':', '# User must be authenticated', 'if', 'not', 'current_user', '.', 'is_authenticated', ':', 'return', 'current_app', '.', 'login_manager', '.', 'unauthorized', '(', ')', 'external_method', '=', "'github'", 'external_ids', '=', '[', 'i', '.', 'id', 'for', 'i', 'in', 'current_user', '.', 'external_identifiers', 'if', 'i', '.', 'method', '==', 'external_method', ']', 'if', 'external_ids', ':', 'oauth_unlink_external_id', '(', 'dict', '(', 'id', '=', 'external_ids', '[', '0', ']', ',', 'method', '=', 'external_method', ')', ')', 'user_id', '=', 'int', '(', 'current_user', '.', 'get_id', '(', ')', ')', 'token', '=', 'RemoteToken', '.', 'get', '(', 'user_id', ',', 'remote', '.', 'consumer_key', ')', 'if', 'token', ':', 'extra_data', '=', 'token', '.', 'remote_account', '.', 'extra_data', '# Delete the token that we issued for GitHub to deliver webhooks', 'webhook_token_id', '=', 'extra_data', '.', 'get', '(', "'tokens'", ',', '{', '}', ')', '.', 'get', '(', "'webhook'", ')', 'ProviderToken', '.', 'query', '.', 'filter_by', '(', 'id', '=', 'webhook_token_id', ')', '.', 'delete', '(', ')', '# Disable GitHub webhooks from our side', 'db_repos', '=', 'Repository', '.', 'query', '.', 'filter_by', '(', 'user_id', '=', 'user_id', ')', '.', 'all', '(', ')', '# Keep repositories with hooks to pass to the celery task later on', 'repos_with_hooks', '=', '[', '(', 'r', '.', 'github_id', ',', 'r', '.', 'hook', ')', 'for', 'r', 'in', 'db_repos', 'if', 'r', '.', 'hook', ']', 'for', 'repo', 'in', 'db_repos', ':', 'try', ':', 'Repository', '.', 'disable', '(', 'user_id', '=', 'user_id', ',', 'github_id', '=', 'repo', '.', 'github_id', ',', 'name', '=', 'repo', '.', 'name', ')', 'except', 'NoResultFound', ':', "# If the repository doesn't exist, no action is necessary", 'pass', 'db', '.', 'session', '.', 'commit', '(', ')', '# Send Celery task for webhooks removal and token revocation', 'disconnect_github', '.', 'delay', '(', 'token', '.', 'access_token', ',', 'repos_with_hooks', ')', '# Delete the RemoteAccount (along with the associated RemoteToken)', 'token', '.', 'remote_account', '.', 'delete', '(', ')', 'return', 'redirect', '(', 'url_for', '(', "'invenio_oauthclient_settings.index'", ')', ')'] | Disconnect callback handler for GitHub. | ['Disconnect', 'callback', 'handler', 'for', 'GitHub', '.'] | train | https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/handlers.py#L62-L103 |
1,962 | ContextLab/quail | quail/fingerprint.py | stick_perm | def stick_perm(presenter, egg, dist_dict, strategy):
"""Computes weights for one reordering using stick-breaking method"""
# seed RNG
np.random.seed()
# unpack egg
egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg)
# reorder
regg = order_stick(presenter, egg, dist_dict, strategy)
# unpack regg
regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg)
# # get the order
regg_pres = list(regg_pres)
egg_pres = list(egg_pres)
idx = [egg_pres.index(r) for r in regg_pres]
# compute weights
weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict)
# save out the order
orders = idx
return weights, orders | python | def stick_perm(presenter, egg, dist_dict, strategy):
"""Computes weights for one reordering using stick-breaking method"""
# seed RNG
np.random.seed()
# unpack egg
egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg)
# reorder
regg = order_stick(presenter, egg, dist_dict, strategy)
# unpack regg
regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg)
# # get the order
regg_pres = list(regg_pres)
egg_pres = list(egg_pres)
idx = [egg_pres.index(r) for r in regg_pres]
# compute weights
weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict)
# save out the order
orders = idx
return weights, orders | ['def', 'stick_perm', '(', 'presenter', ',', 'egg', ',', 'dist_dict', ',', 'strategy', ')', ':', '# seed RNG', 'np', '.', 'random', '.', 'seed', '(', ')', '# unpack egg', 'egg_pres', ',', 'egg_rec', ',', 'egg_features', ',', 'egg_dist_funcs', '=', 'parse_egg', '(', 'egg', ')', '# reorder', 'regg', '=', 'order_stick', '(', 'presenter', ',', 'egg', ',', 'dist_dict', ',', 'strategy', ')', '# unpack regg', 'regg_pres', ',', 'regg_rec', ',', 'regg_features', ',', 'regg_dist_funcs', '=', 'parse_egg', '(', 'regg', ')', '# # get the order', 'regg_pres', '=', 'list', '(', 'regg_pres', ')', 'egg_pres', '=', 'list', '(', 'egg_pres', ')', 'idx', '=', '[', 'egg_pres', '.', 'index', '(', 'r', ')', 'for', 'r', 'in', 'regg_pres', ']', '# compute weights', 'weights', '=', 'compute_feature_weights_dict', '(', 'list', '(', 'regg_pres', ')', ',', 'list', '(', 'regg_pres', ')', ',', 'list', '(', 'regg_features', ')', ',', 'dist_dict', ')', '# save out the order', 'orders', '=', 'idx', 'return', 'weights', ',', 'orders'] | Computes weights for one reordering using stick-breaking method | ['Computes', 'weights', 'for', 'one', 'reordering', 'using', 'stick', '-', 'breaking', 'method'] | train | https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/fingerprint.py#L577-L603 |
1,963 | uw-it-aca/uw-restclients-pws | uw_pws/__init__.py | PWS.get_person_by_prox_rfid | def get_person_by_prox_rfid(self, prox_rfid):
"""
Returns a restclients.Person object for the given rfid. If the rfid
isn't found, or if there is an error communicating with the IdCard WS,
a DataFailureException will be thrown.
"""
if not self.valid_prox_rfid(prox_rfid):
raise InvalidProxRFID(prox_rfid)
url = "{}.json?{}".format(
CARD_PREFIX, urlencode({"prox_rfid": prox_rfid}))
response = DAO.getURL(url, {"Accept": "application/json"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
if not len(data["Cards"]):
raise DataFailureException(url, 404, "No card found")
regid = data["Cards"][0]["RegID"]
return self.get_person_by_regid(regid) | python | def get_person_by_prox_rfid(self, prox_rfid):
"""
Returns a restclients.Person object for the given rfid. If the rfid
isn't found, or if there is an error communicating with the IdCard WS,
a DataFailureException will be thrown.
"""
if not self.valid_prox_rfid(prox_rfid):
raise InvalidProxRFID(prox_rfid)
url = "{}.json?{}".format(
CARD_PREFIX, urlencode({"prox_rfid": prox_rfid}))
response = DAO.getURL(url, {"Accept": "application/json"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
data = json.loads(response.data)
if not len(data["Cards"]):
raise DataFailureException(url, 404, "No card found")
regid = data["Cards"][0]["RegID"]
return self.get_person_by_regid(regid) | ['def', 'get_person_by_prox_rfid', '(', 'self', ',', 'prox_rfid', ')', ':', 'if', 'not', 'self', '.', 'valid_prox_rfid', '(', 'prox_rfid', ')', ':', 'raise', 'InvalidProxRFID', '(', 'prox_rfid', ')', 'url', '=', '"{}.json?{}"', '.', 'format', '(', 'CARD_PREFIX', ',', 'urlencode', '(', '{', '"prox_rfid"', ':', 'prox_rfid', '}', ')', ')', 'response', '=', 'DAO', '.', 'getURL', '(', 'url', ',', '{', '"Accept"', ':', '"application/json"', '}', ')', 'if', 'response', '.', 'status', '!=', '200', ':', 'raise', 'DataFailureException', '(', 'url', ',', 'response', '.', 'status', ',', 'response', '.', 'data', ')', 'data', '=', 'json', '.', 'loads', '(', 'response', '.', 'data', ')', 'if', 'not', 'len', '(', 'data', '[', '"Cards"', ']', ')', ':', 'raise', 'DataFailureException', '(', 'url', ',', '404', ',', '"No card found"', ')', 'regid', '=', 'data', '[', '"Cards"', ']', '[', '0', ']', '[', '"RegID"', ']', 'return', 'self', '.', 'get_person_by_regid', '(', 'regid', ')'] | Returns a restclients.Person object for the given rfid. If the rfid
isn't found, or if there is an error communicating with the IdCard WS,
a DataFailureException will be thrown. | ['Returns', 'a', 'restclients', '.', 'Person', 'object', 'for', 'the', 'given', 'rfid', '.', 'If', 'the', 'rfid', 'isn', 't', 'found', 'or', 'if', 'there', 'is', 'an', 'error', 'communicating', 'with', 'the', 'IdCard', 'WS', 'a', 'DataFailureException', 'will', 'be', 'thrown', '.'] | train | https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L120-L141 |
1,964 | Lagg/steamodd | steam/items.py | item.equipped | def equipped(self):
""" Returns a dict of classes that have the item equipped and in what slot """
equipped = self._item.get("equipped", [])
# WORKAROUND: 0 is probably an off-by-one error
# WORKAROUND: 65535 actually serves a purpose (according to Valve)
return dict([(eq["class"], eq["slot"]) for eq in equipped if eq["class"] != 0 and eq["slot"] != 65535]) | python | def equipped(self):
""" Returns a dict of classes that have the item equipped and in what slot """
equipped = self._item.get("equipped", [])
# WORKAROUND: 0 is probably an off-by-one error
# WORKAROUND: 65535 actually serves a purpose (according to Valve)
return dict([(eq["class"], eq["slot"]) for eq in equipped if eq["class"] != 0 and eq["slot"] != 65535]) | ['def', 'equipped', '(', 'self', ')', ':', 'equipped', '=', 'self', '.', '_item', '.', 'get', '(', '"equipped"', ',', '[', ']', ')', '# WORKAROUND: 0 is probably an off-by-one error', '# WORKAROUND: 65535 actually serves a purpose (according to Valve)', 'return', 'dict', '(', '[', '(', 'eq', '[', '"class"', ']', ',', 'eq', '[', '"slot"', ']', ')', 'for', 'eq', 'in', 'equipped', 'if', 'eq', '[', '"class"', ']', '!=', '0', 'and', 'eq', '[', '"slot"', ']', '!=', '65535', ']', ')'] | Returns a dict of classes that have the item equipped and in what slot | ['Returns', 'a', 'dict', 'of', 'classes', 'that', 'have', 'the', 'item', 'equipped', 'and', 'in', 'what', 'slot'] | train | https://github.com/Lagg/steamodd/blob/2e9ced4e7a6dbe3e09d5a648450bafc12b937b95/steam/items.py#L306-L312 |
1,965 | bitesofcode/projexui | projexui/widgets/xtreewidget/xtreewidget.py | XTreeWidget.mouseDoubleClickEvent | def mouseDoubleClickEvent(self, event):
"""
Overloads when a mouse press occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent>
"""
item = self.itemAt(event.pos())
column = self.columnAt(event.pos().x())
mid_button = event.button() == QtCore.Qt.MidButton
ctrl_click = event.button() == QtCore.Qt.LeftButton and \
event.modifiers() == QtCore.Qt.ControlModifier
if mid_button or ctrl_click:
self.itemMiddleDoubleClicked.emit(item, column)
elif event.button() == QtCore.Qt.RightButton:
self.itemRightDoubleClicked.emit(item, column)
else:
super(XTreeWidget, self).mouseDoubleClickEvent(event) | python | def mouseDoubleClickEvent(self, event):
"""
Overloads when a mouse press occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent>
"""
item = self.itemAt(event.pos())
column = self.columnAt(event.pos().x())
mid_button = event.button() == QtCore.Qt.MidButton
ctrl_click = event.button() == QtCore.Qt.LeftButton and \
event.modifiers() == QtCore.Qt.ControlModifier
if mid_button or ctrl_click:
self.itemMiddleDoubleClicked.emit(item, column)
elif event.button() == QtCore.Qt.RightButton:
self.itemRightDoubleClicked.emit(item, column)
else:
super(XTreeWidget, self).mouseDoubleClickEvent(event) | ['def', 'mouseDoubleClickEvent', '(', 'self', ',', 'event', ')', ':', 'item', '=', 'self', '.', 'itemAt', '(', 'event', '.', 'pos', '(', ')', ')', 'column', '=', 'self', '.', 'columnAt', '(', 'event', '.', 'pos', '(', ')', '.', 'x', '(', ')', ')', 'mid_button', '=', 'event', '.', 'button', '(', ')', '==', 'QtCore', '.', 'Qt', '.', 'MidButton', 'ctrl_click', '=', 'event', '.', 'button', '(', ')', '==', 'QtCore', '.', 'Qt', '.', 'LeftButton', 'and', 'event', '.', 'modifiers', '(', ')', '==', 'QtCore', '.', 'Qt', '.', 'ControlModifier', 'if', 'mid_button', 'or', 'ctrl_click', ':', 'self', '.', 'itemMiddleDoubleClicked', '.', 'emit', '(', 'item', ',', 'column', ')', 'elif', 'event', '.', 'button', '(', ')', '==', 'QtCore', '.', 'Qt', '.', 'RightButton', ':', 'self', '.', 'itemRightDoubleClicked', '.', 'emit', '(', 'item', ',', 'column', ')', 'else', ':', 'super', '(', 'XTreeWidget', ',', 'self', ')', '.', 'mouseDoubleClickEvent', '(', 'event', ')'] | Overloads when a mouse press occurs. If in editable mode, and the
click occurs on a selected index, then the editor will be created
and no selection change will occur.
:param event | <QMousePressEvent> | ['Overloads', 'when', 'a', 'mouse', 'press', 'occurs', '.', 'If', 'in', 'editable', 'mode', 'and', 'the', 'click', 'occurs', 'on', 'a', 'selected', 'index', 'then', 'the', 'editor', 'will', 'be', 'created', 'and', 'no', 'selection', 'change', 'will', 'occur', '.', ':', 'param', 'event', '|', '<QMousePressEvent', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1263-L1283 |
1,966 | lablup/backend.ai-client-py | src/ai/backend/client/session.py | Session.close | def close(self):
'''
Terminates the session. It schedules the ``close()`` coroutine
of the underlying aiohttp session and then enqueues a sentinel
object to indicate termination. Then it waits until the worker
thread to self-terminate by joining.
'''
if self._closed:
return
self._closed = True
self._worker_thread.work_queue.put(self.aiohttp_session.close())
self._worker_thread.work_queue.put(self.worker_thread.sentinel)
self._worker_thread.join() | python | def close(self):
'''
Terminates the session. It schedules the ``close()`` coroutine
of the underlying aiohttp session and then enqueues a sentinel
object to indicate termination. Then it waits until the worker
thread to self-terminate by joining.
'''
if self._closed:
return
self._closed = True
self._worker_thread.work_queue.put(self.aiohttp_session.close())
self._worker_thread.work_queue.put(self.worker_thread.sentinel)
self._worker_thread.join() | ['def', 'close', '(', 'self', ')', ':', 'if', 'self', '.', '_closed', ':', 'return', 'self', '.', '_closed', '=', 'True', 'self', '.', '_worker_thread', '.', 'work_queue', '.', 'put', '(', 'self', '.', 'aiohttp_session', '.', 'close', '(', ')', ')', 'self', '.', '_worker_thread', '.', 'work_queue', '.', 'put', '(', 'self', '.', 'worker_thread', '.', 'sentinel', ')', 'self', '.', '_worker_thread', '.', 'join', '(', ')'] | Terminates the session. It schedules the ``close()`` coroutine
of the underlying aiohttp session and then enqueues a sentinel
object to indicate termination. Then it waits until the worker
thread to self-terminate by joining. | ['Terminates', 'the', 'session', '.', 'It', 'schedules', 'the', 'close', '()', 'coroutine', 'of', 'the', 'underlying', 'aiohttp', 'session', 'and', 'then', 'enqueues', 'a', 'sentinel', 'object', 'to', 'indicate', 'termination', '.', 'Then', 'it', 'waits', 'until', 'the', 'worker', 'thread', 'to', 'self', '-', 'terminate', 'by', 'joining', '.'] | train | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/session.py#L214-L226 |
1,967 | flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | fmin | def fmin(objective_function, x0, sigma0,
options=None,
args=(),
gradf=None,
restarts=0,
restart_from_best='False',
incpopsize=2,
eval_initial_x=False,
noise_handler=None,
noise_change_sigma_exponent=1,
noise_kappa_exponent=0, # TODO: add max kappa value as parameter
bipop=False):
"""functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin(objective_function, x0, sigma0)``
minimizes `objective_function` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
minimizes `objective_function` up to target function value 1e-5, which
is typically useful for benchmarking.
``fmin(objective_function, x0, sigma0, args=('f',))``
minimizes `objective_function` called with an additional argument ``'f'``.
``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
uses additional options ``ftarget`` and ``popsize``
``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`objective_function`, similar to `esobj.optimize()`.
Arguments
=========
`objective_function`
function to be minimized. Called as ``objective_function(x,
*args)``. `x` is a one-dimensional `numpy.ndarray`.
`objective_function` can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
before the application of the geno-phenotype transformation
according to the ``transformation`` option. It can also be
a string holding a Python expression that is evaluated
to yield the initial guess - this is important in case
restarts are performed so that they start from different
places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy`
object instance, in that case `sigma0` can be ``None``.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4th of the search domain width
(where the optimum is to be expected). The variables in
`objective_function` should be scaled such that they
presumably have similar sensitivity.
See also option `scaling_of_variables`.
`options`
a dictionary with additional options passed to the constructor
of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()``
for a list of available options.
``args=()``
arguments to be used to call the `objective_function`
``gradf``
gradient of f, where ``len(gradf(x, *args)) == len(x)``.
`gradf` is called once in each iteration if
``gradf is not None``.
``restarts=0``
number of restarts with increasing population size, see also
parameter `incpopsize`, implementing the IPOP-CMA-ES restart
strategy, see also parameter `bipop`; to restart from
different points (recommended), pass `x0` as a string.
``restart_from_best=False``
which point to restart from
``incpopsize=2``
multiplier for increasing the population size `popsize` before
each restart
``eval_initial_x=None``
evaluate initial solution, for `None` only with elitist option
``noise_handler=None``
a ``NoiseHandler`` instance or ``None``, a simple usecase is
``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))``
see ``help(cma.NoiseHandler)``.
``noise_change_sigma_exponent=1``
exponent for sigma increment for additional noise treatment
``noise_evaluations_as_kappa``
instead of applying reevaluations, the "number of evaluations"
is (ab)used as scaling factor kappa (experimental).
``bipop``
if True, run as BIPOP-CMA-ES; BIPOP is a special restart
strategy switching between two population sizings - small
(like the default CMA, but with more focused search) and
large (progressively increased as in IPOP). This makes the
algorithm perform well both on functions with many regularly
or irregularly arranged local optima (the latter by frequently
restarting with small populations). For the `bipop` parameter
to actually take effect, also select non-zero number of
(IPOP) restarts; the recommended setting is ``restarts<=9``
and `x0` passed as a string. Note that small-population
restarts do not count into the total restart count.
Optional Arguments
==================
All values in the `options` dictionary are evaluated if they are of
type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for
details. The full list is available via ``cma.CMAOptions()``.
>>> import cma
>>> cma.CMAOptions()
Subsets of options can be displayed, for example like
``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``,
see also class `CMAOptions`.
Return
======
Return the list provided by `CMAEvolutionStrategy.result()` appended
with termination conditions, an `OOOptimizer` and a `BaseDataLogger`::
res = es.result() + (es.stop(), es, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
latter class should be used when full control over the iteration loop
of the optimizer is desired.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01
2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01
3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01
100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02
200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02
300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02
400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02
500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03
600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06
650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution: [ 1. 1.00000001 1. 1.
1. 1.00000001 1.00000002 1.00000003 ...]
std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09
5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The above call is pretty much equivalent with the slightly more
verbose call::
es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
options=options).optimize(cma.fcts.rosen)
The following example calls `fmin` optimizing the Rastrigin function
in 3-D with random initial solution in [-2,2], initial step-size 0.5
and the BIPOP restart strategy (that progressively increases population).
The options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8}
>>>
>>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5,
... options, restarts=9, bipop=True)
(3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.633489455763566e+01 1.0e+00 4.35e-01 4e-01 4e-01
2 14 9.762462950258016e+00 1.2e+00 4.12e-01 4e-01 4e-01
3 21 2.461107851413725e+01 1.4e+00 3.78e-01 3e-01 4e-01
100 700 9.949590571272680e-01 1.7e+00 5.07e-05 3e-07 5e-07
123 861 9.949590570932969e-01 1.3e+00 3.93e-06 9e-09 1e-08
termination on tolfun=1e-11
final/bestever f-value = 9.949591e-01 9.949591e-01
mean solution: [ 9.94958638e-01 -7.19265205e-10 2.09294450e-10]
std deviation: [ 8.71497860e-09 8.58994807e-09 9.85585654e-09]
[...]
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 5342.0 2.114883315350800e+01 1.0e+00 3.42e-02 3e-02 4e-02
2 5351.0 1.810102940125502e+01 1.4e+00 3.79e-02 3e-02 4e-02
3 5360.0 1.340222457448063e+01 1.4e+00 4.58e-02 4e-02 6e-02
50 5783.0 8.631491965616078e-09 1.6e+00 2.01e-04 8e-06 1e-05
termination on ftarget=1e-08 after 4 restarts
final/bestever f-value = 8.316963e-09 8.316963e-09
mean solution: [ -3.10652459e-06 2.77935436e-06 -4.95444519e-06]
std deviation: [ 1.02825265e-05 8.08348144e-06 8.47256408e-06]
In either case, the method::
cma.plot();
(based on `matplotlib.pyplot`) produces a plot of the run and, if
necessary::
cma.show()
shows the plot in a window. Finally::
cma.savefig('myfirstrun') # savefig from matplotlib.pyplot
will save the figure in a png.
We can use the gradient like
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1,
... options = {'ftarget':1e-8,},
... gradf=cma.fcts.grad_rosen,
... )
>>> assert cma.fcts.rosen(res[0]) < 1e-8
>>> assert res[2] < 3600 # 1% are > 3300
>>> assert res[3] < 3600 # 1% are > 3300
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`,
`CMAOptions`, `scipy.optimize.fmin()`
""" # style guides say there should be the above empty line
if 1 < 3: # try: # pass on KeyboardInterrupt
if not objective_function: # cma.fmin(0, 0, 0)
return CMAOptions() # these opts are by definition valid
fmin_options = locals().copy() # archive original options
del fmin_options['objective_function']
del fmin_options['x0']
del fmin_options['sigma0']
del fmin_options['options']
del fmin_options['args']
if options is None:
options = cma_default_options
CMAOptions().check_attributes(options) # might modify options
# checked that no options.ftarget =
opts = CMAOptions(options.copy()).complement()
# BIPOP-related variables:
runs_with_small = 0
small_i = []
large_i = []
popsize0 = None # to be evaluated after the first iteration
maxiter0 = None # to be evaluated after the first iteration
base_evals = 0
irun = 0
best = BestSolution()
while True: # restart loop
sigma_factor = 1
# Adjust the population according to BIPOP after a restart.
if not bipop:
# BIPOP not in use, simply double the previous population
# on restart.
if irun > 0:
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
elif irun == 0:
# Initial run is with "normal" population size; it is
# the large population before first doubling, but its
# budget accounting is the same as in case of small
# population.
poptype = 'small'
elif sum(small_i) < sum(large_i):
# An interweaved run with small population size
poptype = 'small'
runs_with_small += 1 # _Before_ it's used in popsize_lastlarge
sigma_factor = 0.01 ** np.random.uniform() # Local search
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = np.floor(popsize0 * popsize_multiplier ** (np.random.uniform() ** 2))
opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize'])
# print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
else:
# A run with large population size; the population
# doubling is implicit with incpopsize.
poptype = 'large'
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
opts['maxiter'] = maxiter0
# print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
# recover from a CMA object
if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
es = x0
x0 = es.inputargs['x0'] # for the next restarts
if isscalar(sigma0) and isfinite(sigma0) and sigma0 > 0:
es.sigma = sigma0
# debatable whether this makes sense:
sigma0 = es.inputargs['sigma0'] # for the next restarts
if options is not None:
es.opts.set(options)
# ignore further input args and keep original options
else: # default case
if irun and eval(str(fmin_options['restart_from_best'])):
print_warning('CAVE: restart_from_best is often not useful',
verbose=opts['verbose'])
es = CMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts)
else:
es = CMAEvolutionStrategy(x0, sigma_factor * sigma0, opts)
if eval_initial_x or es.opts['CMA_elitist'] == 'initial' \
or (es.opts['CMA_elitist'] and eval_initial_x is None):
x = es.gp.pheno(es.mean,
into_bounds=es.boundary_handler.repair,
archive=es.sent_solutions)
es.best.update([x], es.sent_solutions,
[objective_function(x, *args)], 1)
es.countevals += 1
opts = es.opts # processed options, unambiguous
# a hack:
fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True)
for k in fmin_opts:
# locals() cannot be modified directly, exec won't work
# in 3.x, therefore
fmin_opts.eval(k, loc={'N': es.N,
'popsize': opts['popsize']},
correct_key=False)
append = opts['verb_append'] or es.countiter > 0 or irun > 0
# es.logger is "the same" logger, because the "identity"
# is only determined by the `filenameprefix`
logger = CMADataLogger(opts['verb_filenameprefix'],
opts['verb_log'])
logger.register(es, append).add() # no fitness values here
es.logger = logger
if noise_handler:
noisehandler = noise_handler
noise_handling = True
if fmin_opts['noise_change_sigma_exponent'] > 0:
es.opts['tolfacupx'] = inf
else:
noisehandler = NoiseHandler(es.N, 0)
noise_handling = False
es.noise_handler = noisehandler
# the problem: this assumes that good solutions cannot take longer than bad ones:
# with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
if 1 < 3:
while not es.stop(): # iteration loop
# X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
X, fit = es.ask_and_eval(objective_function, args, gradf=gradf,
evaluations=noisehandler.evaluations,
aggregation=np.median) # treats NaN with resampling
# TODO: check args and in case use args=(noisehandler.evaluations, )
es.tell(X, fit) # prepare for next iteration
if noise_handling: # it would be better to also use these f-evaluations in tell
es.sigma *= noisehandler(X, fit, objective_function, es.ask,
args=args)**fmin_opts['noise_change_sigma_exponent']
es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though
# es.more_to_write.append(noisehandler.evaluations_just_done)
if noisehandler.maxevals > noisehandler.minevals:
es.more_to_write.append(noisehandler.get_evaluations())
if 1 < 3:
es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
if es.sp.cmean > 1:
es.sp.cmean = 1
es.disp()
logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
modulo=1 if es.stop() and logger.modulo else None)
if (opts['verb_log'] and opts['verb_plot'] and
(es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
logger.plot(324)
# end while not es.stop
mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
fmean = objective_function(mean_pheno, *args)
es.countevals += 1
es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)
best.update(es.best, es.sent_solutions) # in restarted case
# es.best.update(best)
this_evals = es.countevals - base_evals
base_evals = es.countevals
# BIPOP stats update
if irun == 0:
popsize0 = opts['popsize']
maxiter0 = opts['maxiter']
# XXX: This might be a bug? Reproduced from Matlab
# small_i.append(this_evals)
if bipop:
if poptype == 'small':
small_i.append(this_evals)
else: # poptype == 'large'
large_i.append(this_evals)
# final message
if opts['verb_disp']:
es.result_pretty(irun, time.asctime(time.localtime()),
best.f)
irun += 1
# if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \
# if irun > restarts or 'ftarget' in es.stop() \
if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \
or 'maxfevals' in es.stop(check=False):
break
opts['verb_append'] = es.countevals
opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize # TODO: use rather options?
opts['seed'] += 1
# while irun
# es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell
if 1 < 3:
if irun:
es.best.update(best)
# TODO: there should be a better way to communicate the overall best
return es.result() + (es.stop(), es, logger)
else: # previously: to be removed
return (best.x.copy(), best.f, es.countevals,
dict((('stopdict', _CMAStopDict(es._stopdict))
, ('mean', es.gp.pheno(es.mean))
, ('std', es.sigma * es.sigma_vec * sqrt(es.dC) * es.gp.scales)
, ('out', es.out)
, ('opts', es.opts) # last state of options
, ('cma', es)
, ('inputargs', es.inputargs)
))
)
# TODO refine output, can #args be flexible?
# is this well usable as it is now?
else: # except KeyboardInterrupt: # Exception, e:
if eval(str(options['verb_disp'])) > 0:
print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
raise | python | def fmin(objective_function, x0, sigma0,
options=None,
args=(),
gradf=None,
restarts=0,
restart_from_best='False',
incpopsize=2,
eval_initial_x=False,
noise_handler=None,
noise_change_sigma_exponent=1,
noise_kappa_exponent=0, # TODO: add max kappa value as parameter
bipop=False):
"""functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin(objective_function, x0, sigma0)``
minimizes `objective_function` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
minimizes `objective_function` up to target function value 1e-5, which
is typically useful for benchmarking.
``fmin(objective_function, x0, sigma0, args=('f',))``
minimizes `objective_function` called with an additional argument ``'f'``.
``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
uses additional options ``ftarget`` and ``popsize``
``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`objective_function`, similar to `esobj.optimize()`.
Arguments
=========
`objective_function`
function to be minimized. Called as ``objective_function(x,
*args)``. `x` is a one-dimensional `numpy.ndarray`.
`objective_function` can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
before the application of the geno-phenotype transformation
according to the ``transformation`` option. It can also be
a string holding a Python expression that is evaluated
to yield the initial guess - this is important in case
restarts are performed so that they start from different
places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy`
object instance, in that case `sigma0` can be ``None``.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4th of the search domain width
(where the optimum is to be expected). The variables in
`objective_function` should be scaled such that they
presumably have similar sensitivity.
See also option `scaling_of_variables`.
`options`
a dictionary with additional options passed to the constructor
of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()``
for a list of available options.
``args=()``
arguments to be used to call the `objective_function`
``gradf``
gradient of f, where ``len(gradf(x, *args)) == len(x)``.
`gradf` is called once in each iteration if
``gradf is not None``.
``restarts=0``
number of restarts with increasing population size, see also
parameter `incpopsize`, implementing the IPOP-CMA-ES restart
strategy, see also parameter `bipop`; to restart from
different points (recommended), pass `x0` as a string.
``restart_from_best=False``
which point to restart from
``incpopsize=2``
multiplier for increasing the population size `popsize` before
each restart
``eval_initial_x=None``
evaluate initial solution, for `None` only with elitist option
``noise_handler=None``
a ``NoiseHandler`` instance or ``None``, a simple usecase is
``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))``
see ``help(cma.NoiseHandler)``.
``noise_change_sigma_exponent=1``
exponent for sigma increment for additional noise treatment
``noise_evaluations_as_kappa``
instead of applying reevaluations, the "number of evaluations"
is (ab)used as scaling factor kappa (experimental).
``bipop``
if True, run as BIPOP-CMA-ES; BIPOP is a special restart
strategy switching between two population sizings - small
(like the default CMA, but with more focused search) and
large (progressively increased as in IPOP). This makes the
algorithm perform well both on functions with many regularly
or irregularly arranged local optima (the latter by frequently
restarting with small populations). For the `bipop` parameter
to actually take effect, also select non-zero number of
(IPOP) restarts; the recommended setting is ``restarts<=9``
and `x0` passed as a string. Note that small-population
restarts do not count into the total restart count.
Optional Arguments
==================
All values in the `options` dictionary are evaluated if they are of
type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for
details. The full list is available via ``cma.CMAOptions()``.
>>> import cma
>>> cma.CMAOptions()
Subsets of options can be displayed, for example like
``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``,
see also class `CMAOptions`.
Return
======
Return the list provided by `CMAEvolutionStrategy.result()` appended
with termination conditions, an `OOOptimizer` and a `BaseDataLogger`::
res = es.result() + (es.stop(), es, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
latter class should be used when full control over the iteration loop
of the optimizer is desired.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01
2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01
3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01
100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02
200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02
300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02
400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02
500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03
600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06
650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution: [ 1. 1.00000001 1. 1.
1. 1.00000001 1.00000002 1.00000003 ...]
std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09
5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The above call is pretty much equivalent with the slightly more
verbose call::
es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
options=options).optimize(cma.fcts.rosen)
The following example calls `fmin` optimizing the Rastrigin function
in 3-D with random initial solution in [-2,2], initial step-size 0.5
and the BIPOP restart strategy (that progressively increases population).
The options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8}
>>>
>>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5,
... options, restarts=9, bipop=True)
(3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.633489455763566e+01 1.0e+00 4.35e-01 4e-01 4e-01
2 14 9.762462950258016e+00 1.2e+00 4.12e-01 4e-01 4e-01
3 21 2.461107851413725e+01 1.4e+00 3.78e-01 3e-01 4e-01
100 700 9.949590571272680e-01 1.7e+00 5.07e-05 3e-07 5e-07
123 861 9.949590570932969e-01 1.3e+00 3.93e-06 9e-09 1e-08
termination on tolfun=1e-11
final/bestever f-value = 9.949591e-01 9.949591e-01
mean solution: [ 9.94958638e-01 -7.19265205e-10 2.09294450e-10]
std deviation: [ 8.71497860e-09 8.58994807e-09 9.85585654e-09]
[...]
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 5342.0 2.114883315350800e+01 1.0e+00 3.42e-02 3e-02 4e-02
2 5351.0 1.810102940125502e+01 1.4e+00 3.79e-02 3e-02 4e-02
3 5360.0 1.340222457448063e+01 1.4e+00 4.58e-02 4e-02 6e-02
50 5783.0 8.631491965616078e-09 1.6e+00 2.01e-04 8e-06 1e-05
termination on ftarget=1e-08 after 4 restarts
final/bestever f-value = 8.316963e-09 8.316963e-09
mean solution: [ -3.10652459e-06 2.77935436e-06 -4.95444519e-06]
std deviation: [ 1.02825265e-05 8.08348144e-06 8.47256408e-06]
In either case, the method::
cma.plot();
(based on `matplotlib.pyplot`) produces a plot of the run and, if
necessary::
cma.show()
shows the plot in a window. Finally::
cma.savefig('myfirstrun') # savefig from matplotlib.pyplot
will save the figure in a png.
We can use the gradient like
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1,
... options = {'ftarget':1e-8,},
... gradf=cma.fcts.grad_rosen,
... )
>>> assert cma.fcts.rosen(res[0]) < 1e-8
>>> assert res[2] < 3600 # 1% are > 3300
>>> assert res[3] < 3600 # 1% are > 3300
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`,
`CMAOptions`, `scipy.optimize.fmin()`
""" # style guides say there should be the above empty line
if 1 < 3: # try: # pass on KeyboardInterrupt
if not objective_function: # cma.fmin(0, 0, 0)
return CMAOptions() # these opts are by definition valid
fmin_options = locals().copy() # archive original options
del fmin_options['objective_function']
del fmin_options['x0']
del fmin_options['sigma0']
del fmin_options['options']
del fmin_options['args']
if options is None:
options = cma_default_options
CMAOptions().check_attributes(options) # might modify options
# checked that no options.ftarget =
opts = CMAOptions(options.copy()).complement()
# BIPOP-related variables:
runs_with_small = 0
small_i = []
large_i = []
popsize0 = None # to be evaluated after the first iteration
maxiter0 = None # to be evaluated after the first iteration
base_evals = 0
irun = 0
best = BestSolution()
while True: # restart loop
sigma_factor = 1
# Adjust the population according to BIPOP after a restart.
if not bipop:
# BIPOP not in use, simply double the previous population
# on restart.
if irun > 0:
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
elif irun == 0:
# Initial run is with "normal" population size; it is
# the large population before first doubling, but its
# budget accounting is the same as in case of small
# population.
poptype = 'small'
elif sum(small_i) < sum(large_i):
# An interweaved run with small population size
poptype = 'small'
runs_with_small += 1 # _Before_ it's used in popsize_lastlarge
sigma_factor = 0.01 ** np.random.uniform() # Local search
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = np.floor(popsize0 * popsize_multiplier ** (np.random.uniform() ** 2))
opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize'])
# print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
else:
# A run with large population size; the population
# doubling is implicit with incpopsize.
poptype = 'large'
popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
opts['popsize'] = popsize0 * popsize_multiplier
opts['maxiter'] = maxiter0
# print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
# recover from a CMA object
if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
es = x0
x0 = es.inputargs['x0'] # for the next restarts
if isscalar(sigma0) and isfinite(sigma0) and sigma0 > 0:
es.sigma = sigma0
# debatable whether this makes sense:
sigma0 = es.inputargs['sigma0'] # for the next restarts
if options is not None:
es.opts.set(options)
# ignore further input args and keep original options
else: # default case
if irun and eval(str(fmin_options['restart_from_best'])):
print_warning('CAVE: restart_from_best is often not useful',
verbose=opts['verbose'])
es = CMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts)
else:
es = CMAEvolutionStrategy(x0, sigma_factor * sigma0, opts)
if eval_initial_x or es.opts['CMA_elitist'] == 'initial' \
or (es.opts['CMA_elitist'] and eval_initial_x is None):
x = es.gp.pheno(es.mean,
into_bounds=es.boundary_handler.repair,
archive=es.sent_solutions)
es.best.update([x], es.sent_solutions,
[objective_function(x, *args)], 1)
es.countevals += 1
opts = es.opts # processed options, unambiguous
# a hack:
fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True)
for k in fmin_opts:
# locals() cannot be modified directly, exec won't work
# in 3.x, therefore
fmin_opts.eval(k, loc={'N': es.N,
'popsize': opts['popsize']},
correct_key=False)
append = opts['verb_append'] or es.countiter > 0 or irun > 0
# es.logger is "the same" logger, because the "identity"
# is only determined by the `filenameprefix`
logger = CMADataLogger(opts['verb_filenameprefix'],
opts['verb_log'])
logger.register(es, append).add() # no fitness values here
es.logger = logger
if noise_handler:
noisehandler = noise_handler
noise_handling = True
if fmin_opts['noise_change_sigma_exponent'] > 0:
es.opts['tolfacupx'] = inf
else:
noisehandler = NoiseHandler(es.N, 0)
noise_handling = False
es.noise_handler = noisehandler
# the problem: this assumes that good solutions cannot take longer than bad ones:
# with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
if 1 < 3:
while not es.stop(): # iteration loop
# X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
X, fit = es.ask_and_eval(objective_function, args, gradf=gradf,
evaluations=noisehandler.evaluations,
aggregation=np.median) # treats NaN with resampling
# TODO: check args and in case use args=(noisehandler.evaluations, )
es.tell(X, fit) # prepare for next iteration
if noise_handling: # it would be better to also use these f-evaluations in tell
es.sigma *= noisehandler(X, fit, objective_function, es.ask,
args=args)**fmin_opts['noise_change_sigma_exponent']
es.countevals += noisehandler.evaluations_just_done # TODO: this is a hack, not important though
# es.more_to_write.append(noisehandler.evaluations_just_done)
if noisehandler.maxevals > noisehandler.minevals:
es.more_to_write.append(noisehandler.get_evaluations())
if 1 < 3:
es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
if es.sp.cmean > 1:
es.sp.cmean = 1
es.disp()
logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
modulo=1 if es.stop() and logger.modulo else None)
if (opts['verb_log'] and opts['verb_plot'] and
(es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
logger.plot(324)
# end while not es.stop
mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
fmean = objective_function(mean_pheno, *args)
es.countevals += 1
es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)
best.update(es.best, es.sent_solutions) # in restarted case
# es.best.update(best)
this_evals = es.countevals - base_evals
base_evals = es.countevals
# BIPOP stats update
if irun == 0:
popsize0 = opts['popsize']
maxiter0 = opts['maxiter']
# XXX: This might be a bug? Reproduced from Matlab
# small_i.append(this_evals)
if bipop:
if poptype == 'small':
small_i.append(this_evals)
else: # poptype == 'large'
large_i.append(this_evals)
# final message
if opts['verb_disp']:
es.result_pretty(irun, time.asctime(time.localtime()),
best.f)
irun += 1
# if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \
# if irun > restarts or 'ftarget' in es.stop() \
if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \
or 'maxfevals' in es.stop(check=False):
break
opts['verb_append'] = es.countevals
opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize # TODO: use rather options?
opts['seed'] += 1
# while irun
# es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell
if 1 < 3:
if irun:
es.best.update(best)
# TODO: there should be a better way to communicate the overall best
return es.result() + (es.stop(), es, logger)
else: # previously: to be removed
return (best.x.copy(), best.f, es.countevals,
dict((('stopdict', _CMAStopDict(es._stopdict))
, ('mean', es.gp.pheno(es.mean))
, ('std', es.sigma * es.sigma_vec * sqrt(es.dC) * es.gp.scales)
, ('out', es.out)
, ('opts', es.opts) # last state of options
, ('cma', es)
, ('inputargs', es.inputargs)
))
)
# TODO refine output, can #args be flexible?
# is this well usable as it is now?
else: # except KeyboardInterrupt: # Exception, e:
if eval(str(options['verb_disp'])) > 0:
print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
raise | ['def', 'fmin', '(', 'objective_function', ',', 'x0', ',', 'sigma0', ',', 'options', '=', 'None', ',', 'args', '=', '(', ')', ',', 'gradf', '=', 'None', ',', 'restarts', '=', '0', ',', 'restart_from_best', '=', "'False'", ',', 'incpopsize', '=', '2', ',', 'eval_initial_x', '=', 'False', ',', 'noise_handler', '=', 'None', ',', 'noise_change_sigma_exponent', '=', '1', ',', 'noise_kappa_exponent', '=', '0', ',', '# TODO: add max kappa value as parameter', 'bipop', '=', 'False', ')', ':', '# style guides say there should be the above empty line', 'if', '1', '<', '3', ':', '# try: # pass on KeyboardInterrupt', 'if', 'not', 'objective_function', ':', '# cma.fmin(0, 0, 0)', 'return', 'CMAOptions', '(', ')', '# these opts are by definition valid', 'fmin_options', '=', 'locals', '(', ')', '.', 'copy', '(', ')', '# archive original options', 'del', 'fmin_options', '[', "'objective_function'", ']', 'del', 'fmin_options', '[', "'x0'", ']', 'del', 'fmin_options', '[', "'sigma0'", ']', 'del', 'fmin_options', '[', "'options'", ']', 'del', 'fmin_options', '[', "'args'", ']', 'if', 'options', 'is', 'None', ':', 'options', '=', 'cma_default_options', 'CMAOptions', '(', ')', '.', 'check_attributes', '(', 'options', ')', '# might modify options', '# checked that no options.ftarget =', 'opts', '=', 'CMAOptions', '(', 'options', '.', 'copy', '(', ')', ')', '.', 'complement', '(', ')', '# BIPOP-related variables:', 'runs_with_small', '=', '0', 'small_i', '=', '[', ']', 'large_i', '=', '[', ']', 'popsize0', '=', 'None', '# to be evaluated after the first iteration', 'maxiter0', '=', 'None', '# to be evaluated after the first iteration', 'base_evals', '=', '0', 'irun', '=', '0', 'best', '=', 'BestSolution', '(', ')', 'while', 'True', ':', '# restart loop', 'sigma_factor', '=', '1', '# Adjust the population according to BIPOP after a restart.', 'if', 'not', 'bipop', ':', '# BIPOP not in use, simply double the previous population', '# on restart.', 'if', 'irun', '>', '0', ':', 'popsize_multiplier', '=', 'fmin_options', '[', "'incpopsize'", ']', '**', '(', 'irun', '-', 'runs_with_small', ')', 'opts', '[', "'popsize'", ']', '=', 'popsize0', '*', 'popsize_multiplier', 'elif', 'irun', '==', '0', ':', '# Initial run is with "normal" population size; it is', '# the large population before first doubling, but its', '# budget accounting is the same as in case of small', '# population.', 'poptype', '=', "'small'", 'elif', 'sum', '(', 'small_i', ')', '<', 'sum', '(', 'large_i', ')', ':', '# An interweaved run with small population size', 'poptype', '=', "'small'", 'runs_with_small', '+=', '1', "# _Before_ it's used in popsize_lastlarge", 'sigma_factor', '=', '0.01', '**', 'np', '.', 'random', '.', 'uniform', '(', ')', '# Local search', 'popsize_multiplier', '=', 'fmin_options', '[', "'incpopsize'", ']', '**', '(', 'irun', '-', 'runs_with_small', ')', 'opts', '[', "'popsize'", ']', '=', 'np', '.', 'floor', '(', 'popsize0', '*', 'popsize_multiplier', '**', '(', 'np', '.', 'random', '.', 'uniform', '(', ')', '**', '2', ')', ')', 'opts', '[', "'maxiter'", ']', '=', 'min', '(', 'maxiter0', ',', '0.5', '*', 'sum', '(', 'large_i', ')', '/', 'opts', '[', "'popsize'", ']', ')', "# print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))", 'else', ':', '# A run with large population size; the population', '# doubling is implicit with incpopsize.', 'poptype', '=', "'large'", 'popsize_multiplier', '=', 'fmin_options', '[', "'incpopsize'", ']', '**', '(', 'irun', '-', 'runs_with_small', ')', 'opts', '[', "'popsize'", ']', '=', 'popsize0', '*', 'popsize_multiplier', 'opts', '[', "'maxiter'", ']', '=', 'maxiter0', "# print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))", '# recover from a CMA object', 'if', 'irun', '==', '0', 'and', 'isinstance', '(', 'x0', ',', 'CMAEvolutionStrategy', ')', ':', 'es', '=', 'x0', 'x0', '=', 'es', '.', 'inputargs', '[', "'x0'", ']', '# for the next restarts', 'if', 'isscalar', '(', 'sigma0', ')', 'and', 'isfinite', '(', 'sigma0', ')', 'and', 'sigma0', '>', '0', ':', 'es', '.', 'sigma', '=', 'sigma0', '# debatable whether this makes sense:', 'sigma0', '=', 'es', '.', 'inputargs', '[', "'sigma0'", ']', '# for the next restarts', 'if', 'options', 'is', 'not', 'None', ':', 'es', '.', 'opts', '.', 'set', '(', 'options', ')', '# ignore further input args and keep original options', 'else', ':', '# default case', 'if', 'irun', 'and', 'eval', '(', 'str', '(', 'fmin_options', '[', "'restart_from_best'", ']', ')', ')', ':', 'print_warning', '(', "'CAVE: restart_from_best is often not useful'", ',', 'verbose', '=', 'opts', '[', "'verbose'", ']', ')', 'es', '=', 'CMAEvolutionStrategy', '(', 'best', '.', 'x', ',', 'sigma_factor', '*', 'sigma0', ',', 'opts', ')', 'else', ':', 'es', '=', 'CMAEvolutionStrategy', '(', 'x0', ',', 'sigma_factor', '*', 'sigma0', ',', 'opts', ')', 'if', 'eval_initial_x', 'or', 'es', '.', 'opts', '[', "'CMA_elitist'", ']', '==', "'initial'", 'or', '(', 'es', '.', 'opts', '[', "'CMA_elitist'", ']', 'and', 'eval_initial_x', 'is', 'None', ')', ':', 'x', '=', 'es', '.', 'gp', '.', 'pheno', '(', 'es', '.', 'mean', ',', 'into_bounds', '=', 'es', '.', 'boundary_handler', '.', 'repair', ',', 'archive', '=', 'es', '.', 'sent_solutions', ')', 'es', '.', 'best', '.', 'update', '(', '[', 'x', ']', ',', 'es', '.', 'sent_solutions', ',', '[', 'objective_function', '(', 'x', ',', '*', 'args', ')', ']', ',', '1', ')', 'es', '.', 'countevals', '+=', '1', 'opts', '=', 'es', '.', 'opts', '# processed options, unambiguous', '# a hack:', 'fmin_opts', '=', 'CMAOptions', '(', 'fmin_options', '.', 'copy', '(', ')', ',', 'unchecked', '=', 'True', ')', 'for', 'k', 'in', 'fmin_opts', ':', "# locals() cannot be modified directly, exec won't work", '# in 3.x, therefore', 'fmin_opts', '.', 'eval', '(', 'k', ',', 'loc', '=', '{', "'N'", ':', 'es', '.', 'N', ',', "'popsize'", ':', 'opts', '[', "'popsize'", ']', '}', ',', 'correct_key', '=', 'False', ')', 'append', '=', 'opts', '[', "'verb_append'", ']', 'or', 'es', '.', 'countiter', '>', '0', 'or', 'irun', '>', '0', '# es.logger is "the same" logger, because the "identity"', '# is only determined by the `filenameprefix`', 'logger', '=', 'CMADataLogger', '(', 'opts', '[', "'verb_filenameprefix'", ']', ',', 'opts', '[', "'verb_log'", ']', ')', 'logger', '.', 'register', '(', 'es', ',', 'append', ')', '.', 'add', '(', ')', '# no fitness values here', 'es', '.', 'logger', '=', 'logger', 'if', 'noise_handler', ':', 'noisehandler', '=', 'noise_handler', 'noise_handling', '=', 'True', 'if', 'fmin_opts', '[', "'noise_change_sigma_exponent'", ']', '>', '0', ':', 'es', '.', 'opts', '[', "'tolfacupx'", ']', '=', 'inf', 'else', ':', 'noisehandler', '=', 'NoiseHandler', '(', 'es', '.', 'N', ',', '0', ')', 'noise_handling', '=', 'False', 'es', '.', 'noise_handler', '=', 'noisehandler', '# the problem: this assumes that good solutions cannot take longer than bad ones:', "# with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:", 'if', '1', '<', '3', ':', 'while', 'not', 'es', '.', 'stop', '(', ')', ':', '# iteration loop', '# X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)', 'X', ',', 'fit', '=', 'es', '.', 'ask_and_eval', '(', 'objective_function', ',', 'args', ',', 'gradf', '=', 'gradf', ',', 'evaluations', '=', 'noisehandler', '.', 'evaluations', ',', 'aggregation', '=', 'np', '.', 'median', ')', '# treats NaN with resampling', '# TODO: check args and in case use args=(noisehandler.evaluations, )', 'es', '.', 'tell', '(', 'X', ',', 'fit', ')', '# prepare for next iteration', 'if', 'noise_handling', ':', '# it would be better to also use these f-evaluations in tell', 'es', '.', 'sigma', '*=', 'noisehandler', '(', 'X', ',', 'fit', ',', 'objective_function', ',', 'es', '.', 'ask', ',', 'args', '=', 'args', ')', '**', 'fmin_opts', '[', "'noise_change_sigma_exponent'", ']', 'es', '.', 'countevals', '+=', 'noisehandler', '.', 'evaluations_just_done', '# TODO: this is a hack, not important though', '# es.more_to_write.append(noisehandler.evaluations_just_done)', 'if', 'noisehandler', '.', 'maxevals', '>', 'noisehandler', '.', 'minevals', ':', 'es', '.', 'more_to_write', '.', 'append', '(', 'noisehandler', '.', 'get_evaluations', '(', ')', ')', 'if', '1', '<', '3', ':', 'es', '.', 'sp', '.', 'cmean', '*=', 'exp', '(', '-', 'noise_kappa_exponent', '*', 'np', '.', 'tanh', '(', 'noisehandler', '.', 'noiseS', ')', ')', 'if', 'es', '.', 'sp', '.', 'cmean', '>', '1', ':', 'es', '.', 'sp', '.', 'cmean', '=', '1', 'es', '.', 'disp', '(', ')', 'logger', '.', 'add', '(', '# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],', 'modulo', '=', '1', 'if', 'es', '.', 'stop', '(', ')', 'and', 'logger', '.', 'modulo', 'else', 'None', ')', 'if', '(', 'opts', '[', "'verb_log'", ']', 'and', 'opts', '[', "'verb_plot'", ']', 'and', '(', 'es', '.', 'countiter', '%', 'max', '(', 'opts', '[', "'verb_plot'", ']', ',', 'opts', '[', "'verb_log'", ']', ')', '==', '0', 'or', 'es', '.', 'stop', '(', ')', ')', ')', ':', 'logger', '.', 'plot', '(', '324', ')', '# end while not es.stop', 'mean_pheno', '=', 'es', '.', 'gp', '.', 'pheno', '(', 'es', '.', 'mean', ',', 'into_bounds', '=', 'es', '.', 'boundary_handler', '.', 'repair', ',', 'archive', '=', 'es', '.', 'sent_solutions', ')', 'fmean', '=', 'objective_function', '(', 'mean_pheno', ',', '*', 'args', ')', 'es', '.', 'countevals', '+=', '1', 'es', '.', 'best', '.', 'update', '(', '[', 'mean_pheno', ']', ',', 'es', '.', 'sent_solutions', ',', '[', 'fmean', ']', ',', 'es', '.', 'countevals', ')', 'best', '.', 'update', '(', 'es', '.', 'best', ',', 'es', '.', 'sent_solutions', ')', '# in restarted case', '# es.best.update(best)', 'this_evals', '=', 'es', '.', 'countevals', '-', 'base_evals', 'base_evals', '=', 'es', '.', 'countevals', '# BIPOP stats update', 'if', 'irun', '==', '0', ':', 'popsize0', '=', 'opts', '[', "'popsize'", ']', 'maxiter0', '=', 'opts', '[', "'maxiter'", ']', '# XXX: This might be a bug? Reproduced from Matlab', '# small_i.append(this_evals)', 'if', 'bipop', ':', 'if', 'poptype', '==', "'small'", ':', 'small_i', '.', 'append', '(', 'this_evals', ')', 'else', ':', "# poptype == 'large'", 'large_i', '.', 'append', '(', 'this_evals', ')', '# final message', 'if', 'opts', '[', "'verb_disp'", ']', ':', 'es', '.', 'result_pretty', '(', 'irun', ',', 'time', '.', 'asctime', '(', 'time', '.', 'localtime', '(', ')', ')', ',', 'best', '.', 'f', ')', 'irun', '+=', '1', "# if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \\", "# if irun > restarts or 'ftarget' in es.stop() \\", 'if', 'irun', '-', 'runs_with_small', '>', 'fmin_opts', '[', "'restarts'", ']', 'or', "'ftarget'", 'in', 'es', '.', 'stop', '(', ')', 'or', "'maxfevals'", 'in', 'es', '.', 'stop', '(', 'check', '=', 'False', ')', ':', 'break', 'opts', '[', "'verb_append'", ']', '=', 'es', '.', 'countevals', 'opts', '[', "'popsize'", ']', '=', 'fmin_opts', '[', "'incpopsize'", ']', '*', 'es', '.', 'sp', '.', 'popsize', '# TODO: use rather options?', 'opts', '[', "'seed'", ']', '+=', '1', '# while irun', "# es.out['best'] = best # TODO: this is a rather suboptimal type for inspection in the shell", 'if', '1', '<', '3', ':', 'if', 'irun', ':', 'es', '.', 'best', '.', 'update', '(', 'best', ')', '# TODO: there should be a better way to communicate the overall best', 'return', 'es', '.', 'result', '(', ')', '+', '(', 'es', '.', 'stop', '(', ')', ',', 'es', ',', 'logger', ')', 'else', ':', '# previously: to be removed', 'return', '(', 'best', '.', 'x', '.', 'copy', '(', ')', ',', 'best', '.', 'f', ',', 'es', '.', 'countevals', ',', 'dict', '(', '(', '(', "'stopdict'", ',', '_CMAStopDict', '(', 'es', '.', '_stopdict', ')', ')', ',', '(', "'mean'", ',', 'es', '.', 'gp', '.', 'pheno', '(', 'es', '.', 'mean', ')', ')', ',', '(', "'std'", ',', 'es', '.', 'sigma', '*', 'es', '.', 'sigma_vec', '*', 'sqrt', '(', 'es', '.', 'dC', ')', '*', 'es', '.', 'gp', '.', 'scales', ')', ',', '(', "'out'", ',', 'es', '.', 'out', ')', ',', '(', "'opts'", ',', 'es', '.', 'opts', ')', '# last state of options', ',', '(', "'cma'", ',', 'es', ')', ',', '(', "'inputargs'", ',', 'es', '.', 'inputargs', ')', ')', ')', ')', '# TODO refine output, can #args be flexible?', '# is this well usable as it is now?', 'else', ':', '# except KeyboardInterrupt: # Exception, e:', 'if', 'eval', '(', 'str', '(', 'options', '[', "'verb_disp'", ']', ')', ')', '>', '0', ':', 'print', '(', "' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception'", ')', 'raise'] | functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin(objective_function, x0, sigma0)``
minimizes `objective_function` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
minimizes `objective_function` up to target function value 1e-5, which
is typically useful for benchmarking.
``fmin(objective_function, x0, sigma0, args=('f',))``
minimizes `objective_function` called with an additional argument ``'f'``.
``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
uses additional options ``ftarget`` and ``popsize``
``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`objective_function`, similar to `esobj.optimize()`.
Arguments
=========
`objective_function`
function to be minimized. Called as ``objective_function(x,
*args)``. `x` is a one-dimensional `numpy.ndarray`.
`objective_function` can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
before the application of the geno-phenotype transformation
according to the ``transformation`` option. It can also be
a string holding a Python expression that is evaluated
to yield the initial guess - this is important in case
restarts are performed so that they start from different
places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy`
object instance, in that case `sigma0` can be ``None``.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4th of the search domain width
(where the optimum is to be expected). The variables in
`objective_function` should be scaled such that they
presumably have similar sensitivity.
See also option `scaling_of_variables`.
`options`
a dictionary with additional options passed to the constructor
of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()``
for a list of available options.
``args=()``
arguments to be used to call the `objective_function`
``gradf``
gradient of f, where ``len(gradf(x, *args)) == len(x)``.
`gradf` is called once in each iteration if
``gradf is not None``.
``restarts=0``
number of restarts with increasing population size, see also
parameter `incpopsize`, implementing the IPOP-CMA-ES restart
strategy, see also parameter `bipop`; to restart from
different points (recommended), pass `x0` as a string.
``restart_from_best=False``
which point to restart from
``incpopsize=2``
multiplier for increasing the population size `popsize` before
each restart
``eval_initial_x=None``
evaluate initial solution, for `None` only with elitist option
``noise_handler=None``
a ``NoiseHandler`` instance or ``None``, a simple usecase is
``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))``
see ``help(cma.NoiseHandler)``.
``noise_change_sigma_exponent=1``
exponent for sigma increment for additional noise treatment
``noise_evaluations_as_kappa``
instead of applying reevaluations, the "number of evaluations"
is (ab)used as scaling factor kappa (experimental).
``bipop``
if True, run as BIPOP-CMA-ES; BIPOP is a special restart
strategy switching between two population sizings - small
(like the default CMA, but with more focused search) and
large (progressively increased as in IPOP). This makes the
algorithm perform well both on functions with many regularly
or irregularly arranged local optima (the latter by frequently
restarting with small populations). For the `bipop` parameter
to actually take effect, also select non-zero number of
(IPOP) restarts; the recommended setting is ``restarts<=9``
and `x0` passed as a string. Note that small-population
restarts do not count into the total restart count.
Optional Arguments
==================
All values in the `options` dictionary are evaluated if they are of
type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for
details. The full list is available via ``cma.CMAOptions()``.
>>> import cma
>>> cma.CMAOptions()
Subsets of options can be displayed, for example like
``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``,
see also class `CMAOptions`.
Return
======
Return the list provided by `CMAEvolutionStrategy.result()` appended
with termination conditions, an `OOOptimizer` and a `BaseDataLogger`::
res = es.result() + (es.stop(), es, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
latter class should be used when full control over the iteration loop
of the optimizer is desired.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 10 1.264232686260072e+02 1.1e+00 4.40e-01 4e-01 4e-01
2 20 1.023929748193649e+02 1.1e+00 4.00e-01 4e-01 4e-01
3 30 1.214724267489674e+02 1.2e+00 3.70e-01 3e-01 4e-01
100 1000 6.366683525319511e+00 6.2e+00 2.49e-02 9e-03 3e-02
200 2000 3.347312410388666e+00 1.2e+01 4.52e-02 8e-03 4e-02
300 3000 1.027509686232270e+00 1.3e+01 2.85e-02 5e-03 2e-02
400 4000 1.279649321170636e-01 2.3e+01 3.53e-02 3e-03 3e-02
500 5000 4.302636076186532e-04 4.6e+01 4.78e-03 3e-04 5e-03
600 6000 6.943669235595049e-11 5.1e+01 5.41e-06 1e-07 4e-06
650 6500 5.557961334063003e-14 5.4e+01 1.88e-07 4e-09 1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution: [ 1. 1.00000001 1. 1.
1. 1.00000001 1.00000002 1.00000003 ...]
std deviation: [ 3.9193387e-09 3.7792732e-09 4.0062285e-09 4.6605925e-09
5.4966188e-09 7.4377745e-09 1.3797207e-08 2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The above call is pretty much equivalent with the slightly more
verbose call::
es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
options=options).optimize(cma.fcts.rosen)
The following example calls `fmin` optimizing the Rastrigin function
in 3-D with random initial solution in [-2,2], initial step-size 0.5
and the BIPOP restart strategy (that progressively increases population).
The options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions() # returns all possible options
>>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8}
>>>
>>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5,
... options, restarts=9, bipop=True)
(3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.633489455763566e+01 1.0e+00 4.35e-01 4e-01 4e-01
2 14 9.762462950258016e+00 1.2e+00 4.12e-01 4e-01 4e-01
3 21 2.461107851413725e+01 1.4e+00 3.78e-01 3e-01 4e-01
100 700 9.949590571272680e-01 1.7e+00 5.07e-05 3e-07 5e-07
123 861 9.949590570932969e-01 1.3e+00 3.93e-06 9e-09 1e-08
termination on tolfun=1e-11
final/bestever f-value = 9.949591e-01 9.949591e-01
mean solution: [ 9.94958638e-01 -7.19265205e-10 2.09294450e-10]
std deviation: [ 8.71497860e-09 8.58994807e-09 9.85585654e-09]
[...]
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 5342.0 2.114883315350800e+01 1.0e+00 3.42e-02 3e-02 4e-02
2 5351.0 1.810102940125502e+01 1.4e+00 3.79e-02 3e-02 4e-02
3 5360.0 1.340222457448063e+01 1.4e+00 4.58e-02 4e-02 6e-02
50 5783.0 8.631491965616078e-09 1.6e+00 2.01e-04 8e-06 1e-05
termination on ftarget=1e-08 after 4 restarts
final/bestever f-value = 8.316963e-09 8.316963e-09
mean solution: [ -3.10652459e-06 2.77935436e-06 -4.95444519e-06]
std deviation: [ 1.02825265e-05 8.08348144e-06 8.47256408e-06]
In either case, the method::
cma.plot();
(based on `matplotlib.pyplot`) produces a plot of the run and, if
necessary::
cma.show()
shows the plot in a window. Finally::
cma.savefig('myfirstrun') # savefig from matplotlib.pyplot
will save the figure in a png.
We can use the gradient like
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1,
... options = {'ftarget':1e-8,},
... gradf=cma.fcts.grad_rosen,
... )
>>> assert cma.fcts.rosen(res[0]) < 1e-8
>>> assert res[2] < 3600 # 1% are > 3300
>>> assert res[3] < 3600 # 1% are > 3300
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`,
`CMAOptions`, `scipy.optimize.fmin()` | ['functional', 'interface', 'to', 'the', 'stochastic', 'optimizer', 'CMA', '-', 'ES', 'for', 'non', '-', 'convex', 'function', 'minimization', '.'] | train | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L5137-L5598 |
1,968 | Esri/ArcREST | src/arcrest/manageags/_security.py | Security.updateUser | def updateUser(self, username, password, fullname, description, email):
""" Updates a user account in the user store
Input:
username - the name of the user. The name must be unique in
the user store.
password - the password for this user.
fullname - an optional full name for the user.
description - an optional field to add comments or description
for the user account.
email - an optional email for the user account.
"""
params = {
"f" : "json",
"username" : username
}
if password is not None:
params['password'] = password
if fullname is not None:
params['fullname'] = fullname
if description is not None:
params['description'] = description
if email is not None:
params['email'] = email
uURL = self._url + "/users/update"
return self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | python | def updateUser(self, username, password, fullname, description, email):
""" Updates a user account in the user store
Input:
username - the name of the user. The name must be unique in
the user store.
password - the password for this user.
fullname - an optional full name for the user.
description - an optional field to add comments or description
for the user account.
email - an optional email for the user account.
"""
params = {
"f" : "json",
"username" : username
}
if password is not None:
params['password'] = password
if fullname is not None:
params['fullname'] = fullname
if description is not None:
params['description'] = description
if email is not None:
params['email'] = email
uURL = self._url + "/users/update"
return self._post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | ['def', 'updateUser', '(', 'self', ',', 'username', ',', 'password', ',', 'fullname', ',', 'description', ',', 'email', ')', ':', 'params', '=', '{', '"f"', ':', '"json"', ',', '"username"', ':', 'username', '}', 'if', 'password', 'is', 'not', 'None', ':', 'params', '[', "'password'", ']', '=', 'password', 'if', 'fullname', 'is', 'not', 'None', ':', 'params', '[', "'fullname'", ']', '=', 'fullname', 'if', 'description', 'is', 'not', 'None', ':', 'params', '[', "'description'", ']', '=', 'description', 'if', 'email', 'is', 'not', 'None', ':', 'params', '[', "'email'", ']', '=', 'email', 'uURL', '=', 'self', '.', '_url', '+', '"/users/update"', 'return', 'self', '.', '_post', '(', 'url', '=', 'uURL', ',', 'param_dict', '=', 'params', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ')'] | Updates a user account in the user store
Input:
username - the name of the user. The name must be unique in
the user store.
password - the password for this user.
fullname - an optional full name for the user.
description - an optional field to add comments or description
for the user account.
email - an optional email for the user account. | ['Updates', 'a', 'user', 'account', 'in', 'the', 'user', 'store', 'Input', ':', 'username', '-', 'the', 'name', 'of', 'the', 'user', '.', 'The', 'name', 'must', 'be', 'unique', 'in', 'the', 'user', 'store', '.', 'password', '-', 'the', 'password', 'for', 'this', 'user', '.', 'fullname', '-', 'an', 'optional', 'full', 'name', 'for', 'the', 'user', '.', 'description', '-', 'an', 'optional', 'field', 'to', 'add', 'comments', 'or', 'description', 'for', 'the', 'user', 'account', '.', 'email', '-', 'an', 'optional', 'email', 'for', 'the', 'user', 'account', '.'] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_security.py#L621-L648 |
1,969 | base4sistemas/pyescpos | escpos/impl/epson.py | GenericESCPOS.code128 | def code128(self, data, **kwargs):
"""Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
"""
if not re.match(r'^[\x20-\x7F]+$', data):
raise ValueError('Invalid Code 128 symbology. Code 128 can encode '
'any ASCII character ranging from 32 (20h) to 127 (7Fh); '
'got {!r}'.format(data))
codeset = kwargs.pop('codeset', barcode.CODE128_A)
barcode.validate_barcode_args(**kwargs)
return self._code128_impl(data, codeset=codeset, **kwargs) | python | def code128(self, data, **kwargs):
"""Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set.
"""
if not re.match(r'^[\x20-\x7F]+$', data):
raise ValueError('Invalid Code 128 symbology. Code 128 can encode '
'any ASCII character ranging from 32 (20h) to 127 (7Fh); '
'got {!r}'.format(data))
codeset = kwargs.pop('codeset', barcode.CODE128_A)
barcode.validate_barcode_args(**kwargs)
return self._code128_impl(data, codeset=codeset, **kwargs) | ['def', 'code128', '(', 'self', ',', 'data', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 're', '.', 'match', '(', "r'^[\\x20-\\x7F]+$'", ',', 'data', ')', ':', 'raise', 'ValueError', '(', "'Invalid Code 128 symbology. Code 128 can encode '", "'any ASCII character ranging from 32 (20h) to 127 (7Fh); '", "'got {!r}'", '.', 'format', '(', 'data', ')', ')', 'codeset', '=', 'kwargs', '.', 'pop', '(', "'codeset'", ',', 'barcode', '.', 'CODE128_A', ')', 'barcode', '.', 'validate_barcode_args', '(', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_code128_impl', '(', 'data', ',', 'codeset', '=', 'codeset', ',', '*', '*', 'kwargs', ')'] | Renders given ``data`` as **Code 128** barcode symbology.
:param str codeset: Optional. Keyword argument for the subtype (code
set) to render. Defaults to :attr:`escpos.barcode.CODE128_A`.
.. warning::
You should draw up your data according to the subtype (code set).
The default is **Code 128 A** and there is no way (yet) to mix code
sets in a single barcode rendering (at least not uniformly).
Implementations may simply ignore the code set. | ['Renders', 'given', 'data', 'as', '**', 'Code', '128', '**', 'barcode', 'symbology', '.'] | train | https://github.com/base4sistemas/pyescpos/blob/621bd00f1499aff700f37d8d36d04e0d761708f1/escpos/impl/epson.py#L240-L261 |
1,970 | jazzband/sorl-thumbnail | sorl/thumbnail/kvstores/base.py | KVStoreBase._get | def _get(self, key, identity='image'):
"""
Deserializing, prefix wrapper for _get_raw
"""
value = self._get_raw(add_prefix(key, identity))
if not value:
return None
if identity == 'image':
return deserialize_image_file(value)
return deserialize(value) | python | def _get(self, key, identity='image'):
"""
Deserializing, prefix wrapper for _get_raw
"""
value = self._get_raw(add_prefix(key, identity))
if not value:
return None
if identity == 'image':
return deserialize_image_file(value)
return deserialize(value) | ['def', '_get', '(', 'self', ',', 'key', ',', 'identity', '=', "'image'", ')', ':', 'value', '=', 'self', '.', '_get_raw', '(', 'add_prefix', '(', 'key', ',', 'identity', ')', ')', 'if', 'not', 'value', ':', 'return', 'None', 'if', 'identity', '==', "'image'", ':', 'return', 'deserialize_image_file', '(', 'value', ')', 'return', 'deserialize', '(', 'value', ')'] | Deserializing, prefix wrapper for _get_raw | ['Deserializing', 'prefix', 'wrapper', 'for', '_get_raw'] | train | https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/kvstores/base.py#L141-L153 |
1,971 | lordmauve/lepton | examples/fireworks.py | on_draw | def on_draw():
global yrot
win.clear()
glLoadIdentity()
glTranslatef(0, 0, -100)
glRotatef(yrot, 0.0, 1.0, 0.0)
default_system.draw()
'''
glBindTexture(GL_TEXTURE_2D, 1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(0,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(50,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 0)
''' | python | def on_draw():
global yrot
win.clear()
glLoadIdentity()
glTranslatef(0, 0, -100)
glRotatef(yrot, 0.0, 1.0, 0.0)
default_system.draw()
'''
glBindTexture(GL_TEXTURE_2D, 1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(0,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(50,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 0)
''' | ['def', 'on_draw', '(', ')', ':', 'global', 'yrot', 'win', '.', 'clear', '(', ')', 'glLoadIdentity', '(', ')', 'glTranslatef', '(', '0', ',', '0', ',', '-', '100', ')', 'glRotatef', '(', 'yrot', ',', '0.0', ',', '1.0', ',', '0.0', ')', 'default_system', '.', 'draw', '(', ')'] | glBindTexture(GL_TEXTURE_2D, 1)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(0,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glEnable(GL_TEXTURE_2D)
glEnable(GL_POINT_SPRITE)
glPointSize(100);
glBegin(GL_POINTS)
glVertex2f(50,0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 0) | ['glBindTexture', '(', 'GL_TEXTURE_2D', '1', ')', 'glEnable', '(', 'GL_TEXTURE_2D', ')', 'glEnable', '(', 'GL_POINT_SPRITE', ')', 'glPointSize', '(', '100', ')', ';', 'glBegin', '(', 'GL_POINTS', ')', 'glVertex2f', '(', '0', '0', ')', 'glEnd', '()', 'glBindTexture', '(', 'GL_TEXTURE_2D', '2', ')', 'glEnable', '(', 'GL_TEXTURE_2D', ')', 'glEnable', '(', 'GL_POINT_SPRITE', ')', 'glPointSize', '(', '100', ')', ';', 'glBegin', '(', 'GL_POINTS', ')', 'glVertex2f', '(', '50', '0', ')', 'glEnd', '()', 'glBindTexture', '(', 'GL_TEXTURE_2D', '0', ')'] | train | https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/fireworks.py#L132-L155 |
1,972 | Turbo87/aerofiles | aerofiles/flarmcfg/writer.py | Writer.write_waypoint | def write_waypoint(self, latitude=None, longitude=None, description=None):
"""
Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint
"""
if not description:
description = ''
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
self.write_config(
'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])
) | python | def write_waypoint(self, latitude=None, longitude=None, description=None):
"""
Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint
"""
if not description:
description = ''
latitude = self.format_latitude(latitude)
longitude = self.format_longitude(longitude)
self.write_config(
'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])
) | ['def', 'write_waypoint', '(', 'self', ',', 'latitude', '=', 'None', ',', 'longitude', '=', 'None', ',', 'description', '=', 'None', ')', ':', 'if', 'not', 'description', ':', 'description', '=', "''", 'latitude', '=', 'self', '.', 'format_latitude', '(', 'latitude', ')', 'longitude', '=', 'self', '.', 'format_longitude', '(', 'longitude', ')', 'self', '.', 'write_config', '(', "'ADDWP'", ',', "'%s,%s,%s'", '%', '(', 'latitude', ',', 'longitude', ',', 'description', '[', '0', ':', '50', ']', ')', ')'] | Adds a waypoint to the current task declaration. The first and the
last waypoint added will be treated as takeoff and landing location,
respectively.
::
writer.write_waypoint(
latitude=(51 + 7.345 / 60.),
longitude=(6 + 24.765 / 60.),
text='Meiersberg',
)
# -> $PFLAC,S,ADDWP,5107345N,00624765E,Meiersberg
If no ``latitude`` or ``longitude`` is passed, the fields will be
filled with zeros (i.e. unknown coordinates). This however should only
be used for takeoff and landing points.
:param latitude: latitude of the point (between -90 and 90 degrees)
:param longitude: longitude of the point (between -180 and 180 degrees)
:param description: arbitrary text description of waypoint | ['Adds', 'a', 'waypoint', 'to', 'the', 'current', 'task', 'declaration', '.', 'The', 'first', 'and', 'the', 'last', 'waypoint', 'added', 'will', 'be', 'treated', 'as', 'takeoff', 'and', 'landing', 'location', 'respectively', '.'] | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/flarmcfg/writer.py#L150-L182 |
1,973 | vingd/vingd-api-python | vingd/client.py | Vingd.revoke_vouchers | def revoke_vouchers(self, vid_encoded=None,
uid_from=None, uid_to=None, gid=None,
valid_after=None, valid_before=None,
last=None, first=None):
"""
REVOKES/INVALIDATES a filtered list of vouchers.
:type vid_encoded: ``alphanumeric(64)``
:param vid_encoded:
Voucher ID, as a string with CRC.
:type uid_from: ``bigint``
:param uid_from:
Filter by source account UID.
:type uid_to: ``bigint``
:param uid_to:
Filter by destination account UID.
:type gid: ``alphanumeric(32)``
:param gid:
Filter by voucher Group ID. GID is localized to `uid_from`.
:type valid_after: ``datetime``/``dict``
:param valid_after:
Voucher has to be valid after this timestamp. Absolute
(``datetime``) or relative (``dict``) timestamps are accepted. Valid
keys for relative timestamp dictionary are same as keyword arguments
for `datetime.timedelta` (``days``, ``seconds``, ``minutes``,
``hours``, ``weeks``).
:type valid_before: ``datetime``/``dict``
:param valid_before:
Voucher was valid until this timestamp (for format, see the
`valid_after` above).
:type last: ``bigint``
:param last:
The number of newest vouchers (that satisfy all other criteria) to
return.
:type first: ``bigint``
:param first:
The number of oldest vouchers (that satisfy all other criteria) to
return.
:note:
As with `get_vouchers`, filters are restrictive, narrowing down the
set of vouchers, which initially includes complete voucher
collection. That means, in turn, that a naive empty-handed
`revoke_vouchers()` call shall revoke **all** un-used vouchers (both
valid and expired)!
:rtype: ``dict``
:returns:
A dictionary of successfully revoked vouchers, i.e. a map
``vid_encoded``: ``refund_transfer_id`` for all successfully revoked
vouchers.
:raises GeneralException:
:resource:
``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]``
``[/valid_after=<valid_after>][/valid_before=<valid_before>]``
``[/last=<last>][/first=<first>]``
:access: authorized users (ACL flag: ``voucher.revoke``)
"""
resource = self.kvpath(
'vouchers',
('ident', vid_encoded),
**{
'from': ('int', uid_from),
'to': ('int', uid_to),
'gid': ('ident', gid),
'valid_after': ('isobasic', absdatetime(valid_after)),
'valid_before': ('isobasic', absdatetime(valid_before)),
'first': ('int', first),
'last': ('int', last)
}
)
return self.request('delete', resource, json.dumps({'revoke': True})) | python | def revoke_vouchers(self, vid_encoded=None,
uid_from=None, uid_to=None, gid=None,
valid_after=None, valid_before=None,
last=None, first=None):
"""
REVOKES/INVALIDATES a filtered list of vouchers.
:type vid_encoded: ``alphanumeric(64)``
:param vid_encoded:
Voucher ID, as a string with CRC.
:type uid_from: ``bigint``
:param uid_from:
Filter by source account UID.
:type uid_to: ``bigint``
:param uid_to:
Filter by destination account UID.
:type gid: ``alphanumeric(32)``
:param gid:
Filter by voucher Group ID. GID is localized to `uid_from`.
:type valid_after: ``datetime``/``dict``
:param valid_after:
Voucher has to be valid after this timestamp. Absolute
(``datetime``) or relative (``dict``) timestamps are accepted. Valid
keys for relative timestamp dictionary are same as keyword arguments
for `datetime.timedelta` (``days``, ``seconds``, ``minutes``,
``hours``, ``weeks``).
:type valid_before: ``datetime``/``dict``
:param valid_before:
Voucher was valid until this timestamp (for format, see the
`valid_after` above).
:type last: ``bigint``
:param last:
The number of newest vouchers (that satisfy all other criteria) to
return.
:type first: ``bigint``
:param first:
The number of oldest vouchers (that satisfy all other criteria) to
return.
:note:
As with `get_vouchers`, filters are restrictive, narrowing down the
set of vouchers, which initially includes complete voucher
collection. That means, in turn, that a naive empty-handed
`revoke_vouchers()` call shall revoke **all** un-used vouchers (both
valid and expired)!
:rtype: ``dict``
:returns:
A dictionary of successfully revoked vouchers, i.e. a map
``vid_encoded``: ``refund_transfer_id`` for all successfully revoked
vouchers.
:raises GeneralException:
:resource:
``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]``
``[/valid_after=<valid_after>][/valid_before=<valid_before>]``
``[/last=<last>][/first=<first>]``
:access: authorized users (ACL flag: ``voucher.revoke``)
"""
resource = self.kvpath(
'vouchers',
('ident', vid_encoded),
**{
'from': ('int', uid_from),
'to': ('int', uid_to),
'gid': ('ident', gid),
'valid_after': ('isobasic', absdatetime(valid_after)),
'valid_before': ('isobasic', absdatetime(valid_before)),
'first': ('int', first),
'last': ('int', last)
}
)
return self.request('delete', resource, json.dumps({'revoke': True})) | ['def', 'revoke_vouchers', '(', 'self', ',', 'vid_encoded', '=', 'None', ',', 'uid_from', '=', 'None', ',', 'uid_to', '=', 'None', ',', 'gid', '=', 'None', ',', 'valid_after', '=', 'None', ',', 'valid_before', '=', 'None', ',', 'last', '=', 'None', ',', 'first', '=', 'None', ')', ':', 'resource', '=', 'self', '.', 'kvpath', '(', "'vouchers'", ',', '(', "'ident'", ',', 'vid_encoded', ')', ',', '*', '*', '{', "'from'", ':', '(', "'int'", ',', 'uid_from', ')', ',', "'to'", ':', '(', "'int'", ',', 'uid_to', ')', ',', "'gid'", ':', '(', "'ident'", ',', 'gid', ')', ',', "'valid_after'", ':', '(', "'isobasic'", ',', 'absdatetime', '(', 'valid_after', ')', ')', ',', "'valid_before'", ':', '(', "'isobasic'", ',', 'absdatetime', '(', 'valid_before', ')', ')', ',', "'first'", ':', '(', "'int'", ',', 'first', ')', ',', "'last'", ':', '(', "'int'", ',', 'last', ')', '}', ')', 'return', 'self', '.', 'request', '(', "'delete'", ',', 'resource', ',', 'json', '.', 'dumps', '(', '{', "'revoke'", ':', 'True', '}', ')', ')'] | REVOKES/INVALIDATES a filtered list of vouchers.
:type vid_encoded: ``alphanumeric(64)``
:param vid_encoded:
Voucher ID, as a string with CRC.
:type uid_from: ``bigint``
:param uid_from:
Filter by source account UID.
:type uid_to: ``bigint``
:param uid_to:
Filter by destination account UID.
:type gid: ``alphanumeric(32)``
:param gid:
Filter by voucher Group ID. GID is localized to `uid_from`.
:type valid_after: ``datetime``/``dict``
:param valid_after:
Voucher has to be valid after this timestamp. Absolute
(``datetime``) or relative (``dict``) timestamps are accepted. Valid
keys for relative timestamp dictionary are same as keyword arguments
for `datetime.timedelta` (``days``, ``seconds``, ``minutes``,
``hours``, ``weeks``).
:type valid_before: ``datetime``/``dict``
:param valid_before:
Voucher was valid until this timestamp (for format, see the
`valid_after` above).
:type last: ``bigint``
:param last:
The number of newest vouchers (that satisfy all other criteria) to
return.
:type first: ``bigint``
:param first:
The number of oldest vouchers (that satisfy all other criteria) to
return.
:note:
As with `get_vouchers`, filters are restrictive, narrowing down the
set of vouchers, which initially includes complete voucher
collection. That means, in turn, that a naive empty-handed
`revoke_vouchers()` call shall revoke **all** un-used vouchers (both
valid and expired)!
:rtype: ``dict``
:returns:
A dictionary of successfully revoked vouchers, i.e. a map
``vid_encoded``: ``refund_transfer_id`` for all successfully revoked
vouchers.
:raises GeneralException:
:resource:
``vouchers[/<vid_encoded>][/from=<uid_from>][/to=<uid_to>]``
``[/valid_after=<valid_after>][/valid_before=<valid_before>]``
``[/last=<last>][/first=<first>]``
:access: authorized users (ACL flag: ``voucher.revoke``) | ['REVOKES', '/', 'INVALIDATES', 'a', 'filtered', 'list', 'of', 'vouchers', '.', ':', 'type', 'vid_encoded', ':', 'alphanumeric', '(', '64', ')', ':', 'param', 'vid_encoded', ':', 'Voucher', 'ID', 'as', 'a', 'string', 'with', 'CRC', '.', ':', 'type', 'uid_from', ':', 'bigint', ':', 'param', 'uid_from', ':', 'Filter', 'by', 'source', 'account', 'UID', '.', ':', 'type', 'uid_to', ':', 'bigint', ':', 'param', 'uid_to', ':', 'Filter', 'by', 'destination', 'account', 'UID', '.', ':', 'type', 'gid', ':', 'alphanumeric', '(', '32', ')', ':', 'param', 'gid', ':', 'Filter', 'by', 'voucher', 'Group', 'ID', '.', 'GID', 'is', 'localized', 'to', 'uid_from', '.', ':', 'type', 'valid_after', ':', 'datetime', '/', 'dict', ':', 'param', 'valid_after', ':', 'Voucher', 'has', 'to', 'be', 'valid', 'after', 'this', 'timestamp', '.', 'Absolute', '(', 'datetime', ')', 'or', 'relative', '(', 'dict', ')', 'timestamps', 'are', 'accepted', '.', 'Valid', 'keys', 'for', 'relative', 'timestamp', 'dictionary', 'are', 'same', 'as', 'keyword', 'arguments', 'for', 'datetime', '.', 'timedelta', '(', 'days', 'seconds', 'minutes', 'hours', 'weeks', ')', '.', ':', 'type', 'valid_before', ':', 'datetime', '/', 'dict', ':', 'param', 'valid_before', ':', 'Voucher', 'was', 'valid', 'until', 'this', 'timestamp', '(', 'for', 'format', 'see', 'the', 'valid_after', 'above', ')', '.', ':', 'type', 'last', ':', 'bigint', ':', 'param', 'last', ':', 'The', 'number', 'of', 'newest', 'vouchers', '(', 'that', 'satisfy', 'all', 'other', 'criteria', ')', 'to', 'return', '.', ':', 'type', 'first', ':', 'bigint', ':', 'param', 'first', ':', 'The', 'number', 'of', 'oldest', 'vouchers', '(', 'that', 'satisfy', 'all', 'other', 'criteria', ')', 'to', 'return', '.', ':', 'note', ':', 'As', 'with', 'get_vouchers', 'filters', 'are', 'restrictive', 'narrowing', 'down', 'the', 'set', 'of', 'vouchers', 'which', 'initially', 'includes', 'complete', 'voucher', 'collection', '.', 'That', 'means', 'in', 'turn', 'that', 'a', 'naive', 'empty', '-', 'handed', 'revoke_vouchers', '()', 'call', 'shall', 'revoke', '**', 'all', '**', 'un', '-', 'used', 'vouchers', '(', 'both', 'valid', 'and', 'expired', ')', '!', ':', 'rtype', ':', 'dict', ':', 'returns', ':', 'A', 'dictionary', 'of', 'successfully', 'revoked', 'vouchers', 'i', '.', 'e', '.', 'a', 'map', 'vid_encoded', ':', 'refund_transfer_id', 'for', 'all', 'successfully', 'revoked', 'vouchers', '.', ':', 'raises', 'GeneralException', ':', ':', 'resource', ':', 'vouchers', '[', '/', '<vid_encoded', '>', ']', '[', '/', 'from', '=', '<uid_from', '>', ']', '[', '/', 'to', '=', '<uid_to', '>', ']', '[', '/', 'valid_after', '=', '<valid_after', '>', ']', '[', '/', 'valid_before', '=', '<valid_before', '>', ']', '[', '/', 'last', '=', '<last', '>', ']', '[', '/', 'first', '=', '<first', '>', ']', ':', 'access', ':', 'authorized', 'users', '(', 'ACL', 'flag', ':', 'voucher', '.', 'revoke', ')'] | train | https://github.com/vingd/vingd-api-python/blob/7548a49973a472f7277c8ef847563faa7b6f3706/vingd/client.py#L853-L925 |
1,974 | ladybug-tools/ladybug | ladybug/wea.py | Wea.directional_irradiance | def directional_irradiance(self, altitude=90, azimuth=180,
ground_reflectance=0.2, isotrophic=True):
"""Returns the irradiance components facing a given altitude and azimuth.
This method computes unobstructed solar flux facing a given
altitude and azimuth. The default is set to return the golbal horizontal
irradiance, assuming an altitude facing straight up (90 degrees).
Args:
altitude: A number between -90 and 90 that represents the
altitude at which irradiance is being evaluated in degrees.
azimuth: A number between 0 and 360 that represents the
azimuth at wich irradiance is being evaluated in degrees.
ground_reflectance: A number between 0 and 1 that represents the
reflectance of the ground. Default is set to 0.2. Some
common ground reflectances are:
urban: 0.18
grass: 0.20
fresh grass: 0.26
soil: 0.17
sand: 0.40
snow: 0.65
fresh_snow: 0.75
asphalt: 0.12
concrete: 0.30
sea: 0.06
isotrophic: A boolean value that sets whether an istotrophic sky is
used (as opposed to an anisotrophic sky). An isotrophic sky
assummes an even distribution of diffuse irradiance across the
sky while an anisotrophic sky places more diffuse irradiance
near the solar disc. Default is set to True for isotrophic
Returns:
total_irradiance: A data collection of total solar irradiance.
direct_irradiance: A data collection of direct solar irradiance.
diffuse_irradiance: A data collection of diffuse sky solar irradiance.
reflected_irradiance: A data collection of ground reflected solar irradiance.
"""
# function to convert polar coordinates to xyz.
def pol2cart(phi, theta):
mult = math.cos(theta)
x = math.sin(phi) * mult
y = math.cos(phi) * mult
z = math.sin(theta)
return Vector3(x, y, z)
# convert the altitude and azimuth to a normal vector
normal = pol2cart(math.radians(azimuth), math.radians(altitude))
# create sunpath and get altitude at every timestep of the year
direct_irr, diffuse_irr, reflected_irr, total_irr = [], [], [], []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr, dhr in zip(self.datetimes, self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
sun_vec = pol2cart(math.radians(sun.azimuth),
math.radians(sun.altitude))
vec_angle = sun_vec.angle(normal)
# direct irradiance on surface
srf_dir = 0
if sun.altitude > 0 and vec_angle < math.pi / 2:
srf_dir = dnr * math.cos(vec_angle)
# diffuse irradiance on surface
if isotrophic is True:
srf_dif = dhr * ((math.sin(math.radians(altitude)) / 2) + 0.5)
else:
y = max(0.45, 0.55 + (0.437 * math.cos(vec_angle)) + 0.313 *
math.cos(vec_angle) * 0.313 * math.cos(vec_angle))
srf_dif = self.dhr * (y * (
math.sin(math.radians(abs(90 - altitude)))) +
math.cos(math.radians(abs(90 - altitude))))
# reflected irradiance on surface.
e_glob = dhr + dnr * math.cos(math.radians(90 - sun.altitude))
srf_ref = e_glob * ground_reflectance * (0.5 - (math.sin(
math.radians(altitude)) / 2))
# add it all together
direct_irr.append(srf_dir)
diffuse_irr.append(srf_dif)
reflected_irr.append(srf_ref)
total_irr.append(srf_dir + srf_dif + srf_ref)
# create the headers
a_per = AnalysisPeriod(timestep=self.timestep, is_leap_year=self.is_leap_year)
direct_hea = diffuse_hea = reflected_hea = total_hea = \
Header(Irradiance(), 'W/m2', a_per, self.metadata)
# create the data collections
direct_irradiance = HourlyContinuousCollection(direct_hea, direct_irr)
diffuse_irradiance = HourlyContinuousCollection(diffuse_hea, diffuse_irr)
reflected_irradiance = HourlyContinuousCollection(reflected_hea, reflected_irr)
total_irradiance = HourlyContinuousCollection(total_hea, total_irr)
return total_irradiance, direct_irradiance, \
diffuse_irradiance, reflected_irradiance | python | def directional_irradiance(self, altitude=90, azimuth=180,
ground_reflectance=0.2, isotrophic=True):
"""Returns the irradiance components facing a given altitude and azimuth.
This method computes unobstructed solar flux facing a given
altitude and azimuth. The default is set to return the golbal horizontal
irradiance, assuming an altitude facing straight up (90 degrees).
Args:
altitude: A number between -90 and 90 that represents the
altitude at which irradiance is being evaluated in degrees.
azimuth: A number between 0 and 360 that represents the
azimuth at wich irradiance is being evaluated in degrees.
ground_reflectance: A number between 0 and 1 that represents the
reflectance of the ground. Default is set to 0.2. Some
common ground reflectances are:
urban: 0.18
grass: 0.20
fresh grass: 0.26
soil: 0.17
sand: 0.40
snow: 0.65
fresh_snow: 0.75
asphalt: 0.12
concrete: 0.30
sea: 0.06
isotrophic: A boolean value that sets whether an istotrophic sky is
used (as opposed to an anisotrophic sky). An isotrophic sky
assummes an even distribution of diffuse irradiance across the
sky while an anisotrophic sky places more diffuse irradiance
near the solar disc. Default is set to True for isotrophic
Returns:
total_irradiance: A data collection of total solar irradiance.
direct_irradiance: A data collection of direct solar irradiance.
diffuse_irradiance: A data collection of diffuse sky solar irradiance.
reflected_irradiance: A data collection of ground reflected solar irradiance.
"""
# function to convert polar coordinates to xyz.
def pol2cart(phi, theta):
mult = math.cos(theta)
x = math.sin(phi) * mult
y = math.cos(phi) * mult
z = math.sin(theta)
return Vector3(x, y, z)
# convert the altitude and azimuth to a normal vector
normal = pol2cart(math.radians(azimuth), math.radians(altitude))
# create sunpath and get altitude at every timestep of the year
direct_irr, diffuse_irr, reflected_irr, total_irr = [], [], [], []
sp = Sunpath.from_location(self.location)
sp.is_leap_year = self.is_leap_year
for dt, dnr, dhr in zip(self.datetimes, self.direct_normal_irradiance,
self.diffuse_horizontal_irradiance):
sun = sp.calculate_sun_from_date_time(dt)
sun_vec = pol2cart(math.radians(sun.azimuth),
math.radians(sun.altitude))
vec_angle = sun_vec.angle(normal)
# direct irradiance on surface
srf_dir = 0
if sun.altitude > 0 and vec_angle < math.pi / 2:
srf_dir = dnr * math.cos(vec_angle)
# diffuse irradiance on surface
if isotrophic is True:
srf_dif = dhr * ((math.sin(math.radians(altitude)) / 2) + 0.5)
else:
y = max(0.45, 0.55 + (0.437 * math.cos(vec_angle)) + 0.313 *
math.cos(vec_angle) * 0.313 * math.cos(vec_angle))
srf_dif = self.dhr * (y * (
math.sin(math.radians(abs(90 - altitude)))) +
math.cos(math.radians(abs(90 - altitude))))
# reflected irradiance on surface.
e_glob = dhr + dnr * math.cos(math.radians(90 - sun.altitude))
srf_ref = e_glob * ground_reflectance * (0.5 - (math.sin(
math.radians(altitude)) / 2))
# add it all together
direct_irr.append(srf_dir)
diffuse_irr.append(srf_dif)
reflected_irr.append(srf_ref)
total_irr.append(srf_dir + srf_dif + srf_ref)
# create the headers
a_per = AnalysisPeriod(timestep=self.timestep, is_leap_year=self.is_leap_year)
direct_hea = diffuse_hea = reflected_hea = total_hea = \
Header(Irradiance(), 'W/m2', a_per, self.metadata)
# create the data collections
direct_irradiance = HourlyContinuousCollection(direct_hea, direct_irr)
diffuse_irradiance = HourlyContinuousCollection(diffuse_hea, diffuse_irr)
reflected_irradiance = HourlyContinuousCollection(reflected_hea, reflected_irr)
total_irradiance = HourlyContinuousCollection(total_hea, total_irr)
return total_irradiance, direct_irradiance, \
diffuse_irradiance, reflected_irradiance | ['def', 'directional_irradiance', '(', 'self', ',', 'altitude', '=', '90', ',', 'azimuth', '=', '180', ',', 'ground_reflectance', '=', '0.2', ',', 'isotrophic', '=', 'True', ')', ':', '# function to convert polar coordinates to xyz.', 'def', 'pol2cart', '(', 'phi', ',', 'theta', ')', ':', 'mult', '=', 'math', '.', 'cos', '(', 'theta', ')', 'x', '=', 'math', '.', 'sin', '(', 'phi', ')', '*', 'mult', 'y', '=', 'math', '.', 'cos', '(', 'phi', ')', '*', 'mult', 'z', '=', 'math', '.', 'sin', '(', 'theta', ')', 'return', 'Vector3', '(', 'x', ',', 'y', ',', 'z', ')', '# convert the altitude and azimuth to a normal vector', 'normal', '=', 'pol2cart', '(', 'math', '.', 'radians', '(', 'azimuth', ')', ',', 'math', '.', 'radians', '(', 'altitude', ')', ')', '# create sunpath and get altitude at every timestep of the year', 'direct_irr', ',', 'diffuse_irr', ',', 'reflected_irr', ',', 'total_irr', '=', '[', ']', ',', '[', ']', ',', '[', ']', ',', '[', ']', 'sp', '=', 'Sunpath', '.', 'from_location', '(', 'self', '.', 'location', ')', 'sp', '.', 'is_leap_year', '=', 'self', '.', 'is_leap_year', 'for', 'dt', ',', 'dnr', ',', 'dhr', 'in', 'zip', '(', 'self', '.', 'datetimes', ',', 'self', '.', 'direct_normal_irradiance', ',', 'self', '.', 'diffuse_horizontal_irradiance', ')', ':', 'sun', '=', 'sp', '.', 'calculate_sun_from_date_time', '(', 'dt', ')', 'sun_vec', '=', 'pol2cart', '(', 'math', '.', 'radians', '(', 'sun', '.', 'azimuth', ')', ',', 'math', '.', 'radians', '(', 'sun', '.', 'altitude', ')', ')', 'vec_angle', '=', 'sun_vec', '.', 'angle', '(', 'normal', ')', '# direct irradiance on surface', 'srf_dir', '=', '0', 'if', 'sun', '.', 'altitude', '>', '0', 'and', 'vec_angle', '<', 'math', '.', 'pi', '/', '2', ':', 'srf_dir', '=', 'dnr', '*', 'math', '.', 'cos', '(', 'vec_angle', ')', '# diffuse irradiance on surface', 'if', 'isotrophic', 'is', 'True', ':', 'srf_dif', '=', 'dhr', '*', '(', '(', 'math', '.', 'sin', '(', 'math', '.', 'radians', '(', 'altitude', ')', ')', '/', '2', ')', '+', '0.5', ')', 'else', ':', 'y', '=', 'max', '(', '0.45', ',', '0.55', '+', '(', '0.437', '*', 'math', '.', 'cos', '(', 'vec_angle', ')', ')', '+', '0.313', '*', 'math', '.', 'cos', '(', 'vec_angle', ')', '*', '0.313', '*', 'math', '.', 'cos', '(', 'vec_angle', ')', ')', 'srf_dif', '=', 'self', '.', 'dhr', '*', '(', 'y', '*', '(', 'math', '.', 'sin', '(', 'math', '.', 'radians', '(', 'abs', '(', '90', '-', 'altitude', ')', ')', ')', ')', '+', 'math', '.', 'cos', '(', 'math', '.', 'radians', '(', 'abs', '(', '90', '-', 'altitude', ')', ')', ')', ')', '# reflected irradiance on surface.', 'e_glob', '=', 'dhr', '+', 'dnr', '*', 'math', '.', 'cos', '(', 'math', '.', 'radians', '(', '90', '-', 'sun', '.', 'altitude', ')', ')', 'srf_ref', '=', 'e_glob', '*', 'ground_reflectance', '*', '(', '0.5', '-', '(', 'math', '.', 'sin', '(', 'math', '.', 'radians', '(', 'altitude', ')', ')', '/', '2', ')', ')', '# add it all together', 'direct_irr', '.', 'append', '(', 'srf_dir', ')', 'diffuse_irr', '.', 'append', '(', 'srf_dif', ')', 'reflected_irr', '.', 'append', '(', 'srf_ref', ')', 'total_irr', '.', 'append', '(', 'srf_dir', '+', 'srf_dif', '+', 'srf_ref', ')', '# create the headers', 'a_per', '=', 'AnalysisPeriod', '(', 'timestep', '=', 'self', '.', 'timestep', ',', 'is_leap_year', '=', 'self', '.', 'is_leap_year', ')', 'direct_hea', '=', 'diffuse_hea', '=', 'reflected_hea', '=', 'total_hea', '=', 'Header', '(', 'Irradiance', '(', ')', ',', "'W/m2'", ',', 'a_per', ',', 'self', '.', 'metadata', ')', '# create the data collections', 'direct_irradiance', '=', 'HourlyContinuousCollection', '(', 'direct_hea', ',', 'direct_irr', ')', 'diffuse_irradiance', '=', 'HourlyContinuousCollection', '(', 'diffuse_hea', ',', 'diffuse_irr', ')', 'reflected_irradiance', '=', 'HourlyContinuousCollection', '(', 'reflected_hea', ',', 'reflected_irr', ')', 'total_irradiance', '=', 'HourlyContinuousCollection', '(', 'total_hea', ',', 'total_irr', ')', 'return', 'total_irradiance', ',', 'direct_irradiance', ',', 'diffuse_irradiance', ',', 'reflected_irradiance'] | Returns the irradiance components facing a given altitude and azimuth.
This method computes unobstructed solar flux facing a given
altitude and azimuth. The default is set to return the golbal horizontal
irradiance, assuming an altitude facing straight up (90 degrees).
Args:
altitude: A number between -90 and 90 that represents the
altitude at which irradiance is being evaluated in degrees.
azimuth: A number between 0 and 360 that represents the
azimuth at wich irradiance is being evaluated in degrees.
ground_reflectance: A number between 0 and 1 that represents the
reflectance of the ground. Default is set to 0.2. Some
common ground reflectances are:
urban: 0.18
grass: 0.20
fresh grass: 0.26
soil: 0.17
sand: 0.40
snow: 0.65
fresh_snow: 0.75
asphalt: 0.12
concrete: 0.30
sea: 0.06
isotrophic: A boolean value that sets whether an istotrophic sky is
used (as opposed to an anisotrophic sky). An isotrophic sky
assummes an even distribution of diffuse irradiance across the
sky while an anisotrophic sky places more diffuse irradiance
near the solar disc. Default is set to True for isotrophic
Returns:
total_irradiance: A data collection of total solar irradiance.
direct_irradiance: A data collection of direct solar irradiance.
diffuse_irradiance: A data collection of diffuse sky solar irradiance.
reflected_irradiance: A data collection of ground reflected solar irradiance. | ['Returns', 'the', 'irradiance', 'components', 'facing', 'a', 'given', 'altitude', 'and', 'azimuth', '.'] | train | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/wea.py#L594-L692 |
1,975 | honzajavorek/redis-collections | redis_collections/dicts.py | Dict.copy | def copy(self, key=None):
"""
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
"""
other = self.__class__(redis=self.redis, key=key)
other.update(self)
return other | python | def copy(self, key=None):
"""
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
"""
other = self.__class__(redis=self.redis, key=key)
other.update(self)
return other | ['def', 'copy', '(', 'self', ',', 'key', '=', 'None', ')', ':', 'other', '=', 'self', '.', '__class__', '(', 'redis', '=', 'self', '.', 'redis', ',', 'key', '=', 'key', ')', 'other', '.', 'update', '(', 'self', ')', 'return', 'other'] | Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key. | ['Return', 'a', 'new', 'collection', 'with', 'the', 'same', 'items', 'as', 'this', 'one', '.', 'If', '*', 'key', '*', 'is', 'specified', 'create', 'the', 'new', 'collection', 'with', 'the', 'given', 'Redis', 'key', '.'] | train | https://github.com/honzajavorek/redis-collections/blob/07ca8efe88fb128f7dc7319dfa6a26cd39b3776b/redis_collections/dicts.py#L362-L371 |
1,976 | Jahaja/psdash | psdash/log.py | LogReader.search | def search(self, text):
"""
Find text in log file from current position
returns a tuple containing:
absolute position,
position in result buffer,
result buffer (the actual file contents)
"""
key = hash(text)
searcher = self._searchers.get(key)
if not searcher:
searcher = ReverseFileSearcher(self.filename, text)
self._searchers[key] = searcher
position = searcher.find()
if position < 0:
# reset the searcher to start from the tail again.
searcher.reset()
return -1, -1, ''
# try to get some content from before and after the result's position
read_before = self.buffer_size / 2
offset = max(position - read_before, 0)
bufferpos = position if offset == 0 else read_before
self.fp.seek(offset)
return position, bufferpos, self.read() | python | def search(self, text):
"""
Find text in log file from current position
returns a tuple containing:
absolute position,
position in result buffer,
result buffer (the actual file contents)
"""
key = hash(text)
searcher = self._searchers.get(key)
if not searcher:
searcher = ReverseFileSearcher(self.filename, text)
self._searchers[key] = searcher
position = searcher.find()
if position < 0:
# reset the searcher to start from the tail again.
searcher.reset()
return -1, -1, ''
# try to get some content from before and after the result's position
read_before = self.buffer_size / 2
offset = max(position - read_before, 0)
bufferpos = position if offset == 0 else read_before
self.fp.seek(offset)
return position, bufferpos, self.read() | ['def', 'search', '(', 'self', ',', 'text', ')', ':', 'key', '=', 'hash', '(', 'text', ')', 'searcher', '=', 'self', '.', '_searchers', '.', 'get', '(', 'key', ')', 'if', 'not', 'searcher', ':', 'searcher', '=', 'ReverseFileSearcher', '(', 'self', '.', 'filename', ',', 'text', ')', 'self', '.', '_searchers', '[', 'key', ']', '=', 'searcher', 'position', '=', 'searcher', '.', 'find', '(', ')', 'if', 'position', '<', '0', ':', '# reset the searcher to start from the tail again.', 'searcher', '.', 'reset', '(', ')', 'return', '-', '1', ',', '-', '1', ',', "''", "# try to get some content from before and after the result's position", 'read_before', '=', 'self', '.', 'buffer_size', '/', '2', 'offset', '=', 'max', '(', 'position', '-', 'read_before', ',', '0', ')', 'bufferpos', '=', 'position', 'if', 'offset', '==', '0', 'else', 'read_before', 'self', '.', 'fp', '.', 'seek', '(', 'offset', ')', 'return', 'position', ',', 'bufferpos', ',', 'self', '.', 'read', '(', ')'] | Find text in log file from current position
returns a tuple containing:
absolute position,
position in result buffer,
result buffer (the actual file contents) | ['Find', 'text', 'in', 'log', 'file', 'from', 'current', 'position'] | train | https://github.com/Jahaja/psdash/blob/4f1784742666045a3c33bd471dbe489b4f5c7699/psdash/log.py#L114-L140 |
1,977 | sdispater/orator | orator/migrations/migrator.py | Migrator.run | def run(self, path, pretend=False):
"""
Run the outstanding migrations for a given path.
:param path: The path
:type path: str
:param pretend: Whether we execute the migrations as dry-run
:type pretend: bool
"""
self._notes = []
files = self._get_migration_files(path)
ran = self._repository.get_ran()
migrations = [f for f in files if f not in ran]
self.run_migration_list(path, migrations, pretend) | python | def run(self, path, pretend=False):
"""
Run the outstanding migrations for a given path.
:param path: The path
:type path: str
:param pretend: Whether we execute the migrations as dry-run
:type pretend: bool
"""
self._notes = []
files = self._get_migration_files(path)
ran = self._repository.get_ran()
migrations = [f for f in files if f not in ran]
self.run_migration_list(path, migrations, pretend) | ['def', 'run', '(', 'self', ',', 'path', ',', 'pretend', '=', 'False', ')', ':', 'self', '.', '_notes', '=', '[', ']', 'files', '=', 'self', '.', '_get_migration_files', '(', 'path', ')', 'ran', '=', 'self', '.', '_repository', '.', 'get_ran', '(', ')', 'migrations', '=', '[', 'f', 'for', 'f', 'in', 'files', 'if', 'f', 'not', 'in', 'ran', ']', 'self', '.', 'run_migration_list', '(', 'path', ',', 'migrations', ',', 'pretend', ')'] | Run the outstanding migrations for a given path.
:param path: The path
:type path: str
:param pretend: Whether we execute the migrations as dry-run
:type pretend: bool | ['Run', 'the', 'outstanding', 'migrations', 'for', 'a', 'given', 'path', '.'] | train | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/migrations/migrator.py#L34-L51 |
1,978 | consbio/parserutils | parserutils/elements.py | iter_elements | def iter_elements(element_function, parent_to_parse, **kwargs):
"""
Applies element_function to each of the sub-elements in parent_to_parse.
The passed in function must take at least one element, and an optional
list of kwargs which are relevant to each of the elements in the list:
def elem_func(each_elem, **kwargs)
"""
parent = get_element(parent_to_parse)
if not hasattr(element_function, '__call__'):
return parent
for child in ([] if parent is None else parent):
element_function(child, **kwargs)
return parent | python | def iter_elements(element_function, parent_to_parse, **kwargs):
"""
Applies element_function to each of the sub-elements in parent_to_parse.
The passed in function must take at least one element, and an optional
list of kwargs which are relevant to each of the elements in the list:
def elem_func(each_elem, **kwargs)
"""
parent = get_element(parent_to_parse)
if not hasattr(element_function, '__call__'):
return parent
for child in ([] if parent is None else parent):
element_function(child, **kwargs)
return parent | ['def', 'iter_elements', '(', 'element_function', ',', 'parent_to_parse', ',', '*', '*', 'kwargs', ')', ':', 'parent', '=', 'get_element', '(', 'parent_to_parse', ')', 'if', 'not', 'hasattr', '(', 'element_function', ',', "'__call__'", ')', ':', 'return', 'parent', 'for', 'child', 'in', '(', '[', ']', 'if', 'parent', 'is', 'None', 'else', 'parent', ')', ':', 'element_function', '(', 'child', ',', '*', '*', 'kwargs', ')', 'return', 'parent'] | Applies element_function to each of the sub-elements in parent_to_parse.
The passed in function must take at least one element, and an optional
list of kwargs which are relevant to each of the elements in the list:
def elem_func(each_elem, **kwargs) | ['Applies', 'element_function', 'to', 'each', 'of', 'the', 'sub', '-', 'elements', 'in', 'parent_to_parse', '.', 'The', 'passed', 'in', 'function', 'must', 'take', 'at', 'least', 'one', 'element', 'and', 'an', 'optional', 'list', 'of', 'kwargs', 'which', 'are', 'relevant', 'to', 'each', 'of', 'the', 'elements', 'in', 'the', 'list', ':', 'def', 'elem_func', '(', 'each_elem', '**', 'kwargs', ')'] | train | https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/elements.py#L911-L927 |
1,979 | mitsei/dlkit | dlkit/json_/assessment/sessions.py | ItemBankSession.get_item_ids_by_banks | def get_item_ids_by_banks(self, bank_ids):
"""Gets the list of ``Item Ids`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of bank ``Ids``
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - assessment failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
id_list = []
for item in self.get_items_by_banks(bank_ids):
id_list.append(item.get_id())
return IdList(id_list) | python | def get_item_ids_by_banks(self, bank_ids):
"""Gets the list of ``Item Ids`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of bank ``Ids``
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - assessment failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
id_list = []
for item in self.get_items_by_banks(bank_ids):
id_list.append(item.get_id())
return IdList(id_list) | ['def', 'get_item_ids_by_banks', '(', 'self', ',', 'bank_ids', ')', ':', '# Implemented from template for', '# osid.resource.ResourceBinSession.get_resource_ids_by_bins', 'id_list', '=', '[', ']', 'for', 'item', 'in', 'self', '.', 'get_items_by_banks', '(', 'bank_ids', ')', ':', 'id_list', '.', 'append', '(', 'item', '.', 'get_id', '(', ')', ')', 'return', 'IdList', '(', 'id_list', ')'] | Gets the list of ``Item Ids`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of bank ``Ids``
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - assessment failure
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'list', 'of', 'Item', 'Ids', 'corresponding', 'to', 'a', 'list', 'of', 'Banks', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L3451-L3467 |
1,980 | biocore/mustached-octo-ironman | moi/group.py | Group.close | def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub) | python | def close(self):
"""Unsubscribe the group and all jobs being listened too"""
for channel in self._listening_to:
self.toredis.unsubscribe(channel)
self.toredis.unsubscribe(self.group_pubsub) | ['def', 'close', '(', 'self', ')', ':', 'for', 'channel', 'in', 'self', '.', '_listening_to', ':', 'self', '.', 'toredis', '.', 'unsubscribe', '(', 'channel', ')', 'self', '.', 'toredis', '.', 'unsubscribe', '(', 'self', '.', 'group_pubsub', ')'] | Unsubscribe the group and all jobs being listened too | ['Unsubscribe', 'the', 'group', 'and', 'all', 'jobs', 'being', 'listened', 'too'] | train | https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L109-L113 |
1,981 | inveniosoftware/invenio-records-rest | invenio_records_rest/schemas/json.py | StrictKeysMixin.check_unknown_fields | def check_unknown_fields(self, data, original_data):
"""Check for unknown keys."""
if isinstance(original_data, list):
for elem in original_data:
self.check_unknown_fields(data, elem)
else:
for key in original_data:
if key not in [
self.fields[field].attribute or field
for field in self.fields
]:
raise ValidationError(
'Unknown field name {}'.format(key), field_names=[key]) | python | def check_unknown_fields(self, data, original_data):
"""Check for unknown keys."""
if isinstance(original_data, list):
for elem in original_data:
self.check_unknown_fields(data, elem)
else:
for key in original_data:
if key not in [
self.fields[field].attribute or field
for field in self.fields
]:
raise ValidationError(
'Unknown field name {}'.format(key), field_names=[key]) | ['def', 'check_unknown_fields', '(', 'self', ',', 'data', ',', 'original_data', ')', ':', 'if', 'isinstance', '(', 'original_data', ',', 'list', ')', ':', 'for', 'elem', 'in', 'original_data', ':', 'self', '.', 'check_unknown_fields', '(', 'data', ',', 'elem', ')', 'else', ':', 'for', 'key', 'in', 'original_data', ':', 'if', 'key', 'not', 'in', '[', 'self', '.', 'fields', '[', 'field', ']', '.', 'attribute', 'or', 'field', 'for', 'field', 'in', 'self', '.', 'fields', ']', ':', 'raise', 'ValidationError', '(', "'Unknown field name {}'", '.', 'format', '(', 'key', ')', ',', 'field_names', '=', '[', 'key', ']', ')'] | Check for unknown keys. | ['Check', 'for', 'unknown', 'keys', '.'] | train | https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/schemas/json.py#L24-L36 |
1,982 | genialis/resolwe-runtime-utils | resolwe_runtime_utils.py | save_dir | def save_dir(key, dir_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
result = {key: {"dir": dir_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]["refs"] = refs
return json.dumps(result) | python | def save_dir(key, dir_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
result = {key: {"dir": dir_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]["refs"] = refs
return json.dumps(result) | ['def', 'save_dir', '(', 'key', ',', 'dir_path', ',', '*', 'refs', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'dir_path', ')', ':', 'return', 'error', '(', '"Output \'{}\' set to a missing directory: \'{}\'."', '.', 'format', '(', 'key', ',', 'dir_path', ')', ')', 'result', '=', '{', 'key', ':', '{', '"dir"', ':', 'dir_path', '}', '}', 'if', 'refs', ':', 'missing_refs', '=', '[', 'ref', 'for', 'ref', 'in', 'refs', 'if', 'not', '(', 'os', '.', 'path', '.', 'isfile', '(', 'ref', ')', 'or', 'os', '.', 'path', '.', 'isdir', '(', 'ref', ')', ')', ']', 'if', 'len', '(', 'missing_refs', ')', '>', '0', ':', 'return', 'error', '(', '"Output \'{}\' set to missing references: \'{}\'."', '.', 'format', '(', 'key', ',', "', '", '.', 'join', '(', 'missing_refs', ')', ')', ')', 'result', '[', 'key', ']', '[', '"refs"', ']', '=', 'refs', 'return', 'json', '.', 'dumps', '(', 'result', ')'] | Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} | ['Convert', 'the', 'given', 'parameters', 'to', 'a', 'special', 'JSON', 'object', '.'] | train | https://github.com/genialis/resolwe-runtime-utils/blob/5657d7cf981972a5259b9b475eae220479401001/resolwe_runtime_utils.py#L146-L173 |
1,983 | CalebBell/fluids | fluids/fittings.py | K_tilting_disk_check_valve_Crane | def K_tilting_disk_check_valve_Crane(D, angle, fd=None):
r'''Returns the loss coefficient for a tilting disk check valve as shown in
[1]_. Results are specified in [1]_ to be for the disk's resting position
to be at 5 or 25 degrees to the flow direction. The model is implemented
here so as to switch to the higher loss 15 degree coefficients at 10
degrees, and use the lesser coefficients for any angle under 10 degrees.
.. math::
K = N\cdot f_d
N is obtained from the following table:
+--------+-------------+-------------+
| | angle = 5 ° | angle = 15° |
+========+=============+=============+
| 2-8" | 40 | 120 |
+--------+-------------+-------------+
| 10-14" | 30 | 90 |
+--------+-------------+-------------+
| 16-48" | 20 | 60 |
+--------+-------------+-------------+
The actual change of coefficients happen at <= 9" and <= 15".
Parameters
----------
D : float
Diameter of the pipe section the valve in mounted in; the
same as the line size [m]
angle : float
Angle of the tilting disk to the flow direction; nominally 5 or 15
degrees [degrees]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_tilting_disk_check_valve_Crane(.01, 5)
1.1626516551826345
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
'''
if fd is None:
fd = ft_Crane(D)
if angle < 10:
# 5 degree case
if D <= 0.2286:
# 2-8 inches, split at 9 inch
return 40*fd
elif D <= 0.381:
# 10-14 inches, split at 15 inch
return 30*fd
else:
# 16-18 inches
return 20*fd
else:
# 15 degree case
if D < 0.2286:
# 2-8 inches
return 120*fd
elif D < 0.381:
# 10-14 inches
return 90*fd
else:
# 16-18 inches
return 60*fd | python | def K_tilting_disk_check_valve_Crane(D, angle, fd=None):
r'''Returns the loss coefficient for a tilting disk check valve as shown in
[1]_. Results are specified in [1]_ to be for the disk's resting position
to be at 5 or 25 degrees to the flow direction. The model is implemented
here so as to switch to the higher loss 15 degree coefficients at 10
degrees, and use the lesser coefficients for any angle under 10 degrees.
.. math::
K = N\cdot f_d
N is obtained from the following table:
+--------+-------------+-------------+
| | angle = 5 ° | angle = 15° |
+========+=============+=============+
| 2-8" | 40 | 120 |
+--------+-------------+-------------+
| 10-14" | 30 | 90 |
+--------+-------------+-------------+
| 16-48" | 20 | 60 |
+--------+-------------+-------------+
The actual change of coefficients happen at <= 9" and <= 15".
Parameters
----------
D : float
Diameter of the pipe section the valve in mounted in; the
same as the line size [m]
angle : float
Angle of the tilting disk to the flow direction; nominally 5 or 15
degrees [degrees]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_tilting_disk_check_valve_Crane(.01, 5)
1.1626516551826345
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
'''
if fd is None:
fd = ft_Crane(D)
if angle < 10:
# 5 degree case
if D <= 0.2286:
# 2-8 inches, split at 9 inch
return 40*fd
elif D <= 0.381:
# 10-14 inches, split at 15 inch
return 30*fd
else:
# 16-18 inches
return 20*fd
else:
# 15 degree case
if D < 0.2286:
# 2-8 inches
return 120*fd
elif D < 0.381:
# 10-14 inches
return 90*fd
else:
# 16-18 inches
return 60*fd | ['def', 'K_tilting_disk_check_valve_Crane', '(', 'D', ',', 'angle', ',', 'fd', '=', 'None', ')', ':', 'if', 'fd', 'is', 'None', ':', 'fd', '=', 'ft_Crane', '(', 'D', ')', 'if', 'angle', '<', '10', ':', '# 5 degree case', 'if', 'D', '<=', '0.2286', ':', '# 2-8 inches, split at 9 inch', 'return', '40', '*', 'fd', 'elif', 'D', '<=', '0.381', ':', '# 10-14 inches, split at 15 inch', 'return', '30', '*', 'fd', 'else', ':', '# 16-18 inches', 'return', '20', '*', 'fd', 'else', ':', '# 15 degree case', 'if', 'D', '<', '0.2286', ':', '# 2-8 inches', 'return', '120', '*', 'fd', 'elif', 'D', '<', '0.381', ':', '# 10-14 inches', 'return', '90', '*', 'fd', 'else', ':', '# 16-18 inches', 'return', '60', '*', 'fd'] | r'''Returns the loss coefficient for a tilting disk check valve as shown in
[1]_. Results are specified in [1]_ to be for the disk's resting position
to be at 5 or 25 degrees to the flow direction. The model is implemented
here so as to switch to the higher loss 15 degree coefficients at 10
degrees, and use the lesser coefficients for any angle under 10 degrees.
.. math::
K = N\cdot f_d
N is obtained from the following table:
+--------+-------------+-------------+
| | angle = 5 ° | angle = 15° |
+========+=============+=============+
| 2-8" | 40 | 120 |
+--------+-------------+-------------+
| 10-14" | 30 | 90 |
+--------+-------------+-------------+
| 16-48" | 20 | 60 |
+--------+-------------+-------------+
The actual change of coefficients happen at <= 9" and <= 15".
Parameters
----------
D : float
Diameter of the pipe section the valve in mounted in; the
same as the line size [m]
angle : float
Angle of the tilting disk to the flow direction; nominally 5 or 15
degrees [degrees]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_tilting_disk_check_valve_Crane(.01, 5)
1.1626516551826345
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009. | ['r', 'Returns', 'the', 'loss', 'coefficient', 'for', 'a', 'tilting', 'disk', 'check', 'valve', 'as', 'shown', 'in', '[', '1', ']', '_', '.', 'Results', 'are', 'specified', 'in', '[', '1', ']', '_', 'to', 'be', 'for', 'the', 'disk', 's', 'resting', 'position', 'to', 'be', 'at', '5', 'or', '25', 'degrees', 'to', 'the', 'flow', 'direction', '.', 'The', 'model', 'is', 'implemented', 'here', 'so', 'as', 'to', 'switch', 'to', 'the', 'higher', 'loss', '15', 'degree', 'coefficients', 'at', '10', 'degrees', 'and', 'use', 'the', 'lesser', 'coefficients', 'for', 'any', 'angle', 'under', '10', 'degrees', '.', '..', 'math', '::', 'K', '=', 'N', '\\', 'cdot', 'f_d', 'N', 'is', 'obtained', 'from', 'the', 'following', 'table', ':'] | train | https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L3536-L3617 |
1,984 | AustralianSynchrotron/lightflow | lightflow/models/datastore.py | DataStore.exists | def exists(self, workflow_id):
""" Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists.
"""
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.find_one({"_id": ObjectId(workflow_id)}) is not None
except ConnectionFailure:
raise DataStoreNotConnected() | python | def exists(self, workflow_id):
""" Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists.
"""
try:
db = self._client[self.database]
col = db[WORKFLOW_DATA_COLLECTION_NAME]
return col.find_one({"_id": ObjectId(workflow_id)}) is not None
except ConnectionFailure:
raise DataStoreNotConnected() | ['def', 'exists', '(', 'self', ',', 'workflow_id', ')', ':', 'try', ':', 'db', '=', 'self', '.', '_client', '[', 'self', '.', 'database', ']', 'col', '=', 'db', '[', 'WORKFLOW_DATA_COLLECTION_NAME', ']', 'return', 'col', '.', 'find_one', '(', '{', '"_id"', ':', 'ObjectId', '(', 'workflow_id', ')', '}', ')', 'is', 'not', 'None', 'except', 'ConnectionFailure', ':', 'raise', 'DataStoreNotConnected', '(', ')'] | Checks whether a document with the specified workflow id already exists.
Args:
workflow_id (str): The workflow id that should be checked.
Raises:
DataStoreNotConnected: If the data store is not connected to the server.
Returns:
bool: ``True`` if a document with the specified workflow id exists. | ['Checks', 'whether', 'a', 'document', 'with', 'the', 'specified', 'workflow', 'id', 'already', 'exists', '.'] | train | https://github.com/AustralianSynchrotron/lightflow/blob/dc53dbc1d961e20fb144273baca258060705c03e/lightflow/models/datastore.py#L139-L157 |
1,985 | alexandrovteam/pyimzML | pyimzml/ImzMLParser.py | ImzMLParser.__iter_read_spectrum_meta | def __iter_read_spectrum_meta(self):
"""
This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
"""
mz_group = int_group = None
slist = None
elem_iterator = self.iterparse(self.filename, events=("start", "end"))
if sys.version_info > (3,):
_, self.root = next(elem_iterator)
else:
_, self.root = elem_iterator.next()
for event, elem in elem_iterator:
if elem.tag == self.sl + "spectrumList" and event == "start":
slist = elem
elif elem.tag == self.sl + "spectrum" and event == "end":
self.__process_spectrum(elem)
slist.remove(elem)
elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
for param in elem:
if param.attrib["name"] == "m/z array":
self.mzGroupId = elem.attrib['id']
mz_group = elem
elif param.attrib["name"] == "intensity array":
self.intGroupId = elem.attrib['id']
int_group = elem
self.__assign_precision(int_group, mz_group)
self.__fix_offsets() | python | def __iter_read_spectrum_meta(self):
"""
This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
"""
mz_group = int_group = None
slist = None
elem_iterator = self.iterparse(self.filename, events=("start", "end"))
if sys.version_info > (3,):
_, self.root = next(elem_iterator)
else:
_, self.root = elem_iterator.next()
for event, elem in elem_iterator:
if elem.tag == self.sl + "spectrumList" and event == "start":
slist = elem
elif elem.tag == self.sl + "spectrum" and event == "end":
self.__process_spectrum(elem)
slist.remove(elem)
elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
for param in elem:
if param.attrib["name"] == "m/z array":
self.mzGroupId = elem.attrib['id']
mz_group = elem
elif param.attrib["name"] == "intensity array":
self.intGroupId = elem.attrib['id']
int_group = elem
self.__assign_precision(int_group, mz_group)
self.__fix_offsets() | ['def', '__iter_read_spectrum_meta', '(', 'self', ')', ':', 'mz_group', '=', 'int_group', '=', 'None', 'slist', '=', 'None', 'elem_iterator', '=', 'self', '.', 'iterparse', '(', 'self', '.', 'filename', ',', 'events', '=', '(', '"start"', ',', '"end"', ')', ')', 'if', 'sys', '.', 'version_info', '>', '(', '3', ',', ')', ':', '_', ',', 'self', '.', 'root', '=', 'next', '(', 'elem_iterator', ')', 'else', ':', '_', ',', 'self', '.', 'root', '=', 'elem_iterator', '.', 'next', '(', ')', 'for', 'event', ',', 'elem', 'in', 'elem_iterator', ':', 'if', 'elem', '.', 'tag', '==', 'self', '.', 'sl', '+', '"spectrumList"', 'and', 'event', '==', '"start"', ':', 'slist', '=', 'elem', 'elif', 'elem', '.', 'tag', '==', 'self', '.', 'sl', '+', '"spectrum"', 'and', 'event', '==', '"end"', ':', 'self', '.', '__process_spectrum', '(', 'elem', ')', 'slist', '.', 'remove', '(', 'elem', ')', 'elif', 'elem', '.', 'tag', '==', 'self', '.', 'sl', '+', '"referenceableParamGroup"', 'and', 'event', '==', '"end"', ':', 'for', 'param', 'in', 'elem', ':', 'if', 'param', '.', 'attrib', '[', '"name"', ']', '==', '"m/z array"', ':', 'self', '.', 'mzGroupId', '=', 'elem', '.', 'attrib', '[', "'id'", ']', 'mz_group', '=', 'elem', 'elif', 'param', '.', 'attrib', '[', '"name"', ']', '==', '"intensity array"', ':', 'self', '.', 'intGroupId', '=', 'elem', '.', 'attrib', '[', "'id'", ']', 'int_group', '=', 'elem', 'self', '.', '__assign_precision', '(', 'int_group', ',', 'mz_group', ')', 'self', '.', '__fix_offsets', '(', ')'] | This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer". | ['This', 'method', 'should', 'only', 'be', 'called', 'by', '__init__', '.', 'Reads', 'the', 'data', 'formats', 'coordinates', 'and', 'offsets', 'from', 'the', '.', 'imzML', 'file', 'and', 'initializes', 'the', 'respective', 'attributes', '.', 'While', 'traversing', 'the', 'XML', 'tree', 'the', 'per', '-', 'spectrum', 'metadata', 'is', 'pruned', 'i', '.', 'e', '.', 'the', '<spectrumList', '>', 'element', '(', 's', ')', 'are', 'left', 'behind', 'empty', '.'] | train | https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L115-L148 |
1,986 | materialsproject/pymatgen | pymatgen/electronic_structure/plotter.py | BoltztrapPlotter.plot_power_factor_mu | def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
Plot the power factor in function of Fermi level. Semi-log plot
Args:
temp: the temperature
xlim: a list of min and max fermi energy by default (0, and band
gap)
tau: A relaxation time in s. By default none and the plot is by
units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt | python | def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
Plot the power factor in function of Fermi level. Semi-log plot
Args:
temp: the temperature
xlim: a list of min and max fermi energy by default (0, and band
gap)
tau: A relaxation time in s. By default none and the plot is by
units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt | ['def', 'plot_power_factor_mu', '(', 'self', ',', 'temp', '=', '600', ',', 'output', '=', "'eig'", ',', 'relaxation_time', '=', '1e-14', ',', 'xlim', '=', 'None', ')', ':', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'plt', '.', 'figure', '(', 'figsize', '=', '(', '9', ',', '7', ')', ')', 'pf', '=', 'self', '.', '_bz', '.', 'get_power_factor', '(', 'relaxation_time', '=', 'relaxation_time', ',', 'output', '=', 'output', ',', 'doping_levels', '=', 'False', ')', '[', 'temp', ']', 'plt', '.', 'semilogy', '(', 'self', '.', '_bz', '.', 'mu_steps', ',', 'pf', ',', 'linewidth', '=', '3.0', ')', 'self', '.', '_plot_bg_limits', '(', ')', 'self', '.', '_plot_doping', '(', 'temp', ')', 'if', 'output', '==', "'eig'", ':', 'plt', '.', 'legend', '(', '[', "'PF$_1$'", ',', "'PF$_2$'", ',', "'PF$_3$'", ']', ')', 'if', 'xlim', 'is', 'None', ':', 'plt', '.', 'xlim', '(', '-', '0.5', ',', 'self', '.', '_bz', '.', 'gap', '+', '0.5', ')', 'else', ':', 'plt', '.', 'xlim', '(', 'xlim', ')', 'plt', '.', 'ylabel', '(', '"Power factor, ($\\\\mu$W/(mK$^2$))"', ',', 'fontsize', '=', '30.0', ')', 'plt', '.', 'xlabel', '(', '"E-E$_f$ (eV)"', ',', 'fontsize', '=', '30.0', ')', 'plt', '.', 'xticks', '(', 'fontsize', '=', '25', ')', 'plt', '.', 'yticks', '(', 'fontsize', '=', '25', ')', 'plt', '.', 'tight_layout', '(', ')', 'return', 'plt'] | Plot the power factor in function of Fermi level. Semi-log plot
Args:
temp: the temperature
xlim: a list of min and max fermi energy by default (0, and band
gap)
tau: A relaxation time in s. By default none and the plot is by
units of relaxation time
Returns:
a matplotlib object | ['Plot', 'the', 'power', 'factor', 'in', 'function', 'of', 'Fermi', 'level', '.', 'Semi', '-', 'log', 'plot'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/plotter.py#L2797-L2831 |
1,987 | nutechsoftware/alarmdecoder | alarmdecoder/decoder.py | AlarmDecoder._handle_rfx | def _handle_rfx(self, data):
"""
Handle RF messages.
:param data: RF message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.RFMessage`
"""
msg = RFMessage(data)
self.on_rfx_message(message=msg)
return msg | python | def _handle_rfx(self, data):
"""
Handle RF messages.
:param data: RF message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.RFMessage`
"""
msg = RFMessage(data)
self.on_rfx_message(message=msg)
return msg | ['def', '_handle_rfx', '(', 'self', ',', 'data', ')', ':', 'msg', '=', 'RFMessage', '(', 'data', ')', 'self', '.', 'on_rfx_message', '(', 'message', '=', 'msg', ')', 'return', 'msg'] | Handle RF messages.
:param data: RF message to parse
:type data: string
:returns: :py:class:`~alarmdecoder.messages.RFMessage` | ['Handle', 'RF', 'messages', '.'] | train | https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L499-L512 |
1,988 | ska-sa/katcp-python | katcp/client.py | DeviceClient.until_protocol | def until_protocol(self, timeout=None):
"""Return future that resolves after receipt of katcp protocol info.
If the returned future resolves, the server's protocol information is
available in the ProtocolFlags instance self.protocol_flags.
"""
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._received_protocol_info.until_set(timeout=timedelta) | python | def until_protocol(self, timeout=None):
"""Return future that resolves after receipt of katcp protocol info.
If the returned future resolves, the server's protocol information is
available in the ProtocolFlags instance self.protocol_flags.
"""
t0 = self.ioloop.time()
yield self.until_running(timeout=timeout)
t1 = self.ioloop.time()
if timeout:
timedelta = timeout - (t1 - t0)
else:
timedelta = None
assert get_thread_ident() == self.ioloop_thread_id
yield self._received_protocol_info.until_set(timeout=timedelta) | ['def', 'until_protocol', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', 't0', '=', 'self', '.', 'ioloop', '.', 'time', '(', ')', 'yield', 'self', '.', 'until_running', '(', 'timeout', '=', 'timeout', ')', 't1', '=', 'self', '.', 'ioloop', '.', 'time', '(', ')', 'if', 'timeout', ':', 'timedelta', '=', 'timeout', '-', '(', 't1', '-', 't0', ')', 'else', ':', 'timedelta', '=', 'None', 'assert', 'get_thread_ident', '(', ')', '==', 'self', '.', 'ioloop_thread_id', 'yield', 'self', '.', '_received_protocol_info', '.', 'until_set', '(', 'timeout', '=', 'timedelta', ')'] | Return future that resolves after receipt of katcp protocol info.
If the returned future resolves, the server's protocol information is
available in the ProtocolFlags instance self.protocol_flags. | ['Return', 'future', 'that', 'resolves', 'after', 'receipt', 'of', 'katcp', 'protocol', 'info', '.'] | train | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L972-L987 |
1,989 | Tanganelli/CoAPthon3 | coapthon/layers/messagelayer.py | MessageLayer.fetch_mid | def fetch_mid(self):
"""
Gets the next valid MID.
:return: the mid to use
"""
current_mid = self._current_mid
self._current_mid += 1
self._current_mid %= 65535
return current_mid | python | def fetch_mid(self):
"""
Gets the next valid MID.
:return: the mid to use
"""
current_mid = self._current_mid
self._current_mid += 1
self._current_mid %= 65535
return current_mid | ['def', 'fetch_mid', '(', 'self', ')', ':', 'current_mid', '=', 'self', '.', '_current_mid', 'self', '.', '_current_mid', '+=', '1', 'self', '.', '_current_mid', '%=', '65535', 'return', 'current_mid'] | Gets the next valid MID.
:return: the mid to use | ['Gets', 'the', 'next', 'valid', 'MID', '.'] | train | https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/messagelayer.py#L40-L49 |
1,990 | doloopwhile/PyExecJS | execjs/_external_runtime.py | _find_executable | def _find_executable(prog, pathext=("",)):
"""protected"""
pathlist = _decode_if_not_text(os.environ.get('PATH', '')).split(os.pathsep)
for dir in pathlist:
for ext in pathext:
filename = os.path.join(dir, prog + ext)
try:
st = os.stat(filename)
except os.error:
continue
if stat.S_ISREG(st.st_mode) and (stat.S_IMODE(st.st_mode) & 0o111):
return filename
return None | python | def _find_executable(prog, pathext=("",)):
"""protected"""
pathlist = _decode_if_not_text(os.environ.get('PATH', '')).split(os.pathsep)
for dir in pathlist:
for ext in pathext:
filename = os.path.join(dir, prog + ext)
try:
st = os.stat(filename)
except os.error:
continue
if stat.S_ISREG(st.st_mode) and (stat.S_IMODE(st.st_mode) & 0o111):
return filename
return None | ['def', '_find_executable', '(', 'prog', ',', 'pathext', '=', '(', '""', ',', ')', ')', ':', 'pathlist', '=', '_decode_if_not_text', '(', 'os', '.', 'environ', '.', 'get', '(', "'PATH'", ',', "''", ')', ')', '.', 'split', '(', 'os', '.', 'pathsep', ')', 'for', 'dir', 'in', 'pathlist', ':', 'for', 'ext', 'in', 'pathext', ':', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'dir', ',', 'prog', '+', 'ext', ')', 'try', ':', 'st', '=', 'os', '.', 'stat', '(', 'filename', ')', 'except', 'os', '.', 'error', ':', 'continue', 'if', 'stat', '.', 'S_ISREG', '(', 'st', '.', 'st_mode', ')', 'and', '(', 'stat', '.', 'S_IMODE', '(', 'st', '.', 'st_mode', ')', '&', '0o111', ')', ':', 'return', 'filename', 'return', 'None'] | protected | ['protected'] | train | https://github.com/doloopwhile/PyExecJS/blob/e300f0a8120c0b7b70eed0758c3c85a9bd1a7b9f/execjs/_external_runtime.py#L182-L195 |
1,991 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/graph/graph_client.py | GraphClient.set_avatar | def set_avatar(self, avatar, subject_descriptor):
"""SetAvatar.
[Preview API]
:param :class:`<Avatar> <azure.devops.v5_1.graph.models.Avatar>` avatar:
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
content = self._serialize.body(avatar, 'Avatar')
self._send(http_method='PUT',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='5.1-preview.1',
route_values=route_values,
content=content) | python | def set_avatar(self, avatar, subject_descriptor):
"""SetAvatar.
[Preview API]
:param :class:`<Avatar> <azure.devops.v5_1.graph.models.Avatar>` avatar:
:param str subject_descriptor:
"""
route_values = {}
if subject_descriptor is not None:
route_values['subjectDescriptor'] = self._serialize.url('subject_descriptor', subject_descriptor, 'str')
content = self._serialize.body(avatar, 'Avatar')
self._send(http_method='PUT',
location_id='801eaf9c-0585-4be8-9cdb-b0efa074de91',
version='5.1-preview.1',
route_values=route_values,
content=content) | ['def', 'set_avatar', '(', 'self', ',', 'avatar', ',', 'subject_descriptor', ')', ':', 'route_values', '=', '{', '}', 'if', 'subject_descriptor', 'is', 'not', 'None', ':', 'route_values', '[', "'subjectDescriptor'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'subject_descriptor'", ',', 'subject_descriptor', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'avatar', ',', "'Avatar'", ')', 'self', '.', '_send', '(', 'http_method', '=', "'PUT'", ',', 'location_id', '=', "'801eaf9c-0585-4be8-9cdb-b0efa074de91'", ',', 'version', '=', "'5.1-preview.1'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ')'] | SetAvatar.
[Preview API]
:param :class:`<Avatar> <azure.devops.v5_1.graph.models.Avatar>` avatar:
:param str subject_descriptor: | ['SetAvatar', '.', '[', 'Preview', 'API', ']', ':', 'param', ':', 'class', ':', '<Avatar', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'graph', '.', 'models', '.', 'Avatar', '>', 'avatar', ':', ':', 'param', 'str', 'subject_descriptor', ':'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/graph/graph_client.py#L64-L78 |
1,992 | unbit/davvy | davvy/__init__.py | register_prop | def register_prop(name, handler_get, handler_set):
"""
register a property handler
"""
global props_get, props_set
if handler_get:
props_get[name] = handler_get
if handler_set:
props_set[name] = handler_set | python | def register_prop(name, handler_get, handler_set):
"""
register a property handler
"""
global props_get, props_set
if handler_get:
props_get[name] = handler_get
if handler_set:
props_set[name] = handler_set | ['def', 'register_prop', '(', 'name', ',', 'handler_get', ',', 'handler_set', ')', ':', 'global', 'props_get', ',', 'props_set', 'if', 'handler_get', ':', 'props_get', '[', 'name', ']', '=', 'handler_get', 'if', 'handler_set', ':', 'props_set', '[', 'name', ']', '=', 'handler_set'] | register a property handler | ['register', 'a', 'property', 'handler'] | train | https://github.com/unbit/davvy/blob/d9cd95fba25dbc76d80955bbbe5ff9d7cf52268a/davvy/__init__.py#L9-L17 |
1,993 | sibirrer/lenstronomy | lenstronomy/ImSim/MultiBand/multi_frame.py | MultiFrame.image_linear_solve | def image_linear_solve(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, inv_bool=False):
"""
computes the image (lens and source surface brightness with a given lens model).
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data
"""
A = self.linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
C_D_response, model_error_list = self.error_response(kwargs_lens, kwargs_ps)
d = self.data_response
param, cov_param, wls_model = de_lens.get_param_WLS(A.T, 1 / C_D_response, d, inv_bool=inv_bool)
kwargs_lens_0 = [kwargs_lens[k] for k in self._idex_lens_list[0]]
_, _, _, _ = self._imageModel_list[0]._update_linear_kwargs(param, kwargs_lens_0, kwargs_source, kwargs_lens_light, kwargs_ps)
wls_list = self._array2image_list(wls_model)
return wls_list, model_error_list, cov_param, param | python | def image_linear_solve(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, inv_bool=False):
"""
computes the image (lens and source surface brightness with a given lens model).
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data
"""
A = self.linear_response_matrix(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
C_D_response, model_error_list = self.error_response(kwargs_lens, kwargs_ps)
d = self.data_response
param, cov_param, wls_model = de_lens.get_param_WLS(A.T, 1 / C_D_response, d, inv_bool=inv_bool)
kwargs_lens_0 = [kwargs_lens[k] for k in self._idex_lens_list[0]]
_, _, _, _ = self._imageModel_list[0]._update_linear_kwargs(param, kwargs_lens_0, kwargs_source, kwargs_lens_light, kwargs_ps)
wls_list = self._array2image_list(wls_model)
return wls_list, model_error_list, cov_param, param | ['def', 'image_linear_solve', '(', 'self', ',', 'kwargs_lens', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ',', 'inv_bool', '=', 'False', ')', ':', 'A', '=', 'self', '.', 'linear_response_matrix', '(', 'kwargs_lens', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ')', 'C_D_response', ',', 'model_error_list', '=', 'self', '.', 'error_response', '(', 'kwargs_lens', ',', 'kwargs_ps', ')', 'd', '=', 'self', '.', 'data_response', 'param', ',', 'cov_param', ',', 'wls_model', '=', 'de_lens', '.', 'get_param_WLS', '(', 'A', '.', 'T', ',', '1', '/', 'C_D_response', ',', 'd', ',', 'inv_bool', '=', 'inv_bool', ')', 'kwargs_lens_0', '=', '[', 'kwargs_lens', '[', 'k', ']', 'for', 'k', 'in', 'self', '.', '_idex_lens_list', '[', '0', ']', ']', '_', ',', '_', ',', '_', ',', '_', '=', 'self', '.', '_imageModel_list', '[', '0', ']', '.', '_update_linear_kwargs', '(', 'param', ',', 'kwargs_lens_0', ',', 'kwargs_source', ',', 'kwargs_lens_light', ',', 'kwargs_ps', ')', 'wls_list', '=', 'self', '.', '_array2image_list', '(', 'wls_model', ')', 'return', 'wls_list', ',', 'model_error_list', ',', 'cov_param', ',', 'param'] | computes the image (lens and source surface brightness with a given lens model).
The linear parameters are computed with a weighted linear least square optimization (i.e. flux normalization of the brightness profiles)
:param kwargs_lens: list of keyword arguments corresponding to the superposition of different lens profiles
:param kwargs_source: list of keyword arguments corresponding to the superposition of different source light profiles
:param kwargs_lens_light: list of keyword arguments corresponding to different lens light surface brightness profiles
:param kwargs_ps: keyword arguments corresponding to "other" parameters, such as external shear and point source image positions
:param inv_bool: if True, invert the full linear solver Matrix Ax = y for the purpose of the covariance matrix.
:return: 1d array of surface brightness pixels of the optimal solution of the linear parameters to match the data | ['computes', 'the', 'image', '(', 'lens', 'and', 'source', 'surface', 'brightness', 'with', 'a', 'given', 'lens', 'model', ')', '.', 'The', 'linear', 'parameters', 'are', 'computed', 'with', 'a', 'weighted', 'linear', 'least', 'square', 'optimization', '(', 'i', '.', 'e', '.', 'flux', 'normalization', 'of', 'the', 'brightness', 'profiles', ')', ':', 'param', 'kwargs_lens', ':', 'list', 'of', 'keyword', 'arguments', 'corresponding', 'to', 'the', 'superposition', 'of', 'different', 'lens', 'profiles', ':', 'param', 'kwargs_source', ':', 'list', 'of', 'keyword', 'arguments', 'corresponding', 'to', 'the', 'superposition', 'of', 'different', 'source', 'light', 'profiles', ':', 'param', 'kwargs_lens_light', ':', 'list', 'of', 'keyword', 'arguments', 'corresponding', 'to', 'different', 'lens', 'light', 'surface', 'brightness', 'profiles', ':', 'param', 'kwargs_ps', ':', 'keyword', 'arguments', 'corresponding', 'to', 'other', 'parameters', 'such', 'as', 'external', 'shear', 'and', 'point', 'source', 'image', 'positions', ':', 'param', 'inv_bool', ':', 'if', 'True', 'invert', 'the', 'full', 'linear', 'solver', 'Matrix', 'Ax', '=', 'y', 'for', 'the', 'purpose', 'of', 'the', 'covariance', 'matrix', '.', ':', 'return', ':', '1d', 'array', 'of', 'surface', 'brightness', 'pixels', 'of', 'the', 'optimal', 'solution', 'of', 'the', 'linear', 'parameters', 'to', 'match', 'the', 'data'] | train | https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/MultiBand/multi_frame.py#L50-L68 |
1,994 | dwavesystems/dimod | dimod/binary_quadratic_model.py | BinaryQuadraticModel.add_variables_from | def add_variables_from(self, linear, vartype=None):
"""Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates creates an empty Ising model, adds two variables,
and subsequently adds to the bias of the one while adding a new, third,
variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0
"""
if isinstance(linear, abc.Mapping):
for v, bias in iteritems(linear):
self.add_variable(v, bias, vartype=vartype)
else:
try:
for v, bias in linear:
self.add_variable(v, bias, vartype=vartype)
except TypeError:
raise TypeError("expected 'linear' to be a dict or an iterable of 2-tuples.") | python | def add_variables_from(self, linear, vartype=None):
"""Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates creates an empty Ising model, adds two variables,
and subsequently adds to the bias of the one while adding a new, third,
variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0
"""
if isinstance(linear, abc.Mapping):
for v, bias in iteritems(linear):
self.add_variable(v, bias, vartype=vartype)
else:
try:
for v, bias in linear:
self.add_variable(v, bias, vartype=vartype)
except TypeError:
raise TypeError("expected 'linear' to be a dict or an iterable of 2-tuples.") | ['def', 'add_variables_from', '(', 'self', ',', 'linear', ',', 'vartype', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'linear', ',', 'abc', '.', 'Mapping', ')', ':', 'for', 'v', ',', 'bias', 'in', 'iteritems', '(', 'linear', ')', ':', 'self', '.', 'add_variable', '(', 'v', ',', 'bias', ',', 'vartype', '=', 'vartype', ')', 'else', ':', 'try', ':', 'for', 'v', ',', 'bias', 'in', 'linear', ':', 'self', '.', 'add_variable', '(', 'v', ',', 'bias', ',', 'vartype', '=', 'vartype', ')', 'except', 'TypeError', ':', 'raise', 'TypeError', '(', '"expected \'linear\' to be a dict or an iterable of 2-tuples."', ')'] | Add variables and/or linear biases to a binary quadratic model.
Args:
linear (dict[variable, bias]/iterable[(variable, bias)]):
A collection of variables and their linear biases to add to the model.
If a dict, keys are variables in the binary quadratic model and
values are biases. Alternatively, an iterable of (variable, bias) pairs.
Variables can be any python object that is a valid dict key.
Many methods and functions expect the biases
to be numbers but this is not explicitly checked.
If any variable already exists in the model, its bias is added to
the variable's current linear bias.
vartype (:class:`.Vartype`, optional, default=None):
Vartype of the given bias. If None, the vartype of the binary
quadratic model is used. Valid values are :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
Examples:
This example creates creates an empty Ising model, adds two variables,
and subsequently adds to the bias of the one while adding a new, third,
variable.
>>> import dimod
...
>>> bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)
>>> len(bqm.linear)
0
>>> bqm.add_variables_from({'a': .5, 'b': -1.})
>>> 'b' in bqm
True
>>> bqm.add_variables_from({'b': -1., 'c': 2.0})
>>> bqm.linear['b']
-2.0 | ['Add', 'variables', 'and', '/', 'or', 'linear', 'biases', 'to', 'a', 'binary', 'quadratic', 'model', '.'] | train | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/binary_quadratic_model.py#L459-L504 |
1,995 | django-fluent/django-fluent-contents | fluent_contents/admin/placeholderfield.py | PlaceholderFieldAdmin.get_placeholder_data | def get_placeholder_data(self, request, obj=None):
"""
Return the data of the placeholder fields.
"""
# Return all placeholder fields in the model.
if not hasattr(self.model, '_meta_placeholder_fields'):
return []
data = []
for name, field in self.model._meta_placeholder_fields.items():
assert isinstance(field, PlaceholderField)
data.append(PlaceholderData(
slot=field.slot,
title=field.verbose_name.capitalize(),
fallback_language=None, # Information cant' be known by "render_placeholder" in the template.
))
return data | python | def get_placeholder_data(self, request, obj=None):
"""
Return the data of the placeholder fields.
"""
# Return all placeholder fields in the model.
if not hasattr(self.model, '_meta_placeholder_fields'):
return []
data = []
for name, field in self.model._meta_placeholder_fields.items():
assert isinstance(field, PlaceholderField)
data.append(PlaceholderData(
slot=field.slot,
title=field.verbose_name.capitalize(),
fallback_language=None, # Information cant' be known by "render_placeholder" in the template.
))
return data | ['def', 'get_placeholder_data', '(', 'self', ',', 'request', ',', 'obj', '=', 'None', ')', ':', '# Return all placeholder fields in the model.', 'if', 'not', 'hasattr', '(', 'self', '.', 'model', ',', "'_meta_placeholder_fields'", ')', ':', 'return', '[', ']', 'data', '=', '[', ']', 'for', 'name', ',', 'field', 'in', 'self', '.', 'model', '.', '_meta_placeholder_fields', '.', 'items', '(', ')', ':', 'assert', 'isinstance', '(', 'field', ',', 'PlaceholderField', ')', 'data', '.', 'append', '(', 'PlaceholderData', '(', 'slot', '=', 'field', '.', 'slot', ',', 'title', '=', 'field', '.', 'verbose_name', '.', 'capitalize', '(', ')', ',', 'fallback_language', '=', 'None', ',', '# Information cant\' be known by "render_placeholder" in the template.', ')', ')', 'return', 'data'] | Return the data of the placeholder fields. | ['Return', 'the', 'data', 'of', 'the', 'placeholder', 'fields', '.'] | train | https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/admin/placeholderfield.py#L55-L72 |
1,996 | thombashi/pytablereader | pytablereader/csv/core.py | CsvTableTextLoader.load | def load(self):
"""
Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
"""
self._validate()
self._logger.logging_load()
self._csv_reader = csv.reader(
six.StringIO(self.source.strip()),
delimiter=self.delimiter,
quotechar=self.quotechar,
strict=True,
skipinitialspace=True,
)
formatter = CsvTableFormatter(self._to_data_matrix())
formatter.accept(self)
return formatter.to_table_data() | python | def load(self):
"""
Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader`
"""
self._validate()
self._logger.logging_load()
self._csv_reader = csv.reader(
six.StringIO(self.source.strip()),
delimiter=self.delimiter,
quotechar=self.quotechar,
strict=True,
skipinitialspace=True,
)
formatter = CsvTableFormatter(self._to_data_matrix())
formatter.accept(self)
return formatter.to_table_data() | ['def', 'load', '(', 'self', ')', ':', 'self', '.', '_validate', '(', ')', 'self', '.', '_logger', '.', 'logging_load', '(', ')', 'self', '.', '_csv_reader', '=', 'csv', '.', 'reader', '(', 'six', '.', 'StringIO', '(', 'self', '.', 'source', '.', 'strip', '(', ')', ')', ',', 'delimiter', '=', 'self', '.', 'delimiter', ',', 'quotechar', '=', 'self', '.', 'quotechar', ',', 'strict', '=', 'True', ',', 'skipinitialspace', '=', 'True', ',', ')', 'formatter', '=', 'CsvTableFormatter', '(', 'self', '.', '_to_data_matrix', '(', ')', ')', 'formatter', '.', 'accept', '(', 'self', ')', 'return', 'formatter', '.', 'to_table_data', '(', ')'] | Extract tabular data as |TableData| instances from a CSV text object.
|load_source_desc_text|
:return:
Loaded table data.
|load_table_name_desc|
=================== ========================================
Format specifier Value after the replacement
=================== ========================================
``%(filename)s`` ``""``
``%(format_name)s`` ``"csv"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ========================================
:rtype: |TableData| iterator
:raises pytablereader.DataError:
If the CSV data is invalid.
.. seealso::
:py:func:`csv.reader` | ['Extract', 'tabular', 'data', 'as', '|TableData|', 'instances', 'from', 'a', 'CSV', 'text', 'object', '.', '|load_source_desc_text|'] | train | https://github.com/thombashi/pytablereader/blob/bc3c057a2cc775bcce690e0e9019c2907b638101/pytablereader/csv/core.py#L220-L258 |
1,997 | sixty-north/asq | asq/queryables.py | Queryable.of_type | def of_type(self, classinfo):
'''Filters elements according to whether they are of a certain type.
Note: This method uses deferred execution.
Args:
classinfo: If classinfo is neither a class object nor a type object
it may be a tuple of class or type objects, or may recursively
contain other such tuples (other sequence types are not
accepted).
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If classinfo is not a class, type, or tuple of classes,
types, and such tuples.
'''
if self.closed():
raise ValueError("Attempt to call of_type() on a closed "
"Queryable.")
if not is_type(classinfo):
raise TypeError("of_type() parameter classinfo={0} is not a class "
"object or a type objector a tuple of class or "
"type objects.".format(classinfo))
return self.where(lambda x: isinstance(x, classinfo)) | python | def of_type(self, classinfo):
'''Filters elements according to whether they are of a certain type.
Note: This method uses deferred execution.
Args:
classinfo: If classinfo is neither a class object nor a type object
it may be a tuple of class or type objects, or may recursively
contain other such tuples (other sequence types are not
accepted).
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If classinfo is not a class, type, or tuple of classes,
types, and such tuples.
'''
if self.closed():
raise ValueError("Attempt to call of_type() on a closed "
"Queryable.")
if not is_type(classinfo):
raise TypeError("of_type() parameter classinfo={0} is not a class "
"object or a type objector a tuple of class or "
"type objects.".format(classinfo))
return self.where(lambda x: isinstance(x, classinfo)) | ['def', 'of_type', '(', 'self', ',', 'classinfo', ')', ':', 'if', 'self', '.', 'closed', '(', ')', ':', 'raise', 'ValueError', '(', '"Attempt to call of_type() on a closed "', '"Queryable."', ')', 'if', 'not', 'is_type', '(', 'classinfo', ')', ':', 'raise', 'TypeError', '(', '"of_type() parameter classinfo={0} is not a class "', '"object or a type objector a tuple of class or "', '"type objects."', '.', 'format', '(', 'classinfo', ')', ')', 'return', 'self', '.', 'where', '(', 'lambda', 'x', ':', 'isinstance', '(', 'x', ',', 'classinfo', ')', ')'] | Filters elements according to whether they are of a certain type.
Note: This method uses deferred execution.
Args:
classinfo: If classinfo is neither a class object nor a type object
it may be a tuple of class or type objects, or may recursively
contain other such tuples (other sequence types are not
accepted).
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If classinfo is not a class, type, or tuple of classes,
types, and such tuples. | ['Filters', 'elements', 'according', 'to', 'whether', 'they', 'are', 'of', 'a', 'certain', 'type', '.'] | train | https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L578-L607 |
1,998 | rosenbrockc/acorn | acorn/ipython.py | InteractiveDecorator.pre_run_cell | def pre_run_cell(self, cellno, code):
"""Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
"""
#First, we look for loops and list/dict comprehensions in the code. Find
#the id of the latest cell that was executed.
self.cellid = cellno
#If there is a loop somewhere in the code, it could generate millions of
#database entries and make the notebook unusable.
import ast
if findloop(ast.parse(code)):
#Disable the acorn logging systems so that we don't pollute the
#database.
from acorn.logging.decoration import set_streamlining
set_streamlining(True)
#Create the pre-execute entry for the database.
from time import time
self.pre = {
"m": "loop",
"a": None,
"s": time(),
"r": None,
"c": code,
} | python | def pre_run_cell(self, cellno, code):
"""Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
"""
#First, we look for loops and list/dict comprehensions in the code. Find
#the id of the latest cell that was executed.
self.cellid = cellno
#If there is a loop somewhere in the code, it could generate millions of
#database entries and make the notebook unusable.
import ast
if findloop(ast.parse(code)):
#Disable the acorn logging systems so that we don't pollute the
#database.
from acorn.logging.decoration import set_streamlining
set_streamlining(True)
#Create the pre-execute entry for the database.
from time import time
self.pre = {
"m": "loop",
"a": None,
"s": time(),
"r": None,
"c": code,
} | ['def', 'pre_run_cell', '(', 'self', ',', 'cellno', ',', 'code', ')', ':', '#First, we look for loops and list/dict comprehensions in the code. Find', '#the id of the latest cell that was executed.', 'self', '.', 'cellid', '=', 'cellno', '#If there is a loop somewhere in the code, it could generate millions of', '#database entries and make the notebook unusable.', 'import', 'ast', 'if', 'findloop', '(', 'ast', '.', 'parse', '(', 'code', ')', ')', ':', "#Disable the acorn logging systems so that we don't pollute the", '#database.', 'from', 'acorn', '.', 'logging', '.', 'decoration', 'import', 'set_streamlining', 'set_streamlining', '(', 'True', ')', '#Create the pre-execute entry for the database.', 'from', 'time', 'import', 'time', 'self', '.', 'pre', '=', '{', '"m"', ':', '"loop"', ',', '"a"', ':', 'None', ',', '"s"', ':', 'time', '(', ')', ',', '"r"', ':', 'None', ',', '"c"', ':', 'code', ',', '}'] | Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed. | ['Executes', 'before', 'the', 'user', '-', 'entered', 'code', 'in', 'ipython', 'is', 'run', '.', 'This', 'intercepts', 'loops', 'and', 'other', 'problematic', 'code', 'that', 'would', 'produce', 'lots', 'of', 'database', 'entries', 'and', 'streamlines', 'it', 'to', 'produce', 'only', 'a', 'single', 'entry', '.'] | train | https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L421-L451 |
1,999 | PmagPy/PmagPy | pmagpy/pmag.py | measurements_methods | def measurements_methods(meas_data, noave):
"""
get list of unique specs
"""
#
version_num = get_version()
sids = get_specs(meas_data)
# list of measurement records for this specimen
#
# step through spec by spec
#
SpecTmps, SpecOuts = [], []
for spec in sids:
TRM, IRM3D, ATRM, CR = 0, 0, 0, 0
expcodes = ""
# first collect all data for this specimen and do lab treatments
# list of measurement records for this specimen
SpecRecs = get_dictitem(meas_data, 'er_specimen_name', spec, 'T')
for rec in SpecRecs:
if 'measurement_flag' not in list(rec.keys()):
rec['measurement_flag'] = 'g'
tmpmeths = rec['magic_method_codes'].split(":")
meths = []
if "LP-TRM" in tmpmeths:
TRM = 1 # catch these suckers here!
if "LP-IRM-3D" in tmpmeths:
IRM3D = 1 # catch these suckers here!
elif "LP-AN-TRM" in tmpmeths:
ATRM = 1 # catch these suckers here!
elif "LP-CR-TRM" in tmpmeths:
CR = 1 # catch these suckers here!
#
# otherwise write over existing method codes
#
# find NRM data (LT-NO)
#
elif float(rec["measurement_temp"]) >= 273. and float(rec["measurement_temp"]) < 323.:
# between 0 and 50C is room T measurement
if ("measurement_dc_field" not in list(rec.keys()) or float(rec["measurement_dc_field"]) == 0 or rec["measurement_dc_field"] == "") and ("measurement_ac_field" not in list(rec.keys()) or float(rec["measurement_ac_field"]) == 0 or rec["measurement_ac_field"] == ""):
# measurement done in zero field!
if "treatment_temp" not in list(rec.keys()) or rec["treatment_temp"].strip() == "" or (float(rec["treatment_temp"]) >= 273. and float(rec["treatment_temp"]) < 298.):
# between 0 and 50C is room T treatment
if "treatment_ac_field" not in list(rec.keys()) or rec["treatment_ac_field"] == "" or float(rec["treatment_ac_field"]) == 0:
# no AF
# no IRM!
if "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-NO" not in meths:
meths.append("LT-NO")
elif "LT-IRM" not in meths:
meths.append("LT-IRM") # it's an IRM
#
# find AF/infield/zerofield
#
# no ARM
elif "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-AF-Z" not in meths:
meths.append("LT-AF-Z")
else: # yes ARM
if "LT-AF-I" not in meths:
meths.append("LT-AF-I")
#
# find Thermal/infield/zerofield
#
elif float(rec["treatment_temp"]) >= 323: # treatment done at high T
if TRM == 1:
if "LT-T-I" not in meths:
# TRM - even if zero applied field!
meths.append("LT-T-I")
# no TRM
elif "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0.:
if "LT-T-Z" not in meths:
# don't overwrite if part of a TRM experiment!
meths.append("LT-T-Z")
else: # yes TRM
if "LT-T-I" not in meths:
meths.append("LT-T-I")
#
# find low-T infield,zero field
#
else: # treatment done at low T
# no field
if "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-LT-Z" not in meths:
meths.append("LT-LT-Z")
else: # yes field
if "LT-LT-I" not in meths:
meths.append("LT-LT-I")
if "measurement_chi_volume" in list(rec.keys()) or "measurement_chi_mass" in list(rec.keys()):
if "LP-X" not in meths:
meths.append("LP-X")
# measurement in presence of dc field and not susceptibility;
# hysteresis!
elif "measurement_lab_dc_field" in list(rec.keys()) and rec["measurement_lab_dc_field"] != 0:
if "LP-HYS" not in meths:
hysq = input("Is this a hysteresis experiment? [1]/0")
if hysq == "" or hysq == "1":
meths.append("LP-HYS")
else:
metha = input(
"Enter the lab protocol code that best describes this experiment ")
meths.append(metha)
methcode = ""
for meth in meths:
methcode = methcode + meth.strip() + ":"
rec["magic_method_codes"] = methcode[:-1] # assign them back
#
# done with first pass, collect and assign provisional method codes
if "measurement_description" not in list(rec.keys()):
rec["measurement_description"] = ""
rec["er_citation_names"] = "This study"
SpecTmps.append(rec)
# ready for second pass through, step through specimens, check whether ptrm, ptrm tail checks, or AARM, etc.
#
for spec in sids:
MD, pTRM, IZ, ZI = 0, 0, 0, 0 # these are flags for the lab protocol codes
expcodes = ""
NewSpecs, SpecMeths = [], []
experiment_name, measnum = "", 1
if IRM3D == 1:
experiment_name = "LP-IRM-3D"
if ATRM == 1:
experiment_name = "LP-AN-TRM"
if CR == 1:
experiment_name = "LP-CR"
NewSpecs = get_dictitem(SpecTmps, 'er_specimen_name', spec, 'T')
#
# first look for replicate measurements
#
Ninit = len(NewSpecs)
if noave != 1:
# averages replicate measurements, returns treatment keys that are
# being used
vdata, treatkeys = vspec_magic(NewSpecs)
if len(vdata) != len(NewSpecs):
# print spec,'started with ',Ninit,' ending with ',len(vdata)
NewSpecs = vdata
# print "Averaged replicate measurements"
#
# now look through this specimen's records - try to figure out what experiment it is
#
if len(NewSpecs) > 1: # more than one meas for this spec - part of an unknown experiment
SpecMeths = get_list(NewSpecs, 'magic_method_codes').split(":")
# TRM steps, could be TRM acquisition, Shaw or a Thellier
# experiment or TDS experiment
if "LT-T-I" in SpecMeths and experiment_name == "":
#
# collect all the infield steps and look for changes in dc field vector
#
Steps, TI = [], 1
for rec in NewSpecs:
methods = get_list(
NewSpecs, 'magic_method_codes').split(":")
if "LT-T-I" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if "LT-AF-I" in SpecMeths and "LT-AF-Z" in SpecMeths: # must be Shaw :(
experiment_name = "LP-PI-TRM:LP-PI-ALT-AFARM"
elif TRM == 1:
experiment_name = "LP-TRM"
else:
TI = 0 # no infield steps at all
if "LT-T-Z" in SpecMeths and experiment_name == "": # thermal demag steps
if TI == 0:
experiment_name = "LP-DIR-T" # just ordinary thermal demag
elif TRM != 1: # heart pounding - could be some kind of TRM normalized paleointensity or LP-TRM-TD experiment
Temps = []
for step in Steps: # check through the infield steps - if all at same temperature, then must be a demag of a total TRM with checks
if step['treatment_temp'] not in Temps:
Temps.append(step['treatment_temp'])
if len(Temps) > 1:
experiment_name = "LP-PI-TRM" # paleointensity normalized by TRM
else:
# thermal demag of a lab TRM (could be part of a
# LP-PI-TDS experiment)
experiment_name = "LP-TRM-TD"
TZ = 1
else:
TZ = 0 # no zero field steps at all
if "LT-AF-I" in SpecMeths: # ARM steps
Steps = []
for rec in NewSpecs:
tmp = rec["magic_method_codes"].split(":")
methods = []
for meth in tmp:
methods.append(meth.strip())
if "LT-AF-I" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-ARM"
if experiment_name == "": # not anisotropy of ARM - acquisition?
field0 = rec_bak["treatment_dc_field"]
ARM = 0
for k in range(1, len(Steps)):
rec = Steps[k]
field = rec["treatment_dc_field"]
if field != field0:
ARM = 1
if ARM == 1:
experiment_name = "LP-ARM"
AFI = 1
else:
AFI = 0 # no ARM steps at all
if "LT-AF-Z" in SpecMeths and experiment_name == "": # AF demag steps
if AFI == 0:
experiment_name = "LP-DIR-AF" # just ordinary AF demag
else: # heart pounding - a pseudothellier?
experiment_name = "LP-PI-ARM"
AFZ = 1
else:
AFZ = 0 # no AF demag at all
if "LT-IRM" in SpecMeths: # IRM
Steps = []
for rec in NewSpecs:
tmp = rec["magic_method_codes"].split(":")
methods = []
for meth in tmp:
methods.append(meth.strip())
if "LT-IRM" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-IRM"
if experiment_name == "": # not anisotropy of IRM - acquisition?
field0 = rec_bak["treatment_dc_field"]
IRM = 0
for k in range(1, len(Steps)):
rec = Steps[k]
field = rec["treatment_dc_field"]
if field != field0:
IRM = 1
if IRM == 1:
experiment_name = "LP-IRM"
IRM = 1
else:
IRM = 0 # no IRM at all
if "LP-X" in SpecMeths: # susceptibility run
Steps = get_dictitem(
NewSpecs, 'magic_method_codes', 'LT-X', 'has')
if len(Steps) > 0:
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-MS"
else:
CHI = 0 # no susceptibility at all
#
# now need to deal with special thellier experiment problems - first clear up pTRM checks and tail checks
#
if experiment_name == "LP-PI-TRM": # is some sort of thellier experiment
rec_bak = NewSpecs[0]
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
methbak.append(meth.strip()) # previous steps method codes
for k in range(1, len(NewSpecs)):
rec = NewSpecs[k]
tmp = rec["magic_method_codes"].split(":")
meths = []
for meth in tmp:
# get this guys method codes
meths.append(meth.strip())
#
# check if this is a pTRM check
#
if float(rec["treatment_temp"]) < float(rec_bak["treatment_temp"]): # went backward
if "LT-T-I" in meths and "LT-T-Z" in methbak: # must be a pTRM check after first z
#
# replace LT-T-I method code with LT-PTRM-I
#
methcodes = ""
for meth in meths:
if meth != "LT-T-I":
methcode = methcode + meth.strip() + ":"
methcodes = methcodes + "LT-PTRM-I"
meths = methcodes.split(":")
pTRM = 1
elif "LT-T-Z" in meths and "LT-T-I" in methbak: # must be pTRM check after first I
#
# replace LT-T-Z method code with LT-PTRM-Z
#
methcodes = ""
for meth in meths:
if meth != "LT-T-Z":
methcode = methcode + meth + ":"
methcodes = methcodes + "LT-PTRM-Z"
meths = methcodes.split(":")
pTRM = 1
methcodes = ""
for meth in meths:
methcodes = methcodes + meth.strip() + ":"
# attach new method code
rec["magic_method_codes"] = methcodes[:-1]
rec_bak = rec # next previous record
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
# previous steps method codes
methbak.append(meth.strip())
#
# done with assigning pTRM checks. data should be "fixed" in NewSpecs
#
# now let's find out which steps are infield zerofield (IZ) and which are zerofield infield (ZI)
#
rec_bak = NewSpecs[0]
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
methbak.append(meth.strip()) # previous steps method codes
if "LT-NO" not in methbak: # first measurement is not NRM
if "LT-T-I" in methbak:
IZorZI = "LP-PI-TRM-IZ" # first pair is IZ
if "LT-T-Z" in methbak:
IZorZI = "LP-PI-TRM-ZI" # first pair is ZI
if IZorZI not in methbak:
methbak.append(IZorZI)
methcode = ""
for meth in methbak:
methcode = methcode + meth + ":"
# fix first heating step when no NRM
NewSpecs[0]["magic_method_codes"] = methcode[:-1]
else:
IZorZI = "" # first measurement is NRM and not one of a pair
for k in range(1, len(NewSpecs)): # hunt through measurements again
rec = NewSpecs[k]
tmp = rec["magic_method_codes"].split(":")
meths = []
for meth in tmp:
# get this guys method codes
meths.append(meth.strip())
#
# check if this start a new temperature step of a infield/zerofield pair
#
if float(rec["treatment_temp"]) > float(rec_bak["treatment_temp"]) and "LT-PTRM-I" not in methbak: # new pair?
if "LT-T-I" in meths: # infield of this pair
IZorZI = "LP-PI-TRM-IZ"
IZ = 1 # at least one IZ pair
elif "LT-T-Z" in meths: # zerofield
IZorZI = "LP-PI-TRM-ZI"
ZI = 1 # at least one ZI pair
# new pair after out of sequence PTRM check?
elif float(rec["treatment_temp"]) > float(rec_bak["treatment_temp"]) and "LT-PTRM-I" in methbak and IZorZI != "LP-PI-TRM-ZI":
if "LT-T-I" in meths: # infield of this pair
IZorZI = "LP-PI-TRM-IZ"
IZ = 1 # at least one IZ pair
elif "LT-T-Z" in meths: # zerofield
IZorZI = "LP-PI-TRM-ZI"
ZI = 1 # at least one ZI pair
# stayed same temp
if float(rec["treatment_temp"]) == float(rec_bak["treatment_temp"]):
if "LT-T-Z" in meths and "LT-T-I" in methbak and IZorZI == "LP-PI-TRM-ZI": # must be a tail check
#
# replace LT-T-Z method code with LT-PTRM-MD
#
methcodes = ""
for meth in meths:
if meth != "LT-T-Z":
methcode = methcode + meth + ":"
methcodes = methcodes + "LT-PTRM-MD"
meths = methcodes.split(":")
MD = 1
# fix method codes
if "LT-PTRM-I" not in meths and "LT-PTRM-MD" not in meths and IZorZI not in meths:
meths.append(IZorZI)
newmeths = []
for meth in meths:
if meth not in newmeths:
newmeths.append(meth) # try to get uniq set
methcode = ""
for meth in newmeths:
methcode = methcode + meth + ":"
rec["magic_method_codes"] = methcode[:-1]
rec_bak = rec # moving on to next record, making current one the backup
# get last specimen's method codes in a list
methbak = rec_bak["magic_method_codes"].split(":")
#
# done with this specimen's records, now check if any pTRM checks or MD checks
#
if pTRM == 1:
experiment_name = experiment_name + ":LP-PI-ALT-PTRM"
if MD == 1:
experiment_name = experiment_name + ":LP-PI-BT-MD"
if IZ == 1 and ZI == 1:
experiment_name = experiment_name + ":LP-PI-BT-IZZI"
if IZ == 1 and ZI == 0:
experiment_name = experiment_name + ":LP-PI-IZ" # Aitken method
if IZ == 0 and ZI == 1:
experiment_name = experiment_name + ":LP-PI-ZI" # Coe method
IZ, ZI, pTRM, MD = 0, 0, 0, 0 # reset these for next specimen
for rec in NewSpecs: # fix the experiment name for all recs for this specimen and save in SpecOuts
# assign an experiment name to all specimen measurements
# from this specimen
if experiment_name != "":
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_experiment_name"] = spec + ":" + experiment_name
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
measnum += 1
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
SpecOuts.append(rec)
elif experiment_name == "LP-PI-TRM:LP-PI-ALT-AFARM": # is a Shaw experiment!
ARM, TRM = 0, 0
for rec in NewSpecs: # fix the experiment name for all recs for this specimen and save in SpecOuts
# assign an experiment name to all specimen measurements from this specimen
# make the second ARM in Shaw experiments LT-AF-I-2, stick
# in the AF of ARM and TRM codes
meths = rec["magic_method_codes"].split(":")
if ARM == 1:
if "LT-AF-I" in meths:
del meths[meths.index("LT-AF-I")]
meths.append("LT-AF-I-2")
ARM = 2
if "LT-AF-Z" in meths and TRM == 0:
meths.append("LP-ARM-AFD")
if TRM == 1 and ARM == 1:
if "LT-AF-Z" in meths:
meths.append("LP-TRM-AFD")
if ARM == 2:
if "LT-AF-Z" in meths:
meths.append("LP-ARM2-AFD")
newcode = ""
for meth in meths:
newcode = newcode + meth + ":"
rec["magic_method_codes"] = newcode[:-1]
if "LT-AF-I" in meths:
ARM = 1
if "LT-T-I" in meths:
TRM = 1
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_experiment_name"] = spec + ":" + experiment_name
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
SpecOuts.append(rec)
else: # not a Thellier-Thellier or a Shaw experiemnt
for rec in NewSpecs:
if experiment_name == "":
rec["magic_method_codes"] = "LT-NO"
rec["magic_experiment_name"] = spec + ":LT-NO"
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
else:
if experiment_name not in rec['magic_method_codes']:
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_method_codes"] = rec["magic_method_codes"].strip(
':')
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
rec["magic_experiment_name"] = spec + \
":" + experiment_name
rec["magic_software_packages"] = version_num
SpecOuts.append(rec)
else:
NewSpecs[0]["magic_experiment_name"] = spec + ":" + \
NewSpecs[0]['magic_method_codes'].split(':')[0]
NewSpecs[0]["magic_software_packages"] = version_num
# just copy over the single record as is
SpecOuts.append(NewSpecs[0])
return SpecOuts | python | def measurements_methods(meas_data, noave):
"""
get list of unique specs
"""
#
version_num = get_version()
sids = get_specs(meas_data)
# list of measurement records for this specimen
#
# step through spec by spec
#
SpecTmps, SpecOuts = [], []
for spec in sids:
TRM, IRM3D, ATRM, CR = 0, 0, 0, 0
expcodes = ""
# first collect all data for this specimen and do lab treatments
# list of measurement records for this specimen
SpecRecs = get_dictitem(meas_data, 'er_specimen_name', spec, 'T')
for rec in SpecRecs:
if 'measurement_flag' not in list(rec.keys()):
rec['measurement_flag'] = 'g'
tmpmeths = rec['magic_method_codes'].split(":")
meths = []
if "LP-TRM" in tmpmeths:
TRM = 1 # catch these suckers here!
if "LP-IRM-3D" in tmpmeths:
IRM3D = 1 # catch these suckers here!
elif "LP-AN-TRM" in tmpmeths:
ATRM = 1 # catch these suckers here!
elif "LP-CR-TRM" in tmpmeths:
CR = 1 # catch these suckers here!
#
# otherwise write over existing method codes
#
# find NRM data (LT-NO)
#
elif float(rec["measurement_temp"]) >= 273. and float(rec["measurement_temp"]) < 323.:
# between 0 and 50C is room T measurement
if ("measurement_dc_field" not in list(rec.keys()) or float(rec["measurement_dc_field"]) == 0 or rec["measurement_dc_field"] == "") and ("measurement_ac_field" not in list(rec.keys()) or float(rec["measurement_ac_field"]) == 0 or rec["measurement_ac_field"] == ""):
# measurement done in zero field!
if "treatment_temp" not in list(rec.keys()) or rec["treatment_temp"].strip() == "" or (float(rec["treatment_temp"]) >= 273. and float(rec["treatment_temp"]) < 298.):
# between 0 and 50C is room T treatment
if "treatment_ac_field" not in list(rec.keys()) or rec["treatment_ac_field"] == "" or float(rec["treatment_ac_field"]) == 0:
# no AF
# no IRM!
if "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-NO" not in meths:
meths.append("LT-NO")
elif "LT-IRM" not in meths:
meths.append("LT-IRM") # it's an IRM
#
# find AF/infield/zerofield
#
# no ARM
elif "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-AF-Z" not in meths:
meths.append("LT-AF-Z")
else: # yes ARM
if "LT-AF-I" not in meths:
meths.append("LT-AF-I")
#
# find Thermal/infield/zerofield
#
elif float(rec["treatment_temp"]) >= 323: # treatment done at high T
if TRM == 1:
if "LT-T-I" not in meths:
# TRM - even if zero applied field!
meths.append("LT-T-I")
# no TRM
elif "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0.:
if "LT-T-Z" not in meths:
# don't overwrite if part of a TRM experiment!
meths.append("LT-T-Z")
else: # yes TRM
if "LT-T-I" not in meths:
meths.append("LT-T-I")
#
# find low-T infield,zero field
#
else: # treatment done at low T
# no field
if "treatment_dc_field" not in list(rec.keys()) or rec["treatment_dc_field"] == "" or float(rec["treatment_dc_field"]) == 0:
if "LT-LT-Z" not in meths:
meths.append("LT-LT-Z")
else: # yes field
if "LT-LT-I" not in meths:
meths.append("LT-LT-I")
if "measurement_chi_volume" in list(rec.keys()) or "measurement_chi_mass" in list(rec.keys()):
if "LP-X" not in meths:
meths.append("LP-X")
# measurement in presence of dc field and not susceptibility;
# hysteresis!
elif "measurement_lab_dc_field" in list(rec.keys()) and rec["measurement_lab_dc_field"] != 0:
if "LP-HYS" not in meths:
hysq = input("Is this a hysteresis experiment? [1]/0")
if hysq == "" or hysq == "1":
meths.append("LP-HYS")
else:
metha = input(
"Enter the lab protocol code that best describes this experiment ")
meths.append(metha)
methcode = ""
for meth in meths:
methcode = methcode + meth.strip() + ":"
rec["magic_method_codes"] = methcode[:-1] # assign them back
#
# done with first pass, collect and assign provisional method codes
if "measurement_description" not in list(rec.keys()):
rec["measurement_description"] = ""
rec["er_citation_names"] = "This study"
SpecTmps.append(rec)
# ready for second pass through, step through specimens, check whether ptrm, ptrm tail checks, or AARM, etc.
#
for spec in sids:
MD, pTRM, IZ, ZI = 0, 0, 0, 0 # these are flags for the lab protocol codes
expcodes = ""
NewSpecs, SpecMeths = [], []
experiment_name, measnum = "", 1
if IRM3D == 1:
experiment_name = "LP-IRM-3D"
if ATRM == 1:
experiment_name = "LP-AN-TRM"
if CR == 1:
experiment_name = "LP-CR"
NewSpecs = get_dictitem(SpecTmps, 'er_specimen_name', spec, 'T')
#
# first look for replicate measurements
#
Ninit = len(NewSpecs)
if noave != 1:
# averages replicate measurements, returns treatment keys that are
# being used
vdata, treatkeys = vspec_magic(NewSpecs)
if len(vdata) != len(NewSpecs):
# print spec,'started with ',Ninit,' ending with ',len(vdata)
NewSpecs = vdata
# print "Averaged replicate measurements"
#
# now look through this specimen's records - try to figure out what experiment it is
#
if len(NewSpecs) > 1: # more than one meas for this spec - part of an unknown experiment
SpecMeths = get_list(NewSpecs, 'magic_method_codes').split(":")
# TRM steps, could be TRM acquisition, Shaw or a Thellier
# experiment or TDS experiment
if "LT-T-I" in SpecMeths and experiment_name == "":
#
# collect all the infield steps and look for changes in dc field vector
#
Steps, TI = [], 1
for rec in NewSpecs:
methods = get_list(
NewSpecs, 'magic_method_codes').split(":")
if "LT-T-I" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if "LT-AF-I" in SpecMeths and "LT-AF-Z" in SpecMeths: # must be Shaw :(
experiment_name = "LP-PI-TRM:LP-PI-ALT-AFARM"
elif TRM == 1:
experiment_name = "LP-TRM"
else:
TI = 0 # no infield steps at all
if "LT-T-Z" in SpecMeths and experiment_name == "": # thermal demag steps
if TI == 0:
experiment_name = "LP-DIR-T" # just ordinary thermal demag
elif TRM != 1: # heart pounding - could be some kind of TRM normalized paleointensity or LP-TRM-TD experiment
Temps = []
for step in Steps: # check through the infield steps - if all at same temperature, then must be a demag of a total TRM with checks
if step['treatment_temp'] not in Temps:
Temps.append(step['treatment_temp'])
if len(Temps) > 1:
experiment_name = "LP-PI-TRM" # paleointensity normalized by TRM
else:
# thermal demag of a lab TRM (could be part of a
# LP-PI-TDS experiment)
experiment_name = "LP-TRM-TD"
TZ = 1
else:
TZ = 0 # no zero field steps at all
if "LT-AF-I" in SpecMeths: # ARM steps
Steps = []
for rec in NewSpecs:
tmp = rec["magic_method_codes"].split(":")
methods = []
for meth in tmp:
methods.append(meth.strip())
if "LT-AF-I" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-ARM"
if experiment_name == "": # not anisotropy of ARM - acquisition?
field0 = rec_bak["treatment_dc_field"]
ARM = 0
for k in range(1, len(Steps)):
rec = Steps[k]
field = rec["treatment_dc_field"]
if field != field0:
ARM = 1
if ARM == 1:
experiment_name = "LP-ARM"
AFI = 1
else:
AFI = 0 # no ARM steps at all
if "LT-AF-Z" in SpecMeths and experiment_name == "": # AF demag steps
if AFI == 0:
experiment_name = "LP-DIR-AF" # just ordinary AF demag
else: # heart pounding - a pseudothellier?
experiment_name = "LP-PI-ARM"
AFZ = 1
else:
AFZ = 0 # no AF demag at all
if "LT-IRM" in SpecMeths: # IRM
Steps = []
for rec in NewSpecs:
tmp = rec["magic_method_codes"].split(":")
methods = []
for meth in tmp:
methods.append(meth.strip())
if "LT-IRM" in methods:
Steps.append(rec) # get all infield steps together
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-IRM"
if experiment_name == "": # not anisotropy of IRM - acquisition?
field0 = rec_bak["treatment_dc_field"]
IRM = 0
for k in range(1, len(Steps)):
rec = Steps[k]
field = rec["treatment_dc_field"]
if field != field0:
IRM = 1
if IRM == 1:
experiment_name = "LP-IRM"
IRM = 1
else:
IRM = 0 # no IRM at all
if "LP-X" in SpecMeths: # susceptibility run
Steps = get_dictitem(
NewSpecs, 'magic_method_codes', 'LT-X', 'has')
if len(Steps) > 0:
rec_bak = Steps[0]
if "treatment_dc_field_phi" in list(rec_bak.keys()) and "treatment_dc_field_theta" in list(rec_bak.keys()):
# at least there is field orientation info
if rec_bak["treatment_dc_field_phi"] != "" and rec_bak["treatment_dc_field_theta"] != "":
phi0, theta0 = rec_bak["treatment_dc_field_phi"], rec_bak["treatment_dc_field_theta"]
ANIS = 0
for k in range(1, len(Steps)):
rec = Steps[k]
phi, theta = rec["treatment_dc_field_phi"], rec["treatment_dc_field_theta"]
if phi != phi0 or theta != theta0:
ANIS = 1 # if direction changes, is some sort of anisotropy experiment
if ANIS == 1:
experiment_name = "LP-AN-MS"
else:
CHI = 0 # no susceptibility at all
#
# now need to deal with special thellier experiment problems - first clear up pTRM checks and tail checks
#
if experiment_name == "LP-PI-TRM": # is some sort of thellier experiment
rec_bak = NewSpecs[0]
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
methbak.append(meth.strip()) # previous steps method codes
for k in range(1, len(NewSpecs)):
rec = NewSpecs[k]
tmp = rec["magic_method_codes"].split(":")
meths = []
for meth in tmp:
# get this guys method codes
meths.append(meth.strip())
#
# check if this is a pTRM check
#
if float(rec["treatment_temp"]) < float(rec_bak["treatment_temp"]): # went backward
if "LT-T-I" in meths and "LT-T-Z" in methbak: # must be a pTRM check after first z
#
# replace LT-T-I method code with LT-PTRM-I
#
methcodes = ""
for meth in meths:
if meth != "LT-T-I":
methcode = methcode + meth.strip() + ":"
methcodes = methcodes + "LT-PTRM-I"
meths = methcodes.split(":")
pTRM = 1
elif "LT-T-Z" in meths and "LT-T-I" in methbak: # must be pTRM check after first I
#
# replace LT-T-Z method code with LT-PTRM-Z
#
methcodes = ""
for meth in meths:
if meth != "LT-T-Z":
methcode = methcode + meth + ":"
methcodes = methcodes + "LT-PTRM-Z"
meths = methcodes.split(":")
pTRM = 1
methcodes = ""
for meth in meths:
methcodes = methcodes + meth.strip() + ":"
# attach new method code
rec["magic_method_codes"] = methcodes[:-1]
rec_bak = rec # next previous record
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
# previous steps method codes
methbak.append(meth.strip())
#
# done with assigning pTRM checks. data should be "fixed" in NewSpecs
#
# now let's find out which steps are infield zerofield (IZ) and which are zerofield infield (ZI)
#
rec_bak = NewSpecs[0]
tmp = rec_bak["magic_method_codes"].split(":")
methbak = []
for meth in tmp:
methbak.append(meth.strip()) # previous steps method codes
if "LT-NO" not in methbak: # first measurement is not NRM
if "LT-T-I" in methbak:
IZorZI = "LP-PI-TRM-IZ" # first pair is IZ
if "LT-T-Z" in methbak:
IZorZI = "LP-PI-TRM-ZI" # first pair is ZI
if IZorZI not in methbak:
methbak.append(IZorZI)
methcode = ""
for meth in methbak:
methcode = methcode + meth + ":"
# fix first heating step when no NRM
NewSpecs[0]["magic_method_codes"] = methcode[:-1]
else:
IZorZI = "" # first measurement is NRM and not one of a pair
for k in range(1, len(NewSpecs)): # hunt through measurements again
rec = NewSpecs[k]
tmp = rec["magic_method_codes"].split(":")
meths = []
for meth in tmp:
# get this guys method codes
meths.append(meth.strip())
#
# check if this start a new temperature step of a infield/zerofield pair
#
if float(rec["treatment_temp"]) > float(rec_bak["treatment_temp"]) and "LT-PTRM-I" not in methbak: # new pair?
if "LT-T-I" in meths: # infield of this pair
IZorZI = "LP-PI-TRM-IZ"
IZ = 1 # at least one IZ pair
elif "LT-T-Z" in meths: # zerofield
IZorZI = "LP-PI-TRM-ZI"
ZI = 1 # at least one ZI pair
# new pair after out of sequence PTRM check?
elif float(rec["treatment_temp"]) > float(rec_bak["treatment_temp"]) and "LT-PTRM-I" in methbak and IZorZI != "LP-PI-TRM-ZI":
if "LT-T-I" in meths: # infield of this pair
IZorZI = "LP-PI-TRM-IZ"
IZ = 1 # at least one IZ pair
elif "LT-T-Z" in meths: # zerofield
IZorZI = "LP-PI-TRM-ZI"
ZI = 1 # at least one ZI pair
# stayed same temp
if float(rec["treatment_temp"]) == float(rec_bak["treatment_temp"]):
if "LT-T-Z" in meths and "LT-T-I" in methbak and IZorZI == "LP-PI-TRM-ZI": # must be a tail check
#
# replace LT-T-Z method code with LT-PTRM-MD
#
methcodes = ""
for meth in meths:
if meth != "LT-T-Z":
methcode = methcode + meth + ":"
methcodes = methcodes + "LT-PTRM-MD"
meths = methcodes.split(":")
MD = 1
# fix method codes
if "LT-PTRM-I" not in meths and "LT-PTRM-MD" not in meths and IZorZI not in meths:
meths.append(IZorZI)
newmeths = []
for meth in meths:
if meth not in newmeths:
newmeths.append(meth) # try to get uniq set
methcode = ""
for meth in newmeths:
methcode = methcode + meth + ":"
rec["magic_method_codes"] = methcode[:-1]
rec_bak = rec # moving on to next record, making current one the backup
# get last specimen's method codes in a list
methbak = rec_bak["magic_method_codes"].split(":")
#
# done with this specimen's records, now check if any pTRM checks or MD checks
#
if pTRM == 1:
experiment_name = experiment_name + ":LP-PI-ALT-PTRM"
if MD == 1:
experiment_name = experiment_name + ":LP-PI-BT-MD"
if IZ == 1 and ZI == 1:
experiment_name = experiment_name + ":LP-PI-BT-IZZI"
if IZ == 1 and ZI == 0:
experiment_name = experiment_name + ":LP-PI-IZ" # Aitken method
if IZ == 0 and ZI == 1:
experiment_name = experiment_name + ":LP-PI-ZI" # Coe method
IZ, ZI, pTRM, MD = 0, 0, 0, 0 # reset these for next specimen
for rec in NewSpecs: # fix the experiment name for all recs for this specimen and save in SpecOuts
# assign an experiment name to all specimen measurements
# from this specimen
if experiment_name != "":
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_experiment_name"] = spec + ":" + experiment_name
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
measnum += 1
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
SpecOuts.append(rec)
elif experiment_name == "LP-PI-TRM:LP-PI-ALT-AFARM": # is a Shaw experiment!
ARM, TRM = 0, 0
for rec in NewSpecs: # fix the experiment name for all recs for this specimen and save in SpecOuts
# assign an experiment name to all specimen measurements from this specimen
# make the second ARM in Shaw experiments LT-AF-I-2, stick
# in the AF of ARM and TRM codes
meths = rec["magic_method_codes"].split(":")
if ARM == 1:
if "LT-AF-I" in meths:
del meths[meths.index("LT-AF-I")]
meths.append("LT-AF-I-2")
ARM = 2
if "LT-AF-Z" in meths and TRM == 0:
meths.append("LP-ARM-AFD")
if TRM == 1 and ARM == 1:
if "LT-AF-Z" in meths:
meths.append("LP-TRM-AFD")
if ARM == 2:
if "LT-AF-Z" in meths:
meths.append("LP-ARM2-AFD")
newcode = ""
for meth in meths:
newcode = newcode + meth + ":"
rec["magic_method_codes"] = newcode[:-1]
if "LT-AF-I" in meths:
ARM = 1
if "LT-T-I" in meths:
TRM = 1
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_experiment_name"] = spec + ":" + experiment_name
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
SpecOuts.append(rec)
else: # not a Thellier-Thellier or a Shaw experiemnt
for rec in NewSpecs:
if experiment_name == "":
rec["magic_method_codes"] = "LT-NO"
rec["magic_experiment_name"] = spec + ":LT-NO"
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
else:
if experiment_name not in rec['magic_method_codes']:
rec["magic_method_codes"] = rec["magic_method_codes"] + \
":" + experiment_name
rec["magic_method_codes"] = rec["magic_method_codes"].strip(
':')
rec['measurement_number'] = '%i' % (
measnum) # assign measurement numbers
#rec['sequence'] = '%i'%(seqnum)
#seqnum += 1
measnum += 1
rec["magic_experiment_name"] = spec + \
":" + experiment_name
rec["magic_software_packages"] = version_num
SpecOuts.append(rec)
else:
NewSpecs[0]["magic_experiment_name"] = spec + ":" + \
NewSpecs[0]['magic_method_codes'].split(':')[0]
NewSpecs[0]["magic_software_packages"] = version_num
# just copy over the single record as is
SpecOuts.append(NewSpecs[0])
return SpecOuts | ['def', 'measurements_methods', '(', 'meas_data', ',', 'noave', ')', ':', '#', 'version_num', '=', 'get_version', '(', ')', 'sids', '=', 'get_specs', '(', 'meas_data', ')', '# list of measurement records for this specimen', '#', '# step through spec by spec', '#', 'SpecTmps', ',', 'SpecOuts', '=', '[', ']', ',', '[', ']', 'for', 'spec', 'in', 'sids', ':', 'TRM', ',', 'IRM3D', ',', 'ATRM', ',', 'CR', '=', '0', ',', '0', ',', '0', ',', '0', 'expcodes', '=', '""', '# first collect all data for this specimen and do lab treatments', '# list of measurement records for this specimen', 'SpecRecs', '=', 'get_dictitem', '(', 'meas_data', ',', "'er_specimen_name'", ',', 'spec', ',', "'T'", ')', 'for', 'rec', 'in', 'SpecRecs', ':', 'if', "'measurement_flag'", 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', ':', 'rec', '[', "'measurement_flag'", ']', '=', "'g'", 'tmpmeths', '=', 'rec', '[', "'magic_method_codes'", ']', '.', 'split', '(', '":"', ')', 'meths', '=', '[', ']', 'if', '"LP-TRM"', 'in', 'tmpmeths', ':', 'TRM', '=', '1', '# catch these suckers here!', 'if', '"LP-IRM-3D"', 'in', 'tmpmeths', ':', 'IRM3D', '=', '1', '# catch these suckers here!', 'elif', '"LP-AN-TRM"', 'in', 'tmpmeths', ':', 'ATRM', '=', '1', '# catch these suckers here!', 'elif', '"LP-CR-TRM"', 'in', 'tmpmeths', ':', 'CR', '=', '1', '# catch these suckers here!', '#', '# otherwise write over existing method codes', '#', '# find NRM data (LT-NO)', '#', 'elif', 'float', '(', 'rec', '[', '"measurement_temp"', ']', ')', '>=', '273.', 'and', 'float', '(', 'rec', '[', '"measurement_temp"', ']', ')', '<', '323.', ':', '# between 0 and 50C is room T measurement', 'if', '(', '"measurement_dc_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'float', '(', 'rec', '[', '"measurement_dc_field"', ']', ')', '==', '0', 'or', 'rec', '[', '"measurement_dc_field"', ']', '==', '""', ')', 'and', '(', '"measurement_ac_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'float', '(', 'rec', '[', '"measurement_ac_field"', ']', ')', '==', '0', 'or', 'rec', '[', '"measurement_ac_field"', ']', '==', '""', ')', ':', '# measurement done in zero field!', 'if', '"treatment_temp"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_temp"', ']', '.', 'strip', '(', ')', '==', '""', 'or', '(', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '>=', '273.', 'and', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '<', '298.', ')', ':', '# between 0 and 50C is room T treatment', 'if', '"treatment_ac_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_ac_field"', ']', '==', '""', 'or', 'float', '(', 'rec', '[', '"treatment_ac_field"', ']', ')', '==', '0', ':', '# no AF', '# no IRM!', 'if', '"treatment_dc_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_dc_field"', ']', '==', '""', 'or', 'float', '(', 'rec', '[', '"treatment_dc_field"', ']', ')', '==', '0', ':', 'if', '"LT-NO"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-NO"', ')', 'elif', '"LT-IRM"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-IRM"', ')', "# it's an IRM", '#', '# find AF/infield/zerofield', '#', '# no ARM', 'elif', '"treatment_dc_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_dc_field"', ']', '==', '""', 'or', 'float', '(', 'rec', '[', '"treatment_dc_field"', ']', ')', '==', '0', ':', 'if', '"LT-AF-Z"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-AF-Z"', ')', 'else', ':', '# yes ARM', 'if', '"LT-AF-I"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-AF-I"', ')', '#', '# find Thermal/infield/zerofield', '#', 'elif', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '>=', '323', ':', '# treatment done at high T', 'if', 'TRM', '==', '1', ':', 'if', '"LT-T-I"', 'not', 'in', 'meths', ':', '# TRM - even if zero applied field!', 'meths', '.', 'append', '(', '"LT-T-I"', ')', '# no TRM', 'elif', '"treatment_dc_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_dc_field"', ']', '==', '""', 'or', 'float', '(', 'rec', '[', '"treatment_dc_field"', ']', ')', '==', '0.', ':', 'if', '"LT-T-Z"', 'not', 'in', 'meths', ':', "# don't overwrite if part of a TRM experiment!", 'meths', '.', 'append', '(', '"LT-T-Z"', ')', 'else', ':', '# yes TRM', 'if', '"LT-T-I"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-T-I"', ')', '#', '# find low-T infield,zero field', '#', 'else', ':', '# treatment done at low T', '# no field', 'if', '"treatment_dc_field"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', 'rec', '[', '"treatment_dc_field"', ']', '==', '""', 'or', 'float', '(', 'rec', '[', '"treatment_dc_field"', ']', ')', '==', '0', ':', 'if', '"LT-LT-Z"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-LT-Z"', ')', 'else', ':', '# yes field', 'if', '"LT-LT-I"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LT-LT-I"', ')', 'if', '"measurement_chi_volume"', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'or', '"measurement_chi_mass"', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', ':', 'if', '"LP-X"', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LP-X"', ')', '# measurement in presence of dc field and not susceptibility;', '# hysteresis!', 'elif', '"measurement_lab_dc_field"', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', 'and', 'rec', '[', '"measurement_lab_dc_field"', ']', '!=', '0', ':', 'if', '"LP-HYS"', 'not', 'in', 'meths', ':', 'hysq', '=', 'input', '(', '"Is this a hysteresis experiment? [1]/0"', ')', 'if', 'hysq', '==', '""', 'or', 'hysq', '==', '"1"', ':', 'meths', '.', 'append', '(', '"LP-HYS"', ')', 'else', ':', 'metha', '=', 'input', '(', '"Enter the lab protocol code that best describes this experiment "', ')', 'meths', '.', 'append', '(', 'metha', ')', 'methcode', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'methcode', '=', 'methcode', '+', 'meth', '.', 'strip', '(', ')', '+', '":"', 'rec', '[', '"magic_method_codes"', ']', '=', 'methcode', '[', ':', '-', '1', ']', '# assign them back', '#', '# done with first pass, collect and assign provisional method codes', 'if', '"measurement_description"', 'not', 'in', 'list', '(', 'rec', '.', 'keys', '(', ')', ')', ':', 'rec', '[', '"measurement_description"', ']', '=', '""', 'rec', '[', '"er_citation_names"', ']', '=', '"This study"', 'SpecTmps', '.', 'append', '(', 'rec', ')', '# ready for second pass through, step through specimens, check whether ptrm, ptrm tail checks, or AARM, etc.', '#', 'for', 'spec', 'in', 'sids', ':', 'MD', ',', 'pTRM', ',', 'IZ', ',', 'ZI', '=', '0', ',', '0', ',', '0', ',', '0', '# these are flags for the lab protocol codes', 'expcodes', '=', '""', 'NewSpecs', ',', 'SpecMeths', '=', '[', ']', ',', '[', ']', 'experiment_name', ',', 'measnum', '=', '""', ',', '1', 'if', 'IRM3D', '==', '1', ':', 'experiment_name', '=', '"LP-IRM-3D"', 'if', 'ATRM', '==', '1', ':', 'experiment_name', '=', '"LP-AN-TRM"', 'if', 'CR', '==', '1', ':', 'experiment_name', '=', '"LP-CR"', 'NewSpecs', '=', 'get_dictitem', '(', 'SpecTmps', ',', "'er_specimen_name'", ',', 'spec', ',', "'T'", ')', '#', '# first look for replicate measurements', '#', 'Ninit', '=', 'len', '(', 'NewSpecs', ')', 'if', 'noave', '!=', '1', ':', '# averages replicate measurements, returns treatment keys that are', '# being used', 'vdata', ',', 'treatkeys', '=', 'vspec_magic', '(', 'NewSpecs', ')', 'if', 'len', '(', 'vdata', ')', '!=', 'len', '(', 'NewSpecs', ')', ':', "# print spec,'started with ',Ninit,' ending with ',len(vdata)", 'NewSpecs', '=', 'vdata', '# print "Averaged replicate measurements"', '#', "# now look through this specimen's records - try to figure out what experiment it is", '#', 'if', 'len', '(', 'NewSpecs', ')', '>', '1', ':', '# more than one meas for this spec - part of an unknown experiment', 'SpecMeths', '=', 'get_list', '(', 'NewSpecs', ',', "'magic_method_codes'", ')', '.', 'split', '(', '":"', ')', '# TRM steps, could be TRM acquisition, Shaw or a Thellier', '# experiment or TDS experiment', 'if', '"LT-T-I"', 'in', 'SpecMeths', 'and', 'experiment_name', '==', '""', ':', '#', '# collect all the infield steps and look for changes in dc field vector', '#', 'Steps', ',', 'TI', '=', '[', ']', ',', '1', 'for', 'rec', 'in', 'NewSpecs', ':', 'methods', '=', 'get_list', '(', 'NewSpecs', ',', "'magic_method_codes'", ')', '.', 'split', '(', '":"', ')', 'if', '"LT-T-I"', 'in', 'methods', ':', 'Steps', '.', 'append', '(', 'rec', ')', '# get all infield steps together', 'rec_bak', '=', 'Steps', '[', '0', ']', 'if', '"treatment_dc_field_phi"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', 'and', '"treatment_dc_field_theta"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', ':', '# at least there is field orientation info', 'if', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', '!=', '""', 'and', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', '!=', '""', ':', 'phi0', ',', 'theta0', '=', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', ',', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'phi', ',', 'theta', '=', 'rec', '[', '"treatment_dc_field_phi"', ']', ',', 'rec', '[', '"treatment_dc_field_theta"', ']', 'if', 'phi', '!=', 'phi0', 'or', 'theta', '!=', 'theta0', ':', 'ANIS', '=', '1', '# if direction changes, is some sort of anisotropy experiment', 'if', '"LT-AF-I"', 'in', 'SpecMeths', 'and', '"LT-AF-Z"', 'in', 'SpecMeths', ':', '# must be Shaw :(', 'experiment_name', '=', '"LP-PI-TRM:LP-PI-ALT-AFARM"', 'elif', 'TRM', '==', '1', ':', 'experiment_name', '=', '"LP-TRM"', 'else', ':', 'TI', '=', '0', '# no infield steps at all', 'if', '"LT-T-Z"', 'in', 'SpecMeths', 'and', 'experiment_name', '==', '""', ':', '# thermal demag steps', 'if', 'TI', '==', '0', ':', 'experiment_name', '=', '"LP-DIR-T"', '# just ordinary thermal demag', 'elif', 'TRM', '!=', '1', ':', '# heart pounding - could be some kind of TRM normalized paleointensity or LP-TRM-TD experiment', 'Temps', '=', '[', ']', 'for', 'step', 'in', 'Steps', ':', '# check through the infield steps - if all at same temperature, then must be a demag of a total TRM with checks', 'if', 'step', '[', "'treatment_temp'", ']', 'not', 'in', 'Temps', ':', 'Temps', '.', 'append', '(', 'step', '[', "'treatment_temp'", ']', ')', 'if', 'len', '(', 'Temps', ')', '>', '1', ':', 'experiment_name', '=', '"LP-PI-TRM"', '# paleointensity normalized by TRM', 'else', ':', '# thermal demag of a lab TRM (could be part of a', '# LP-PI-TDS experiment)', 'experiment_name', '=', '"LP-TRM-TD"', 'TZ', '=', '1', 'else', ':', 'TZ', '=', '0', '# no zero field steps at all', 'if', '"LT-AF-I"', 'in', 'SpecMeths', ':', '# ARM steps', 'Steps', '=', '[', ']', 'for', 'rec', 'in', 'NewSpecs', ':', 'tmp', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'methods', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', 'methods', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', 'if', '"LT-AF-I"', 'in', 'methods', ':', 'Steps', '.', 'append', '(', 'rec', ')', '# get all infield steps together', 'rec_bak', '=', 'Steps', '[', '0', ']', 'if', '"treatment_dc_field_phi"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', 'and', '"treatment_dc_field_theta"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', ':', '# at least there is field orientation info', 'if', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', '!=', '""', 'and', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', '!=', '""', ':', 'phi0', ',', 'theta0', '=', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', ',', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', 'ANIS', '=', '0', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'phi', ',', 'theta', '=', 'rec', '[', '"treatment_dc_field_phi"', ']', ',', 'rec', '[', '"treatment_dc_field_theta"', ']', 'if', 'phi', '!=', 'phi0', 'or', 'theta', '!=', 'theta0', ':', 'ANIS', '=', '1', '# if direction changes, is some sort of anisotropy experiment', 'if', 'ANIS', '==', '1', ':', 'experiment_name', '=', '"LP-AN-ARM"', 'if', 'experiment_name', '==', '""', ':', '# not anisotropy of ARM - acquisition?', 'field0', '=', 'rec_bak', '[', '"treatment_dc_field"', ']', 'ARM', '=', '0', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'field', '=', 'rec', '[', '"treatment_dc_field"', ']', 'if', 'field', '!=', 'field0', ':', 'ARM', '=', '1', 'if', 'ARM', '==', '1', ':', 'experiment_name', '=', '"LP-ARM"', 'AFI', '=', '1', 'else', ':', 'AFI', '=', '0', '# no ARM steps at all', 'if', '"LT-AF-Z"', 'in', 'SpecMeths', 'and', 'experiment_name', '==', '""', ':', '# AF demag steps', 'if', 'AFI', '==', '0', ':', 'experiment_name', '=', '"LP-DIR-AF"', '# just ordinary AF demag', 'else', ':', '# heart pounding - a pseudothellier?', 'experiment_name', '=', '"LP-PI-ARM"', 'AFZ', '=', '1', 'else', ':', 'AFZ', '=', '0', '# no AF demag at all', 'if', '"LT-IRM"', 'in', 'SpecMeths', ':', '# IRM', 'Steps', '=', '[', ']', 'for', 'rec', 'in', 'NewSpecs', ':', 'tmp', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'methods', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', 'methods', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', 'if', '"LT-IRM"', 'in', 'methods', ':', 'Steps', '.', 'append', '(', 'rec', ')', '# get all infield steps together', 'rec_bak', '=', 'Steps', '[', '0', ']', 'if', '"treatment_dc_field_phi"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', 'and', '"treatment_dc_field_theta"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', ':', '# at least there is field orientation info', 'if', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', '!=', '""', 'and', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', '!=', '""', ':', 'phi0', ',', 'theta0', '=', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', ',', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', 'ANIS', '=', '0', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'phi', ',', 'theta', '=', 'rec', '[', '"treatment_dc_field_phi"', ']', ',', 'rec', '[', '"treatment_dc_field_theta"', ']', 'if', 'phi', '!=', 'phi0', 'or', 'theta', '!=', 'theta0', ':', 'ANIS', '=', '1', '# if direction changes, is some sort of anisotropy experiment', 'if', 'ANIS', '==', '1', ':', 'experiment_name', '=', '"LP-AN-IRM"', 'if', 'experiment_name', '==', '""', ':', '# not anisotropy of IRM - acquisition?', 'field0', '=', 'rec_bak', '[', '"treatment_dc_field"', ']', 'IRM', '=', '0', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'field', '=', 'rec', '[', '"treatment_dc_field"', ']', 'if', 'field', '!=', 'field0', ':', 'IRM', '=', '1', 'if', 'IRM', '==', '1', ':', 'experiment_name', '=', '"LP-IRM"', 'IRM', '=', '1', 'else', ':', 'IRM', '=', '0', '# no IRM at all', 'if', '"LP-X"', 'in', 'SpecMeths', ':', '# susceptibility run', 'Steps', '=', 'get_dictitem', '(', 'NewSpecs', ',', "'magic_method_codes'", ',', "'LT-X'", ',', "'has'", ')', 'if', 'len', '(', 'Steps', ')', '>', '0', ':', 'rec_bak', '=', 'Steps', '[', '0', ']', 'if', '"treatment_dc_field_phi"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', 'and', '"treatment_dc_field_theta"', 'in', 'list', '(', 'rec_bak', '.', 'keys', '(', ')', ')', ':', '# at least there is field orientation info', 'if', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', '!=', '""', 'and', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', '!=', '""', ':', 'phi0', ',', 'theta0', '=', 'rec_bak', '[', '"treatment_dc_field_phi"', ']', ',', 'rec_bak', '[', '"treatment_dc_field_theta"', ']', 'ANIS', '=', '0', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'Steps', ')', ')', ':', 'rec', '=', 'Steps', '[', 'k', ']', 'phi', ',', 'theta', '=', 'rec', '[', '"treatment_dc_field_phi"', ']', ',', 'rec', '[', '"treatment_dc_field_theta"', ']', 'if', 'phi', '!=', 'phi0', 'or', 'theta', '!=', 'theta0', ':', 'ANIS', '=', '1', '# if direction changes, is some sort of anisotropy experiment', 'if', 'ANIS', '==', '1', ':', 'experiment_name', '=', '"LP-AN-MS"', 'else', ':', 'CHI', '=', '0', '# no susceptibility at all', '#', '# now need to deal with special thellier experiment problems - first clear up pTRM checks and tail checks', '#', 'if', 'experiment_name', '==', '"LP-PI-TRM"', ':', '# is some sort of thellier experiment', 'rec_bak', '=', 'NewSpecs', '[', '0', ']', 'tmp', '=', 'rec_bak', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'methbak', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', 'methbak', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', '# previous steps method codes', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'NewSpecs', ')', ')', ':', 'rec', '=', 'NewSpecs', '[', 'k', ']', 'tmp', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'meths', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', '# get this guys method codes', 'meths', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', '#', '# check if this is a pTRM check', '#', 'if', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '<', 'float', '(', 'rec_bak', '[', '"treatment_temp"', ']', ')', ':', '# went backward', 'if', '"LT-T-I"', 'in', 'meths', 'and', '"LT-T-Z"', 'in', 'methbak', ':', '# must be a pTRM check after first z', '#', '# replace LT-T-I method code with LT-PTRM-I', '#', 'methcodes', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'if', 'meth', '!=', '"LT-T-I"', ':', 'methcode', '=', 'methcode', '+', 'meth', '.', 'strip', '(', ')', '+', '":"', 'methcodes', '=', 'methcodes', '+', '"LT-PTRM-I"', 'meths', '=', 'methcodes', '.', 'split', '(', '":"', ')', 'pTRM', '=', '1', 'elif', '"LT-T-Z"', 'in', 'meths', 'and', '"LT-T-I"', 'in', 'methbak', ':', '# must be pTRM check after first I', '#', '# replace LT-T-Z method code with LT-PTRM-Z', '#', 'methcodes', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'if', 'meth', '!=', '"LT-T-Z"', ':', 'methcode', '=', 'methcode', '+', 'meth', '+', '":"', 'methcodes', '=', 'methcodes', '+', '"LT-PTRM-Z"', 'meths', '=', 'methcodes', '.', 'split', '(', '":"', ')', 'pTRM', '=', '1', 'methcodes', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'methcodes', '=', 'methcodes', '+', 'meth', '.', 'strip', '(', ')', '+', '":"', '# attach new method code', 'rec', '[', '"magic_method_codes"', ']', '=', 'methcodes', '[', ':', '-', '1', ']', 'rec_bak', '=', 'rec', '# next previous record', 'tmp', '=', 'rec_bak', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'methbak', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', '# previous steps method codes', 'methbak', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', '#', '# done with assigning pTRM checks. data should be "fixed" in NewSpecs', '#', "# now let's find out which steps are infield zerofield (IZ) and which are zerofield infield (ZI)", '#', 'rec_bak', '=', 'NewSpecs', '[', '0', ']', 'tmp', '=', 'rec_bak', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'methbak', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', 'methbak', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', '# previous steps method codes', 'if', '"LT-NO"', 'not', 'in', 'methbak', ':', '# first measurement is not NRM', 'if', '"LT-T-I"', 'in', 'methbak', ':', 'IZorZI', '=', '"LP-PI-TRM-IZ"', '# first pair is IZ', 'if', '"LT-T-Z"', 'in', 'methbak', ':', 'IZorZI', '=', '"LP-PI-TRM-ZI"', '# first pair is ZI', 'if', 'IZorZI', 'not', 'in', 'methbak', ':', 'methbak', '.', 'append', '(', 'IZorZI', ')', 'methcode', '=', '""', 'for', 'meth', 'in', 'methbak', ':', 'methcode', '=', 'methcode', '+', 'meth', '+', '":"', '# fix first heating step when no NRM', 'NewSpecs', '[', '0', ']', '[', '"magic_method_codes"', ']', '=', 'methcode', '[', ':', '-', '1', ']', 'else', ':', 'IZorZI', '=', '""', '# first measurement is NRM and not one of a pair', 'for', 'k', 'in', 'range', '(', '1', ',', 'len', '(', 'NewSpecs', ')', ')', ':', '# hunt through measurements again', 'rec', '=', 'NewSpecs', '[', 'k', ']', 'tmp', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'meths', '=', '[', ']', 'for', 'meth', 'in', 'tmp', ':', '# get this guys method codes', 'meths', '.', 'append', '(', 'meth', '.', 'strip', '(', ')', ')', '#', '# check if this start a new temperature step of a infield/zerofield pair', '#', 'if', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '>', 'float', '(', 'rec_bak', '[', '"treatment_temp"', ']', ')', 'and', '"LT-PTRM-I"', 'not', 'in', 'methbak', ':', '# new pair?', 'if', '"LT-T-I"', 'in', 'meths', ':', '# infield of this pair', 'IZorZI', '=', '"LP-PI-TRM-IZ"', 'IZ', '=', '1', '# at least one IZ pair', 'elif', '"LT-T-Z"', 'in', 'meths', ':', '# zerofield', 'IZorZI', '=', '"LP-PI-TRM-ZI"', 'ZI', '=', '1', '# at least one ZI pair', '# new pair after out of sequence PTRM check?', 'elif', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '>', 'float', '(', 'rec_bak', '[', '"treatment_temp"', ']', ')', 'and', '"LT-PTRM-I"', 'in', 'methbak', 'and', 'IZorZI', '!=', '"LP-PI-TRM-ZI"', ':', 'if', '"LT-T-I"', 'in', 'meths', ':', '# infield of this pair', 'IZorZI', '=', '"LP-PI-TRM-IZ"', 'IZ', '=', '1', '# at least one IZ pair', 'elif', '"LT-T-Z"', 'in', 'meths', ':', '# zerofield', 'IZorZI', '=', '"LP-PI-TRM-ZI"', 'ZI', '=', '1', '# at least one ZI pair', '# stayed same temp', 'if', 'float', '(', 'rec', '[', '"treatment_temp"', ']', ')', '==', 'float', '(', 'rec_bak', '[', '"treatment_temp"', ']', ')', ':', 'if', '"LT-T-Z"', 'in', 'meths', 'and', '"LT-T-I"', 'in', 'methbak', 'and', 'IZorZI', '==', '"LP-PI-TRM-ZI"', ':', '# must be a tail check', '#', '# replace LT-T-Z method code with LT-PTRM-MD', '#', 'methcodes', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'if', 'meth', '!=', '"LT-T-Z"', ':', 'methcode', '=', 'methcode', '+', 'meth', '+', '":"', 'methcodes', '=', 'methcodes', '+', '"LT-PTRM-MD"', 'meths', '=', 'methcodes', '.', 'split', '(', '":"', ')', 'MD', '=', '1', '# fix method codes', 'if', '"LT-PTRM-I"', 'not', 'in', 'meths', 'and', '"LT-PTRM-MD"', 'not', 'in', 'meths', 'and', 'IZorZI', 'not', 'in', 'meths', ':', 'meths', '.', 'append', '(', 'IZorZI', ')', 'newmeths', '=', '[', ']', 'for', 'meth', 'in', 'meths', ':', 'if', 'meth', 'not', 'in', 'newmeths', ':', 'newmeths', '.', 'append', '(', 'meth', ')', '# try to get uniq set', 'methcode', '=', '""', 'for', 'meth', 'in', 'newmeths', ':', 'methcode', '=', 'methcode', '+', 'meth', '+', '":"', 'rec', '[', '"magic_method_codes"', ']', '=', 'methcode', '[', ':', '-', '1', ']', 'rec_bak', '=', 'rec', '# moving on to next record, making current one the backup', "# get last specimen's method codes in a list", 'methbak', '=', 'rec_bak', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', '#', "# done with this specimen's records, now check if any pTRM checks or MD checks", '#', 'if', 'pTRM', '==', '1', ':', 'experiment_name', '=', 'experiment_name', '+', '":LP-PI-ALT-PTRM"', 'if', 'MD', '==', '1', ':', 'experiment_name', '=', 'experiment_name', '+', '":LP-PI-BT-MD"', 'if', 'IZ', '==', '1', 'and', 'ZI', '==', '1', ':', 'experiment_name', '=', 'experiment_name', '+', '":LP-PI-BT-IZZI"', 'if', 'IZ', '==', '1', 'and', 'ZI', '==', '0', ':', 'experiment_name', '=', 'experiment_name', '+', '":LP-PI-IZ"', '# Aitken method', 'if', 'IZ', '==', '0', 'and', 'ZI', '==', '1', ':', 'experiment_name', '=', 'experiment_name', '+', '":LP-PI-ZI"', '# Coe method', 'IZ', ',', 'ZI', ',', 'pTRM', ',', 'MD', '=', '0', ',', '0', ',', '0', ',', '0', '# reset these for next specimen', 'for', 'rec', 'in', 'NewSpecs', ':', '# fix the experiment name for all recs for this specimen and save in SpecOuts', '# assign an experiment name to all specimen measurements', '# from this specimen', 'if', 'experiment_name', '!=', '""', ':', 'rec', '[', '"magic_method_codes"', ']', '=', 'rec', '[', '"magic_method_codes"', ']', '+', '":"', '+', 'experiment_name', 'rec', '[', '"magic_experiment_name"', ']', '=', 'spec', '+', '":"', '+', 'experiment_name', 'rec', '[', "'measurement_number'", ']', '=', "'%i'", '%', '(', 'measnum', ')', '# assign measurement numbers', 'measnum', '+=', '1', "#rec['sequence'] = '%i'%(seqnum)", '#seqnum += 1', 'SpecOuts', '.', 'append', '(', 'rec', ')', 'elif', 'experiment_name', '==', '"LP-PI-TRM:LP-PI-ALT-AFARM"', ':', '# is a Shaw experiment!', 'ARM', ',', 'TRM', '=', '0', ',', '0', 'for', 'rec', 'in', 'NewSpecs', ':', '# fix the experiment name for all recs for this specimen and save in SpecOuts', '# assign an experiment name to all specimen measurements from this specimen', '# make the second ARM in Shaw experiments LT-AF-I-2, stick', '# in the AF of ARM and TRM codes', 'meths', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'split', '(', '":"', ')', 'if', 'ARM', '==', '1', ':', 'if', '"LT-AF-I"', 'in', 'meths', ':', 'del', 'meths', '[', 'meths', '.', 'index', '(', '"LT-AF-I"', ')', ']', 'meths', '.', 'append', '(', '"LT-AF-I-2"', ')', 'ARM', '=', '2', 'if', '"LT-AF-Z"', 'in', 'meths', 'and', 'TRM', '==', '0', ':', 'meths', '.', 'append', '(', '"LP-ARM-AFD"', ')', 'if', 'TRM', '==', '1', 'and', 'ARM', '==', '1', ':', 'if', '"LT-AF-Z"', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LP-TRM-AFD"', ')', 'if', 'ARM', '==', '2', ':', 'if', '"LT-AF-Z"', 'in', 'meths', ':', 'meths', '.', 'append', '(', '"LP-ARM2-AFD"', ')', 'newcode', '=', '""', 'for', 'meth', 'in', 'meths', ':', 'newcode', '=', 'newcode', '+', 'meth', '+', '":"', 'rec', '[', '"magic_method_codes"', ']', '=', 'newcode', '[', ':', '-', '1', ']', 'if', '"LT-AF-I"', 'in', 'meths', ':', 'ARM', '=', '1', 'if', '"LT-T-I"', 'in', 'meths', ':', 'TRM', '=', '1', 'rec', '[', '"magic_method_codes"', ']', '=', 'rec', '[', '"magic_method_codes"', ']', '+', '":"', '+', 'experiment_name', 'rec', '[', '"magic_experiment_name"', ']', '=', 'spec', '+', '":"', '+', 'experiment_name', 'rec', '[', "'measurement_number'", ']', '=', "'%i'", '%', '(', 'measnum', ')', '# assign measurement numbers', "#rec['sequence'] = '%i'%(seqnum)", '#seqnum += 1', 'measnum', '+=', '1', 'SpecOuts', '.', 'append', '(', 'rec', ')', 'else', ':', '# not a Thellier-Thellier or a Shaw experiemnt', 'for', 'rec', 'in', 'NewSpecs', ':', 'if', 'experiment_name', '==', '""', ':', 'rec', '[', '"magic_method_codes"', ']', '=', '"LT-NO"', 'rec', '[', '"magic_experiment_name"', ']', '=', 'spec', '+', '":LT-NO"', 'rec', '[', "'measurement_number'", ']', '=', "'%i'", '%', '(', 'measnum', ')', '# assign measurement numbers', "#rec['sequence'] = '%i'%(seqnum)", '#seqnum += 1', 'measnum', '+=', '1', 'else', ':', 'if', 'experiment_name', 'not', 'in', 'rec', '[', "'magic_method_codes'", ']', ':', 'rec', '[', '"magic_method_codes"', ']', '=', 'rec', '[', '"magic_method_codes"', ']', '+', '":"', '+', 'experiment_name', 'rec', '[', '"magic_method_codes"', ']', '=', 'rec', '[', '"magic_method_codes"', ']', '.', 'strip', '(', "':'", ')', 'rec', '[', "'measurement_number'", ']', '=', "'%i'", '%', '(', 'measnum', ')', '# assign measurement numbers', "#rec['sequence'] = '%i'%(seqnum)", '#seqnum += 1', 'measnum', '+=', '1', 'rec', '[', '"magic_experiment_name"', ']', '=', 'spec', '+', '":"', '+', 'experiment_name', 'rec', '[', '"magic_software_packages"', ']', '=', 'version_num', 'SpecOuts', '.', 'append', '(', 'rec', ')', 'else', ':', 'NewSpecs', '[', '0', ']', '[', '"magic_experiment_name"', ']', '=', 'spec', '+', '":"', '+', 'NewSpecs', '[', '0', ']', '[', "'magic_method_codes'", ']', '.', 'split', '(', "':'", ')', '[', '0', ']', 'NewSpecs', '[', '0', ']', '[', '"magic_software_packages"', ']', '=', 'version_num', '# just copy over the single record as is', 'SpecOuts', '.', 'append', '(', 'NewSpecs', '[', '0', ']', ')', 'return', 'SpecOuts'] | get list of unique specs | ['get', 'list', 'of', 'unique', 'specs'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L7745-L8254 |
Subsets and Splits