Columns (type and observed range):

  repo              string, length 7-55
  path              string, length 4-223
  url               string, length 87-315
  code              string, length 75-104k
  code_tokens       list
  docstring         string, length 1-46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, range 7.91-980
repo: watson-developer-cloud/python-sdk
path: examples/assistant_tone_analyzer_integration/tone_detection.py
url: https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/examples/assistant_tone_analyzer_integration/tone_detection.py#L34-L77
code:

def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory):
    """
    updateUserTone processes the Tone Analyzer payload to pull out the emotion,
    writing and social tones, and identify the meaningful tones (i.e., those
    tones that meet the specified thresholds). The conversationPayload json
    object is updated to include these tones.
    @param conversationPayload json object returned by the Watson Conversation Service
    @param toneAnalyzerPayload json object returned by the Watson Tone Analyzer Service
    @returns conversationPayload where the user object has been updated with tone
    information from the toneAnalyzerPayload
    """
    emotionTone = None
    writingTone = None
    socialTone = None

    # if there is no context in the payload, create it
    if 'context' not in conversationPayload:
        conversationPayload['context'] = {}
    if 'user' not in conversationPayload['context']:
        conversationPayload['context'] = initUser()

    # For convenience sake, define a variable for the user object
    user = conversationPayload['context']['user']

    # Extract the tones - emotion, writing and social
    if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']:
        for toneCategory in toneAnalyzerPayload['document_tone']['tone_categories']:
            if toneCategory['category_id'] == EMOTION_TONE_LABEL:
                emotionTone = toneCategory
            if toneCategory['category_id'] == LANGUAGE_TONE_LABEL:
                writingTone = toneCategory
            if toneCategory['category_id'] == SOCIAL_TONE_LABEL:
                socialTone = toneCategory

    updateEmotionTone(user, emotionTone, maintainHistory)
    updateWritingTone(user, writingTone, maintainHistory)
    updateSocialTone(user, socialTone, maintainHistory)

    conversationPayload['context']['user'] = user
    return conversationPayload
language: python | partition: train | avg_line_len: 42.227273
repo: martijnvermaat/interval-binning
path: binning/__init__.py
url: https://github.com/martijnvermaat/interval-binning/blob/91c359ff3ddd1f587a209521dd238d2d93fc93f0/binning/__init__.py#L174-L197
code:

def contained_bins(start, stop=None):
    """
    Given an interval `start:stop`, return bins for intervals completely
    *contained by* `start:stop`.

    The order is according to the bin level (starting with the smallest
    bins), and within a level according to the bin number (ascending).

    :arg int start, stop: Interval positions (zero-based, open-ended). If
      `stop` is not provided, the interval is assumed to be of length 1
      (equivalent to `stop = start + 1`).

    :return: All bins for intervals contained by `start:stop`, ordered
      first according to bin level (ascending) and then according to bin
      number (ascending).
    :rtype: list(int)

    :raise OutOfRangeError: If `start:stop` exceeds the range of the
      binning scheme.
    """
    if stop is None:
        stop = start + 1
    min_bin = assign_bin(start, stop)
    return [bin for bin in overlapping_bins(start, stop) if bin >= min_bin]
language: python | partition: train | avg_line_len: 38.083333
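A quick usage sketch for contained_bins, assuming the package is importable as binning (matching the path above); the coordinates are made-up examples:

    from binning import contained_bins

    # All bins whose intervals lie completely inside 1000000:2000000
    # (zero-based, open-ended), smallest bin levels first.
    print(contained_bins(1000000, 2000000))

    # A single position defaults to a length-1 interval (stop = start + 1).
    print(contained_bins(123456))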
repo: oasis-open/cti-taxii-client
path: taxii2client/__init__.py
url: https://github.com/oasis-open/cti-taxii-client/blob/b4c037fb61d8b8892af34423e2c67c81218d6f8e/taxii2client/__init__.py#L986-L1020
code:

def _merge_headers(self, call_specific_headers):
    """
    Merge headers from different sources together. Headers passed to the
    post/get methods have highest priority, then headers associated with
    the connection object itself have next priority.

    :param call_specific_headers: A header dict from the get/post call, or
        None (the default for those methods).

    :return: A key-case-insensitive MutableMapping object which contains
        the merged headers. (This doesn't actually return a dict.)
    """
    # A case-insensitive mapping is necessary here so that there is
    # predictable behavior. If a plain dict were used, you'd get keys in
    # the merged dict which differ only in case. The requests library
    # would merge them internally, and it would be unpredictable which key
    # is chosen for the final set of headers. Another possible approach
    # would be to upper/lower-case everything, but this seemed easier. On
    # the other hand, I don't know if CaseInsensitiveDict is public API...?

    # First establish defaults
    merged_headers = requests.structures.CaseInsensitiveDict({
        "User-Agent": self.user_agent
    })

    # Then overlay with specifics from post/get methods
    if call_specific_headers:
        merged_headers.update(call_specific_headers)

    # Special "User-Agent" header check, to ensure one is always sent.
    # The call-specific overlay could have null'd out that header.
    if not merged_headers.get("User-Agent"):
        merged_headers["User-Agent"] = self.user_agent

    return merged_headers
language: python | partition: valid | avg_line_len: 47.542857
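The case-collision problem the comment describes is easy to reproduce with requests' CaseInsensitiveDict on its own; the header values here are made up:

    from requests.structures import CaseInsensitiveDict

    merged = CaseInsensitiveDict({"User-Agent": "taxii2-client/0.x"})
    # A key differing only in case overwrites the default rather than
    # producing a second, conflicting header.
    merged.update({"user-agent": None,
                   "Accept": "application/vnd.oasis.taxii+json"})

    # The overlay null'd out the agent, so restore it, mirroring the method.
    if not merged.get("User-Agent"):
        merged["User-Agent"] = "taxii2-client/0.x"
    print(dict(merged))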
repo: shoebot/shoebot
path: extensions/gedit/gedit2-plugin/shoebotit/__init__.py
url: https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/extensions/gedit/gedit2-plugin/shoebotit/__init__.py#L186-L196
code:

def get_source(self, doc):
    """
    Grab contents of 'doc' and return it

    :param doc: The active document
    :return:
    """
    start_iter = doc.get_start_iter()
    end_iter = doc.get_end_iter()
    source = doc.get_text(start_iter, end_iter, False)
    return source
language: python | partition: valid | avg_line_len: 27.636364
repo: ray-project/ray
path: python/ray/tune/trainable.py
url: https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trainable.py#L213-L222
code:

def delete_checkpoint(self, checkpoint_dir):
    """Removes subdirectory within checkpoint_folder

    Parameters
    ----------
    checkpoint_dir : path to checkpoint
    """
    if os.path.isfile(checkpoint_dir):
        shutil.rmtree(os.path.dirname(checkpoint_dir))
    else:
        shutil.rmtree(checkpoint_dir)
language: python | partition: train | avg_line_len: 34.8
repo: kadrlica/pymodeler
path: pymodeler/parameter.py
url: https://github.com/kadrlica/pymodeler/blob/f426c01416fd4b8fc3afeeb6d3b5d1cb0cb8f8e3/pymodeler/parameter.py#L252-L268
code:

def check_type(self, value):
    """Hook for type-checking, invoked during assignment.

    raises TypeError if neither value nor self.dtype are None
    and they do not match.

    will not raise an exception if either value or self.dtype is None
    """
    if self.__dict__['dtype'] is None:
        return
    elif value is None:
        return
    elif isinstance(value, self.__dict__['dtype']):
        return
    msg = "Value of type %s, when %s was expected." % (
        type(value), self.__dict__['dtype'])
    raise TypeError(msg)
language: python | partition: test | avg_line_len: 34.176471
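A self-contained sketch of the same three-way check, using a stand-in class that stores dtype in __dict__ the way the real Parameter class does:

    class FakeParam:
        def __init__(self, dtype=None):
            self.__dict__['dtype'] = dtype

        def check_type(self, value):
            if self.__dict__['dtype'] is None:
                return
            elif value is None:
                return
            elif isinstance(value, self.__dict__['dtype']):
                return
            raise TypeError("Value of type %s, when %s was expected." %
                            (type(value), self.__dict__['dtype']))

    FakeParam(dtype=int).check_type(3)       # passes: types match
    FakeParam(dtype=int).check_type(None)    # passes: value is None
    FakeParam().check_type("anything")       # passes: dtype is None
    FakeParam(dtype=int).check_type("3")     # raises TypeError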
repo: bykof/billomapy
path: billomapy/billomapy.py
url: https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2933-L2940
code:

def confirmation_pdf(self, confirmation_id):
    """
    Opens a pdf of a confirmation

    :param confirmation_id: the confirmation id
    :return: dict
    """
    return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF)
language: python | partition: train | avg_line_len: 35
repo: bopo/mootdx
path: mootdx/quotes.py
url: https://github.com/bopo/mootdx/blob/7c4623e9464c75d3c87a06d48fe8734b027374fa/mootdx/quotes.py#L123-L139
code:

def trans_his(self, symbol='', start=0, offset=10, date=''):
    '''Query historical tick-by-tick trade data

    :param market: market code
    :param symbol: stock code
    :param start: start position
    :param offset: number of records
    :param date: date
    :return: pd.dataFrame or None
    '''
    market = get_stock_market(symbol)

    with self.client.connect(*self.bestip):
        data = self.client.get_history_transaction_data(
            int(market), symbol, int(start), int(offset), date)

    return self.client.to_df(data)
language: python | partition: train | avg_line_len: 30.588235
repo: cloud-custodian/cloud-custodian
path: tools/c7n_salactus/c7n_salactus/cli.py
url: https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_salactus/c7n_salactus/cli.py#L770-L782
code:

def failures():
    """Show any unexpected failures"""
    if not HAVE_BIN_LIBS:
        click.echo("missing required binary libs (lz4, msgpack)")
        return

    q = Queue('failed', connection=worker.connection)
    for i in q.get_job_ids():
        j = q.job_class.fetch(i, connection=q.connection)
        click.echo("%s on %s" % (j.func_name, j.origin))
        if not j.func_name.endswith('process_keyset'):
            click.echo("params %s %s" % (j._args, j._kwargs))
        click.echo(j.exc_info)
language: python | partition: train | avg_line_len: 38.230769
repo: LuminosoInsight/wordfreq
path: wordfreq/__init__.py
url: https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/__init__.py#L87-L109
code:

def available_languages(wordlist='best'):
    """
    Given a wordlist name, return a dictionary of language codes to filenames,
    representing all the languages in which that wordlist is available.
    """
    if wordlist == 'best':
        available = available_languages('small')
        available.update(available_languages('large'))
        return available
    elif wordlist == 'combined':
        logger.warning(
            "The 'combined' wordlists have been renamed to 'small'."
        )
        wordlist = 'small'

    available = {}
    for path in DATA_PATH.glob('*.msgpack.gz'):
        if not path.name.startswith('_'):
            list_name = path.name.split('.')[0]
            name, lang = list_name.split('_')
            if name == wordlist:
                available[lang] = str(path)
    return available
language: python | partition: train | avg_line_len: 35.086957
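Since this function lives in wordfreq/__init__.py it should be importable from the package root; the exact set of language codes depends on the installed release:

    from wordfreq import available_languages

    langs = available_languages('best')   # 'small' lists overlaid with 'large'
    print(sorted(langs))                  # language codes such as 'en', 'de', ...
    print(langs.get('en'))                # filesystem path to that wordlist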
repo: niolabs/python-xbee
path: xbee/helpers/dispatch/dispatch.py
url: https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/xbee/helpers/dispatch/dispatch.py#L26-L48
code:

def register(self, name, callback, filter):
    """
    register: string, function: string, data -> None, function: data -> boolean -> None

    Register will save the given name, callback, and filter function
    for use when a packet arrives. When one arrives, the filter
    function will be called to determine whether to call its associated
    callback function. If the filter method returns true, the callback
    method will be called with its associated name string and the packet
    which triggered the call.
    """
    if name in self.names:
        raise ValueError("A callback has already been registered with \
                 the name '%s'" % name)

    self.handlers.append({
        'name': name,
        'callback': callback,
        'filter': filter
    })

    self.names.add(name)
language: python | partition: train | avg_line_len: 37.826087
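A sketch of registering a handler, following the docstring's contract (the filter receives the packet, the callback receives the name and the packet); the import path and the no-argument construction are assumptions:

    from xbee.helpers.dispatch import Dispatch

    dispatch = Dispatch()   # may also wrap a serial port or XBee instance

    def is_status(packet):
        return packet.get('id') == 'status'

    def handle_status(name, packet):
        print("%s caught %r" % (name, packet))

    dispatch.register("status_handler", handle_status, is_status)
    # Registering the same name a second time raises ValueError.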
repo: hydraplatform/hydra-base
path: hydra_base/lib/network.py
url: https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/network.py#L954-L984
code:

def _get_scenarios(network_id, include_data, user_id, scenario_ids=None):
    """
    Get all the scenarios in a network
    """
    scen_qry = db.DBSession.query(Scenario).filter(
        Scenario.network_id == network_id).options(
        noload('network')).filter(
        Scenario.status == 'A')

    if scenario_ids:
        logging.info("Filtering by scenario_ids %s", scenario_ids)
        scen_qry = scen_qry.filter(Scenario.id.in_(scenario_ids))

    extras = {'resourcescenarios': [], 'resourcegroupitems': []}
    scens = [JSONObject(s, extras=extras)
             for s in db.DBSession.execute(scen_qry.statement).fetchall()]

    all_resource_group_items = _get_all_group_items(network_id)

    if include_data == 'Y' or include_data == True:
        all_rs = _get_all_resourcescenarios(network_id, user_id)
        metadata = _get_metadata(network_id, user_id)

    for s in scens:
        s.resourcegroupitems = all_resource_group_items.get(s.id, [])
        if include_data == 'Y' or include_data == True:
            s.resourcescenarios = all_rs.get(s.id, [])
            for rs in s.resourcescenarios:
                rs.dataset.metadata = metadata.get(rs.dataset_id, {})

    return scens
language: python | partition: train | avg_line_len: 39.096774
repo: 3DLIRIOUS/MeshLabXML
path: meshlabxml/delete.py
url: https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/delete.py#L53-L74
code:

def small_parts(script, ratio=0.2, non_closed_only=False):
    """ Select & delete the small disconnected parts (components) of a mesh.

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        ratio (float): This ratio (between 0 and 1) defines the meaning
            of 'small' as the threshold ratio between the number of faces
            of the largest component and the other ones. A larger value
            will select more components.
        non_closed_only (bool): Select only non-closed components.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    select.small_parts(script, ratio, non_closed_only)
    selected(script)
    return None
language: python | partition: test | avg_line_len: 33.636364
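A hedged usage sketch following MeshLabXML's FilterScript pattern; the mesh file names are placeholders:

    import meshlabxml as mlx

    script = mlx.FilterScript(file_in='noisy_scan.ply', file_out='cleaned.ply')
    # Delete disconnected components smaller than 10% of the largest one.
    mlx.delete.small_parts(script, ratio=0.1)
    script.run_script()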
repo: bierschenk/ode
path: examples/example_2d_orbit.py
url: https://github.com/bierschenk/ode/blob/01fb714874926f0988a4bb250d2a0c8a2429e4f0/examples/example_2d_orbit.py#L7-L76
code:

def dx_orbit_sys(t, X):
    '''X = [
        m1x, m1y, m2x, m2y,
        m3x, m3y, m4x, m4y,
        m1vx, m1vy, m2vx, m2vy,
        m3vx, m3vy, m4vx, m4vy
        ]
    '''
    (m1x, m1y, m2x, m2y,
     m3x, m3y, m4x, m4y,
     m1vx, m1vy, m2vx, m2vy,
     m3vx, m3vy, m4vx, m4vy) = X
    m_moon1 = 7.342*(10**22)  # kg
    m_moon2 = 7.342*(10**22)  # kg
    m_moon3 = 7.342*(10**22)  # kg
    m_moon4 = 7.342*(10**22)  # kg
    G = 6.67408*(10**-11)  # m**3 kg**−1 s**−2
    dm12 = sqrt((m1x - m2x)**2 + (m1y - m2y)**2)
    dm13 = sqrt((m1x - m3x)**2 + (m1y - m3y)**2)
    dm14 = sqrt((m1x - m4x)**2 + (m1y - m4y)**2)
    dm23 = sqrt((m2x - m3x)**2 + (m2y - m3y)**2)
    dm24 = sqrt((m2x - m4x)**2 + (m2y - m4y)**2)
    dm34 = sqrt((m3x - m4x)**2 + (m3y - m4y)**2)
    f12 = G * m_moon1 * m_moon2 / (dm12 * dm12)
    f13 = G * m_moon1 * m_moon3 / (dm13 * dm13)
    f14 = G * m_moon1 * m_moon4 / (dm14 * dm14)
    f23 = G * m_moon2 * m_moon3 / (dm23 * dm23)
    f24 = G * m_moon2 * m_moon4 / (dm24 * dm24)
    f34 = G * m_moon3 * m_moon4 / (dm34 * dm34)
    dr12 = atan2(m2y - m1y, m2x - m1x)
    dr13 = atan2(m3y - m1y, m3x - m1x)
    dr14 = atan2(m4y - m1y, m4x - m1x)
    dr23 = atan2(m3y - m2y, m3x - m2x)
    dr24 = atan2(m4y - m2y, m4x - m2x)
    dr34 = atan2(m4y - m3y, m4x - m3x)
    f1x = f12 * cos(dr12) + f13 * cos(dr13) + f14 * cos(dr14)
    f1y = f12 * sin(dr12) + f13 * sin(dr13) + f14 * sin(dr14)
    f2x = f12 * cos(dr12 + pi) + f23 * cos(dr23) + f24 * cos(dr24)
    f2y = f12 * sin(dr12 + pi) + f23 * sin(dr23) + f24 * sin(dr24)
    f3x = f13 * cos(dr13 + pi) + f23 * cos(dr23 + pi) + f34 * cos(dr34)
    f3y = f13 * sin(dr13 + pi) + f23 * sin(dr23 + pi) + f34 * sin(dr34)
    f4x = f14 * cos(dr14 + pi) + f24 * cos(dr24 + pi) + f34 * cos(dr34 + pi)
    f4y = f14 * sin(dr14 + pi) + f24 * sin(dr24 + pi) + f34 * sin(dr34 + pi)
    dX = [
        m1vx, m1vy, m2vx, m2vy,
        m3vx, m3vy, m4vx, m4vy,
        f1x / m_moon1, f1y / m_moon1,
        f2x / m_moon2, f2y / m_moon2,
        f3x / m_moon3, f3y / m_moon3,
        f4x / m_moon4, f4y / m_moon4,
        ]
    return dX
language: python | partition: train | avg_line_len: 31.285714
repo: Capitains/Nautilus
path: capitains_nautilus/collections/sparql.py
url: https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/collections/sparql.py#L92-L104
code:

def set_label(self, label, lang):
    """ Add the label of the collection in given lang

    :param label: Label Value
    :param lang: Language code
    """
    try:
        self.metadata.add(SKOS.prefLabel, Literal(label, lang=lang))
        self.graph.addN([
            (self.asNode(), RDFS.label, Literal(label, lang=lang), self.graph),
        ])
    except Exception as E:
        pass
language: python | partition: train | avg_line_len: 32.692308
repo: datacats/datacats
path: datacats/template.py
url: https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/template.py#L12-L41
code:

def ckan_extension_template(name, target):
    """
    Create ckanext-(name) in target directory.
    """
    setupdir = '{0}/ckanext-{1}theme'.format(target, name)
    extdir = setupdir + '/ckanext/{0}theme'.format(name)
    templatedir = extdir + '/templates/'
    staticdir = extdir + '/static/datacats'

    makedirs(templatedir + '/home/snippets')
    makedirs(staticdir)

    here = dirname(__file__)
    copyfile(here + '/images/chart.png', staticdir + '/chart.png')
    copyfile(here + '/images/datacats-footer.png',
             staticdir + '/datacats-footer.png')

    filecontents = [
        (setupdir + '/setup.py', SETUP_PY),
        (setupdir + '/.gitignore', DOT_GITIGNORE),
        (setupdir + '/ckanext/__init__.py', NAMESPACE_PACKAGE),
        (extdir + '/__init__.py', ''),
        (extdir + '/plugins.py', PLUGINS_PY),
        (templatedir + '/home/snippets/promoted.html', PROMOTED_SNIPPET),
        (templatedir + '/footer.html', FOOTER_HTML),
    ]

    for filename, content in filecontents:
        with open(filename, 'w') as f:
            f.write(content.replace('##name##', name))
language: python | partition: train | avg_line_len: 36
repo: dw/mitogen
path: mitogen/core.py
url: https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L3034-L3043
code:

def shutdown(self):
    """
    Request broker gracefully disconnect streams and stop. Safe to call
    from any thread.
    """
    _v and LOG.debug('%r.shutdown()', self)
    def _shutdown():
        self._alive = False
    if self._alive and not self._exitted:
        self.defer(_shutdown)
language: python | partition: train | avg_line_len: 32
repo: farzadghanei/statsd-metrics
path: statsdmetrics/client/__init__.py
url: https://github.com/farzadghanei/statsd-metrics/blob/153ff37b79777f208e49bb9d3fb737ba52b99f98/statsdmetrics/client/__init__.py#L74-L88
code:

def remove_client(self, client):
    # type: (object) -> None
    """Remove the client from the users of the socket.

    If there are no more clients for the socket, it will close
    automatically.
    """
    try:
        self._clients.remove(id(client))
    except ValueError:
        pass
    if len(self._clients) < 1:
        self.close()
language: python | partition: test | avg_line_len: 25.133333
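remove_client is one half of an id()-based reference count. A self-contained sketch of the pattern (a stand-in, not the library's socket class):

    class SharedResource:
        # Track users by id() so identity, not equality, decides membership
        # and unhashable clients still work.
        def __init__(self):
            self._clients = []

        def add_client(self, client):
            self._clients.append(id(client))

        def remove_client(self, client):
            try:
                self._clients.remove(id(client))
            except ValueError:
                pass
            if len(self._clients) < 1:
                self.close()

        def close(self):
            print("no clients left; closing")

    shared = SharedResource()
    a, b = object(), object()
    shared.add_client(a)
    shared.add_client(b)
    shared.remove_client(a)
    shared.remove_client(b)   # prints the close message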
repo: kytos/kytos-utils
path: kytos/utils/napps.py
url: https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L69-L83
code:

def __require_kytos_config(self):
    """Set path locations from kytosd API.

    It should not be called directly, but from properties that require a
    running kytosd instance.
    """
    if self.__enabled is None:
        uri = self._kytos_api + 'api/kytos/core/config/'
        try:
            options = json.loads(urllib.request.urlopen(uri).read())
        except urllib.error.URLError:
            print('Kytos is not running.')
            sys.exit()
        self.__enabled = Path(options.get('napps'))
        self.__installed = Path(options.get('installed_napps'))
language: python | partition: train | avg_line_len: 41
repo: UCL-INGI/INGInious
path: inginious/frontend/pages/tasks.py
url: https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/tasks.py#L40-L83
code:

def set_selected_submission(self, course, task, submissionid):
    """ Set submission whose id is `submissionid` to selected grading
        submission for the given course/task. Returns a boolean indicating
        whether the operation was successful or not.
    """
    submission = self.submission_manager.get_submission(submissionid)

    # Do not continue if submission does not exist or is not owned by current user
    if not submission:
        return False

    # Check if the submission is from this task/course!
    if submission["taskid"] != task.get_id() or submission["courseid"] != course.get_id():
        return False

    is_staff = self.user_manager.has_staff_rights_on_course(course, self.user_manager.session_username())

    # Do not enable submission selection after deadline
    if not task.get_accessible_time().is_open() and not is_staff:
        return False

    # Only allow to set submission if the student must choose their best submission themselves
    if task.get_evaluate() != 'student' and not is_staff:
        return False

    # Check if task is done per group/team
    if task.is_group_task() and not is_staff:
        group = self.database.aggregations.find_one(
            {"courseid": task.get_course_id(), "groups.students": self.user_manager.session_username()},
            {"groups": {"$elemMatch": {"students": self.user_manager.session_username()}}})
        students = group["groups"][0]["students"]
    else:
        students = [self.user_manager.session_username()]

    # Check if group/team is the same
    if students == submission["username"]:
        self.database.user_tasks.update_many(
            {"courseid": task.get_course_id(), "taskid": task.get_id(), "username": {"$in": students}},
            {"$set": {"submissionid": submission['_id'],
                      "grade": submission['grade'],
                      "succeeded": submission["result"] == "success"}})
        return True
    else:
        return False
language: python | partition: train | avg_line_len: 47.113636
repo: nickmckay/LiPD-utilities
path: Python/lipd/misc.py
url: https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L250-L262
code:

def get_dsn(d):
    """
    Get the dataset name from a record

    :param dict d: Metadata
    :return str: Dataset name
    """
    try:
        return d["dataSetName"]
    except Exception as e:
        logger_misc.warn("get_dsn: Exception: No datasetname found, unable to continue: {}".format(e))
        exit(1)
language: python | partition: train | avg_line_len: 23.461538
repo: KelSolaar/Umbra
path: umbra/ui/common.py
url: https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/common.py#L264-L287
code:

def signals_blocker(instance, attribute, *args, **kwargs):
    """
    Blocks given instance signals before calling the given attribute with \
    given arguments and then unblocks the signals.

    :param instance: Instance object.
    :type instance: QObject
    :param attribute: Attribute to call.
    :type attribute: QObject
    :param \*args: Arguments.
    :type \*args: \*
    :param \*\*kwargs: Keywords arguments.
    :type \*\*kwargs: \*\*
    :return: Object.
    :rtype: object
    """
    value = None
    try:
        hasattr(instance, "blockSignals") and instance.blockSignals(True)
        value = attribute(*args, **kwargs)
    finally:
        hasattr(instance, "blockSignals") and instance.blockSignals(False)
    return value
language: python | partition: train | avg_line_len: 30.375
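The function only relies on duck typing, so a stand-in object shows the block/call/unblock sequence without Qt (in real use, instance would be a QObject such as a QLineEdit):

    class FakeWidget(object):
        def __init__(self):
            self.blocked = False
            self.text = None

        def blockSignals(self, state):
            self.blocked = state

        def setText(self, text):
            assert self.blocked   # signals are blocked during the call
            self.text = text

    widget = FakeWidget()
    signals_blocker(widget, widget.setText, "hello")   # signals_blocker as above
    print(widget.text, widget.blocked)                 # -> hello False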
repo: DEIB-GECO/PyGMQL
path: gmql/ml/algorithms/clustering.py
url: https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L266-L276
code:

def get_labels(obj):
    """
    Retrieve the labels of a clustering object

    :param obj: the clustering object
    :return: the resulting labels
    """
    if Clustering.is_pyclustering_instance(obj.model):
        return obj._labels_from_pyclusters
    else:
        return obj.model.labels_
language: python | partition: train | avg_line_len: 30.090909
repo: SFDO-Tooling/CumulusCI
path: cumulusci/core/keychain/BaseProjectKeychain.py
url: https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L178-L182
code:

def list_orgs(self):
    """ list the orgs configured in the keychain """
    orgs = list(self.orgs.keys())
    orgs.sort()
    return orgs
language: python | partition: train | avg_line_len: 30.2
repo: saltstack/salt
path: salt/modules/slsutil.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/slsutil.py#L37-L56
code:

def merge(obj_a, obj_b, strategy='smart', renderer='yaml', merge_lists=False):
    '''
    Merge a data structure into another by choosing a merge strategy

    Strategies:

    * aggregate
    * list
    * overwrite
    * recurse
    * smart

    CLI Example:

    .. code-block:: shell

        salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
    '''
    return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer, merge_lists)
language: python | partition: train | avg_line_len: 21.6
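As a plain-Python illustration (not Salt's implementation), the 'recurse' strategy amounts to a deep dict merge:

    def recurse_merge(a, b):
        # b wins on conflicts, except nested dicts, which merge recursively.
        out = dict(a)
        for key, value in b.items():
            if isinstance(out.get(key), dict) and isinstance(value, dict):
                out[key] = recurse_merge(out[key], value)
            else:
                out[key] = value
        return out

    print(recurse_merge({'foo': {'a': 1}}, {'foo': {'b': 2}, 'bar': 3}))
    # -> {'foo': {'a': 1, 'b': 2}, 'bar': 3}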
repo: mbedmicro/pyOCD
path: pyocd/flash/flash.py
url: https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/flash/flash.py#L389-L402
code:

def load_page_buffer(self, buffer_number, address, bytes):
    """! @brief Load data to a numbered page buffer.

    This method is used in conjunction with start_program_page_with_buffer() to implement
    double buffered programming.
    """
    assert buffer_number < len(self.page_buffers), "Invalid buffer number"

    # prevent security settings from locking the device
    bytes = self.override_security_bits(address, bytes)

    # transfer the buffer to device RAM
    self.target.write_memory_block8(self.page_buffers[buffer_number], bytes)
language: python | partition: train | avg_line_len: 42
repo: dmlc/gluon-nlp
path: src/gluonnlp/model/bert.py
url: https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/bert.py#L364-L368
code:

def _get_classifier(self, prefix):
    """ Construct a decoder for the next sentence prediction task """
    with self.name_scope():
        classifier = nn.Dense(2, prefix=prefix)
    return classifier
language: python | partition: train | avg_line_len: 42.8
repo: thiagopbueno/pyrddl
path: pyrddl/utils.py
url: https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/utils.py#L32-L44
code:

def rename_state_fluent(name: str) -> str:
    '''Returns current state fluent canonical name.

    Args:
        name (str): The next state fluent name.

    Returns:
        str: The current state fluent name.
    '''
    i = name.index('/')
    functor = name[:i]
    arity = name[i+1:]
    return "{}'/{}".format(functor, arity)
language: python | partition: train | avg_line_len: 24.615385
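rename_state_fluent depends only on the standard library, so its behavior is easy to check; the fluent name below is made up:

    print(rename_state_fluent('reservoir-level/1'))   # -> reservoir-level'/1

The functor before the slash gains RDDL's prime marker and the arity after the slash is preserved.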
repo: yyuu/botornado
path: boto/s3/bucket.py
url: https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/s3/bucket.py#L464-L557
code:

def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
    """
    Deletes a set of keys using S3's Multi-object delete API. If a
    VersionID is specified for that key then that version is removed.
    Returns a MultiDeleteResult Object, which contains Deleted
    and Error elements for each key you ask to delete.

    :type keys: list
    :param keys: A list of either key_names or (key_name, versionid) pairs
        or a list of Key instances.

    :type quiet: boolean
    :param quiet: In quiet mode the response includes only keys where
        the delete operation encountered an error. For a successful
        deletion, the operation does not return any information about
        the delete in the response body.

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial number
        from the MFA device and the current value of the six-digit token
        associated with the device. This value is required anytime you
        are deleting versioned objects from a bucket that has the
        MFADelete option on the bucket.

    :returns: An instance of MultiDeleteResult
    """
    ikeys = iter(keys)
    result = MultiDeleteResult(self)
    provider = self.connection.provider
    query_args = 'delete'

    def delete_keys2(hdrs):
        hdrs = hdrs or {}
        data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
        data += u"<Delete>"
        if quiet:
            data += u"<Quiet>true</Quiet>"
        count = 0
        while count < 1000:
            try:
                key = ikeys.next()
            except StopIteration:
                break
            if isinstance(key, basestring):
                key_name = key
                version_id = None
            elif isinstance(key, tuple) and len(key) == 2:
                key_name, version_id = key
            elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                key_name = key.name
                version_id = key.version_id
            else:
                if isinstance(key, Prefix):
                    key_name = key.name
                    code = 'PrefixSkipped'  # Don't delete Prefix
                else:
                    key_name = repr(key)  # try get a string
                    code = 'InvalidArgument'  # other unknown type
                message = 'Invalid. No delete action taken for this object.'
                error = Error(key_name, code=code, message=message)
                result.errors.append(error)
                continue
            count += 1
            #key_name = key_name.decode('utf-8')
            data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
            if version_id:
                data += u"<VersionId>%s</VersionId>" % version_id
            data += u"</Object>"
        data += u"</Delete>"
        if count <= 0:
            return False  # no more
        data = data.encode('utf-8')
        fp = StringIO.StringIO(data)
        md5 = boto.utils.compute_md5(fp)
        hdrs['Content-MD5'] = md5[1]
        hdrs['Content-Type'] = 'text/xml'
        if mfa_token:
            hdrs[provider.mfa_header] = ' '.join(mfa_token)
        response = self.connection.make_request('POST', self.name,
                                                headers=hdrs,
                                                query_args=query_args,
                                                data=data)
        body = response.read()
        if response.status == 200:
            h = handler.XmlHandler(result, self)
            xml.sax.parseString(body, h)
            return count >= 1000  # more?
        else:
            raise provider.storage_response_error(response.status,
                                                  response.reason,
                                                  body)

    while delete_keys2(headers):
        pass
    return result
python
train
45.904255
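A minimal usage sketch for delete_keys above, assuming the classic Python 2 boto API it belongs to; the bucket and key names are hypothetical. Plain key names and (key_name, version_id) pairs can be mixed in a single call, and per-key failures are reported on the result object.

import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')  # hypothetical bucket name
# Mix plain key names with (key_name, version_id) pairs.
result = bucket.delete_keys(['logs/a.txt', ('logs/b.txt', 'v1-abc123')],
                            quiet=True)
for error in result.errors:
    print('%s failed: %s (%s)' % (error.key, error.code, error.message))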
LudovicRousseau/PyKCS11
PyKCS11/__init__.py
https://github.com/LudovicRousseau/PyKCS11/blob/76ccd8741af2ea193aaf1ca29dfedfa412c134fe/PyKCS11/__init__.py#L903-L916
def login(self, pin, user_type=CKU_USER): """ C_Login :param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH :type pin: string :param user_type: the user type. The default value is CKU_USER. You may also use CKU_SO :type user_type: integer """ pin1 = ckbytelist(pin) rv = self.lib.C_Login(self.session, user_type, pin1) if rv != CKR_OK: raise PyKCS11Error(rv)
[ "def", "login", "(", "self", ",", "pin", ",", "user_type", "=", "CKU_USER", ")", ":", "pin1", "=", "ckbytelist", "(", "pin", ")", "rv", "=", "self", ".", "lib", ".", "C_Login", "(", "self", ".", "session", ",", "user_type", ",", "pin1", ")", "if", "rv", "!=", "CKR_OK", ":", "raise", "PyKCS11Error", "(", "rv", ")" ]
C_Login :param pin: the user's PIN or None for CKF_PROTECTED_AUTHENTICATION_PATH :type pin: string :param user_type: the user type. The default value is CKU_USER. You may also use CKU_SO :type user_type: integer
[ "C_Login" ]
python
test
33.357143
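A short usage sketch for login above; the PKCS#11 module path and PIN are hypothetical. The session is obtained from PyKCS11Lib, logged in as a normal user, and logged out when done.

from PyKCS11 import PyKCS11Lib, CKU_USER

pkcs11 = PyKCS11Lib()
pkcs11.load('/usr/lib/softhsm/libsofthsm2.so')  # hypothetical module path
slot = pkcs11.getSlotList(tokenPresent=True)[0]
session = pkcs11.openSession(slot)
session.login('1234', user_type=CKU_USER)  # hypothetical PIN
# ... perform token operations here ...
session.logout()
session.closeSession()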
dmbee/seglearn
seglearn/pipe.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L292-L308
def predict_proba(self, X): """ Apply transforms, and predict_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_proba : array-like, shape = [n_samples, n_classes] Predicted probability of each class """ Xt, _, _ = self._transform(X) return self._final_estimator.predict_proba(Xt)
[ "def", "predict_proba", "(", "self", ",", "X", ")", ":", "Xt", ",", "_", ",", "_", "=", "self", ".", "_transform", "(", "X", ")", "return", "self", ".", "_final_estimator", ".", "predict_proba", "(", "Xt", ")" ]
Apply transforms, and predict_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_proba : array-like, shape = [n_samples, n_classes] Predicted probability of each class
[ "Apply", "transforms", "and", "predict_proba", "of", "the", "final", "estimator" ]
python
train
29.647059
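A hedged sketch of predict_proba on a fitted seglearn pipeline, assuming the package's SegmentX and FeatureRep transforms; the toy data and step choices are illustrative only.

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from seglearn.pipe import Pype
from seglearn.transform import SegmentX, FeatureRep

# Toy data: 10 univariate time series of length 200 with binary labels.
X = [np.random.rand(200, 1) for _ in range(10)]
y = np.random.randint(0, 2, 10)

pipe = Pype([('segment', SegmentX(width=50)),
             ('features', FeatureRep()),
             ('clf', RandomForestClassifier(n_estimators=20))])
pipe.fit(X, y)
proba = pipe.predict_proba(X)  # one probability row per segment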
acorg/dark-matter
dark/blast/conversion.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/blast/conversion.py#L287-L326
def readAlignments(self, reads): """ Read lines of JSON from self._filename, convert them to read alignments and yield them. @param reads: An iterable of L{Read} instances, corresponding to the reads that were given to BLAST. @raise ValueError: If any of the lines in the file cannot be converted to JSON. @return: A generator that yields C{dark.alignments.ReadAlignments} instances. """ if self._fp is None: self._open(self._filename) reads = iter(reads) try: for lineNumber, line in enumerate(self._fp, start=2): try: record = loads(line[:-1]) except ValueError as e: raise ValueError( 'Could not convert line %d of %r to JSON (%s). ' 'Line is %r.' % (lineNumber, self._filename, e, line[:-1])) else: try: read = next(reads) except StopIteration: raise ValueError( 'Read generator failed to yield read number %d ' 'during parsing of BLAST file %r.' % (lineNumber - 1, self._filename)) else: alignments = self._dictToAlignments(record, read) yield ReadAlignments(read, alignments) finally: self._fp.close() self._fp = None
[ "def", "readAlignments", "(", "self", ",", "reads", ")", ":", "if", "self", ".", "_fp", "is", "None", ":", "self", ".", "_open", "(", "self", ".", "_filename", ")", "reads", "=", "iter", "(", "reads", ")", "try", ":", "for", "lineNumber", ",", "line", "in", "enumerate", "(", "self", ".", "_fp", ",", "start", "=", "2", ")", ":", "try", ":", "record", "=", "loads", "(", "line", "[", ":", "-", "1", "]", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "'Could not convert line %d of %r to JSON (%s). '", "'Line is %r.'", "%", "(", "lineNumber", ",", "self", ".", "_filename", ",", "e", ",", "line", "[", ":", "-", "1", "]", ")", ")", "else", ":", "try", ":", "read", "=", "next", "(", "reads", ")", "except", "StopIteration", ":", "raise", "ValueError", "(", "'Read generator failed to yield read number %d '", "'during parsing of BLAST file %r.'", "%", "(", "lineNumber", "-", "1", ",", "self", ".", "_filename", ")", ")", "else", ":", "alignments", "=", "self", ".", "_dictToAlignments", "(", "record", ",", "read", ")", "yield", "ReadAlignments", "(", "read", ",", "alignments", ")", "finally", ":", "self", ".", "_fp", ".", "close", "(", ")", "self", ".", "_fp", "=", "None" ]
Read lines of JSON from self._filename, convert them to read alignments and yield them. @param reads: An iterable of L{Read} instances, corresponding to the reads that were given to BLAST. @raise ValueError: If any of the lines in the file cannot be converted to JSON. @return: A generator that yields C{dark.alignments.ReadAlignments} instances.
[ "Read", "lines", "of", "JSON", "from", "self", ".", "_filename", "convert", "them", "to", "read", "alignments", "and", "yield", "them", "." ]
python
train
38.975
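A hedged usage sketch for readAlignments above, assuming the enclosing class is dark-matter's JSON records reader and that FastaReads supplies the reads originally given to BLAST; the file names are hypothetical.

from dark.blast.conversion import JSONRecordsReader
from dark.fasta import FastaReads

reads = FastaReads('queries.fasta')       # hypothetical query FASTA
reader = JSONRecordsReader('blast.json')  # hypothetical converted BLAST output
for readAlignments in reader.readAlignments(reads):
    print(readAlignments.read.id)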
jwodder/javaproperties
javaproperties/xmlprops.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L88-L112
def dump_xml(props, fp, comment=None, encoding='UTF-8', sort_keys=False): """ Write a series ``props`` of key-value pairs to a binary filehandle ``fp`` in the format of an XML properties file. The file will include both an XML declaration and a doctype declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to write to ``fp``. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param fp: a file-like object to write the values of ``props`` to :type fp: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :return: `None` """ fp = codecs.lookup(encoding).streamwriter(fp, errors='xmlcharrefreplace') print('<?xml version="1.0" encoding={0} standalone="no"?>' .format(quoteattr(encoding)), file=fp) for s in _stream_xml(props, comment, sort_keys): print(s, file=fp)
[ "def", "dump_xml", "(", "props", ",", "fp", ",", "comment", "=", "None", ",", "encoding", "=", "'UTF-8'", ",", "sort_keys", "=", "False", ")", ":", "fp", "=", "codecs", ".", "lookup", "(", "encoding", ")", ".", "streamwriter", "(", "fp", ",", "errors", "=", "'xmlcharrefreplace'", ")", "print", "(", "'<?xml version=\"1.0\" encoding={0} standalone=\"no\"?>'", ".", "format", "(", "quoteattr", "(", "encoding", ")", ")", ",", "file", "=", "fp", ")", "for", "s", "in", "_stream_xml", "(", "props", ",", "comment", ",", "sort_keys", ")", ":", "print", "(", "s", ",", "file", "=", "fp", ")" ]
Write a series ``props`` of key-value pairs to a binary filehandle ``fp`` in the format of an XML properties file. The file will include both an XML declaration and a doctype declaration. :param props: A mapping or iterable of ``(key, value)`` pairs to write to ``fp``. All keys and values in ``props`` must be text strings. If ``sort_keys`` is `False`, the entries are output in iteration order. :param fp: a file-like object to write the values of ``props`` to :type fp: binary file-like object :param comment: if non-`None`, ``comment`` will be output as a ``<comment>`` element before the ``<entry>`` elements :type comment: text string or `None` :param string encoding: the name of the encoding to use for the XML document (also included in the XML declaration) :param bool sort_keys: if true, the elements of ``props`` are sorted lexicographically by key in the output :return: `None`
[ "Write", "a", "series", "props", "of", "key", "-", "value", "pairs", "to", "a", "binary", "filehandle", "fp", "in", "the", "format", "of", "an", "XML", "properties", "file", ".", "The", "file", "will", "include", "both", "an", "XML", "declaration", "and", "a", "doctype", "declaration", "." ]
python
train
52.52
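A minimal sketch of dump_xml above; the file name and properties are hypothetical. The filehandle must be opened in binary mode, since the function wraps it in an encoder itself.

from javaproperties import dump_xml

props = {'db.host': 'example.com', 'db.port': '5432'}
with open('settings.xml', 'wb') as fp:  # binary mode, as the docstring requires
    dump_xml(props, fp, comment='Generated settings', sort_keys=True)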
neovim/pynvim
pynvim/api/buffer.py
https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/api/buffer.py#L100-L106
def add_highlight(self, hl_group, line, col_start=0, col_end=-1, src_id=-1, async_=None, **kwargs): """Add a highlight to the buffer.""" async_ = check_async(async_, kwargs, src_id != 0) return self.request('nvim_buf_add_highlight', src_id, hl_group, line, col_start, col_end, async_=async_)
[ "def", "add_highlight", "(", "self", ",", "hl_group", ",", "line", ",", "col_start", "=", "0", ",", "col_end", "=", "-", "1", ",", "src_id", "=", "-", "1", ",", "async_", "=", "None", ",", "*", "*", "kwargs", ")", ":", "async_", "=", "check_async", "(", "async_", ",", "kwargs", ",", "src_id", "!=", "0", ")", "return", "self", ".", "request", "(", "'nvim_buf_add_highlight'", ",", "src_id", ",", "hl_group", ",", "line", ",", "col_start", ",", "col_end", ",", "async_", "=", "async_", ")" ]
Add a highlight to the buffer.
[ "Add", "a", "highlight", "to", "the", "buffer", "." ]
python
train
54.428571
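A short sketch of add_highlight above inside an attached pynvim session; the socket path and buffer contents are assumptions. Passing src_id=0 asks Nvim to allocate a fresh source id, and per the check_async default the call is then made synchronously, so the allocated id is returned.

import pynvim

nvim = pynvim.attach('socket', path='/tmp/nvim.sock')  # hypothetical socket
buf = nvim.current.buffer
# Highlight columns 0-5 of the first line with the 'Search' group.
src_id = buf.add_highlight('Search', 0, col_start=0, col_end=5, src_id=0)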
apple/turicreate
src/unity/python/turicreate/toolkits/regression/random_forest_regression.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/random_forest_regression.py#L179-L228
def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
    """
    Evaluate the model on the given dataset.

    Parameters
    ----------
    dataset : SFrame
        Dataset in the same format used for training. The column names and
        types of the dataset must be the same as that used in training.

    metric : str, optional
        Name of the evaluation metric.  Possible values are:

        'auto'      : Compute all metrics.
        'rmse'      : Root mean squared error.
        'max_error' : Maximum error.

    missing_value_action : str, optional
        Action to perform when missing values are encountered. Can be
        one of:

        - 'auto': By default the model will treat missing values as is.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing
          values are also imputed if an entire column of data is
          missing during evaluation.
        - 'error': Do not proceed with evaluation and terminate with
          an error message.

    Returns
    -------
    out : dict
        A dictionary containing the evaluation result.

    See Also
    --------
    create, predict

    Examples
    --------
    .. sourcecode:: python

      >>> results = model.evaluate(test_data, 'rmse')
    """
    _raise_error_evaluation_metric_is_valid(
        metric, ['auto', 'rmse', 'max_error'])
    return super(RandomForestRegression, self).evaluate(
        dataset, missing_value_action=missing_value_action, metric=metric)
[ "def", "evaluate", "(", "self", ",", "dataset", ",", "metric", "=", "'auto'", ",", "missing_value_action", "=", "'auto'", ")", ":", "_raise_error_evaluation_metric_is_valid", "(", "metric", ",", "[", "'auto'", ",", "'rmse'", ",", "'max_error'", "]", ")", "return", "super", "(", "RandomForestRegression", ",", "self", ")", ".", "evaluate", "(", "dataset", ",", "missing_value_action", "=", "missing_value_action", ",", "metric", "=", "metric", ")" ]
Evaluate the model on the given dataset.

Parameters
----------
dataset : SFrame
    Dataset in the same format used for training. The column names and
    types of the dataset must be the same as that used in training.

metric : str, optional
    Name of the evaluation metric.  Possible values are:

    'auto'      : Compute all metrics.
    'rmse'      : Root mean squared error.
    'max_error' : Maximum error.

missing_value_action : str, optional
    Action to perform when missing values are encountered. Can be
    one of:

    - 'auto': By default the model will treat missing values as is.
    - 'impute': Proceed with evaluation by filling in the missing
      values with the mean of the training data. Missing
      values are also imputed if an entire column of data is
      missing during evaluation.
    - 'error': Do not proceed with evaluation and terminate with
      an error message.

Returns
-------
out : dict
    A dictionary containing the evaluation result.

See Also
--------
create, predict

Examples
--------
.. sourcecode:: python

  >>> results = model.evaluate(test_data, 'rmse')
[ "Evaluate", "the", "model", "on", "the", "given", "dataset", "." ]
python
train
34.36
ansible/molecule
molecule/command/converge.py
https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/command/converge.py#L72-L81
def execute(self):
    """
    Execute the actions necessary to perform a `molecule converge` and
    return None.

    :return: None
    """
    self.print_info()
    self._config.provisioner.converge()
    self._config.state.change_state('converged', True)
[ "def", "execute", "(", "self", ")", ":", "self", ".", "print_info", "(", ")", "self", ".", "_config", ".", "provisioner", ".", "converge", "(", ")", "self", ".", "_config", ".", "state", ".", "change_state", "(", "'converged'", ",", "True", ")" ]
Execute the actions necessary to perform a `molecule converge` and
return None.

:return: None
[ "Execute", "the", "actions", "necessary", "to", "perform", "a", "molecule", "converge", "and", "return", "None", "." ]
python
train
28.2
awslabs/aws-sam-cli
samcli/commands/local/lib/local_api_service.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/local_api_service.py#L86-L106
def _make_routing_list(api_provider): """ Returns a list of routes to configure the Local API Service based on the APIs configured in the template. Parameters ---------- api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider Returns ------- list(samcli.local.apigw.service.Route) List of Routes to pass to the service """ routes = [] for api in api_provider.get_all(): route = Route(methods=[api.method], function_name=api.function_name, path=api.path, binary_types=api.binary_media_types) routes.append(route) return routes
[ "def", "_make_routing_list", "(", "api_provider", ")", ":", "routes", "=", "[", "]", "for", "api", "in", "api_provider", ".", "get_all", "(", ")", ":", "route", "=", "Route", "(", "methods", "=", "[", "api", ".", "method", "]", ",", "function_name", "=", "api", ".", "function_name", ",", "path", "=", "api", ".", "path", ",", "binary_types", "=", "api", ".", "binary_media_types", ")", "routes", ".", "append", "(", "route", ")", "return", "routes" ]
Returns a list of routes to configure the Local API Service based on the APIs configured in the template. Parameters ---------- api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider Returns ------- list(samcli.local.apigw.service.Route) List of Routes to pass to the service
[ "Returns", "a", "list", "of", "routes", "to", "configure", "the", "Local", "API", "Service", "based", "on", "the", "APIs", "configured", "in", "the", "template", "." ]
python
train
32.571429
numenta/htmresearch
htmresearch/support/union_temporal_pooler_monitor_mixin.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/support/union_temporal_pooler_monitor_mixin.py#L249-L273
def mmPrettyPrintDataOverlap(self): """ Returns pretty-printed string representation of overlap metric data. (See `mmGetDataOverlap`.) @return (string) Pretty-printed data """ matrix = self.mmGetDataOverlap() resetsTrace = self.mmGetTraceResets() text = "" for i, row in enumerate(matrix): if resetsTrace.data[i]: text += "\n" for j, item in enumerate(row): if resetsTrace.data[j]: text += " " text += "{:4}".format(item) text += "\n" return text
[ "def", "mmPrettyPrintDataOverlap", "(", "self", ")", ":", "matrix", "=", "self", ".", "mmGetDataOverlap", "(", ")", "resetsTrace", "=", "self", ".", "mmGetTraceResets", "(", ")", "text", "=", "\"\"", "for", "i", ",", "row", "in", "enumerate", "(", "matrix", ")", ":", "if", "resetsTrace", ".", "data", "[", "i", "]", ":", "text", "+=", "\"\\n\"", "for", "j", ",", "item", "in", "enumerate", "(", "row", ")", ":", "if", "resetsTrace", ".", "data", "[", "j", "]", ":", "text", "+=", "\" \"", "text", "+=", "\"{:4}\"", ".", "format", "(", "item", ")", "text", "+=", "\"\\n\"", "return", "text" ]
Returns pretty-printed string representation of overlap metric data. (See `mmGetDataOverlap`.) @return (string) Pretty-printed data
[ "Returns", "pretty", "-", "printed", "string", "representation", "of", "overlap", "metric", "data", ".", "(", "See", "mmGetDataOverlap", ".", ")" ]
python
train
20.96
conchoecia/gloTK
gloTK/scripts/glotk_sweep.py
https://github.com/conchoecia/gloTK/blob/58abee663fcfbbd09f4863c3ca3ae054e33184a8/gloTK/scripts/glotk_sweep.py#L165-L191
def meraculous_runner(self):
    """
    Check to make sure that the allAssembliesDir has been created, if not,
    make it. This will only execute for the first time an assembly has
    been run in this directory.

    Run from the allAssembliesDir directory. The self.callString instance
    attribute tells Meraculous to name the assembly directory self.runName.

    After the run is complete, create the meraculous report, passing the
    directory containing the run (aka self.thisAssemblyDir).
    """
    # set the dir to temp assembly dir
    os.chdir(self.allAssembliesDir)
    print(self.callString)
    p = subprocess.run(self.callString, shell=True,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       universal_newlines=True)
    output = str(p.stdout)
    err = str(p.stderr)
    # generate the report for the run
    self._generate_report()
    # exit, returning the output and err
    return (output, err)
[ "def", "meraculous_runner", "(", "self", ")", ":", "#set the dir to temp assembly dir", "os", ".", "chdir", "(", "self", ".", "allAssembliesDir", ")", "print", "(", "self", ".", "callString", ")", "p", "=", "subprocess", ".", "run", "(", "self", ".", "callString", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "universal_newlines", "=", "True", ")", "output", "=", "str", "(", "p", ".", "stdout", ")", "err", "=", "str", "(", "p", ".", "stderr", ")", "#generate the report for the run", "self", ".", "_generate_report", "(", ")", "#exit, returning the output and err", "return", "(", "output", ",", "err", ")" ]
Check to make sure that the allAssembliesDir has been created, if not,
make it. This will only execute for the first time an assembly has been
run in this directory.

Run from the allAssembliesDir directory. The self.callString instance
attribute tells Meraculous to name the assembly directory self.runName.

After the run is complete, create the meraculous report, passing the
directory containing the run (aka self.thisAssemblyDir).
[ "Check", "to", "make", "sure", "that", "the", "allAssembliesDir", "has", "been", "created", "if", "not", "make", "it", ".", "This", "will", "only", "execute", "for", "the", "first", "time", "an", "assembly", "has", "been", "run", "in", "this", "directory", "." ]
python
train
37.851852
numenta/htmresearch
htmresearch/algorithms/location_modules.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/location_modules.py#L366-L398
def chooseReliableActiveFiringRate(cellsPerAxis, bumpSigma, minimumActiveDiameter=None): """ When a cell is activated by sensory input, this implies that the phase is within a particular small patch of the rhombus. This patch is roughly equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on the cell. This 2/sqrt(3) accounts for the fact that when circles are packed into hexagons, there are small uncovered spaces between the circles, so the circles need to expand by a factor of (2/sqrt(3)) to cover this space. This sensory input will activate the phase at the center of this cell. To account for uncertainty of the actual phase that was used during learning, the bump of active cells needs to be sufficiently large for this cell to remain active until the bump has moved by the above diameter. So the diameter of the bump (and, equivalently, the cell's firing field) needs to be at least 2 of the above diameters. @param minimumActiveDiameter (float or None) If specified, this makes sure the bump of active cells is always above a certain size. This is useful for testing scenarios where grid cell modules can only encode location with a limited "readout resolution", matching the biology. @return An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule. """ firingFieldDiameter = 2 * (1./cellsPerAxis)*(2./math.sqrt(3)) if minimumActiveDiameter: firingFieldDiameter = max(firingFieldDiameter, minimumActiveDiameter) return ThresholdedGaussian2DLocationModule.gaussian( bumpSigma, firingFieldDiameter / 2.)
[ "def", "chooseReliableActiveFiringRate", "(", "cellsPerAxis", ",", "bumpSigma", ",", "minimumActiveDiameter", "=", "None", ")", ":", "firingFieldDiameter", "=", "2", "*", "(", "1.", "/", "cellsPerAxis", ")", "*", "(", "2.", "/", "math", ".", "sqrt", "(", "3", ")", ")", "if", "minimumActiveDiameter", ":", "firingFieldDiameter", "=", "max", "(", "firingFieldDiameter", ",", "minimumActiveDiameter", ")", "return", "ThresholdedGaussian2DLocationModule", ".", "gaussian", "(", "bumpSigma", ",", "firingFieldDiameter", "/", "2.", ")" ]
When a cell is activated by sensory input, this implies that the phase is within a particular small patch of the rhombus. This patch is roughly equivalent to a circle of diameter (1/cellsPerAxis)(2/sqrt(3)), centered on the cell. This 2/sqrt(3) accounts for the fact that when circles are packed into hexagons, there are small uncovered spaces between the circles, so the circles need to expand by a factor of (2/sqrt(3)) to cover this space. This sensory input will activate the phase at the center of this cell. To account for uncertainty of the actual phase that was used during learning, the bump of active cells needs to be sufficiently large for this cell to remain active until the bump has moved by the above diameter. So the diameter of the bump (and, equivalently, the cell's firing field) needs to be at least 2 of the above diameters. @param minimumActiveDiameter (float or None) If specified, this makes sure the bump of active cells is always above a certain size. This is useful for testing scenarios where grid cell modules can only encode location with a limited "readout resolution", matching the biology. @return An "activeFiringRate" for use in the ThresholdedGaussian2DLocationModule.
[ "When", "a", "cell", "is", "activated", "by", "sensory", "input", "this", "implies", "that", "the", "phase", "is", "within", "a", "particular", "small", "patch", "of", "the", "rhombus", ".", "This", "patch", "is", "roughly", "equivalent", "to", "a", "circle", "of", "diameter", "(", "1", "/", "cellsPerAxis", ")", "(", "2", "/", "sqrt", "(", "3", "))", "centered", "on", "the", "cell", ".", "This", "2", "/", "sqrt", "(", "3", ")", "accounts", "for", "the", "fact", "that", "when", "circles", "are", "packed", "into", "hexagons", "there", "are", "small", "uncovered", "spaces", "between", "the", "circles", "so", "the", "circles", "need", "to", "expand", "by", "a", "factor", "of", "(", "2", "/", "sqrt", "(", "3", "))", "to", "cover", "this", "space", "." ]
python
train
50.545455
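A small numeric check of the diameter reasoning in the docstring above (illustrative only, not part of the library): the firing-field diameter is twice the per-cell patch diameter (1/cellsPerAxis) * (2/sqrt(3)).

import math

cellsPerAxis = 10
patch_diameter = (1.0 / cellsPerAxis) * (2.0 / math.sqrt(3))
firing_field_diameter = 2 * patch_diameter
print(round(firing_field_diameter, 4))  # 0.2309, in rhombus phase units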
numenta/htmresearch
htmresearch/frameworks/location/path_integration_union_narrowing.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/path_integration_union_narrowing.py#L225-L252
def sensoryCompute(self, activeMinicolumns, learn): """ @param activeMinicolumns (numpy array) List of indices of minicolumns to activate. @param learn (bool) If True, the two layers should learn this association. @return (tuple of dicts) Data for logging/tracing. """ inputParams = { "activeColumns": activeMinicolumns, "basalInput": self.getLocationRepresentation(), "basalGrowthCandidates": self.getLearnableLocationRepresentation(), "learn": learn } self.L4.compute(**inputParams) locationParams = { "anchorInput": self.L4.getActiveCells(), "anchorGrowthCandidates": self.L4.getWinnerCells(), "learn": learn, } for module in self.L6aModules: module.sensoryCompute(**locationParams) return (inputParams, locationParams)
[ "def", "sensoryCompute", "(", "self", ",", "activeMinicolumns", ",", "learn", ")", ":", "inputParams", "=", "{", "\"activeColumns\"", ":", "activeMinicolumns", ",", "\"basalInput\"", ":", "self", ".", "getLocationRepresentation", "(", ")", ",", "\"basalGrowthCandidates\"", ":", "self", ".", "getLearnableLocationRepresentation", "(", ")", ",", "\"learn\"", ":", "learn", "}", "self", ".", "L4", ".", "compute", "(", "*", "*", "inputParams", ")", "locationParams", "=", "{", "\"anchorInput\"", ":", "self", ".", "L4", ".", "getActiveCells", "(", ")", ",", "\"anchorGrowthCandidates\"", ":", "self", ".", "L4", ".", "getWinnerCells", "(", ")", ",", "\"learn\"", ":", "learn", ",", "}", "for", "module", "in", "self", ".", "L6aModules", ":", "module", ".", "sensoryCompute", "(", "*", "*", "locationParams", ")", "return", "(", "inputParams", ",", "locationParams", ")" ]
@param activeMinicolumns (numpy array) List of indices of minicolumns to activate. @param learn (bool) If True, the two layers should learn this association. @return (tuple of dicts) Data for logging/tracing.
[ "@param", "activeMinicolumns", "(", "numpy", "array", ")", "List", "of", "indices", "of", "minicolumns", "to", "activate", "." ]
python
train
28.821429
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4056-L4069
def get_stp_mst_detail_output_cist_port_interface_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") port = ET.SubElement(cist, "port") interface_name = ET.SubElement(port, "interface-name") interface_name.text = kwargs.pop('interface_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "cist", "=", "ET", ".", "SubElement", "(", "output", ",", "\"cist\"", ")", "port", "=", "ET", ".", "SubElement", "(", "cist", ",", "\"port\"", ")", "interface_name", "=", "ET", ".", "SubElement", "(", "port", ",", "\"interface-name\"", ")", "interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
42.785714
timothyb0912/pylogit
pylogit/bootstrap_abc.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_abc.py#L66-L76
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs): """ Ensures the args to `create_long_form_weights` have expected properties. """ # Ensure model_obj has the necessary method for create_long_form_weights ensure_model_obj_has_mapping_constructor(model_obj) # Ensure wide_weights is a 1D or 2D ndarray. ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights) # Ensure rows_to_obs is a scipy sparse matrix ensure_rows_to_obs_validity(rows_to_obs) return None
[ "def", "check_validity_of_long_form_args", "(", "model_obj", ",", "wide_weights", ",", "rows_to_obs", ")", ":", "# Ensure model_obj has the necessary method for create_long_form_weights", "ensure_model_obj_has_mapping_constructor", "(", "model_obj", ")", "# Ensure wide_weights is a 1D or 2D ndarray.", "ensure_wide_weights_is_1D_or_2D_ndarray", "(", "wide_weights", ")", "# Ensure rows_to_obs is a scipy sparse matrix", "ensure_rows_to_obs_validity", "(", "rows_to_obs", ")", "return", "None" ]
Ensures the args to `create_long_form_weights` have expected properties.
[ "Ensures", "the", "args", "to", "create_long_form_weights", "have", "expected", "properties", "." ]
python
train
46.272727
quantmind/pulsar
pulsar/utils/httpurl.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/httpurl.py#L198-L211
def quote_header_value(value, extra_chars='', allow_token=True): """Quote a header value if necessary. :param value: the value to quote. :param extra_chars: a list of extra characters to skip quoting. :param allow_token: if this is enabled token values are returned unchanged. """ value = to_string(value) if allow_token: token_chars = HEADER_TOKEN_CHARS | set(extra_chars) if set(value).issubset(token_chars): return value return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
[ "def", "quote_header_value", "(", "value", ",", "extra_chars", "=", "''", ",", "allow_token", "=", "True", ")", ":", "value", "=", "to_string", "(", "value", ")", "if", "allow_token", ":", "token_chars", "=", "HEADER_TOKEN_CHARS", "|", "set", "(", "extra_chars", ")", "if", "set", "(", "value", ")", ".", "issubset", "(", "token_chars", ")", ":", "return", "value", "return", "'\"%s\"'", "%", "value", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")" ]
Quote a header value if necessary. :param value: the value to quote. :param extra_chars: a list of extra characters to skip quoting. :param allow_token: if this is enabled token values are returned unchanged.
[ "Quote", "a", "header", "value", "if", "necessary", "." ]
python
train
38.785714
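Illustrative behaviour of quote_header_value above; the values are arbitrary. Token-only strings pass through, anything else is quoted, and embedded quotes and backslashes are escaped.

from pulsar.utils.httpurl import quote_header_value

print(quote_header_value('gzip'))       # token characters only -> gzip
print(quote_header_value('text/html'))  # '/' is not a token char -> "text/html"
print(quote_header_value('say "hi"'))   # inner quotes escaped -> "say \"hi\""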
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1142-L1160
def workload_state_compare(current_workload_state, workload_state): """ Return highest priority of two states""" hierarchy = {'unknown': -1, 'active': 0, 'maintenance': 1, 'waiting': 2, 'blocked': 3, } if hierarchy.get(workload_state) is None: workload_state = 'unknown' if hierarchy.get(current_workload_state) is None: current_workload_state = 'unknown' # Set workload_state based on hierarchy of statuses if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): return current_workload_state else: return workload_state
[ "def", "workload_state_compare", "(", "current_workload_state", ",", "workload_state", ")", ":", "hierarchy", "=", "{", "'unknown'", ":", "-", "1", ",", "'active'", ":", "0", ",", "'maintenance'", ":", "1", ",", "'waiting'", ":", "2", ",", "'blocked'", ":", "3", ",", "}", "if", "hierarchy", ".", "get", "(", "workload_state", ")", "is", "None", ":", "workload_state", "=", "'unknown'", "if", "hierarchy", ".", "get", "(", "current_workload_state", ")", "is", "None", ":", "current_workload_state", "=", "'unknown'", "# Set workload_state based on hierarchy of statuses", "if", "hierarchy", ".", "get", "(", "current_workload_state", ")", ">", "hierarchy", ".", "get", "(", "workload_state", ")", ":", "return", "current_workload_state", "else", ":", "return", "workload_state" ]
Return highest priority of two states
[ "Return", "highest", "priority", "of", "two", "states" ]
python
train
35.157895
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/flake_id_generator_new_id_batch_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/flake_id_generator_new_id_batch_codec.py#L29-L35
def decode_response(client_message, to_object=None): """ Decode response from client message""" parameters = dict(base=None, increment=None, batch_size=None) parameters['base'] = client_message.read_long() parameters['increment'] = client_message.read_long() parameters['batch_size'] = client_message.read_int() return parameters
[ "def", "decode_response", "(", "client_message", ",", "to_object", "=", "None", ")", ":", "parameters", "=", "dict", "(", "base", "=", "None", ",", "increment", "=", "None", ",", "batch_size", "=", "None", ")", "parameters", "[", "'base'", "]", "=", "client_message", ".", "read_long", "(", ")", "parameters", "[", "'increment'", "]", "=", "client_message", ".", "read_long", "(", ")", "parameters", "[", "'batch_size'", "]", "=", "client_message", ".", "read_int", "(", ")", "return", "parameters" ]
Decode response from client message
[ "Decode", "response", "from", "client", "message" ]
python
train
49.571429
biocore/mustached-octo-ironman
moi/group.py
https://github.com/biocore/mustached-octo-ironman/blob/54128d8fdff327e1b7ffd9bb77bf38c3df9526d7/moi/group.py#L139-L163
def unlisten_to_node(self, id_): """Stop listening to a job Parameters ---------- id_ : str An ID to remove Returns -------- str or None The ID removed or None if the ID was not removed """ id_pubsub = _pubsub_key(id_) if id_pubsub in self._listening_to: del self._listening_to[id_pubsub] self.toredis.unsubscribe(id_pubsub) parent = json_decode(r_client.get(id_)).get('parent', None) if parent is not None: r_client.srem(_children_key(parent), id_) r_client.srem(self.group_children, id_) return id_
[ "def", "unlisten_to_node", "(", "self", ",", "id_", ")", ":", "id_pubsub", "=", "_pubsub_key", "(", "id_", ")", "if", "id_pubsub", "in", "self", ".", "_listening_to", ":", "del", "self", ".", "_listening_to", "[", "id_pubsub", "]", "self", ".", "toredis", ".", "unsubscribe", "(", "id_pubsub", ")", "parent", "=", "json_decode", "(", "r_client", ".", "get", "(", "id_", ")", ")", ".", "get", "(", "'parent'", ",", "None", ")", "if", "parent", "is", "not", "None", ":", "r_client", ".", "srem", "(", "_children_key", "(", "parent", ")", ",", "id_", ")", "r_client", ".", "srem", "(", "self", ".", "group_children", ",", "id_", ")", "return", "id_" ]
Stop listening to a job Parameters ---------- id_ : str An ID to remove Returns -------- str or None The ID removed or None if the ID was not removed
[ "Stop", "listening", "to", "a", "job" ]
python
train
26.92
smarie/python-parsyfiles
parsyfiles/plugins_base/support_for_objects.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_base/support_for_objects.py#L191-L202
def should_display_warnings_for(to_type): """ Central method where we control whether warnings should be displayed """ if not hasattr(to_type, '__module__'): return True elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \ or to_type.__name__ in {'DataFrame'}: return False elif issubclass(to_type, int) or issubclass(to_type, str) \ or issubclass(to_type, float) or issubclass(to_type, bool): return False else: return True
[ "def", "should_display_warnings_for", "(", "to_type", ")", ":", "if", "not", "hasattr", "(", "to_type", ",", "'__module__'", ")", ":", "return", "True", "elif", "to_type", ".", "__module__", "in", "{", "'builtins'", "}", "or", "to_type", ".", "__module__", ".", "startswith", "(", "'parsyfiles'", ")", "or", "to_type", ".", "__name__", "in", "{", "'DataFrame'", "}", ":", "return", "False", "elif", "issubclass", "(", "to_type", ",", "int", ")", "or", "issubclass", "(", "to_type", ",", "str", ")", "or", "issubclass", "(", "to_type", ",", "float", ")", "or", "issubclass", "(", "to_type", ",", "bool", ")", ":", "return", "False", "else", ":", "return", "True" ]
Central method where we control whether warnings should be displayed
[ "Central", "method", "where", "we", "control", "whether", "warnings", "should", "be", "displayed" ]
python
train
43.75
gwastro/pycbc
pycbc/inference/models/marginalized_gaussian_noise.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/marginalized_gaussian_noise.py#L549-L553
def _margtimephase_loglr(self, mf_snr, opt_snr): """Returns the log likelihood ratio marginalized over time and phase. """ return special.logsumexp(numpy.log(special.i0(mf_snr)), b=self._deltat) - 0.5*opt_snr
[ "def", "_margtimephase_loglr", "(", "self", ",", "mf_snr", ",", "opt_snr", ")", ":", "return", "special", ".", "logsumexp", "(", "numpy", ".", "log", "(", "special", ".", "i0", "(", "mf_snr", ")", ")", ",", "b", "=", "self", ".", "_deltat", ")", "-", "0.5", "*", "opt_snr" ]
Returns the log likelihood ratio marginalized over time and phase.
[ "Returns", "the", "log", "likelihood", "ratio", "marginalized", "over", "time", "and", "phase", "." ]
python
train
52.2
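A hedged numeric sketch of the one-liner above: phase marginalisation produces the Bessel I0 of the matched-filter SNR, time marginalisation the delta-t-weighted logsumexp, minus half the optimal-SNR term. All input values here are hypothetical.

import numpy
from scipy import special

mf_snr = numpy.array([1.0, 2.5, 4.0, 2.0])  # hypothetical matched-filter series
deltat = 1.0 / 4096                         # hypothetical sample spacing
opt_snr = 9.0                               # hypothetical <h|h>

loglr = special.logsumexp(numpy.log(special.i0(mf_snr)),
                          b=deltat) - 0.5 * opt_snr
print(loglr)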
icgood/pymap
pymap/backend/maildir/flags.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/backend/maildir/flags.py#L92-L110
def from_maildir(self, codes: str) -> FrozenSet[Flag]: """Return the set of IMAP flags that correspond to the letter codes. Args: codes: The letter codes to map. """ flags = set() for code in codes: if code == ',': break to_sys = self._to_sys.get(code) if to_sys is not None: flags.add(to_sys) else: to_kwd = self._to_kwd.get(code) if to_kwd is not None: flags.add(to_kwd) return frozenset(flags)
[ "def", "from_maildir", "(", "self", ",", "codes", ":", "str", ")", "->", "FrozenSet", "[", "Flag", "]", ":", "flags", "=", "set", "(", ")", "for", "code", "in", "codes", ":", "if", "code", "==", "','", ":", "break", "to_sys", "=", "self", ".", "_to_sys", ".", "get", "(", "code", ")", "if", "to_sys", "is", "not", "None", ":", "flags", ".", "add", "(", "to_sys", ")", "else", ":", "to_kwd", "=", "self", ".", "_to_kwd", ".", "get", "(", "code", ")", "if", "to_kwd", "is", "not", "None", ":", "flags", ".", "add", "(", "to_kwd", ")", "return", "frozenset", "(", "flags", ")" ]
Return the set of IMAP flags that correspond to the letter codes. Args: codes: The letter codes to map.
[ "Return", "the", "set", "of", "IMAP", "flags", "that", "correspond", "to", "the", "letter", "codes", "." ]
python
train
30.105263
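Illustrative context for from_maildir above: maildir filenames carry their flag letters after the ':2,' marker, and that letter portion is what gets mapped back to IMAP flags. The filename is hypothetical, and maildir_flags stands in for an instance of the enclosing class.

filename = '1571319586.M1P2.host:2,FS'  # hypothetical maildir filename
codes = filename.rsplit(':2,', 1)[1]
print(codes)  # -> 'FS'; standard maildir letters: F=\Flagged, S=\Seen
# An instance of the enclosing class (construction not shown here) would
# map them back: imap_flags = maildir_flags.from_maildir(codes)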
nicolargo/glances
glances/globals.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/globals.py#L44-L53
def safe_makedirs(path): """A safe function for creating a directory tree.""" try: os.makedirs(path) except OSError as err: if err.errno == errno.EEXIST: if not os.path.isdir(path): raise else: raise
[ "def", "safe_makedirs", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "EEXIST", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "else", ":", "raise" ]
A safe function for creating a directory tree.
[ "A", "safe", "function", "for", "creating", "a", "directory", "tree", "." ]
python
train
26.6
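For comparison only (not what this Python 2/3-compatible helper does internally): on Python 3 the same race-free behaviour is available via the exist_ok flag, which likewise raises if the path exists but is not a directory.

import os

os.makedirs('/tmp/glances-example', exist_ok=True)  # hypothetical path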
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L4198-L4255
def dolnp3_0(Data):
    """
    DEPRECATED!! USE dolnp()

    Description: takes a list of dicts with the controlled vocabulary of
    3_0 and calls dolnp on them after reformatting for compatibility.

    Parameters
    __________
    Data : nested list of dictionaries with keys
        dir_dec
        dir_inc
        dir_tilt_correction
        method_codes

    Returns
    -------
    ReturnData : dictionary with keys
        dec : fisher mean dec of data in Data
        inc : fisher mean inc of data in Data
        n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
        n_planes : number of best fit planes [method_code = DE-BFP]
        alpha95 : fisher confidence circle from Data
        R : fisher R value of Data
        K : fisher k value of Data
    Effects
        prints to screen in case of no data
    """
    if len(Data) == 0:
        print("This function requires input Data have at least 1 entry")
        return {}
    if len(Data) == 1:
        ReturnData = {}
        ReturnData["dec"] = Data[0]['dir_dec']
        ReturnData["inc"] = Data[0]['dir_inc']
        ReturnData["n_total"] = '1'
        if "DE-BFP" in Data[0]['method_codes']:
            ReturnData["n_lines"] = '0'
            ReturnData["n_planes"] = '1'
        else:
            ReturnData["n_planes"] = '0'
            ReturnData["n_lines"] = '1'
        ReturnData["alpha95"] = ""
        ReturnData["R"] = ""
        ReturnData["K"] = ""
        return ReturnData
    else:
        LnpData = []
        for n, d in enumerate(Data):
            LnpData.append({})
            LnpData[n]['dec'] = d['dir_dec']
            LnpData[n]['inc'] = d['dir_inc']
            LnpData[n]['tilt_correction'] = d['dir_tilt_correction']
            if 'method_codes' in list(d.keys()):
                if "DE-BFP" in d['method_codes']:
                    LnpData[n]['dir_type'] = 'p'
                else:
                    LnpData[n]['dir_type'] = 'l'
        # get a sample average from all specimens
        ReturnData = dolnp(LnpData, 'dir_type')
        return ReturnData
[ "def", "dolnp3_0", "(", "Data", ")", ":", "if", "len", "(", "Data", ")", "==", "0", ":", "print", "(", "\"This function requires input Data have at least 1 entry\"", ")", "return", "{", "}", "if", "len", "(", "Data", ")", "==", "1", ":", "ReturnData", "=", "{", "}", "ReturnData", "[", "\"dec\"", "]", "=", "Data", "[", "0", "]", "[", "'dir_dec'", "]", "ReturnData", "[", "\"inc\"", "]", "=", "Data", "[", "0", "]", "[", "'dir_inc'", "]", "ReturnData", "[", "\"n_total\"", "]", "=", "'1'", "if", "\"DE-BFP\"", "in", "Data", "[", "0", "]", "[", "'method_codes'", "]", ":", "ReturnData", "[", "\"n_lines\"", "]", "=", "'0'", "ReturnData", "[", "\"n_planes\"", "]", "=", "'1'", "else", ":", "ReturnData", "[", "\"n_planes\"", "]", "=", "'0'", "ReturnData", "[", "\"n_lines\"", "]", "=", "'1'", "ReturnData", "[", "\"alpha95\"", "]", "=", "\"\"", "ReturnData", "[", "\"R\"", "]", "=", "\"\"", "ReturnData", "[", "\"K\"", "]", "=", "\"\"", "return", "ReturnData", "else", ":", "LnpData", "=", "[", "]", "for", "n", ",", "d", "in", "enumerate", "(", "Data", ")", ":", "LnpData", ".", "append", "(", "{", "}", ")", "LnpData", "[", "n", "]", "[", "'dec'", "]", "=", "d", "[", "'dir_dec'", "]", "LnpData", "[", "n", "]", "[", "'inc'", "]", "=", "d", "[", "'dir_inc'", "]", "LnpData", "[", "n", "]", "[", "'tilt_correction'", "]", "=", "d", "[", "'dir_tilt_correction'", "]", "if", "'method_codes'", "in", "list", "(", "d", ".", "keys", "(", ")", ")", ":", "if", "\"DE-BFP\"", "in", "d", "[", "'method_codes'", "]", ":", "LnpData", "[", "n", "]", "[", "'dir_type'", "]", "=", "'p'", "else", ":", "LnpData", "[", "n", "]", "[", "'dir_type'", "]", "=", "'l'", "# get a sample average from all specimens", "ReturnData", "=", "dolnp", "(", "LnpData", ",", "'dir_type'", ")", "return", "ReturnData" ]
DEPRECATED!! USE dolnp()
Description: takes a list of dicts with the controlled vocabulary of
3_0 and calls dolnp on them after reformatting for compatibility.
Parameters
__________
Data : nested list of dictionaries with keys
dir_dec
dir_inc
dir_tilt_correction
method_codes

Returns
-------
ReturnData : dictionary with keys
dec : fisher mean dec of data in Data
inc : fisher mean inc of data in Data
n_lines : number of directed lines [method_code = DE-BFL or DE-FM]
n_planes : number of best fit planes [method_code = DE-BFP]
alpha95 : fisher confidence circle from Data
R : fisher R value of Data
K : fisher k value of Data
Effects
prints to screen in case of no data
[ "DEPRECATED!!", "USE", "dolnp", "()", "Description", ":", "takes", "a", "list", "of", "dicts", "with", "the", "controlled", "vocabulary", "of", "3_0", "and", "calls", "dolnp", "on", "them", "after", "reformatting", "for", "compatibility", ".", "Parameters", "__________", "Data", ":", "nested", "list", "of", "dictionaries", "with", "keys", "dir_dec", "dir_inc", "dir_tilt_correction", "method_codes" ]
python
train
35.327586
webrecorder/pywb
pywb/utils/canonicalize.py
https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/utils/canonicalize.py#L58-L84
def unsurt(surt): """ # Simple surt >>> unsurt('com,example)/') 'example.com/' # Broken surt >>> unsurt('com,example)') 'com,example)' # Long surt >>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\ index.html?a=b?c=)/') 'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/' """ try: index = surt.index(')/') parts = surt[0:index].split(',') parts.reverse() host = '.'.join(parts) host += surt[index + 1:] return host except ValueError: # May not be a valid surt return surt
[ "def", "unsurt", "(", "surt", ")", ":", "try", ":", "index", "=", "surt", ".", "index", "(", "')/'", ")", "parts", "=", "surt", "[", "0", ":", "index", "]", ".", "split", "(", "','", ")", "parts", ".", "reverse", "(", ")", "host", "=", "'.'", ".", "join", "(", "parts", ")", "host", "+=", "surt", "[", "index", "+", "1", ":", "]", "return", "host", "except", "ValueError", ":", "# May not be a valid surt", "return", "surt" ]
# Simple surt >>> unsurt('com,example)/') 'example.com/' # Broken surt >>> unsurt('com,example)') 'com,example)' # Long surt >>> unsurt('suffix,domain,sub,subsub,another,subdomain)/path/file/\ index.html?a=b?c=)/') 'subdomain.another.subsub.sub.domain.suffix/path/file/index.html?a=b?c=)/'
[ "#", "Simple", "surt", ">>>", "unsurt", "(", "com", "example", ")", "/", ")", "example", ".", "com", "/" ]
python
train
22.407407
williamjameshandley/fgivenx
fgivenx/io.py
https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/io.py#L106-L116
def save(self, *args): """ Save cache to file using pickle. Parameters ---------- *args: All but the last argument are inputs to the cached function. The last is the actual value of the function. """ with open(self.file_root + '.pkl', "wb") as f: pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)
[ "def", "save", "(", "self", ",", "*", "args", ")", ":", "with", "open", "(", "self", ".", "file_root", "+", "'.pkl'", ",", "\"wb\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "args", ",", "f", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")" ]
Save cache to file using pickle. Parameters ---------- *args: All but the last argument are inputs to the cached function. The last is the actual value of the function.
[ "Save", "cache", "to", "file", "using", "pickle", "." ]
python
train
34.181818
mk-fg/feedjack
feedjack/models.py
https://github.com/mk-fg/feedjack/blob/3fe65c0f66dc2cfdf45834aaa7235ec9f81b3ca3/feedjack/models.py#L616-L622
def update_handler(feeds): '''Update all cross-referencing filters results for feeds and others, related to them. Intended to be called from non-Feed update hooks (like new Post saving).''' # Check if this call is a result of actions initiated from # one of the hooks in a higher frame (resulting in recursion). if Feed._filters_update_handler_lock: return return Feed._filters_update_handler(Feed, feeds, force=True)
[ "def", "update_handler", "(", "feeds", ")", ":", "# Check if this call is a result of actions initiated from", "# one of the hooks in a higher frame (resulting in recursion).", "if", "Feed", ".", "_filters_update_handler_lock", ":", "return", "return", "Feed", ".", "_filters_update_handler", "(", "Feed", ",", "feeds", ",", "force", "=", "True", ")" ]
Update all cross-referencing filters results for feeds and others, related to them. Intended to be called from non-Feed update hooks (like new Post saving).
[ "Update", "all", "cross", "-", "referencing", "filters", "results", "for", "feeds", "and", "others", "related", "to", "them", ".", "Intended", "to", "be", "called", "from", "non", "-", "Feed", "update", "hooks", "(", "like", "new", "Post", "saving", ")", "." ]
python
train
60.714286
OSSOS/MOP
src/jjk/preproc/MOPplot.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/MOPplot.py#L14-L38
def load_edbfile(file=None): """Load the targets from a file""" import ephem,string,math if file is None: import tkFileDialog try: file=tkFileDialog.askopenfilename() except: return if file is None or file == '': return f=open(file) lines=f.readlines() f.close() for line in lines: p=line.split(',') name=p[0].strip().upper() mpc_objs[name]=ephem.readdb(line) mpc_objs[name].compute() objInfoDict[name]="%6s %6s %6s\n" % ( string.center("a",6), string.center("e",6), string.center("i",6) ) objInfoDict[name]+="%6.2f %6.3f %6.2f\n" % (mpc_objs[name]._a,mpc_objs[name]._e,math.degrees(mpc_objs[name]._inc)) objInfoDict[name]+="%7.2f %7.2f\n" % ( mpc_objs[name].earth_distance, mpc_objs[name].mag) doplot(mpc_objs)
[ "def", "load_edbfile", "(", "file", "=", "None", ")", ":", "import", "ephem", ",", "string", ",", "math", "if", "file", "is", "None", ":", "import", "tkFileDialog", "try", ":", "file", "=", "tkFileDialog", ".", "askopenfilename", "(", ")", "except", ":", "return", "if", "file", "is", "None", "or", "file", "==", "''", ":", "return", "f", "=", "open", "(", "file", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "for", "line", "in", "lines", ":", "p", "=", "line", ".", "split", "(", "','", ")", "name", "=", "p", "[", "0", "]", ".", "strip", "(", ")", ".", "upper", "(", ")", "mpc_objs", "[", "name", "]", "=", "ephem", ".", "readdb", "(", "line", ")", "mpc_objs", "[", "name", "]", ".", "compute", "(", ")", "objInfoDict", "[", "name", "]", "=", "\"%6s %6s %6s\\n\"", "%", "(", "string", ".", "center", "(", "\"a\"", ",", "6", ")", ",", "string", ".", "center", "(", "\"e\"", ",", "6", ")", ",", "string", ".", "center", "(", "\"i\"", ",", "6", ")", ")", "objInfoDict", "[", "name", "]", "+=", "\"%6.2f %6.3f %6.2f\\n\"", "%", "(", "mpc_objs", "[", "name", "]", ".", "_a", ",", "mpc_objs", "[", "name", "]", ".", "_e", ",", "math", ".", "degrees", "(", "mpc_objs", "[", "name", "]", ".", "_inc", ")", ")", "objInfoDict", "[", "name", "]", "+=", "\"%7.2f %7.2f\\n\"", "%", "(", "mpc_objs", "[", "name", "]", ".", "earth_distance", ",", "mpc_objs", "[", "name", "]", ".", "mag", ")", "doplot", "(", "mpc_objs", ")" ]
Load the targets from a file
[ "Load", "the", "targets", "from", "a", "file" ]
python
train
33.4
ambitioninc/rabbitmq-admin
rabbitmq_admin/base.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L87-L98
def _api_post(self, url, **kwargs): """ A convenience wrapper for _post. Adds headers, auth and base url by default """ kwargs['url'] = self.url + url kwargs['auth'] = self.auth headers = deepcopy(self.headers) headers.update(kwargs.get('headers', {})) kwargs['headers'] = headers self._post(**kwargs)
[ "def", "_api_post", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'url'", "]", "=", "self", ".", "url", "+", "url", "kwargs", "[", "'auth'", "]", "=", "self", ".", "auth", "headers", "=", "deepcopy", "(", "self", ".", "headers", ")", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ",", "{", "}", ")", ")", "kwargs", "[", "'headers'", "]", "=", "headers", "self", ".", "_post", "(", "*", "*", "kwargs", ")" ]
A convenience wrapper for _post. Adds headers, auth and base url by default
[ "A", "convenience", "wrapper", "for", "_post", ".", "Adds", "headers", "auth", "and", "base", "url", "by", "default" ]
python
train
30.916667
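A hedged usage sketch for the wrapper above, following the package's documented AdminAPI constructor; the host, credentials, endpoint and body are assumptions. Extra keyword arguments flow through to requests, so a JSON body can be passed directly.

from rabbitmq_admin import AdminAPI

api = AdminAPI(url='http://localhost:15672', auth=('guest', 'guest'))
# _api_post is the internal helper the public methods funnel through:
api._api_post('/api/definitions', json={'queues': []})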
chop-dbhi/varify-data-warehouse
vdw/samples/receivers.py
https://github.com/chop-dbhi/varify-data-warehouse/blob/1600ee1bc5fae6c68fd03b23624467298570cca8/vdw/samples/receivers.py#L103-L117
def update_batch_count(instance, **kwargs): """Sample post-save handler to update the sample's batch count. Batches are unpublished by default (to prevent publishing empty batches). If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published automatically when at least one published sample is in the batch. """ batch = instance.batch count = batch.samples.filter(published=True).count() if count != batch.count: batch.count = count if AUTO_PUBLISH_BATCH: batch.published = bool(count) batch.save()
[ "def", "update_batch_count", "(", "instance", ",", "*", "*", "kwargs", ")", ":", "batch", "=", "instance", ".", "batch", "count", "=", "batch", ".", "samples", ".", "filter", "(", "published", "=", "True", ")", ".", "count", "(", ")", "if", "count", "!=", "batch", ".", "count", ":", "batch", ".", "count", "=", "count", "if", "AUTO_PUBLISH_BATCH", ":", "batch", ".", "published", "=", "bool", "(", "count", ")", "batch", ".", "save", "(", ")" ]
Sample post-save handler to update the sample's batch count. Batches are unpublished by default (to prevent publishing empty batches). If the `AUTO_PUBLISH_BATCH` setting is true, the batch will be published automatically when at least one published sample is in the batch.
[ "Sample", "post", "-", "save", "handler", "to", "update", "the", "sample", "s", "batch", "count", "." ]
python
train
37.8
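A hedged sketch of how such a post-save handler is typically wired up with Django signals; the Sample model import path is an assumption based on this repo's layout.

from django.db.models.signals import post_save

from vdw.samples.models import Sample  # assumed model location

post_save.connect(update_batch_count, sender=Sample)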
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L334-L458
def read_settings(self):
    """Set the dock state from QSettings.

    Do this on init and after changing options in the options dialog.
    """
    extent = setting('user_extent', None, str)
    if extent:
        extent = QgsGeometry.fromWkt(extent)
        if not extent.isGeosValid():
            extent = None

    crs = setting('user_extent_crs', None, str)
    if crs:
        crs = QgsCoordinateReferenceSystem(crs)
        if not crs.isValid():
            crs = None

    mode = setting('analysis_extents_mode', HAZARD_EXPOSURE_VIEW)
    if crs and extent and mode == HAZARD_EXPOSURE_BOUNDINGBOX:
        self.extent.set_user_extent(extent, crs)

    # It's better to set the show_rubber_bands after setting the user
    # extent.
    self.extent.show_rubber_bands = setting(
        'showRubberBands', False, bool)

    flag = setting('visibleLayersOnlyFlag', True, bool)
    self.show_only_visible_layers_flag = flag

    flag = setting('set_layer_from_title_flag', True, bool)
    self.set_layer_from_title_flag = flag

    self.zoom_to_impact_flag = setting('setZoomToImpactFlag', True, bool)

    # whether exposure layer should be hidden after model completes
    self.hide_exposure_flag = setting('setHideExposureFlag', False, bool)

    # whether to show or not dev only options
    self.developer_mode = setting('developer_mode', False, bool)

    # If we use selected features only
    flag = setting('useSelectedFeaturesOnly', True, bool)
    self.use_selected_features_only = flag

    # We need to re-trigger the aggregation combobox with the new flag.
    index = self.aggregation_layer_combo.currentIndex()
    self.aggregation_layer_combo.setCurrentIndex(index)

    # whether to show or not a custom Logo
    flag = setting('organisation_logo_path', supporters_logo_path(), str)
    self.organisation_logo_path = flag

    # Changed default to False for new users in 3.2 - see #2171
    show_logos_flag = setting(
        'showOrganisationLogoInDockFlag', False, bool)

    # Flags used to check for a valid organization logo
    invalid_logo_size = False
    logo_not_exist = False

    if self.organisation_logo_path:
        dock_width = float(self.width())

        # Don't let the image be more than 100px in height
        maximum_height = 100.0  # px
        pixmap = QPixmap(self.organisation_logo_path)
        # it will throw OverflowError if pixmap.height() == 0
        if pixmap.height() > 0:
            height_ratio = maximum_height / pixmap.height()
            maximum_width = int(pixmap.width() * height_ratio)

            # Don't let the image be more than the dock width wide
            if maximum_width > dock_width:
                width_ratio = dock_width / float(pixmap.width())
                maximum_height = int(pixmap.height() * width_ratio)
                maximum_width = dock_width

            too_high = pixmap.height() > maximum_height
            too_wide = pixmap.width() > dock_width
            if too_wide or too_high:
                pixmap = pixmap.scaled(
                    maximum_width, maximum_height, Qt.KeepAspectRatio)

            self.organisation_logo.setMaximumWidth(maximum_width)
            # We have manually scaled using logic above
            self.organisation_logo.setScaledContents(False)
            self.organisation_logo.setPixmap(pixmap)
        else:
            # handle zero pixmap height and/or nonexistent files
            if not os.path.exists(self.organisation_logo_path):
                logo_not_exist = True
            else:
                invalid_logo_size = True

    if (self.organisation_logo_path and show_logos_flag and
            not invalid_logo_size and not logo_not_exist):
        self._show_organisation_logo()
    else:
        self.organisation_logo.hide()

    # RM: this is a fix for nonexistent organization logo or zero height
    if logo_not_exist:
        # noinspection PyCallByClass
        QMessageBox.warning(
            self, self.tr('InaSAFE %s' % self.inasafe_version),
            self.tr(
                'The file for organization logo in %s doesn\'t exist. '
                'Please check in Plugins -> InaSAFE -> Options that your '
                'paths are still correct and update them if needed.'
                % self.organisation_logo_path
            ), QMessageBox.Ok)
    if invalid_logo_size:
        # noinspection PyCallByClass
        QMessageBox.warning(
            self,
            self.tr('InaSAFE %s' % self.inasafe_version),
            self.tr(
                'The file for organization logo has zero height. Please '
                'provide a valid file for the organization logo.'
            ), QMessageBox.Ok)
    if logo_not_exist or invalid_logo_size:
        set_setting('organisation_logo_path', supporters_logo_path())
% self.organisation_logo_path ), QMessageBox.Ok) if invalid_logo_size: # noinspection PyCallByClass QMessageBox.warning( self, self.tr('InaSAFE %s' % self.inasafe_version), self.tr( 'The file for organization logo has zero height. Please ' 'provide valid file for organization logo.' ), QMessageBox.Ok) if logo_not_exist or invalid_logo_size: set_setting('organisation_logo_path', supporters_logo_path())
[ "def", "read_settings", "(", "self", ")", ":", "extent", "=", "setting", "(", "'user_extent'", ",", "None", ",", "str", ")", "if", "extent", ":", "extent", "=", "QgsGeometry", ".", "fromWkt", "(", "extent", ")", "if", "not", "extent", ".", "isGeosValid", "(", ")", ":", "extent", "=", "None", "crs", "=", "setting", "(", "'user_extent_crs'", ",", "None", ",", "str", ")", "if", "crs", ":", "crs", "=", "QgsCoordinateReferenceSystem", "(", "crs", ")", "if", "not", "crs", ".", "isValid", "(", ")", ":", "crs", "=", "None", "mode", "=", "setting", "(", "'analysis_extents_mode'", ",", "HAZARD_EXPOSURE_VIEW", ")", "if", "crs", "and", "extent", "and", "mode", "==", "HAZARD_EXPOSURE_BOUNDINGBOX", ":", "self", ".", "extent", ".", "set_user_extent", "(", "extent", ",", "crs", ")", "# It's better to set the show_rubber_bands after setting the user", "# extent.", "self", ".", "extent", ".", "show_rubber_bands", "=", "setting", "(", "'showRubberBands'", ",", "False", ",", "bool", ")", "flag", "=", "setting", "(", "'visibleLayersOnlyFlag'", ",", "True", ",", "bool", ")", "self", ".", "show_only_visible_layers_flag", "=", "flag", "flag", "=", "setting", "(", "'set_layer_from_title_flag'", ",", "True", ",", "bool", ")", "self", ".", "set_layer_from_title_flag", "=", "flag", "self", ".", "zoom_to_impact_flag", "=", "setting", "(", "'setZoomToImpactFlag'", ",", "True", ",", "bool", ")", "# whether exposure layer should be hidden after model completes", "self", ".", "hide_exposure_flag", "=", "setting", "(", "'setHideExposureFlag'", ",", "False", ",", "bool", ")", "# whether to show or not dev only options", "self", ".", "developer_mode", "=", "setting", "(", "'developer_mode'", ",", "False", ",", "bool", ")", "# If we use selected features only", "flag", "=", "setting", "(", "'useSelectedFeaturesOnly'", ",", "True", ",", "bool", ")", "self", ".", "use_selected_features_only", "=", "flag", "# We need to re-trigger the aggregation combobox with the new flag.", "index", "=", "self", ".", "aggregation_layer_combo", ".", "currentIndex", "(", ")", "self", ".", "aggregation_layer_combo", ".", "setCurrentIndex", "(", "index", ")", "# whether to show or not a custom Logo", "flag", "=", "setting", "(", "'organisation_logo_path'", ",", "supporters_logo_path", "(", ")", ",", "str", ")", "self", ".", "organisation_logo_path", "=", "flag", "# Changed default to False for new users in 3.2 - see #2171", "show_logos_flag", "=", "setting", "(", "'showOrganisationLogoInDockFlag'", ",", "False", ",", "bool", ")", "# Flag to check valid organization logo", "invalid_logo_size", "=", "False", "logo_not_exist", "=", "False", "if", "self", ".", "organisation_logo_path", ":", "dock_width", "=", "float", "(", "self", ".", "width", "(", ")", ")", "# Dont let the image be more than 100px height", "maximum_height", "=", "100.0", "# px", "pixmap", "=", "QPixmap", "(", "self", ".", "organisation_logo_path", ")", "# it will throw Overflow Error if pixmap.height() == 0", "if", "pixmap", ".", "height", "(", ")", ">", "0", ":", "height_ratio", "=", "maximum_height", "/", "pixmap", ".", "height", "(", ")", "maximum_width", "=", "int", "(", "pixmap", ".", "width", "(", ")", "*", "height_ratio", ")", "# Don't let the image be more than the dock width wide", "if", "maximum_width", ">", "dock_width", ":", "width_ratio", "=", "dock_width", "/", "float", "(", "pixmap", ".", "width", "(", ")", ")", "maximum_height", "=", "int", "(", "pixmap", ".", "height", "(", ")", "*", "width_ratio", ")", "maximum_width", "=", "dock_width", "too_high", "=", 
"pixmap", ".", "height", "(", ")", ">", "maximum_height", "too_wide", "=", "pixmap", ".", "width", "(", ")", ">", "dock_width", "if", "too_wide", "or", "too_high", ":", "pixmap", "=", "pixmap", ".", "scaled", "(", "maximum_width", ",", "maximum_height", ",", "Qt", ".", "KeepAspectRatio", ")", "self", ".", "organisation_logo", ".", "setMaximumWidth", "(", "maximum_width", ")", "# We have manually scaled using logic above", "self", ".", "organisation_logo", ".", "setScaledContents", "(", "False", ")", "self", ".", "organisation_logo", ".", "setPixmap", "(", "pixmap", ")", "else", ":", "# handle zero pixmap height and or nonexistent files", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "organisation_logo_path", ")", ":", "logo_not_exist", "=", "True", "else", ":", "invalid_logo_size", "=", "True", "if", "(", "self", ".", "organisation_logo_path", "and", "show_logos_flag", "and", "not", "invalid_logo_size", "and", "not", "logo_not_exist", ")", ":", "self", ".", "_show_organisation_logo", "(", ")", "else", ":", "self", ".", "organisation_logo", ".", "hide", "(", ")", "# RM: this is a fix for nonexistent organization logo or zero height", "if", "logo_not_exist", ":", "# noinspection PyCallByClass", "QMessageBox", ".", "warning", "(", "self", ",", "self", ".", "tr", "(", "'InaSAFE %s'", "%", "self", ".", "inasafe_version", ")", ",", "self", ".", "tr", "(", "'The file for organization logo in %s doesn\\'t exists. '", "'Please check in Plugins -> InaSAFE -> Options that your '", "'paths are still correct and update them if needed.'", "%", "self", ".", "organisation_logo_path", ")", ",", "QMessageBox", ".", "Ok", ")", "if", "invalid_logo_size", ":", "# noinspection PyCallByClass", "QMessageBox", ".", "warning", "(", "self", ",", "self", ".", "tr", "(", "'InaSAFE %s'", "%", "self", ".", "inasafe_version", ")", ",", "self", ".", "tr", "(", "'The file for organization logo has zero height. Please '", "'provide valid file for organization logo.'", ")", ",", "QMessageBox", ".", "Ok", ")", "if", "logo_not_exist", "or", "invalid_logo_size", ":", "set_setting", "(", "'organisation_logo_path'", ",", "supporters_logo_path", "(", ")", ")" ]
Set the dock state from QSettings. Do this on init and after changing options in the options dialog.
[ "Set", "the", "dock", "state", "from", "QSettings", "." ]
python
train
40.688
ktdreyer/txbugzilla
txbugzilla/__init__.py
https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/txbugzilla/__init__.py#L100-L114
def assign(self, bugids, user): """ Assign a bug to a user. param bugid: ``int``, bug ID number. param user: ``str``, the login name of the user to whom the bug is assigned returns: deferred that when fired returns True if the change succeeded, False if the change was unnecessary (because the user is already assigned.) """ payload = {'ids': (bugids,), 'assigned_to': user} d = self.call('Bug.update', payload) d.addCallback(self._parse_bug_assigned_callback) return d
[ "def", "assign", "(", "self", ",", "bugids", ",", "user", ")", ":", "payload", "=", "{", "'ids'", ":", "(", "bugids", ",", ")", ",", "'assigned_to'", ":", "user", "}", "d", "=", "self", ".", "call", "(", "'Bug.update'", ",", "payload", ")", "d", ".", "addCallback", "(", "self", ".", "_parse_bug_assigned_callback", ")", "return", "d" ]
Assign a bug to a user. param bugids: ``int``, bug ID number. param user: ``str``, the login name of the user to whom the bug is assigned returns: deferred that when fired returns True if the change succeeded, False if the change was unnecessary (because the user is already assigned).
[ "Assign", "a", "bug", "to", "a", "user", "." ]
python
train
39.333333
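A minimal usage sketch for the `assign` method above, assuming a connected txbugzilla client in the variable `bz`; the bug ID and login are illustrative placeholders:

from twisted.internet import reactor

def on_assigned(changed):
    # changed is True if the assignment was applied,
    # False if the user already owned the bug
    print('assignment changed:', changed)
    reactor.stop()

d = bz.assign(12345, 'someone@example.com')  # hypothetical bug ID and login
d.addCallback(on_assigned)
reactor.run()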
saltstack/salt
salt/modules/syslog_ng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/syslog_ng.py#L1012-L1086
def start(name=None, user=None, group=None, chroot=None, caps=None, no_caps=False, pidfile=None, enable_core=False, fd_limit=None, verbose=False, debug=False, trace=False, yydebug=False, persist_file=None, control=None, worker_threads=None): ''' Ensures, that syslog-ng is started via the given parameters. This function is intended to be used from the state module. Users shouldn't use this function, if the service module is available on their system. If :mod:`syslog_ng.set_config_file <salt.modules.syslog_ng.set_binary_path>`, is called before, this function will use the set binary path. CLI Example: .. code-block:: bash salt '*' syslog_ng.start ''' params = [] _add_cli_param(params, 'user', user) _add_cli_param(params, 'group', group) _add_cli_param(params, 'chroot', chroot) _add_cli_param(params, 'caps', caps) _add_boolean_cli_param(params, 'no-capse', no_caps) _add_cli_param(params, 'pidfile', pidfile) _add_boolean_cli_param(params, 'enable-core', enable_core) _add_cli_param(params, 'fd-limit', fd_limit) _add_boolean_cli_param(params, 'verbose', verbose) _add_boolean_cli_param(params, 'debug', debug) _add_boolean_cli_param(params, 'trace', trace) _add_boolean_cli_param(params, 'yydebug', yydebug) _add_cli_param(params, 'cfgfile', __SYSLOG_NG_CONFIG_FILE) _add_boolean_cli_param(params, 'persist-file', persist_file) _add_cli_param(params, 'control', control) _add_cli_param(params, 'worker-threads', worker_threads) if __SYSLOG_NG_BINARY_PATH: syslog_ng_binary = os.path.join(__SYSLOG_NG_BINARY_PATH, 'syslog-ng') command = [syslog_ng_binary] + params if __opts__.get('test', False): comment = 'Syslog_ng state module will start {0}'.format(command) return _format_state_result(name, result=None, comment=comment) result = __salt__['cmd.run_all'](command, python_shell=False) else: command = ['syslog-ng'] + params if __opts__.get('test', False): comment = 'Syslog_ng state module will start {0}'.format(command) return _format_state_result(name, result=None, comment=comment) result = __salt__['cmd.run_all'](command, python_shell=False) if result['pid'] > 0: succ = True else: succ = False return _format_state_result( name, result=succ, changes={'new': ' '.join(command), 'old': ''} )
[ "def", "start", "(", "name", "=", "None", ",", "user", "=", "None", ",", "group", "=", "None", ",", "chroot", "=", "None", ",", "caps", "=", "None", ",", "no_caps", "=", "False", ",", "pidfile", "=", "None", ",", "enable_core", "=", "False", ",", "fd_limit", "=", "None", ",", "verbose", "=", "False", ",", "debug", "=", "False", ",", "trace", "=", "False", ",", "yydebug", "=", "False", ",", "persist_file", "=", "None", ",", "control", "=", "None", ",", "worker_threads", "=", "None", ")", ":", "params", "=", "[", "]", "_add_cli_param", "(", "params", ",", "'user'", ",", "user", ")", "_add_cli_param", "(", "params", ",", "'group'", ",", "group", ")", "_add_cli_param", "(", "params", ",", "'chroot'", ",", "chroot", ")", "_add_cli_param", "(", "params", ",", "'caps'", ",", "caps", ")", "_add_boolean_cli_param", "(", "params", ",", "'no-capse'", ",", "no_caps", ")", "_add_cli_param", "(", "params", ",", "'pidfile'", ",", "pidfile", ")", "_add_boolean_cli_param", "(", "params", ",", "'enable-core'", ",", "enable_core", ")", "_add_cli_param", "(", "params", ",", "'fd-limit'", ",", "fd_limit", ")", "_add_boolean_cli_param", "(", "params", ",", "'verbose'", ",", "verbose", ")", "_add_boolean_cli_param", "(", "params", ",", "'debug'", ",", "debug", ")", "_add_boolean_cli_param", "(", "params", ",", "'trace'", ",", "trace", ")", "_add_boolean_cli_param", "(", "params", ",", "'yydebug'", ",", "yydebug", ")", "_add_cli_param", "(", "params", ",", "'cfgfile'", ",", "__SYSLOG_NG_CONFIG_FILE", ")", "_add_boolean_cli_param", "(", "params", ",", "'persist-file'", ",", "persist_file", ")", "_add_cli_param", "(", "params", ",", "'control'", ",", "control", ")", "_add_cli_param", "(", "params", ",", "'worker-threads'", ",", "worker_threads", ")", "if", "__SYSLOG_NG_BINARY_PATH", ":", "syslog_ng_binary", "=", "os", ".", "path", ".", "join", "(", "__SYSLOG_NG_BINARY_PATH", ",", "'syslog-ng'", ")", "command", "=", "[", "syslog_ng_binary", "]", "+", "params", "if", "__opts__", ".", "get", "(", "'test'", ",", "False", ")", ":", "comment", "=", "'Syslog_ng state module will start {0}'", ".", "format", "(", "command", ")", "return", "_format_state_result", "(", "name", ",", "result", "=", "None", ",", "comment", "=", "comment", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "command", ",", "python_shell", "=", "False", ")", "else", ":", "command", "=", "[", "'syslog-ng'", "]", "+", "params", "if", "__opts__", ".", "get", "(", "'test'", ",", "False", ")", ":", "comment", "=", "'Syslog_ng state module will start {0}'", ".", "format", "(", "command", ")", "return", "_format_state_result", "(", "name", ",", "result", "=", "None", ",", "comment", "=", "comment", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "command", ",", "python_shell", "=", "False", ")", "if", "result", "[", "'pid'", "]", ">", "0", ":", "succ", "=", "True", "else", ":", "succ", "=", "False", "return", "_format_state_result", "(", "name", ",", "result", "=", "succ", ",", "changes", "=", "{", "'new'", ":", "' '", ".", "join", "(", "command", ")", ",", "'old'", ":", "''", "}", ")" ]
Ensures that syslog-ng is started via the given parameters. This function is intended to be used from the state module. Users shouldn't use this function if the service module is available on their system. If :mod:`syslog_ng.set_config_file <salt.modules.syslog_ng.set_binary_path>` is called before, this function will use the set binary path. CLI Example: .. code-block:: bash salt '*' syslog_ng.start
[ "Ensures", "that", "syslog", "-", "ng", "is", "started", "via", "the", "given", "parameters", ".", "This", "function", "is", "intended", "to", "be", "used", "from", "the", "state", "module", "." ]
python
train
34.186667
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5514-L5520
def annotation(self, type, set=None): """Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found""" l = self.count(type,set,True,default_ignore_annotations) if len(l) >= 1: return l[0] else: raise NoSuchAnnotation()
[ "def", "annotation", "(", "self", ",", "type", ",", "set", "=", "None", ")", ":", "l", "=", "self", ".", "count", "(", "type", ",", "set", ",", "True", ",", "default_ignore_annotations", ")", "if", "len", "(", "l", ")", ">=", "1", ":", "return", "l", "[", "0", "]", "else", ":", "raise", "NoSuchAnnotation", "(", ")" ]
Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found
[ "Will", "return", "a", "**", "single", "**", "annotation", "(", "even", "if", "there", "are", "multiple", ")", ".", "Raises", "a", "NoSuchAnnotation", "exception", "if", "none", "was", "found" ]
python
train
47.571429
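A sketch of how `annotation` is typically called on a FoLiA element; the document path is a placeholder and the word lookup assumes the usual pynlpl API:

from pynlpl.formats import folia

doc = folia.Document(file='/path/to/document.folia.xml')  # placeholder path
word = doc.words(0)  # first word in the document
try:
    pos = word.annotation(folia.PosAnnotation)
    print(pos.cls)
except folia.NoSuchAnnotation:
    print('no part-of-speech annotation on this word')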
ArangoDB-Community/pyArango
pyArango/database.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/database.py#L83-L122
def createCollection(self, className = 'Collection', **colProperties) : """Creates a collection and returns it. ClassName the name of a class inheriting from Collection or Egdes, it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges. Use colProperties to put things such as 'waitForSync = True' (see ArangoDB's doc for a full list of possible arugments). If a '_properties' dictionary is defined in the collection schema, arguments to this function overide it""" colClass = COL.getCollectionClass(className) if len(colProperties) > 0 : colProperties = dict(colProperties) else : try : colProperties = dict(colClass._properties) except AttributeError : colProperties = {} if className != 'Collection' and className != 'Edges' : colProperties['name'] = className else : if 'name' not in colProperties : raise ValueError("a 'name' argument mush be supplied if you want to create a generic collection") if colProperties['name'] in self.collections : raise CreationError("Database %s already has a collection named %s" % (self.name, colProperties['name']) ) if issubclass(colClass, COL.Edges) or colClass.__class__ is COL.Edges: colProperties["type"] = CONST.COLLECTION_EDGE_TYPE else : colProperties["type"] = CONST.COLLECTION_DOCUMENT_TYPE payload = json.dumps(colProperties, default=str) r = self.connection.session.post(self.collectionsURL, data = payload) data = r.json() if r.status_code == 200 and not data["error"] : col = colClass(self, data) self.collections[col.name] = col return self.collections[col.name] else : raise CreationError(data["errorMessage"], data)
[ "def", "createCollection", "(", "self", ",", "className", "=", "'Collection'", ",", "*", "*", "colProperties", ")", ":", "colClass", "=", "COL", ".", "getCollectionClass", "(", "className", ")", "if", "len", "(", "colProperties", ")", ">", "0", ":", "colProperties", "=", "dict", "(", "colProperties", ")", "else", ":", "try", ":", "colProperties", "=", "dict", "(", "colClass", ".", "_properties", ")", "except", "AttributeError", ":", "colProperties", "=", "{", "}", "if", "className", "!=", "'Collection'", "and", "className", "!=", "'Edges'", ":", "colProperties", "[", "'name'", "]", "=", "className", "else", ":", "if", "'name'", "not", "in", "colProperties", ":", "raise", "ValueError", "(", "\"a 'name' argument mush be supplied if you want to create a generic collection\"", ")", "if", "colProperties", "[", "'name'", "]", "in", "self", ".", "collections", ":", "raise", "CreationError", "(", "\"Database %s already has a collection named %s\"", "%", "(", "self", ".", "name", ",", "colProperties", "[", "'name'", "]", ")", ")", "if", "issubclass", "(", "colClass", ",", "COL", ".", "Edges", ")", "or", "colClass", ".", "__class__", "is", "COL", ".", "Edges", ":", "colProperties", "[", "\"type\"", "]", "=", "CONST", ".", "COLLECTION_EDGE_TYPE", "else", ":", "colProperties", "[", "\"type\"", "]", "=", "CONST", ".", "COLLECTION_DOCUMENT_TYPE", "payload", "=", "json", ".", "dumps", "(", "colProperties", ",", "default", "=", "str", ")", "r", "=", "self", ".", "connection", ".", "session", ".", "post", "(", "self", ".", "collectionsURL", ",", "data", "=", "payload", ")", "data", "=", "r", ".", "json", "(", ")", "if", "r", ".", "status_code", "==", "200", "and", "not", "data", "[", "\"error\"", "]", ":", "col", "=", "colClass", "(", "self", ",", "data", ")", "self", ".", "collections", "[", "col", ".", "name", "]", "=", "col", "return", "self", ".", "collections", "[", "col", ".", "name", "]", "else", ":", "raise", "CreationError", "(", "data", "[", "\"errorMessage\"", "]", ",", "data", ")" ]
Creates a collection and returns it. className is the name of a class inheriting from Collection or Edges; it can also be set to 'Collection' or 'Edges' in order to create untyped collections of documents or edges. Use colProperties to pass things such as 'waitForSync = True' (see ArangoDB's doc for a full list of possible arguments). If a '_properties' dictionary is defined in the collection schema, arguments to this function override it
[ "Creates", "a", "collection", "and", "returns", "it", ".", "ClassName", "the", "name", "of", "a", "class", "inheriting", "from", "Collection", "or", "Egdes", "it", "can", "also", "be", "set", "to", "Collection", "or", "Edges", "in", "order", "to", "create", "untyped", "collections", "of", "documents", "or", "edges", ".", "Use", "colProperties", "to", "put", "things", "such", "as", "waitForSync", "=", "True", "(", "see", "ArangoDB", "s", "doc", "for", "a", "full", "list", "of", "possible", "arugments", ")", ".", "If", "a", "_properties", "dictionary", "is", "defined", "in", "the", "collection", "schema", "arguments", "to", "this", "function", "overide", "it" ]
python
train
47.925
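A minimal usage sketch, assuming a reachable ArangoDB server and placeholder credentials; note that a generic collection needs a 'name' in colProperties, as the code above enforces:

from pyArango.connection import Connection

conn = Connection(username='root', password='secret')  # placeholder credentials
db = conn['testdb']  # assumes this database already exists
users = db.createCollection(name='users', waitForSync=True)   # generic document collection
links = db.createCollection(className='Edges', name='links')  # untyped edge collection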
portfors-lab/sparkle
sparkle/gui/stim/stimulusview.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L178-L193
def visualRectRC(self, row, column): """The rectangle for the bounds of the item at *row*, *column* :param row: row of the item :type row: int :param column: column of the item :type column: int :returns: :qtdoc:`QRect` -- rectangle of the borders of the item """ rect = self._rects[row][column] if rect.isValid(): return QtCore.QRect(rect.x() - self.horizontalScrollBar().value(), rect.y() - self.verticalScrollBar().value(), rect.width(), rect.height()) else: return rect
[ "def", "visualRectRC", "(", "self", ",", "row", ",", "column", ")", ":", "rect", "=", "self", ".", "_rects", "[", "row", "]", "[", "column", "]", "if", "rect", ".", "isValid", "(", ")", ":", "return", "QtCore", ".", "QRect", "(", "rect", ".", "x", "(", ")", "-", "self", ".", "horizontalScrollBar", "(", ")", ".", "value", "(", ")", ",", "rect", ".", "y", "(", ")", "-", "self", ".", "verticalScrollBar", "(", ")", ".", "value", "(", ")", ",", "rect", ".", "width", "(", ")", ",", "rect", ".", "height", "(", ")", ")", "else", ":", "return", "rect" ]
The rectangle for the bounds of the item at *row*, *column* :param row: row of the item :type row: int :param column: column of the item :type column: int :returns: :qtdoc:`QRect` -- rectangle of the borders of the item
[ "The", "rectangle", "for", "the", "bounds", "of", "the", "item", "at", "*", "row", "*", "*", "column", "*" ]
python
train
38.3125
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1494-L1503
def triangle(self, verts=True, lines=True): """ Converts actor polygons and strips to triangles. """ tf = vtk.vtkTriangleFilter() tf.SetPassLines(lines) tf.SetPassVerts(verts) tf.SetInputData(self.poly) tf.Update() return self.updateMesh(tf.GetOutput())
[ "def", "triangle", "(", "self", ",", "verts", "=", "True", ",", "lines", "=", "True", ")", ":", "tf", "=", "vtk", ".", "vtkTriangleFilter", "(", ")", "tf", ".", "SetPassLines", "(", "lines", ")", "tf", ".", "SetPassVerts", "(", "verts", ")", "tf", ".", "SetInputData", "(", "self", ".", "poly", ")", "tf", ".", "Update", "(", ")", "return", "self", ".", "updateMesh", "(", "tf", ".", "GetOutput", "(", ")", ")" ]
Converts actor polygons and strips to triangles.
[ "Converts", "actor", "polygons", "and", "strips", "to", "triangles", "." ]
python
train
31.6
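A short usage sketch; the mesh filename is a placeholder:

from vtkplotter import load, show

actor = load('mesh.ply')   # placeholder mesh file
tri = actor.triangle()     # convert polygons and strips to triangles
show(tri)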
acorg/dark-matter
bin/compare-consensuses.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/bin/compare-consensuses.py#L14-L34
def makeOuputDir(outputDir, force): """ Create or check for an output directory. @param outputDir: A C{str} output directory name, or C{None}. @param force: If C{True}, allow overwriting of pre-existing files. @return: The C{str} output directory name. """ if outputDir: if exists(outputDir): if not force: print('Will not overwrite pre-existing files. Use --force to ' 'make me.', file=sys.stderr) sys.exit(1) else: mkdir(outputDir) else: outputDir = mkdtemp() print('Writing output files to %s' % outputDir) return outputDir
[ "def", "makeOuputDir", "(", "outputDir", ",", "force", ")", ":", "if", "outputDir", ":", "if", "exists", "(", "outputDir", ")", ":", "if", "not", "force", ":", "print", "(", "'Will not overwrite pre-existing files. Use --force to '", "'make me.'", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "mkdir", "(", "outputDir", ")", "else", ":", "outputDir", "=", "mkdtemp", "(", ")", "print", "(", "'Writing output files to %s'", "%", "outputDir", ")", "return", "outputDir" ]
Create or check for an output directory. @param outputDir: A C{str} output directory name, or C{None}. @param force: If C{True}, allow overwriting of pre-existing files. @return: The C{str} output directory name.
[ "Create", "or", "check", "for", "an", "output", "directory", "." ]
python
train
31.142857
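A brief usage sketch of the function above (note it calls sys.exit(1) rather than raising when it refuses to overwrite):

out = makeOuputDir('results', force=True)   # creates ./results if absent, reuses it otherwise
tmp = makeOuputDir(None, force=False)       # falls back to a fresh mkdtemp() directory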
michaelaye/pyciss
pyciss/ringcube.py
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/ringcube.py#L259-L335
def imshow( self, data=None, save=False, ax=None, interpolation="none", extra_title=None, show_resonances="some", set_extent=True, equalized=False, rmin=None, rmax=None, savepath=".", **kwargs, ): """Powerful default display. show_resonances can be True, a list, 'all', or 'some' """ if data is None: data = self.img if self.resonance_axis is not None: logger.debug("removing resonance_axis") self.resonance_axis.remove() if equalized: data = np.nan_to_num(data) data[data < 0] = 0 data = exposure.equalize_hist(data) self.plotted_data = data extent_val = self.extent if set_extent else None min_, max_ = self.plot_limits self.min_ = min_ self.max_ = max_ if ax is None: if not _SEABORN_INSTALLED: fig, ax = plt.subplots(figsize=calc_4_3(8)) else: fig, ax = plt.subplots() else: fig = ax.get_figure() with quantity_support(): im = ax.imshow( data, extent=extent_val, cmap="gray", vmin=min_, vmax=max_, interpolation=interpolation, origin="lower", aspect="auto", **kwargs, ) if any([rmin is not None, rmax is not None]): ax.set_ylim(rmin, rmax) self.mpl_im = im ax.set_xlabel("Longitude [deg]") ax.set_ylabel("Radius [Mm]") ax.ticklabel_format(useOffset=False) # ax.grid('on') title = self.plot_title if extra_title: title += ", " + extra_title ax.set_title(title, fontsize=12) if show_resonances: self.set_resonance_axis(ax, show_resonances, rmin, rmax) if save: savename = self.plotfname if extra_title: savename = savename[:-4] + "_" + extra_title + ".png" p = Path(savename) fullpath = Path(savepath) / p.name fig.savefig(fullpath, dpi=150) logging.info("Created %s", fullpath) self.im = im return im
[ "def", "imshow", "(", "self", ",", "data", "=", "None", ",", "save", "=", "False", ",", "ax", "=", "None", ",", "interpolation", "=", "\"none\"", ",", "extra_title", "=", "None", ",", "show_resonances", "=", "\"some\"", ",", "set_extent", "=", "True", ",", "equalized", "=", "False", ",", "rmin", "=", "None", ",", "rmax", "=", "None", ",", "savepath", "=", "\".\"", ",", "*", "*", "kwargs", ",", ")", ":", "if", "data", "is", "None", ":", "data", "=", "self", ".", "img", "if", "self", ".", "resonance_axis", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"removing resonance_axis\"", ")", "self", ".", "resonance_axis", ".", "remove", "(", ")", "if", "equalized", ":", "data", "=", "np", ".", "nan_to_num", "(", "data", ")", "data", "[", "data", "<", "0", "]", "=", "0", "data", "=", "exposure", ".", "equalize_hist", "(", "data", ")", "self", ".", "plotted_data", "=", "data", "extent_val", "=", "self", ".", "extent", "if", "set_extent", "else", "None", "min_", ",", "max_", "=", "self", ".", "plot_limits", "self", ".", "min_", "=", "min_", "self", ".", "max_", "=", "max_", "if", "ax", "is", "None", ":", "if", "not", "_SEABORN_INSTALLED", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "calc_4_3", "(", "8", ")", ")", "else", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "else", ":", "fig", "=", "ax", ".", "get_figure", "(", ")", "with", "quantity_support", "(", ")", ":", "im", "=", "ax", ".", "imshow", "(", "data", ",", "extent", "=", "extent_val", ",", "cmap", "=", "\"gray\"", ",", "vmin", "=", "min_", ",", "vmax", "=", "max_", ",", "interpolation", "=", "interpolation", ",", "origin", "=", "\"lower\"", ",", "aspect", "=", "\"auto\"", ",", "*", "*", "kwargs", ",", ")", "if", "any", "(", "[", "rmin", "is", "not", "None", ",", "rmax", "is", "not", "None", "]", ")", ":", "ax", ".", "set_ylim", "(", "rmin", ",", "rmax", ")", "self", ".", "mpl_im", "=", "im", "ax", ".", "set_xlabel", "(", "\"Longitude [deg]\"", ")", "ax", ".", "set_ylabel", "(", "\"Radius [Mm]\"", ")", "ax", ".", "ticklabel_format", "(", "useOffset", "=", "False", ")", "# ax.grid('on')", "title", "=", "self", ".", "plot_title", "if", "extra_title", ":", "title", "+=", "\", \"", "+", "extra_title", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "12", ")", "if", "show_resonances", ":", "self", ".", "set_resonance_axis", "(", "ax", ",", "show_resonances", ",", "rmin", ",", "rmax", ")", "if", "save", ":", "savename", "=", "self", ".", "plotfname", "if", "extra_title", ":", "savename", "=", "savename", "[", ":", "-", "4", "]", "+", "\"_\"", "+", "extra_title", "+", "\".png\"", "p", "=", "Path", "(", "savename", ")", "fullpath", "=", "Path", "(", "savepath", ")", "/", "p", ".", "name", "fig", ".", "savefig", "(", "fullpath", ",", "dpi", "=", "150", ")", "logging", ".", "info", "(", "\"Created %s\"", ",", "fullpath", ")", "self", ".", "im", "=", "im", "return", "im" ]
Powerful default display. show_resonances can be True, a list, 'all', or 'some'
[ "Powerful", "default", "display", "." ]
python
train
29.688312
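A hypothetical usage sketch; the cube filename and radius limits are placeholders, and instantiating `RingCube` from an ISIS cube file is an assumption based on the module path:

from pyciss.ringcube import RingCube

cube = RingCube('N1467345444_2.cal.cub')  # placeholder ISIS cube file
cube.imshow(equalized=True, rmin=133.0, rmax=136.0)  # radii in Mm, per the axis label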
saltstack/salt
salt/states/trafficserver.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/trafficserver.py#L289-L315
def refresh(name): ''' Initiate a Traffic Server configuration file reread. Use this command to update the running configuration after any configuration file modification. The timestamp of the last reconfiguration event (in seconds since epoch) is published in the proxy.node.config.reconfigure_time metric. .. code-block:: yaml refresh_ats: trafficserver.refresh ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if __opts__['test']: ret['comment'] = 'Refreshing local node configuration' return ret __salt__['trafficserver.refresh']() ret['result'] = True ret['comment'] = 'Refreshed local node configuration' return ret
[ "def", "refresh", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Refreshing local node configuration'", "return", "ret", "__salt__", "[", "'trafficserver.refresh'", "]", "(", ")", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Refreshed local node configuration'", "return", "ret" ]
Initiate a Traffic Server configuration file reread. Use this command to update the running configuration after any configuration file modification. The timestamp of the last reconfiguration event (in seconds since epoch) is published in the proxy.node.config.reconfigure_time metric. .. code-block:: yaml refresh_ats: trafficserver.refresh
[ "Initiate", "a", "Traffic", "Server", "configuration", "file", "reread", ".", "Use", "this", "command", "to", "update", "the", "running", "configuration", "after", "any", "configuration", "file", "modification", "." ]
python
train
27.481481
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1511-L1519
def min_pulse_sp(self): """ Used to set the pulse size in milliseconds for the signal that tells the servo to drive to the miniumum (counter-clockwise) position_sp. Default value is 600. Valid values are 300 to 700. You must write to the position_sp attribute for changes to this attribute to take effect. """ self._min_pulse_sp, value = self.get_attr_int(self._min_pulse_sp, 'min_pulse_sp') return value
[ "def", "min_pulse_sp", "(", "self", ")", ":", "self", ".", "_min_pulse_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_min_pulse_sp", ",", "'min_pulse_sp'", ")", "return", "value" ]
Used to set the pulse size in milliseconds for the signal that tells the servo to drive to the minimum (counter-clockwise) position_sp. Default value is 600. Valid values are 300 to 700. You must write to the position_sp attribute for changes to this attribute to take effect.
[ "Used", "to", "set", "the", "pulse", "size", "in", "milliseconds", "for", "the", "signal", "that", "tells", "the", "servo", "to", "drive", "to", "the", "miniumum", "(", "counter", "-", "clockwise", ")", "position_sp", ".", "Default", "value", "is", "600", ".", "Valid", "values", "are", "300", "to", "700", ".", "You", "must", "write", "to", "the", "position_sp", "attribute", "for", "changes", "to", "this", "attribute", "to", "take", "effect", "." ]
python
train
51.111111
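A small sketch of reading and writing this property, assuming a servo motor is attached to port A:

from ev3dev2.motor import ServoMotor, OUTPUT_A

servo = ServoMotor(OUTPUT_A)
print(servo.min_pulse_sp)   # read the current value (default 600)
servo.min_pulse_sp = 550    # valid range is 300 to 700
servo.position_sp = -100    # the change takes effect on the next position_sp write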
saltstack/salt
salt/modules/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/network.py#L1524-L1549
def _get_bufsize_linux(iface): ''' Return network interface buffer information using ethtool ''' ret = {'result': False} cmd = '/sbin/ethtool -g {0}'.format(iface) out = __salt__['cmd.run'](cmd) pat = re.compile(r'^(.+):\s+(\d+)$') suffix = 'max-' for line in out.splitlines(): res = pat.match(line) if res: ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2)) ret['result'] = True elif line.endswith('maximums:'): suffix = '-max' elif line.endswith('settings:'): suffix = '' if not ret['result']: parts = out.split() # remove shell cmd prefix from msg if parts[0].endswith('sh:'): out = ' '.join(parts[1:]) ret['comment'] = out return ret
[ "def", "_get_bufsize_linux", "(", "iface", ")", ":", "ret", "=", "{", "'result'", ":", "False", "}", "cmd", "=", "'/sbin/ethtool -g {0}'", ".", "format", "(", "iface", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "pat", "=", "re", ".", "compile", "(", "r'^(.+):\\s+(\\d+)$'", ")", "suffix", "=", "'max-'", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "res", "=", "pat", ".", "match", "(", "line", ")", "if", "res", ":", "ret", "[", "res", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'-'", ")", "+", "suffix", "]", "=", "int", "(", "res", ".", "group", "(", "2", ")", ")", "ret", "[", "'result'", "]", "=", "True", "elif", "line", ".", "endswith", "(", "'maximums:'", ")", ":", "suffix", "=", "'-max'", "elif", "line", ".", "endswith", "(", "'settings:'", ")", ":", "suffix", "=", "''", "if", "not", "ret", "[", "'result'", "]", ":", "parts", "=", "out", ".", "split", "(", ")", "# remove shell cmd prefix from msg", "if", "parts", "[", "0", "]", ".", "endswith", "(", "'sh:'", ")", ":", "out", "=", "' '", ".", "join", "(", "parts", "[", "1", ":", "]", ")", "ret", "[", "'comment'", "]", "=", "out", "return", "ret" ]
Return network interface buffer information using ethtool
[ "Return", "network", "interface", "buffer", "information", "using", "ethtool" ]
python
train
30.884615
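To see how the suffix bookkeeping maps ethtool sections to dict keys, here is a self-contained sketch of the same parsing loop run on sample output (the figures are made up):

import re

sample = '''Ring parameters for eth0:
Pre-set maximums:
RX:             4096
TX:             4096
Current hardware settings:
RX:             256
TX:             256'''

pat = re.compile(r'^(.+):\s+(\d+)$')
suffix, ret = 'max-', {}
for line in sample.splitlines():
    res = pat.match(line)
    if res:
        ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2))
    elif line.endswith('maximums:'):
        suffix = '-max'
    elif line.endswith('settings:'):
        suffix = ''
print(ret)  # {'rx-max': 4096, 'tx-max': 4096, 'rx': 256, 'tx': 256}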
Erotemic/utool
utool/util_str.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2501-L2523
def format_text_as_docstr(text): r""" CommandLine: python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr Example: >>> # DISABLE_DOCTEST >>> from pyvim_funcs import * # NOQA >>> text = testdata_text() >>> formated_text = format_text_as_docstr(text) >>> result = ('formated_text = \n%s' % (str(formated_text),)) >>> print(result) """ import utool as ut import re min_indent = ut.get_minimum_indentation(text) indent_ = ' ' * min_indent formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text, flags=re.MULTILINE) formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text, flags=re.MULTILINE) return formated_text
[ "def", "format_text_as_docstr", "(", "text", ")", ":", "import", "utool", "as", "ut", "import", "re", "min_indent", "=", "ut", ".", "get_minimum_indentation", "(", "text", ")", "indent_", "=", "' '", "*", "min_indent", "formated_text", "=", "re", ".", "sub", "(", "'^'", "+", "indent_", ",", "''", "+", "indent_", "+", "'>>> '", ",", "text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "formated_text", "=", "re", ".", "sub", "(", "'^$'", ",", "''", "+", "indent_", "+", "'>>> #'", ",", "formated_text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "return", "formated_text" ]
r""" CommandLine: python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr Example: >>> # DISABLE_DOCTEST >>> from pyvim_funcs import * # NOQA >>> text = testdata_text() >>> formated_text = format_text_as_docstr(text) >>> result = ('formated_text = \n%s' % (str(formated_text),)) >>> print(result)
[ "r" ]
python
train
33.869565
gem/oq-engine
openquake/server/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/server/views.py#L341-L372
def calc_abort(request, calc_id): """ Abort the given calculation, it is it running """ job = logs.dbcmd('get_job', calc_id) if job is None: message = {'error': 'Unknown job %s' % calc_id} return HttpResponse(content=json.dumps(message), content_type=JSON) if job.status not in ('submitted', 'executing'): message = {'error': 'Job %s is not running' % job.id} return HttpResponse(content=json.dumps(message), content_type=JSON) if not utils.user_has_permission(request, job.user_name): message = {'error': ('User %s has no permission to abort job %s' % (job.user_name, job.id))} return HttpResponse(content=json.dumps(message), content_type=JSON, status=403) if job.pid: # is a spawned job try: os.kill(job.pid, signal.SIGTERM) except Exception as exc: logging.error(exc) else: logging.warning('Aborting job %d, pid=%d', job.id, job.pid) logs.dbcmd('set_status', job.id, 'aborted') message = {'success': 'Killing job %d' % job.id} return HttpResponse(content=json.dumps(message), content_type=JSON) message = {'error': 'PID for job %s not found' % job.id} return HttpResponse(content=json.dumps(message), content_type=JSON)
[ "def", "calc_abort", "(", "request", ",", "calc_id", ")", ":", "job", "=", "logs", ".", "dbcmd", "(", "'get_job'", ",", "calc_id", ")", "if", "job", "is", "None", ":", "message", "=", "{", "'error'", ":", "'Unknown job %s'", "%", "calc_id", "}", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ")", "if", "job", ".", "status", "not", "in", "(", "'submitted'", ",", "'executing'", ")", ":", "message", "=", "{", "'error'", ":", "'Job %s is not running'", "%", "job", ".", "id", "}", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ")", "if", "not", "utils", ".", "user_has_permission", "(", "request", ",", "job", ".", "user_name", ")", ":", "message", "=", "{", "'error'", ":", "(", "'User %s has no permission to abort job %s'", "%", "(", "job", ".", "user_name", ",", "job", ".", "id", ")", ")", "}", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ",", "status", "=", "403", ")", "if", "job", ".", "pid", ":", "# is a spawned job", "try", ":", "os", ".", "kill", "(", "job", ".", "pid", ",", "signal", ".", "SIGTERM", ")", "except", "Exception", "as", "exc", ":", "logging", ".", "error", "(", "exc", ")", "else", ":", "logging", ".", "warning", "(", "'Aborting job %d, pid=%d'", ",", "job", ".", "id", ",", "job", ".", "pid", ")", "logs", ".", "dbcmd", "(", "'set_status'", ",", "job", ".", "id", ",", "'aborted'", ")", "message", "=", "{", "'success'", ":", "'Killing job %d'", "%", "job", ".", "id", "}", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ")", "message", "=", "{", "'error'", ":", "'PID for job %s not found'", "%", "job", ".", "id", "}", "return", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "message", ")", ",", "content_type", "=", "JSON", ")" ]
Abort the given calculation if it is running
[ "Abort", "the", "given", "calculation", "it", "is", "it", "running" ]
python
train
41.5625
keras-rl/keras-rl
rl/policy.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/policy.py#L128-L139
def select_action(self, nb_actions, probs): """Return the selected action # Arguments probs (np.ndarray) : Probabilty for each action # Returns action """ action = np.random.choice(range(nb_actions), p=probs) return action
[ "def", "select_action", "(", "self", ",", "nb_actions", ",", "probs", ")", ":", "action", "=", "np", ".", "random", ".", "choice", "(", "range", "(", "nb_actions", ")", ",", "p", "=", "probs", ")", "return", "action" ]
Return the selected action # Arguments probs (np.ndarray) : Probability for each action # Returns action
[ "Return", "the", "selected", "action" ]
python
train
23.833333
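The selection is a single weighted draw with numpy; a standalone sketch of the same call:

import numpy as np

probs = np.array([0.1, 0.6, 0.3])             # must sum to 1
action = np.random.choice(range(3), p=probs)
print(action)                                 # index 1 is drawn ~60% of the time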
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L551-L572
def run(self): """Render and display Python package documentation. """ os.environ['JARN_RUN'] = '1' self.python.check_valid_python() args = self.parse_options(self.args) if args: arg = args[0] else: arg = os.curdir if arg: arg = expanduser(arg) if isfile(arg): outfile = self.render_file(arg) elif isdir(arg): outfile = self.render_long_description(arg) else: err_exit('No such file or directory: %s' % arg) self.open_in_browser(outfile)
[ "def", "run", "(", "self", ")", ":", "os", ".", "environ", "[", "'JARN_RUN'", "]", "=", "'1'", "self", ".", "python", ".", "check_valid_python", "(", ")", "args", "=", "self", ".", "parse_options", "(", "self", ".", "args", ")", "if", "args", ":", "arg", "=", "args", "[", "0", "]", "else", ":", "arg", "=", "os", ".", "curdir", "if", "arg", ":", "arg", "=", "expanduser", "(", "arg", ")", "if", "isfile", "(", "arg", ")", ":", "outfile", "=", "self", ".", "render_file", "(", "arg", ")", "elif", "isdir", "(", "arg", ")", ":", "outfile", "=", "self", ".", "render_long_description", "(", "arg", ")", "else", ":", "err_exit", "(", "'No such file or directory: %s'", "%", "arg", ")", "self", ".", "open_in_browser", "(", "outfile", ")" ]
Render and display Python package documentation.
[ "Render", "and", "display", "Python", "package", "documentation", "." ]
python
train
26.681818
doraemonext/wechat-python-sdk
wechat_sdk/basic.py
https://github.com/doraemonext/wechat-python-sdk/blob/bf6f6f3d4a5440feb73a51937059d7feddc335a0/wechat_sdk/basic.py#L692-L737
def send_article_message(self, user_id, articles=None, media_id=None): """ 发送图文消息 详情请参考 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html :param user_id: 用户 ID, 就是你收到的 WechatMessage 的 source :param articles: list 对象, 每个元素为一个 dict 对象, key 包含 `title`, `description`, `picurl`, `url` :param media_id: 待发送的图文 Media ID :return: 返回的 JSON 数据包 """ # neither 'articles' nor 'media_id' is specified if articles is None and media_id is None: raise TypeError('must provide one parameter in "articles" and "media_id"') # articles specified if articles: articles_data = [] for article in articles: article = Article(**article) articles_data.append({ 'title': article.title, 'description': article.description, 'url': article.url, 'picurl': article.picurl, }) return self.request.post( url='https://api.weixin.qq.com/cgi-bin/message/custom/send', data={ 'touser': user_id, 'msgtype': 'news', 'news': { 'articles': articles_data, }, } ) # media_id specified return self.request.post( url='https://api.weixin.qq.com/cgi-bin/message/custom/send', data={ 'touser': user_id, 'msgtype': 'mpnews', 'mpnews': { 'media_id': media_id, }, } )
[ "def", "send_article_message", "(", "self", ",", "user_id", ",", "articles", "=", "None", ",", "media_id", "=", "None", ")", ":", "# neither 'articles' nor 'media_id' is specified", "if", "articles", "is", "None", "and", "media_id", "is", "None", ":", "raise", "TypeError", "(", "'must provide one parameter in \"articles\" and \"media_id\"'", ")", "# articles specified", "if", "articles", ":", "articles_data", "=", "[", "]", "for", "article", "in", "articles", ":", "article", "=", "Article", "(", "*", "*", "article", ")", "articles_data", ".", "append", "(", "{", "'title'", ":", "article", ".", "title", ",", "'description'", ":", "article", ".", "description", ",", "'url'", ":", "article", ".", "url", ",", "'picurl'", ":", "article", ".", "picurl", ",", "}", ")", "return", "self", ".", "request", ".", "post", "(", "url", "=", "'https://api.weixin.qq.com/cgi-bin/message/custom/send'", ",", "data", "=", "{", "'touser'", ":", "user_id", ",", "'msgtype'", ":", "'news'", ",", "'news'", ":", "{", "'articles'", ":", "articles_data", ",", "}", ",", "}", ")", "# media_id specified", "return", "self", ".", "request", ".", "post", "(", "url", "=", "'https://api.weixin.qq.com/cgi-bin/message/custom/send'", ",", "data", "=", "{", "'touser'", ":", "user_id", ",", "'msgtype'", ":", "'mpnews'", ",", "'mpnews'", ":", "{", "'media_id'", ":", "media_id", ",", "}", ",", "}", ")" ]
Send a rich-media (news) message. For details see http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html :param user_id: user ID, i.e. the source of the WechatMessage you received :param articles: a list object whose elements are dict objects with keys `title`, `description`, `picurl`, `url` :param media_id: Media ID of the news message to send :return: the JSON data returned by the API
[ "发送图文消息", "详情请参考", "http", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki", "/", "7", "/", "12a5a320ae96fecdf0e15cb06123de9f", ".", "html", ":", "param", "user_id", ":", "用户", "ID", "就是你收到的", "WechatMessage", "的", "source", ":", "param", "articles", ":", "list", "对象", "每个元素为一个", "dict", "对象", "key", "包含", "title", "description", "picurl", "url", ":", "param", "media_id", ":", "待发送的图文", "Media", "ID", ":", "return", ":", "返回的", "JSON", "数据包" ]
python
valid
36.347826
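A usage sketch, assuming `wechat` is an authenticated client instance; the OpenID, media ID, and URLs are placeholders:

articles = [{
    'title': 'Hello',
    'description': 'First news item',
    'picurl': 'http://example.com/pic.jpg',
    'url': 'http://example.com/article',
}]
wechat.send_article_message('USER_OPENID', articles=articles)
# or send previously uploaded material instead:
wechat.send_article_message('USER_OPENID', media_id='MEDIA_ID')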
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L3816-L3824
def use_plenary_sequence_rule_view(self): """Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view""" self._object_views['sequence_rule'] = PLENARY # self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_sequence_rule_view() except AttributeError: pass
[ "def", "use_plenary_sequence_rule_view", "(", "self", ")", ":", "self", ".", "_object_views", "[", "'sequence_rule'", "]", "=", "PLENARY", "# self._get_provider_session('sequence_rule_lookup_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_plenary_sequence_rule_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Pass through to provider SequenceRuleLookupSession.use_plenary_sequence_rule_view
[ "Pass", "through", "to", "provider", "SequenceRuleLookupSession", ".", "use_plenary_sequence_rule_view" ]
python
train
52.666667
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/mapping.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/mapping.py#L3622-L3671
def sync(self, json_obj=None): """ synchronize this transport with the Ariane server transport :return: """ LOGGER.debug("Transport.sync") if json_obj is None: params = None if self.id is not None: params = SessionService.complete_transactional_req({'ID': self.id}) if params is not None: if MappingService.driver_type != DriverFactory.DRIVER_REST: params['OPERATION'] = 'getTransport' args = {'properties': params} else: args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params} response = TransportService.requester.call(args) if MappingService.driver_type != DriverFactory.DRIVER_REST: response = response.get() if response.rc == 0: json_obj = response.response_content else: err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \ 'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \ " (" + str(response.rc) + ")" LOGGER.warning(err_msg) if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message: raise ArianeMappingOverloadError("Transport.sync", ArianeMappingOverloadError.ERROR_MSG) # traceback.print_stack() elif 'transportID' not in json_obj: err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \ 'Reason: inconsistent json_obj' + str(json_obj) + " from : \n" LOGGER.warning(err_msg) # traceback.print_stack() if json_obj is not None: self.id = json_obj['transportID'] self.name = json_obj['transportName'] if MappingService.driver_type != DriverFactory.DRIVER_REST: if 'transportProperties' in json_obj: self.properties = DriverTools.json2properties(json_obj['transportProperties']) else: self.properties = None else: self.properties = json_obj['transportProperties'] if 'transportProperties' in json_obj else None
[ "def", "sync", "(", "self", ",", "json_obj", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"Transport.sync\"", ")", "if", "json_obj", "is", "None", ":", "params", "=", "None", "if", "self", ".", "id", "is", "not", "None", ":", "params", "=", "SessionService", ".", "complete_transactional_req", "(", "{", "'ID'", ":", "self", ".", "id", "}", ")", "if", "params", "is", "not", "None", ":", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "params", "[", "'OPERATION'", "]", "=", "'getTransport'", "args", "=", "{", "'properties'", ":", "params", "}", "else", ":", "args", "=", "{", "'http_operation'", ":", "'GET'", ",", "'operation_path'", ":", "'get'", ",", "'parameters'", ":", "params", "}", "response", "=", "TransportService", ".", "requester", ".", "call", "(", "args", ")", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "response", "=", "response", ".", "get", "(", ")", "if", "response", ".", "rc", "==", "0", ":", "json_obj", "=", "response", ".", "response_content", "else", ":", "err_msg", "=", "'Transport.sync - Problem while syncing transport (id: '", "+", "str", "(", "self", ".", "id", ")", "+", "'). '", "'Reason: '", "+", "str", "(", "response", ".", "response_content", ")", "+", "' - '", "+", "str", "(", "response", ".", "error_message", ")", "+", "\" (\"", "+", "str", "(", "response", ".", "rc", ")", "+", "\")\"", "LOGGER", ".", "warning", "(", "err_msg", ")", "if", "response", ".", "rc", "==", "500", "and", "ArianeMappingOverloadError", ".", "ERROR_MSG", "in", "response", ".", "error_message", ":", "raise", "ArianeMappingOverloadError", "(", "\"Transport.sync\"", ",", "ArianeMappingOverloadError", ".", "ERROR_MSG", ")", "# traceback.print_stack()", "elif", "'transportID'", "not", "in", "json_obj", ":", "err_msg", "=", "'Transport.sync - Problem while syncing transport (id: '", "+", "str", "(", "self", ".", "id", ")", "+", "'). '", "'Reason: inconsistent json_obj'", "+", "str", "(", "json_obj", ")", "+", "\" from : \\n\"", "LOGGER", ".", "warning", "(", "err_msg", ")", "# traceback.print_stack()", "if", "json_obj", "is", "not", "None", ":", "self", ".", "id", "=", "json_obj", "[", "'transportID'", "]", "self", ".", "name", "=", "json_obj", "[", "'transportName'", "]", "if", "MappingService", ".", "driver_type", "!=", "DriverFactory", ".", "DRIVER_REST", ":", "if", "'transportProperties'", "in", "json_obj", ":", "self", ".", "properties", "=", "DriverTools", ".", "json2properties", "(", "json_obj", "[", "'transportProperties'", "]", ")", "else", ":", "self", ".", "properties", "=", "None", "else", ":", "self", ".", "properties", "=", "json_obj", "[", "'transportProperties'", "]", "if", "'transportProperties'", "in", "json_obj", "else", "None" ]
synchronize this transport with the Ariane server transport :return:
[ "synchronize", "this", "transport", "with", "the", "Ariane", "server", "transport", ":", "return", ":" ]
python
train
49.34
mfcloud/python-zvm-sdk
sample/simple/sample.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/sample/simple/sample.py#L15-L36
def delete_guest(userid): """ Destroy a virtual machine. Input parameters: :userid: USERID of the guest, last 8 if length > 8 """ # Check if the guest exists. guest_list_info = client.send_request('guest_list') # the string 'userid' need to be coded as 'u'userid' in case of py2 interpreter. userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid) if userid_1 not in guest_list_info['output']: RuntimeError("Userid %s does not exist!" % userid) # Delete the guest. guest_delete_info = client.send_request('guest_delete', userid) if guest_delete_info['overallRC']: print("\nFailed to delete guest %s!" % userid) else: print("\nSucceeded to delete guest %s!" % userid)
[ "def", "delete_guest", "(", "userid", ")", ":", "# Check if the guest exists.", "guest_list_info", "=", "client", ".", "send_request", "(", "'guest_list'", ")", "# the string 'userid' need to be coded as 'u'userid' in case of py2 interpreter.", "userid_1", "=", "(", "unicode", "(", "userid", ",", "\"utf-8\"", ")", "if", "sys", ".", "version", "[", "0", "]", "==", "'2'", "else", "userid", ")", "if", "userid_1", "not", "in", "guest_list_info", "[", "'output'", "]", ":", "RuntimeError", "(", "\"Userid %s does not exist!\"", "%", "userid", ")", "# Delete the guest.", "guest_delete_info", "=", "client", ".", "send_request", "(", "'guest_delete'", ",", "userid", ")", "if", "guest_delete_info", "[", "'overallRC'", "]", ":", "print", "(", "\"\\nFailed to delete guest %s!\"", "%", "userid", ")", "else", ":", "print", "(", "\"\\nSucceeded to delete guest %s!\"", "%", "userid", ")" ]
Destroy a virtual machine. Input parameters: :userid: USERID of the guest, last 8 if length > 8
[ "Destroy", "a", "virtual", "machine", "." ]
python
train
34.363636
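A one-line usage sketch; it assumes the module-level `client` is already connected, and the userid is a placeholder:

delete_guest('TESTVM01')   # prints success or failure for the given guest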
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/kernelmanager.py#L361-L376
def shutdown(self, restart=False): """Request an immediate kernel shutdown. Upon receipt of the (empty) reply, client code can safely assume that the kernel has shut down and it's safe to forcefully terminate it if it's still alive. The kernel will send the reply via a function registered with Python's atexit module, ensuring it's truly done as the kernel is done with all normal operation. """ # Send quit message to kernel. Once we implement kernel-side setattr, # this should probably be done that way, but for now this will do. msg = self.session.msg('shutdown_request', {'restart':restart}) self._queue_send(msg) return msg['header']['msg_id']
[ "def", "shutdown", "(", "self", ",", "restart", "=", "False", ")", ":", "# Send quit message to kernel. Once we implement kernel-side setattr,", "# this should probably be done that way, but for now this will do.", "msg", "=", "self", ".", "session", ".", "msg", "(", "'shutdown_request'", ",", "{", "'restart'", ":", "restart", "}", ")", "self", ".", "_queue_send", "(", "msg", ")", "return", "msg", "[", "'header'", "]", "[", "'msg_id'", "]" ]
Request an immediate kernel shutdown. Upon receipt of the (empty) reply, client code can safely assume that the kernel has shut down and it's safe to forcefully terminate it if it's still alive. The kernel will send the reply via a function registered with Python's atexit module, ensuring it's truly done as the kernel is done with all normal operation.
[ "Request", "an", "immediate", "kernel", "shutdown", "." ]
python
test
46.3125
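A brief sketch, assuming `shell_channel` is the shell channel object of a started kernel manager that exposes the method above:

msg_id = shell_channel.shutdown(restart=False)  # request a clean shutdown
# the kernel answers with an empty shutdown_reply carrying this msg_id
print('shutdown requested, msg_id =', msg_id)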
monarch-initiative/dipper
dipper/sources/Ensembl.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Ensembl.py#L134-L158
def fetch_protein_list(self, taxon_id): """ Fetch a list of proteins for a species in biomart :param taxid: :return: list """ protein_list = list() # col = self.columns['ensembl_biomart'] col = ['ensembl_peptide_id', ] params = urllib.parse.urlencode( {'query': self._build_biomart_gene_query(taxon_id, col)}) conn = http.client.HTTPConnection(ENS_URL) conn.request("GET", '/biomart/martservice?' + params) response = conn.getresponse() for line in response: line = line.decode('utf-8').rstrip() row = line.split('\t') if len(row) != len(col): LOG.warning("Data error for p-list query on %d", taxon_id) continue protein_list.append(row[col.index('ensembl_peptide_id')]) conn.close() return protein_list
[ "def", "fetch_protein_list", "(", "self", ",", "taxon_id", ")", ":", "protein_list", "=", "list", "(", ")", "# col = self.columns['ensembl_biomart']", "col", "=", "[", "'ensembl_peptide_id'", ",", "]", "params", "=", "urllib", ".", "parse", ".", "urlencode", "(", "{", "'query'", ":", "self", ".", "_build_biomart_gene_query", "(", "taxon_id", ",", "col", ")", "}", ")", "conn", "=", "http", ".", "client", ".", "HTTPConnection", "(", "ENS_URL", ")", "conn", ".", "request", "(", "\"GET\"", ",", "'/biomart/martservice?'", "+", "params", ")", "response", "=", "conn", ".", "getresponse", "(", ")", "for", "line", "in", "response", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", ".", "rstrip", "(", ")", "row", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "row", ")", "!=", "len", "(", "col", ")", ":", "LOG", ".", "warning", "(", "\"Data error for p-list query on %d\"", ",", "taxon_id", ")", "continue", "protein_list", ".", "append", "(", "row", "[", "col", ".", "index", "(", "'ensembl_peptide_id'", ")", "]", ")", "conn", ".", "close", "(", ")", "return", "protein_list" ]
Fetch a list of proteins for a species in biomart :param taxon_id: :return: list
[ "Fetch", "a", "list", "of", "proteins", "for", "a", "species", "in", "biomart", ":", "param", "taxid", ":", ":", "return", ":", "list" ]
python
train
35.68
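A self-contained sketch of the same martservice round trip as fetch_protein_list(). The XML query is illustrative — the real one comes from _build_biomart_gene_query() and varies with the taxon — and 'www.ensembl.org' stands in for ENS_URL:

import http.client
import urllib.parse

query_xml = (
    '<?xml version="1.0" encoding="UTF-8"?>'
    '<Query virtualSchemaName="default" formatter="TSV" header="0" uniqueRows="1">'
    '<Dataset name="hsapiens_gene_ensembl">'    # hypothetical dataset name
    '<Attribute name="ensembl_peptide_id"/>'
    '</Dataset></Query>'
)
params = urllib.parse.urlencode({'query': query_xml})
conn = http.client.HTTPConnection('www.ensembl.org')
conn.request('GET', '/biomart/martservice?' + params)
for raw_line in conn.getresponse():    # the response streams one ID per line
    print(raw_line.decode('utf-8').rstrip())
conn.close()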
ArchiveTeam/wpull
wpull/namevalue.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/namevalue.py#L169-L189
def unfold_lines(string): '''Join lines that are wrapped. Any line that starts with a space or tab is joined to the previous line. ''' assert isinstance(string, str), 'Expect str. Got {}'.format(type(string)) lines = string.splitlines() line_buffer = io.StringIO() for line_number in range(len(lines)): line = lines[line_number] if line and line[0:1] in (' ', '\t'): line_buffer.write(' ') elif line_number != 0: line_buffer.write('\r\n') line_buffer.write(line.strip()) line_buffer.write('\r\n') return line_buffer.getvalue()
[ "def", "unfold_lines", "(", "string", ")", ":", "assert", "isinstance", "(", "string", ",", "str", ")", ",", "'Expect str. Got {}'", ".", "format", "(", "type", "(", "string", ")", ")", "lines", "=", "string", ".", "splitlines", "(", ")", "line_buffer", "=", "io", ".", "StringIO", "(", ")", "for", "line_number", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "line", "=", "lines", "[", "line_number", "]", "if", "line", "and", "line", "[", "0", ":", "1", "]", "in", "(", "' '", ",", "'\\t'", ")", ":", "line_buffer", ".", "write", "(", "' '", ")", "elif", "line_number", "!=", "0", ":", "line_buffer", ".", "write", "(", "'\\r\\n'", ")", "line_buffer", ".", "write", "(", "line", ".", "strip", "(", ")", ")", "line_buffer", ".", "write", "(", "'\\r\\n'", ")", "return", "line_buffer", ".", "getvalue", "(", ")" ]
Join lines that are wrapped. Any line that starts with a space or tab is joined to the previous line.
[ "Join", "lines", "that", "are", "wrapped", "." ]
python
train
28.857143
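A quick check of unfold_lines() on a folded MIME-style header, where continuation lines begin with a space or tab:

folded = (
    'Subject: a long\r\n'
    '\tfolded subject line\r\n'
    'Content-Type: text/plain\r\n'
)
print(repr(unfold_lines(folded)))
# 'Subject: a long folded subject line\r\nContent-Type: text/plain\r\n'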
src-d/modelforge
modelforge/environment.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/environment.py#L42-L61
def collect_loaded_packages() -> List[Tuple[str, str]]: """ Return the currently loaded package names and their versions. """ dists = get_installed_distributions() get_dist_files = DistFilesFinder() file_table = {} for dist in dists: for file in get_dist_files(dist): file_table[file] = dist used_dists = set() # we greedily load all values to a list to avoid weird # "dictionary changed size during iteration" errors for module in list(sys.modules.values()): try: dist = file_table[module.__file__] except (AttributeError, KeyError): continue used_dists.add(dist) return sorted((dist.project_name, dist.version) for dist in used_dists)
[ "def", "collect_loaded_packages", "(", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "dists", "=", "get_installed_distributions", "(", ")", "get_dist_files", "=", "DistFilesFinder", "(", ")", "file_table", "=", "{", "}", "for", "dist", "in", "dists", ":", "for", "file", "in", "get_dist_files", "(", "dist", ")", ":", "file_table", "[", "file", "]", "=", "dist", "used_dists", "=", "set", "(", ")", "# we greedily load all values to a list to avoid weird", "# \"dictionary changed size during iteration\" errors", "for", "module", "in", "list", "(", "sys", ".", "modules", ".", "values", "(", ")", ")", ":", "try", ":", "dist", "=", "file_table", "[", "module", ".", "__file__", "]", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "continue", "used_dists", ".", "add", "(", "dist", ")", "return", "sorted", "(", "(", "dist", ".", "project_name", ",", "dist", ".", "version", ")", "for", "dist", "in", "used_dists", ")" ]
Return the currently loaded package names and their versions.
[ "Return", "the", "currently", "loaded", "package", "names", "and", "their", "versions", "." ]
python
train
36.65
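A rough standard-library analogue of the same file-table technique, assuming Python 3.8+ where importlib.metadata replaces pip's get_installed_distributions:

import sys
from importlib.metadata import distributions

def loaded_packages():
    # Map every file installed by a distribution back to that distribution.
    file_table = {}
    for dist in distributions():
        for f in dist.files or []:
            file_table[str(dist.locate_file(f))] = dist
    # Look up the __file__ of every currently imported module.
    used = {}
    for module in list(sys.modules.values()):
        dist = file_table.get(getattr(module, '__file__', None))
        if dist is not None:
            used[dist.metadata['Name']] = dist.version
    return sorted(used.items())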
google/openhtf
openhtf/core/phase_group.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/phase_group.py#L192-L204
def load_code_info(phases_or_groups): """Recursively load code info for a PhaseGroup or list of phases or groups.""" if isinstance(phases_or_groups, PhaseGroup): return phases_or_groups.load_code_info() ret = [] for phase in phases_or_groups: if isinstance(phase, PhaseGroup): ret.append(phase.load_code_info()) else: ret.append( mutablerecords.CopyRecord( phase, code_info=test_record.CodeInfo.for_function(phase.func))) return ret
[ "def", "load_code_info", "(", "phases_or_groups", ")", ":", "if", "isinstance", "(", "phases_or_groups", ",", "PhaseGroup", ")", ":", "return", "phases_or_groups", ".", "load_code_info", "(", ")", "ret", "=", "[", "]", "for", "phase", "in", "phases_or_groups", ":", "if", "isinstance", "(", "phase", ",", "PhaseGroup", ")", ":", "ret", ".", "append", "(", "phase", ".", "load_code_info", "(", ")", ")", "else", ":", "ret", ".", "append", "(", "mutablerecords", ".", "CopyRecord", "(", "phase", ",", "code_info", "=", "test_record", ".", "CodeInfo", ".", "for_function", "(", "phase", ".", "func", ")", ")", ")", "return", "ret" ]
Recursively load code info for a PhaseGroup or list of phases or groups.
[ "Recursively", "load", "code", "info", "for", "a", "PhaseGroup", "or", "list", "of", "phases", "or", "groups", "." ]
python
train
36.769231
mrcagney/gtfstk
gtfstk/trips.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/trips.py#L207-L390
def compute_trip_stats( feed: "Feed", route_ids: Optional[List[str]] = None, *, compute_dist_from_shapes: bool = False, ) -> DataFrame: """ Return a DataFrame with the following columns: - ``'trip_id'`` - ``'route_id'`` - ``'route_short_name'`` - ``'route_type'`` - ``'direction_id'``: NaN if missing from feed - ``'shape_id'``: NaN if missing from feed - ``'num_stops'``: number of stops on trip - ``'start_time'``: first departure time of the trip - ``'end_time'``: last departure time of the trip - ``'start_stop_id'``: stop ID of the first stop of the trip - ``'end_stop_id'``: stop ID of the last stop of the trip - ``'is_loop'``: 1 if the start and end stop are less than 400m apart and 0 otherwise - ``'distance'``: distance of the trip in ``feed.dist_units``; contains all ``np.nan`` entries if ``feed.shapes is None`` - ``'duration'``: duration of the trip in hours - ``'speed'``: distance/duration If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at least one non-NaN value and ``compute_dist_from_shapes == False``, then use that column to compute the distance column. Else if ``feed.shapes is not None``, then compute the distance column using the shapes and Shapely. Otherwise, set the distances to NaN. If route IDs are given, then restrict to trips on those routes. Notes ----- - Assume the following feed attributes are not ``None``: * ``feed.trips`` * ``feed.routes`` * ``feed.stop_times`` * ``feed.shapes`` (optionally) * Those used in :func:`.stops.build_geometry_by_stop` - Calculating trip distances with ``compute_dist_from_shapes=True`` seems pretty accurate. For example, calculating trip distances on `this Portland feed <https://transitfeeds.com/p/trimet/43/1400947517>`_ using ``compute_dist_from_shapes=False`` and ``compute_dist_from_shapes=True``, yields a difference of at most 0.83km from the original values. """ f = feed.trips.copy() # Restrict to given route IDs if route_ids is not None: f = f[f["route_id"].isin(route_ids)].copy() # Merge with stop times and extra trip info. # Convert departure times to seconds past midnight to # compute trip durations later. if "direction_id" not in f.columns: f["direction_id"] = np.nan if "shape_id" not in f.columns: f["shape_id"] = np.nan f = ( f[["route_id", "trip_id", "direction_id", "shape_id"]] .merge(feed.routes[["route_id", "route_short_name", "route_type"]]) .merge(feed.stop_times) .sort_values(["trip_id", "stop_sequence"]) .assign( departure_time=lambda x: x["departure_time"].map( hp.timestr_to_seconds ) ) ) # Compute all trips stats except distance, # which is possibly more involved geometry_by_stop = feed.build_geometry_by_stop(use_utm=True) g = f.groupby("trip_id") def my_agg(group): d = OrderedDict() d["route_id"] = group["route_id"].iat[0] d["route_short_name"] = group["route_short_name"].iat[0] d["route_type"] = group["route_type"].iat[0] d["direction_id"] = group["direction_id"].iat[0] d["shape_id"] = group["shape_id"].iat[0] d["num_stops"] = group.shape[0] d["start_time"] = group["departure_time"].iat[0] d["end_time"] = group["departure_time"].iat[-1] d["start_stop_id"] = group["stop_id"].iat[0] d["end_stop_id"] = group["stop_id"].iat[-1] dist = geometry_by_stop[d["start_stop_id"]].distance( geometry_by_stop[d["end_stop_id"]] ) d["is_loop"] = int(dist < 400) d["duration"] = (d["end_time"] - d["start_time"]) / 3600 return pd.Series(d) # Apply my_agg, but don't reset index yet. 
    # Need trip ID as index to line up the results of the
    # forthcoming distance calculation
    h = g.apply(my_agg)

    # Compute distance
    if (
        hp.is_not_null(f, "shape_dist_traveled")
        and not compute_dist_from_shapes
    ):
        # Compute distances using shape_dist_traveled column
        h["distance"] = g.apply(
            lambda group: group["shape_dist_traveled"].max()
        )
    elif feed.shapes is not None:
        # Compute distances using the shapes and Shapely
        geometry_by_shape = feed.build_geometry_by_shape(use_utm=True)
        geometry_by_stop = feed.build_geometry_by_stop(use_utm=True)
        m_to_dist = hp.get_convert_dist("m", feed.dist_units)

        def compute_dist(group):
            """
            Return the distance traveled along the trip between the
            first and last stops.
            If that distance is negative or if the trip's linestring
            intersects itself, then return the length of the trip's
            linestring instead.
            """
            shape = group["shape_id"].iat[0]
            try:
                # Get the linestring for this trip
                linestring = geometry_by_shape[shape]
            except KeyError:
                # Shape ID is NaN or doesn't exist in shapes.
                # No can do.
                return np.nan

            # If the linestring intersects itself, then that can cause
            # errors in the computation below, so just
            # return the length of the linestring as a good approximation
            D = linestring.length
            if not linestring.is_simple:
                return D

            # Otherwise, return the difference of the distances along
            # the linestring of the first and last stop
            start_stop = group["stop_id"].iat[0]
            end_stop = group["stop_id"].iat[-1]
            try:
                start_point = geometry_by_stop[start_stop]
                end_point = geometry_by_stop[end_stop]
            except KeyError:
                # One of the two stop IDs is NaN, so just
                # return the length of the linestring
                return D

            d1 = linestring.project(start_point)
            d2 = linestring.project(end_point)
            d = d2 - d1
            if 0 < d < D + 100:
                return d
            else:
                # Something is probably wrong, so just
                # return the length of the linestring
                return D

        h["distance"] = g.apply(compute_dist)
        # Convert from meters
        h["distance"] = h["distance"].map(m_to_dist)
    else:
        h["distance"] = np.nan

    # Reset index and compute final stats
    h = h.reset_index()
    h["speed"] = h["distance"] / h["duration"]
    h[["start_time", "end_time"]] = h[["start_time", "end_time"]].applymap(
        lambda x: hp.timestr_to_seconds(x, inverse=True)
    )

    return h.sort_values(["route_id", "direction_id", "start_time"])
[ "def", "compute_trip_stats", "(", "feed", ":", "\"Feed\"", ",", "route_ids", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ",", "*", ",", "compute_dist_from_shapes", ":", "bool", "=", "False", ",", ")", "->", "DataFrame", ":", "f", "=", "feed", ".", "trips", ".", "copy", "(", ")", "# Restrict to given route IDs", "if", "route_ids", "is", "not", "None", ":", "f", "=", "f", "[", "f", "[", "\"route_id\"", "]", ".", "isin", "(", "route_ids", ")", "]", ".", "copy", "(", ")", "# Merge with stop times and extra trip info.", "# Convert departure times to seconds past midnight to", "# compute trip durations later.", "if", "\"direction_id\"", "not", "in", "f", ".", "columns", ":", "f", "[", "\"direction_id\"", "]", "=", "np", ".", "nan", "if", "\"shape_id\"", "not", "in", "f", ".", "columns", ":", "f", "[", "\"shape_id\"", "]", "=", "np", ".", "nan", "f", "=", "(", "f", "[", "[", "\"route_id\"", ",", "\"trip_id\"", ",", "\"direction_id\"", ",", "\"shape_id\"", "]", "]", ".", "merge", "(", "feed", ".", "routes", "[", "[", "\"route_id\"", ",", "\"route_short_name\"", ",", "\"route_type\"", "]", "]", ")", ".", "merge", "(", "feed", ".", "stop_times", ")", ".", "sort_values", "(", "[", "\"trip_id\"", ",", "\"stop_sequence\"", "]", ")", ".", "assign", "(", "departure_time", "=", "lambda", "x", ":", "x", "[", "\"departure_time\"", "]", ".", "map", "(", "hp", ".", "timestr_to_seconds", ")", ")", ")", "# Compute all trips stats except distance,", "# which is possibly more involved", "geometry_by_stop", "=", "feed", ".", "build_geometry_by_stop", "(", "use_utm", "=", "True", ")", "g", "=", "f", ".", "groupby", "(", "\"trip_id\"", ")", "def", "my_agg", "(", "group", ")", ":", "d", "=", "OrderedDict", "(", ")", "d", "[", "\"route_id\"", "]", "=", "group", "[", "\"route_id\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"route_short_name\"", "]", "=", "group", "[", "\"route_short_name\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"route_type\"", "]", "=", "group", "[", "\"route_type\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"direction_id\"", "]", "=", "group", "[", "\"direction_id\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"shape_id\"", "]", "=", "group", "[", "\"shape_id\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"num_stops\"", "]", "=", "group", ".", "shape", "[", "0", "]", "d", "[", "\"start_time\"", "]", "=", "group", "[", "\"departure_time\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"end_time\"", "]", "=", "group", "[", "\"departure_time\"", "]", ".", "iat", "[", "-", "1", "]", "d", "[", "\"start_stop_id\"", "]", "=", "group", "[", "\"stop_id\"", "]", ".", "iat", "[", "0", "]", "d", "[", "\"end_stop_id\"", "]", "=", "group", "[", "\"stop_id\"", "]", ".", "iat", "[", "-", "1", "]", "dist", "=", "geometry_by_stop", "[", "d", "[", "\"start_stop_id\"", "]", "]", ".", "distance", "(", "geometry_by_stop", "[", "d", "[", "\"end_stop_id\"", "]", "]", ")", "d", "[", "\"is_loop\"", "]", "=", "int", "(", "dist", "<", "400", ")", "d", "[", "\"duration\"", "]", "=", "(", "d", "[", "\"end_time\"", "]", "-", "d", "[", "\"start_time\"", "]", ")", "/", "3600", "return", "pd", ".", "Series", "(", "d", ")", "# Apply my_agg, but don't reset index yet.", "# Need trip ID as index to line up the results of the", "# forthcoming distance calculation", "h", "=", "g", ".", "apply", "(", "my_agg", ")", "# Compute distance", "if", "(", "hp", ".", "is_not_null", "(", "f", ",", "\"shape_dist_traveled\"", ")", "and", "not", "compute_dist_from_shapes", ")", ":", "# Compute distances using 
shape_dist_traveled column", "h", "[", "\"distance\"", "]", "=", "g", ".", "apply", "(", "lambda", "group", ":", "group", "[", "\"shape_dist_traveled\"", "]", ".", "max", "(", ")", ")", "elif", "feed", ".", "shapes", "is", "not", "None", ":", "# Compute distances using the shapes and Shapely", "geometry_by_shape", "=", "feed", ".", "build_geometry_by_shape", "(", "use_utm", "=", "True", ")", "geometry_by_stop", "=", "feed", ".", "build_geometry_by_stop", "(", "use_utm", "=", "True", ")", "m_to_dist", "=", "hp", ".", "get_convert_dist", "(", "\"m\"", ",", "feed", ".", "dist_units", ")", "def", "compute_dist", "(", "group", ")", ":", "\"\"\"\n Return the distance traveled along the trip between the\n first and last stops.\n If that distance is negative or if the trip's linestring\n intersects itfeed, then return the length of the trip's\n linestring instead.\n \"\"\"", "shape", "=", "group", "[", "\"shape_id\"", "]", ".", "iat", "[", "0", "]", "try", ":", "# Get the linestring for this trip", "linestring", "=", "geometry_by_shape", "[", "shape", "]", "except", "KeyError", ":", "# Shape ID is NaN or doesn't exist in shapes.", "# No can do.", "return", "np", ".", "nan", "# If the linestring intersects itfeed, then that can cause", "# errors in the computation below, so just", "# return the length of the linestring as a good approximation", "D", "=", "linestring", ".", "length", "if", "not", "linestring", ".", "is_simple", ":", "return", "D", "# Otherwise, return the difference of the distances along", "# the linestring of the first and last stop", "start_stop", "=", "group", "[", "\"stop_id\"", "]", ".", "iat", "[", "0", "]", "end_stop", "=", "group", "[", "\"stop_id\"", "]", ".", "iat", "[", "-", "1", "]", "try", ":", "start_point", "=", "geometry_by_stop", "[", "start_stop", "]", "end_point", "=", "geometry_by_stop", "[", "end_stop", "]", "except", "KeyError", ":", "# One of the two stop IDs is NaN, so just", "# return the length of the linestring", "return", "D", "d1", "=", "linestring", ".", "project", "(", "start_point", ")", "d2", "=", "linestring", ".", "project", "(", "end_point", ")", "d", "=", "d2", "-", "d1", "if", "0", "<", "d", "<", "D", "+", "100", ":", "return", "d", "else", ":", "# Something is probably wrong, so just", "# return the length of the linestring", "return", "D", "h", "[", "\"distance\"", "]", "=", "g", ".", "apply", "(", "compute_dist", ")", "# Convert from meters", "h", "[", "\"distance\"", "]", "=", "h", "[", "\"distance\"", "]", ".", "map", "(", "m_to_dist", ")", "else", ":", "h", "[", "\"distance\"", "]", "=", "np", ".", "nan", "# Reset index and compute final stats", "h", "=", "h", ".", "reset_index", "(", ")", "h", "[", "\"speed\"", "]", "=", "h", "[", "\"distance\"", "]", "/", "h", "[", "\"duration\"", "]", "h", "[", "[", "\"start_time\"", ",", "\"end_time\"", "]", "]", "=", "h", "[", "[", "\"start_time\"", ",", "\"end_time\"", "]", "]", ".", "applymap", "(", "lambda", "x", ":", "hp", ".", "timestr_to_seconds", "(", "x", ",", "inverse", "=", "True", ")", ")", "return", "h", ".", "sort_values", "(", "[", "\"route_id\"", ",", "\"direction_id\"", ",", "\"start_time\"", "]", ")" ]
Return a DataFrame with the following columns: - ``'trip_id'`` - ``'route_id'`` - ``'route_short_name'`` - ``'route_type'`` - ``'direction_id'``: NaN if missing from feed - ``'shape_id'``: NaN if missing from feed - ``'num_stops'``: number of stops on trip - ``'start_time'``: first departure time of the trip - ``'end_time'``: last departure time of the trip - ``'start_stop_id'``: stop ID of the first stop of the trip - ``'end_stop_id'``: stop ID of the last stop of the trip - ``'is_loop'``: 1 if the start and end stop are less than 400m apart and 0 otherwise - ``'distance'``: distance of the trip in ``feed.dist_units``; contains all ``np.nan`` entries if ``feed.shapes is None`` - ``'duration'``: duration of the trip in hours - ``'speed'``: distance/duration If ``feed.stop_times`` has a ``shape_dist_traveled`` column with at least one non-NaN value and ``compute_dist_from_shapes == False``, then use that column to compute the distance column. Else if ``feed.shapes is not None``, then compute the distance column using the shapes and Shapely. Otherwise, set the distances to NaN. If route IDs are given, then restrict to trips on those routes. Notes ----- - Assume the following feed attributes are not ``None``: * ``feed.trips`` * ``feed.routes`` * ``feed.stop_times`` * ``feed.shapes`` (optionally) * Those used in :func:`.stops.build_geometry_by_stop` - Calculating trip distances with ``compute_dist_from_shapes=True`` seems pretty accurate. For example, calculating trip distances on `this Portland feed <https://transitfeeds.com/p/trimet/43/1400947517>`_ using ``compute_dist_from_shapes=False`` and ``compute_dist_from_shapes=True``, yields a difference of at most 0.83km from the original values.
[ "Return", "a", "DataFrame", "with", "the", "following", "columns", ":" ]
python
train
36.86413
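A usage sketch for compute_trip_stats(); the zip path is hypothetical, and the loader is imported the way gtfstk usually exposes it:

from gtfstk import read_gtfs
from gtfstk.trips import compute_trip_stats

feed = read_gtfs('portland_gtfs.zip', dist_units='km')    # hypothetical file
stats = compute_trip_stats(feed)
print(stats[['trip_id', 'num_stops', 'distance', 'duration', 'speed']].head())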
joke2k/faker
faker/providers/date_time/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/date_time/__init__.py#L1645-L1682
def date_time_between_dates( self, datetime_start=None, datetime_end=None, tzinfo=None): """ Takes two DateTime objects and returns a random datetime between the two given datetimes. Accepts DateTime objects. :param datetime_start: DateTime :param datetime_end: DateTime :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ if datetime_start is None: datetime_start = datetime.now(tzinfo) if datetime_end is None: datetime_end = datetime.now(tzinfo) timestamp = self.generator.random.randint( datetime_to_timestamp(datetime_start), datetime_to_timestamp(datetime_end), ) try: if tzinfo is None: pick = datetime.fromtimestamp(timestamp, tzlocal()) pick = pick.astimezone(tzutc()).replace(tzinfo=None) else: pick = datetime.fromtimestamp(timestamp, tzinfo) except OverflowError: raise OverflowError( "You specified an end date with a timestamp bigger than the maximum allowed on this" " system. Please specify an earlier date.", ) return pick
[ "def", "date_time_between_dates", "(", "self", ",", "datetime_start", "=", "None", ",", "datetime_end", "=", "None", ",", "tzinfo", "=", "None", ")", ":", "if", "datetime_start", "is", "None", ":", "datetime_start", "=", "datetime", ".", "now", "(", "tzinfo", ")", "if", "datetime_end", "is", "None", ":", "datetime_end", "=", "datetime", ".", "now", "(", "tzinfo", ")", "timestamp", "=", "self", ".", "generator", ".", "random", ".", "randint", "(", "datetime_to_timestamp", "(", "datetime_start", ")", ",", "datetime_to_timestamp", "(", "datetime_end", ")", ",", ")", "try", ":", "if", "tzinfo", "is", "None", ":", "pick", "=", "datetime", ".", "fromtimestamp", "(", "timestamp", ",", "tzlocal", "(", ")", ")", "pick", "=", "pick", ".", "astimezone", "(", "tzutc", "(", ")", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "else", ":", "pick", "=", "datetime", ".", "fromtimestamp", "(", "timestamp", ",", "tzinfo", ")", "except", "OverflowError", ":", "raise", "OverflowError", "(", "\"You specified an end date with a timestamp bigger than the maximum allowed on this\"", "\" system. Please specify an earlier date.\"", ",", ")", "return", "pick" ]
Takes two DateTime objects and returns a random datetime between the two given datetimes. Accepts DateTime objects. :param datetime_start: DateTime :param datetime_end: DateTime :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime
[ "Takes", "two", "DateTime", "objects", "and", "returns", "a", "random", "datetime", "between", "the", "two", "given", "datetimes", ".", "Accepts", "DateTime", "objects", "." ]
python
train
35.184211
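Typical use goes through a Faker instance, which exposes this provider method directly:

from datetime import datetime
from faker import Faker

fake = Faker()
pick = fake.date_time_between_dates(
    datetime_start=datetime(1999, 1, 1),
    datetime_end=datetime(1999, 12, 31),
)
print(pick)    # e.g. 1999-02-02 11:42:52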
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1875-L1885
def setVisibleColumns(self, visible):
        """
        Sets the list of visible columns for this widget.  This method will
        take any column in this tree's list NOT found within the given
        column list and hide them.

        :param      visible | [<str>, ..]
        """
        colnames = self.columns()
        for c, column in enumerate(colnames):
            self.setColumnHidden(c, column not in visible)
[ "def", "setVisibleColumns", "(", "self", ",", "visible", ")", ":", "colnames", "=", "self", ".", "columns", "(", ")", "for", "c", ",", "column", "in", "enumerate", "(", "colnames", ")", ":", "self", ".", "setColumnHidden", "(", "c", ",", "column", "not", "in", "visible", ")" ]
Sets the list of visible columns for this widget. This method will take any column in this tree's list NOT found within the given column list and hide them. :param visible | [<str>, ..]
[ "Sets", "the", "list", "of", "visible", "columns", "for", "this", "widget", ".", "This", "method", "will", "take", "any", "column", "in", "this", "tree", "s", "list", "NOT", "found", "within", "the", "inputed", "column", "list", "and", "hide", "them", ".", ":", "param", "columns", "|", "[", "<str", ">", "..", "]" ]
python
train
39.545455
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/build/lib/biosignalsnotebooks/detect.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/detect.py#L428-L452
def _integration(data, sample_rate): """ Moving window integration. N is the number of samples in the width of the integration window ---------- Parameters ---------- data : ndarray Samples of the signal where a moving window integration will be applied. sample_rate : int Sampling rate at which the acquisition took place. Returns ------- out : ndarray Integrated signal samples. """ wind_size = int(0.080 * sample_rate) int_ecg = numpy.zeros_like(data) cum_sum = data.cumsum() int_ecg[wind_size:] = (cum_sum[wind_size:] - cum_sum[:-wind_size]) / wind_size int_ecg[:wind_size] = cum_sum[:wind_size] / numpy.arange(1, wind_size + 1) return int_ecg
[ "def", "_integration", "(", "data", ",", "sample_rate", ")", ":", "wind_size", "=", "int", "(", "0.080", "*", "sample_rate", ")", "int_ecg", "=", "numpy", ".", "zeros_like", "(", "data", ")", "cum_sum", "=", "data", ".", "cumsum", "(", ")", "int_ecg", "[", "wind_size", ":", "]", "=", "(", "cum_sum", "[", "wind_size", ":", "]", "-", "cum_sum", "[", ":", "-", "wind_size", "]", ")", "/", "wind_size", "int_ecg", "[", ":", "wind_size", "]", "=", "cum_sum", "[", ":", "wind_size", "]", "/", "numpy", ".", "arange", "(", "1", ",", "wind_size", "+", "1", ")", "return", "int_ecg" ]
Moving window integration. N is the number of samples in the width of the integration window ---------- Parameters ---------- data : ndarray Samples of the signal where a moving window integration will be applied. sample_rate : int Sampling rate at which the acquisition took place. Returns ------- out : ndarray Integrated signal samples.
[ "Moving", "window", "integration", ".", "N", "is", "the", "number", "of", "samples", "in", "the", "width", "of", "the", "integration", "window" ]
python
train
28.88
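The cumulative-sum trick in _integration() is an O(n) moving average. A self-contained sanity check: for a constant signal, every window mean — including the ramp-in handling of the first wind_size samples — must reproduce the signal:

import numpy

sample_rate = 100                        # Hz, so wind_size = int(0.080 * 100) = 8
data = numpy.ones(20)

wind_size = int(0.080 * sample_rate)
int_ecg = numpy.zeros_like(data)
cum_sum = data.cumsum()
int_ecg[wind_size:] = (cum_sum[wind_size:] - cum_sum[:-wind_size]) / wind_size
int_ecg[:wind_size] = cum_sum[:wind_size] / numpy.arange(1, wind_size + 1)
print(numpy.allclose(int_ecg, data))     # True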
kubernetes-client/python
kubernetes/client/apis/storage_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/storage_v1_api.py#L824-L850
def list_storage_class(self, **kwargs): """ list or watch objects of kind StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_storage_class(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StorageClassList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_storage_class_with_http_info(**kwargs) else: (data) = self.list_storage_class_with_http_info(**kwargs) return data
[ "def", "list_storage_class", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_storage_class_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_storage_class_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
list or watch objects of kind StorageClass This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_storage_class(async_req=True) >>> result = thread.get() :param async_req bool :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1StorageClassList If the method is called asynchronously, returns the request thread.
[ "list", "or", "watch", "objects", "of", "kind", "StorageClass", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "list_storage_class", "(", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
166.62963
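Typical synchronous use of list_storage_class(), assuming a reachable cluster and a local kubeconfig:

from kubernetes import client, config

config.load_kube_config()        # inside a pod: config.load_incluster_config()
api = client.StorageV1Api()
for sc in api.list_storage_class().items:
    print(sc.metadata.name, sc.provisioner)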
bitesofcode/projexui
projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xwizardbrowserdialog/xwizardplugin.py#L207-L219
def register( cls, plugin ): """ Registers a particular plugin to the global system at the given name. :param plugin | <XWizardPlugin> """ if ( not plugin ): return if ( cls._plugins is None ): cls._plugins = {} cls._plugins[plugin.uniqueName()] = plugin
[ "def", "register", "(", "cls", ",", "plugin", ")", ":", "if", "(", "not", "plugin", ")", ":", "return", "if", "(", "cls", ".", "_plugins", "is", "None", ")", ":", "cls", ".", "_plugins", "=", "{", "}", "cls", ".", "_plugins", "[", "plugin", ".", "uniqueName", "(", ")", "]", "=", "plugin" ]
Registers a particular plugin to the global system at the given name. :param plugin | <XWizardPlugin>
[ "Registers", "a", "particular", "plugin", "to", "the", "global", "system", "at", "the", "given", "name", ".", ":", "param", "plugin", "|", "<XWizardPlugin", ">" ]
python
train
28.384615
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L2447-L2509
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Load Logical Partition (requires classic mode).""" assert wait_for_completion is True # async not supported yet lpar_oid = uri_parms[0] lpar_uri = '/api/logical-partitions/' + lpar_oid try: lpar = hmc.lookup_by_uri(lpar_uri) except KeyError: raise InvalidResourceError(method, uri) cpc = lpar.manager.parent assert not cpc.dpm_enabled status = lpar.properties.get('status', None) force = body.get('force', False) if body else False clear_indicator = body.get('clear-indicator', True) if body else True store_status_indicator = body.get('store-status-indicator', False) if body else False if status == 'not-activated': raise ConflictError(method, uri, reason=0, message="LPAR {!r} could not be loaded " "because the LPAR is in status {}.". format(lpar.name, status)) elif status == 'operating' and not force: raise ServerError(method, uri, reason=263, message="LPAR {!r} could not be loaded " "because the LPAR is already loaded " "(and force was not specified).". format(lpar.name)) load_address = body.get('load-address', None) if body else None if not load_address: # Starting with z14, this parameter is optional and a last-used # property is available. load_address = lpar.properties.get('last-used-load-address', None) if load_address is None: # TODO: Verify actual error for this case on a z14. raise BadRequestError(method, uri, reason=5, message="LPAR {!r} could not be loaded " "because a load address is not specified " "in the request or in the Lpar last-used " "property". format(lpar.name)) load_parameter = body.get('load-parameter', None) if body else None if not load_parameter: # Starting with z14, a last-used property is available. load_parameter = lpar.properties.get( 'last-used-load-parameter', None) if load_parameter is None: load_parameter = '' # Reflect the load in the resource if clear_indicator: lpar.properties['memory'] = '' if store_status_indicator: lpar.properties['stored-status'] = status else: lpar.properties['stored-status'] = None lpar.properties['status'] = LparLoadHandler.get_status() lpar.properties['last-used-load-address'] = load_address lpar.properties['last-used-load-parameter'] = load_parameter
[ "def", "post", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", ":", "assert", "wait_for_completion", "is", "True", "# async not supported yet", "lpar_oid", "=", "uri_parms", "[", "0", "]", "lpar_uri", "=", "'/api/logical-partitions/'", "+", "lpar_oid", "try", ":", "lpar", "=", "hmc", ".", "lookup_by_uri", "(", "lpar_uri", ")", "except", "KeyError", ":", "raise", "InvalidResourceError", "(", "method", ",", "uri", ")", "cpc", "=", "lpar", ".", "manager", ".", "parent", "assert", "not", "cpc", ".", "dpm_enabled", "status", "=", "lpar", ".", "properties", ".", "get", "(", "'status'", ",", "None", ")", "force", "=", "body", ".", "get", "(", "'force'", ",", "False", ")", "if", "body", "else", "False", "clear_indicator", "=", "body", ".", "get", "(", "'clear-indicator'", ",", "True", ")", "if", "body", "else", "True", "store_status_indicator", "=", "body", ".", "get", "(", "'store-status-indicator'", ",", "False", ")", "if", "body", "else", "False", "if", "status", "==", "'not-activated'", ":", "raise", "ConflictError", "(", "method", ",", "uri", ",", "reason", "=", "0", ",", "message", "=", "\"LPAR {!r} could not be loaded \"", "\"because the LPAR is in status {}.\"", ".", "format", "(", "lpar", ".", "name", ",", "status", ")", ")", "elif", "status", "==", "'operating'", "and", "not", "force", ":", "raise", "ServerError", "(", "method", ",", "uri", ",", "reason", "=", "263", ",", "message", "=", "\"LPAR {!r} could not be loaded \"", "\"because the LPAR is already loaded \"", "\"(and force was not specified).\"", ".", "format", "(", "lpar", ".", "name", ")", ")", "load_address", "=", "body", ".", "get", "(", "'load-address'", ",", "None", ")", "if", "body", "else", "None", "if", "not", "load_address", ":", "# Starting with z14, this parameter is optional and a last-used", "# property is available.", "load_address", "=", "lpar", ".", "properties", ".", "get", "(", "'last-used-load-address'", ",", "None", ")", "if", "load_address", "is", "None", ":", "# TODO: Verify actual error for this case on a z14.", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "5", ",", "message", "=", "\"LPAR {!r} could not be loaded \"", "\"because a load address is not specified \"", "\"in the request or in the Lpar last-used \"", "\"property\"", ".", "format", "(", "lpar", ".", "name", ")", ")", "load_parameter", "=", "body", ".", "get", "(", "'load-parameter'", ",", "None", ")", "if", "body", "else", "None", "if", "not", "load_parameter", ":", "# Starting with z14, a last-used property is available.", "load_parameter", "=", "lpar", ".", "properties", ".", "get", "(", "'last-used-load-parameter'", ",", "None", ")", "if", "load_parameter", "is", "None", ":", "load_parameter", "=", "''", "# Reflect the load in the resource", "if", "clear_indicator", ":", "lpar", ".", "properties", "[", "'memory'", "]", "=", "''", "if", "store_status_indicator", ":", "lpar", ".", "properties", "[", "'stored-status'", "]", "=", "status", "else", ":", "lpar", ".", "properties", "[", "'stored-status'", "]", "=", "None", "lpar", ".", "properties", "[", "'status'", "]", "=", "LparLoadHandler", ".", "get_status", "(", ")", "lpar", ".", "properties", "[", "'last-used-load-address'", "]", "=", "load_address", "lpar", ".", "properties", "[", "'last-used-load-parameter'", "]", "=", "load_parameter" ]
Operation: Load Logical Partition (requires classic mode).
[ "Operation", ":", "Load", "Logical", "Partition", "(", "requires", "classic", "mode", ")", "." ]
python
train
48.222222
merll/docker-map
dockermap/map/yaml.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/yaml.py#L122-L134
def load_clients_file(filename, configuration_class=ClientConfiguration): """ Loads client configurations from a YAML file. :param filename: YAML file name. :type filename: unicode | str :param configuration_class: Class of the configuration object to create. :type configuration_class: class :return: A dictionary of client configuration objects. :rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration] """ with open(filename, 'r') as f: return load_clients(f, configuration_class=configuration_class)
[ "def", "load_clients_file", "(", "filename", ",", "configuration_class", "=", "ClientConfiguration", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "return", "load_clients", "(", "f", ",", "configuration_class", "=", "configuration_class", ")" ]
Loads client configurations from a YAML file. :param filename: YAML file name. :type filename: unicode | str :param configuration_class: Class of the configuration object to create. :type configuration_class: class :return: A dictionary of client configuration objects. :rtype: dict[unicode | str, dockermap.map.config.client.ClientConfiguration]
[ "Loads", "client", "configurations", "from", "a", "YAML", "file", "." ]
python
train
43.076923
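A call sketch for load_clients_file(); the file name is hypothetical, and per the docstring the result maps client names to ClientConfiguration objects:

configs = load_clients_file('clients.yaml')    # hypothetical path
for name, client_config in configs.items():
    print(name, client_config)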
wesleyfr/boxpython
boxpython/session.py
https://github.com/wesleyfr/boxpython/blob/f00a8ada6dff2c7ffc88bf89d4e15965c5adb422/boxpython/session.py#L529-L550
def search(self, **kwargs):
        """Searches for files/folders

        Args:
            \*\*kwargs (dict): A dictionary containing necessary parameters
                (check https://developers.box.com/docs/#search for
                list of parameters)

        Returns:
            dict. Response from Box.

        Raises:
            BoxError: An error response is returned from Box (status_code >= 400).

            BoxHttpResponseError: Response from Box is malformed.

            requests.exceptions.*: Any connection related problem.
        """
        query_string = {}
        for key, value in kwargs.items():
            query_string[key] = value

        return self.__request("GET","search",querystring=query_string)
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "query_string", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", "(", ")", ":", "query_string", "[", "key", "]", "=", "value", "return", "self", ".", "__request", "(", "\"GET\"", ",", "\"search\"", ",", "querystring", "=", "query_string", ")" ]
Searches for files/folders Args: \*\*kwargs (dict): A dictionary containing necessary parameters (check https://developers.box.com/docs/#search for list of parameters) Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
[ "Searches", "for", "files", "/", "folders" ]
python
train
33.863636
google/textfsm
textfsm/terminal.py
https://github.com/google/textfsm/blob/63a2aaece33e07947aa80963dca99b893964633b/textfsm/terminal.py#L170-L182
def TerminalSize(): """Returns terminal length and width as a tuple.""" try: with open(os.ctermid(), 'r') as tty_instance: length_width = struct.unpack( 'hh', fcntl.ioctl(tty_instance.fileno(), termios.TIOCGWINSZ, '1234')) except (IOError, OSError): try: length_width = (int(os.environ['LINES']), int(os.environ['COLUMNS'])) except (ValueError, KeyError): length_width = (24, 80) return length_width
[ "def", "TerminalSize", "(", ")", ":", "try", ":", "with", "open", "(", "os", ".", "ctermid", "(", ")", ",", "'r'", ")", "as", "tty_instance", ":", "length_width", "=", "struct", ".", "unpack", "(", "'hh'", ",", "fcntl", ".", "ioctl", "(", "tty_instance", ".", "fileno", "(", ")", ",", "termios", ".", "TIOCGWINSZ", ",", "'1234'", ")", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "try", ":", "length_width", "=", "(", "int", "(", "os", ".", "environ", "[", "'LINES'", "]", ")", ",", "int", "(", "os", ".", "environ", "[", "'COLUMNS'", "]", ")", ")", "except", "(", "ValueError", ",", "KeyError", ")", ":", "length_width", "=", "(", "24", ",", "80", ")", "return", "length_width" ]
Returns terminal length and width as a tuple.
[ "Returns", "terminal", "length", "and", "width", "as", "a", "tuple", "." ]
python
train
35.153846
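On Python 3.3+ the standard library covers the same need; note that shutil reports (columns, lines) while TerminalSize() returns (length, width), i.e. the reverse order:

import shutil

size = shutil.get_terminal_size(fallback=(80, 24))    # fallback is (columns, lines)
print((size.lines, size.columns))                     # same order as TerminalSize()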
eqcorrscan/EQcorrscan
eqcorrscan/core/subspace.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/subspace.py#L417-L442
def plot(self, stachans='all', size=(10, 7), show=True):
        """
        Plot the output basis vectors for the detector at the given dimension.

        Corresponds to the first n horizontal vectors of the V matrix.

        :type stachans: list
        :param stachans: List of tuples of (station, channel) to use. Can set\
            to 'all' to use all the station-channel pairs available. If \
            detector is multiplexed, will just plot that.
        :type size: tuple
        :param size: Figure size.
        :type show: bool
        :param show: Whether or not to show the figure.

        :returns: Figure
        :rtype: matplotlib.pyplot.Figure

        .. Note::
            See :func:`eqcorrscan.utils.plotting.subspace_detector_plot` for
            example.
        """
        return subspace_detector_plot(detector=self, stachans=stachans,
                                      size=size, show=show)
[ "def", "plot", "(", "self", ",", "stachans", "=", "'all'", ",", "size", "=", "(", "10", ",", "7", ")", ",", "show", "=", "True", ")", ":", "return", "subspace_detector_plot", "(", "detector", "=", "self", ",", "stachans", "=", "stachans", ",", "size", "=", "size", ",", "show", "=", "show", ")" ]
Plot the output basis vectors for the detector at the given dimension. Corresponds to the first n horizontal vectors of the V matrix. :type stachans: list :param stachans: List of tuples of (station, channel) to use. Can set\ to 'all' to use all the station-channel pairs available. If \ detector is multiplexed, will just plot that. :type size: tuple :param size: Figure size. :type show: bool :param show: Whether or not to show the figure. :returns: Figure :rtype: matplotlib.pyplot.Figure .. Note:: See :func:`eqcorrscan.utils.plotting.subspace_detector_plot` for example.
[ "Plot", "the", "output", "basis", "vectors", "for", "the", "detector", "at", "the", "given", "dimension", "." ]
python
train
38.807692
lambdamusic/Ontospy
ontospy/extras/shell_lib.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L1123-L1144
def _do_shell(self, line): """Send a command to the Unix shell.\n==> Usage: shell ls ~""" if not line: return sp = Popen(line, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=not WINDOWS) (fo, fe) = (sp.stdout, sp.stderr) if PY2: out = fo.read().strip(EOL) err = fe.read().strip(EOL) else: out = fo.read().decode("utf-8") err = fe.read().decode("utf-8") if out: print(out) return if err: print(err.replace('isbn_', ''))
[ "def", "_do_shell", "(", "self", ",", "line", ")", ":", "if", "not", "line", ":", "return", "sp", "=", "Popen", "(", "line", ",", "shell", "=", "True", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "close_fds", "=", "not", "WINDOWS", ")", "(", "fo", ",", "fe", ")", "=", "(", "sp", ".", "stdout", ",", "sp", ".", "stderr", ")", "if", "PY2", ":", "out", "=", "fo", ".", "read", "(", ")", ".", "strip", "(", "EOL", ")", "err", "=", "fe", ".", "read", "(", ")", ".", "strip", "(", "EOL", ")", "else", ":", "out", "=", "fo", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "err", "=", "fe", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "if", "out", ":", "print", "(", "out", ")", "return", "if", "err", ":", "print", "(", "err", ".", "replace", "(", "'isbn_'", ",", "''", ")", ")" ]
Send a command to the Unix shell.\n==> Usage: shell ls ~
[ "Send", "a", "command", "to", "the", "Unix", "shell", ".", "\\", "n", "==", ">", "Usage", ":", "shell", "ls", "~" ]
python
train
30.227273
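On Python 3.7+ the same round trip collapses into subprocess.run, which subsumes the Popen plumbing and the PY2 branch in _do_shell() above:

import subprocess

result = subprocess.run('ls ~', shell=True, capture_output=True, text=True)
if result.stdout:
    print(result.stdout, end='')
if result.stderr:
    print(result.stderr, end='')    # the original also strips 'isbn_' here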
mishbahr/djangocms-forms
djangocms_forms/forms.py
https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/forms.py#L75-L84
def clean_form_template(self): """ Check if template exists """ form_template = self.cleaned_data.get('form_template', '') if form_template: try: get_template(form_template) except TemplateDoesNotExist: msg = _('Selected Form Template does not exist.') raise forms.ValidationError(msg) return form_template
[ "def", "clean_form_template", "(", "self", ")", ":", "form_template", "=", "self", ".", "cleaned_data", ".", "get", "(", "'form_template'", ",", "''", ")", "if", "form_template", ":", "try", ":", "get_template", "(", "form_template", ")", "except", "TemplateDoesNotExist", ":", "msg", "=", "_", "(", "'Selected Form Template does not exist.'", ")", "raise", "forms", ".", "ValidationError", "(", "msg", ")", "return", "form_template" ]
Check if template exists
[ "Check", "if", "template", "exists" ]
python
train
40.1
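The existence check in isolation — get_template() raises rather than returning None, so the validation hinges on the exception; the template path below is hypothetical:

from django.template import TemplateDoesNotExist
from django.template.loader import get_template

try:
    get_template('djangocms_forms/form_template/default.html')
except TemplateDoesNotExist:
    print('Selected Form Template does not exist.')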
genialis/resolwe
resolwe/flow/views/mixins.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L44-L50
def perform_create(self, serializer): """Create a resource.""" with transaction.atomic(): instance = serializer.save() # Assign all permissions to the object contributor. assign_contributor_permissions(instance)
[ "def", "perform_create", "(", "self", ",", "serializer", ")", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "instance", "=", "serializer", ".", "save", "(", ")", "# Assign all permissions to the object contributor.", "assign_contributor_permissions", "(", "instance", ")" ]
Create a resource.
[ "Create", "a", "resource", "." ]
python
train
36.857143
openstack/proliantutils
proliantutils/redfish/resources/system/iscsi.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/iscsi.py#L42-L51
def iscsi_settings(self):
        """Property to provide reference to iSCSI settings instance

        It is calculated once, the first time it is queried. On refresh,
        this property gets reset.
        """
        return ISCSISettings(
            self._conn, utils.get_subresource_path_by(
                self, ["@Redfish.Settings", "SettingsObject"]),
            redfish_version=self.redfish_version)
[ "def", "iscsi_settings", "(", "self", ")", ":", "return", "ISCSISettings", "(", "self", ".", "_conn", ",", "utils", ".", "get_subresource_path_by", "(", "self", ",", "[", "\"@Redfish.Settings\"", ",", "\"SettingsObject\"", "]", ")", ",", "redfish_version", "=", "self", ".", "redfish_version", ")" ]
Property to provide reference to iSCSI settings instance It is calculated once, the first time it is queried. On refresh, this property gets reset.
[ "Property", "to", "provide", "reference", "to", "iSCSI", "settings", "instance" ]
python
train
40.7
django-danceschool/django-danceschool
danceschool/core/ajax.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/ajax.py#L106-L136
def getEmailTemplate(request): ''' This function handles the Ajax call made when a user wants a specific email template ''' if request.method != 'POST': return HttpResponse(_('Error, no POST data.')) if not hasattr(request,'user'): return HttpResponse(_('Error, not authenticated.')) template_id = request.POST.get('template') if not template_id: return HttpResponse(_("Error, no template ID provided.")) try: this_template = EmailTemplate.objects.get(id=template_id) except ObjectDoesNotExist: return HttpResponse(_("Error getting template.")) if this_template.groupRequired and this_template.groupRequired not in request.user.groups.all(): return HttpResponse(_("Error, no permission to access this template.")) if this_template.hideFromForm: return HttpResponse(_("Error, no permission to access this template.")) return JsonResponse({ 'subject': this_template.subject, 'content': this_template.content, 'html_content': this_template.html_content, 'richTextChoice': this_template.richTextChoice, })
[ "def", "getEmailTemplate", "(", "request", ")", ":", "if", "request", ".", "method", "!=", "'POST'", ":", "return", "HttpResponse", "(", "_", "(", "'Error, no POST data.'", ")", ")", "if", "not", "hasattr", "(", "request", ",", "'user'", ")", ":", "return", "HttpResponse", "(", "_", "(", "'Error, not authenticated.'", ")", ")", "template_id", "=", "request", ".", "POST", ".", "get", "(", "'template'", ")", "if", "not", "template_id", ":", "return", "HttpResponse", "(", "_", "(", "\"Error, no template ID provided.\"", ")", ")", "try", ":", "this_template", "=", "EmailTemplate", ".", "objects", ".", "get", "(", "id", "=", "template_id", ")", "except", "ObjectDoesNotExist", ":", "return", "HttpResponse", "(", "_", "(", "\"Error getting template.\"", ")", ")", "if", "this_template", ".", "groupRequired", "and", "this_template", ".", "groupRequired", "not", "in", "request", ".", "user", ".", "groups", ".", "all", "(", ")", ":", "return", "HttpResponse", "(", "_", "(", "\"Error, no permission to access this template.\"", ")", ")", "if", "this_template", ".", "hideFromForm", ":", "return", "HttpResponse", "(", "_", "(", "\"Error, no permission to access this template.\"", ")", ")", "return", "JsonResponse", "(", "{", "'subject'", ":", "this_template", ".", "subject", ",", "'content'", ":", "this_template", ".", "content", ",", "'html_content'", ":", "this_template", ".", "html_content", ",", "'richTextChoice'", ":", "this_template", ".", "richTextChoice", ",", "}", ")" ]
This function handles the Ajax call made when a user wants a specific email template
[ "This", "function", "handles", "the", "Ajax", "call", "made", "when", "a", "user", "wants", "a", "specific", "email", "template" ]
python
train
37
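Exercising getEmailTemplate() with Django's test client; the URL path is hypothetical and depends on how the view is wired into urls.py, and template id 1 is assumed to exist:

from django.test import Client

c = Client()
response = c.post('/ajax/email-template/', {'template': 1})
print(response.json()['subject'])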
Capitains/MyCapytain
MyCapytain/resources/prototypes/metadata.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/metadata.py#L461-L468
def get_creator(self, lang=None): """ Get the DC Creator literal value :param lang: Language to retrieve :return: Creator string representation :rtype: Literal """ return self.metadata.get_single(key=DC.creator, lang=lang)
[ "def", "get_creator", "(", "self", ",", "lang", "=", "None", ")", ":", "return", "self", ".", "metadata", ".", "get_single", "(", "key", "=", "DC", ".", "creator", ",", "lang", "=", "lang", ")" ]
Get the DC Creator literal value :param lang: Language to retrieve :return: Creator string representation :rtype: Literal
[ "Get", "the", "DC", "Creator", "literal", "value" ]
python
train
33