id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
sequence
docstring
stringlengths
3
17.3k
docstring_tokens
sequence
sha
stringlengths
40
40
url
stringlengths
87
242
247,400
pjanis/funtool
funtool/state_collection.py
add_grouping
def add_grouping(state_collection, grouping_name, loaded_processes, overriding_parameters=None): """ Adds a grouping to a state collection by using the process selected by the grouping name Does not override existing groupings """ if ( grouping_name not in state_collection.groupings and loaded_processes != None and loaded_processes["grouping_selector"].get(grouping_name) != None ): state_collection = loaded_processes["grouping_selector"][grouping_name].process_function(state_collection,overriding_parameters) return state_collection
python
def add_grouping(state_collection, grouping_name, loaded_processes, overriding_parameters=None):
    """
    Add a grouping to a state collection using the process selected by the grouping name.

    Does not override existing groupings: if ``grouping_name`` is already present
    in ``state_collection.groupings`` the collection is returned unchanged.

    :param state_collection: collection object exposing a ``groupings`` mapping
    :param grouping_name: key selecting the grouping process to run
    :param loaded_processes: mapping with a ``"grouping_selector"`` sub-mapping of
        process objects exposing ``process_function``; may be ``None``
    :param overriding_parameters: optional parameters forwarded to the process
    :returns: the (possibly replaced) state collection
    """
    if (
        grouping_name not in state_collection.groupings
        # `is not None` is the idiomatic None test (the original used `!= None`)
        and loaded_processes is not None
        and loaded_processes["grouping_selector"].get(grouping_name) is not None
    ):
        selector = loaded_processes["grouping_selector"][grouping_name]
        state_collection = selector.process_function(state_collection, overriding_parameters)
    return state_collection
[ "def", "add_grouping", "(", "state_collection", ",", "grouping_name", ",", "loaded_processes", ",", "overriding_parameters", "=", "None", ")", ":", "if", "(", "grouping_name", "not", "in", "state_collection", ".", "groupings", "and", "loaded_processes", "!=", "None", "and", "loaded_processes", "[", "\"grouping_selector\"", "]", ".", "get", "(", "grouping_name", ")", "!=", "None", ")", ":", "state_collection", "=", "loaded_processes", "[", "\"grouping_selector\"", "]", "[", "grouping_name", "]", ".", "process_function", "(", "state_collection", ",", "overriding_parameters", ")", "return", "state_collection" ]
Adds a grouping to a state collection by using the process selected by the grouping name Does not override existing groupings
[ "Adds", "a", "grouping", "to", "a", "state", "collection", "by", "using", "the", "process", "selected", "by", "the", "grouping", "name", "Does", "not", "override", "existing", "groupings" ]
231851238f0a62bc3682d8fa937db9053378c53d
https://github.com/pjanis/funtool/blob/231851238f0a62bc3682d8fa937db9053378c53d/funtool/state_collection.py#L26-L38
247,401
pjanis/funtool
funtool/state_collection.py
add_group_to_grouping
def add_group_to_grouping(state_collection, grouping_name, group, group_key=None): """ Adds a group to the named grouping, with a given group key If no group key is given, the lowest available integer becomes the group key If no grouping exists by the given name a new one will be created Replaces any group with the same grouping_name and the same group_key """ if state_collection.groupings.get(grouping_name) is None: state_collection.groupings[grouping_name]= {} if group_key is None: group_key= _next_lowest_integer(state_collection.groupings[grouping_name].keys()) state_collection.groupings[grouping_name][group_key]= group return state_collection
python
def add_group_to_grouping(state_collection, grouping_name, group, group_key=None):
    """
    Store ``group`` in the named grouping under ``group_key``.

    If no group key is given, the lowest available integer becomes the group
    key.  If no grouping exists by the given name a new one will be created.
    Replaces any group with the same grouping_name and the same group_key.
    """
    groupings = state_collection.groupings
    if groupings.get(grouping_name) is None:
        groupings[grouping_name] = {}
    grouping = groupings[grouping_name]
    if group_key is None:
        group_key = _next_lowest_integer(grouping.keys())
    grouping[group_key] = group
    return state_collection
[ "def", "add_group_to_grouping", "(", "state_collection", ",", "grouping_name", ",", "group", ",", "group_key", "=", "None", ")", ":", "if", "state_collection", ".", "groupings", ".", "get", "(", "grouping_name", ")", "is", "None", ":", "state_collection", ".", "groupings", "[", "grouping_name", "]", "=", "{", "}", "if", "group_key", "is", "None", ":", "group_key", "=", "_next_lowest_integer", "(", "state_collection", ".", "groupings", "[", "grouping_name", "]", ".", "keys", "(", ")", ")", "state_collection", ".", "groupings", "[", "grouping_name", "]", "[", "group_key", "]", "=", "group", "return", "state_collection" ]
Adds a group to the named grouping, with a given group key If no group key is given, the lowest available integer becomes the group key If no grouping exists by the given name a new one will be created Replaces any group with the same grouping_name and the same group_key
[ "Adds", "a", "group", "to", "the", "named", "grouping", "with", "a", "given", "group", "key" ]
231851238f0a62bc3682d8fa937db9053378c53d
https://github.com/pjanis/funtool/blob/231851238f0a62bc3682d8fa937db9053378c53d/funtool/state_collection.py#L40-L55
247,402
pjanis/funtool
funtool/state_collection.py
_combined_grouping_values
def _combined_grouping_values(grouping_name,collection_a,collection_b): """ returns a dict with values from both collections for a given grouping name Warning: collection2 overrides collection1 if there is a group_key conflict """ new_grouping= collection_a.groupings.get(grouping_name,{}).copy() new_grouping.update(collection_b.groupings.get(grouping_name,{})) return new_grouping
python
def _combined_grouping_values(grouping_name,collection_a,collection_b): """ returns a dict with values from both collections for a given grouping name Warning: collection2 overrides collection1 if there is a group_key conflict """ new_grouping= collection_a.groupings.get(grouping_name,{}).copy() new_grouping.update(collection_b.groupings.get(grouping_name,{})) return new_grouping
[ "def", "_combined_grouping_values", "(", "grouping_name", ",", "collection_a", ",", "collection_b", ")", ":", "new_grouping", "=", "collection_a", ".", "groupings", ".", "get", "(", "grouping_name", ",", "{", "}", ")", ".", "copy", "(", ")", "new_grouping", ".", "update", "(", "collection_b", ".", "groupings", ".", "get", "(", "grouping_name", ",", "{", "}", ")", ")", "return", "new_grouping" ]
returns a dict with values from both collections for a given grouping name Warning: collection2 overrides collection1 if there is a group_key conflict
[ "returns", "a", "dict", "with", "values", "from", "both", "collections", "for", "a", "given", "grouping", "name" ]
231851238f0a62bc3682d8fa937db9053378c53d
https://github.com/pjanis/funtool/blob/231851238f0a62bc3682d8fa937db9053378c53d/funtool/state_collection.py#L67-L75
247,403
pjanis/funtool
funtool/state_collection.py
_next_lowest_integer
def _next_lowest_integer(group_keys): """ returns the lowest available integer in a set of dict keys """ try: #TODO Replace with max default value when dropping compatibility with Python < 3.4 largest_int= max([ int(val) for val in group_keys if _is_int(val)]) except: largest_int= 0 return largest_int + 1
python
def _next_lowest_integer(group_keys): """ returns the lowest available integer in a set of dict keys """ try: #TODO Replace with max default value when dropping compatibility with Python < 3.4 largest_int= max([ int(val) for val in group_keys if _is_int(val)]) except: largest_int= 0 return largest_int + 1
[ "def", "_next_lowest_integer", "(", "group_keys", ")", ":", "try", ":", "#TODO Replace with max default value when dropping compatibility with Python < 3.4", "largest_int", "=", "max", "(", "[", "int", "(", "val", ")", "for", "val", "in", "group_keys", "if", "_is_int", "(", "val", ")", "]", ")", "except", ":", "largest_int", "=", "0", "return", "largest_int", "+", "1" ]
returns the lowest available integer in a set of dict keys
[ "returns", "the", "lowest", "available", "integer", "in", "a", "set", "of", "dict", "keys" ]
231851238f0a62bc3682d8fa937db9053378c53d
https://github.com/pjanis/funtool/blob/231851238f0a62bc3682d8fa937db9053378c53d/funtool/state_collection.py#L77-L85
247,404
gfranxman/utinypass
utinypass/client.py
TinyPassApiClient.grant_user_access
def grant_user_access( self, uid, rid, expire_datetime = None, send_email=False ): ''' Takes a user id and resource id and records a grant of access to that reseource for the user. If no expire_date is set, we'll default to 24 hours. If send_email is set to True, Tinypass will send an email related to the grant. No return value, raises ValueError. ''' path = "/api/v3/publisher/user/access/grant" # convert expire_date to gmt seconds if expire_datetime: expires_seconds = calendar.timegm(expires_datetime.timetuple()) else: expires_seconds = calendar.timegm(datetime.datetime.now().timetuple()) + (60*60*24) data = { 'api_token': self.api_token, 'aid': self.app_id, 'rid': rid, 'uid': uid, 'expire_date': expires_seconds, 'send_email': send_email, } r = requests.get( self.base_url + path, data=data ) if r.status_code != 200: raise ValueError( path + ":" + r.reason )
python
def grant_user_access(self, uid, rid, expire_datetime=None, send_email=False):
    '''
    Takes a user id and resource id and records a grant of access
    to that resource for the user.

    If no expire_datetime is given, we'll default to 24 hours from now.
    If send_email is set to True, Tinypass will send an email related
    to the grant.

    No return value; raises ValueError on a non-200 response.
    '''
    path = "/api/v3/publisher/user/access/grant"

    # convert expire_datetime to gmt seconds
    if expire_datetime:
        # BUG FIX: the original referenced the undefined name
        # "expires_datetime", raising NameError whenever a datetime was passed.
        expires_seconds = calendar.timegm(expire_datetime.timetuple())
    else:
        expires_seconds = calendar.timegm(datetime.datetime.now().timetuple()) + (60 * 60 * 24)

    data = {
        'api_token': self.api_token,
        'aid': self.app_id,
        'rid': rid,
        'uid': uid,
        'expire_date': expires_seconds,
        'send_email': send_email,
    }
    # NOTE(review): a state-changing grant issued via GET looks suspicious --
    # confirm against the Tinypass API docs before changing to POST.
    r = requests.get(self.base_url + path, data=data)
    if r.status_code != 200:
        raise ValueError(path + ":" + r.reason)
[ "def", "grant_user_access", "(", "self", ",", "uid", ",", "rid", ",", "expire_datetime", "=", "None", ",", "send_email", "=", "False", ")", ":", "path", "=", "\"/api/v3/publisher/user/access/grant\"", "# convert expire_date to gmt seconds", "if", "expire_datetime", ":", "expires_seconds", "=", "calendar", ".", "timegm", "(", "expires_datetime", ".", "timetuple", "(", ")", ")", "else", ":", "expires_seconds", "=", "calendar", ".", "timegm", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "+", "(", "60", "*", "60", "*", "24", ")", "data", "=", "{", "'api_token'", ":", "self", ".", "api_token", ",", "'aid'", ":", "self", ".", "app_id", ",", "'rid'", ":", "rid", ",", "'uid'", ":", "uid", ",", "'expire_date'", ":", "expires_seconds", ",", "'send_email'", ":", "send_email", ",", "}", "r", "=", "requests", ".", "get", "(", "self", ".", "base_url", "+", "path", ",", "data", "=", "data", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "ValueError", "(", "path", "+", "\":\"", "+", "r", ".", "reason", ")" ]
Takes a user id and resource id and records a grant of access to that resource for the user. If no expire_date is set, we'll default to 24 hours. If send_email is set to True, Tinypass will send an email related to the grant. No return value, raises ValueError.
[ "Takes", "a", "user", "id", "and", "resource", "id", "and", "records", "a", "grant", "of", "access", "to", "that", "reseource", "for", "the", "user", ".", "If", "no", "expire_date", "is", "set", "we", "ll", "default", "to", "24", "hours", ".", "If", "send_email", "is", "set", "to", "True", "Tinypass", "will", "send", "an", "email", "related", "to", "the", "grant", ".", "No", "return", "value", "raises", "ValueError", "." ]
c49cff25ae408dbbb58ec98d1c87894474011cdf
https://github.com/gfranxman/utinypass/blob/c49cff25ae408dbbb58ec98d1c87894474011cdf/utinypass/client.py#L87-L114
247,405
gfranxman/utinypass
utinypass/client.py
TinyPassApiClient.revoke_user_access
def revoke_user_access( self, access_id ): ''' Takes an access_id, probably obtained from the get_access_list structure, and revokes that access. No return value, but may raise ValueError. ''' path = "/api/v3/publisher/user/access/revoke" data = { 'api_token': self.api_token, 'access_id': access_id, } r = requests.get( self.base_url + path, data=data ) if r.status_code != 200: raise ValueError( path + ":" + r.reason )
python
def revoke_user_access(self, access_id):
    '''
    Takes an access_id, probably obtained from the get_access_list
    structure, and revokes that access.

    No return value, but may raise ValueError.
    '''
    path = "/api/v3/publisher/user/access/revoke"
    payload = {
        'api_token': self.api_token,
        'access_id': access_id,
    }
    response = requests.get(self.base_url + path, data=payload)
    if response.status_code != 200:
        raise ValueError(path + ":" + response.reason)
[ "def", "revoke_user_access", "(", "self", ",", "access_id", ")", ":", "path", "=", "\"/api/v3/publisher/user/access/revoke\"", "data", "=", "{", "'api_token'", ":", "self", ".", "api_token", ",", "'access_id'", ":", "access_id", ",", "}", "r", "=", "requests", ".", "get", "(", "self", ".", "base_url", "+", "path", ",", "data", "=", "data", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "ValueError", "(", "path", "+", "\":\"", "+", "r", ".", "reason", ")" ]
Takes an access_id, probably obtained from the get_access_list structure, and revokes that access. No return value, but may raise ValueError.
[ "Takes", "an", "access_id", "probably", "obtained", "from", "the", "get_access_list", "structure", "and", "revokes", "that", "access", ".", "No", "return", "value", "but", "may", "raise", "ValueError", "." ]
c49cff25ae408dbbb58ec98d1c87894474011cdf
https://github.com/gfranxman/utinypass/blob/c49cff25ae408dbbb58ec98d1c87894474011cdf/utinypass/client.py#L118-L133
247,406
hawkowl/txctools
txctools/reports/hotspot.py
HotspotReport.process
def process(self): """ Process the warnings. """ for filename, warnings in self.warnings.iteritems(): self.fileCounts[filename] = {} fc = self.fileCounts[filename] fc["warning_count"] = len(warnings) fc["warning_breakdown"] = self._warnCount(warnings) self.warningCounts = self._warnCount(warnings, warningCount=self.warningCounts)
python
def process(self): """ Process the warnings. """ for filename, warnings in self.warnings.iteritems(): self.fileCounts[filename] = {} fc = self.fileCounts[filename] fc["warning_count"] = len(warnings) fc["warning_breakdown"] = self._warnCount(warnings) self.warningCounts = self._warnCount(warnings, warningCount=self.warningCounts)
[ "def", "process", "(", "self", ")", ":", "for", "filename", ",", "warnings", "in", "self", ".", "warnings", ".", "iteritems", "(", ")", ":", "self", ".", "fileCounts", "[", "filename", "]", "=", "{", "}", "fc", "=", "self", ".", "fileCounts", "[", "filename", "]", "fc", "[", "\"warning_count\"", "]", "=", "len", "(", "warnings", ")", "fc", "[", "\"warning_breakdown\"", "]", "=", "self", ".", "_warnCount", "(", "warnings", ")", "self", ".", "warningCounts", "=", "self", ".", "_warnCount", "(", "warnings", ",", "warningCount", "=", "self", ".", "warningCounts", ")" ]
Process the warnings.
[ "Process", "the", "warnings", "." ]
14cab033ea179211a7bfd88dc202d576fc336ddc
https://github.com/hawkowl/txctools/blob/14cab033ea179211a7bfd88dc202d576fc336ddc/txctools/reports/hotspot.py#L39-L51
247,407
hawkowl/txctools
txctools/reports/hotspot.py
HotspotReport.deliverTextResults
def deliverTextResults(self): """ Deliver the results in a pretty text output. @return: Pretty text output! """ output = "=======================\ntxctools Hotspot Report\n"\ "=======================\n\n" fileResults = sorted(self.fileCounts.items(), key=lambda x: x[1]["warning_count"], reverse=True) output += "Warnings per File\n=================\n" count = 0 for item in fileResults: count += 1 output += "#%s - %s - %s\n" % (count, item[0], item[1]["warning_count"]) output += "\nWarnings Breakdown\n==================\n" count = 0 warningCount = 0 warningResults = sorted(self.warningCounts.items(), key=lambda x: x[1]["count"], reverse=True) for item in warningResults: warningCount += item[1]["count"] for warning, winfo in warningResults: count += 1 output += "#%s - %s - %s (%s%%) - %s\n" % (count, warning, winfo["count"], int(winfo["count"] / warningCount * 100), tools.cleanupMessage(warning, winfo)) return output
python
def deliverTextResults(self):
    """
    Deliver the results in a pretty text output.

    @return: Pretty text output!
    """
    output = "=======================\ntxctools Hotspot Report\n"\
             "=======================\n\n"

    fileResults = sorted(self.fileCounts.items(),
                         key=lambda x: x[1]["warning_count"], reverse=True)

    output += "Warnings per File\n=================\n"
    count = 0
    for item in fileResults:
        count += 1
        output += "#%s - %s - %s\n" % (count, item[0],
                                       item[1]["warning_count"])

    output += "\nWarnings Breakdown\n==================\n"
    count = 0
    warningCount = 0
    warningResults = sorted(self.warningCounts.items(),
                            key=lambda x: x[1]["count"], reverse=True)
    # First pass: total warnings, so per-warning percentages can be computed.
    for item in warningResults:
        warningCount += item[1]["count"]

    for warning, winfo in warningResults:
        count += 1
        # BUG FIX: multiply before dividing.  The original computed
        # count / total * 100, which truncates to 0 under Python 2's
        # integer division for every warning that isn't the whole total.
        output += "#%s - %s - %s (%s%%) - %s\n" % (
            count, warning, winfo["count"],
            int(winfo["count"] * 100 / warningCount),
            tools.cleanupMessage(warning, winfo))
    return output
[ "def", "deliverTextResults", "(", "self", ")", ":", "output", "=", "\"=======================\\ntxctools Hotspot Report\\n\"", "\"=======================\\n\\n\"", "fileResults", "=", "sorted", "(", "self", ".", "fileCounts", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "\"warning_count\"", "]", ",", "reverse", "=", "True", ")", "output", "+=", "\"Warnings per File\\n=================\\n\"", "count", "=", "0", "for", "item", "in", "fileResults", ":", "count", "+=", "1", "output", "+=", "\"#%s - %s - %s\\n\"", "%", "(", "count", ",", "item", "[", "0", "]", ",", "item", "[", "1", "]", "[", "\"warning_count\"", "]", ")", "output", "+=", "\"\\nWarnings Breakdown\\n==================\\n\"", "count", "=", "0", "warningCount", "=", "0", "warningResults", "=", "sorted", "(", "self", ".", "warningCounts", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", "[", "\"count\"", "]", ",", "reverse", "=", "True", ")", "for", "item", "in", "warningResults", ":", "warningCount", "+=", "item", "[", "1", "]", "[", "\"count\"", "]", "for", "warning", ",", "winfo", "in", "warningResults", ":", "count", "+=", "1", "output", "+=", "\"#%s - %s - %s (%s%%) - %s\\n\"", "%", "(", "count", ",", "warning", ",", "winfo", "[", "\"count\"", "]", ",", "int", "(", "winfo", "[", "\"count\"", "]", "/", "warningCount", "*", "100", ")", ",", "tools", ".", "cleanupMessage", "(", "warning", ",", "winfo", ")", ")", "return", "output" ]
Deliver the results in a pretty text output. @return: Pretty text output!
[ "Deliver", "the", "results", "in", "a", "pretty", "text", "output", "." ]
14cab033ea179211a7bfd88dc202d576fc336ddc
https://github.com/hawkowl/txctools/blob/14cab033ea179211a7bfd88dc202d576fc336ddc/txctools/reports/hotspot.py#L63-L100
247,408
hawkowl/txctools
txctools/reports/hotspot.py
HotspotReport._warnCount
def _warnCount(self, warnings, warningCount=None): """ Calculate the count of each warning, being given a list of them. @param warnings: L{list} of L{dict}s that come from L{tools.parsePyLintWarnings}. @param warningCount: A L{dict} produced by this method previously, if you are adding to the warnings. @return: L{dict} of L{dict}s for the warnings. """ if not warningCount: warningCount = {} for warning in warnings: wID = warning["warning_id"] if not warningCount.get(wID): warningCount[wID] = {} warningCount[wID]["count"] = 1 warningCount[wID]["message"] = warning.get("warning_message") else: warningCount[wID]["count"] += 1 return warningCount
python
def _warnCount(self, warnings, warningCount=None): """ Calculate the count of each warning, being given a list of them. @param warnings: L{list} of L{dict}s that come from L{tools.parsePyLintWarnings}. @param warningCount: A L{dict} produced by this method previously, if you are adding to the warnings. @return: L{dict} of L{dict}s for the warnings. """ if not warningCount: warningCount = {} for warning in warnings: wID = warning["warning_id"] if not warningCount.get(wID): warningCount[wID] = {} warningCount[wID]["count"] = 1 warningCount[wID]["message"] = warning.get("warning_message") else: warningCount[wID]["count"] += 1 return warningCount
[ "def", "_warnCount", "(", "self", ",", "warnings", ",", "warningCount", "=", "None", ")", ":", "if", "not", "warningCount", ":", "warningCount", "=", "{", "}", "for", "warning", "in", "warnings", ":", "wID", "=", "warning", "[", "\"warning_id\"", "]", "if", "not", "warningCount", ".", "get", "(", "wID", ")", ":", "warningCount", "[", "wID", "]", "=", "{", "}", "warningCount", "[", "wID", "]", "[", "\"count\"", "]", "=", "1", "warningCount", "[", "wID", "]", "[", "\"message\"", "]", "=", "warning", ".", "get", "(", "\"warning_message\"", ")", "else", ":", "warningCount", "[", "wID", "]", "[", "\"count\"", "]", "+=", "1", "return", "warningCount" ]
Calculate the count of each warning, being given a list of them. @param warnings: L{list} of L{dict}s that come from L{tools.parsePyLintWarnings}. @param warningCount: A L{dict} produced by this method previously, if you are adding to the warnings. @return: L{dict} of L{dict}s for the warnings.
[ "Calculate", "the", "count", "of", "each", "warning", "being", "given", "a", "list", "of", "them", "." ]
14cab033ea179211a7bfd88dc202d576fc336ddc
https://github.com/hawkowl/txctools/blob/14cab033ea179211a7bfd88dc202d576fc336ddc/txctools/reports/hotspot.py#L103-L126
247,409
jamieleshaw/lurklib
lurklib/channel.py
_Channel.list_
def list_(self): """ Gets a list of channels on the server. """ with self.lock: self.send('LIST') list_ = {} while self.readable(): msg = self._recv(expected_replies=('322', '321', '323')) if msg[0] == '322': channel, usercount, modes, topic = msg[2].split(' ', 3) modes = modes.replace(':', '', 1).replace(':', '', 1) modes = modes.replace('[', '').replace( \ ']', '').replace('+', '') list_[channel] = usercount, modes, topic elif msg[0] == '321': pass elif msg[0] == '323': break return list_
python
def list_(self): """ Gets a list of channels on the server. """ with self.lock: self.send('LIST') list_ = {} while self.readable(): msg = self._recv(expected_replies=('322', '321', '323')) if msg[0] == '322': channel, usercount, modes, topic = msg[2].split(' ', 3) modes = modes.replace(':', '', 1).replace(':', '', 1) modes = modes.replace('[', '').replace( \ ']', '').replace('+', '') list_[channel] = usercount, modes, topic elif msg[0] == '321': pass elif msg[0] == '323': break return list_
[ "def", "list_", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "send", "(", "'LIST'", ")", "list_", "=", "{", "}", "while", "self", ".", "readable", "(", ")", ":", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'322'", ",", "'321'", ",", "'323'", ")", ")", "if", "msg", "[", "0", "]", "==", "'322'", ":", "channel", ",", "usercount", ",", "modes", ",", "topic", "=", "msg", "[", "2", "]", ".", "split", "(", "' '", ",", "3", ")", "modes", "=", "modes", ".", "replace", "(", "':'", ",", "''", ",", "1", ")", ".", "replace", "(", "':'", ",", "''", ",", "1", ")", "modes", "=", "modes", ".", "replace", "(", "'['", ",", "''", ")", ".", "replace", "(", "']'", ",", "''", ")", ".", "replace", "(", "'+'", ",", "''", ")", "list_", "[", "channel", "]", "=", "usercount", ",", "modes", ",", "topic", "elif", "msg", "[", "0", "]", "==", "'321'", ":", "pass", "elif", "msg", "[", "0", "]", "==", "'323'", ":", "break", "return", "list_" ]
Gets a list of channels on the server.
[ "Gets", "a", "list", "of", "channels", "on", "the", "server", "." ]
a861f35d880140422103dd78ec3239814e85fd7e
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L337-L357
247,410
bmuller/toquen-python
toquen/client.py
AWSClient.servers_with_roles
def servers_with_roles(self, roles, env=None, match_all=False): """ Get servers with the given roles. If env is given, then the environment must match as well. If match_all is True, then only return servers who have all of the given roles. Otherwise, return servers that have one or more of the given roles. """ result = [] roles = set(roles) for instance in self.server_details(): instroles = set(instance['roles']) envmatches = (env is None) or (instance['environment'] == env) if envmatches and match_all and roles <= instroles: result.append(instance) elif envmatches and not match_all and len(roles & instroles) > 0: result.append(instance) return result
python
def servers_with_roles(self, roles, env=None, match_all=False):
    """
    Get servers with the given roles.  If env is given, then the environment
    must match as well.  If match_all is True, then only return servers who
    have all of the given roles.  Otherwise, return servers that have one or
    more of the given roles.
    """
    wanted = set(roles)
    matches = []
    for instance in self.server_details():
        if env is not None and instance['environment'] != env:
            continue
        have = set(instance['roles'])
        if match_all:
            if wanted <= have:
                matches.append(instance)
        elif wanted & have:
            matches.append(instance)
    return matches
[ "def", "servers_with_roles", "(", "self", ",", "roles", ",", "env", "=", "None", ",", "match_all", "=", "False", ")", ":", "result", "=", "[", "]", "roles", "=", "set", "(", "roles", ")", "for", "instance", "in", "self", ".", "server_details", "(", ")", ":", "instroles", "=", "set", "(", "instance", "[", "'roles'", "]", ")", "envmatches", "=", "(", "env", "is", "None", ")", "or", "(", "instance", "[", "'environment'", "]", "==", "env", ")", "if", "envmatches", "and", "match_all", "and", "roles", "<=", "instroles", ":", "result", ".", "append", "(", "instance", ")", "elif", "envmatches", "and", "not", "match_all", "and", "len", "(", "roles", "&", "instroles", ")", ">", "0", ":", "result", ".", "append", "(", "instance", ")", "return", "result" ]
Get servers with the given roles. If env is given, then the environment must match as well. If match_all is True, then only return servers who have all of the given roles. Otherwise, return servers that have one or more of the given roles.
[ "Get", "servers", "with", "the", "given", "roles", ".", "If", "env", "is", "given", "then", "the", "environment", "must", "match", "as", "well", ".", "If", "match_all", "is", "True", "then", "only", "return", "servers", "who", "have", "all", "of", "the", "given", "roles", ".", "Otherwise", "return", "servers", "that", "have", "one", "or", "more", "of", "the", "given", "roles", "." ]
bfe4073a91b03b06b934aa20a174d96f7b7660e5
https://github.com/bmuller/toquen-python/blob/bfe4073a91b03b06b934aa20a174d96f7b7660e5/toquen/client.py#L19-L34
247,411
bmuller/toquen-python
toquen/client.py
FabricFriendlyClient.ips_with_roles
def ips_with_roles(self, roles, env=None, match_all=False): """ Returns a function that, when called, gets servers with the given roles. If env is given, then the environment must match as well. If match_all is True, then only return servers who have all of the given roles. Otherwise, return servers that have one or more of the given roles. """ def func(): return [s['external_ip'] for s in self.servers_with_roles(roles, env, match_all)] return func
python
def ips_with_roles(self, roles, env=None, match_all=False):
    """
    Returns a function that, when called, gets servers with the given roles.
    If env is given, then the environment must match as well.  If match_all
    is True, then only return servers who have all of the given roles.
    Otherwise, return servers that have one or more of the given roles.
    """
    def lookup_external_ips():
        # Deferred so the lookup runs when Fabric calls it, not now.
        matching = self.servers_with_roles(roles, env, match_all)
        return [server['external_ip'] for server in matching]
    return lookup_external_ips
[ "def", "ips_with_roles", "(", "self", ",", "roles", ",", "env", "=", "None", ",", "match_all", "=", "False", ")", ":", "def", "func", "(", ")", ":", "return", "[", "s", "[", "'external_ip'", "]", "for", "s", "in", "self", ".", "servers_with_roles", "(", "roles", ",", "env", ",", "match_all", ")", "]", "return", "func" ]
Returns a function that, when called, gets servers with the given roles. If env is given, then the environment must match as well. If match_all is True, then only return servers who have all of the given roles. Otherwise, return servers that have one or more of the given roles.
[ "Returns", "a", "function", "that", "when", "called", "gets", "servers", "with", "the", "given", "roles", ".", "If", "env", "is", "given", "then", "the", "environment", "must", "match", "as", "well", ".", "If", "match_all", "is", "True", "then", "only", "return", "servers", "who", "have", "all", "of", "the", "given", "roles", ".", "Otherwise", "return", "servers", "that", "have", "one", "or", "more", "of", "the", "given", "roles", "." ]
bfe4073a91b03b06b934aa20a174d96f7b7660e5
https://github.com/bmuller/toquen-python/blob/bfe4073a91b03b06b934aa20a174d96f7b7660e5/toquen/client.py#L67-L76
247,412
polysquare/polysquare-generic-file-linter
polysquarelinter/valid_words_dictionary.py
create
def create(spellchecker_cache_path): """Create a Dictionary at spellchecker_cache_path with valid words.""" user_dictionary = os.path.join(os.getcwd(), "DICTIONARY") user_words = read_dictionary_file(user_dictionary) valid_words = Dictionary(valid_words_set(user_dictionary, user_words), "valid_words", [user_dictionary], spellchecker_cache_path) return (user_words, valid_words)
python
def create(spellchecker_cache_path):
    """Create a Dictionary at spellchecker_cache_path with valid words."""
    user_dictionary = os.path.join(os.getcwd(), "DICTIONARY")
    user_words = read_dictionary_file(user_dictionary)
    word_set = valid_words_set(user_dictionary, user_words)
    valid_words = Dictionary(word_set,
                             "valid_words",
                             [user_dictionary],
                             spellchecker_cache_path)
    return (user_words, valid_words)
[ "def", "create", "(", "spellchecker_cache_path", ")", ":", "user_dictionary", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "\"DICTIONARY\"", ")", "user_words", "=", "read_dictionary_file", "(", "user_dictionary", ")", "valid_words", "=", "Dictionary", "(", "valid_words_set", "(", "user_dictionary", ",", "user_words", ")", ",", "\"valid_words\"", ",", "[", "user_dictionary", "]", ",", "spellchecker_cache_path", ")", "return", "(", "user_words", ",", "valid_words", ")" ]
Create a Dictionary at spellchecker_cache_path with valid words.
[ "Create", "a", "Dictionary", "at", "spellchecker_cache_path", "with", "valid", "words", "." ]
cfc88771acd3d5551c28fa5d917bb0aeb584c4cc
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/valid_words_dictionary.py#L16-L26
247,413
msuozzo/Aduro
aduro/snapshot.py
KindleLibrarySnapshot.process_event
def process_event(self, event): """Apply an event to the snapshot instance """ if not isinstance(event, KindleEvent): pass elif isinstance(event, AddEvent): self._data[event.asin] = BookSnapshot(event.asin) elif isinstance(event, SetReadingEvent): self._data[event.asin].status = ReadingStatus.CURRENT self._data[event.asin].progress = event.initial_progress elif isinstance(event, ReadEvent): self._data[event.asin].progress += event.progress elif isinstance(event, SetFinishedEvent): self._data[event.asin].status = ReadingStatus.COMPLETED else: raise TypeError
python
def process_event(self, event): """Apply an event to the snapshot instance """ if not isinstance(event, KindleEvent): pass elif isinstance(event, AddEvent): self._data[event.asin] = BookSnapshot(event.asin) elif isinstance(event, SetReadingEvent): self._data[event.asin].status = ReadingStatus.CURRENT self._data[event.asin].progress = event.initial_progress elif isinstance(event, ReadEvent): self._data[event.asin].progress += event.progress elif isinstance(event, SetFinishedEvent): self._data[event.asin].status = ReadingStatus.COMPLETED else: raise TypeError
[ "def", "process_event", "(", "self", ",", "event", ")", ":", "if", "not", "isinstance", "(", "event", ",", "KindleEvent", ")", ":", "pass", "elif", "isinstance", "(", "event", ",", "AddEvent", ")", ":", "self", ".", "_data", "[", "event", ".", "asin", "]", "=", "BookSnapshot", "(", "event", ".", "asin", ")", "elif", "isinstance", "(", "event", ",", "SetReadingEvent", ")", ":", "self", ".", "_data", "[", "event", ".", "asin", "]", ".", "status", "=", "ReadingStatus", ".", "CURRENT", "self", ".", "_data", "[", "event", ".", "asin", "]", ".", "progress", "=", "event", ".", "initial_progress", "elif", "isinstance", "(", "event", ",", "ReadEvent", ")", ":", "self", ".", "_data", "[", "event", ".", "asin", "]", ".", "progress", "+=", "event", ".", "progress", "elif", "isinstance", "(", "event", ",", "SetFinishedEvent", ")", ":", "self", ".", "_data", "[", "event", ".", "asin", "]", ".", "status", "=", "ReadingStatus", ".", "COMPLETED", "else", ":", "raise", "TypeError" ]
Apply an event to the snapshot instance
[ "Apply", "an", "event", "to", "the", "snapshot", "instance" ]
338eeb1deeff30c198e721b660ae4daca3660911
https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/snapshot.py#L41-L56
247,414
msuozzo/Aduro
aduro/snapshot.py
KindleLibrarySnapshot.calc_update_events
def calc_update_events(self, asin_to_progress): """Calculate and return an iterable of `KindleEvent`s which, when applied to the current snapshot, result in the the current snapshot reflecting the progress state of the `asin_to_progress` mapping. Functionally, this method generates `AddEvent`s and `ReadEvent`s from updated Kindle Library state. Args: asin_to_progress: A map of book asins to the integral representation of progress used in the current snapshot. Returns: A list of Event objects that account for the changes detected in the `asin_to_progress`. """ new_events = [] for asin, new_progress in asin_to_progress.iteritems(): try: book_snapshot = self.get_book(asin) except KeyError: new_events.append(AddEvent(asin)) else: if book_snapshot.status == ReadingStatus.CURRENT: change = new_progress - book_snapshot.progress if change > 0: new_events.append(ReadEvent(asin, change)) return new_events
python
def calc_update_events(self, asin_to_progress): """Calculate and return an iterable of `KindleEvent`s which, when applied to the current snapshot, result in the the current snapshot reflecting the progress state of the `asin_to_progress` mapping. Functionally, this method generates `AddEvent`s and `ReadEvent`s from updated Kindle Library state. Args: asin_to_progress: A map of book asins to the integral representation of progress used in the current snapshot. Returns: A list of Event objects that account for the changes detected in the `asin_to_progress`. """ new_events = [] for asin, new_progress in asin_to_progress.iteritems(): try: book_snapshot = self.get_book(asin) except KeyError: new_events.append(AddEvent(asin)) else: if book_snapshot.status == ReadingStatus.CURRENT: change = new_progress - book_snapshot.progress if change > 0: new_events.append(ReadEvent(asin, change)) return new_events
[ "def", "calc_update_events", "(", "self", ",", "asin_to_progress", ")", ":", "new_events", "=", "[", "]", "for", "asin", ",", "new_progress", "in", "asin_to_progress", ".", "iteritems", "(", ")", ":", "try", ":", "book_snapshot", "=", "self", ".", "get_book", "(", "asin", ")", "except", "KeyError", ":", "new_events", ".", "append", "(", "AddEvent", "(", "asin", ")", ")", "else", ":", "if", "book_snapshot", ".", "status", "==", "ReadingStatus", ".", "CURRENT", ":", "change", "=", "new_progress", "-", "book_snapshot", ".", "progress", "if", "change", ">", "0", ":", "new_events", ".", "append", "(", "ReadEvent", "(", "asin", ",", "change", ")", ")", "return", "new_events" ]
Calculate and return an iterable of `KindleEvent`s which, when applied to the current snapshot, result in the the current snapshot reflecting the progress state of the `asin_to_progress` mapping. Functionally, this method generates `AddEvent`s and `ReadEvent`s from updated Kindle Library state. Args: asin_to_progress: A map of book asins to the integral representation of progress used in the current snapshot. Returns: A list of Event objects that account for the changes detected in the `asin_to_progress`.
[ "Calculate", "and", "return", "an", "iterable", "of", "KindleEvent", "s", "which", "when", "applied", "to", "the", "current", "snapshot", "result", "in", "the", "the", "current", "snapshot", "reflecting", "the", "progress", "state", "of", "the", "asin_to_progress", "mapping", "." ]
338eeb1deeff30c198e721b660ae4daca3660911
https://github.com/msuozzo/Aduro/blob/338eeb1deeff30c198e721b660ae4daca3660911/aduro/snapshot.py#L66-L93
247,415
xtrementl/focus
focus/parser/parser.py
SettingParser._reset
def _reset(self): """ Rebuilds structure for AST and resets internal data. """ self._filename = None self._block_map = {} self._ast = [] self._ast.append(None) # header self._ast.append([]) # options list self._ast.append([])
python
def _reset(self): """ Rebuilds structure for AST and resets internal data. """ self._filename = None self._block_map = {} self._ast = [] self._ast.append(None) # header self._ast.append([]) # options list self._ast.append([])
[ "def", "_reset", "(", "self", ")", ":", "self", ".", "_filename", "=", "None", "self", ".", "_block_map", "=", "{", "}", "self", ".", "_ast", "=", "[", "]", "self", ".", "_ast", ".", "append", "(", "None", ")", "# header", "self", ".", "_ast", ".", "append", "(", "[", "]", ")", "# options list", "self", ".", "_ast", ".", "append", "(", "[", "]", ")" ]
Rebuilds structure for AST and resets internal data.
[ "Rebuilds", "structure", "for", "AST", "and", "resets", "internal", "data", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L164-L173
247,416
xtrementl/focus
focus/parser/parser.py
SettingParser._get_token
def _get_token(self, regex=None): """ Consumes the next token in the token stream. `regex` Validate against the specified `re.compile()` regex instance. Returns token string. * Raises a ``ParseError`` exception if stream is empty or regex match fails. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') else: line_no, token = item if regex and not regex.match(token): pattern = u"Unexpected format in token '{0}' on line {1}" token_val = common.from_utf8(token.strip()) raise ParseError(pattern.format(token_val, line_no)) return token
python
def _get_token(self, regex=None): """ Consumes the next token in the token stream. `regex` Validate against the specified `re.compile()` regex instance. Returns token string. * Raises a ``ParseError`` exception if stream is empty or regex match fails. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') else: line_no, token = item if regex and not regex.match(token): pattern = u"Unexpected format in token '{0}' on line {1}" token_val = common.from_utf8(token.strip()) raise ParseError(pattern.format(token_val, line_no)) return token
[ "def", "_get_token", "(", "self", ",", "regex", "=", "None", ")", ":", "item", "=", "self", ".", "_lexer", ".", "get_token", "(", ")", "if", "not", "item", ":", "raise", "ParseError", "(", "u'Unexpected end of file'", ")", "else", ":", "line_no", ",", "token", "=", "item", "if", "regex", "and", "not", "regex", ".", "match", "(", "token", ")", ":", "pattern", "=", "u\"Unexpected format in token '{0}' on line {1}\"", "token_val", "=", "common", ".", "from_utf8", "(", "token", ".", "strip", "(", ")", ")", "raise", "ParseError", "(", "pattern", ".", "format", "(", "token_val", ",", "line_no", ")", ")", "return", "token" ]
Consumes the next token in the token stream. `regex` Validate against the specified `re.compile()` regex instance. Returns token string. * Raises a ``ParseError`` exception if stream is empty or regex match fails.
[ "Consumes", "the", "next", "token", "in", "the", "token", "stream", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L175-L198
247,417
xtrementl/focus
focus/parser/parser.py
SettingParser._lookahead_token
def _lookahead_token(self, count=1): """ Peeks into the token stream up to the specified number of tokens without consuming any tokens from the stream. ``count`` Look ahead in stream up to a maximum number of tokens. Returns string token or ``None``. """ stack = [] next_token = None # fetch the specified number of tokens ahead in stream while count > 0: item = self._lexer.get_token() if not item: break stack.append(item) count -= 1 # store the latest token and push the tokens back on the # lexer stack so we don't consume them while stack: line_no, token = stack.pop() if not next_token: next_token = token self._lexer.push_token(line_no, token) return next_token
python
def _lookahead_token(self, count=1): """ Peeks into the token stream up to the specified number of tokens without consuming any tokens from the stream. ``count`` Look ahead in stream up to a maximum number of tokens. Returns string token or ``None``. """ stack = [] next_token = None # fetch the specified number of tokens ahead in stream while count > 0: item = self._lexer.get_token() if not item: break stack.append(item) count -= 1 # store the latest token and push the tokens back on the # lexer stack so we don't consume them while stack: line_no, token = stack.pop() if not next_token: next_token = token self._lexer.push_token(line_no, token) return next_token
[ "def", "_lookahead_token", "(", "self", ",", "count", "=", "1", ")", ":", "stack", "=", "[", "]", "next_token", "=", "None", "# fetch the specified number of tokens ahead in stream", "while", "count", ">", "0", ":", "item", "=", "self", ".", "_lexer", ".", "get_token", "(", ")", "if", "not", "item", ":", "break", "stack", ".", "append", "(", "item", ")", "count", "-=", "1", "# store the latest token and push the tokens back on the", "# lexer stack so we don't consume them", "while", "stack", ":", "line_no", ",", "token", "=", "stack", ".", "pop", "(", ")", "if", "not", "next_token", ":", "next_token", "=", "token", "self", ".", "_lexer", ".", "push_token", "(", "line_no", ",", "token", ")", "return", "next_token" ]
Peeks into the token stream up to the specified number of tokens without consuming any tokens from the stream. ``count`` Look ahead in stream up to a maximum number of tokens. Returns string token or ``None``.
[ "Peeks", "into", "the", "token", "stream", "up", "to", "the", "specified", "number", "of", "tokens", "without", "consuming", "any", "tokens", "from", "the", "stream", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L200-L230
247,418
xtrementl/focus
focus/parser/parser.py
SettingParser._expect_token
def _expect_token(self, expected): """ Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') else: line_no, token = item if token != expected: raise ParseError(u"Unexpected token '{0}', " u"expecting '{1}' on line {2}" .format(common.from_utf8(token.strip()), expected, line_no))
python
def _expect_token(self, expected): """ Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`. """ item = self._lexer.get_token() if not item: raise ParseError(u'Unexpected end of file') else: line_no, token = item if token != expected: raise ParseError(u"Unexpected token '{0}', " u"expecting '{1}' on line {2}" .format(common.from_utf8(token.strip()), expected, line_no))
[ "def", "_expect_token", "(", "self", ",", "expected", ")", ":", "item", "=", "self", ".", "_lexer", ".", "get_token", "(", ")", "if", "not", "item", ":", "raise", "ParseError", "(", "u'Unexpected end of file'", ")", "else", ":", "line_no", ",", "token", "=", "item", "if", "token", "!=", "expected", ":", "raise", "ParseError", "(", "u\"Unexpected token '{0}', \"", "u\"expecting '{1}' on line {2}\"", ".", "format", "(", "common", ".", "from_utf8", "(", "token", ".", "strip", "(", ")", ")", ",", "expected", ",", "line_no", ")", ")" ]
Compares the next token in the stream to the specified token. `expected` Expected token string to match. * Raises a ``ParseError`` exception if token doesn't match `expected`.
[ "Compares", "the", "next", "token", "in", "the", "stream", "to", "the", "specified", "token", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L232-L254
247,419
xtrementl/focus
focus/parser/parser.py
SettingParser._expect_empty
def _expect_empty(self): """ Checks if the token stream is empty. * Raises a ``ParseError` exception if a token is found. """ item = self._lexer.get_token() if item: line_no, token = item raise ParseError(u"Unexpected token '{0}' on line {1}" .format(common.from_utf8(token.strip()), line_no))
python
def _expect_empty(self): """ Checks if the token stream is empty. * Raises a ``ParseError` exception if a token is found. """ item = self._lexer.get_token() if item: line_no, token = item raise ParseError(u"Unexpected token '{0}' on line {1}" .format(common.from_utf8(token.strip()), line_no))
[ "def", "_expect_empty", "(", "self", ")", ":", "item", "=", "self", ".", "_lexer", ".", "get_token", "(", ")", "if", "item", ":", "line_no", ",", "token", "=", "item", "raise", "ParseError", "(", "u\"Unexpected token '{0}' on line {1}\"", ".", "format", "(", "common", ".", "from_utf8", "(", "token", ".", "strip", "(", ")", ")", ",", "line_no", ")", ")" ]
Checks if the token stream is empty. * Raises a ``ParseError` exception if a token is found.
[ "Checks", "if", "the", "token", "stream", "is", "empty", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L256-L266
247,420
xtrementl/focus
focus/parser/parser.py
SettingParser.readstream
def readstream(self, stream): """ Reads the specified stream and parses the token elements generated from tokenizing the input data. `stream` ``File``-like object. Returns boolean. """ self._reset() try: # tokenize input stream self._lexer = SettingLexer() self._lexer.readstream(stream) # parse tokens into AST self._parse() return True except IOError: self._reset() return False
python
def readstream(self, stream): """ Reads the specified stream and parses the token elements generated from tokenizing the input data. `stream` ``File``-like object. Returns boolean. """ self._reset() try: # tokenize input stream self._lexer = SettingLexer() self._lexer.readstream(stream) # parse tokens into AST self._parse() return True except IOError: self._reset() return False
[ "def", "readstream", "(", "self", ",", "stream", ")", ":", "self", ".", "_reset", "(", ")", "try", ":", "# tokenize input stream", "self", ".", "_lexer", "=", "SettingLexer", "(", ")", "self", ".", "_lexer", ".", "readstream", "(", "stream", ")", "# parse tokens into AST", "self", ".", "_parse", "(", ")", "return", "True", "except", "IOError", ":", "self", ".", "_reset", "(", ")", "return", "False" ]
Reads the specified stream and parses the token elements generated from tokenizing the input data. `stream` ``File``-like object. Returns boolean.
[ "Reads", "the", "specified", "stream", "and", "parses", "the", "token", "elements", "generated", "from", "tokenizing", "the", "input", "data", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L393-L416
247,421
xtrementl/focus
focus/parser/parser.py
SettingParser.write
def write(self, filename, header=None): """ Writes the AST as a configuration file. `filename` Filename to save configuration file to. `header` Header string to use for the file. Returns boolean. """ origfile = self._filename try: with open(filename, 'w') as _file: self.writestream(_file, header) self._filename = filename return True except IOError: self._filename = origfile return False
python
def write(self, filename, header=None): """ Writes the AST as a configuration file. `filename` Filename to save configuration file to. `header` Header string to use for the file. Returns boolean. """ origfile = self._filename try: with open(filename, 'w') as _file: self.writestream(_file, header) self._filename = filename return True except IOError: self._filename = origfile return False
[ "def", "write", "(", "self", ",", "filename", ",", "header", "=", "None", ")", ":", "origfile", "=", "self", ".", "_filename", "try", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "_file", ":", "self", ".", "writestream", "(", "_file", ",", "header", ")", "self", ".", "_filename", "=", "filename", "return", "True", "except", "IOError", ":", "self", ".", "_filename", "=", "origfile", "return", "False" ]
Writes the AST as a configuration file. `filename` Filename to save configuration file to. `header` Header string to use for the file. Returns boolean.
[ "Writes", "the", "AST", "as", "a", "configuration", "file", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L418-L439
247,422
xtrementl/focus
focus/parser/parser.py
SettingParser.add_option
def add_option(self, block, name, *values): """ Adds an option to the AST, either as a non-block option or for an existing block. `block` Block name. Set to ``None`` for non-block option. `name` Option name. `*values` String values for the option. * Raises a ``ValueError`` exception if `values` is empty, `name` is invalid, or `block` doesn't exist. """ if not self.RE_NAME.match(name): raise ValueError(u"Invalid option name '{0}'" .format(common.from_utf8(name))) if not values: raise ValueError(u"Must provide a value") else: values = list(values) if block: # block doesn't exist if not block in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(block))) # lookup block index and append block_idx = self._block_map[block] # 0: block name, 1: option_list self._ast[2][block_idx][1].append([name, values]) else: # non-block option self._ast[1].append([name, values])
python
def add_option(self, block, name, *values): """ Adds an option to the AST, either as a non-block option or for an existing block. `block` Block name. Set to ``None`` for non-block option. `name` Option name. `*values` String values for the option. * Raises a ``ValueError`` exception if `values` is empty, `name` is invalid, or `block` doesn't exist. """ if not self.RE_NAME.match(name): raise ValueError(u"Invalid option name '{0}'" .format(common.from_utf8(name))) if not values: raise ValueError(u"Must provide a value") else: values = list(values) if block: # block doesn't exist if not block in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(block))) # lookup block index and append block_idx = self._block_map[block] # 0: block name, 1: option_list self._ast[2][block_idx][1].append([name, values]) else: # non-block option self._ast[1].append([name, values])
[ "def", "add_option", "(", "self", ",", "block", ",", "name", ",", "*", "values", ")", ":", "if", "not", "self", ".", "RE_NAME", ".", "match", "(", "name", ")", ":", "raise", "ValueError", "(", "u\"Invalid option name '{0}'\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "if", "not", "values", ":", "raise", "ValueError", "(", "u\"Must provide a value\"", ")", "else", ":", "values", "=", "list", "(", "values", ")", "if", "block", ":", "# block doesn't exist", "if", "not", "block", "in", "self", ".", "_block_map", ":", "raise", "ValueError", "(", "u\"Block '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "block", ")", ")", ")", "# lookup block index and append", "block_idx", "=", "self", ".", "_block_map", "[", "block", "]", "# 0: block name, 1: option_list", "self", ".", "_ast", "[", "2", "]", "[", "block_idx", "]", "[", "1", "]", ".", "append", "(", "[", "name", ",", "values", "]", ")", "else", ":", "# non-block option", "self", ".", "_ast", "[", "1", "]", ".", "append", "(", "[", "name", ",", "values", "]", ")" ]
Adds an option to the AST, either as a non-block option or for an existing block. `block` Block name. Set to ``None`` for non-block option. `name` Option name. `*values` String values for the option. * Raises a ``ValueError`` exception if `values` is empty, `name` is invalid, or `block` doesn't exist.
[ "Adds", "an", "option", "to", "the", "AST", "either", "as", "a", "non", "-", "block", "option", "or", "for", "an", "existing", "block", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L509-L547
247,423
xtrementl/focus
focus/parser/parser.py
SettingParser.remove_option
def remove_option(self, block, name): """ Removes first matching option that exists from the AST. `block` Block name. Set to ``None`` for non-block option. `name` Option name to remove. * Raises a ``ValueError`` exception if `name` and/or `block` haven't been added. """ if block: # block doesn't exist if not self._ast or not block in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(block))) # lookup block index and remove block_idx = self._block_map[block] for i, opt in enumerate(self._ast[2][block_idx][1]): if opt[0] == name: item_idx = i break else: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # pop off the block option options = self._ast[2][block_idx][1] options.pop(item_idx) else: if not self._ast: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # non-block option for i, opt in enumerate(self._ast[1]): if opt[0] == name: item_idx = i break else: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # pop off non-block option self._ast[1].pop(item_idx)
python
def remove_option(self, block, name): """ Removes first matching option that exists from the AST. `block` Block name. Set to ``None`` for non-block option. `name` Option name to remove. * Raises a ``ValueError`` exception if `name` and/or `block` haven't been added. """ if block: # block doesn't exist if not self._ast or not block in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(block))) # lookup block index and remove block_idx = self._block_map[block] for i, opt in enumerate(self._ast[2][block_idx][1]): if opt[0] == name: item_idx = i break else: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # pop off the block option options = self._ast[2][block_idx][1] options.pop(item_idx) else: if not self._ast: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # non-block option for i, opt in enumerate(self._ast[1]): if opt[0] == name: item_idx = i break else: raise ValueError(u"Option '{0}' does not exist" .format(common.from_utf8(name))) # pop off non-block option self._ast[1].pop(item_idx)
[ "def", "remove_option", "(", "self", ",", "block", ",", "name", ")", ":", "if", "block", ":", "# block doesn't exist", "if", "not", "self", ".", "_ast", "or", "not", "block", "in", "self", ".", "_block_map", ":", "raise", "ValueError", "(", "u\"Block '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "block", ")", ")", ")", "# lookup block index and remove", "block_idx", "=", "self", ".", "_block_map", "[", "block", "]", "for", "i", ",", "opt", "in", "enumerate", "(", "self", ".", "_ast", "[", "2", "]", "[", "block_idx", "]", "[", "1", "]", ")", ":", "if", "opt", "[", "0", "]", "==", "name", ":", "item_idx", "=", "i", "break", "else", ":", "raise", "ValueError", "(", "u\"Option '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "# pop off the block option", "options", "=", "self", ".", "_ast", "[", "2", "]", "[", "block_idx", "]", "[", "1", "]", "options", ".", "pop", "(", "item_idx", ")", "else", ":", "if", "not", "self", ".", "_ast", ":", "raise", "ValueError", "(", "u\"Option '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "# non-block option", "for", "i", ",", "opt", "in", "enumerate", "(", "self", ".", "_ast", "[", "1", "]", ")", ":", "if", "opt", "[", "0", "]", "==", "name", ":", "item_idx", "=", "i", "break", "else", ":", "raise", "ValueError", "(", "u\"Option '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "# pop off non-block option", "self", ".", "_ast", "[", "1", "]", ".", "pop", "(", "item_idx", ")" ]
Removes first matching option that exists from the AST. `block` Block name. Set to ``None`` for non-block option. `name` Option name to remove. * Raises a ``ValueError`` exception if `name` and/or `block` haven't been added.
[ "Removes", "first", "matching", "option", "that", "exists", "from", "the", "AST", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L549-L597
247,424
xtrementl/focus
focus/parser/parser.py
SettingParser.add_block
def add_block(self, name): """ Adds a new block to the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` is invalid or an existing block name matches value provided for `name`. """ if not self.RE_NAME.match(name): raise ValueError(u"Invalid block name '{0}'" .format(common.from_utf8(name))) if name in self._block_map: raise ValueError(u"Block '{0}' already exists" .format(common.from_utf8(name))) # add new block and index mapping self._block_map[name] = len(self._ast[2]) # must come first option_list = [] block = [name, option_list] self._ast[2].append(block)
python
def add_block(self, name): """ Adds a new block to the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` is invalid or an existing block name matches value provided for `name`. """ if not self.RE_NAME.match(name): raise ValueError(u"Invalid block name '{0}'" .format(common.from_utf8(name))) if name in self._block_map: raise ValueError(u"Block '{0}' already exists" .format(common.from_utf8(name))) # add new block and index mapping self._block_map[name] = len(self._ast[2]) # must come first option_list = [] block = [name, option_list] self._ast[2].append(block)
[ "def", "add_block", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "RE_NAME", ".", "match", "(", "name", ")", ":", "raise", "ValueError", "(", "u\"Invalid block name '{0}'\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "if", "name", "in", "self", ".", "_block_map", ":", "raise", "ValueError", "(", "u\"Block '{0}' already exists\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "# add new block and index mapping", "self", ".", "_block_map", "[", "name", "]", "=", "len", "(", "self", ".", "_ast", "[", "2", "]", ")", "# must come first", "option_list", "=", "[", "]", "block", "=", "[", "name", ",", "option_list", "]", "self", ".", "_ast", "[", "2", "]", ".", "append", "(", "block", ")" ]
Adds a new block to the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` is invalid or an existing block name matches value provided for `name`.
[ "Adds", "a", "new", "block", "to", "the", "AST", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L599-L621
247,425
xtrementl/focus
focus/parser/parser.py
SettingParser.remove_block
def remove_block(self, name): """ Removes an existing block from the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` hasn't been added. """ if not self._ast or not name in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(name))) block_idx = self._block_map[name] # remove block self._ast[2].pop(block_idx) del self._block_map[name]
python
def remove_block(self, name): """ Removes an existing block from the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` hasn't been added. """ if not self._ast or not name in self._block_map: raise ValueError(u"Block '{0}' does not exist" .format(common.from_utf8(name))) block_idx = self._block_map[name] # remove block self._ast[2].pop(block_idx) del self._block_map[name]
[ "def", "remove_block", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "_ast", "or", "not", "name", "in", "self", ".", "_block_map", ":", "raise", "ValueError", "(", "u\"Block '{0}' does not exist\"", ".", "format", "(", "common", ".", "from_utf8", "(", "name", ")", ")", ")", "block_idx", "=", "self", ".", "_block_map", "[", "name", "]", "# remove block", "self", ".", "_ast", "[", "2", "]", ".", "pop", "(", "block_idx", ")", "del", "self", ".", "_block_map", "[", "name", "]" ]
Removes an existing block from the AST. `name` Block name. * Raises a ``ValueError`` exception if `name` hasn't been added.
[ "Removes", "an", "existing", "block", "from", "the", "AST", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/parser/parser.py#L623-L640
247,426
fedora-infra/fmn.lib
fmn/lib/models.py
init
def init(db_url, alembic_ini=None, debug=False, create=False): """ Create the tables in the database using the information from the url obtained. :arg db_url, URL used to connect to the database. The URL contains information with regards to the database engine, the host to connect to, the user and password and the database name. ie: <engine>://<user>:<password>@<host>/<dbname> :kwarg alembic_ini, path to the alembic ini file. This is necessary to be able to use alembic correctly, but not for the unit-tests. :kwarg debug, a boolean specifying wether we should have the verbose output of sqlalchemy or not. :return a session that can be used to query the database. """ engine = create_engine(db_url, echo=debug) if create: BASE.metadata.create_all(engine) # This... "causes problems" #if db_url.startswith('sqlite:'): # def _fk_pragma_on_connect(dbapi_con, con_record): # dbapi_con.execute('pragma foreign_keys=ON') # sa.event.listen(engine, 'connect', _fk_pragma_on_connect) if alembic_ini is not None: # pragma: no cover # then, load the Alembic configuration and generate the # version table, "stamping" it with the most recent rev: from alembic.config import Config from alembic import command alembic_cfg = Config(alembic_ini) command.stamp(alembic_cfg, "head") scopedsession = scoped_session(sessionmaker(bind=engine)) return scopedsession
python
def init(db_url, alembic_ini=None, debug=False, create=False): """ Create the tables in the database using the information from the url obtained. :arg db_url, URL used to connect to the database. The URL contains information with regards to the database engine, the host to connect to, the user and password and the database name. ie: <engine>://<user>:<password>@<host>/<dbname> :kwarg alembic_ini, path to the alembic ini file. This is necessary to be able to use alembic correctly, but not for the unit-tests. :kwarg debug, a boolean specifying wether we should have the verbose output of sqlalchemy or not. :return a session that can be used to query the database. """ engine = create_engine(db_url, echo=debug) if create: BASE.metadata.create_all(engine) # This... "causes problems" #if db_url.startswith('sqlite:'): # def _fk_pragma_on_connect(dbapi_con, con_record): # dbapi_con.execute('pragma foreign_keys=ON') # sa.event.listen(engine, 'connect', _fk_pragma_on_connect) if alembic_ini is not None: # pragma: no cover # then, load the Alembic configuration and generate the # version table, "stamping" it with the most recent rev: from alembic.config import Config from alembic import command alembic_cfg = Config(alembic_ini) command.stamp(alembic_cfg, "head") scopedsession = scoped_session(sessionmaker(bind=engine)) return scopedsession
[ "def", "init", "(", "db_url", ",", "alembic_ini", "=", "None", ",", "debug", "=", "False", ",", "create", "=", "False", ")", ":", "engine", "=", "create_engine", "(", "db_url", ",", "echo", "=", "debug", ")", "if", "create", ":", "BASE", ".", "metadata", ".", "create_all", "(", "engine", ")", "# This... \"causes problems\"", "#if db_url.startswith('sqlite:'):", "# def _fk_pragma_on_connect(dbapi_con, con_record):", "# dbapi_con.execute('pragma foreign_keys=ON')", "# sa.event.listen(engine, 'connect', _fk_pragma_on_connect)", "if", "alembic_ini", "is", "not", "None", ":", "# pragma: no cover", "# then, load the Alembic configuration and generate the", "# version table, \"stamping\" it with the most recent rev:", "from", "alembic", ".", "config", "import", "Config", "from", "alembic", "import", "command", "alembic_cfg", "=", "Config", "(", "alembic_ini", ")", "command", ".", "stamp", "(", "alembic_cfg", ",", "\"head\"", ")", "scopedsession", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "engine", ")", ")", "return", "scopedsession" ]
Create the tables in the database using the information from the url obtained. :arg db_url, URL used to connect to the database. The URL contains information with regards to the database engine, the host to connect to, the user and password and the database name. ie: <engine>://<user>:<password>@<host>/<dbname> :kwarg alembic_ini, path to the alembic ini file. This is necessary to be able to use alembic correctly, but not for the unit-tests. :kwarg debug, a boolean specifying wether we should have the verbose output of sqlalchemy or not. :return a session that can be used to query the database.
[ "Create", "the", "tables", "in", "the", "database", "using", "the", "information", "from", "the", "url", "obtained", "." ]
3120725556153d07c1809530f0fadcf250439110
https://github.com/fedora-infra/fmn.lib/blob/3120725556153d07c1809530f0fadcf250439110/fmn/lib/models.py#L69-L104
247,427
fedora-infra/fmn.lib
fmn/lib/models.py
hash_producer
def hash_producer(*args, **kwargs): """ Returns a random hash for a confirmation secret. """ return hashlib.md5(six.text_type(uuid.uuid4()).encode('utf-8')).hexdigest()
python
def hash_producer(*args, **kwargs): """ Returns a random hash for a confirmation secret. """ return hashlib.md5(six.text_type(uuid.uuid4()).encode('utf-8')).hexdigest()
[ "def", "hash_producer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "hashlib", ".", "md5", "(", "six", ".", "text_type", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
Returns a random hash for a confirmation secret.
[ "Returns", "a", "random", "hash", "for", "a", "confirmation", "secret", "." ]
3120725556153d07c1809530f0fadcf250439110
https://github.com/fedora-infra/fmn.lib/blob/3120725556153d07c1809530f0fadcf250439110/fmn/lib/models.py#L724-L726
247,428
fedora-infra/fmn.lib
fmn/lib/models.py
Rule.validate_code_path
def validate_code_path(valid_paths, code_path, **kw): """ Raise an exception if code_path is not one of our whitelisted valid_paths. """ root, name = code_path.split(':', 1) if name not in valid_paths[root]: raise ValueError("%r is not a valid code_path" % code_path)
python
def validate_code_path(valid_paths, code_path, **kw): """ Raise an exception if code_path is not one of our whitelisted valid_paths. """ root, name = code_path.split(':', 1) if name not in valid_paths[root]: raise ValueError("%r is not a valid code_path" % code_path)
[ "def", "validate_code_path", "(", "valid_paths", ",", "code_path", ",", "*", "*", "kw", ")", ":", "root", ",", "name", "=", "code_path", ".", "split", "(", "':'", ",", "1", ")", "if", "name", "not", "in", "valid_paths", "[", "root", "]", ":", "raise", "ValueError", "(", "\"%r is not a valid code_path\"", "%", "code_path", ")" ]
Raise an exception if code_path is not one of our whitelisted valid_paths.
[ "Raise", "an", "exception", "if", "code_path", "is", "not", "one", "of", "our", "whitelisted", "valid_paths", "." ]
3120725556153d07c1809530f0fadcf250439110
https://github.com/fedora-infra/fmn.lib/blob/3120725556153d07c1809530f0fadcf250439110/fmn/lib/models.py#L281-L288
247,429
veltzer/pydmt
pydmt/utils/python.py
hlp_source_under
def hlp_source_under(folder): """ this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script """ # walk the folder and find the __init__.py entries for packages. packages = [] package_dir = dict() for root, dirs, files in os.walk(folder): for file in files: if file != '__init__.py': continue full = os.path.dirname(os.path.join(root, file)) relative = os.path.relpath(full, folder) packages.append(relative) package_dir[relative] = full # we use pprint because we want the order to always remain the same return 'packages={0},\npackage_dir={1}'.format(sorted(packages), pprint.pformat(package_dir))
python
def hlp_source_under(folder): """ this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script """ # walk the folder and find the __init__.py entries for packages. packages = [] package_dir = dict() for root, dirs, files in os.walk(folder): for file in files: if file != '__init__.py': continue full = os.path.dirname(os.path.join(root, file)) relative = os.path.relpath(full, folder) packages.append(relative) package_dir[relative] = full # we use pprint because we want the order to always remain the same return 'packages={0},\npackage_dir={1}'.format(sorted(packages), pprint.pformat(package_dir))
[ "def", "hlp_source_under", "(", "folder", ")", ":", "# walk the folder and find the __init__.py entries for packages.", "packages", "=", "[", "]", "package_dir", "=", "dict", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder", ")", ":", "for", "file", "in", "files", ":", "if", "file", "!=", "'__init__.py'", ":", "continue", "full", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", ")", "relative", "=", "os", ".", "path", ".", "relpath", "(", "full", ",", "folder", ")", "packages", ".", "append", "(", "relative", ")", "package_dir", "[", "relative", "]", "=", "full", "# we use pprint because we want the order to always remain the same", "return", "'packages={0},\\npackage_dir={1}'", ".", "format", "(", "sorted", "(", "packages", ")", ",", "pprint", ".", "pformat", "(", "package_dir", ")", ")" ]
this function finds all the python packages under a folder and write the 'packages' and 'package_dir' entries for a python setup.py script
[ "this", "function", "finds", "all", "the", "python", "packages", "under", "a", "folder", "and", "write", "the", "packages", "and", "package_dir", "entries", "for", "a", "python", "setup", ".", "py", "script" ]
11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7
https://github.com/veltzer/pydmt/blob/11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7/pydmt/utils/python.py#L6-L24
247,430
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.show
def show(self): """This could use some love, it's currently here as reference""" for entry in self._entries: print "{'%s': %s, 'records': %s}" % ( entry._record_type, entry.host, entry.records) print
python
def show(self): """This could use some love, it's currently here as reference""" for entry in self._entries: print "{'%s': %s, 'records': %s}" % ( entry._record_type, entry.host, entry.records) print
[ "def", "show", "(", "self", ")", ":", "for", "entry", "in", "self", ".", "_entries", ":", "print", "\"{'%s': %s, 'records': %s}\"", "%", "(", "entry", ".", "_record_type", ",", "entry", ".", "host", ",", "entry", ".", "records", ")", "print" ]
This could use some love, it's currently here as reference
[ "This", "could", "use", "some", "love", "it", "s", "currently", "here", "as", "reference" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L53-L58
247,431
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.add_record
def add_record(self, record): """Add or update a given DNS record""" rec = self.get_record(record._record_type, record.host) if rec: rec = record for i,r in enumerate(self._entries): if r._record_type == record._record_type \ and r.host == record.host: self._entries[i] = record else: self._entries.append(record) self.sort() return True
python
def add_record(self, record): """Add or update a given DNS record""" rec = self.get_record(record._record_type, record.host) if rec: rec = record for i,r in enumerate(self._entries): if r._record_type == record._record_type \ and r.host == record.host: self._entries[i] = record else: self._entries.append(record) self.sort() return True
[ "def", "add_record", "(", "self", ",", "record", ")", ":", "rec", "=", "self", ".", "get_record", "(", "record", ".", "_record_type", ",", "record", ".", "host", ")", "if", "rec", ":", "rec", "=", "record", "for", "i", ",", "r", "in", "enumerate", "(", "self", ".", "_entries", ")", ":", "if", "r", ".", "_record_type", "==", "record", ".", "_record_type", "and", "r", ".", "host", "==", "record", ".", "host", ":", "self", ".", "_entries", "[", "i", "]", "=", "record", "else", ":", "self", ".", "_entries", ".", "append", "(", "record", ")", "self", ".", "sort", "(", ")", "return", "True" ]
Add or update a given DNS record
[ "Add", "or", "update", "a", "given", "DNS", "record" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L60-L72
247,432
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.diff_record
def diff_record(self, record): """Return the removed and added diffs""" rec = self.get_record(record._record_type, record.host) if rec is not None and record is not None: return {'removed': tuple(set(rec.results) - set(record.results)), 'added': tuple(set(record.results) - set(rec.recults))} else: return False
python
def diff_record(self, record): """Return the removed and added diffs""" rec = self.get_record(record._record_type, record.host) if rec is not None and record is not None: return {'removed': tuple(set(rec.results) - set(record.results)), 'added': tuple(set(record.results) - set(rec.recults))} else: return False
[ "def", "diff_record", "(", "self", ",", "record", ")", ":", "rec", "=", "self", ".", "get_record", "(", "record", ".", "_record_type", ",", "record", ".", "host", ")", "if", "rec", "is", "not", "None", "and", "record", "is", "not", "None", ":", "return", "{", "'removed'", ":", "tuple", "(", "set", "(", "rec", ".", "results", ")", "-", "set", "(", "record", ".", "results", ")", ")", ",", "'added'", ":", "tuple", "(", "set", "(", "record", ".", "results", ")", "-", "set", "(", "rec", ".", "recults", ")", ")", "}", "else", ":", "return", "False" ]
Return the removed and added diffs
[ "Return", "the", "removed", "and", "added", "diffs" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L74-L81
247,433
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.get_record
def get_record(self, dns_record_type, host): """Fetch a DNS record""" record_list = self._entries for record in record_list: if record._record_type == dns_record_type \ and record.host == host: return record return False
python
def get_record(self, dns_record_type, host): """Fetch a DNS record""" record_list = self._entries for record in record_list: if record._record_type == dns_record_type \ and record.host == host: return record return False
[ "def", "get_record", "(", "self", ",", "dns_record_type", ",", "host", ")", ":", "record_list", "=", "self", ".", "_entries", "for", "record", "in", "record_list", ":", "if", "record", ".", "_record_type", "==", "dns_record_type", "and", "record", ".", "host", "==", "host", ":", "return", "record", "return", "False" ]
Fetch a DNS record
[ "Fetch", "a", "DNS", "record" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L83-L90
247,434
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.del_record
def del_record(self, dns_record_type, host): """Remove a DNS record""" rec = self.get_record(dns_record_type, host) if rec: self._entries = list(set(self._entries) - set([rec])) return True
python
def del_record(self, dns_record_type, host): """Remove a DNS record""" rec = self.get_record(dns_record_type, host) if rec: self._entries = list(set(self._entries) - set([rec])) return True
[ "def", "del_record", "(", "self", ",", "dns_record_type", ",", "host", ")", ":", "rec", "=", "self", ".", "get_record", "(", "dns_record_type", ",", "host", ")", "if", "rec", ":", "self", ".", "_entries", "=", "list", "(", "set", "(", "self", ".", "_entries", ")", "-", "set", "(", "[", "rec", "]", ")", ")", "return", "True" ]
Remove a DNS record
[ "Remove", "a", "DNS", "record" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L92-L97
247,435
seryl/Python-Cotendo
cotendo/cotendohelper.py
CotendoDNS.config
def config(self): """Create the finalized configuration""" root = etree.Element("xml") resource_records = etree.SubElement(root, "resource_records") # Append SOA and NS records resource_records.append(SOARecord()._etree) resource_records.append(NSRecord()._etree) # Append the rest for record in self._entries: resource_records.append(record._etree) return etree.tostring(root, encoding="utf-8", pretty_print=True)
python
def config(self): """Create the finalized configuration""" root = etree.Element("xml") resource_records = etree.SubElement(root, "resource_records") # Append SOA and NS records resource_records.append(SOARecord()._etree) resource_records.append(NSRecord()._etree) # Append the rest for record in self._entries: resource_records.append(record._etree) return etree.tostring(root, encoding="utf-8", pretty_print=True)
[ "def", "config", "(", "self", ")", ":", "root", "=", "etree", ".", "Element", "(", "\"xml\"", ")", "resource_records", "=", "etree", ".", "SubElement", "(", "root", ",", "\"resource_records\"", ")", "# Append SOA and NS records", "resource_records", ".", "append", "(", "SOARecord", "(", ")", ".", "_etree", ")", "resource_records", ".", "append", "(", "NSRecord", "(", ")", ".", "_etree", ")", "# Append the rest", "for", "record", "in", "self", ".", "_entries", ":", "resource_records", ".", "append", "(", "record", ".", "_etree", ")", "return", "etree", ".", "tostring", "(", "root", ",", "encoding", "=", "\"utf-8\"", ",", "pretty_print", "=", "True", ")" ]
Create the finalized configuration
[ "Create", "the", "finalized", "configuration" ]
a55e034f0845332319859f6276adc6ba35f5a121
https://github.com/seryl/Python-Cotendo/blob/a55e034f0845332319859f6276adc6ba35f5a121/cotendo/cotendohelper.py#L141-L155
247,436
vietjtnguyen/nut
nut/relay.py
parsePortSpec
def parsePortSpec(spec, separator='-'): ''' Parses a port specification in two forms into the same form. In the first form the specification is just an integer. In this case a tuple is returned containing the same integer twice. In the second form the specification is two numbers separated by a hyphen ('-' by default, specifiable with :param separator:). The two numbers are parsed as integers and returned in the same order as a tuple of two integers. Example: .. code-block: python parsePortSpec('12345') # -> (12345, 12345) parsePortSpec('12345-56789') # -> (12345, 56789) ''' x = list(map(lambda x: x.strip(), spec.split(separator))) return tuple(map(int, x * (3 - len(x))))
python
def parsePortSpec(spec, separator='-'): ''' Parses a port specification in two forms into the same form. In the first form the specification is just an integer. In this case a tuple is returned containing the same integer twice. In the second form the specification is two numbers separated by a hyphen ('-' by default, specifiable with :param separator:). The two numbers are parsed as integers and returned in the same order as a tuple of two integers. Example: .. code-block: python parsePortSpec('12345') # -> (12345, 12345) parsePortSpec('12345-56789') # -> (12345, 56789) ''' x = list(map(lambda x: x.strip(), spec.split(separator))) return tuple(map(int, x * (3 - len(x))))
[ "def", "parsePortSpec", "(", "spec", ",", "separator", "=", "'-'", ")", ":", "x", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "spec", ".", "split", "(", "separator", ")", ")", ")", "return", "tuple", "(", "map", "(", "int", ",", "x", "*", "(", "3", "-", "len", "(", "x", ")", ")", ")", ")" ]
Parses a port specification in two forms into the same form. In the first form the specification is just an integer. In this case a tuple is returned containing the same integer twice. In the second form the specification is two numbers separated by a hyphen ('-' by default, specifiable with :param separator:). The two numbers are parsed as integers and returned in the same order as a tuple of two integers. Example: .. code-block: python parsePortSpec('12345') # -> (12345, 12345) parsePortSpec('12345-56789') # -> (12345, 56789)
[ "Parses", "a", "port", "specification", "in", "two", "forms", "into", "the", "same", "form", "." ]
3b9c272ebc6d858c790e43d2fc507885d8029d36
https://github.com/vietjtnguyen/nut/blob/3b9c272ebc6d858c790e43d2fc507885d8029d36/nut/relay.py#L831-L851
247,437
vietjtnguyen/nut
nut/relay.py
Dump.dump
def dump(self, msg): ''' Dumps the provided message to this dump. ''' msg_size = len(msg) # We start a new batch if the resulting batch file is larger than the # max batch file size. However, if the current batch file size is zero # then that means the message alone is larger than the max batch file # size. In this case instead of splitting up the message across files # which would greatly increase complexity we simply dump that message # into a file of its own even though it will be larger than the max # batch file size. if self._batch_size + msg_size > self._max_batch_file_size \ and self._batch_size > 0: self._startNewBatch() # Write the time stamp and information on how to retrieve the message # from the batch files (batch filename, byte offset, and byte size) global getTime index_file_entry = '{:},{:09d},{:},{:}\n'.format( getTime(), self._batch_index, self._batch_size, msg_size) if sys.version_info >= (3,): self._index_file.write(index_file_entry.encode('utf-8')) else: self._index_file.write(index_file_entry) # Dump the message itself to the current batch file self._batch_file.write(msg) self._batch_size += msg_size # Increment message count self._message_count += 1
python
def dump(self, msg): ''' Dumps the provided message to this dump. ''' msg_size = len(msg) # We start a new batch if the resulting batch file is larger than the # max batch file size. However, if the current batch file size is zero # then that means the message alone is larger than the max batch file # size. In this case instead of splitting up the message across files # which would greatly increase complexity we simply dump that message # into a file of its own even though it will be larger than the max # batch file size. if self._batch_size + msg_size > self._max_batch_file_size \ and self._batch_size > 0: self._startNewBatch() # Write the time stamp and information on how to retrieve the message # from the batch files (batch filename, byte offset, and byte size) global getTime index_file_entry = '{:},{:09d},{:},{:}\n'.format( getTime(), self._batch_index, self._batch_size, msg_size) if sys.version_info >= (3,): self._index_file.write(index_file_entry.encode('utf-8')) else: self._index_file.write(index_file_entry) # Dump the message itself to the current batch file self._batch_file.write(msg) self._batch_size += msg_size # Increment message count self._message_count += 1
[ "def", "dump", "(", "self", ",", "msg", ")", ":", "msg_size", "=", "len", "(", "msg", ")", "# We start a new batch if the resulting batch file is larger than the", "# max batch file size. However, if the current batch file size is zero", "# then that means the message alone is larger than the max batch file", "# size. In this case instead of splitting up the message across files", "# which would greatly increase complexity we simply dump that message", "# into a file of its own even though it will be larger than the max", "# batch file size.", "if", "self", ".", "_batch_size", "+", "msg_size", ">", "self", ".", "_max_batch_file_size", "and", "self", ".", "_batch_size", ">", "0", ":", "self", ".", "_startNewBatch", "(", ")", "# Write the time stamp and information on how to retrieve the message", "# from the batch files (batch filename, byte offset, and byte size)", "global", "getTime", "index_file_entry", "=", "'{:},{:09d},{:},{:}\\n'", ".", "format", "(", "getTime", "(", ")", ",", "self", ".", "_batch_index", ",", "self", ".", "_batch_size", ",", "msg_size", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "self", ".", "_index_file", ".", "write", "(", "index_file_entry", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "self", ".", "_index_file", ".", "write", "(", "index_file_entry", ")", "# Dump the message itself to the current batch file", "self", ".", "_batch_file", ".", "write", "(", "msg", ")", "self", ".", "_batch_size", "+=", "msg_size", "# Increment message count", "self", ".", "_message_count", "+=", "1" ]
Dumps the provided message to this dump.
[ "Dumps", "the", "provided", "message", "to", "this", "dump", "." ]
3b9c272ebc6d858c790e43d2fc507885d8029d36
https://github.com/vietjtnguyen/nut/blob/3b9c272ebc6d858c790e43d2fc507885d8029d36/nut/relay.py#L1023-L1055
247,438
20c/xbahn
xbahn/shortcuts.py
api_server
def api_server(connection, server_class): """ Establishes an API Server on the supplied connection Arguments: - connection (xbahn.connection.Connection) - server_class (xbahn.api.Server) Returns: - server_class: server instance """ # run api server on connection return server_class( link=xbahn.connection.link.Link( # use the connection to receive messages receive=connection, # use the connection to respond to received messages respond=connection ) )
python
def api_server(connection, server_class): """ Establishes an API Server on the supplied connection Arguments: - connection (xbahn.connection.Connection) - server_class (xbahn.api.Server) Returns: - server_class: server instance """ # run api server on connection return server_class( link=xbahn.connection.link.Link( # use the connection to receive messages receive=connection, # use the connection to respond to received messages respond=connection ) )
[ "def", "api_server", "(", "connection", ",", "server_class", ")", ":", "# run api server on connection", "return", "server_class", "(", "link", "=", "xbahn", ".", "connection", ".", "link", ".", "Link", "(", "# use the connection to receive messages", "receive", "=", "connection", ",", "# use the connection to respond to received messages", "respond", "=", "connection", ")", ")" ]
Establishes an API Server on the supplied connection Arguments: - connection (xbahn.connection.Connection) - server_class (xbahn.api.Server) Returns: - server_class: server instance
[ "Establishes", "an", "API", "Server", "on", "the", "supplied", "connection" ]
afb27b0576841338a366d7cac0200a782bd84be6
https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/shortcuts.py#L79-L99
247,439
opinkerfi/nago
nago/extensions/nodes.py
list_nodes
def list_nodes(search="unsigned"): """ List all connected nodes """ nodes = nago.core.get_nodes() if search == "all": return map(lambda x: {x[0]: x[1].data}, nodes.items()) elif search == 'unsigned': result = {} for token, node in nodes.items(): if node.get('access') is None: result[token] = node.data return result else: result = {} for token, node in nodes.items(): host_name = node.get('host_name') if search in (token, host_name): result[token] = node.data return result
python
def list_nodes(search="unsigned"): """ List all connected nodes """ nodes = nago.core.get_nodes() if search == "all": return map(lambda x: {x[0]: x[1].data}, nodes.items()) elif search == 'unsigned': result = {} for token, node in nodes.items(): if node.get('access') is None: result[token] = node.data return result else: result = {} for token, node in nodes.items(): host_name = node.get('host_name') if search in (token, host_name): result[token] = node.data return result
[ "def", "list_nodes", "(", "search", "=", "\"unsigned\"", ")", ":", "nodes", "=", "nago", ".", "core", ".", "get_nodes", "(", ")", "if", "search", "==", "\"all\"", ":", "return", "map", "(", "lambda", "x", ":", "{", "x", "[", "0", "]", ":", "x", "[", "1", "]", ".", "data", "}", ",", "nodes", ".", "items", "(", ")", ")", "elif", "search", "==", "'unsigned'", ":", "result", "=", "{", "}", "for", "token", ",", "node", "in", "nodes", ".", "items", "(", ")", ":", "if", "node", ".", "get", "(", "'access'", ")", "is", "None", ":", "result", "[", "token", "]", "=", "node", ".", "data", "return", "result", "else", ":", "result", "=", "{", "}", "for", "token", ",", "node", "in", "nodes", ".", "items", "(", ")", ":", "host_name", "=", "node", ".", "get", "(", "'host_name'", ")", "if", "search", "in", "(", "token", ",", "host_name", ")", ":", "result", "[", "token", "]", "=", "node", ".", "data", "return", "result" ]
List all connected nodes
[ "List", "all", "connected", "nodes" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L11-L29
247,440
opinkerfi/nago
nago/extensions/nodes.py
add
def add(node_name, **kwargs): """ Create a new node and generate a token for it """ result = {} kwargs = kwargs.copy() overwrite = kwargs.pop('overwrite', False) node = nago.core.get_node(node_name) if not node: node = nago.core.Node() elif not overwrite: result['status'] = 'error' result['message'] = "node %s already exists. add argument overwrite=1 to overwrite it." % (node_name) return result else: node.delete() node = nago.core.Node() node['host_name'] = node_name for k, v in kwargs.items(): node[k] = v node.save() result['message'] = "node successfully saved" result['node_data'] = node.data return result
python
def add(node_name, **kwargs): """ Create a new node and generate a token for it """ result = {} kwargs = kwargs.copy() overwrite = kwargs.pop('overwrite', False) node = nago.core.get_node(node_name) if not node: node = nago.core.Node() elif not overwrite: result['status'] = 'error' result['message'] = "node %s already exists. add argument overwrite=1 to overwrite it." % (node_name) return result else: node.delete() node = nago.core.Node() node['host_name'] = node_name for k, v in kwargs.items(): node[k] = v node.save() result['message'] = "node successfully saved" result['node_data'] = node.data return result
[ "def", "add", "(", "node_name", ",", "*", "*", "kwargs", ")", ":", "result", "=", "{", "}", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "overwrite", "=", "kwargs", ".", "pop", "(", "'overwrite'", ",", "False", ")", "node", "=", "nago", ".", "core", ".", "get_node", "(", "node_name", ")", "if", "not", "node", ":", "node", "=", "nago", ".", "core", ".", "Node", "(", ")", "elif", "not", "overwrite", ":", "result", "[", "'status'", "]", "=", "'error'", "result", "[", "'message'", "]", "=", "\"node %s already exists. add argument overwrite=1 to overwrite it.\"", "%", "(", "node_name", ")", "return", "result", "else", ":", "node", ".", "delete", "(", ")", "node", "=", "nago", ".", "core", ".", "Node", "(", ")", "node", "[", "'host_name'", "]", "=", "node_name", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "node", "[", "k", "]", "=", "v", "node", ".", "save", "(", ")", "result", "[", "'message'", "]", "=", "\"node successfully saved\"", "result", "[", "'node_data'", "]", "=", "node", ".", "data", "return", "result" ]
Create a new node and generate a token for it
[ "Create", "a", "new", "node", "and", "generate", "a", "token", "for", "it" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L32-L53
247,441
opinkerfi/nago
nago/extensions/nodes.py
delete
def delete(node_name): """ Delete a specific node """ result = {} node = nago.core.get_node(node_name) if not node: result['status'] = 'error' result['message'] = "node not found." else: node.delete() result['status'] = 'success' result['message'] = 'node deleted.' return result
python
def delete(node_name): """ Delete a specific node """ result = {} node = nago.core.get_node(node_name) if not node: result['status'] = 'error' result['message'] = "node not found." else: node.delete() result['status'] = 'success' result['message'] = 'node deleted.' return result
[ "def", "delete", "(", "node_name", ")", ":", "result", "=", "{", "}", "node", "=", "nago", ".", "core", ".", "get_node", "(", "node_name", ")", "if", "not", "node", ":", "result", "[", "'status'", "]", "=", "'error'", "result", "[", "'message'", "]", "=", "\"node not found.\"", "else", ":", "node", ".", "delete", "(", ")", "result", "[", "'status'", "]", "=", "'success'", "result", "[", "'message'", "]", "=", "'node deleted.'", "return", "result" ]
Delete a specific node
[ "Delete", "a", "specific", "node" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L56-L67
247,442
opinkerfi/nago
nago/extensions/nodes.py
sign
def sign(node=None): """ Sign a specific node to grant it access you can specify "all" to sign all nodes returns the nodes that were signed """ if not node: raise Exception("Specify either 'all' your specify token/host_name of node to sign. ") if node == 'all': node = 'unsigned' nodes = list_nodes(search=node) result = {} for token, i in nodes.items(): i['access'] = 'node' i.save() result[token] = i return result
python
def sign(node=None): """ Sign a specific node to grant it access you can specify "all" to sign all nodes returns the nodes that were signed """ if not node: raise Exception("Specify either 'all' your specify token/host_name of node to sign. ") if node == 'all': node = 'unsigned' nodes = list_nodes(search=node) result = {} for token, i in nodes.items(): i['access'] = 'node' i.save() result[token] = i return result
[ "def", "sign", "(", "node", "=", "None", ")", ":", "if", "not", "node", ":", "raise", "Exception", "(", "\"Specify either 'all' your specify token/host_name of node to sign. \"", ")", "if", "node", "==", "'all'", ":", "node", "=", "'unsigned'", "nodes", "=", "list_nodes", "(", "search", "=", "node", ")", "result", "=", "{", "}", "for", "token", ",", "i", "in", "nodes", ".", "items", "(", ")", ":", "i", "[", "'access'", "]", "=", "'node'", "i", ".", "save", "(", ")", "result", "[", "token", "]", "=", "i", "return", "result" ]
Sign a specific node to grant it access you can specify "all" to sign all nodes returns the nodes that were signed
[ "Sign", "a", "specific", "node", "to", "grant", "it", "access" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L70-L87
247,443
opinkerfi/nago
nago/extensions/nodes.py
set_attribute
def set_attribute(token_or_hostname, **kwargs): """ Change the attributes of a connected node """ node = nago.core.get_node(token_or_hostname) or {} if not kwargs: return "No changes made" for k, v in kwargs.items(): node[k] = v node.save() return "Saved %s changes" % len(kwargs)
python
def set_attribute(token_or_hostname, **kwargs): """ Change the attributes of a connected node """ node = nago.core.get_node(token_or_hostname) or {} if not kwargs: return "No changes made" for k, v in kwargs.items(): node[k] = v node.save() return "Saved %s changes" % len(kwargs)
[ "def", "set_attribute", "(", "token_or_hostname", ",", "*", "*", "kwargs", ")", ":", "node", "=", "nago", ".", "core", ".", "get_node", "(", "token_or_hostname", ")", "or", "{", "}", "if", "not", "kwargs", ":", "return", "\"No changes made\"", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "node", "[", "k", "]", "=", "v", "node", ".", "save", "(", ")", "return", "\"Saved %s changes\"", "%", "len", "(", "kwargs", ")" ]
Change the attributes of a connected node
[ "Change", "the", "attributes", "of", "a", "connected", "node" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L90-L98
247,444
opinkerfi/nago
nago/extensions/nodes.py
ping
def ping(token_or_hostname=None): """ Send an echo request to a nago host. Arguments: token_or_host_name -- The remote node to ping If node is not provided, simply return pong You can use the special nodenames "server" or "master" """ if not token_or_hostname: return "Pong!" node = nago.core.get_node(token_or_hostname) if not node and token_or_hostname in ('master', 'server'): token_or_hostname = nago.settings.get_option('server') node = nago.core.get_node(token_or_hostname) if not node: try: address = socket.gethostbyname(token_or_hostname) node = nago.core.Node() node['host_name'] = token_or_hostname node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % token_or_hostname) return node.send_command('nodes', 'ping')
python
def ping(token_or_hostname=None): """ Send an echo request to a nago host. Arguments: token_or_host_name -- The remote node to ping If node is not provided, simply return pong You can use the special nodenames "server" or "master" """ if not token_or_hostname: return "Pong!" node = nago.core.get_node(token_or_hostname) if not node and token_or_hostname in ('master', 'server'): token_or_hostname = nago.settings.get_option('server') node = nago.core.get_node(token_or_hostname) if not node: try: address = socket.gethostbyname(token_or_hostname) node = nago.core.Node() node['host_name'] = token_or_hostname node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % token_or_hostname) return node.send_command('nodes', 'ping')
[ "def", "ping", "(", "token_or_hostname", "=", "None", ")", ":", "if", "not", "token_or_hostname", ":", "return", "\"Pong!\"", "node", "=", "nago", ".", "core", ".", "get_node", "(", "token_or_hostname", ")", "if", "not", "node", "and", "token_or_hostname", "in", "(", "'master'", ",", "'server'", ")", ":", "token_or_hostname", "=", "nago", ".", "settings", ".", "get_option", "(", "'server'", ")", "node", "=", "nago", ".", "core", ".", "get_node", "(", "token_or_hostname", ")", "if", "not", "node", ":", "try", ":", "address", "=", "socket", ".", "gethostbyname", "(", "token_or_hostname", ")", "node", "=", "nago", ".", "core", ".", "Node", "(", ")", "node", "[", "'host_name'", "]", "=", "token_or_hostname", "node", "[", "'address'", "]", "=", "address", "node", "[", "'access'", "]", "=", "'node'", "if", "token_or_hostname", "==", "nago", ".", "settings", ".", "get_option", "(", "'server'", ")", ":", "node", "[", "'access'", "]", "=", "'master'", "node", ".", "save", "(", ")", "except", "Exception", ":", "raise", "Exception", "(", "\"'%s' was not found in list of known hosts, and does not resolve to a valid address\"", "%", "token_or_hostname", ")", "return", "node", ".", "send_command", "(", "'nodes'", ",", "'ping'", ")" ]
Send an echo request to a nago host. Arguments: token_or_host_name -- The remote node to ping If node is not provided, simply return pong You can use the special nodenames "server" or "master"
[ "Send", "an", "echo", "request", "to", "a", "nago", "host", "." ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L101-L127
247,445
opinkerfi/nago
nago/extensions/nodes.py
connect
def connect(remote_host): """ Connect to remote host and show our status """ if remote_host in ('master', 'server'): remote_host = nago.settings.get_option('server') node = nago.core.get_node(remote_host) if not node: try: address = socket.gethostbyname(remote_host) node = nago.core.Node() node['host_name'] = remote_host node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % remote_host) ping_result = node.send_command('nodes', 'ping') if 'Pong' in ping_result.get('result', ''): return "Connection with %s ok" % remote_host else: return ping_result.get('result', ping_result)
python
def connect(remote_host): """ Connect to remote host and show our status """ if remote_host in ('master', 'server'): remote_host = nago.settings.get_option('server') node = nago.core.get_node(remote_host) if not node: try: address = socket.gethostbyname(remote_host) node = nago.core.Node() node['host_name'] = remote_host node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % remote_host) ping_result = node.send_command('nodes', 'ping') if 'Pong' in ping_result.get('result', ''): return "Connection with %s ok" % remote_host else: return ping_result.get('result', ping_result)
[ "def", "connect", "(", "remote_host", ")", ":", "if", "remote_host", "in", "(", "'master'", ",", "'server'", ")", ":", "remote_host", "=", "nago", ".", "settings", ".", "get_option", "(", "'server'", ")", "node", "=", "nago", ".", "core", ".", "get_node", "(", "remote_host", ")", "if", "not", "node", ":", "try", ":", "address", "=", "socket", ".", "gethostbyname", "(", "remote_host", ")", "node", "=", "nago", ".", "core", ".", "Node", "(", ")", "node", "[", "'host_name'", "]", "=", "remote_host", "node", "[", "'address'", "]", "=", "address", "node", "[", "'access'", "]", "=", "'node'", "if", "token_or_hostname", "==", "nago", ".", "settings", ".", "get_option", "(", "'server'", ")", ":", "node", "[", "'access'", "]", "=", "'master'", "node", ".", "save", "(", ")", "except", "Exception", ":", "raise", "Exception", "(", "\"'%s' was not found in list of known hosts, and does not resolve to a valid address\"", "%", "remote_host", ")", "ping_result", "=", "node", ".", "send_command", "(", "'nodes'", ",", "'ping'", ")", "if", "'Pong'", "in", "ping_result", ".", "get", "(", "'result'", ",", "''", ")", ":", "return", "\"Connection with %s ok\"", "%", "remote_host", "else", ":", "return", "ping_result", ".", "get", "(", "'result'", ",", "ping_result", ")" ]
Connect to remote host and show our status
[ "Connect", "to", "remote", "host", "and", "show", "our", "status" ]
85e1bdd1de0122f56868a483e7599e1b36a439b0
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/nodes.py#L130-L151
247,446
djtaylor/python-lsbinit
lsbinit/__init__.py
set_environ
def set_environ(inherit=True, append={}): """ Helper method for passing environment variables to the subprocess. """ _environ = {} if not inherit else environ for k,v in append.iteritems(): _environ[k] = v return _environ
python
def set_environ(inherit=True, append={}): """ Helper method for passing environment variables to the subprocess. """ _environ = {} if not inherit else environ for k,v in append.iteritems(): _environ[k] = v return _environ
[ "def", "set_environ", "(", "inherit", "=", "True", ",", "append", "=", "{", "}", ")", ":", "_environ", "=", "{", "}", "if", "not", "inherit", "else", "environ", "for", "k", ",", "v", "in", "append", ".", "iteritems", "(", ")", ":", "_environ", "[", "k", "]", "=", "v", "return", "_environ" ]
Helper method for passing environment variables to the subprocess.
[ "Helper", "method", "for", "passing", "environment", "variables", "to", "the", "subprocess", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L21-L28
247,447
djtaylor/python-lsbinit
lsbinit/__init__.py
LSBInit._colorize
def _colorize(self, msg, color=None, encode=False): """ Colorize a string. """ # Valid colors colors = { 'red': '31', 'green': '32', 'yellow': '33' } # No color specified or unsupported color if not color or not color in colors: return msg # The colorized string if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
python
def _colorize(self, msg, color=None, encode=False): """ Colorize a string. """ # Valid colors colors = { 'red': '31', 'green': '32', 'yellow': '33' } # No color specified or unsupported color if not color or not color in colors: return msg # The colorized string if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
[ "def", "_colorize", "(", "self", ",", "msg", ",", "color", "=", "None", ",", "encode", "=", "False", ")", ":", "# Valid colors", "colors", "=", "{", "'red'", ":", "'31'", ",", "'green'", ":", "'32'", ",", "'yellow'", ":", "'33'", "}", "# No color specified or unsupported color", "if", "not", "color", "or", "not", "color", "in", "colors", ":", "return", "msg", "# The colorized string", "if", "encode", ":", "return", "u'\\x1b[1;{}m{}\\x1b[0m'", ".", "format", "(", "colors", "[", "color", "]", ",", "msg", ")", "return", "'\\x1b[1;{}m{}\\x1b[0m'", ".", "format", "(", "colors", "[", "color", "]", ",", "msg", ")" ]
Colorize a string.
[ "Colorize", "a", "string", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L51-L70
247,448
djtaylor/python-lsbinit
lsbinit/__init__.py
LSBInit.is_running
def is_running(self): """ Check if the service is running. """ try: kill(int(self.pid.get()), 0) return True # Process not running, remove PID/lock file if it exists except: self.pid.remove() self.lock.remove() return False
python
def is_running(self): """ Check if the service is running. """ try: kill(int(self.pid.get()), 0) return True # Process not running, remove PID/lock file if it exists except: self.pid.remove() self.lock.remove() return False
[ "def", "is_running", "(", "self", ")", ":", "try", ":", "kill", "(", "int", "(", "self", ".", "pid", ".", "get", "(", ")", ")", ",", "0", ")", "return", "True", "# Process not running, remove PID/lock file if it exists", "except", ":", "self", ".", "pid", ".", "remove", "(", ")", "self", ".", "lock", ".", "remove", "(", ")", "return", "False" ]
Check if the service is running.
[ "Check", "if", "the", "service", "is", "running", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L72-L84
247,449
djtaylor/python-lsbinit
lsbinit/__init__.py
LSBInit.set_output
def set_output(self): """ Set the output for the service command. """ if not self.output: return open(devnull, 'w') # Get the output file path output_dir = dirname(self.output) # Make the path exists if not isdir(output_dir): try: makedirs(output_dir) except Exception as e: self.die('Failed to create output directory "{}": {}'.format(output_dir, str(e))) return open(self.output, 'a')
python
def set_output(self): """ Set the output for the service command. """ if not self.output: return open(devnull, 'w') # Get the output file path output_dir = dirname(self.output) # Make the path exists if not isdir(output_dir): try: makedirs(output_dir) except Exception as e: self.die('Failed to create output directory "{}": {}'.format(output_dir, str(e))) return open(self.output, 'a')
[ "def", "set_output", "(", "self", ")", ":", "if", "not", "self", ".", "output", ":", "return", "open", "(", "devnull", ",", "'w'", ")", "# Get the output file path", "output_dir", "=", "dirname", "(", "self", ".", "output", ")", "# Make the path exists", "if", "not", "isdir", "(", "output_dir", ")", ":", "try", ":", "makedirs", "(", "output_dir", ")", "except", "Exception", "as", "e", ":", "self", ".", "die", "(", "'Failed to create output directory \"{}\": {}'", ".", "format", "(", "output_dir", ",", "str", "(", "e", ")", ")", ")", "return", "open", "(", "self", ".", "output", ",", "'a'", ")" ]
Set the output for the service command.
[ "Set", "the", "output", "for", "the", "service", "command", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L86-L102
247,450
djtaylor/python-lsbinit
lsbinit/__init__.py
LSBInit.do_status
def do_status(self): """ Get the status of the service. """ # Get the PID of the service pid = self.pid.get() # Status color / attributes status_color = 'green' if pid else 'red' status_dot = self._colorize(UNICODE['dot'], status_color, encode=True) # Active text active_txt = { 'active': '{} since {}'.format(self._colorize('active (running)', 'green'), self.pid.birthday()[1]), 'inactive': 'inactive (dead)' } # Print the status message print(status_dot, end=' ') print('{}.service - LSB: {}'.format(self.name, self.desc)) print(' Loaded: loaded (/etc/init.d/{})'.format(self.name)) print(' Active: {}'.format(active_txt['active' if pid else 'inactive'])) # Extra information if running if pid: ps = self.pid.ps() print(' Process: {}; [{}]'.format(pid, ps[0])) if ps[1]: for c in ps[1]: print(' Child: {}'.format(c)) print('')
python
def do_status(self): """ Get the status of the service. """ # Get the PID of the service pid = self.pid.get() # Status color / attributes status_color = 'green' if pid else 'red' status_dot = self._colorize(UNICODE['dot'], status_color, encode=True) # Active text active_txt = { 'active': '{} since {}'.format(self._colorize('active (running)', 'green'), self.pid.birthday()[1]), 'inactive': 'inactive (dead)' } # Print the status message print(status_dot, end=' ') print('{}.service - LSB: {}'.format(self.name, self.desc)) print(' Loaded: loaded (/etc/init.d/{})'.format(self.name)) print(' Active: {}'.format(active_txt['active' if pid else 'inactive'])) # Extra information if running if pid: ps = self.pid.ps() print(' Process: {}; [{}]'.format(pid, ps[0])) if ps[1]: for c in ps[1]: print(' Child: {}'.format(c)) print('')
[ "def", "do_status", "(", "self", ")", ":", "# Get the PID of the service", "pid", "=", "self", ".", "pid", ".", "get", "(", ")", "# Status color / attributes", "status_color", "=", "'green'", "if", "pid", "else", "'red'", "status_dot", "=", "self", ".", "_colorize", "(", "UNICODE", "[", "'dot'", "]", ",", "status_color", ",", "encode", "=", "True", ")", "# Active text", "active_txt", "=", "{", "'active'", ":", "'{} since {}'", ".", "format", "(", "self", ".", "_colorize", "(", "'active (running)'", ",", "'green'", ")", ",", "self", ".", "pid", ".", "birthday", "(", ")", "[", "1", "]", ")", ",", "'inactive'", ":", "'inactive (dead)'", "}", "# Print the status message", "print", "(", "status_dot", ",", "end", "=", "' '", ")", "print", "(", "'{}.service - LSB: {}'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "desc", ")", ")", "print", "(", "' Loaded: loaded (/etc/init.d/{})'", ".", "format", "(", "self", ".", "name", ")", ")", "print", "(", "' Active: {}'", ".", "format", "(", "active_txt", "[", "'active'", "if", "pid", "else", "'inactive'", "]", ")", ")", "# Extra information if running", "if", "pid", ":", "ps", "=", "self", ".", "pid", ".", "ps", "(", ")", "print", "(", "' Process: {}; [{}]'", ".", "format", "(", "pid", ",", "ps", "[", "0", "]", ")", ")", "if", "ps", "[", "1", "]", ":", "for", "c", "in", "ps", "[", "1", "]", ":", "print", "(", "' Child: {}'", ".", "format", "(", "c", ")", ")", "print", "(", "''", ")" ]
Get the status of the service.
[ "Get", "the", "status", "of", "the", "service", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L151-L182
247,451
djtaylor/python-lsbinit
lsbinit/__init__.py
LSBInit.interface
def interface(self): """ Public method for handling service command argument. """ # Possible control arguments controls = { 'start': self.do_start, 'stop': self.do_stop, 'status': self.do_status, 'restart': self.do_restart, 'reload': self.do_restart } # Process the control argument try: controls[self.command]() except KeyError: self.write_stdout('Usage: {} {{start|stop|status|restart|reload}}'.format(self.name), 3) exit(0)
python
def interface(self): """ Public method for handling service command argument. """ # Possible control arguments controls = { 'start': self.do_start, 'stop': self.do_stop, 'status': self.do_status, 'restart': self.do_restart, 'reload': self.do_restart } # Process the control argument try: controls[self.command]() except KeyError: self.write_stdout('Usage: {} {{start|stop|status|restart|reload}}'.format(self.name), 3) exit(0)
[ "def", "interface", "(", "self", ")", ":", "# Possible control arguments", "controls", "=", "{", "'start'", ":", "self", ".", "do_start", ",", "'stop'", ":", "self", ".", "do_stop", ",", "'status'", ":", "self", ".", "do_status", ",", "'restart'", ":", "self", ".", "do_restart", ",", "'reload'", ":", "self", ".", "do_restart", "}", "# Process the control argument", "try", ":", "controls", "[", "self", ".", "command", "]", "(", ")", "except", "KeyError", ":", "self", ".", "write_stdout", "(", "'Usage: {} {{start|stop|status|restart|reload}}'", ".", "format", "(", "self", ".", "name", ")", ",", "3", ")", "exit", "(", "0", ")" ]
Public method for handling service command argument.
[ "Public", "method", "for", "handling", "service", "command", "argument", "." ]
a41fc551226f61ac2bf1b8b0f3f5395db85e75a2
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L195-L214
247,452
treycucco/bidon
bidon/spreadsheet/csv.py
CSVWorksheet.parse_cell
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked): """Tries to convert the value first to an int, then a float and if neither is successful it returns the string value. """ try: return int(cell) except ValueError: pass try: return float(cell) except ValueError: pass # TODO Check for dates? return cell
python
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked): """Tries to convert the value first to an int, then a float and if neither is successful it returns the string value. """ try: return int(cell) except ValueError: pass try: return float(cell) except ValueError: pass # TODO Check for dates? return cell
[ "def", "parse_cell", "(", "self", ",", "cell", ",", "coords", ",", "cell_mode", "=", "CellMode", ".", "cooked", ")", ":", "try", ":", "return", "int", "(", "cell", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "float", "(", "cell", ")", "except", "ValueError", ":", "pass", "# TODO Check for dates?", "return", "cell" ]
Tries to convert the value first to an int, then a float and if neither is successful it returns the string value.
[ "Tries", "to", "convert", "the", "value", "first", "to", "an", "int", "then", "a", "float", "and", "if", "neither", "is", "successful", "it", "returns", "the", "string", "value", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/spreadsheet/csv.py#L20-L33
247,453
treycucco/bidon
bidon/spreadsheet/csv.py
CSVWorkbook.iterate_sheets
def iterate_sheets(self, *args, **kwargs): """Opens self.filename and reads it with a csv reader. If self.filename ends with .gz, the file will be decompressed with gzip before being passed to csv.reader. If the filename is not a string, it is assumed to be a file-like object which will be passed directly to csv.reader. """ if isinstance(self.filename, str): if self.filename.endswith(".gz") or self.is_gzipped: with gzip.open(self.filename, "rt") as rfile: reader = csv.reader(rfile, *args, **kwargs) yield list(reader) else: with open(self.filename, "r") as rfile: reader = csv.reader(rfile, *args, **kwargs) yield list(reader) else: reader = csv.reader(self.filename, *args, **kwargs) yield list(reader)
python
def iterate_sheets(self, *args, **kwargs): """Opens self.filename and reads it with a csv reader. If self.filename ends with .gz, the file will be decompressed with gzip before being passed to csv.reader. If the filename is not a string, it is assumed to be a file-like object which will be passed directly to csv.reader. """ if isinstance(self.filename, str): if self.filename.endswith(".gz") or self.is_gzipped: with gzip.open(self.filename, "rt") as rfile: reader = csv.reader(rfile, *args, **kwargs) yield list(reader) else: with open(self.filename, "r") as rfile: reader = csv.reader(rfile, *args, **kwargs) yield list(reader) else: reader = csv.reader(self.filename, *args, **kwargs) yield list(reader)
[ "def", "iterate_sheets", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "self", ".", "filename", ",", "str", ")", ":", "if", "self", ".", "filename", ".", "endswith", "(", "\".gz\"", ")", "or", "self", ".", "is_gzipped", ":", "with", "gzip", ".", "open", "(", "self", ".", "filename", ",", "\"rt\"", ")", "as", "rfile", ":", "reader", "=", "csv", ".", "reader", "(", "rfile", ",", "*", "args", ",", "*", "*", "kwargs", ")", "yield", "list", "(", "reader", ")", "else", ":", "with", "open", "(", "self", ".", "filename", ",", "\"r\"", ")", "as", "rfile", ":", "reader", "=", "csv", ".", "reader", "(", "rfile", ",", "*", "args", ",", "*", "*", "kwargs", ")", "yield", "list", "(", "reader", ")", "else", ":", "reader", "=", "csv", ".", "reader", "(", "self", ".", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", "yield", "list", "(", "reader", ")" ]
Opens self.filename and reads it with a csv reader. If self.filename ends with .gz, the file will be decompressed with gzip before being passed to csv.reader. If the filename is not a string, it is assumed to be a file-like object which will be passed directly to csv.reader.
[ "Opens", "self", ".", "filename", "and", "reads", "it", "with", "a", "csv", "reader", "." ]
d9f24596841d0e69e8ac70a1d1a1deecea95e340
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/spreadsheet/csv.py#L47-L65
247,454
langloisjp/pysvcmetrics
metrics.py
time_methods
def time_methods(obj, methods, prefix=None): """ Patch obj so calls to given methods are timed >>> class C(object): ... def m1(self): ... return 'ok' ... ... def m2(self, arg): ... return arg ... >>> c = C() >>> time_methods(c, ['m1', 'm2']) >>> c.m1() 'ok' >>> c.m2('ok') 'ok' >>> c = C() >>> time_methods(c, ['m1'], 'mymetrics') """ if prefix: prefix = prefix + '.' else: prefix = '' for method in methods: current_method = getattr(obj, method) new_method = timed(prefix)(current_method) setattr(obj, method, new_method)
python
def time_methods(obj, methods, prefix=None): """ Patch obj so calls to given methods are timed >>> class C(object): ... def m1(self): ... return 'ok' ... ... def m2(self, arg): ... return arg ... >>> c = C() >>> time_methods(c, ['m1', 'm2']) >>> c.m1() 'ok' >>> c.m2('ok') 'ok' >>> c = C() >>> time_methods(c, ['m1'], 'mymetrics') """ if prefix: prefix = prefix + '.' else: prefix = '' for method in methods: current_method = getattr(obj, method) new_method = timed(prefix)(current_method) setattr(obj, method, new_method)
[ "def", "time_methods", "(", "obj", ",", "methods", ",", "prefix", "=", "None", ")", ":", "if", "prefix", ":", "prefix", "=", "prefix", "+", "'.'", "else", ":", "prefix", "=", "''", "for", "method", "in", "methods", ":", "current_method", "=", "getattr", "(", "obj", ",", "method", ")", "new_method", "=", "timed", "(", "prefix", ")", "(", "current_method", ")", "setattr", "(", "obj", ",", "method", ",", "new_method", ")" ]
Patch obj so calls to given methods are timed >>> class C(object): ... def m1(self): ... return 'ok' ... ... def m2(self, arg): ... return arg ... >>> c = C() >>> time_methods(c, ['m1', 'm2']) >>> c.m1() 'ok' >>> c.m2('ok') 'ok' >>> c = C() >>> time_methods(c, ['m1'], 'mymetrics')
[ "Patch", "obj", "so", "calls", "to", "given", "methods", "are", "timed" ]
a126fc029ab645d9db46c0f5712c416cdf80e370
https://github.com/langloisjp/pysvcmetrics/blob/a126fc029ab645d9db46c0f5712c416cdf80e370/metrics.py#L123-L151
247,455
CodyKochmann/stricttuple
build/lib/stricttuple/__init__.py
typedtuple.validate_fields
def validate_fields(self, **kwargs): """ ensures that all incoming fields are the types that were specified """ for field in self.fields: value = kwargs[field] required_type = self.fields[field] if type(value) != required_type: raise TypeError('{}.{} needs to be a {}, recieved: {}({})'.format( self.name, field, required_type.__name__, type(value).__name__, value.__repr__()))
python
def validate_fields(self, **kwargs): """ ensures that all incoming fields are the types that were specified """ for field in self.fields: value = kwargs[field] required_type = self.fields[field] if type(value) != required_type: raise TypeError('{}.{} needs to be a {}, recieved: {}({})'.format( self.name, field, required_type.__name__, type(value).__name__, value.__repr__()))
[ "def", "validate_fields", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "field", "in", "self", ".", "fields", ":", "value", "=", "kwargs", "[", "field", "]", "required_type", "=", "self", ".", "fields", "[", "field", "]", "if", "type", "(", "value", ")", "!=", "required_type", ":", "raise", "TypeError", "(", "'{}.{} needs to be a {}, recieved: {}({})'", ".", "format", "(", "self", ".", "name", ",", "field", ",", "required_type", ".", "__name__", ",", "type", "(", "value", ")", ".", "__name__", ",", "value", ".", "__repr__", "(", ")", ")", ")" ]
ensures that all incoming fields are the types that were specified
[ "ensures", "that", "all", "incoming", "fields", "are", "the", "types", "that", "were", "specified" ]
072cbd6f7b90f3f666dc0f2c10ab6056d86dfc72
https://github.com/CodyKochmann/stricttuple/blob/072cbd6f7b90f3f666dc0f2c10ab6056d86dfc72/build/lib/stricttuple/__init__.py#L45-L56
247,456
DoWhileGeek/authentise-services
authentise_services/slicing_settings.py
SlicingSettings.upload
def upload(self, path, engine, description=None): """Create a new config resource in the slicing service and upload the path contents to it""" if description is None: head, tail = ntpath.split(path) description = tail or ntpath.basename(head) url = "http://quickslice.{}/config/raw/".format(self.config.host) with open(path) as config_file: content = config_file.read() payload = {"engine": engine, "description": description, "content": content} post_resp = requests.post(url, json=payload, cookies={"session": self.session}) if not post_resp.ok: raise errors.ResourceError("config upload to slicing service failed") self.description = description self.location = post_resp.headers["Location"]
python
def upload(self, path, engine, description=None): """Create a new config resource in the slicing service and upload the path contents to it""" if description is None: head, tail = ntpath.split(path) description = tail or ntpath.basename(head) url = "http://quickslice.{}/config/raw/".format(self.config.host) with open(path) as config_file: content = config_file.read() payload = {"engine": engine, "description": description, "content": content} post_resp = requests.post(url, json=payload, cookies={"session": self.session}) if not post_resp.ok: raise errors.ResourceError("config upload to slicing service failed") self.description = description self.location = post_resp.headers["Location"]
[ "def", "upload", "(", "self", ",", "path", ",", "engine", ",", "description", "=", "None", ")", ":", "if", "description", "is", "None", ":", "head", ",", "tail", "=", "ntpath", ".", "split", "(", "path", ")", "description", "=", "tail", "or", "ntpath", ".", "basename", "(", "head", ")", "url", "=", "\"http://quickslice.{}/config/raw/\"", ".", "format", "(", "self", ".", "config", ".", "host", ")", "with", "open", "(", "path", ")", "as", "config_file", ":", "content", "=", "config_file", ".", "read", "(", ")", "payload", "=", "{", "\"engine\"", ":", "engine", ",", "\"description\"", ":", "description", ",", "\"content\"", ":", "content", "}", "post_resp", "=", "requests", ".", "post", "(", "url", ",", "json", "=", "payload", ",", "cookies", "=", "{", "\"session\"", ":", "self", ".", "session", "}", ")", "if", "not", "post_resp", ".", "ok", ":", "raise", "errors", ".", "ResourceError", "(", "\"config upload to slicing service failed\"", ")", "self", ".", "description", "=", "description", "self", ".", "location", "=", "post_resp", ".", "headers", "[", "\"Location\"", "]" ]
Create a new config resource in the slicing service and upload the path contents to it
[ "Create", "a", "new", "config", "resource", "in", "the", "slicing", "service", "and", "upload", "the", "path", "contents", "to", "it" ]
ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/slicing_settings.py#L31-L53
247,457
DoWhileGeek/authentise-services
authentise_services/slicing_settings.py
SlicingSettings.download
def download(self, path): """downloads a config resource to the path""" service_get_resp = requests.get(self.location, cookies={"session": self.session}) payload = service_get_resp.json() download_get_resp = requests.get(payload["content"]) with open(path, "wb") as config_file: config_file.write(download_get_resp.content)
python
def download(self, path): """downloads a config resource to the path""" service_get_resp = requests.get(self.location, cookies={"session": self.session}) payload = service_get_resp.json() download_get_resp = requests.get(payload["content"]) with open(path, "wb") as config_file: config_file.write(download_get_resp.content)
[ "def", "download", "(", "self", ",", "path", ")", ":", "service_get_resp", "=", "requests", ".", "get", "(", "self", ".", "location", ",", "cookies", "=", "{", "\"session\"", ":", "self", ".", "session", "}", ")", "payload", "=", "service_get_resp", ".", "json", "(", ")", "download_get_resp", "=", "requests", ".", "get", "(", "payload", "[", "\"content\"", "]", ")", "with", "open", "(", "path", ",", "\"wb\"", ")", "as", "config_file", ":", "config_file", ".", "write", "(", "download_get_resp", ".", "content", ")" ]
downloads a config resource to the path
[ "downloads", "a", "config", "resource", "to", "the", "path" ]
ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d
https://github.com/DoWhileGeek/authentise-services/blob/ee32bd7f7de15d3fb24c0a6374640d3a1ec8096d/authentise_services/slicing_settings.py#L55-L63
247,458
edeposit/edeposit.amqp.antivirus
src/edeposit/amqp/antivirus/antivirus.py
save_and_scan
def save_and_scan(filename, b64_data): """ Save `b64_data` to temporary file and scan it for viruses. Args: filename (str): Name of the file - used as basename for tmp file. b64_data (str): Content of the file encoded in base64. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. """ with NTFile(suffix="_"+os.path.basename(filename), mode="wb") as ifile: ifile.write( b64decode(b64_data) ) ifile.flush() os.chmod(ifile.name, 0755) return scan_file(ifile.name)
python
def save_and_scan(filename, b64_data): """ Save `b64_data` to temporary file and scan it for viruses. Args: filename (str): Name of the file - used as basename for tmp file. b64_data (str): Content of the file encoded in base64. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. """ with NTFile(suffix="_"+os.path.basename(filename), mode="wb") as ifile: ifile.write( b64decode(b64_data) ) ifile.flush() os.chmod(ifile.name, 0755) return scan_file(ifile.name)
[ "def", "save_and_scan", "(", "filename", ",", "b64_data", ")", ":", "with", "NTFile", "(", "suffix", "=", "\"_\"", "+", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "mode", "=", "\"wb\"", ")", "as", "ifile", ":", "ifile", ".", "write", "(", "b64decode", "(", "b64_data", ")", ")", "ifile", ".", "flush", "(", ")", "os", ".", "chmod", "(", "ifile", ".", "name", ",", "0755", ")", "return", "scan_file", "(", "ifile", ".", "name", ")" ]
Save `b64_data` to temporary file and scan it for viruses. Args: filename (str): Name of the file - used as basename for tmp file. b64_data (str): Content of the file encoded in base64. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
[ "Save", "b64_data", "to", "temporary", "file", "and", "scan", "it", "for", "viruses", "." ]
011b38bbe920819fab99a5891b1e70732321a598
https://github.com/edeposit/edeposit.amqp.antivirus/blob/011b38bbe920819fab99a5891b1e70732321a598/src/edeposit/amqp/antivirus/antivirus.py#L47-L66
247,459
fedora-infra/fmn.rules
fmn/rules/pagure.py
pagure_specific_project_filter
def pagure_specific_project_filter(config, message, project=None, *args, **kw): """ Particular pagure projects Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects. Specify multiple projects by separating them with a comma ','. """ if not pagure_catchall(config, message): return False project = kw.get('project', project) link = fedmsg.meta.msg2link(message, **config) if not link: return False project = project.split(',') if project else [] valid = False for proj in project: if '://pagure.io/%s/' % proj.strip() in link: valid = True return valid
python
def pagure_specific_project_filter(config, message, project=None, *args, **kw): """ Particular pagure projects Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects. Specify multiple projects by separating them with a comma ','. """ if not pagure_catchall(config, message): return False project = kw.get('project', project) link = fedmsg.meta.msg2link(message, **config) if not link: return False project = project.split(',') if project else [] valid = False for proj in project: if '://pagure.io/%s/' % proj.strip() in link: valid = True return valid
[ "def", "pagure_specific_project_filter", "(", "config", ",", "message", ",", "project", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "pagure_catchall", "(", "config", ",", "message", ")", ":", "return", "False", "project", "=", "kw", ".", "get", "(", "'project'", ",", "project", ")", "link", "=", "fedmsg", ".", "meta", ".", "msg2link", "(", "message", ",", "*", "*", "config", ")", "if", "not", "link", ":", "return", "False", "project", "=", "project", ".", "split", "(", "','", ")", "if", "project", "else", "[", "]", "valid", "=", "False", "for", "proj", "in", "project", ":", "if", "'://pagure.io/%s/'", "%", "proj", ".", "strip", "(", ")", "in", "link", ":", "valid", "=", "True", "return", "valid" ]
Particular pagure projects Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects. Specify multiple projects by separating them with a comma ','.
[ "Particular", "pagure", "projects" ]
f9ec790619fcc8b41803077c4dec094e5127fc24
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/pagure.py#L18-L41
247,460
fedora-infra/fmn.rules
fmn/rules/pagure.py
pagure_specific_project_tag_filter
def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw): """ Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','. """ if not pagure_catchall(config, message): return False tags = tags.split(',') if tags else [] tags = [tag.strip() for tag in tags if tag and tag.strip()] project_tags = set() project_tags.update(message.get('project', {}).get('tags', [])) project_tags.update( message.get('pullrequest', {}).get('project', {}).get('tags', [])) project_tags.update( message.get('commit', {}).get('repo', {}).get('tags', [])) valid = len(project_tags.intersection(set(tags))) > 0 return valid
python
def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw): """ Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','. """ if not pagure_catchall(config, message): return False tags = tags.split(',') if tags else [] tags = [tag.strip() for tag in tags if tag and tag.strip()] project_tags = set() project_tags.update(message.get('project', {}).get('tags', [])) project_tags.update( message.get('pullrequest', {}).get('project', {}).get('tags', [])) project_tags.update( message.get('commit', {}).get('repo', {}).get('tags', [])) valid = len(project_tags.intersection(set(tags))) > 0 return valid
[ "def", "pagure_specific_project_tag_filter", "(", "config", ",", "message", ",", "tags", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "pagure_catchall", "(", "config", ",", "message", ")", ":", "return", "False", "tags", "=", "tags", ".", "split", "(", "','", ")", "if", "tags", "else", "[", "]", "tags", "=", "[", "tag", ".", "strip", "(", ")", "for", "tag", "in", "tags", "if", "tag", "and", "tag", ".", "strip", "(", ")", "]", "project_tags", "=", "set", "(", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'project'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'pullrequest'", ",", "{", "}", ")", ".", "get", "(", "'project'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "project_tags", ".", "update", "(", "message", ".", "get", "(", "'commit'", ",", "{", "}", ")", ".", "get", "(", "'repo'", ",", "{", "}", ")", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "valid", "=", "len", "(", "project_tags", ".", "intersection", "(", "set", "(", "tags", ")", ")", ")", ">", "0", "return", "valid" ]
Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','.
[ "Particular", "pagure", "project", "tags" ]
f9ec790619fcc8b41803077c4dec094e5127fc24
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/pagure.py#L45-L68
247,461
chatfirst/chatfirst
chatfirst/__init__.py
build_uri
def build_uri(orig_uriparts, kwargs): """ Build the URI from the original uriparts and kwargs. Modifies kwargs. """ uriparts = [] for uripart in orig_uriparts: # If this part matches a keyword argument (starting with _), use # the supplied value. Otherwise, just use the part. if uripart.startswith("_"): part = (str(kwargs.pop(uripart, uripart))) else: part = uripart uriparts.append(part) uri = '/'.join(uriparts) # If an id kwarg is present and there is no id to fill in in # the list of uriparts, assume the id goes at the end. id = kwargs.pop('id', None) if id: uri += "/%s" % (id) return uri
python
def build_uri(orig_uriparts, kwargs): """ Build the URI from the original uriparts and kwargs. Modifies kwargs. """ uriparts = [] for uripart in orig_uriparts: # If this part matches a keyword argument (starting with _), use # the supplied value. Otherwise, just use the part. if uripart.startswith("_"): part = (str(kwargs.pop(uripart, uripart))) else: part = uripart uriparts.append(part) uri = '/'.join(uriparts) # If an id kwarg is present and there is no id to fill in in # the list of uriparts, assume the id goes at the end. id = kwargs.pop('id', None) if id: uri += "/%s" % (id) return uri
[ "def", "build_uri", "(", "orig_uriparts", ",", "kwargs", ")", ":", "uriparts", "=", "[", "]", "for", "uripart", "in", "orig_uriparts", ":", "# If this part matches a keyword argument (starting with _), use", "# the supplied value. Otherwise, just use the part.", "if", "uripart", ".", "startswith", "(", "\"_\"", ")", ":", "part", "=", "(", "str", "(", "kwargs", ".", "pop", "(", "uripart", ",", "uripart", ")", ")", ")", "else", ":", "part", "=", "uripart", "uriparts", ".", "append", "(", "part", ")", "uri", "=", "'/'", ".", "join", "(", "uriparts", ")", "# If an id kwarg is present and there is no id to fill in in", "# the list of uriparts, assume the id goes at the end.", "id", "=", "kwargs", ".", "pop", "(", "'id'", ",", "None", ")", "if", "id", ":", "uri", "+=", "\"/%s\"", "%", "(", "id", ")", "return", "uri" ]
Build the URI from the original uriparts and kwargs. Modifies kwargs.
[ "Build", "the", "URI", "from", "the", "original", "uriparts", "and", "kwargs", ".", "Modifies", "kwargs", "." ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/__init__.py#L100-L121
247,462
nicholasbishop/trask
trask/phase1.py
load
def load(path): """Load |path| and recursively expand any includes.""" with open(path) as rfile: steps = MODEL.parse(rfile.read()) new_steps = [] for step in steps: new_steps += expand_includes(step, path) return new_steps
python
def load(path): """Load |path| and recursively expand any includes.""" with open(path) as rfile: steps = MODEL.parse(rfile.read()) new_steps = [] for step in steps: new_steps += expand_includes(step, path) return new_steps
[ "def", "load", "(", "path", ")", ":", "with", "open", "(", "path", ")", "as", "rfile", ":", "steps", "=", "MODEL", ".", "parse", "(", "rfile", ".", "read", "(", ")", ")", "new_steps", "=", "[", "]", "for", "step", "in", "steps", ":", "new_steps", "+=", "expand_includes", "(", "step", ",", "path", ")", "return", "new_steps" ]
Load |path| and recursively expand any includes.
[ "Load", "|path|", "and", "recursively", "expand", "any", "includes", "." ]
a97688425f70b539c7710b498627da9a6e39afd8
https://github.com/nicholasbishop/trask/blob/a97688425f70b539c7710b498627da9a6e39afd8/trask/phase1.py#L68-L77
247,463
abe-winter/pg13-py
pg13/syncmessage.py
parse_serialdiff
def parse_serialdiff(sd_dict): "helper for translate_check" if isinstance(sd_dict,list): if len(sd_dict)!=2 or sd_dict[0]!='checkstale': raise NotImplementedError(sd_dict[0],len(sd_dict)) return CheckStale(sd_dict[1]) if isinstance(sd_dict['deltas'],list): # i.e. for VHString the whole deltas field is a single string # warning below: Delta.text will be a list sometimes. always? sd_dict['deltas']=[diff.Delta(d['slice']['a'],d['slice']['b'],d['replace']) for d in sd_dict['deltas']] return SerialDiff(**sd_dict)
python
def parse_serialdiff(sd_dict): "helper for translate_check" if isinstance(sd_dict,list): if len(sd_dict)!=2 or sd_dict[0]!='checkstale': raise NotImplementedError(sd_dict[0],len(sd_dict)) return CheckStale(sd_dict[1]) if isinstance(sd_dict['deltas'],list): # i.e. for VHString the whole deltas field is a single string # warning below: Delta.text will be a list sometimes. always? sd_dict['deltas']=[diff.Delta(d['slice']['a'],d['slice']['b'],d['replace']) for d in sd_dict['deltas']] return SerialDiff(**sd_dict)
[ "def", "parse_serialdiff", "(", "sd_dict", ")", ":", "if", "isinstance", "(", "sd_dict", ",", "list", ")", ":", "if", "len", "(", "sd_dict", ")", "!=", "2", "or", "sd_dict", "[", "0", "]", "!=", "'checkstale'", ":", "raise", "NotImplementedError", "(", "sd_dict", "[", "0", "]", ",", "len", "(", "sd_dict", ")", ")", "return", "CheckStale", "(", "sd_dict", "[", "1", "]", ")", "if", "isinstance", "(", "sd_dict", "[", "'deltas'", "]", ",", "list", ")", ":", "# i.e. for VHString the whole deltas field is a single string", "# warning below: Delta.text will be a list sometimes. always?", "sd_dict", "[", "'deltas'", "]", "=", "[", "diff", ".", "Delta", "(", "d", "[", "'slice'", "]", "[", "'a'", "]", ",", "d", "[", "'slice'", "]", "[", "'b'", "]", ",", "d", "[", "'replace'", "]", ")", "for", "d", "in", "sd_dict", "[", "'deltas'", "]", "]", "return", "SerialDiff", "(", "*", "*", "sd_dict", ")" ]
helper for translate_check
[ "helper", "for", "translate_check" ]
c78806f99f35541a8756987e86edca3438aa97f5
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/syncmessage.py#L32-L40
247,464
abe-winter/pg13-py
pg13/syncmessage.py
translate_update
def translate_update(blob): "converts JSON parse output to self-aware objects" # note below: v will be int or null return {translate_key(k):parse_serialdiff(v) for k,v in blob.items()}
python
def translate_update(blob): "converts JSON parse output to self-aware objects" # note below: v will be int or null return {translate_key(k):parse_serialdiff(v) for k,v in blob.items()}
[ "def", "translate_update", "(", "blob", ")", ":", "# note below: v will be int or null", "return", "{", "translate_key", "(", "k", ")", ":", "parse_serialdiff", "(", "v", ")", "for", "k", ",", "v", "in", "blob", ".", "items", "(", ")", "}" ]
converts JSON parse output to self-aware objects
[ "converts", "JSON", "parse", "output", "to", "self", "-", "aware", "objects" ]
c78806f99f35541a8756987e86edca3438aa97f5
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/syncmessage.py#L41-L44
247,465
abe-winter/pg13-py
pg13/syncmessage.py
process_check
def process_check(pool,model,field,version): "helper for do_check. version is an integer or null. returns ..." try: syncable=model[field] except pg.FieldError: return ['?field'] if not isinstance(syncable,syncschema.Syncable): return ['here',None,syncable] if syncable.version()>version: # this includes version=None. this is the load case as well as update. return ['here',syncable.version(),syncable.generate()] # 'here' as in 'here, take this' or 'here you go' elif syncable.version()==version: return ['ok',version] elif syncable.version()<version: return ['upload',syncable.version()] else: raise RuntimeError("shouldn't get here")
python
def process_check(pool,model,field,version): "helper for do_check. version is an integer or null. returns ..." try: syncable=model[field] except pg.FieldError: return ['?field'] if not isinstance(syncable,syncschema.Syncable): return ['here',None,syncable] if syncable.version()>version: # this includes version=None. this is the load case as well as update. return ['here',syncable.version(),syncable.generate()] # 'here' as in 'here, take this' or 'here you go' elif syncable.version()==version: return ['ok',version] elif syncable.version()<version: return ['upload',syncable.version()] else: raise RuntimeError("shouldn't get here")
[ "def", "process_check", "(", "pool", ",", "model", ",", "field", ",", "version", ")", ":", "try", ":", "syncable", "=", "model", "[", "field", "]", "except", "pg", ".", "FieldError", ":", "return", "[", "'?field'", "]", "if", "not", "isinstance", "(", "syncable", ",", "syncschema", ".", "Syncable", ")", ":", "return", "[", "'here'", ",", "None", ",", "syncable", "]", "if", "syncable", ".", "version", "(", ")", ">", "version", ":", "# this includes version=None. this is the load case as well as update.", "return", "[", "'here'", ",", "syncable", ".", "version", "(", ")", ",", "syncable", ".", "generate", "(", ")", "]", "# 'here' as in 'here, take this' or 'here you go'", "elif", "syncable", ".", "version", "(", ")", "==", "version", ":", "return", "[", "'ok'", ",", "version", "]", "elif", "syncable", ".", "version", "(", ")", "<", "version", ":", "return", "[", "'upload'", ",", "syncable", ".", "version", "(", ")", "]", "else", ":", "raise", "RuntimeError", "(", "\"shouldn't get here\"", ")" ]
helper for do_check. version is an integer or null. returns ...
[ "helper", "for", "do_check", ".", "version", "is", "an", "integer", "or", "null", ".", "returns", "..." ]
c78806f99f35541a8756987e86edca3438aa97f5
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/syncmessage.py#L88-L97
247,466
abe-winter/pg13-py
pg13/syncmessage.py
add_missing_children
def add_missing_children(models,request,include_children_for,modelgb): "helper for do_check. mutates request" for (nombre,pkey),model in models.items(): for modelclass,pkeys in model.refkeys(include_children_for.get(nombre,())).items(): # warning: this is defaulting to all fields of child object. don't give clients a way to restrict that until there's a reason to. childname=modelgb['row',modelclass].name for childfield,cftype in modelclass.FIELDS: if not isinstance(cftype,basestring) and inspect.isclass(cftype) and issubclass(cftype,syncschema.Syncable): merge_null_missing(request,childname,childfield,pkeys) elif childfield in modelclass.SENDRAW: merge_null_missing(request,childname,childfield,pkeys) else: pass # intentional: ignore the field return request # the in-place updated original
python
def add_missing_children(models,request,include_children_for,modelgb): "helper for do_check. mutates request" for (nombre,pkey),model in models.items(): for modelclass,pkeys in model.refkeys(include_children_for.get(nombre,())).items(): # warning: this is defaulting to all fields of child object. don't give clients a way to restrict that until there's a reason to. childname=modelgb['row',modelclass].name for childfield,cftype in modelclass.FIELDS: if not isinstance(cftype,basestring) and inspect.isclass(cftype) and issubclass(cftype,syncschema.Syncable): merge_null_missing(request,childname,childfield,pkeys) elif childfield in modelclass.SENDRAW: merge_null_missing(request,childname,childfield,pkeys) else: pass # intentional: ignore the field return request # the in-place updated original
[ "def", "add_missing_children", "(", "models", ",", "request", ",", "include_children_for", ",", "modelgb", ")", ":", "for", "(", "nombre", ",", "pkey", ")", ",", "model", "in", "models", ".", "items", "(", ")", ":", "for", "modelclass", ",", "pkeys", "in", "model", ".", "refkeys", "(", "include_children_for", ".", "get", "(", "nombre", ",", "(", ")", ")", ")", ".", "items", "(", ")", ":", "# warning: this is defaulting to all fields of child object. don't give clients a way to restrict that until there's a reason to.", "childname", "=", "modelgb", "[", "'row'", ",", "modelclass", "]", ".", "name", "for", "childfield", ",", "cftype", "in", "modelclass", ".", "FIELDS", ":", "if", "not", "isinstance", "(", "cftype", ",", "basestring", ")", "and", "inspect", ".", "isclass", "(", "cftype", ")", "and", "issubclass", "(", "cftype", ",", "syncschema", ".", "Syncable", ")", ":", "merge_null_missing", "(", "request", ",", "childname", ",", "childfield", ",", "pkeys", ")", "elif", "childfield", "in", "modelclass", ".", "SENDRAW", ":", "merge_null_missing", "(", "request", ",", "childname", ",", "childfield", ",", "pkeys", ")", "else", ":", "pass", "# intentional: ignore the field", "return", "request", "# the in-place updated original" ]
helper for do_check. mutates request
[ "helper", "for", "do_check", ".", "mutates", "request" ]
c78806f99f35541a8756987e86edca3438aa97f5
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/syncmessage.py#L101-L112
247,467
ScienceLogic/amiuploader
amiimporter/amiupload.py
parse_args
def parse_args(): """ Argument parser and validator """ parser = argparse.ArgumentParser(description="Uploads specified VMDK file to AWS s3 bucket, and converts to AMI") parser.add_argument('-r', '--aws_regions', type=str, nargs='+', required=True, help='list of AWS regions where uploaded ami should be copied. Available' ' regions: {}.'.format(AWSUtilities.aws_regions)) parser.add_argument('-a', '--aws_profile', type=str, required=True, help='AWS profile name to use for aws cli commands') parser.add_argument('-b', '--s3_bucket', type=str, required=True, help='The aws_bucket of the profile to upload and save vmdk to') parser.add_argument('-f', '--vmdk_upload_file', type=str, required=True, help="The file to upload if executing ") parser.add_argument('-n', '--ami_name', type=str, required=False, help='The name to give to the uploaded ami. ' 'Defaults to the name of the file') parser.add_argument('-d', '--directory', type=str, default=tempfile.mkdtemp(), help='Directory to save temp aws config upload files') args = parser.parse_args() if not args.ami_name: args.ami_name = os.path.basename(args.vmdk_upload_file) validate_args(args) return args
python
def parse_args(): """ Argument parser and validator """ parser = argparse.ArgumentParser(description="Uploads specified VMDK file to AWS s3 bucket, and converts to AMI") parser.add_argument('-r', '--aws_regions', type=str, nargs='+', required=True, help='list of AWS regions where uploaded ami should be copied. Available' ' regions: {}.'.format(AWSUtilities.aws_regions)) parser.add_argument('-a', '--aws_profile', type=str, required=True, help='AWS profile name to use for aws cli commands') parser.add_argument('-b', '--s3_bucket', type=str, required=True, help='The aws_bucket of the profile to upload and save vmdk to') parser.add_argument('-f', '--vmdk_upload_file', type=str, required=True, help="The file to upload if executing ") parser.add_argument('-n', '--ami_name', type=str, required=False, help='The name to give to the uploaded ami. ' 'Defaults to the name of the file') parser.add_argument('-d', '--directory', type=str, default=tempfile.mkdtemp(), help='Directory to save temp aws config upload files') args = parser.parse_args() if not args.ami_name: args.ami_name = os.path.basename(args.vmdk_upload_file) validate_args(args) return args
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Uploads specified VMDK file to AWS s3 bucket, and converts to AMI\"", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--aws_regions'", ",", "type", "=", "str", ",", "nargs", "=", "'+'", ",", "required", "=", "True", ",", "help", "=", "'list of AWS regions where uploaded ami should be copied. Available'", "' regions: {}.'", ".", "format", "(", "AWSUtilities", ".", "aws_regions", ")", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "'--aws_profile'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'AWS profile name to use for aws cli commands'", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "'--s3_bucket'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'The aws_bucket of the profile to upload and save vmdk to'", ")", "parser", ".", "add_argument", "(", "'-f'", ",", "'--vmdk_upload_file'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "\"The file to upload if executing \"", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--ami_name'", ",", "type", "=", "str", ",", "required", "=", "False", ",", "help", "=", "'The name to give to the uploaded ami. '", "'Defaults to the name of the file'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--directory'", ",", "type", "=", "str", ",", "default", "=", "tempfile", ".", "mkdtemp", "(", ")", ",", "help", "=", "'Directory to save temp aws config upload files'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "args", ".", "ami_name", ":", "args", ".", "ami_name", "=", "os", ".", "path", ".", "basename", "(", "args", ".", "vmdk_upload_file", ")", "validate_args", "(", "args", ")", "return", "args" ]
Argument parser and validator
[ "Argument", "parser", "and", "validator" ]
c36c247b2226107b38571cbc6119118b1fe07182
https://github.com/ScienceLogic/amiuploader/blob/c36c247b2226107b38571cbc6119118b1fe07182/amiimporter/amiupload.py#L17-L41
247,468
eddiejessup/spatious
spatious/rotation.py
R_rot_2d
def R_rot_2d(th): """Return a 2-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 1) Angles about which to rotate. Returns ------- R: array, shape (n, 2, 2) """ s, = np.sin(th).T c, = np.cos(th).T R = np.empty((len(th), 2, 2), dtype=np.float) R[:, 0, 0] = c R[:, 0, 1] = -s R[:, 1, 0] = s R[:, 1, 1] = c return R
python
def R_rot_2d(th): """Return a 2-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 1) Angles about which to rotate. Returns ------- R: array, shape (n, 2, 2) """ s, = np.sin(th).T c, = np.cos(th).T R = np.empty((len(th), 2, 2), dtype=np.float) R[:, 0, 0] = c R[:, 0, 1] = -s R[:, 1, 0] = s R[:, 1, 1] = c return R
[ "def", "R_rot_2d", "(", "th", ")", ":", "s", ",", "=", "np", ".", "sin", "(", "th", ")", ".", "T", "c", ",", "=", "np", ".", "cos", "(", "th", ")", ".", "T", "R", "=", "np", ".", "empty", "(", "(", "len", "(", "th", ")", ",", "2", ",", "2", ")", ",", "dtype", "=", "np", ".", "float", ")", "R", "[", ":", ",", "0", ",", "0", "]", "=", "c", "R", "[", ":", ",", "0", ",", "1", "]", "=", "-", "s", "R", "[", ":", ",", "1", ",", "0", "]", "=", "s", "R", "[", ":", ",", "1", ",", "1", "]", "=", "c", "return", "R" ]
Return a 2-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 1) Angles about which to rotate. Returns ------- R: array, shape (n, 2, 2)
[ "Return", "a", "2", "-", "dimensional", "rotation", "matrix", "." ]
b7ae91bec029e85a45a7f303ee184076433723cd
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/rotation.py#L8-L30
247,469
eddiejessup/spatious
spatious/rotation.py
R_rot_3d
def R_rot_3d(th): """Return a 3-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 3) Angles about which to rotate along each axis. Returns ------- R: array, shape (n, 3, 3) """ sx, sy, sz = np.sin(th).T cx, cy, cz = np.cos(th).T R = np.empty((len(th), 3, 3), dtype=np.float) R[:, 0, 0] = cy * cz R[:, 0, 1] = -cy * sz R[:, 0, 2] = sy R[:, 1, 0] = sx * sy * cz + cx * sz R[:, 1, 1] = -sx * sy * sz + cx * cz R[:, 1, 2] = -sx * cy R[:, 2, 0] = -cx * sy * cz + sx * sz R[:, 2, 1] = cx * sy * sz + sx * cz R[:, 2, 2] = cx * cy return R
python
def R_rot_3d(th): """Return a 3-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 3) Angles about which to rotate along each axis. Returns ------- R: array, shape (n, 3, 3) """ sx, sy, sz = np.sin(th).T cx, cy, cz = np.cos(th).T R = np.empty((len(th), 3, 3), dtype=np.float) R[:, 0, 0] = cy * cz R[:, 0, 1] = -cy * sz R[:, 0, 2] = sy R[:, 1, 0] = sx * sy * cz + cx * sz R[:, 1, 1] = -sx * sy * sz + cx * cz R[:, 1, 2] = -sx * cy R[:, 2, 0] = -cx * sy * cz + sx * sz R[:, 2, 1] = cx * sy * sz + sx * cz R[:, 2, 2] = cx * cy return R
[ "def", "R_rot_3d", "(", "th", ")", ":", "sx", ",", "sy", ",", "sz", "=", "np", ".", "sin", "(", "th", ")", ".", "T", "cx", ",", "cy", ",", "cz", "=", "np", ".", "cos", "(", "th", ")", ".", "T", "R", "=", "np", ".", "empty", "(", "(", "len", "(", "th", ")", ",", "3", ",", "3", ")", ",", "dtype", "=", "np", ".", "float", ")", "R", "[", ":", ",", "0", ",", "0", "]", "=", "cy", "*", "cz", "R", "[", ":", ",", "0", ",", "1", "]", "=", "-", "cy", "*", "sz", "R", "[", ":", ",", "0", ",", "2", "]", "=", "sy", "R", "[", ":", ",", "1", ",", "0", "]", "=", "sx", "*", "sy", "*", "cz", "+", "cx", "*", "sz", "R", "[", ":", ",", "1", ",", "1", "]", "=", "-", "sx", "*", "sy", "*", "sz", "+", "cx", "*", "cz", "R", "[", ":", ",", "1", ",", "2", "]", "=", "-", "sx", "*", "cy", "R", "[", ":", ",", "2", ",", "0", "]", "=", "-", "cx", "*", "sy", "*", "cz", "+", "sx", "*", "sz", "R", "[", ":", ",", "2", ",", "1", "]", "=", "cx", "*", "sy", "*", "sz", "+", "sx", "*", "cz", "R", "[", ":", ",", "2", ",", "2", "]", "=", "cx", "*", "cy", "return", "R" ]
Return a 3-dimensional rotation matrix. Parameters ---------- th: array, shape (n, 3) Angles about which to rotate along each axis. Returns ------- R: array, shape (n, 3, 3)
[ "Return", "a", "3", "-", "dimensional", "rotation", "matrix", "." ]
b7ae91bec029e85a45a7f303ee184076433723cd
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/rotation.py#L33-L60
247,470
eddiejessup/spatious
spatious/rotation.py
R_rot
def R_rot(th): """Return a rotation matrix. Parameters ---------- th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- R: array, shape (n, m, m) """ try: dof = th.shape[-1] # If th is a python float except AttributeError: dof = 1 th = np.array([th]) except IndexError: dof = 1 th = np.array([th]) # If th is a numpy float, i.e. 0d array if dof == 1: return R_rot_2d(th) elif dof == 3: return R_rot_3d(th) else: raise Exception('Rotation matrix not implemented in this dimension')
python
def R_rot(th): """Return a rotation matrix. Parameters ---------- th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- R: array, shape (n, m, m) """ try: dof = th.shape[-1] # If th is a python float except AttributeError: dof = 1 th = np.array([th]) except IndexError: dof = 1 th = np.array([th]) # If th is a numpy float, i.e. 0d array if dof == 1: return R_rot_2d(th) elif dof == 3: return R_rot_3d(th) else: raise Exception('Rotation matrix not implemented in this dimension')
[ "def", "R_rot", "(", "th", ")", ":", "try", ":", "dof", "=", "th", ".", "shape", "[", "-", "1", "]", "# If th is a python float", "except", "AttributeError", ":", "dof", "=", "1", "th", "=", "np", ".", "array", "(", "[", "th", "]", ")", "except", "IndexError", ":", "dof", "=", "1", "th", "=", "np", ".", "array", "(", "[", "th", "]", ")", "# If th is a numpy float, i.e. 0d array", "if", "dof", "==", "1", ":", "return", "R_rot_2d", "(", "th", ")", "elif", "dof", "==", "3", ":", "return", "R_rot_3d", "(", "th", ")", "else", ":", "raise", "Exception", "(", "'Rotation matrix not implemented in this dimension'", ")" ]
Return a rotation matrix. Parameters ---------- th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- R: array, shape (n, m, m)
[ "Return", "a", "rotation", "matrix", "." ]
b7ae91bec029e85a45a7f303ee184076433723cd
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/rotation.py#L63-L91
247,471
eddiejessup/spatious
spatious/rotation.py
rotate
def rotate(a, th): """Return cartesian vectors, after rotation by specified angles about each degree of freedom. Parameters ---------- a: array, shape (n, d) Input d-dimensional cartesian vectors, left unchanged. th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- ar: array, shape of a Rotated cartesian vectors. """ return np.sum(a[..., np.newaxis] * R_rot(th), axis=-2)
python
def rotate(a, th): """Return cartesian vectors, after rotation by specified angles about each degree of freedom. Parameters ---------- a: array, shape (n, d) Input d-dimensional cartesian vectors, left unchanged. th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- ar: array, shape of a Rotated cartesian vectors. """ return np.sum(a[..., np.newaxis] * R_rot(th), axis=-2)
[ "def", "rotate", "(", "a", ",", "th", ")", ":", "return", "np", ".", "sum", "(", "a", "[", "...", ",", "np", ".", "newaxis", "]", "*", "R_rot", "(", "th", ")", ",", "axis", "=", "-", "2", ")" ]
Return cartesian vectors, after rotation by specified angles about each degree of freedom. Parameters ---------- a: array, shape (n, d) Input d-dimensional cartesian vectors, left unchanged. th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- ar: array, shape of a Rotated cartesian vectors.
[ "Return", "cartesian", "vectors", "after", "rotation", "by", "specified", "angles", "about", "each", "degree", "of", "freedom", "." ]
b7ae91bec029e85a45a7f303ee184076433723cd
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/rotation.py#L94-L111
247,472
westerncapelabs/django-snappy-vumi-bouncer
snappybouncer/actions.py
export_select_fields_csv_action
def export_select_fields_csv_action( description="Export selected objects as CSV file", fields=None, exclude=None, header=True): """ This function returns an export csv action 'fields' is a list of tuples denoting the field and label to be exported. Labels make up the header row of the exported file if header=True. fields=[ ('field1', 'label1'), ('field2', 'label2'), ('field3', 'label3'), ] 'exclude' is a flat list of fields to exclude. If 'exclude' is passed, 'fields' will not be used. Either use 'fields' or 'exclude.' exclude=['field1', 'field2', field3] 'header' is whether or not to output the column names as the first row From: http://djangosnippets.org/snippets/2712/ Which is in turn based on: http://djangosnippets.org/snippets/2020/ """ def export_as_csv(modeladmin, request, queryset): """ Generic csv export admin action. based on http://djangosnippets.org/snippets/1697/ """ opts = modeladmin.model._meta field_names = [field.name for field in opts.fields] labels = [] if exclude: field_names = [v for v in field_names if v not in exclude] elif fields: field_names = [k for k, v in fields if k in field_names] labels = [v for k, v in fields if k in field_names] response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = ('attachment; filename=%s.csv' % unicode(opts).replace('.', '_')) writer = csv.writer(response) if header: if labels: writer.writerow(labels) else: writer.writerow(field_names) for obj in queryset: writer.writerow([unicode(getattr(obj, field)).encode('utf-8') for field in field_names]) return response export_as_csv.short_description = description return export_as_csv
python
def export_select_fields_csv_action( description="Export selected objects as CSV file", fields=None, exclude=None, header=True): """ This function returns an export csv action 'fields' is a list of tuples denoting the field and label to be exported. Labels make up the header row of the exported file if header=True. fields=[ ('field1', 'label1'), ('field2', 'label2'), ('field3', 'label3'), ] 'exclude' is a flat list of fields to exclude. If 'exclude' is passed, 'fields' will not be used. Either use 'fields' or 'exclude.' exclude=['field1', 'field2', field3] 'header' is whether or not to output the column names as the first row From: http://djangosnippets.org/snippets/2712/ Which is in turn based on: http://djangosnippets.org/snippets/2020/ """ def export_as_csv(modeladmin, request, queryset): """ Generic csv export admin action. based on http://djangosnippets.org/snippets/1697/ """ opts = modeladmin.model._meta field_names = [field.name for field in opts.fields] labels = [] if exclude: field_names = [v for v in field_names if v not in exclude] elif fields: field_names = [k for k, v in fields if k in field_names] labels = [v for k, v in fields if k in field_names] response = HttpResponse(mimetype='text/csv') response['Content-Disposition'] = ('attachment; filename=%s.csv' % unicode(opts).replace('.', '_')) writer = csv.writer(response) if header: if labels: writer.writerow(labels) else: writer.writerow(field_names) for obj in queryset: writer.writerow([unicode(getattr(obj, field)).encode('utf-8') for field in field_names]) return response export_as_csv.short_description = description return export_as_csv
[ "def", "export_select_fields_csv_action", "(", "description", "=", "\"Export selected objects as CSV file\"", ",", "fields", "=", "None", ",", "exclude", "=", "None", ",", "header", "=", "True", ")", ":", "def", "export_as_csv", "(", "modeladmin", ",", "request", ",", "queryset", ")", ":", "\"\"\"\n Generic csv export admin action.\n based on http://djangosnippets.org/snippets/1697/\n \"\"\"", "opts", "=", "modeladmin", ".", "model", ".", "_meta", "field_names", "=", "[", "field", ".", "name", "for", "field", "in", "opts", ".", "fields", "]", "labels", "=", "[", "]", "if", "exclude", ":", "field_names", "=", "[", "v", "for", "v", "in", "field_names", "if", "v", "not", "in", "exclude", "]", "elif", "fields", ":", "field_names", "=", "[", "k", "for", "k", ",", "v", "in", "fields", "if", "k", "in", "field_names", "]", "labels", "=", "[", "v", "for", "k", ",", "v", "in", "fields", "if", "k", "in", "field_names", "]", "response", "=", "HttpResponse", "(", "mimetype", "=", "'text/csv'", ")", "response", "[", "'Content-Disposition'", "]", "=", "(", "'attachment; filename=%s.csv'", "%", "unicode", "(", "opts", ")", ".", "replace", "(", "'.'", ",", "'_'", ")", ")", "writer", "=", "csv", ".", "writer", "(", "response", ")", "if", "header", ":", "if", "labels", ":", "writer", ".", "writerow", "(", "labels", ")", "else", ":", "writer", ".", "writerow", "(", "field_names", ")", "for", "obj", "in", "queryset", ":", "writer", ".", "writerow", "(", "[", "unicode", "(", "getattr", "(", "obj", ",", "field", ")", ")", ".", "encode", "(", "'utf-8'", ")", "for", "field", "in", "field_names", "]", ")", "return", "response", "export_as_csv", ".", "short_description", "=", "description", "return", "export_as_csv" ]
This function returns an export csv action 'fields' is a list of tuples denoting the field and label to be exported. Labels make up the header row of the exported file if header=True. fields=[ ('field1', 'label1'), ('field2', 'label2'), ('field3', 'label3'), ] 'exclude' is a flat list of fields to exclude. If 'exclude' is passed, 'fields' will not be used. Either use 'fields' or 'exclude.' exclude=['field1', 'field2', field3] 'header' is whether or not to output the column names as the first row From: http://djangosnippets.org/snippets/2712/ Which is in turn based on: http://djangosnippets.org/snippets/2020/
[ "This", "function", "returns", "an", "export", "csv", "action" ]
5750827020aa83f0f5eecee87a2fe8f19dfaac16
https://github.com/westerncapelabs/django-snappy-vumi-bouncer/blob/5750827020aa83f0f5eecee87a2fe8f19dfaac16/snappybouncer/actions.py#L5-L59
247,473
michaelkuty/django-service-templates
django_service_templates/engine.py
JinjaRenderer.render
def render(self, name=None, template=None, context={}): ''''Render Template meta from jinja2 templates. ''' if isinstance(template, Template): _template = template else: _template = Template.objects.get(name=name) # Maybe cache or save local ? response = self.env.from_string( _template.content).render(context) return response
python
def render(self, name=None, template=None, context={}): ''''Render Template meta from jinja2 templates. ''' if isinstance(template, Template): _template = template else: _template = Template.objects.get(name=name) # Maybe cache or save local ? response = self.env.from_string( _template.content).render(context) return response
[ "def", "render", "(", "self", ",", "name", "=", "None", ",", "template", "=", "None", ",", "context", "=", "{", "}", ")", ":", "if", "isinstance", "(", "template", ",", "Template", ")", ":", "_template", "=", "template", "else", ":", "_template", "=", "Template", ".", "objects", ".", "get", "(", "name", "=", "name", ")", "# Maybe cache or save local ?", "response", "=", "self", ".", "env", ".", "from_string", "(", "_template", ".", "content", ")", ".", "render", "(", "context", ")", "return", "response" ]
Render Template meta from jinja2 templates.
[ "Render", "Template", "meta", "from", "jinja2", "templates", "." ]
4f85bd812aeac8e01e1031f2118a68b344793118
https://github.com/michaelkuty/django-service-templates/blob/4f85bd812aeac8e01e1031f2118a68b344793118/django_service_templates/engine.py#L11-L25
247,474
townsenddw/jhubctl
jhubctl/hubs/single.py
Hub.get
def get(self): """Get specific information about this hub.""" output = helm("get", self.release) if output.returncode != 0: print("Something went wrong!") print(output.stderr) else: print(output.stdout)
python
def get(self): """Get specific information about this hub.""" output = helm("get", self.release) if output.returncode != 0: print("Something went wrong!") print(output.stderr) else: print(output.stdout)
[ "def", "get", "(", "self", ")", ":", "output", "=", "helm", "(", "\"get\"", ",", "self", ".", "release", ")", "if", "output", ".", "returncode", "!=", "0", ":", "print", "(", "\"Something went wrong!\"", ")", "print", "(", "output", ".", "stderr", ")", "else", ":", "print", "(", "output", ".", "stdout", ")" ]
Get specific information about this hub.
[ "Get", "specific", "information", "about", "this", "hub", "." ]
c8c20f86a16e9d01dd90e4607d81423417cc773b
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/single.py#L52-L59
247,475
townsenddw/jhubctl
jhubctl/hubs/single.py
Hub.create
def create(self): """Create a single instance of notebook.""" # Point to chart repo. out = helm( "repo", "add", "jupyterhub", self.helm_repo ) out = helm("repo", "update") # Get token to secure Jupyterhub secret_yaml = self.get_security_yaml() # Get Jupyterhub. out = helm( "upgrade", "--install", self.release, "jupyterhub/jupyterhub", namespace=self.namespace, version=self.version, input=secret_yaml ) if out.returncode != 0: print(out.stderr) else: print(out.stdout)
python
def create(self): """Create a single instance of notebook.""" # Point to chart repo. out = helm( "repo", "add", "jupyterhub", self.helm_repo ) out = helm("repo", "update") # Get token to secure Jupyterhub secret_yaml = self.get_security_yaml() # Get Jupyterhub. out = helm( "upgrade", "--install", self.release, "jupyterhub/jupyterhub", namespace=self.namespace, version=self.version, input=secret_yaml ) if out.returncode != 0: print(out.stderr) else: print(out.stdout)
[ "def", "create", "(", "self", ")", ":", "# Point to chart repo.", "out", "=", "helm", "(", "\"repo\"", ",", "\"add\"", ",", "\"jupyterhub\"", ",", "self", ".", "helm_repo", ")", "out", "=", "helm", "(", "\"repo\"", ",", "\"update\"", ")", "# Get token to secure Jupyterhub", "secret_yaml", "=", "self", ".", "get_security_yaml", "(", ")", "# Get Jupyterhub.", "out", "=", "helm", "(", "\"upgrade\"", ",", "\"--install\"", ",", "self", ".", "release", ",", "\"jupyterhub/jupyterhub\"", ",", "namespace", "=", "self", ".", "namespace", ",", "version", "=", "self", ".", "version", ",", "input", "=", "secret_yaml", ")", "if", "out", ".", "returncode", "!=", "0", ":", "print", "(", "out", ".", "stderr", ")", "else", ":", "print", "(", "out", ".", "stdout", ")" ]
Create a single instance of notebook.
[ "Create", "a", "single", "instance", "of", "notebook", "." ]
c8c20f86a16e9d01dd90e4607d81423417cc773b
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/single.py#L61-L88
247,476
townsenddw/jhubctl
jhubctl/hubs/single.py
Hub.delete
def delete(self): """Delete a Jupyterhub.""" # Delete the Helm Release out = helm( "delete", self.release, "--purge" ) if out.returncode != 0: print(out.stderr) else: print(out.stdout) # Delete the Kubernetes namespace out = kubectl( "delete", "namespace", self.namespace ) if out.returncode != 0: print(out.stderr) else: print(out.stdout)
python
def delete(self): """Delete a Jupyterhub.""" # Delete the Helm Release out = helm( "delete", self.release, "--purge" ) if out.returncode != 0: print(out.stderr) else: print(out.stdout) # Delete the Kubernetes namespace out = kubectl( "delete", "namespace", self.namespace ) if out.returncode != 0: print(out.stderr) else: print(out.stdout)
[ "def", "delete", "(", "self", ")", ":", "# Delete the Helm Release", "out", "=", "helm", "(", "\"delete\"", ",", "self", ".", "release", ",", "\"--purge\"", ")", "if", "out", ".", "returncode", "!=", "0", ":", "print", "(", "out", ".", "stderr", ")", "else", ":", "print", "(", "out", ".", "stdout", ")", "# Delete the Kubernetes namespace", "out", "=", "kubectl", "(", "\"delete\"", ",", "\"namespace\"", ",", "self", ".", "namespace", ")", "if", "out", ".", "returncode", "!=", "0", ":", "print", "(", "out", ".", "stderr", ")", "else", ":", "print", "(", "out", ".", "stdout", ")" ]
Delete a Jupyterhub.
[ "Delete", "a", "Jupyterhub", "." ]
c8c20f86a16e9d01dd90e4607d81423417cc773b
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/single.py#L90-L112
247,477
townsenddw/jhubctl
jhubctl/hubs/single.py
Hub._parse_description
def _parse_description(self, description_text): """Turn description to dictionary.""" text = description_text text = text.strip() lines = text.split('\n') data = {} for line in lines: if ":" in line: idx = line.index(":") key = line[:idx] value = line[idx+1:].lstrip().rstrip() data[key] = value else: if isinstance(value, list) is False: value = [value] value.append(line.lstrip().rstrip()) data[key] = value return data
python
def _parse_description(self, description_text): """Turn description to dictionary.""" text = description_text text = text.strip() lines = text.split('\n') data = {} for line in lines: if ":" in line: idx = line.index(":") key = line[:idx] value = line[idx+1:].lstrip().rstrip() data[key] = value else: if isinstance(value, list) is False: value = [value] value.append(line.lstrip().rstrip()) data[key] = value return data
[ "def", "_parse_description", "(", "self", ",", "description_text", ")", ":", "text", "=", "description_text", "text", "=", "text", ".", "strip", "(", ")", "lines", "=", "text", ".", "split", "(", "'\\n'", ")", "data", "=", "{", "}", "for", "line", "in", "lines", ":", "if", "\":\"", "in", "line", ":", "idx", "=", "line", ".", "index", "(", "\":\"", ")", "key", "=", "line", "[", ":", "idx", "]", "value", "=", "line", "[", "idx", "+", "1", ":", "]", ".", "lstrip", "(", ")", ".", "rstrip", "(", ")", "data", "[", "key", "]", "=", "value", "else", ":", "if", "isinstance", "(", "value", ",", "list", ")", "is", "False", ":", "value", "=", "[", "value", "]", "value", ".", "append", "(", "line", ".", "lstrip", "(", ")", ".", "rstrip", "(", ")", ")", "data", "[", "key", "]", "=", "value", "return", "data" ]
Turn description to dictionary.
[ "Turn", "description", "to", "dictionary", "." ]
c8c20f86a16e9d01dd90e4607d81423417cc773b
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/hubs/single.py#L125-L143
247,478
keithhackbarth/clowder_python_client
clowder.py
_validate_data
def _validate_data(data): """Validates the given data and raises an error if any non-allowed keys are provided or any required keys are missing. :param data: Data to send to API :type data: dict """ data_keys = set(data.keys()) extra_keys = data_keys - set(ALLOWED_KEYS) missing_keys = set(REQUIRED_KEYS) - data_keys if extra_keys: raise ValueError( 'Invalid data keys {!r}'.format(', '.join(extra_keys)) ) if missing_keys: raise ValueError( 'Missing keys {!r}'.format(', '.join(missing_keys)) )
python
def _validate_data(data): """Validates the given data and raises an error if any non-allowed keys are provided or any required keys are missing. :param data: Data to send to API :type data: dict """ data_keys = set(data.keys()) extra_keys = data_keys - set(ALLOWED_KEYS) missing_keys = set(REQUIRED_KEYS) - data_keys if extra_keys: raise ValueError( 'Invalid data keys {!r}'.format(', '.join(extra_keys)) ) if missing_keys: raise ValueError( 'Missing keys {!r}'.format(', '.join(missing_keys)) )
[ "def", "_validate_data", "(", "data", ")", ":", "data_keys", "=", "set", "(", "data", ".", "keys", "(", ")", ")", "extra_keys", "=", "data_keys", "-", "set", "(", "ALLOWED_KEYS", ")", "missing_keys", "=", "set", "(", "REQUIRED_KEYS", ")", "-", "data_keys", "if", "extra_keys", ":", "raise", "ValueError", "(", "'Invalid data keys {!r}'", ".", "format", "(", "', '", ".", "join", "(", "extra_keys", ")", ")", ")", "if", "missing_keys", ":", "raise", "ValueError", "(", "'Missing keys {!r}'", ".", "format", "(", "', '", ".", "join", "(", "missing_keys", ")", ")", ")" ]
Validates the given data and raises an error if any non-allowed keys are provided or any required keys are missing. :param data: Data to send to API :type data: dict
[ "Validates", "the", "given", "data", "and", "raises", "an", "error", "if", "any", "non", "-", "allowed", "keys", "are", "provided", "or", "any", "required", "keys", "are", "missing", "." ]
ebe91dd348e347461fc6a244ca45bb41d767a5be
https://github.com/keithhackbarth/clowder_python_client/blob/ebe91dd348e347461fc6a244ca45bb41d767a5be/clowder.py#L22-L41
247,479
keithhackbarth/clowder_python_client
clowder.py
_send
def _send(data): """Send data to the Clowder API. :param data: Dictionary of API data :type data: dict """ url = data.get('url', CLOWDER_API_URL) _validate_data(data) if api_key is not None: data['api_key'] = api_key if 'value' not in data: data['value'] = data.get('status', 1) if 'frequency' in data: data['frequency'] = _clean_frequency(data['frequency']) try: requests.post(url, data=data, timeout=TIMEOUT).text # This confirms you that the request has reached server # And that the request has been sent # Because we don't care about the response, we set the timeout # value to be low and ignore read exceptions except requests.exceptions.ReadTimeout as err: pass # Allow a wildcard expection for any other type of processing error except requests.exceptions.RequestException as err: logging.error('Clowder expection %s', err)
python
def _send(data): """Send data to the Clowder API. :param data: Dictionary of API data :type data: dict """ url = data.get('url', CLOWDER_API_URL) _validate_data(data) if api_key is not None: data['api_key'] = api_key if 'value' not in data: data['value'] = data.get('status', 1) if 'frequency' in data: data['frequency'] = _clean_frequency(data['frequency']) try: requests.post(url, data=data, timeout=TIMEOUT).text # This confirms you that the request has reached server # And that the request has been sent # Because we don't care about the response, we set the timeout # value to be low and ignore read exceptions except requests.exceptions.ReadTimeout as err: pass # Allow a wildcard expection for any other type of processing error except requests.exceptions.RequestException as err: logging.error('Clowder expection %s', err)
[ "def", "_send", "(", "data", ")", ":", "url", "=", "data", ".", "get", "(", "'url'", ",", "CLOWDER_API_URL", ")", "_validate_data", "(", "data", ")", "if", "api_key", "is", "not", "None", ":", "data", "[", "'api_key'", "]", "=", "api_key", "if", "'value'", "not", "in", "data", ":", "data", "[", "'value'", "]", "=", "data", ".", "get", "(", "'status'", ",", "1", ")", "if", "'frequency'", "in", "data", ":", "data", "[", "'frequency'", "]", "=", "_clean_frequency", "(", "data", "[", "'frequency'", "]", ")", "try", ":", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ",", "timeout", "=", "TIMEOUT", ")", ".", "text", "# This confirms you that the request has reached server", "# And that the request has been sent", "# Because we don't care about the response, we set the timeout", "# value to be low and ignore read exceptions", "except", "requests", ".", "exceptions", ".", "ReadTimeout", "as", "err", ":", "pass", "# Allow a wildcard expection for any other type of processing error", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "err", ":", "logging", ".", "error", "(", "'Clowder expection %s'", ",", "err", ")" ]
Send data to the Clowder API. :param data: Dictionary of API data :type data: dict
[ "Send", "data", "to", "the", "Clowder", "API", "." ]
ebe91dd348e347461fc6a244ca45bb41d767a5be
https://github.com/keithhackbarth/clowder_python_client/blob/ebe91dd348e347461fc6a244ca45bb41d767a5be/clowder.py#L44-L75
247,480
keithhackbarth/clowder_python_client
clowder.py
submit
def submit(**kwargs): """Shortcut that takes an alert to evaluate and makes the appropriate API call based on the results. :param kwargs: A list of keyword arguments :type kwargs: dict """ if 'alert' not in kwargs: raise ValueError('Alert required') if 'value' not in kwargs: raise ValueError('Value required') alert = kwargs.pop('alert') value = kwargs['value'] if alert(value): fail(kwargs) else: ok(kwargs)
python
def submit(**kwargs): """Shortcut that takes an alert to evaluate and makes the appropriate API call based on the results. :param kwargs: A list of keyword arguments :type kwargs: dict """ if 'alert' not in kwargs: raise ValueError('Alert required') if 'value' not in kwargs: raise ValueError('Value required') alert = kwargs.pop('alert') value = kwargs['value'] if alert(value): fail(kwargs) else: ok(kwargs)
[ "def", "submit", "(", "*", "*", "kwargs", ")", ":", "if", "'alert'", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "'Alert required'", ")", "if", "'value'", "not", "in", "kwargs", ":", "raise", "ValueError", "(", "'Value required'", ")", "alert", "=", "kwargs", ".", "pop", "(", "'alert'", ")", "value", "=", "kwargs", "[", "'value'", "]", "if", "alert", "(", "value", ")", ":", "fail", "(", "kwargs", ")", "else", ":", "ok", "(", "kwargs", ")" ]
Shortcut that takes an alert to evaluate and makes the appropriate API call based on the results. :param kwargs: A list of keyword arguments :type kwargs: dict
[ "Shortcut", "that", "takes", "an", "alert", "to", "evaluate", "and", "makes", "the", "appropriate", "API", "call", "based", "on", "the", "results", "." ]
ebe91dd348e347461fc6a244ca45bb41d767a5be
https://github.com/keithhackbarth/clowder_python_client/blob/ebe91dd348e347461fc6a244ca45bb41d767a5be/clowder.py#L120-L139
247,481
keithhackbarth/clowder_python_client
clowder.py
_clean_frequency
def _clean_frequency(frequency): """Converts a frequency value to an integer. Raises an error if an invalid type is given. :param frequency: A frequency :type frequency: int or datetime.timedelta :rtype: int """ if isinstance(frequency, int): return frequency elif isinstance(frequency, datetime.timedelta): return int(frequency.total_seconds()) raise ValueError('Invalid frequency {!r}'.format(frequency))
python
def _clean_frequency(frequency): """Converts a frequency value to an integer. Raises an error if an invalid type is given. :param frequency: A frequency :type frequency: int or datetime.timedelta :rtype: int """ if isinstance(frequency, int): return frequency elif isinstance(frequency, datetime.timedelta): return int(frequency.total_seconds()) raise ValueError('Invalid frequency {!r}'.format(frequency))
[ "def", "_clean_frequency", "(", "frequency", ")", ":", "if", "isinstance", "(", "frequency", ",", "int", ")", ":", "return", "frequency", "elif", "isinstance", "(", "frequency", ",", "datetime", ".", "timedelta", ")", ":", "return", "int", "(", "frequency", ".", "total_seconds", "(", ")", ")", "raise", "ValueError", "(", "'Invalid frequency {!r}'", ".", "format", "(", "frequency", ")", ")" ]
Converts a frequency value to an integer. Raises an error if an invalid type is given. :param frequency: A frequency :type frequency: int or datetime.timedelta :rtype: int
[ "Converts", "a", "frequency", "value", "to", "an", "integer", ".", "Raises", "an", "error", "if", "an", "invalid", "type", "is", "given", "." ]
ebe91dd348e347461fc6a244ca45bb41d767a5be
https://github.com/keithhackbarth/clowder_python_client/blob/ebe91dd348e347461fc6a244ca45bb41d767a5be/clowder.py#L142-L155
247,482
fedora-infra/fmn.rules
fmn/rules/koschei.py
koschei_group
def koschei_group(config, message, group=None): """ Particular Koschei package groups This rule limits message to particular `Koschei <https://apps.fedoraproject.org/koschei/>`_ groups. You can specify more groups separated by commas. """ if not group or 'koschei' not in message['topic']: return False groups = set([item.strip() for item in group.split(',')]) return bool(groups.intersection(message['msg'].get('groups', [])))
python
def koschei_group(config, message, group=None): """ Particular Koschei package groups This rule limits message to particular `Koschei <https://apps.fedoraproject.org/koschei/>`_ groups. You can specify more groups separated by commas. """ if not group or 'koschei' not in message['topic']: return False groups = set([item.strip() for item in group.split(',')]) return bool(groups.intersection(message['msg'].get('groups', [])))
[ "def", "koschei_group", "(", "config", ",", "message", ",", "group", "=", "None", ")", ":", "if", "not", "group", "or", "'koschei'", "not", "in", "message", "[", "'topic'", "]", ":", "return", "False", "groups", "=", "set", "(", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "group", ".", "split", "(", "','", ")", "]", ")", "return", "bool", "(", "groups", ".", "intersection", "(", "message", "[", "'msg'", "]", ".", "get", "(", "'groups'", ",", "[", "]", ")", ")", ")" ]
Particular Koschei package groups This rule limits message to particular `Koschei <https://apps.fedoraproject.org/koschei/>`_ groups. You can specify more groups separated by commas.
[ "Particular", "Koschei", "package", "groups" ]
f9ec790619fcc8b41803077c4dec094e5127fc24
https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/koschei.py#L15-L25
247,483
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/rabbitmq/driver.py
Requester.on_response
def on_response(self, ch, method_frame, props, body): """ setup response is correlation id is the good one """ LOGGER.debug("rabbitmq.Requester.on_response") if self.corr_id == props.correlation_id: self.response = {'props': props, 'body': body} else: LOGGER.warn("rabbitmq.Requester.on_response - discarded response : " + str(props.correlation_id)) LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ 'properties': props, 'body': body }))
python
def on_response(self, ch, method_frame, props, body): """ setup response is correlation id is the good one """ LOGGER.debug("rabbitmq.Requester.on_response") if self.corr_id == props.correlation_id: self.response = {'props': props, 'body': body} else: LOGGER.warn("rabbitmq.Requester.on_response - discarded response : " + str(props.correlation_id)) LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ 'properties': props, 'body': body }))
[ "def", "on_response", "(", "self", ",", "ch", ",", "method_frame", ",", "props", ",", "body", ")", ":", "LOGGER", ".", "debug", "(", "\"rabbitmq.Requester.on_response\"", ")", "if", "self", ".", "corr_id", "==", "props", ".", "correlation_id", ":", "self", ".", "response", "=", "{", "'props'", ":", "props", ",", "'body'", ":", "body", "}", "else", ":", "LOGGER", ".", "warn", "(", "\"rabbitmq.Requester.on_response - discarded response : \"", "+", "str", "(", "props", ".", "correlation_id", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "props", ",", "'body'", ":", "body", "}", ")", ")" ]
setup response is correlation id is the good one
[ "setup", "response", "is", "correlation", "id", "is", "the", "good", "one" ]
0a7feddebf66fee4bef38d64f456d93a7e9fcd68
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/rabbitmq/driver.py#L132-L145
247,484
artisanofcode/python-broadway
scripts/release.py
error
def error(message, *args, **kwargs): """ print an error message """ print('[!] ' + message.format(*args, **kwargs)) sys.exit(1)
python
def error(message, *args, **kwargs): """ print an error message """ print('[!] ' + message.format(*args, **kwargs)) sys.exit(1)
[ "def", "error", "(", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "print", "(", "'[!] '", "+", "message", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
print an error message
[ "print", "an", "error", "message" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L25-L29
247,485
artisanofcode/python-broadway
scripts/release.py
parse_changes
def parse_changes(): """ grab version from CHANGES and validate entry """ with open('CHANGES') as changes: for match in re.finditer(RE_CHANGES, changes.read(1024), re.M): if len(match.group(1)) != len(match.group(3)): error('incorrect underline in CHANGES') date = datetime.datetime.strptime(match.group(4), '%Y-%m-%d').date() if date != datetime.date.today(): error('release date is not today') return match.group(2) error('invalid release entry in CHANGES')
python
def parse_changes(): """ grab version from CHANGES and validate entry """ with open('CHANGES') as changes: for match in re.finditer(RE_CHANGES, changes.read(1024), re.M): if len(match.group(1)) != len(match.group(3)): error('incorrect underline in CHANGES') date = datetime.datetime.strptime(match.group(4), '%Y-%m-%d').date() if date != datetime.date.today(): error('release date is not today') return match.group(2) error('invalid release entry in CHANGES')
[ "def", "parse_changes", "(", ")", ":", "with", "open", "(", "'CHANGES'", ")", "as", "changes", ":", "for", "match", "in", "re", ".", "finditer", "(", "RE_CHANGES", ",", "changes", ".", "read", "(", "1024", ")", ",", "re", ".", "M", ")", ":", "if", "len", "(", "match", ".", "group", "(", "1", ")", ")", "!=", "len", "(", "match", ".", "group", "(", "3", ")", ")", ":", "error", "(", "'incorrect underline in CHANGES'", ")", "date", "=", "datetime", ".", "datetime", ".", "strptime", "(", "match", ".", "group", "(", "4", ")", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")", "if", "date", "!=", "datetime", ".", "date", ".", "today", "(", ")", ":", "error", "(", "'release date is not today'", ")", "return", "match", ".", "group", "(", "2", ")", "error", "(", "'invalid release entry in CHANGES'", ")" ]
grab version from CHANGES and validate entry
[ "grab", "version", "from", "CHANGES", "and", "validate", "entry" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L38-L54
247,486
artisanofcode/python-broadway
scripts/release.py
increment_version
def increment_version(version): """ get the next version """ parts = [int(v) for v in version.split('.')] parts[-1] += 1 parts.append('dev0') return '.'.join(map(str, parts))
python
def increment_version(version): """ get the next version """ parts = [int(v) for v in version.split('.')] parts[-1] += 1 parts.append('dev0') return '.'.join(map(str, parts))
[ "def", "increment_version", "(", "version", ")", ":", "parts", "=", "[", "int", "(", "v", ")", "for", "v", "in", "version", ".", "split", "(", "'.'", ")", "]", "parts", "[", "-", "1", "]", "+=", "1", "parts", ".", "append", "(", "'dev0'", ")", "return", "'.'", ".", "join", "(", "map", "(", "str", ",", "parts", ")", ")" ]
get the next version
[ "get", "the", "next", "version" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L57-L64
247,487
artisanofcode/python-broadway
scripts/release.py
set_version
def set_version(version): """ set the version in the projects root module """ with open(FILENAME) as pythonfile: content = pythonfile.read() output = re.sub(RE_VERSION, r"\1'{}'".format(version), content) if content == output: error('failed updating {}'.format(FILENAME)) with open(FILENAME, 'w') as pythonfile: pythonfile.write(output)
python
def set_version(version): """ set the version in the projects root module """ with open(FILENAME) as pythonfile: content = pythonfile.read() output = re.sub(RE_VERSION, r"\1'{}'".format(version), content) if content == output: error('failed updating {}'.format(FILENAME)) with open(FILENAME, 'w') as pythonfile: pythonfile.write(output)
[ "def", "set_version", "(", "version", ")", ":", "with", "open", "(", "FILENAME", ")", "as", "pythonfile", ":", "content", "=", "pythonfile", ".", "read", "(", ")", "output", "=", "re", ".", "sub", "(", "RE_VERSION", ",", "r\"\\1'{}'\"", ".", "format", "(", "version", ")", ",", "content", ")", "if", "content", "==", "output", ":", "error", "(", "'failed updating {}'", ".", "format", "(", "FILENAME", ")", ")", "with", "open", "(", "FILENAME", ",", "'w'", ")", "as", "pythonfile", ":", "pythonfile", ".", "write", "(", "output", ")" ]
set the version in the projects root module
[ "set", "the", "version", "in", "the", "projects", "root", "module" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L67-L79
247,488
artisanofcode/python-broadway
scripts/release.py
upload
def upload(): """ build the files and upload to pypi """ def twine(*args): """ run a twine command """ process = run(sys.executable, '-m', 'twine', *args) return process.wait() != 0 if run(sys.executable, 'setup.py', 'sdist', 'bdist_wheel').wait() != 0: error('failed building packages') if twine('register', glob.glob('dist/*')[0]): error('register failed') if twine('upload', '-s', '-i', 'CB164668', '--skip-existing', 'dist/*'): error('upload failed')
python
def upload(): """ build the files and upload to pypi """ def twine(*args): """ run a twine command """ process = run(sys.executable, '-m', 'twine', *args) return process.wait() != 0 if run(sys.executable, 'setup.py', 'sdist', 'bdist_wheel').wait() != 0: error('failed building packages') if twine('register', glob.glob('dist/*')[0]): error('register failed') if twine('upload', '-s', '-i', 'CB164668', '--skip-existing', 'dist/*'): error('upload failed')
[ "def", "upload", "(", ")", ":", "def", "twine", "(", "*", "args", ")", ":", "\"\"\" run a twine command \"\"\"", "process", "=", "run", "(", "sys", ".", "executable", ",", "'-m'", ",", "'twine'", ",", "*", "args", ")", "return", "process", ".", "wait", "(", ")", "!=", "0", "if", "run", "(", "sys", ".", "executable", ",", "'setup.py'", ",", "'sdist'", ",", "'bdist_wheel'", ")", ".", "wait", "(", ")", "!=", "0", ":", "error", "(", "'failed building packages'", ")", "if", "twine", "(", "'register'", ",", "glob", ".", "glob", "(", "'dist/*'", ")", "[", "0", "]", ")", ":", "error", "(", "'register failed'", ")", "if", "twine", "(", "'upload'", ",", "'-s'", ",", "'-i'", ",", "'CB164668'", ",", "'--skip-existing'", ",", "'dist/*'", ")", ":", "error", "(", "'upload failed'", ")" ]
build the files and upload to pypi
[ "build", "the", "files", "and", "upload", "to", "pypi" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L82-L98
247,489
artisanofcode/python-broadway
scripts/release.py
check_tag
def check_tag(version): """ check theres not already a tag for this version """ output = run('git', 'tag', stdout=subprocess.PIPE).communicate()[0] tags = set(output.decode('utf-8').splitlines()) if 'v{}'.format(version) in tags: error('version already exists')
python
def check_tag(version): """ check theres not already a tag for this version """ output = run('git', 'tag', stdout=subprocess.PIPE).communicate()[0] tags = set(output.decode('utf-8').splitlines()) if 'v{}'.format(version) in tags: error('version already exists')
[ "def", "check_tag", "(", "version", ")", ":", "output", "=", "run", "(", "'git'", ",", "'tag'", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "tags", "=", "set", "(", "output", ".", "decode", "(", "'utf-8'", ")", ".", "splitlines", "(", ")", ")", "if", "'v{}'", ".", "format", "(", "version", ")", "in", "tags", ":", "error", "(", "'version already exists'", ")" ]
check theres not already a tag for this version
[ "check", "theres", "not", "already", "a", "tag", "for", "this", "version" ]
a051ca5a922ecb38a541df59e8740e2a047d9a4a
https://github.com/artisanofcode/python-broadway/blob/a051ca5a922ecb38a541df59e8740e2a047d9a4a/scripts/release.py#L101-L108
247,490
dossier/dossier.web
dossier/web/interface.py
as_multi_dict
def as_multi_dict(d): 'Coerce a dictionary to a bottle.MultiDict' if isinstance(d, bottle.MultiDict): return d md = bottle.MultiDict() for k, v in d.iteritems(): if isinstance(v, list): for x in v: md[k] = x else: md[k] = v return md
python
def as_multi_dict(d): 'Coerce a dictionary to a bottle.MultiDict' if isinstance(d, bottle.MultiDict): return d md = bottle.MultiDict() for k, v in d.iteritems(): if isinstance(v, list): for x in v: md[k] = x else: md[k] = v return md
[ "def", "as_multi_dict", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "bottle", ".", "MultiDict", ")", ":", "return", "d", "md", "=", "bottle", ".", "MultiDict", "(", ")", "for", "k", ",", "v", "in", "d", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "for", "x", "in", "v", ":", "md", "[", "k", "]", "=", "x", "else", ":", "md", "[", "k", "]", "=", "v", "return", "md" ]
Coerce a dictionary to a bottle.MultiDict
[ "Coerce", "a", "dictionary", "to", "a", "bottle", ".", "MultiDict" ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L376-L387
247,491
dossier/dossier.web
dossier/web/interface.py
Queryable.set_query_params
def set_query_params(self, query_params): '''Set the query parameters. The query parameters should be a dictionary mapping keys to strings or lists of strings. :param query_params: query parameters :type query_params: ``name |--> (str | [str])`` :rtype: :class:`Queryable` ''' self.query_params = as_multi_dict(query_params) self.apply_param_schema() return self
python
def set_query_params(self, query_params): '''Set the query parameters. The query parameters should be a dictionary mapping keys to strings or lists of strings. :param query_params: query parameters :type query_params: ``name |--> (str | [str])`` :rtype: :class:`Queryable` ''' self.query_params = as_multi_dict(query_params) self.apply_param_schema() return self
[ "def", "set_query_params", "(", "self", ",", "query_params", ")", ":", "self", ".", "query_params", "=", "as_multi_dict", "(", "query_params", ")", "self", ".", "apply_param_schema", "(", ")", "return", "self" ]
Set the query parameters. The query parameters should be a dictionary mapping keys to strings or lists of strings. :param query_params: query parameters :type query_params: ``name |--> (str | [str])`` :rtype: :class:`Queryable`
[ "Set", "the", "query", "parameters", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L129-L141
247,492
dossier/dossier.web
dossier/web/interface.py
Queryable.add_query_params
def add_query_params(self, query_params): '''Overwrite the given query parameters. This is the same as :meth:`Queryable.set_query_params`, except it overwrites existing parameters individually whereas ``set_query_params`` deletes all existing key in ``query_params``. ''' query_params = as_multi_dict(query_params) for k in query_params: self.query_params.pop(k, None) for v in query_params.getlist(k): self.query_params[k] = v self.apply_param_schema() return self
python
def add_query_params(self, query_params): '''Overwrite the given query parameters. This is the same as :meth:`Queryable.set_query_params`, except it overwrites existing parameters individually whereas ``set_query_params`` deletes all existing key in ``query_params``. ''' query_params = as_multi_dict(query_params) for k in query_params: self.query_params.pop(k, None) for v in query_params.getlist(k): self.query_params[k] = v self.apply_param_schema() return self
[ "def", "add_query_params", "(", "self", ",", "query_params", ")", ":", "query_params", "=", "as_multi_dict", "(", "query_params", ")", "for", "k", "in", "query_params", ":", "self", ".", "query_params", ".", "pop", "(", "k", ",", "None", ")", "for", "v", "in", "query_params", ".", "getlist", "(", "k", ")", ":", "self", ".", "query_params", "[", "k", "]", "=", "v", "self", ".", "apply_param_schema", "(", ")", "return", "self" ]
Overwrite the given query parameters. This is the same as :meth:`Queryable.set_query_params`, except it overwrites existing parameters individually whereas ``set_query_params`` deletes all existing key in ``query_params``.
[ "Overwrite", "the", "given", "query", "parameters", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L143-L157
247,493
dossier/dossier.web
dossier/web/interface.py
Queryable.apply_param_schema
def apply_param_schema(self): '''Applies the schema defined to the given parameters. This combines the values in ``config_params`` and ``query_params``, and converts them to typed Python values per ``param_schema``. This is called automatically whenever the query parameters are updated. ''' def param_str(name, cons, default): try: v = self.query_params.get(name, default) if v is None: return v if len(v) == 0: return default return cons(v) except (TypeError, ValueError): return default def param_num(name, cons, default, minimum, maximum): try: n = cons(self.query_params.get(name, default)) return min(maximum, max(minimum, n)) except (TypeError, ValueError): return default for name, schema in getattr(self, 'param_schema', {}).iteritems(): default = self.config_params.get(name, schema.get('default', None)) v = None if schema['type'] == 'bool': v = param_str(name, lambda s: bool(int(s)), False) elif schema['type'] == 'int': v = param_num( name, int, default=default, minimum=schema.get('min', 0), maximum=schema.get('max', 1000000)) elif schema['type'] == 'float': v = param_num( name, float, default=default, minimum=schema.get('min', 0), maximum=schema.get('max', 1000000)) elif schema['type'] is 'bytes': v = param_str(name, schema.get('cons', str), default) elif schema['type'] is 'unicode': encoding = schema.get('encoding', 'utf-8') v = param_str(name, lambda s: s.decode(encoding), default) self.params[name] = v
python
def apply_param_schema(self): '''Applies the schema defined to the given parameters. This combines the values in ``config_params`` and ``query_params``, and converts them to typed Python values per ``param_schema``. This is called automatically whenever the query parameters are updated. ''' def param_str(name, cons, default): try: v = self.query_params.get(name, default) if v is None: return v if len(v) == 0: return default return cons(v) except (TypeError, ValueError): return default def param_num(name, cons, default, minimum, maximum): try: n = cons(self.query_params.get(name, default)) return min(maximum, max(minimum, n)) except (TypeError, ValueError): return default for name, schema in getattr(self, 'param_schema', {}).iteritems(): default = self.config_params.get(name, schema.get('default', None)) v = None if schema['type'] == 'bool': v = param_str(name, lambda s: bool(int(s)), False) elif schema['type'] == 'int': v = param_num( name, int, default=default, minimum=schema.get('min', 0), maximum=schema.get('max', 1000000)) elif schema['type'] == 'float': v = param_num( name, float, default=default, minimum=schema.get('min', 0), maximum=schema.get('max', 1000000)) elif schema['type'] is 'bytes': v = param_str(name, schema.get('cons', str), default) elif schema['type'] is 'unicode': encoding = schema.get('encoding', 'utf-8') v = param_str(name, lambda s: s.decode(encoding), default) self.params[name] = v
[ "def", "apply_param_schema", "(", "self", ")", ":", "def", "param_str", "(", "name", ",", "cons", ",", "default", ")", ":", "try", ":", "v", "=", "self", ".", "query_params", ".", "get", "(", "name", ",", "default", ")", "if", "v", "is", "None", ":", "return", "v", "if", "len", "(", "v", ")", "==", "0", ":", "return", "default", "return", "cons", "(", "v", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "default", "def", "param_num", "(", "name", ",", "cons", ",", "default", ",", "minimum", ",", "maximum", ")", ":", "try", ":", "n", "=", "cons", "(", "self", ".", "query_params", ".", "get", "(", "name", ",", "default", ")", ")", "return", "min", "(", "maximum", ",", "max", "(", "minimum", ",", "n", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "default", "for", "name", ",", "schema", "in", "getattr", "(", "self", ",", "'param_schema'", ",", "{", "}", ")", ".", "iteritems", "(", ")", ":", "default", "=", "self", ".", "config_params", ".", "get", "(", "name", ",", "schema", ".", "get", "(", "'default'", ",", "None", ")", ")", "v", "=", "None", "if", "schema", "[", "'type'", "]", "==", "'bool'", ":", "v", "=", "param_str", "(", "name", ",", "lambda", "s", ":", "bool", "(", "int", "(", "s", ")", ")", ",", "False", ")", "elif", "schema", "[", "'type'", "]", "==", "'int'", ":", "v", "=", "param_num", "(", "name", ",", "int", ",", "default", "=", "default", ",", "minimum", "=", "schema", ".", "get", "(", "'min'", ",", "0", ")", ",", "maximum", "=", "schema", ".", "get", "(", "'max'", ",", "1000000", ")", ")", "elif", "schema", "[", "'type'", "]", "==", "'float'", ":", "v", "=", "param_num", "(", "name", ",", "float", ",", "default", "=", "default", ",", "minimum", "=", "schema", ".", "get", "(", "'min'", ",", "0", ")", ",", "maximum", "=", "schema", ".", "get", "(", "'max'", ",", "1000000", ")", ")", "elif", "schema", "[", "'type'", "]", "is", "'bytes'", ":", "v", "=", "param_str", "(", "name", ",", "schema", ".", 
"get", "(", "'cons'", ",", "str", ")", ",", "default", ")", "elif", "schema", "[", "'type'", "]", "is", "'unicode'", ":", "encoding", "=", "schema", ".", "get", "(", "'encoding'", ",", "'utf-8'", ")", "v", "=", "param_str", "(", "name", ",", "lambda", "s", ":", "s", ".", "decode", "(", "encoding", ")", ",", "default", ")", "self", ".", "params", "[", "name", "]", "=", "v" ]
Applies the schema defined to the given parameters. This combines the values in ``config_params`` and ``query_params``, and converts them to typed Python values per ``param_schema``. This is called automatically whenever the query parameters are updated.
[ "Applies", "the", "schema", "defined", "to", "the", "given", "parameters", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L159-L207
247,494
dossier/dossier.web
dossier/web/interface.py
SearchEngine.create_filter_predicate
def create_filter_predicate(self): '''Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is initialized with the same set of query parameters given to the search engine. The returned function accepts a ``(content_id, FC)`` and returns ``True`` if and only if every selected predicate returns ``True`` on the same input. ''' assert self.query_content_id is not None, \ 'must call SearchEngine.set_query_id first' filter_names = self.query_params.getlist('filter') if len(filter_names) == 0 and 'already_labeled' in self._filters: filter_names = ['already_labeled'] init_filters = [(n, self._filters[n]) for n in filter_names] preds = [lambda _: True] for name, p in init_filters: preds.append(p.set_query_id(self.query_content_id) .set_query_params(self.query_params) .create_predicate()) return lambda (cid, fc): fc is not None and all(p((cid, fc)) for p in preds)
python
def create_filter_predicate(self): '''Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is initialized with the same set of query parameters given to the search engine. The returned function accepts a ``(content_id, FC)`` and returns ``True`` if and only if every selected predicate returns ``True`` on the same input. ''' assert self.query_content_id is not None, \ 'must call SearchEngine.set_query_id first' filter_names = self.query_params.getlist('filter') if len(filter_names) == 0 and 'already_labeled' in self._filters: filter_names = ['already_labeled'] init_filters = [(n, self._filters[n]) for n in filter_names] preds = [lambda _: True] for name, p in init_filters: preds.append(p.set_query_id(self.query_content_id) .set_query_params(self.query_params) .create_predicate()) return lambda (cid, fc): fc is not None and all(p((cid, fc)) for p in preds)
[ "def", "create_filter_predicate", "(", "self", ")", ":", "assert", "self", ".", "query_content_id", "is", "not", "None", ",", "'must call SearchEngine.set_query_id first'", "filter_names", "=", "self", ".", "query_params", ".", "getlist", "(", "'filter'", ")", "if", "len", "(", "filter_names", ")", "==", "0", "and", "'already_labeled'", "in", "self", ".", "_filters", ":", "filter_names", "=", "[", "'already_labeled'", "]", "init_filters", "=", "[", "(", "n", ",", "self", ".", "_filters", "[", "n", "]", ")", "for", "n", "in", "filter_names", "]", "preds", "=", "[", "lambda", "_", ":", "True", "]", "for", "name", ",", "p", "in", "init_filters", ":", "preds", ".", "append", "(", "p", ".", "set_query_id", "(", "self", ".", "query_content_id", ")", ".", "set_query_params", "(", "self", ".", "query_params", ")", ".", "create_predicate", "(", ")", ")", "return", "lambda", "(", "cid", ",", "fc", ")", ":", "fc", "is", "not", "None", "and", "all", "(", "p", "(", "(", "cid", ",", "fc", ")", ")", "for", "p", "in", "preds", ")" ]
Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is initialized with the same set of query parameters given to the search engine. The returned function accepts a ``(content_id, FC)`` and returns ``True`` if and only if every selected predicate returns ``True`` on the same input.
[ "Creates", "a", "filter", "predicate", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L268-L297
247,495
dossier/dossier.web
dossier/web/interface.py
SearchEngine.results
def results(self): '''Returns results as a JSON encodable Python value. This calls :meth:`SearchEngine.recommendations` and converts the results returned into JSON encodable values. Namely, feature collections are slimmed down to only features that are useful to an end-user. ''' results = self.recommendations() transformed = [] for t in results['results']: if len(t) == 2: cid, fc = t info = {} elif len(t) == 3: cid, fc, info = t else: bottle.abort(500, 'Invalid search result: "%r"' % t) result = info result['content_id'] = cid if not self.params['omit_fc']: result['fc'] = util.fc_to_json(fc) transformed.append(result) results['results'] = transformed return results
python
def results(self): '''Returns results as a JSON encodable Python value. This calls :meth:`SearchEngine.recommendations` and converts the results returned into JSON encodable values. Namely, feature collections are slimmed down to only features that are useful to an end-user. ''' results = self.recommendations() transformed = [] for t in results['results']: if len(t) == 2: cid, fc = t info = {} elif len(t) == 3: cid, fc, info = t else: bottle.abort(500, 'Invalid search result: "%r"' % t) result = info result['content_id'] = cid if not self.params['omit_fc']: result['fc'] = util.fc_to_json(fc) transformed.append(result) results['results'] = transformed return results
[ "def", "results", "(", "self", ")", ":", "results", "=", "self", ".", "recommendations", "(", ")", "transformed", "=", "[", "]", "for", "t", "in", "results", "[", "'results'", "]", ":", "if", "len", "(", "t", ")", "==", "2", ":", "cid", ",", "fc", "=", "t", "info", "=", "{", "}", "elif", "len", "(", "t", ")", "==", "3", ":", "cid", ",", "fc", ",", "info", "=", "t", "else", ":", "bottle", ".", "abort", "(", "500", ",", "'Invalid search result: \"%r\"'", "%", "t", ")", "result", "=", "info", "result", "[", "'content_id'", "]", "=", "cid", "if", "not", "self", ".", "params", "[", "'omit_fc'", "]", ":", "result", "[", "'fc'", "]", "=", "util", ".", "fc_to_json", "(", "fc", ")", "transformed", ".", "append", "(", "result", ")", "results", "[", "'results'", "]", "=", "transformed", "return", "results" ]
Returns results as a JSON encodable Python value. This calls :meth:`SearchEngine.recommendations` and converts the results returned into JSON encodable values. Namely, feature collections are slimmed down to only features that are useful to an end-user.
[ "Returns", "results", "as", "a", "JSON", "encodable", "Python", "value", "." ]
1cad1cce3c37d3a4e956abc710a2bc1afe16a092
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L310-L334
247,496
sternoru/goscalecms
goscale/plugins/pictures/models.py
Picasa._get_entry_link
def _get_entry_link(self, entry): """ Returns a unique link for an entry """ entry_link = None for link in entry.link: if '/data/' not in link.href and '/lh/' not in link.href: entry_link = link.href break return entry_link or entry.link[0].href
python
def _get_entry_link(self, entry): """ Returns a unique link for an entry """ entry_link = None for link in entry.link: if '/data/' not in link.href and '/lh/' not in link.href: entry_link = link.href break return entry_link or entry.link[0].href
[ "def", "_get_entry_link", "(", "self", ",", "entry", ")", ":", "entry_link", "=", "None", "for", "link", "in", "entry", ".", "link", ":", "if", "'/data/'", "not", "in", "link", ".", "href", "and", "'/lh/'", "not", "in", "link", ".", "href", ":", "entry_link", "=", "link", ".", "href", "break", "return", "entry_link", "or", "entry", ".", "link", "[", "0", "]", ".", "href" ]
Returns a unique link for an entry
[ "Returns", "a", "unique", "link", "for", "an", "entry" ]
7eee50357c47ebdfe3e573a8b4be3b67892d229e
https://github.com/sternoru/goscalecms/blob/7eee50357c47ebdfe3e573a8b4be3b67892d229e/goscale/plugins/pictures/models.py#L75-L83
247,497
KnowledgeLinks/rdfframework
rdfframework/rdfclass/rdfclass.py
find
def find(value): """ returns a dictionary of rdfclasses based on the a lowercase search args: value: the value to search by """ value = str(value).lower() rtn_dict = RegistryDictionary() for attr in dir(MODULE.rdfclass): if value in attr.lower(): try: item = getattr(MODULE.rdfclass, attr) if issubclass(item, RdfClassBase): rtn_dict[attr] = item except TypeError: pass return rtn_dict
python
def find(value): """ returns a dictionary of rdfclasses based on the a lowercase search args: value: the value to search by """ value = str(value).lower() rtn_dict = RegistryDictionary() for attr in dir(MODULE.rdfclass): if value in attr.lower(): try: item = getattr(MODULE.rdfclass, attr) if issubclass(item, RdfClassBase): rtn_dict[attr] = item except TypeError: pass return rtn_dict
[ "def", "find", "(", "value", ")", ":", "value", "=", "str", "(", "value", ")", ".", "lower", "(", ")", "rtn_dict", "=", "RegistryDictionary", "(", ")", "for", "attr", "in", "dir", "(", "MODULE", ".", "rdfclass", ")", ":", "if", "value", "in", "attr", ".", "lower", "(", ")", ":", "try", ":", "item", "=", "getattr", "(", "MODULE", ".", "rdfclass", ",", "attr", ")", "if", "issubclass", "(", "item", ",", "RdfClassBase", ")", ":", "rtn_dict", "[", "attr", "]", "=", "item", "except", "TypeError", ":", "pass", "return", "rtn_dict" ]
returns a dictionary of rdfclasses based on the a lowercase search args: value: the value to search by
[ "returns", "a", "dictionary", "of", "rdfclasses", "based", "on", "the", "a", "lowercase", "search" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfclass.py#L32-L49
247,498
KnowledgeLinks/rdfframework
rdfframework/rdfclass/rdfclass.py
list_hierarchy
def list_hierarchy(class_name, bases): """ Creates a list of the class hierarchy Args: ----- class_name: name of the current class bases: list/tuple of bases for the current class """ class_list = [Uri(class_name)] for base in bases: if base.__name__ not in IGNORE_CLASSES: class_list.append(Uri(base.__name__)) return list([i for i in set(class_list)])
python
def list_hierarchy(class_name, bases): """ Creates a list of the class hierarchy Args: ----- class_name: name of the current class bases: list/tuple of bases for the current class """ class_list = [Uri(class_name)] for base in bases: if base.__name__ not in IGNORE_CLASSES: class_list.append(Uri(base.__name__)) return list([i for i in set(class_list)])
[ "def", "list_hierarchy", "(", "class_name", ",", "bases", ")", ":", "class_list", "=", "[", "Uri", "(", "class_name", ")", "]", "for", "base", "in", "bases", ":", "if", "base", ".", "__name__", "not", "in", "IGNORE_CLASSES", ":", "class_list", ".", "append", "(", "Uri", "(", "base", ".", "__name__", ")", ")", "return", "list", "(", "[", "i", "for", "i", "in", "set", "(", "class_list", ")", "]", ")" ]
Creates a list of the class hierarchy Args: ----- class_name: name of the current class bases: list/tuple of bases for the current class
[ "Creates", "a", "list", "of", "the", "class", "hierarchy" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfclass.py#L705-L719
247,499
KnowledgeLinks/rdfframework
rdfframework/rdfclass/rdfclass.py
es_get_class_defs
def es_get_class_defs(cls_def, cls_name): """ Reads through the class defs and gets the related es class defintions Args: ----- class_defs: RdfDataset of class definitions """ rtn_dict = {key: value for key, value in cls_def.items() \ if key.startswith("kds_es")} for key in rtn_dict: del cls_def[key] return rtn_dict
python
def es_get_class_defs(cls_def, cls_name): """ Reads through the class defs and gets the related es class defintions Args: ----- class_defs: RdfDataset of class definitions """ rtn_dict = {key: value for key, value in cls_def.items() \ if key.startswith("kds_es")} for key in rtn_dict: del cls_def[key] return rtn_dict
[ "def", "es_get_class_defs", "(", "cls_def", ",", "cls_name", ")", ":", "rtn_dict", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "cls_def", ".", "items", "(", ")", "if", "key", ".", "startswith", "(", "\"kds_es\"", ")", "}", "for", "key", "in", "rtn_dict", ":", "del", "cls_def", "[", "key", "]", "return", "rtn_dict" ]
Reads through the class defs and gets the related es class defintions Args: ----- class_defs: RdfDataset of class definitions
[ "Reads", "through", "the", "class", "defs", "and", "gets", "the", "related", "es", "class", "defintions" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/rdfclass/rdfclass.py#L721-L734