Dataset fields (name: type, observed length/value range):
nwo: string, length 5 to 106
sha: string, length 40
path: string, length 4 to 174
language: string, 1 distinct value
identifier: string, length 1 to 140
parameters: string, length 0 to 87.7k
argument_list: string, 1 distinct value
return_statement: string, length 0 to 426k
docstring: string, length 0 to 64.3k
docstring_summary: string, length 0 to 26.3k
docstring_tokens: list
function: string, length 18 to 4.83M
function_tokens: list
url: string, length 83 to 304
SUSE/DeepSea
9c7fad93915ba1250c40d50c855011e9fe41ed21
srv/modules/runners/net.py
python
_summarize_iperf
(results)
return server_results
iperf summarize the successes, failures and errors across all minions
iperf summarize the successes, failures and errors across all minions
[ "iperf", "summarize", "the", "successes", "failures", "and", "errors", "across", "all", "minions" ]
def _summarize_iperf(results): """ iperf summarize the successes, failures and errors across all minions """ server_results = {} log.debug("Results {} ".format(results)) for result in results: for host in result: log.debug("Server {}".format(result[host]['server'])) if not result[host]['server'] in server_results: server_results.update({result[host]['server']: ""}) if result[host]['succeeded']: log.debug("filter:\n{}".format(result[host]['filter'])) server_results[result[host]['server']] += " " + result[host]['filter'] log.debug("Speed {}".format(server_results[result[host]['server']])) elif result[host]['failed']: log.debug("failed:\n{}".format(result[host]['failed'])) server_results[result[host]['server']] += " Failed to connect from {}".format(host) elif result[host]['errored']: log.debug("errored :\n{}".format(result[host]['errored'])) server_results[result[host]['server']] += " {} iperf error check installation.".format(host) for key, result in six.iteritems(server_results): total = 0 speed = result.split('Mbits/sec') speed = [_f for _f in speed if _f] try: for value in speed: total += float(value.strip()) # server_results[key] = str(total) + " Mbits/sec" server_results[key] = int(total) except ValueError: continue return server_results
[ "def", "_summarize_iperf", "(", "results", ")", ":", "server_results", "=", "{", "}", "log", ".", "debug", "(", "\"Results {} \"", ".", "format", "(", "results", ")", ")", "for", "result", "in", "results", ":", "for", "host", "in", "result", ":", "log", ".", "debug", "(", "\"Server {}\"", ".", "format", "(", "result", "[", "host", "]", "[", "'server'", "]", ")", ")", "if", "not", "result", "[", "host", "]", "[", "'server'", "]", "in", "server_results", ":", "server_results", ".", "update", "(", "{", "result", "[", "host", "]", "[", "'server'", "]", ":", "\"\"", "}", ")", "if", "result", "[", "host", "]", "[", "'succeeded'", "]", ":", "log", ".", "debug", "(", "\"filter:\\n{}\"", ".", "format", "(", "result", "[", "host", "]", "[", "'filter'", "]", ")", ")", "server_results", "[", "result", "[", "host", "]", "[", "'server'", "]", "]", "+=", "\" \"", "+", "result", "[", "host", "]", "[", "'filter'", "]", "log", ".", "debug", "(", "\"Speed {}\"", ".", "format", "(", "server_results", "[", "result", "[", "host", "]", "[", "'server'", "]", "]", ")", ")", "elif", "result", "[", "host", "]", "[", "'failed'", "]", ":", "log", ".", "debug", "(", "\"failed:\\n{}\"", ".", "format", "(", "result", "[", "host", "]", "[", "'failed'", "]", ")", ")", "server_results", "[", "result", "[", "host", "]", "[", "'server'", "]", "]", "+=", "\" Failed to connect from {}\"", ".", "format", "(", "host", ")", "elif", "result", "[", "host", "]", "[", "'errored'", "]", ":", "log", ".", "debug", "(", "\"errored :\\n{}\"", ".", "format", "(", "result", "[", "host", "]", "[", "'errored'", "]", ")", ")", "server_results", "[", "result", "[", "host", "]", "[", "'server'", "]", "]", "+=", "\" {} iperf error check installation.\"", ".", "format", "(", "host", ")", "for", "key", ",", "result", "in", "six", ".", "iteritems", "(", "server_results", ")", ":", "total", "=", "0", "speed", "=", "result", ".", "split", "(", "'Mbits/sec'", ")", "speed", "=", "[", "_f", "for", "_f", "in", "speed", "if", "_f", "]", "try", ":", "for", "value", "in", "speed", ":", "total", "+=", "float", "(", "value", ".", "strip", "(", ")", ")", "# server_results[key] = str(total) + \" Mbits/sec\"", "server_results", "[", "key", "]", "=", "int", "(", "total", ")", "except", "ValueError", ":", "continue", "return", "server_results" ]
https://github.com/SUSE/DeepSea/blob/9c7fad93915ba1250c40d50c855011e9fe41ed21/srv/modules/runners/net.py#L601-L634
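A minimal, self-contained sketch of the aggregation step in the record above: the runner concatenates per-minion iperf filter strings per server, then splits on 'Mbits/sec' and sums the numeric pieces. The server address and speeds here are hypothetical.

server_results = {"192.168.1.10": " 938 Mbits/sec 941 Mbits/sec"}
for key, result in server_results.items():
    speed = [s for s in result.split("Mbits/sec") if s]
    try:
        server_results[key] = int(sum(float(v.strip()) for v in speed))
    except ValueError:
        continue
print(server_results)  # {'192.168.1.10': 1879}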
pyansys/pymapdl
c07291fc062b359abf0e92b95a92d753a95ef3d7
ansys/mapdl/core/xpl.py
python
ansXpl.help
(self)
return self._mapdl.run("*XPL,HELP")
XPL help message Examples -------- >>> print(xpl.help())
XPL help message
[ "XPL", "help", "message" ]
def help(self): """XPL help message Examples -------- >>> print(xpl.help()) """ return self._mapdl.run("*XPL,HELP")
[ "def", "help", "(", "self", ")", ":", "return", "self", ".", "_mapdl", ".", "run", "(", "\"*XPL,HELP\"", ")" ]
https://github.com/pyansys/pymapdl/blob/c07291fc062b359abf0e92b95a92d753a95ef3d7/ansys/mapdl/core/xpl.py#L101-L108
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python3-alpha/python-libs/pyxmpp2/roster.py
python
Roster.get_items_by_name
(self, name, case_sensitive = True)
return result
Return a list of items with given name. :Parameters: - `name`: name to look-up - `case_sensitive`: if `False` the matching will be case insensitive. :Types: - `name`: `str` - `case_sensitive`: `bool` :Returntype: `list` of `RosterItem`
Return a list of items with given name.
[ "Return", "a", "list", "of", "items", "with", "given", "name", "." ]
def get_items_by_name(self, name, case_sensitive = True): """ Return a list of items with given name. :Parameters: - `name`: name to look-up - `case_sensitive`: if `False` the matching will be case insensitive. :Types: - `name`: `str` - `case_sensitive`: `bool` :Returntype: `list` of `RosterItem` """ if not case_sensitive and name: name = name.lower() result = [] for item in self._items: if item.name == name: result.append(item) elif item.name is None: continue elif not case_sensitive and item.name.lower() == name: result.append(item) return result
[ "def", "get_items_by_name", "(", "self", ",", "name", ",", "case_sensitive", "=", "True", ")", ":", "if", "not", "case_sensitive", "and", "name", ":", "name", "=", "name", ".", "lower", "(", ")", "result", "=", "[", "]", "for", "item", "in", "self", ".", "_items", ":", "if", "item", ".", "name", "==", "name", ":", "result", ".", "append", "(", "item", ")", "elif", "item", ".", "name", "is", "None", ":", "continue", "elif", "not", "case_sensitive", "and", "item", ".", "name", ".", "lower", "(", ")", "==", "name", ":", "result", ".", "append", "(", "item", ")", "return", "result" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/pyxmpp2/roster.py#L550-L574
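The matching logic above, restated as a standalone sketch; plain dicts stand in for RosterItem objects, which is an assumption for illustration only.

items = [{"name": "Alice"}, {"name": None}, {"name": "ALICE"}]

def get_items_by_name(items, name, case_sensitive=True):
    if not case_sensitive and name:
        name = name.lower()
    result = []
    for item in items:
        if item["name"] == name:
            result.append(item)
        elif item["name"] is None:
            continue  # unnamed items never match
        elif not case_sensitive and item["name"].lower() == name:
            result.append(item)
    return result

print(len(get_items_by_name(items, "alice", case_sensitive=False)))  # 2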
lightforever/mlcomp
c78fdb77ec9c4ec8ff11beea50b90cab20903ad9
mlcomp/db/report_info/info.py
python
ReportLayoutInfo._get_metric
(self)
return ReportLayoutMetric.from_dict(self.data['metric'])
[]
def _get_metric(self) -> ReportLayoutMetric: return ReportLayoutMetric.from_dict(self.data['metric'])
[ "def", "_get_metric", "(", "self", ")", "->", "ReportLayoutMetric", ":", "return", "ReportLayoutMetric", ".", "from_dict", "(", "self", ".", "data", "[", "'metric'", "]", ")" ]
https://github.com/lightforever/mlcomp/blob/c78fdb77ec9c4ec8ff11beea50b90cab20903ad9/mlcomp/db/report_info/info.py#L104-L105
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/sklearn/mixture/gmm.py
python
_GMMBase._set_covars
(self, covars)
Provide values for covariance.
Provide values for covariance.
[ "Provide", "values", "for", "covariance", "." ]
def _set_covars(self, covars): """Provide values for covariance.""" covars = np.asarray(covars) _validate_covars(covars, self.covariance_type, self.n_components) self.covars_ = covars
[ "def", "_set_covars", "(", "self", ",", "covars", ")", ":", "covars", "=", "np", ".", "asarray", "(", "covars", ")", "_validate_covars", "(", "covars", ",", "self", ".", "covariance_type", ",", "self", ".", "n_components", ")", "self", ".", "covars_", "=", "covars" ]
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/sklearn/mixture/gmm.py#L296-L300
tensorflow/ranking
94cccec8b4e71d2cc4489c61e2623522738c2924
tensorflow_ranking/python/head.py
python
_RankingHead._labels_and_logits_metrics
(self, labels, logits)
return metrics_dict
Returns metrics for labels and logits.
Returns metrics for labels and logits.
[ "Returns", "metrics", "for", "labels", "and", "logits", "." ]
def _labels_and_logits_metrics(self, labels, logits): """Returns metrics for labels and logits.""" is_label_valid = tf.reshape(tf.greater_equal(labels, 0.), [-1]) metrics_dict = {} for name, tensor in [('labels_mean', labels), ('logits_mean', logits)]: metrics_dict[name] = tf.compat.v1.metrics.mean( tf.boolean_mask(tensor=tf.reshape(tensor, [-1]), mask=is_label_valid)) return metrics_dict
[ "def", "_labels_and_logits_metrics", "(", "self", ",", "labels", ",", "logits", ")", ":", "is_label_valid", "=", "tf", ".", "reshape", "(", "tf", ".", "greater_equal", "(", "labels", ",", "0.", ")", ",", "[", "-", "1", "]", ")", "metrics_dict", "=", "{", "}", "for", "name", ",", "tensor", "in", "[", "(", "'labels_mean'", ",", "labels", ")", ",", "(", "'logits_mean'", ",", "logits", ")", "]", ":", "metrics_dict", "[", "name", "]", "=", "tf", ".", "compat", ".", "v1", ".", "metrics", ".", "mean", "(", "tf", ".", "boolean_mask", "(", "tensor", "=", "tf", ".", "reshape", "(", "tensor", ",", "[", "-", "1", "]", ")", ",", "mask", "=", "is_label_valid", ")", ")", "return", "metrics_dict" ]
https://github.com/tensorflow/ranking/blob/94cccec8b4e71d2cc4489c61e2623522738c2924/tensorflow_ranking/python/head.py#L208-L215
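The masking idea in the record above, shown in plain NumPy rather than the tf.compat.v1 metrics API: entries with label < 0 are treated as padding and excluded from the mean.

import numpy as np

labels = np.array([[1.0, -1.0], [0.0, 2.0]])  # -1 marks padded entries
mask = labels.reshape(-1) >= 0.0
print(labels.reshape(-1)[mask].mean())  # 1.0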
absolute-quantum/cats-blender-plugin
725be9bf7dcd0c6c974ca7662f9255a9d4c3795a
extern_tools/mmd_tools_local/bpyutils.py
python
select_object
(obj, objects=[])
return __SelectObjects(obj, objects)
Select objects. It is recommended to use 'select_object' with 'with' statement like the following code. This function can select "hidden" objects safely. with select_object(obj): some functions...
Select objects.
[ "Select", "objects", "." ]
def select_object(obj, objects=[]): """ Select objects. It is recommended to use 'select_object' with 'with' statement like the following code. This function can select "hidden" objects safely. with select_object(obj): some functions... """ return __SelectObjects(obj, objects)
[ "def", "select_object", "(", "obj", ",", "objects", "=", "[", "]", ")", ":", "return", "__SelectObjects", "(", "obj", ",", "objects", ")" ]
https://github.com/absolute-quantum/cats-blender-plugin/blob/725be9bf7dcd0c6c974ca7662f9255a9d4c3795a/extern_tools/mmd_tools_local/bpyutils.py#L96-L105
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
python
ToolShedRepositoriesController.uninstall_repository
(self, trans, id=None, **kwd)
DELETE /api/tool_shed_repositories/id DELETE /api/tool_shed_repositories/ :param id: encoded repository id. Either id or name, owner, changeset_revision and tool_shed_url need to be supplied :param kwd: 'remove_from_disk': Remove repository from disk or deactivate repository. Defaults to `True` (= remove repository from disk). 'name': Repository name 'owner': Repository owner 'changeset_revision': Changeset revision to uninstall 'tool_shed_url': Tool Shed URL
DELETE /api/tool_shed_repositories/id DELETE /api/tool_shed_repositories/
[ "DELETE", "/", "api", "/", "tool_shed_repositories", "/", "id", "DELETE", "/", "api", "/", "tool_shed_repositories", "/" ]
def uninstall_repository(self, trans, id=None, **kwd): """ DELETE /api/tool_shed_repositories/id DELETE /api/tool_shed_repositories/ :param id: encoded repository id. Either id or name, owner, changeset_revision and tool_shed_url need to be supplied :param kwd: 'remove_from_disk': Remove repository from disk or deactivate repository. Defaults to `True` (= remove repository from disk). 'name': Repository name 'owner': Repository owner 'changeset_revision': Changeset revision to uninstall 'tool_shed_url': Tool Shed URL """ remove_from_disk = util.asbool(kwd.get('remove_from_disk', True)) if id: try: repository = get_tool_shed_repository_by_id(self.app, id) except ValueError: raise HTTPBadRequest(detail=f"No repository with id '{id}' found") else: tsr_arguments = ['name', 'owner', 'changeset_revision', 'tool_shed_url'] try: tsr_arguments = {key: kwd[key] for key in tsr_arguments} except KeyError as e: raise HTTPBadRequest(detail=f"Missing required parameter '{e.args[0]}'") repository = get_installed_repository(app=self.app, tool_shed=tsr_arguments['tool_shed_url'], name=tsr_arguments['name'], owner=tsr_arguments['owner'], changeset_revision=tsr_arguments['changeset_revision']) if not repository: raise HTTPBadRequest(detail="Repository not found") irm = InstalledRepositoryManager(app=self.app) errors = irm.uninstall_repository(repository=repository, remove_from_disk=remove_from_disk) if not errors: action = 'removed' if remove_from_disk else 'deactivated' return {'message': f'The repository named {repository.name} has been {action}.'} else: raise Exception(f'Attempting to uninstall tool dependencies for repository named {repository.name} resulted in errors: {errors}')
[ "def", "uninstall_repository", "(", "self", ",", "trans", ",", "id", "=", "None", ",", "*", "*", "kwd", ")", ":", "remove_from_disk", "=", "util", ".", "asbool", "(", "kwd", ".", "get", "(", "'remove_from_disk'", ",", "True", ")", ")", "if", "id", ":", "try", ":", "repository", "=", "get_tool_shed_repository_by_id", "(", "self", ".", "app", ",", "id", ")", "except", "ValueError", ":", "raise", "HTTPBadRequest", "(", "detail", "=", "f\"No repository with id '{id}' found\"", ")", "else", ":", "tsr_arguments", "=", "[", "'name'", ",", "'owner'", ",", "'changeset_revision'", ",", "'tool_shed_url'", "]", "try", ":", "tsr_arguments", "=", "{", "key", ":", "kwd", "[", "key", "]", "for", "key", "in", "tsr_arguments", "}", "except", "KeyError", "as", "e", ":", "raise", "HTTPBadRequest", "(", "detail", "=", "f\"Missing required parameter '{e.args[0]}'\"", ")", "repository", "=", "get_installed_repository", "(", "app", "=", "self", ".", "app", ",", "tool_shed", "=", "tsr_arguments", "[", "'tool_shed_url'", "]", ",", "name", "=", "tsr_arguments", "[", "'name'", "]", ",", "owner", "=", "tsr_arguments", "[", "'owner'", "]", ",", "changeset_revision", "=", "tsr_arguments", "[", "'changeset_revision'", "]", ")", "if", "not", "repository", ":", "raise", "HTTPBadRequest", "(", "detail", "=", "\"Repository not found\"", ")", "irm", "=", "InstalledRepositoryManager", "(", "app", "=", "self", ".", "app", ")", "errors", "=", "irm", ".", "uninstall_repository", "(", "repository", "=", "repository", ",", "remove_from_disk", "=", "remove_from_disk", ")", "if", "not", "errors", ":", "action", "=", "'removed'", "if", "remove_from_disk", "else", "'deactivated'", "return", "{", "'message'", ":", "f'The repository named {repository.name} has been {action}.'", "}", "else", ":", "raise", "Exception", "(", "f'Attempting to uninstall tool dependencies for repository named {repository.name} resulted in errors: {errors}'", ")" ]
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py#L271-L311
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/paho/mqtt/client.py
python
Client.on_message
(self, func)
Define the message received callback implementation. Expected signature is: on_message_callback(client, userdata, message) client: the client instance for this callback userdata: the private user data as set in Client() or userdata_set() message: an instance of MQTTMessage. This is a class with members topic, payload, qos, retain. Decorator: @client.message_callback() (```client``` is the name of the instance which this callback is being attached to)
Define the message received callback implementation.
[ "Define", "the", "message", "received", "callback", "implementation", "." ]
def on_message(self, func): """ Define the message received callback implementation. Expected signature is: on_message_callback(client, userdata, message) client: the client instance for this callback userdata: the private user data as set in Client() or userdata_set() message: an instance of MQTTMessage. This is a class with members topic, payload, qos, retain. Decorator: @client.message_callback() (```client``` is the name of the instance which this callback is being attached to) """ with self._callback_mutex: self._on_message = func
[ "def", "on_message", "(", "self", ",", "func", ")", ":", "with", "self", ".", "_callback_mutex", ":", "self", ".", "_on_message", "=", "func" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/paho/mqtt/client.py#L1982-L1998
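A minimal subscriber sketch using the on_message property setter shown above. The broker address and topic are hypothetical, and the bare Client() constructor assumes the paho-mqtt 1.x API vendored here.

import paho.mqtt.client as mqtt

def on_message(client, userdata, message):
    print(message.topic, message.payload)

client = mqtt.Client()
client.on_message = on_message        # goes through the setter shown above
client.connect("broker.example.com")  # hypothetical broker
client.subscribe("sensors/#")         # hypothetical topic
client.loop_forever()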
Komodo/KomodoEdit
61edab75dce2bdb03943b387b0608ea36f548e8e
src/codeintel/play/scrap.py
python
_PriorityQueue.remove_id
(self, id)
Remove all current requests with the given id. Does not return anything.
Remove all current requests with the given id.
[ "Remove", "all", "current", "requests", "with", "the", "given", "id", "." ]
def remove_id(self, id): """Remove all current requests with the given id. Does not return anything. """ log.debug("in _PriorityQueue.remove_id, acquiring esema") if not self.esema.acquire(0): # do not block to acquire lock # return if could not acquire: means queue is empty and # therefore do not have any items to remove log.debug("in _PriorityQueue.remove_id, did not acquire esema") return log.debug("in _PriorityQueue.remove_id, acquired mutex") log.debug("in _PriorityQueue.remove_id, acquiring mutex") self.mutex.acquire() release_esema = 1 try: self._remove_id(id) # Failure means empty state also unchanged - release_esema # remains true. release_esema = not self._empty() finally: if release_esema: log.debug("in _PriorityQueue.remove_id, releasing esema") self.esema.release() log.debug("in _PriorityQueue.remove_id, releasing mutex") self.mutex.release()
[ "def", "remove_id", "(", "self", ",", "id", ")", ":", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, acquiring esema\"", ")", "if", "not", "self", ".", "esema", ".", "acquire", "(", "0", ")", ":", "# do not block to acquire lock", "# return if could not acquire: means queue is empty and", "# therefore do not have any items to remove", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, did not acquire esema\"", ")", "return", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, acquired mutex\"", ")", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, acquiring mutex\"", ")", "self", ".", "mutex", ".", "acquire", "(", ")", "release_esema", "=", "1", "try", ":", "self", ".", "_remove_id", "(", "id", ")", "# Failure means empty state also unchanged - release_esema", "# remains true.", "release_esema", "=", "not", "self", ".", "_empty", "(", ")", "finally", ":", "if", "release_esema", ":", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, releasing esema\"", ")", "self", ".", "esema", ".", "release", "(", ")", "log", ".", "debug", "(", "\"in _PriorityQueue.remove_id, releasing mutex\"", ")", "self", ".", "mutex", ".", "release", "(", ")" ]
https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/play/scrap.py#L76-L101
open-mmlab/mmskeleton
b4c076baa9e02e69b5876c49fa7c509866d902c7
tools/publish_model.py
python
process_checkpoint
(in_file, out_file)
[]
def process_checkpoint(in_file, out_file): checkpoint = torch.load(in_file, map_location='cpu') # remove optimizer for smaller file size if 'optimizer' in checkpoint: del checkpoint['optimizer'] # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. torch.save(checkpoint, out_file) sha = subprocess.check_output(['sha256sum', out_file]).decode() final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) subprocess.Popen(['mv', out_file, final_file])
[ "def", "process_checkpoint", "(", "in_file", ",", "out_file", ")", ":", "checkpoint", "=", "torch", ".", "load", "(", "in_file", ",", "map_location", "=", "'cpu'", ")", "# remove optimizer for smaller file size", "if", "'optimizer'", "in", "checkpoint", ":", "del", "checkpoint", "[", "'optimizer'", "]", "# if it is necessary to remove some sensitive data in checkpoint['meta'],", "# add the code here.", "torch", ".", "save", "(", "checkpoint", ",", "out_file", ")", "sha", "=", "subprocess", ".", "check_output", "(", "[", "'sha256sum'", ",", "out_file", "]", ")", ".", "decode", "(", ")", "final_file", "=", "out_file", ".", "rstrip", "(", "'.pth'", ")", "+", "'-{}.pth'", ".", "format", "(", "sha", "[", ":", "8", "]", ")", "subprocess", ".", "Popen", "(", "[", "'mv'", ",", "out_file", ",", "final_file", "]", ")" ]
https://github.com/open-mmlab/mmskeleton/blob/b4c076baa9e02e69b5876c49fa7c509866d902c7/tools/publish_model.py#L16-L26
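One caveat worth flagging in the record above: str.rstrip strips a set of characters, not a suffix, so out_file.rstrip('.pth') mangles names such as 'best.pth'. A suffix-safe sketch (removesuffix needs Python 3.9+; the filename and hash prefix are hypothetical):

out_file = "best.pth"
sha8 = "a1b2c3d4"  # placeholder for the sha256 prefix
print(out_file.rstrip(".pth"))                         # 'bes' -- characters stripped
print(out_file.removesuffix(".pth") + "-%s.pth" % sha8)  # 'best-a1b2c3d4.pth'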
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/views.py
python
runCmd
(request)
return HttpResponse('{"TrackMark":%s, "TotalNum":%s}' %(track_mark, task_num))
[]
def runCmd(request): track_mark = MultiRunCounter.AddNumber() user_input = request.POST['command'] user_account = request.POST['UserName'] iplists = request.POST['IPLists'].split(',') task_num = len(set(iplists)) print "user input command is: %s and username is:%s and iplists are: %s" %(user_input,user_account,' '.join(iplists)) cmd = "python %s/TriAquae/backend/multiprocessing_runCMD2.py %s '%s' '%s' %s &" % (tri_config.Working_dir,track_mark,' '.join(iplists),user_input,user_account) os.system(cmd) return HttpResponse('{"TrackMark":%s, "TotalNum":%s}' %(track_mark, task_num))
[ "def", "runCmd", "(", "request", ")", ":", "track_mark", "=", "MultiRunCounter", ".", "AddNumber", "(", ")", "user_input", "=", "request", ".", "POST", "[", "'command'", "]", "user_account", "=", "request", ".", "POST", "[", "'UserName'", "]", "iplists", "=", "request", ".", "POST", "[", "'IPLists'", "]", ".", "split", "(", "','", ")", "task_num", "=", "len", "(", "set", "(", "iplists", ")", ")", "print", "\"user input command is: %s and username is:%s and iplists are: %s\"", "%", "(", "user_input", ",", "user_account", ",", "' '", ".", "join", "(", "iplists", ")", ")", "cmd", "=", "\"python %s/TriAquae/backend/multiprocessing_runCMD2.py %s '%s' '%s' %s &\"", "%", "(", "tri_config", ".", "Working_dir", ",", "track_mark", ",", "' '", ".", "join", "(", "iplists", ")", ",", "user_input", ",", "user_account", ")", "os", ".", "system", "(", "cmd", ")", "return", "HttpResponse", "(", "'{\"TrackMark\":%s, \"TotalNum\":%s}'", "%", "(", "track_mark", ",", "task_num", ")", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/views.py#L252-L262
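The record above interpolates request fields into a shell string passed to os.system, which is open to shell injection. A hedged sketch of the usual remedy, an argument list with no shell; all values and the script path are hypothetical:

import subprocess

track_mark, iplists = 7, ["10.0.0.1", "10.0.0.2"]
user_input, user_account = "uptime", "admin"
subprocess.Popen([
    "python", "/opt/TriAquae/backend/multiprocessing_runCMD2.py",  # hypothetical path
    str(track_mark), " ".join(iplists), user_input, user_account,
])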
jinfagang/alfred
dd7420d1410f82f9dadf07a30b6fad5a71168001
alfred/utils/file_io.py
python
PathManager.isfile
(path: str)
return PathManager.__get_path_handler(path)._isfile(path)
Checks if there the resource at the given URI is a file. Args: path (str): A URI supported by this PathHandler Returns: bool: true if the path is a file
Checks if there the resource at the given URI is a file.
[ "Checks", "if", "there", "the", "resource", "at", "the", "given", "URI", "is", "a", "file", "." ]
def isfile(path: str) -> bool: """ Checks if there the resource at the given URI is a file. Args: path (str): A URI supported by this PathHandler Returns: bool: true if the path is a file """ return PathManager.__get_path_handler(path)._isfile(path)
[ "def", "isfile", "(", "path", ":", "str", ")", "->", "bool", ":", "return", "PathManager", ".", "__get_path_handler", "(", "path", ")", ".", "_isfile", "(", "path", ")" ]
https://github.com/jinfagang/alfred/blob/dd7420d1410f82f9dadf07a30b6fad5a71168001/alfred/utils/file_io.py#L469-L479
daler/pybedtools
ffe0d4bd2f32a0a5fc0cea049ee73773c4d57573
pybedtools/bedtool.py
python
BedTool.check_genome
(self, **kwargs)
return kwargs
Handles the different ways of specifying a genome in kwargs: g='genome.file' specifies a file directly genome='dm3' gets the file from genome registry self.chromsizes could be a dict.\
Handles the different ways of specifying a genome in kwargs:
[ "Handles", "the", "different", "ways", "of", "specifying", "a", "genome", "in", "kwargs", ":" ]
def check_genome(self, **kwargs): """ Handles the different ways of specifying a genome in kwargs: g='genome.file' specifies a file directly genome='dm3' gets the file from genome registry self.chromsizes could be a dict.\ """ # If both g and genome are missing, assume self.chromsizes if ("g" not in kwargs) and ("genome" not in kwargs): if hasattr(self, "chromsizes"): kwargs["g"] = self.chromsizes else: raise ValueError( 'No genome specified. Use the "g" or ' '"genome" kwargs, or use the ' ".set_chromsizes() method" ) # If both specified, rather than make an implicit decision, raise an # exception if "g" in kwargs and "genome" in kwargs: raise ValueError('Cannot specify both "g" and "genome"') # Something like genome='dm3' was specified if "g" not in kwargs and "genome" in kwargs: if isinstance(kwargs["genome"], dict): genome_dict = kwargs["genome"] else: genome_dict = pybedtools.chromsizes(kwargs["genome"]) genome_file = pybedtools.chromsizes_to_file(genome_dict) kwargs["g"] = genome_file del kwargs["genome"] # By the time we get here, 'g' is specified. # If a dict was provided, convert to tempfile here if isinstance(kwargs["g"], dict): kwargs["g"] = pybedtools.chromsizes_to_file(kwargs["g"]) if not os.path.exists(kwargs["g"]): msg = 'Genome file "%s" does not exist' % (kwargs["g"]) if six.PY2: raise ValueError(msg) raise FileNotFoundError(msg) return kwargs
[ "def", "check_genome", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# If both g and genome are missing, assume self.chromsizes", "if", "(", "\"g\"", "not", "in", "kwargs", ")", "and", "(", "\"genome\"", "not", "in", "kwargs", ")", ":", "if", "hasattr", "(", "self", ",", "\"chromsizes\"", ")", ":", "kwargs", "[", "\"g\"", "]", "=", "self", ".", "chromsizes", "else", ":", "raise", "ValueError", "(", "'No genome specified. Use the \"g\" or '", "'\"genome\" kwargs, or use the '", "\".set_chromsizes() method\"", ")", "# If both specified, rather than make an implicit decision, raise an", "# exception", "if", "\"g\"", "in", "kwargs", "and", "\"genome\"", "in", "kwargs", ":", "raise", "ValueError", "(", "'Cannot specify both \"g\" and \"genome\"'", ")", "# Something like genome='dm3' was specified", "if", "\"g\"", "not", "in", "kwargs", "and", "\"genome\"", "in", "kwargs", ":", "if", "isinstance", "(", "kwargs", "[", "\"genome\"", "]", ",", "dict", ")", ":", "genome_dict", "=", "kwargs", "[", "\"genome\"", "]", "else", ":", "genome_dict", "=", "pybedtools", ".", "chromsizes", "(", "kwargs", "[", "\"genome\"", "]", ")", "genome_file", "=", "pybedtools", ".", "chromsizes_to_file", "(", "genome_dict", ")", "kwargs", "[", "\"g\"", "]", "=", "genome_file", "del", "kwargs", "[", "\"genome\"", "]", "# By the time we get here, 'g' is specified.", "# If a dict was provided, convert to tempfile here", "if", "isinstance", "(", "kwargs", "[", "\"g\"", "]", ",", "dict", ")", ":", "kwargs", "[", "\"g\"", "]", "=", "pybedtools", ".", "chromsizes_to_file", "(", "kwargs", "[", "\"g\"", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "kwargs", "[", "\"g\"", "]", ")", ":", "msg", "=", "'Genome file \"%s\" does not exist'", "%", "(", "kwargs", "[", "\"g\"", "]", ")", "if", "six", ".", "PY2", ":", "raise", "ValueError", "(", "msg", ")", "raise", "FileNotFoundError", "(", "msg", ")", "return", "kwargs" ]
https://github.com/daler/pybedtools/blob/ffe0d4bd2f32a0a5fc0cea049ee73773c4d57573/pybedtools/bedtool.py#L1582-L1629
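A standalone sketch of just the precedence rules enforced above (no pybedtools; resolve_genome is a hypothetical helper name for illustration):

def resolve_genome(**kwargs):
    if "g" in kwargs and "genome" in kwargs:
        raise ValueError('Cannot specify both "g" and "genome"')
    if "g" not in kwargs and "genome" not in kwargs:
        raise ValueError("No genome specified")
    return kwargs.get("g", kwargs.get("genome"))

print(resolve_genome(genome="dm3"))     # dm3
print(resolve_genome(g="genome.file"))  # genome.file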
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/StringIO.py
python
StringIO.close
(self)
Free the memory buffer.
Free the memory buffer.
[ "Free", "the", "memory", "buffer", "." ]
def close(self): """Free the memory buffer. """ if not self.closed: self.closed = True del self.buf, self.pos
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "self", ".", "closed", "=", "True", "del", "self", ".", "buf", ",", "self", ".", "pos" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/StringIO.py#L81-L86
jina-ai/jina
c77a492fcd5adba0fc3de5347bea83dd4e7d8087
jina/peapods/networking.py
python
K8sGrpcConnectionPool._fetch_initial_state
(self)
[]
async def _fetch_initial_state(self): namespaced_pods = self._k8s_client.list_namespaced_pod(self._namespace) for item in namespaced_pods.items: await self._process_item(item)
[ "async", "def", "_fetch_initial_state", "(", "self", ")", ":", "namespaced_pods", "=", "self", ".", "_k8s_client", ".", "list_namespaced_pod", "(", "self", ".", "_namespace", ")", "for", "item", "in", "namespaced_pods", ".", "items", ":", "await", "self", ".", "_process_item", "(", "item", ")" ]
https://github.com/jina-ai/jina/blob/c77a492fcd5adba0fc3de5347bea83dd4e7d8087/jina/peapods/networking.py#L732-L735
python/cpython
e13cdca0f5224ec4e23bdd04bb3120506964bc8b
Lib/lib2to3/patcomp.py
python
PatternCompiler.compile_node
(self, node)
return pattern.optimize()
Compiles a node, recursively. This is one big switch on the node type.
Compiles a node, recursively.
[ "Compiles", "a", "node", "recursively", "." ]
def compile_node(self, node): """Compiles a node, recursively. This is one big switch on the node type. """ # XXX Optimize certain Wildcard-containing-Wildcard patterns # that can be merged if node.type == self.syms.Matcher: node = node.children[0] # Avoid unneeded recursion if node.type == self.syms.Alternatives: # Skip the odd children since they are just '|' tokens alts = [self.compile_node(ch) for ch in node.children[::2]] if len(alts) == 1: return alts[0] p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) return p.optimize() if node.type == self.syms.Alternative: units = [self.compile_node(ch) for ch in node.children] if len(units) == 1: return units[0] p = pytree.WildcardPattern([units], min=1, max=1) return p.optimize() if node.type == self.syms.NegatedUnit: pattern = self.compile_basic(node.children[1:]) p = pytree.NegatedPattern(pattern) return p.optimize() assert node.type == self.syms.Unit name = None nodes = node.children if len(nodes) >= 3 and nodes[1].type == token.EQUAL: name = nodes[0].value nodes = nodes[2:] repeat = None if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: repeat = nodes[-1] nodes = nodes[:-1] # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] pattern = self.compile_basic(nodes, repeat) if repeat is not None: assert repeat.type == self.syms.Repeater children = repeat.children child = children[0] if child.type == token.STAR: min = 0 max = pytree.HUGE elif child.type == token.PLUS: min = 1 max = pytree.HUGE elif child.type == token.LBRACE: assert children[-1].type == token.RBRACE assert len(children) in (3, 5) min = max = self.get_int(children[1]) if len(children) == 5: max = self.get_int(children[3]) else: assert False if min != 1 or max != 1: pattern = pattern.optimize() pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) if name is not None: pattern.name = name return pattern.optimize()
[ "def", "compile_node", "(", "self", ",", "node", ")", ":", "# XXX Optimize certain Wildcard-containing-Wildcard patterns", "# that can be merged", "if", "node", ".", "type", "==", "self", ".", "syms", ".", "Matcher", ":", "node", "=", "node", ".", "children", "[", "0", "]", "# Avoid unneeded recursion", "if", "node", ".", "type", "==", "self", ".", "syms", ".", "Alternatives", ":", "# Skip the odd children since they are just '|' tokens", "alts", "=", "[", "self", ".", "compile_node", "(", "ch", ")", "for", "ch", "in", "node", ".", "children", "[", ":", ":", "2", "]", "]", "if", "len", "(", "alts", ")", "==", "1", ":", "return", "alts", "[", "0", "]", "p", "=", "pytree", ".", "WildcardPattern", "(", "[", "[", "a", "]", "for", "a", "in", "alts", "]", ",", "min", "=", "1", ",", "max", "=", "1", ")", "return", "p", ".", "optimize", "(", ")", "if", "node", ".", "type", "==", "self", ".", "syms", ".", "Alternative", ":", "units", "=", "[", "self", ".", "compile_node", "(", "ch", ")", "for", "ch", "in", "node", ".", "children", "]", "if", "len", "(", "units", ")", "==", "1", ":", "return", "units", "[", "0", "]", "p", "=", "pytree", ".", "WildcardPattern", "(", "[", "units", "]", ",", "min", "=", "1", ",", "max", "=", "1", ")", "return", "p", ".", "optimize", "(", ")", "if", "node", ".", "type", "==", "self", ".", "syms", ".", "NegatedUnit", ":", "pattern", "=", "self", ".", "compile_basic", "(", "node", ".", "children", "[", "1", ":", "]", ")", "p", "=", "pytree", ".", "NegatedPattern", "(", "pattern", ")", "return", "p", ".", "optimize", "(", ")", "assert", "node", ".", "type", "==", "self", ".", "syms", ".", "Unit", "name", "=", "None", "nodes", "=", "node", ".", "children", "if", "len", "(", "nodes", ")", ">=", "3", "and", "nodes", "[", "1", "]", ".", "type", "==", "token", ".", "EQUAL", ":", "name", "=", "nodes", "[", "0", "]", ".", "value", "nodes", "=", "nodes", "[", "2", ":", "]", "repeat", "=", "None", "if", "len", "(", "nodes", ")", ">=", "2", "and", "nodes", "[", "-", "1", "]", ".", "type", "==", "self", ".", "syms", ".", "Repeater", ":", "repeat", "=", "nodes", "[", "-", "1", "]", "nodes", "=", "nodes", "[", ":", "-", "1", "]", "# Now we've reduced it to: STRING | NAME [Details] | (...) 
| [...]", "pattern", "=", "self", ".", "compile_basic", "(", "nodes", ",", "repeat", ")", "if", "repeat", "is", "not", "None", ":", "assert", "repeat", ".", "type", "==", "self", ".", "syms", ".", "Repeater", "children", "=", "repeat", ".", "children", "child", "=", "children", "[", "0", "]", "if", "child", ".", "type", "==", "token", ".", "STAR", ":", "min", "=", "0", "max", "=", "pytree", ".", "HUGE", "elif", "child", ".", "type", "==", "token", ".", "PLUS", ":", "min", "=", "1", "max", "=", "pytree", ".", "HUGE", "elif", "child", ".", "type", "==", "token", ".", "LBRACE", ":", "assert", "children", "[", "-", "1", "]", ".", "type", "==", "token", ".", "RBRACE", "assert", "len", "(", "children", ")", "in", "(", "3", ",", "5", ")", "min", "=", "max", "=", "self", ".", "get_int", "(", "children", "[", "1", "]", ")", "if", "len", "(", "children", ")", "==", "5", ":", "max", "=", "self", ".", "get_int", "(", "children", "[", "3", "]", ")", "else", ":", "assert", "False", "if", "min", "!=", "1", "or", "max", "!=", "1", ":", "pattern", "=", "pattern", ".", "optimize", "(", ")", "pattern", "=", "pytree", ".", "WildcardPattern", "(", "[", "[", "pattern", "]", "]", ",", "min", "=", "min", ",", "max", "=", "max", ")", "if", "name", "is", "not", "None", ":", "pattern", ".", "name", "=", "name", "return", "pattern", ".", "optimize", "(", ")" ]
https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/lib2to3/patcomp.py#L67-L136
Ekultek/WhatWaf
b14e866b7537e9f81581f3b7e20dd5ed5e4dafc0
lib/settings.py
python
auto_assign
(url, ssl=False)
check if a protocol is given in the URL if it isn't we'll auto assign it
check if a protocol is given in the URL if it isn't we'll auto assign it
[ "check", "if", "a", "protocol", "is", "given", "in", "the", "URL", "if", "it", "isn", "t", "we", "ll", "auto", "assign", "it" ]
def auto_assign(url, ssl=False): """ check if a protocol is given in the URL if it isn't we'll auto assign it """ if PROTOCOL_DETECTION.search(url) is None: if ssl: lib.formatter.warn("no protocol discovered, assigning HTTPS (SSL)") return "https://{}".format(url.strip()) else: lib.formatter.warn("no protocol discovered assigning HTTP") return "http://{}".format(url.strip()) else: if ssl: lib.formatter.info("forcing HTTPS (SSL) connection") items = PROTOCOL_DETECTION.split(url) item = items[-1].split("://") item[0] = "https://" return ''.join(item) else: return url.strip()
[ "def", "auto_assign", "(", "url", ",", "ssl", "=", "False", ")", ":", "if", "PROTOCOL_DETECTION", ".", "search", "(", "url", ")", "is", "None", ":", "if", "ssl", ":", "lib", ".", "formatter", ".", "warn", "(", "\"no protocol discovered, assigning HTTPS (SSL)\"", ")", "return", "\"https://{}\"", ".", "format", "(", "url", ".", "strip", "(", ")", ")", "else", ":", "lib", ".", "formatter", ".", "warn", "(", "\"no protocol discovered assigning HTTP\"", ")", "return", "\"http://{}\"", ".", "format", "(", "url", ".", "strip", "(", ")", ")", "else", ":", "if", "ssl", ":", "lib", ".", "formatter", ".", "info", "(", "\"forcing HTTPS (SSL) connection\"", ")", "items", "=", "PROTOCOL_DETECTION", ".", "split", "(", "url", ")", "item", "=", "items", "[", "-", "1", "]", ".", "split", "(", "\"://\"", ")", "item", "[", "0", "]", "=", "\"https://\"", "return", "''", ".", "join", "(", "item", ")", "else", ":", "return", "url", ".", "strip", "(", ")" ]
https://github.com/Ekultek/WhatWaf/blob/b14e866b7537e9f81581f3b7e20dd5ed5e4dafc0/lib/settings.py#L539-L558
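A standalone sketch of the protocol check above; the real PROTOCOL_DETECTION regex lives elsewhere in WhatWaf's settings, so the pattern here is an assumption, and the SSL branch for URLs that already carry a protocol is omitted.

import re

PROTOCOL_DETECTION = re.compile(r"^https?://")  # assumed pattern

def auto_assign(url, ssl=False):
    if PROTOCOL_DETECTION.search(url) is None:
        return ("https://" if ssl else "http://") + url.strip()
    return url.strip()

print(auto_assign("example.com/login"))            # http://example.com/login
print(auto_assign("example.com/login", ssl=True))  # https://example.com/login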
almarklein/visvis
766ed97767b44a55a6ff72c742d7385e074d3d55
wobjects/textures.py
python
Texture3D._GetLimits
(self)
return Wobject._GetLimits(self, x1, x2, y1, y2, z1, z2)
Get the limits in world coordinates between which the object exists.
Get the limits in world coordinates between which the object exists.
[ "Get", "the", "limits", "in", "world", "coordinates", "between", "which", "the", "object", "exists", "." ]
def _GetLimits(self): """ Get the limits in world coordinates between which the object exists. """ # Obtain untransformed coords shape = self._texture1._dataRef.shape x1, x2 = -0.5, shape[2]-0.5 y1, y2 = -0.5, shape[1]-0.5 z1, z2 = -0.5, shape[0]-0.5 # There we are return Wobject._GetLimits(self, x1, x2, y1, y2, z1, z2)
[ "def", "_GetLimits", "(", "self", ")", ":", "# Obtain untransformed coords", "shape", "=", "self", ".", "_texture1", ".", "_dataRef", ".", "shape", "x1", ",", "x2", "=", "-", "0.5", ",", "shape", "[", "2", "]", "-", "0.5", "y1", ",", "y2", "=", "-", "0.5", ",", "shape", "[", "1", "]", "-", "0.5", "z1", ",", "z2", "=", "-", "0.5", ",", "shape", "[", "0", "]", "-", "0.5", "# There we are", "return", "Wobject", ".", "_GetLimits", "(", "self", ",", "x1", ",", "x2", ",", "y1", ",", "y2", ",", "z1", ",", "z2", ")" ]
https://github.com/almarklein/visvis/blob/766ed97767b44a55a6ff72c742d7385e074d3d55/wobjects/textures.py#L972-L983
heimlich1024/OD_CopyPasteExternal
943b993198e16d19f1fb4ba44049e498abf1e993
Modo/Kits/OD_ModoCopyPasteExternal/od_copy_paste_external/paste_from_external.py
python
execute
()
[]
def execute(): scene = modo.Scene() # Read temporary Data File od_data_file = tempfile.gettempdir() + os.sep + "ODVertexData.txt" if os.path.exists(od_data_file): f = open(od_data_file, "r") lines = f.readlines() f.close() up_axis = {0: "x", 1: "y", 2: "z"}[scene.sceneItem.channel("upAxis").get()] vert_line = [] poly_line = [] uv_maps = [] morph_maps = [] weight_maps = [] vertex_normals = [] count = 0 # Parse File to see what Data we have for line in lines: if line.startswith("VERTICES:"): vert_line.append([int(line.strip().split(":")[1].strip()), count]) if line.startswith("POLYGONS:"): poly_line.append([int(line.strip().split(":")[1].strip()), count]) if line.startswith("UV:"): uv_maps.append( [line.strip().split(":")[1:], count] ) # changed this to add the # of uv coordinates into the mix if line.startswith("MORPH"): morph_maps.append([line.split(":")[1].strip(), count]) if line.startswith("WEIGHT"): weight_maps.append([line.split(":")[1].strip(), count]) if line.startswith("VERTEXNORMALS"): vertex_normals.append([line.split(":")[1].strip(), count]) count += 1 # Add a new mesh object to the scene and grab the geometry object mesh = scene.selectedByType("mesh") if mesh: mesh = mesh[0] if len(mesh.geometry.vertices) > 0: mesh.geometry.internalMesh.Clear() else: mesh = scene.addMesh("ODCopy") # select new empty mesh scene.select(mesh) geo = mesh.geometry # generate points for verts in vert_line: points = [] for i in xrange(verts[1] + 1, verts[1] + verts[0] + 1): x = lines[i].split(" ") points.append( geo.vertices.new((float(x[0]), float(x[1]), float(x[2].strip()))) ) # Query Existing Materials all_surfaces = [] for material in scene.items("advancedMaterial"): all_surfaces.append(material.name) # generate Polys from the Points and assign materials for polygons in poly_line: polys = [] count = 0 for i in xrange(polygons[1] + 1, polygons[1] + polygons[0] + 1): pts = [] surf = (lines[i].split(";;")[1]).strip() ptch = (lines[i].split(";;")[2]).strip() if surf not in all_surfaces: all_surfaces.append(surf) scene.addMaterial(name=surf) for x in (lines[i].split(";;")[0]).strip().split(","): pts.append(int(x.strip())) ptype = lx.symbol.iPTYP_FACE if ptch == "CCSS": ptype = lx.symbol.iPTYP_PSUB elif ptch == "SUBD": ptype = lx.symbol.iPTYP_SUBD geo.polygons.new(vertices=(pts), reversed=False, polyType=ptype) geo.polygons[count].materialTag = surf count += 1 # Apply Weights for weight_map in weight_maps: weight = geo.vmaps.addMap(lx.symbol.i_VMAP_WEIGHT, weight_map[0]) for i in range(len(geo.vertices)): if lines[weight_map[1] + 1 + i].strip() != "None": weight[i] = float(lines[weight_map[1] + 1 + i].strip()) # Apply Morphs for morph_map in morph_maps: mo = geo.vmaps.addMorphMap(morph_map[0], False) for i in range(len(geo.vertices)): if lines[morph_map[1] + 1 + i].strip() != "None": mo.setAbsolutePosition( i, [ float(lines[i + 1].split(" ")[0]) + float(lines[morph_map[1] + 1 + i].split(" ")[0]), float(lines[i + 1].split(" ")[1]) + float(lines[morph_map[1] + 1 + i].split(" ")[1]), float(lines[i + 1].split(" ")[2]) + float(lines[morph_map[1] + 1 + i].split(" ")[2]), ], ) # Apply UV Maps for uv_map in uv_maps: uvm = geo.vmaps.addMap(lx.symbol.i_VMAP_TEXTUREUV, uv_map[0][0]) count = 0 for i in range(int(uv_map[0][1])): line = lines[uv_map[1] + 1 + count] split = line.split(":") # check the format to see if it has a point and poly classifier, # determining with that, whether the uv is discontinuous or continuous if len(split) > 3: geo.polygons[int(split[2])].setUV( (float(split[0].split(" ")[0]), float(split[0].split(" ")[1])), geo.vertices[int(split[4])], uvm, ) else: pass count += 1 # Apply Vertex Normals for vertex_normal in vertex_normals: normals = geo.vmaps.addVertexNormalMap(vertex_normal[0]) line_number_start = vertex_normal[1] for i in range(int(lines[line_number_start].split(":")[2])): values = lines[line_number_start + 1 + i].split(":") normal_value = tuple(float(x) for x in values[0].split(" ")) polygon = modo.MeshPolygon(int(values[2]), geo) for i in range(polygon.numVertices): vert_number = int(values[4].replace("\n", "")) normals.setNormal( normal_value, modo.MeshVertex(vert_number, geo), polygon ) geo.setMeshEdits() vertex_normal_maps = mesh.geometry.vmaps.getMapsByType(lx.symbol.i_VMAP_NORMAL) lx.eval('select.vertexMap "%s" norm replace' % vertex_normal_maps[0].name) lx.eval("vertMap.convertToHardEdge false") else: print("No Data File Available.")
[ "def", "execute", "(", ")", ":", "scene", "=", "modo", ".", "Scene", "(", ")", "# Read temporary Data File", "od_data_file", "=", "tempfile", ".", "gettempdir", "(", ")", "+", "os", ".", "sep", "+", "\"ODVertexData.txt\"", "if", "os", ".", "path", ".", "exists", "(", "od_data_file", ")", ":", "f", "=", "open", "(", "od_data_file", ",", "\"r\"", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "up_axis", "=", "{", "0", ":", "\"x\"", ",", "1", ":", "\"y\"", ",", "2", ":", "\"z\"", "}", "[", "scene", ".", "sceneItem", ".", "channel", "(", "\"upAxis\"", ")", ".", "get", "(", ")", "]", "vert_line", "=", "[", "]", "poly_line", "=", "[", "]", "uv_maps", "=", "[", "]", "morph_maps", "=", "[", "]", "weight_maps", "=", "[", "]", "vertex_normals", "=", "[", "]", "count", "=", "0", "# Parse File to see what Data we have", "for", "line", "in", "lines", ":", "if", "line", ".", "startswith", "(", "\"VERTICES:\"", ")", ":", "vert_line", ".", "append", "(", "[", "int", "(", "line", ".", "strip", "(", ")", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", ")", ",", "count", "]", ")", "if", "line", ".", "startswith", "(", "\"POLYGONS:\"", ")", ":", "poly_line", ".", "append", "(", "[", "int", "(", "line", ".", "strip", "(", ")", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", ")", ",", "count", "]", ")", "if", "line", ".", "startswith", "(", "\"UV:\"", ")", ":", "uv_maps", ".", "append", "(", "[", "line", ".", "strip", "(", ")", ".", "split", "(", "\":\"", ")", "[", "1", ":", "]", ",", "count", "]", ")", "# changed this to add the # of uv coordinates into the mix", "if", "line", ".", "startswith", "(", "\"MORPH\"", ")", ":", "morph_maps", ".", "append", "(", "[", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", ",", "count", "]", ")", "if", "line", ".", "startswith", "(", "\"WEIGHT\"", ")", ":", "weight_maps", ".", "append", "(", "[", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", ",", "count", "]", ")", "if", "line", ".", "startswith", "(", "\"VERTEXNORMALS\"", ")", ":", "vertex_normals", ".", "append", "(", "[", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "strip", "(", ")", ",", "count", "]", ")", "count", "+=", "1", "# Add a new mesh object to the scene and grab the geometry object", "mesh", "=", "scene", ".", "selectedByType", "(", "\"mesh\"", ")", "if", "mesh", ":", "mesh", "=", "mesh", "[", "0", "]", "if", "len", "(", "mesh", ".", "geometry", ".", "vertices", ")", ">", "0", ":", "mesh", ".", "geometry", ".", "internalMesh", ".", "Clear", "(", ")", "else", ":", "mesh", "=", "scene", ".", "addMesh", "(", "\"ODCopy\"", ")", "# select new empty mesh", "scene", ".", "select", "(", "mesh", ")", "geo", "=", "mesh", ".", "geometry", "# generate points", "for", "verts", "in", "vert_line", ":", "points", "=", "[", "]", "for", "i", "in", "xrange", "(", "verts", "[", "1", "]", "+", "1", ",", "verts", "[", "1", "]", "+", "verts", "[", "0", "]", "+", "1", ")", ":", "x", "=", "lines", "[", "i", "]", ".", "split", "(", "\" \"", ")", "points", ".", "append", "(", "geo", ".", "vertices", ".", "new", "(", "(", "float", "(", "x", "[", "0", "]", ")", ",", "float", "(", "x", "[", "1", "]", ")", ",", "float", "(", "x", "[", "2", "]", ".", "strip", "(", ")", ")", ")", ")", ")", "# Query Existing Materials", "all_surfaces", "=", "[", "]", "for", "material", "in", "scene", ".", "items", "(", "\"advancedMaterial\"", ")", ":", "all_surfaces", ".", "append", "(", 
"material", ".", "name", ")", "# generate Polys from the Points and assign materials", "for", "polygons", "in", "poly_line", ":", "polys", "=", "[", "]", "count", "=", "0", "for", "i", "in", "xrange", "(", "polygons", "[", "1", "]", "+", "1", ",", "polygons", "[", "1", "]", "+", "polygons", "[", "0", "]", "+", "1", ")", ":", "pts", "=", "[", "]", "surf", "=", "(", "lines", "[", "i", "]", ".", "split", "(", "\";;\"", ")", "[", "1", "]", ")", ".", "strip", "(", ")", "ptch", "=", "(", "lines", "[", "i", "]", ".", "split", "(", "\";;\"", ")", "[", "2", "]", ")", ".", "strip", "(", ")", "if", "surf", "not", "in", "all_surfaces", ":", "all_surfaces", ".", "append", "(", "surf", ")", "scene", ".", "addMaterial", "(", "name", "=", "surf", ")", "for", "x", "in", "(", "lines", "[", "i", "]", ".", "split", "(", "\";;\"", ")", "[", "0", "]", ")", ".", "strip", "(", ")", ".", "split", "(", "\",\"", ")", ":", "pts", ".", "append", "(", "int", "(", "x", ".", "strip", "(", ")", ")", ")", "ptype", "=", "lx", ".", "symbol", ".", "iPTYP_FACE", "if", "ptch", "==", "\"CCSS\"", ":", "ptype", "=", "lx", ".", "symbol", ".", "iPTYP_PSUB", "elif", "ptch", "==", "\"SUBD\"", ":", "ptype", "=", "lx", ".", "symbol", ".", "iPTYP_SUBD", "geo", ".", "polygons", ".", "new", "(", "vertices", "=", "(", "pts", ")", ",", "reversed", "=", "False", ",", "polyType", "=", "ptype", ")", "geo", ".", "polygons", "[", "count", "]", ".", "materialTag", "=", "surf", "count", "+=", "1", "# Apply Weights", "for", "weight_map", "in", "weight_maps", ":", "weight", "=", "geo", ".", "vmaps", ".", "addMap", "(", "lx", ".", "symbol", ".", "i_VMAP_WEIGHT", ",", "weight_map", "[", "0", "]", ")", "for", "i", "in", "range", "(", "len", "(", "geo", ".", "vertices", ")", ")", ":", "if", "lines", "[", "weight_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "strip", "(", ")", "!=", "\"None\"", ":", "weight", "[", "i", "]", "=", "float", "(", "lines", "[", "weight_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "strip", "(", ")", ")", "# Apply Morphs", "for", "morph_map", "in", "morph_maps", ":", "mo", "=", "geo", ".", "vmaps", ".", "addMorphMap", "(", "morph_map", "[", "0", "]", ",", "False", ")", "for", "i", "in", "range", "(", "len", "(", "geo", ".", "vertices", ")", ")", ":", "if", "lines", "[", "morph_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "strip", "(", ")", "!=", "\"None\"", ":", "mo", ".", "setAbsolutePosition", "(", "i", ",", "[", "float", "(", "lines", "[", "i", "+", "1", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", "+", "float", "(", "lines", "[", "morph_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", ",", "float", "(", "lines", "[", "i", "+", "1", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", ")", "+", "float", "(", "lines", "[", "morph_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", ")", ",", "float", "(", "lines", "[", "i", "+", "1", "]", ".", "split", "(", "\" \"", ")", "[", "2", "]", ")", "+", "float", "(", "lines", "[", "morph_map", "[", "1", "]", "+", "1", "+", "i", "]", ".", "split", "(", "\" \"", ")", "[", "2", "]", ")", ",", "]", ",", ")", "# Apply UV Maps", "for", "uv_map", "in", "uv_maps", ":", "uvm", "=", "geo", ".", "vmaps", ".", "addMap", "(", "lx", ".", "symbol", ".", "i_VMAP_TEXTUREUV", ",", "uv_map", "[", "0", "]", "[", "0", "]", ")", "count", "=", "0", "for", "i", "in", "range", "(", "int", "(", "uv_map", "[", "0", "]", "[", "1", "]", ")", ")", ":", "line", "=", "lines", "[", "uv_map", "[", 
"1", "]", "+", "1", "+", "count", "]", "split", "=", "line", ".", "split", "(", "\":\"", ")", "# check the format to see if it has a point and poly classifier,", "# determining with that, whether the uv is discontinuous or continuous", "if", "len", "(", "split", ")", ">", "3", ":", "geo", ".", "polygons", "[", "int", "(", "split", "[", "2", "]", ")", "]", ".", "setUV", "(", "(", "float", "(", "split", "[", "0", "]", ".", "split", "(", "\" \"", ")", "[", "0", "]", ")", ",", "float", "(", "split", "[", "0", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", ")", ")", ",", "geo", ".", "vertices", "[", "int", "(", "split", "[", "4", "]", ")", "]", ",", "uvm", ",", ")", "else", ":", "pass", "count", "+=", "1", "# Apply Vertex Normals", "for", "vertex_normal", "in", "vertex_normals", ":", "normals", "=", "geo", ".", "vmaps", ".", "addVertexNormalMap", "(", "vertex_normal", "[", "0", "]", ")", "line_number_start", "=", "vertex_normal", "[", "1", "]", "for", "i", "in", "range", "(", "int", "(", "lines", "[", "line_number_start", "]", ".", "split", "(", "\":\"", ")", "[", "2", "]", ")", ")", ":", "values", "=", "lines", "[", "line_number_start", "+", "1", "+", "i", "]", ".", "split", "(", "\":\"", ")", "normal_value", "=", "tuple", "(", "float", "(", "x", ")", "for", "x", "in", "values", "[", "0", "]", ".", "split", "(", "\" \"", ")", ")", "polygon", "=", "modo", ".", "MeshPolygon", "(", "int", "(", "values", "[", "2", "]", ")", ",", "geo", ")", "for", "i", "in", "range", "(", "polygon", ".", "numVertices", ")", ":", "vert_number", "=", "int", "(", "values", "[", "4", "]", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", "normals", ".", "setNormal", "(", "normal_value", ",", "modo", ".", "MeshVertex", "(", "vert_number", ",", "geo", ")", ",", "polygon", ")", "geo", ".", "setMeshEdits", "(", ")", "vertex_normal_maps", "=", "mesh", ".", "geometry", ".", "vmaps", ".", "getMapsByType", "(", "lx", ".", "symbol", ".", "i_VMAP_NORMAL", ")", "lx", ".", "eval", "(", "'select.vertexMap \"%s\" norm replace'", "%", "vertex_normal_maps", "[", "0", "]", ".", "name", ")", "lx", ".", "eval", "(", "\"vertMap.convertToHardEdge false\"", ")", "else", ":", "print", "(", "\"No Data File Available.\"", ")" ]
https://github.com/heimlich1024/OD_CopyPasteExternal/blob/943b993198e16d19f1fb4ba44049e498abf1e993/Modo/Kits/OD_ModoCopyPasteExternal/od_copy_paste_external/paste_from_external.py#L18-L163
uber/petastorm
3579e68b86d17b57339efd0da1e8a56033d121d1
petastorm/etl/dataset_metadata.py
python
infer_or_load_unischema
(dataset)
Try to recover Unischema object stored by ``materialize_dataset`` function. If it can be loaded, infer Unischema from native Parquet schema
Try to recover Unischema object stored by ``materialize_dataset`` function. If it can be loaded, infer Unischema from native Parquet schema
[ "Try", "to", "recover", "Unischema", "object", "stored", "by", "materialize_dataset", "function", ".", "If", "it", "can", "be", "loaded", "infer", "Unischema", "from", "native", "Parquet", "schema" ]
def infer_or_load_unischema(dataset): """Try to recover Unischema object stored by ``materialize_dataset`` function. If it can be loaded, infer Unischema from native Parquet schema""" try: return get_schema(dataset) except PetastormMetadataError: logger.info('Failed loading Unischema from metadata in %s. Assuming the dataset was not created with ' 'Petastorm. Will try to construct from native Parquet schema.') return Unischema.from_arrow_schema(dataset)
[ "def", "infer_or_load_unischema", "(", "dataset", ")", ":", "try", ":", "return", "get_schema", "(", "dataset", ")", "except", "PetastormMetadataError", ":", "logger", ".", "info", "(", "'Failed loading Unischema from metadata in %s. Assuming the dataset was not created with '", "'Petastorm. Will try to construct from native Parquet schema.'", ")", "return", "Unischema", ".", "from_arrow_schema", "(", "dataset", ")" ]
https://github.com/uber/petastorm/blob/3579e68b86d17b57339efd0da1e8a56033d121d1/petastorm/etl/dataset_metadata.py#L410-L418
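Note that the logger.info call in this record includes a '%s' placeholder but passes no argument, so the literal '%s' ends up in the log. A sketch of the presumably intended call (dataset_path is a hypothetical stand-in):

import logging

logger = logging.getLogger(__name__)
dataset_path = "/data/my_dataset"  # hypothetical
logger.info("Failed loading Unischema from metadata in %s. Assuming the dataset "
            "was not created with Petastorm.", dataset_path)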
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
tensorflow_dl_models/tutorials/image/imagenet/classify_image.py
python
maybe_download_and_extract
()
Download and extract model tar file.
Download and extract model tar file.
[ "Download", "and", "extract", "model", "tar", "file", "." ]
def maybe_download_and_extract(): """Download and extract model tar file.""" dest_directory = FLAGS.model_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % ( filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory)
[ "def", "maybe_download_and_extract", "(", ")", ":", "dest_directory", "=", "FLAGS", ".", "model_dir", "if", "not", "os", ".", "path", ".", "exists", "(", "dest_directory", ")", ":", "os", ".", "makedirs", "(", "dest_directory", ")", "filename", "=", "DATA_URL", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "dest_directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "def", "_progress", "(", "count", ",", "block_size", ",", "total_size", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r>> Downloading %s %.1f%%'", "%", "(", "filename", ",", "float", "(", "count", "*", "block_size", ")", "/", "float", "(", "total_size", ")", "*", "100.0", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "filepath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "DATA_URL", ",", "filepath", ",", "_progress", ")", "print", "(", ")", "statinfo", "=", "os", ".", "stat", "(", "filepath", ")", "print", "(", "'Successfully downloaded'", ",", "filename", ",", "statinfo", ".", "st_size", ",", "'bytes.'", ")", "tarfile", ".", "open", "(", "filepath", ",", "'r:gz'", ")", ".", "extractall", "(", "dest_directory", ")" ]
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/tutorials/image/imagenet/classify_image.py#L170-L186
thinkle/gourmet
8af29c8ded24528030e5ae2ea3461f61c1e5a575
gourmet/gtk_extras/pango_buffer.py
python
PangoBuffer.set_text
(self, text: Union[str, bytes])
[]
def set_text(self, text: Union[str, bytes]) -> None: super().set_text('') # Clear the widget if isinstance(text, bytes): # data loaded from the database are bytes, not str text = text.decode("utf-8") self.insert_markup(self.get_start_iter(), text, -1)
[ "def", "set_text", "(", "self", ",", "text", ":", "Union", "[", "str", ",", "bytes", "]", ")", "->", "None", ":", "super", "(", ")", ".", "set_text", "(", "''", ")", "# Clear the widget", "if", "isinstance", "(", "text", ",", "bytes", ")", ":", "# data loaded from the database are bytes, not str", "text", "=", "text", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "insert_markup", "(", "self", ".", "get_start_iter", "(", ")", ",", "text", ",", "-", "1", ")" ]
https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/gtk_extras/pango_buffer.py#L34-L39
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/lib/format.py
python
dtype_to_descr
(dtype)
Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype.
Get a serializable descriptor from the dtype.
[ "Get", "a", "serializable", "descriptor", "from", "the", "dtype", "." ]
def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str
[ "def", "dtype_to_descr", "(", "dtype", ")", ":", "if", "dtype", ".", "names", "is", "not", "None", ":", "# This is a record array. The .descr is fine. XXX: parts of the", "# record array with an empty name, like padding bytes, still get", "# fiddled with. This needs to be fixed in the C implementation of", "# dtype().", "return", "dtype", ".", "descr", "else", ":", "return", "dtype", ".", "str" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/lib/format.py#L223-L253
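A quick interactive check of the round-trip problem the docstring describes; the printed values are what NumPy shows on a little-endian build.

import numpy as np

np.dtype('float32').descr   # [('', '<f4')] -- looks like a one-field record array
np.dtype('float32').str     # '<f4'         -- the form that round-trips through dtype()
np.dtype(np.dtype('float32').str) == np.dtype('float32')   # True

rec = np.dtype([('x', '<f4'), ('y', '<i8')])
np.dtype(rec.descr) == rec  # True -- .descr is fine for genuine record dtypes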
pfnet/pytorch-pfn-extras
b7ced31c1e78a0527c36d745ca091ec270da49e3
pytorch_pfn_extras/dataset/tabular/tabular_dataset.py
python
TabularDataset.fetch
(self)
Fetch data. This method fetches all data of the dataset/view. Note that this method returns a column-major data (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... c[3]])`, :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or :obj:`[a[0], ..., a[3]]`). Returns: If :attr:`mode` is :class:`tuple`, this method returns a tuple of lists/arrays. If :attr:`mode` is :class:`dict`, this method returns a dict of lists/arrays.
Fetch data.
[ "Fetch", "data", "." ]
def fetch(self): """Fetch data. This method fetches all data of the dataset/view. Note that this method returns a column-major data (i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... c[3]])`, :obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or :obj:`[a[0], ..., a[3]]`). Returns: If :attr:`mode` is :class:`tuple`, this method returns a tuple of lists/arrays. If :attr:`mode` is :class:`dict`, this method returns a dict of lists/arrays. """ examples = self.get_examples(None, None) if self.mode is tuple: return examples elif self.mode is dict: return dict(zip(self.keys, examples)) elif self.mode is None: return examples[0]
[ "def", "fetch", "(", "self", ")", ":", "examples", "=", "self", ".", "get_examples", "(", "None", ",", "None", ")", "if", "self", ".", "mode", "is", "tuple", ":", "return", "examples", "elif", "self", ".", "mode", "is", "dict", ":", "return", "dict", "(", "zip", "(", "self", ".", "keys", ",", "examples", ")", ")", "elif", "self", ".", "mode", "is", "None", ":", "return", "examples", "[", "0", "]" ]
https://github.com/pfnet/pytorch-pfn-extras/blob/b7ced31c1e78a0527c36d745ca091ec270da49e3/pytorch_pfn_extras/dataset/tabular/tabular_dataset.py#L130-L151
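A minimal illustration (with assumed data, not a real dataset) of the three column-major shapes fetch() can return:

examples = ([0, 1, 2, 3], ['a', 'b', 'c', 'd'])   # one sequence per column, as get_examples(None, None) yields
keys = ('idx', 'label')

as_tuple = examples                   # mode is tuple
as_dict = dict(zip(keys, examples))   # mode is dict -> {'idx': [0, 1, 2, 3], 'label': ['a', 'b', 'c', 'd']}
single_column = examples[0]           # mode is None (single-column dataset)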
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Parser/spark.py
python
GenericScanner.t_default
(self, s)
r'( . | \n )+
r'( . | \n )+
[ "r", "(", ".", "|", "\\", "n", ")", "+" ]
def t_default(self, s): r'( . | \n )+' print "Specification error: unmatched input" raise SystemExit
[ "def", "t_default", "(", "self", ",", "s", ")", ":", "print", "\"Specification error: unmatched input\"", "raise", "SystemExit" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Parser/spark.py#L79-L82
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/keystoneng.py
python
project_list
(auth=None, **kwargs)
return cloud.list_projects(**kwargs)
List projects CLI Example: .. code-block:: bash salt '*' keystoneng.project_list salt '*' keystoneng.project_list domain_id=b62e76fbeeff4e8fb77073f591cf211e
List projects
[ "List", "projects" ]
def project_list(auth=None, **kwargs): """ List projects CLI Example: .. code-block:: bash salt '*' keystoneng.project_list salt '*' keystoneng.project_list domain_id=b62e76fbeeff4e8fb77073f591cf211e """ cloud = get_openstack_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.list_projects(**kwargs)
[ "def", "project_list", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_openstack_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "return", "cloud", ".", "list_projects", "(", "*", "*", "kwargs", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/keystoneng.py#L287-L300
vprimachenko/Sublime-Colorcoder
a333b7527561deb0a2919c4db54df35ccb0e524d
colorcoder.py
python
colorshemeemodifier.modify_color_scheme
(l,s,read_original = False)
[]
def modify_color_scheme(l,s,read_original = False): read_original = read_original and sublime.load_settings("Preferences.sublime-settings").has("original_color_scheme") if read_original and sublime.load_settings("Preferences.sublime-settings").get('color_scheme').find('/Colorcoder/') == -1: read_original = False if read_original and sublime.load_settings("Preferences.sublime-settings").get('original_color_scheme').find('/Colorcoder/') != -1: print("original theme already colorcoded, abort") return global modification_running if modification_running: return modification_running = True name = sublime.load_settings("Preferences.sublime-settings").get("original_color_scheme") if read_original else sublime.active_window().active_view().settings().get('color_scheme') try: cs = plistlib.readPlistFromBytes(sublime.load_binary_resource(name)) tokenclr = "#000000" for rule in cs["settings"]: if "scope" not in rule and "name" not in rule: bgc = rule["settings"]["background"] r = int(bgc[1:3],16) g = int(bgc[3:5],16) b = int(bgc[5:7],16) if b>0: b = b-1 elif g>0: g = g-1 elif r>0: r = r-1 else: rule["settings"]["background"] = "#000001" tokenclr = "#%02x%02x%02x" % (r,g,b) break cs["name"] = cs["name"] + " (Colorcoded)" for x in range(0,256): cs["settings"].append(dict( scope="cc0x%x" % x, settings=dict( foreground="#"+''.join(map(lambda c: "%02x" % int(256*c),colorsys.hls_to_rgb(x/256., l, s))), background=tokenclr ) )) newname = "/Colorcoder/%s (Colorcoded).tmTheme" % re.search("/([^/]+).tmTheme$", name).group(1) plistlib.writePlist(cs,"%s%s" % (sublime.packages_path(),newname)) sublime.load_settings("Preferences.sublime-settings").set("original_color_scheme", name) sublime.load_settings("Preferences.sublime-settings").set("color_scheme","Packages%s" % newname) sublime.save_settings("Preferences.sublime-settings") except Exception as e: sublime.error_message("Colorcoder was not able to parse the colorscheme\nCheck the console for the actual error message.") sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True}) print(e) finally: modification_running = False
[ "def", "modify_color_scheme", "(", "l", ",", "s", ",", "read_original", "=", "False", ")", ":", "read_original", "=", "read_original", "and", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "has", "(", "\"original_color_scheme\"", ")", "if", "read_original", "and", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "get", "(", "'color_scheme'", ")", ".", "find", "(", "'/Colorcoder/'", ")", "==", "-", "1", ":", "read_original", "=", "False", "if", "read_original", "and", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "get", "(", "'original_color_scheme'", ")", ".", "find", "(", "'/Colorcoder/'", ")", "!=", "-", "1", ":", "print", "(", "\"original theme already colorcoded, abort\"", ")", "return", "global", "modification_running", "if", "modification_running", ":", "return", "modification_running", "=", "True", "name", "=", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "get", "(", "\"original_color_scheme\"", ")", "if", "read_original", "else", "sublime", ".", "active_window", "(", ")", ".", "active_view", "(", ")", ".", "settings", "(", ")", ".", "get", "(", "'color_scheme'", ")", "try", ":", "cs", "=", "plistlib", ".", "readPlistFromBytes", "(", "sublime", ".", "load_binary_resource", "(", "name", ")", ")", "tokenclr", "=", "\"#000000\"", "for", "rule", "in", "cs", "[", "\"settings\"", "]", ":", "if", "\"scope\"", "not", "in", "rule", "and", "\"name\"", "not", "in", "rule", ":", "bgc", "=", "rule", "[", "\"settings\"", "]", "[", "\"background\"", "]", "r", "=", "int", "(", "bgc", "[", "1", ":", "3", "]", ",", "16", ")", "g", "=", "int", "(", "bgc", "[", "3", ":", "5", "]", ",", "16", ")", "b", "=", "int", "(", "bgc", "[", "5", ":", "7", "]", ",", "16", ")", "if", "b", ">", "0", ":", "b", "=", "b", "-", "1", "elif", "g", ">", "0", ":", "g", "=", "g", "-", "1", "elif", "r", ">", "0", ":", "r", "=", "r", "-", "1", "else", ":", "rule", "[", "\"settings\"", "]", "[", "\"background\"", "]", "=", "\"#000001\"", "tokenclr", "=", "\"#%02x%02x%02x\"", "%", "(", "r", ",", "g", ",", "b", ")", "break", "cs", "[", "\"name\"", "]", "=", "cs", "[", "\"name\"", "]", "+", "\" (Colorcoded)\"", "for", "x", "in", "range", "(", "0", ",", "256", ")", ":", "cs", "[", "\"settings\"", "]", ".", "append", "(", "dict", "(", "scope", "=", "\"cc0x%x\"", "%", "x", ",", "settings", "=", "dict", "(", "foreground", "=", "\"#\"", "+", "''", ".", "join", "(", "map", "(", "lambda", "c", ":", "\"%02x\"", "%", "int", "(", "256", "*", "c", ")", ",", "colorsys", ".", "hls_to_rgb", "(", "x", "/", "256.", ",", "l", ",", "s", ")", ")", ")", ",", "background", "=", "tokenclr", ")", ")", ")", "newname", "=", "\"/Colorcoder/%s (Colorcoded).tmTheme\"", "%", "re", ".", "search", "(", "\"/([^/]+).tmTheme$\"", ",", "name", ")", ".", "group", "(", "1", ")", "plistlib", ".", "writePlist", "(", "cs", ",", "\"%s%s\"", "%", "(", "sublime", ".", "packages_path", "(", ")", ",", "newname", ")", ")", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "set", "(", "\"original_color_scheme\"", ",", "name", ")", "sublime", ".", "load_settings", "(", "\"Preferences.sublime-settings\"", ")", ".", "set", "(", "\"color_scheme\"", ",", "\"Packages%s\"", "%", "newname", ")", "sublime", ".", "save_settings", "(", "\"Preferences.sublime-settings\"", ")", "except", "Exception", "as", "e", ":", "sublime", ".", "error_message", "(", "\"Colorcoder was not able to parse the 
colorscheme\\nCheck the console for the actual error message.\"", ")", "sublime", ".", "active_window", "(", ")", ".", "run_command", "(", "\"show_panel\"", ",", "{", "\"panel\"", ":", "\"console\"", ",", "\"toggle\"", ":", "True", "}", ")", "print", "(", "e", ")", "finally", ":", "modification_running", "=", "False" ]
https://github.com/vprimachenko/Sublime-Colorcoder/blob/a333b7527561deb0a2919c4db54df35ccb0e524d/colorcoder.py#L184-L240
lutris/lutris
66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a
lutris/runners/dosbox.py
python
dosbox.working_dir
(self)
return super().working_dir
Return the working directory to use when running the game.
Return the working directory to use when running the game.
[ "Return", "the", "working", "directory", "to", "use", "when", "running", "the", "game", "." ]
def working_dir(self): """Return the working directory to use when running the game.""" option = self.game_config.get("working_dir") if option: return os.path.expanduser(option) if self.main_file: return os.path.dirname(self.main_file) return super().working_dir
[ "def", "working_dir", "(", "self", ")", ":", "option", "=", "self", ".", "game_config", ".", "get", "(", "\"working_dir\"", ")", "if", "option", ":", "return", "os", ".", "path", ".", "expanduser", "(", "option", ")", "if", "self", ".", "main_file", ":", "return", "os", ".", "path", ".", "dirname", "(", "self", ".", "main_file", ")", "return", "super", "(", ")", ".", "working_dir" ]
https://github.com/lutris/lutris/blob/66675a4d5537f6b2a2ba2b6df0b3cdf8924c823a/lutris/runners/dosbox.py#L127-L134
timkpaine/paperboy
6c0854b2c0dad139c25153e520ca79ffed820fa4
paperboy/scheduler/local/schedule.py
python
_parse_dow
(now, field)
return False
[]
def _parse_dow(now, field): # run every time? if str(field) == '*' or str(field).upper() == now.upper(): return True return False
[ "def", "_parse_dow", "(", "now", ",", "field", ")", ":", "# run every time?", "if", "str", "(", "field", ")", "==", "'*'", "or", "str", "(", "field", ")", ".", "upper", "(", ")", "==", "now", ".", "upper", "(", ")", ":", "return", "True", "return", "False" ]
https://github.com/timkpaine/paperboy/blob/6c0854b2c0dad139c25153e520ca79ffed820fa4/paperboy/scheduler/local/schedule.py#L58-L62
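Assuming _parse_dow is imported from the record's module, its matching rule is easy to exercise:

_parse_dow('MON', '*')      # True -- '*' matches every day
_parse_dow('MON', 'mon')    # True -- the comparison is case-insensitive
_parse_dow('MON', 'TUE')    # False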
quantumblacklabs/causalnex
127d9324a3d68c1795299c7522f22cdea880f344
causalnex/ebaybbn/bbn.py
python
JoinTreeCliqueNode.variable_names
(self)
return sorted(var_names)
Return the set of variable names that this clique represents
Return the set of variable names that this clique represents
[ "Return", "the", "set", "of", "variable", "names", "that", "this", "clique", "represents" ]
def variable_names(self): """Return the set of variable names that this clique represents""" var_names = [] for node in self.clique.nodes: var_names.append(node.variable_name) return sorted(var_names)
[ "def", "variable_names", "(", "self", ")", ":", "var_names", "=", "[", "]", "for", "node", "in", "self", ".", "clique", ".", "nodes", ":", "var_names", ".", "append", "(", "node", ".", "variable_name", ")", "return", "sorted", "(", "var_names", ")" ]
https://github.com/quantumblacklabs/causalnex/blob/127d9324a3d68c1795299c7522f22cdea880f344/causalnex/ebaybbn/bbn.py#L493-L499
ring04h/dirfuzz
52373582c465a395769473d14ad6725b1f9e4156
libs/requests/packages/urllib3/connectionpool.py
python
HTTPConnectionPool.is_same_host
(self, url)
return (scheme, host, port) == (self.scheme, self.host, self.port)
Check if the given ``url`` is a member of the same host as this connection pool.
Check if the given ``url`` is a member of the same host as this connection pool.
[ "Check", "if", "the", "given", "url", "is", "a", "member", "of", "the", "same", "host", "as", "this", "connection", "pool", "." ]
def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ if url.startswith('/'): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port)
[ "def", "is_same_host", "(", "self", ",", "url", ")", ":", "if", "url", ".", "startswith", "(", "'/'", ")", ":", "return", "True", "# TODO: Add optional support for socket.gethostbyname checking.", "scheme", ",", "host", ",", "port", "=", "get_host", "(", "url", ")", "# Use explicit default port for comparison when none is given", "if", "self", ".", "port", "and", "not", "port", ":", "port", "=", "port_by_scheme", ".", "get", "(", "scheme", ")", "elif", "not", "self", ".", "port", "and", "port", "==", "port_by_scheme", ".", "get", "(", "scheme", ")", ":", "port", "=", "None", "return", "(", "scheme", ",", "host", ",", "port", ")", "==", "(", "self", ".", "scheme", ",", "self", ".", "host", ",", "self", ".", "port", ")" ]
https://github.com/ring04h/dirfuzz/blob/52373582c465a395769473d14ad6725b1f9e4156/libs/requests/packages/urllib3/connectionpool.py#L383-L400
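A hedged sketch of the normalization, where pool stands for a hypothetical HTTPConnectionPool created for http://example.com:

pool.is_same_host('/relative/path')            # True  -- relative URLs always count as same-host
pool.is_same_host('http://example.com/x')      # True  -- missing port is normalized to the scheme default
pool.is_same_host('http://example.com:80/x')   # True  -- explicit default port compares equal
pool.is_same_host('https://example.com/x')     # False -- different scheme (and default port)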
NVIDIA/DeepLearningExamples
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
PyTorch/Translation/GNMT/seq2seq/models/gnmt.py
python
GNMT.__init__
(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2, batch_first=False, share_embedding=True)
Constructor for the GNMT v2 model. :param vocab_size: size of vocabulary (number of tokens) :param hidden_size: internal hidden size of the model :param num_layers: number of layers, applies to both encoder and decoder :param dropout: probability of dropout (in encoder and decoder) :param batch_first: if True the model uses (batch,seq,feature) tensors, if false the model uses (seq, batch, feature) :param share_embedding: if True embeddings are shared between encoder and decoder
Constructor for the GNMT v2 model.
[ "Constructor", "for", "the", "GNMT", "v2", "model", "." ]
def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2, batch_first=False, share_embedding=True): """ Constructor for the GNMT v2 model. :param vocab_size: size of vocabulary (number of tokens) :param hidden_size: internal hidden size of the model :param num_layers: number of layers, applies to both encoder and decoder :param dropout: probability of dropout (in encoder and decoder) :param batch_first: if True the model uses (batch,seq,feature) tensors, if false the model uses (seq, batch, feature) :param share_embedding: if True embeddings are shared between encoder and decoder """ super(GNMT, self).__init__(batch_first=batch_first) if share_embedding: embedder = nn.Embedding(vocab_size, hidden_size, padding_idx=config.PAD) nn.init.uniform_(embedder.weight.data, -0.1, 0.1) else: embedder = None self.encoder = ResidualRecurrentEncoder(vocab_size, hidden_size, num_layers, dropout, batch_first, embedder) self.decoder = ResidualRecurrentDecoder(vocab_size, hidden_size, num_layers, dropout, batch_first, embedder)
[ "def", "__init__", "(", "self", ",", "vocab_size", ",", "hidden_size", "=", "1024", ",", "num_layers", "=", "4", ",", "dropout", "=", "0.2", ",", "batch_first", "=", "False", ",", "share_embedding", "=", "True", ")", ":", "super", "(", "GNMT", ",", "self", ")", ".", "__init__", "(", "batch_first", "=", "batch_first", ")", "if", "share_embedding", ":", "embedder", "=", "nn", ".", "Embedding", "(", "vocab_size", ",", "hidden_size", ",", "padding_idx", "=", "config", ".", "PAD", ")", "nn", ".", "init", ".", "uniform_", "(", "embedder", ".", "weight", ".", "data", ",", "-", "0.1", ",", "0.1", ")", "else", ":", "embedder", "=", "None", "self", ".", "encoder", "=", "ResidualRecurrentEncoder", "(", "vocab_size", ",", "hidden_size", ",", "num_layers", ",", "dropout", ",", "batch_first", ",", "embedder", ")", "self", ".", "decoder", "=", "ResidualRecurrentDecoder", "(", "vocab_size", ",", "hidden_size", ",", "num_layers", ",", "dropout", ",", "batch_first", ",", "embedder", ")" ]
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/PyTorch/Translation/GNMT/seq2seq/models/gnmt.py#L34-L65
misterch0c/shadowbroker
e3a069bea47a2c1009697941ac214adc6f90aa8d
windows/Resources/Python/Core/Lib/logging/handlers.py
python
NTEventLogHandler.close
(self)
Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name.
Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name.
[ "Clean", "up", "this", "handler", ".", "You", "can", "remove", "the", "application", "name", "from", "the", "registry", "as", "a", "source", "of", "event", "log", "entries", ".", "However", "if", "you", "do", "this", "you", "will", "not", "be", "able", "to", "see", "the", "events", "as", "you", "intended", "in", "the", "Event", "Log", "Viewer", "-", "it", "needs", "to", "be", "able", "to", "access", "the", "registry", "to", "get", "the", "DLL", "name", "." ]
def close(self): """ Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name. """ logging.Handler.close(self)
[ "def", "close", "(", "self", ")", ":", "logging", ".", "Handler", ".", "close", "(", "self", ")" ]
https://github.com/misterch0c/shadowbroker/blob/e3a069bea47a2c1009697941ac214adc6f90aa8d/windows/Resources/Python/Core/Lib/logging/handlers.py#L909-L919
abulka/pynsource
886bf4ea05bede67fe7846185fbe78704c2a0e8a
Research/hexmvc/lib/architecture_support.py
python
multicast.__bool__
(self)
return operator.truth(reduce(lambda a, b: a and b, list(self.values()), 1))
A multicast is logically true if all delegate attributes are logically true
A multicast is logically true if all delegate attributes are logically true
[ "A", "multicast", "is", "logically", "true", "if", "all", "delegate", "attributes", "are", "logically", "true" ]
def __bool__(self): "A multicast is logically true if all delegate attributes are logically true" return operator.truth(reduce(lambda a, b: a and b, list(self.values()), 1))
[ "def", "__bool__", "(", "self", ")", ":", "return", "operator", ".", "truth", "(", "reduce", "(", "lambda", "a", ",", "b", ":", "a", "and", "b", ",", "list", "(", "self", ".", "values", "(", ")", ")", ",", "1", ")", ")" ]
https://github.com/abulka/pynsource/blob/886bf4ea05bede67fe7846185fbe78704c2a0e8a/Research/hexmvc/lib/architecture_support.py#L25-L28
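The reduce-with-and idiom can be checked in isolation; in Python 3 reduce must come from functools, which the record's module is assumed to import elsewhere.

from functools import reduce
import operator

operator.truth(reduce(lambda a, b: a and b, [1, 'x'], 1))      # True  -- every delegate is truthy
operator.truth(reduce(lambda a, b: a and b, [1, [], 'x'], 1))  # False -- one falsy delegate breaks the conjunction
operator.truth(reduce(lambda a, b: a and b, [], 1))            # True  -- the initializer 1 covers the empty case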
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/rapi/client.py
python
GanetiRapiClient.DeleteInstanceTags
(self, instance, tags, dry_run=False, reason=None)
return self._SendRequest(HTTP_DELETE, ("/%s/instances/%s/tags" % (GANETI_RAPI_VERSION, instance)), query, None)
Deletes tags from an instance. @type instance: str @param instance: instance to delete tags from @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run @type reason: string @param reason: the reason for executing this operation @rtype: string @return: job id
Deletes tags from an instance.
[ "Deletes", "tags", "from", "an", "instance", "." ]
def DeleteInstanceTags(self, instance, tags, dry_run=False, reason=None): """Deletes tags from an instance. @type instance: str @param instance: instance to delete tags from @type tags: list of str @param tags: tags to delete @type dry_run: bool @param dry_run: whether to perform a dry run @type reason: string @param reason: the reason for executing this operation @rtype: string @return: job id """ query = [("tag", t) for t in tags] _AppendDryRunIf(query, dry_run) _AppendReason(query, reason) return self._SendRequest(HTTP_DELETE, ("/%s/instances/%s/tags" % (GANETI_RAPI_VERSION, instance)), query, None)
[ "def", "DeleteInstanceTags", "(", "self", ",", "instance", ",", "tags", ",", "dry_run", "=", "False", ",", "reason", "=", "None", ")", ":", "query", "=", "[", "(", "\"tag\"", ",", "t", ")", "for", "t", "in", "tags", "]", "_AppendDryRunIf", "(", "query", ",", "dry_run", ")", "_AppendReason", "(", "query", ",", "reason", ")", "return", "self", ".", "_SendRequest", "(", "HTTP_DELETE", ",", "(", "\"/%s/instances/%s/tags\"", "%", "(", "GANETI_RAPI_VERSION", ",", "instance", ")", ")", ",", "query", ",", "None", ")" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/rapi/client.py#L1125-L1146
graalvm/mx
29c0debab406352df3af246be2f8973be5db69ae
mx.py
python
SuiteConstituent.__ne__
(self, other)
return self._comparison_key() != other._comparison_key()
[]
def __ne__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self._comparison_key() != other._comparison_key()
[ "def", "__ne__", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "self", ".", "__class__", ")", ":", "return", "NotImplemented", "return", "self", ".", "_comparison_key", "(", ")", "!=", "other", ".", "_comparison_key", "(", ")" ]
https://github.com/graalvm/mx/blob/29c0debab406352df3af246be2f8973be5db69ae/mx.py#L1325-L1328
sony/nnabla-examples
068be490aacf73740502a1c3b10f8b2d15a52d32
object-detection/centernet/src/lib/utils/voc_eval_lib/voc_datasets/imdb.py
python
imdb.competition_mode
(self, on)
Turn competition mode on or off.
Turn competition mode on or off.
[ "Turn", "competition", "mode", "on", "or", "off", "." ]
def competition_mode(self, on): """Turn competition mode on or off.""" pass
[ "def", "competition_mode", "(", "self", ",", "on", ")", ":", "pass" ]
https://github.com/sony/nnabla-examples/blob/068be490aacf73740502a1c3b10f8b2d15a52d32/object-detection/centernet/src/lib/utils/voc_eval_lib/voc_datasets/imdb.py#L284-L286
docker-archive/docker-registry
f93b432d3fc7befa508ab27a590e6d0f78c86401
docker_registry/tags.py
python
get_repository_tag_json
(namespace, repository, tag)
return toolkit.response(data)
[]
def get_repository_tag_json(namespace, repository, tag): json_path = store.repository_tag_json_path(namespace, repository, tag) data = {'last_update': None, 'docker_version': None, 'docker_go_version': None, 'arch': 'amd64', 'os': 'linux', 'kernel': None} try: # Note(dmp): unicode patch data = store.get_json(json_path) except exceptions.FileNotFoundError: # We ignore the error, we'll serve the default json declared above pass return toolkit.response(data)
[ "def", "get_repository_tag_json", "(", "namespace", ",", "repository", ",", "tag", ")", ":", "json_path", "=", "store", ".", "repository_tag_json_path", "(", "namespace", ",", "repository", ",", "tag", ")", "data", "=", "{", "'last_update'", ":", "None", ",", "'docker_version'", ":", "None", ",", "'docker_go_version'", ":", "None", ",", "'arch'", ":", "'amd64'", ",", "'os'", ":", "'linux'", ",", "'kernel'", ":", "None", "}", "try", ":", "# Note(dmp): unicode patch", "data", "=", "store", ".", "get_json", "(", "json_path", ")", "except", "exceptions", ".", "FileNotFoundError", ":", "# We ignore the error, we'll serve the default json declared above", "pass", "return", "toolkit", ".", "response", "(", "data", ")" ]
https://github.com/docker-archive/docker-registry/blob/f93b432d3fc7befa508ab27a590e6d0f78c86401/docker_registry/tags.py#L147-L161
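The read-or-serve-defaults pattern restated as a runnable sketch; DictStore and its exception are toy stand-ins for the record's store driver and exceptions.FileNotFoundError.

class StoreFileNotFound(Exception):
    """Stand-in for the driver's FileNotFoundError."""

class DictStore:
    # Toy store for illustration; the real driver reads JSON blobs from backend storage.
    def __init__(self, blobs):
        self.blobs = blobs
    def get_json(self, path):
        if path not in self.blobs:
            raise StoreFileNotFound(path)
        return self.blobs[path]

DEFAULTS = {'last_update': None, 'docker_version': None, 'arch': 'amd64', 'os': 'linux'}

def load_json_or_default(store, json_path):
    data = dict(DEFAULTS)          # copy so the shared defaults are never mutated
    try:
        data = store.get_json(json_path)
    except StoreFileNotFound:
        pass                       # missing file -> serve the defaults
    return data

load_json_or_default(DictStore({}), 'library/ubuntu/tag_latest')   # -> the DEFAULTS dict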
nicrusso7/rex-gym
26663048bd3c3da307714da4458b1a2a9dc81824
rex_gym/agents/tools/mock_algorithm.py
python
MockAlgorithm.__init__
(self, envs)
Produce random actions and empty summaries. Args: envs: List of in-graph environments.
Produce random actions and empty summaries.
[ "Produce", "random", "actions", "and", "empty", "summaries", "." ]
def __init__(self, envs): """Produce random actions and empty summaries. Args: envs: List of in-graph environments. """ self._envs = envs
[ "def", "__init__", "(", "self", ",", "envs", ")", ":", "self", ".", "_envs", "=", "envs" ]
https://github.com/nicrusso7/rex-gym/blob/26663048bd3c3da307714da4458b1a2a9dc81824/rex_gym/agents/tools/mock_algorithm.py#L21-L27
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/arrow/arrow.py
python
Arrow.strftime
(self, format: str)
return self._datetime.strftime(format)
Formats in the style of ``datetime.strftime``. :param format: the format string. Usage:: >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S') '23-01-2019 12:28:17'
Formats in the style of ``datetime.strftime``.
[ "Formats", "in", "the", "style", "of", "datetime", ".", "strftime", "." ]
def strftime(self, format: str) -> str: """Formats in the style of ``datetime.strftime``. :param format: the format string. Usage:: >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S') '23-01-2019 12:28:17' """ return self._datetime.strftime(format)
[ "def", "strftime", "(", "self", ",", "format", ":", "str", ")", "->", "str", ":", "return", "self", ".", "_datetime", ".", "strftime", "(", "format", ")" ]
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/arrow/arrow.py#L1692-L1704
x0rz/EQGRP_Lost_in_Translation
6692b1486f562f027567a49523b8c151a4050988
windows/fuzzbunch/pyreadline/console/console.py
python
Console.next_serial
(self)
return self.serial
Get next event serial number.
Get next event serial number.
[ "Get", "next", "event", "serial", "number", "." ]
def next_serial(self): '''Get next event serial number.''' self.serial += 1 return self.serial
[ "def", "next_serial", "(", "self", ")", ":", "self", ".", "serial", "+=", "1", "return", "self", ".", "serial" ]
https://github.com/x0rz/EQGRP_Lost_in_Translation/blob/6692b1486f562f027567a49523b8c151a4050988/windows/fuzzbunch/pyreadline/console/console.py#L583-L586
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/modelcluster/fields.py
python
ParentalKey.check
(self, **kwargs)
return errors
[]
def check(self, **kwargs): from modelcluster.models import ClusterableModel errors = super(ParentalKey, self).check(**kwargs) # Check that the destination model is a subclass of ClusterableModel. # If self.rel.to is a string at this point, it means that Django has been unable # to resolve it as a model name; if so, skip this test so that Django's own # system checks can report the appropriate error if isinstance(self.rel.to, type) and not issubclass(self.rel.to, ClusterableModel): errors.append( checks.Error( 'ParentalKey must point to a subclass of ClusterableModel.', hint='Change {model_name} into a ClusterableModel or use a ForeignKey instead.'.format( model_name=self.rel.to._meta.app_label + '.' + self.rel.to.__name__, ), obj=self, id='modelcluster.E001', ) ) # ParentalKeys must have an accessor name (#49) if self.rel.get_accessor_name() == '+': errors.append( checks.Error( "related_name='+' is not allowed on ParentalKey fields", hint="Either change it to a valid name or remove it", obj=self, id='modelcluster.E002', ) ) return errors
[ "def", "check", "(", "self", ",", "*", "*", "kwargs", ")", ":", "from", "modelcluster", ".", "models", "import", "ClusterableModel", "errors", "=", "super", "(", "ParentalKey", ",", "self", ")", ".", "check", "(", "*", "*", "kwargs", ")", "# Check that the destination model is a subclass of ClusterableModel.", "# If self.rel.to is a string at this point, it means that Django has been unable", "# to resolve it as a model name; if so, skip this test so that Django's own", "# system checks can report the appropriate error", "if", "isinstance", "(", "self", ".", "rel", ".", "to", ",", "type", ")", "and", "not", "issubclass", "(", "self", ".", "rel", ".", "to", ",", "ClusterableModel", ")", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "'ParentalKey must point to a subclass of ClusterableModel.'", ",", "hint", "=", "'Change {model_name} into a ClusterableModel or use a ForeignKey instead.'", ".", "format", "(", "model_name", "=", "self", ".", "rel", ".", "to", ".", "_meta", ".", "app_label", "+", "'.'", "+", "self", ".", "rel", ".", "to", ".", "__name__", ",", ")", ",", "obj", "=", "self", ",", "id", "=", "'modelcluster.E001'", ",", ")", ")", "# ParentalKeys must have an accessor name (#49)", "if", "self", ".", "rel", ".", "get_accessor_name", "(", ")", "==", "'+'", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "\"related_name='+' is not allowed on ParentalKey fields\"", ",", "hint", "=", "\"Either change it to a valid name or remove it\"", ",", "obj", "=", "self", ",", "id", "=", "'modelcluster.E002'", ",", ")", ")", "return", "errors" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/modelcluster/fields.py#L248-L280
glue-viz/glue
840b4c1364b0fa63bf67c914540c93dd71df41e1
glue/core/data_factories/helpers.py
python
data_label
(path)
return name
Convert a file path into a data label, by stripping out slashes, file extensions, etc.
Convert a file path into a data label, by stripping out slashes, file extensions, etc.
[ "Convert", "a", "file", "path", "into", "a", "data", "label", "by", "stripping", "out", "slashes", "file", "extensions", "etc", "." ]
def data_label(path): """Convert a file path into a data label, by stripping out slashes, file extensions, etc.""" if os.path.basename(path) == '': path = os.path.dirname(path) _, fname = os.path.split(path) name, _ = os.path.splitext(fname) return name
[ "def", "data_label", "(", "path", ")", ":", "if", "os", ".", "path", ".", "basename", "(", "path", ")", "==", "''", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "_", ",", "fname", "=", "os", ".", "path", ".", "split", "(", "path", ")", "name", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "return", "name" ]
https://github.com/glue-viz/glue/blob/840b4c1364b0fa63bf67c914540c93dd71df41e1/glue/core/data_factories/helpers.py#L308-L315
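Two worked examples of the stripping behavior, including the trailing-slash case the basename check handles:

data_label('/data/cubes/ngc1333.fits')   # -> 'ngc1333' (directory and extension stripped)
data_label('/data/cubes/')               # -> 'cubes'   (empty basename: fall back to the directory name)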
IntelPython/sdc
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
docs/source/buildscripts/user_guide_gen.py
python
generate_api_index_for_module
(the_module)
return module_doc
[]
def generate_api_index_for_module(the_module): module_description = generate_module_doc(the_module) if module_description is None: module_description = '' module_doc = '' module_header_flag = False # Document functions first, if any tab = Texttable() for func in the_module['functions']: # Iterate through the module functions name = func['function_name'] obj = getattr(the_module['module_object'], name) # Retrieve the function object description = get_function_short_description(obj).strip() tab.add_rows([[name, description]], header=False) module_name = '' func_doc = tab.draw() if func_doc and func_doc != '': # If the function list is not empty then add module name to the document module_name = the_module['module_name'] module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \ create_header_str('Functions:', '-') + \ '\n\n' + func_doc + '\n\n' module_header_flag = True # Document classes classes_header_flag = False for the_class in the_module['classes']: # Iterate through the module classes tab.reset() class_name = the_class['class_name'] class_obj = the_class['class_object'] class_description = class_obj.__doc__ if not class_description: class_description = '' class_doc = '' class_header_flag = False # Document class attributes first, if any for attr in the_class['class_attributes']: # Iterate through the class attributes name = attr obj = getattr(the_class['class_object'], name) # Retrieve the attribute object description = get_function_short_description(obj).strip() tab.add_rows([[name, description]], header=False) attr_doc = tab.draw() if attr_doc and attr_doc != '': # If the attribute list is not empty then add class name to the document class_header_flag = True class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \ create_header_str('Attributes:', '+') + \ '\n\n' + attr_doc + '\n\n' # Document class methods, if any for method in the_class['class_methods']: # Iterate through the class methods name = method obj = getattr(the_class['class_object'], name) # Retrieve the method object description = get_function_short_description(obj).strip() tab.add_rows([[name, description]], header=False) method_doc = tab.draw() if method_doc and method_doc != '': # If the method list is not empty then add class name to the document if not class_header_flag: class_doc += create_header_str(class_name, '^') + '\n\n' + class_description + '\n\n' + \ create_header_str('Methods:', '+') + \ '\n\n' + method_doc + '\n\n' class_header_flag = True else: class_doc += create_header_str('Methods:', '+') + \ '\n\n' + method_doc + '\n\n' if not module_header_flag: # There is no module header yet if class_header_flag: # There were methods/attributes for the class module_doc += create_header_str(module_name, '~') + '\n\n' + module_description + '\n\n' + \ create_header_str('Classes:', '-') + \ '\n\n' + class_doc + '\n\n' module_header_flag = True classes_header_flag = True else: # The module header has been added if class_header_flag: # There are new methods/attributes for the class if not classes_header_flag: # First class of the module description module_doc += create_header_str('Classes:', '-') + '\n\n' module_doc += '\n\n' + class_doc + '\n\n' return module_doc
[ "def", "generate_api_index_for_module", "(", "the_module", ")", ":", "module_description", "=", "generate_module_doc", "(", "the_module", ")", "if", "module_description", "is", "None", ":", "module_description", "=", "''", "module_doc", "=", "''", "module_header_flag", "=", "False", "# Document functions first, if any", "tab", "=", "Texttable", "(", ")", "for", "func", "in", "the_module", "[", "'functions'", "]", ":", "# Iterate through the module functions", "name", "=", "func", "[", "'function_name'", "]", "obj", "=", "getattr", "(", "the_module", "[", "'module_object'", "]", ",", "name", ")", "# Retrieve the function object", "description", "=", "get_function_short_description", "(", "obj", ")", ".", "strip", "(", ")", "tab", ".", "add_rows", "(", "[", "[", "name", ",", "description", "]", "]", ",", "header", "=", "False", ")", "module_name", "=", "''", "func_doc", "=", "tab", ".", "draw", "(", ")", "if", "func_doc", "and", "func_doc", "!=", "''", ":", "# If the function list is not empty then add module name to the document", "module_name", "=", "the_module", "[", "'module_name'", "]", "module_doc", "+=", "create_header_str", "(", "module_name", ",", "'~'", ")", "+", "'\\n\\n'", "+", "module_description", "+", "'\\n\\n'", "+", "create_header_str", "(", "'Functions:'", ",", "'-'", ")", "+", "'\\n\\n'", "+", "func_doc", "+", "'\\n\\n'", "module_header_flag", "=", "True", "# Document classes", "classes_header_flag", "=", "False", "for", "the_class", "in", "the_module", "[", "'classes'", "]", ":", "# Iterate through the module classes", "tab", ".", "reset", "(", ")", "class_name", "=", "the_class", "[", "'class_name'", "]", "class_obj", "=", "the_class", "[", "'class_object'", "]", "class_description", "=", "class_obj", ".", "__doc__", "if", "not", "class_description", ":", "class_description", "=", "''", "class_doc", "=", "''", "class_header_flag", "=", "False", "# Document class attributes first, if any", "for", "attr", "in", "the_class", "[", "'class_attributes'", "]", ":", "# Iterate through the class attributes", "name", "=", "attr", "obj", "=", "getattr", "(", "the_class", "[", "'class_object'", "]", ",", "name", ")", "# Retrieve the attribute object", "description", "=", "get_function_short_description", "(", "obj", ")", ".", "strip", "(", ")", "tab", ".", "add_rows", "(", "[", "[", "name", ",", "description", "]", "]", ",", "header", "=", "False", ")", "attr_doc", "=", "tab", ".", "draw", "(", ")", "if", "attr_doc", "and", "attr_doc", "!=", "''", ":", "# If the attribute list is not empty then add class name to the document", "class_header_flag", "=", "True", "class_doc", "+=", "create_header_str", "(", "class_name", ",", "'^'", ")", "+", "'\\n\\n'", "+", "class_description", "+", "'\\n\\n'", "+", "create_header_str", "(", "'Attributes:'", ",", "'+'", ")", "+", "'\\n\\n'", "+", "attr_doc", "+", "'\\n\\n'", "# Document class methods, if any", "for", "method", "in", "the_class", "[", "'class_methods'", "]", ":", "# Iterate through the class methods", "name", "=", "method", "obj", "=", "getattr", "(", "the_class", "[", "'class_object'", "]", ",", "name", ")", "# Retrieve the method object", "description", "=", "get_function_short_description", "(", "obj", ")", ".", "strip", "(", ")", "tab", ".", "add_rows", "(", "[", "[", "name", ",", "description", "]", "]", ",", "header", "=", "False", ")", "method_doc", "=", "tab", ".", "draw", "(", ")", "if", "method_doc", "and", "method_doc", "!=", "''", ":", "# If the method list is not empty then add class name to the document", "if", "not", 
"class_header_flag", ":", "class_doc", "+=", "create_header_str", "(", "class_name", ",", "'^'", ")", "+", "'\\n\\n'", "+", "class_description", "+", "'\\n\\n'", "+", "create_header_str", "(", "'Methods:'", ",", "'+'", ")", "+", "'\\n\\n'", "+", "method_doc", "+", "'\\n\\n'", "class_header_flag", "=", "True", "else", ":", "class_doc", "+=", "create_header_str", "(", "'Methods:'", ",", "'+'", ")", "+", "'\\n\\n'", "+", "method_doc", "+", "'\\n\\n'", "if", "not", "module_header_flag", ":", "# There is no module header yet", "if", "class_header_flag", ":", "# There were methods/attributes for the class", "module_doc", "+=", "create_header_str", "(", "module_name", ",", "'~'", ")", "+", "'\\n\\n'", "+", "module_description", "+", "'\\n\\n'", "+", "create_header_str", "(", "'Classes:'", ",", "'-'", ")", "+", "'\\n\\n'", "+", "class_doc", "+", "'\\n\\n'", "module_header_flag", "=", "True", "classes_header_flag", "=", "True", "else", ":", "# The module header has been added", "if", "class_header_flag", ":", "# There are new methods/attributes for the class", "if", "not", "classes_header_flag", ":", "# First class of the module description", "module_doc", "+=", "create_header_str", "(", "'Classes:'", ",", "'-'", ")", "+", "'\\n\\n'", "module_doc", "+=", "'\\n\\n'", "+", "class_doc", "+", "'\\n\\n'", "return", "module_doc" ]
https://github.com/IntelPython/sdc/blob/1ebf55c00ef38dfbd401a70b3945e352a5a38b87/docs/source/buildscripts/user_guide_gen.py#L76-L156
bayespy/bayespy
0e6e6130c888a4295cc9421d61d4ad27b2960ebb
bayespy/inference/vmp/nodes/gamma.py
python
GammaDistribution.compute_moments_and_cgf
(self, phi, mask=True)
return (u, g)
r""" Compute the moments and :math:`g(\phi)`. .. math:: \overline{\mathbf{u}} (\boldsymbol{\phi}) &= \begin{bmatrix} - \frac{\phi_2} {\phi_1} \\ \psi(\phi_2) - \log(-\phi_1) \end{bmatrix} \\ g_{\boldsymbol{\phi}} (\boldsymbol{\phi}) &= TODO
r""" Compute the moments and :math:`g(\phi)`.
[ "r", "Compute", "the", "moments", "and", ":", "math", ":", "g", "(", "\\", "phi", ")", "." ]
def compute_moments_and_cgf(self, phi, mask=True): r""" Compute the moments and :math:`g(\phi)`. .. math:: \overline{\mathbf{u}} (\boldsymbol{\phi}) &= \begin{bmatrix} - \frac{\phi_2} {\phi_1} \\ \psi(\phi_2) - \log(-\phi_1) \end{bmatrix} \\ g_{\boldsymbol{\phi}} (\boldsymbol{\phi}) &= TODO """ with np.errstate(invalid='raise', divide='raise'): log_b = np.log(-phi[0]) u0 = phi[1] / (-phi[0]) u1 = special.digamma(phi[1]) - log_b u = [u0, u1] g = phi[1] * log_b - special.gammaln(phi[1]) return (u, g)
[ "def", "compute_moments_and_cgf", "(", "self", ",", "phi", ",", "mask", "=", "True", ")", ":", "with", "np", ".", "errstate", "(", "invalid", "=", "'raise'", ",", "divide", "=", "'raise'", ")", ":", "log_b", "=", "np", ".", "log", "(", "-", "phi", "[", "0", "]", ")", "u0", "=", "phi", "[", "1", "]", "/", "(", "-", "phi", "[", "0", "]", ")", "u1", "=", "special", ".", "digamma", "(", "phi", "[", "1", "]", ")", "-", "log_b", "u", "=", "[", "u0", ",", "u1", "]", "g", "=", "phi", "[", "1", "]", "*", "log_b", "-", "special", ".", "gammaln", "(", "phi", "[", "1", "]", ")", "return", "(", "u", ",", "g", ")" ]
https://github.com/bayespy/bayespy/blob/0e6e6130c888a4295cc9421d61d4ad27b2960ebb/bayespy/inference/vmp/nodes/gamma.py#L124-L148
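Reading the body back into notation fills in the docstring's TODO: with log_b = log(-phi[0]), the line g = phi[1] * log_b - special.gammaln(phi[1]) is

.. math::

    g_{\boldsymbol{\phi}} (\boldsymbol{\phi}) = \phi_2 \log(-\phi_1) - \log \Gamma(\phi_2)

which is minus the log-partition function of a Gamma distribution in natural parameters (phi_1 = -rate, phi_2 = shape).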
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/pydoc.py
python
HTMLDoc.classlink
(self, object, modname)
return classname(object, modname)
Make a link for a class.
Make a link for a class.
[ "Make", "a", "link", "for", "a", "class", "." ]
def classlink(self, object, modname): """Make a link for a class.""" name, module = object.__name__, sys.modules.get(object.__module__) if hasattr(module, name) and getattr(module, name) is object: return '<a href="%s.html#%s">%s</a>' % ( module.__name__, name, classname(object, modname)) return classname(object, modname)
[ "def", "classlink", "(", "self", ",", "object", ",", "modname", ")", ":", "name", ",", "module", "=", "object", ".", "__name__", ",", "sys", ".", "modules", ".", "get", "(", "object", ".", "__module__", ")", "if", "hasattr", "(", "module", ",", "name", ")", "and", "getattr", "(", "module", ",", "name", ")", "is", "object", ":", "return", "'<a href=\"%s.html#%s\">%s</a>'", "%", "(", "module", ".", "__name__", ",", "name", ",", "classname", "(", "object", ",", "modname", ")", ")", "return", "classname", "(", "object", ",", "modname", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/pydoc.py#L642-L648
amosbastian/fpl
141e10d315df3f886b103ce4351e8e56a96489f8
fpl/utils.py
python
get_headers
(referer)
return { "Content-Type": "application/json; charset=UTF-8", "X-Requested-With": "XMLHttpRequest", "Referer": referer }
Returns the headers needed for the transfer request.
Returns the headers needed for the transfer request.
[ "Returns", "the", "headers", "needed", "for", "the", "transfer", "request", "." ]
def get_headers(referer): """Returns the headers needed for the transfer request.""" return { "Content-Type": "application/json; charset=UTF-8", "X-Requested-With": "XMLHttpRequest", "Referer": referer }
[ "def", "get_headers", "(", "referer", ")", ":", "return", "{", "\"Content-Type\"", ":", "\"application/json; charset=UTF-8\"", ",", "\"X-Requested-With\"", ":", "\"XMLHttpRequest\"", ",", "\"Referer\"", ":", "referer", "}" ]
https://github.com/amosbastian/fpl/blob/141e10d315df3f886b103ce4351e8e56a96489f8/fpl/utils.py#L183-L189
GoogleCloudPlatform/PerfKitBenchmarker
6e3412d7d5e414b8ca30ed5eaf970cef1d919a67
perfkitbenchmarker/linux_packages/cuda_toolkit.py
python
_InstallCudaPatch
(vm, patch_url)
Installs CUDA Toolkit patch from NVIDIA. Args: vm: VM to install patch on patch_url: url of the CUDA patch to install
Installs CUDA Toolkit patch from NVIDIA.
[ "Installs", "CUDA", "Toolkit", "patch", "from", "NVIDIA", "." ]
def _InstallCudaPatch(vm, patch_url): """Installs CUDA Toolkit patch from NVIDIA. Args: vm: VM to install patch on patch_url: url of the CUDA patch to install """ # Need to append .deb to package name because the file downloaded from # NVIDIA is missing the .deb extension. basename = posixpath.basename(patch_url) + '.deb' vm.RemoteCommand('wget -q %s -O %s' % (patch_url, basename)) vm.RemoteCommand('sudo dpkg -i %s' % basename) vm.RemoteCommand('sudo apt-get update') # Need to be extra careful on the command below because without these # precautions, it was brining up a menu option about grub's menu.lst # on AWS Ubuntu16.04 and thus causing the RemoteCommand to hang and fail. vm.RemoteCommand( 'sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq cuda')
[ "def", "_InstallCudaPatch", "(", "vm", ",", "patch_url", ")", ":", "# Need to append .deb to package name because the file downloaded from", "# NVIDIA is missing the .deb extension.", "basename", "=", "posixpath", ".", "basename", "(", "patch_url", ")", "+", "'.deb'", "vm", ".", "RemoteCommand", "(", "'wget -q %s -O %s'", "%", "(", "patch_url", ",", "basename", ")", ")", "vm", ".", "RemoteCommand", "(", "'sudo dpkg -i %s'", "%", "basename", ")", "vm", ".", "RemoteCommand", "(", "'sudo apt-get update'", ")", "# Need to be extra careful on the command below because without these", "# precautions, it was brining up a menu option about grub's menu.lst", "# on AWS Ubuntu16.04 and thus causing the RemoteCommand to hang and fail.", "vm", ".", "RemoteCommand", "(", "'sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq cuda'", ")" ]
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/blob/6e3412d7d5e414b8ca30ed5eaf970cef1d919a67/perfkitbenchmarker/linux_packages/cuda_toolkit.py#L130-L148
michaelhush/M-LOOP
cd0bf2d0de0bfe7f533156399a94b576f7f34a35
mloop/visualizations.py
python
GaussianProcessVisualizer.plot_hyperparameters_vs_run
(self, *args, **kwargs)
Deprecated. Use `plot_hyperparameters_vs_fit()` instead.
Deprecated. Use `plot_hyperparameters_vs_fit()` instead.
[ "Deprecated", ".", "Use", "plot_hyperparameters_vs_fit", "()", "instead", "." ]
def plot_hyperparameters_vs_run(self, *args, **kwargs): ''' Deprecated. Use `plot_hyperparameters_vs_fit()` instead. ''' msg = ("plot_hyperparameters_vs_run() is deprecated; " "use plot_hyperparameters_vs_fit() instead.") warnings.warn(msg) self.plot_hyperparameters_vs_fit(*args, **kwargs)
[ "def", "plot_hyperparameters_vs_run", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", "=", "(", "\"plot_hyperparameters_vs_run() is deprecated; \"", "\"use plot_hyperparameters_vs_fit() instead.\"", ")", "warnings", ".", "warn", "(", "msg", ")", "self", ".", "plot_hyperparameters_vs_fit", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/michaelhush/M-LOOP/blob/cd0bf2d0de0bfe7f533156399a94b576f7f34a35/mloop/visualizations.py#L1100-L1107
mattupstate/flask-security
674b18103fa8734aca71bbd084ea01e3709817ef
flask_security/utils.py
python
capture_registrations
()
Testing utility for capturing registrations. :param confirmation_sent_at: An optional datetime object to set the user's `confirmation_sent_at` to
Testing utility for capturing registrations.
[ "Testing", "utility", "for", "capturing", "registrations", "." ]
def capture_registrations(): """Testing utility for capturing registrations. :param confirmation_sent_at: An optional datetime object to set the user's `confirmation_sent_at` to """ registrations = [] def _on(app, **data): registrations.append(data) user_registered.connect(_on) try: yield registrations finally: user_registered.disconnect(_on)
[ "def", "capture_registrations", "(", ")", ":", "registrations", "=", "[", "]", "def", "_on", "(", "app", ",", "*", "*", "data", ")", ":", "registrations", ".", "append", "(", "data", ")", "user_registered", ".", "connect", "(", "_on", ")", "try", ":", "yield", "registrations", "finally", ":", "user_registered", ".", "disconnect", "(", "_on", ")" ]
https://github.com/mattupstate/flask-security/blob/674b18103fa8734aca71bbd084ea01e3709817ef/flask_security/utils.py#L481-L497
open-io/oio-sds
16041950b6056a55d5ce7ca77795defe6dfa6c61
oio/common/tool.py
python
Tool.create_worker
(self, queue_workers, queue_reply)
Create worker to process the items.
Create worker to process the items.
[ "Create", "worker", "to", "process", "the", "items", "." ]
def create_worker(self, queue_workers, queue_reply): """ Create worker to process the items. """ raise NotImplementedError()
[ "def", "create_worker", "(", "self", ",", "queue_workers", ",", "queue_reply", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/open-io/oio-sds/blob/16041950b6056a55d5ce7ca77795defe6dfa6c61/oio/common/tool.py#L201-L205
yourlabs/django-autocomplete-light
3eb84a09c5828e4a9409f0aec14c2874bcbe17df
src/dal_select2/views.py
python
Select2GroupListView.get_item_as_group
(self, entry)
return (group, item),
Return the item with its group.
Return the item with its group.
[ "Return", "the", "item", "with", "its", "group", "." ]
def get_item_as_group(self, entry): """Return the item with its group.""" group = None item = entry if isinstance(entry, collections.Sequence) and \ not isinstance(entry, six.string_types): entry_length = len(entry) if all(isinstance(el, list) for el in entry) and entry_length > 1: group, item = entry[0:2] return (group, item), elif all(isinstance(el, list) for el in entry) and entry_length > 1: group, item = entry[0:2] return (group, item), else: if(entry_length > 1): group, item = entry[0:2] elif(entry_length > 0): item = entry[0] if not isinstance(item, collections.Sequence) or \ isinstance(item, six.string_types): item = (item,) return (group, item),
[ "def", "get_item_as_group", "(", "self", ",", "entry", ")", ":", "group", "=", "None", "item", "=", "entry", "if", "isinstance", "(", "entry", ",", "collections", ".", "Sequence", ")", "and", "not", "isinstance", "(", "entry", ",", "six", ".", "string_types", ")", ":", "entry_length", "=", "len", "(", "entry", ")", "if", "all", "(", "isinstance", "(", "el", ",", "list", ")", "for", "el", "in", "entry", ")", "and", "entry_length", ">", "1", ":", "group", ",", "item", "=", "entry", "[", "0", ":", "2", "]", "return", "(", "group", ",", "item", ")", ",", "elif", "all", "(", "isinstance", "(", "el", ",", "list", ")", "for", "el", "in", "entry", ")", "and", "entry_length", ">", "1", ":", "group", ",", "item", "=", "entry", "[", "0", ":", "2", "]", "return", "(", "group", ",", "item", ")", ",", "else", ":", "if", "(", "entry_length", ">", "1", ")", ":", "group", ",", "item", "=", "entry", "[", "0", ":", "2", "]", "elif", "(", "entry_length", ">", "0", ")", ":", "item", "=", "entry", "[", "0", "]", "if", "not", "isinstance", "(", "item", ",", "collections", ".", "Sequence", ")", "or", "isinstance", "(", "item", ",", "six", ".", "string_types", ")", ":", "item", "=", "(", "item", ",", ")", "return", "(", "group", ",", "item", ")", "," ]
https://github.com/yourlabs/django-autocomplete-light/blob/3eb84a09c5828e4a9409f0aec14c2874bcbe17df/src/dal_select2/views.py#L195-L222
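Two notes on the record: the first if and elif branches are character-for-character identical, so the elif arm is dead code; and collections.Sequence / six.string_types date it to the Python 2 era (modern Python wants collections.abc.Sequence and str). Traced by hand on small inputs:

get_item_as_group('spam')              # -> ((None, ('spam',)),)    -- bare item, no group
get_item_as_group(['Group', 'item'])   # -> (('Group', ('item',)),) -- first element becomes the group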
shichao-an/leetcode-python
6c523ef4759a57433e10271b584eece16f9f05f3
first_bad_version/solution.py
python
Solution.firstBadVersion
(self, n)
:type n: int :rtype: int
:type n: int :rtype: int
[ ":", "type", "n", ":", "int", ":", "rtype", ":", "int" ]
def firstBadVersion(self, n): """ :type n: int :rtype: int """ left = 1 right = n while left + 1 < right: mid = left + (right - left) / 2 if isBadVersion(mid): right = mid else: left = mid if isBadVersion(left): return left elif isBadVersion(right): return right
[ "def", "firstBadVersion", "(", "self", ",", "n", ")", ":", "left", "=", "1", "right", "=", "n", "while", "left", "+", "1", "<", "right", ":", "mid", "=", "left", "+", "(", "right", "-", "left", ")", "/", "2", "if", "isBadVersion", "(", "mid", ")", ":", "right", "=", "mid", "else", ":", "left", "=", "mid", "if", "isBadVersion", "(", "left", ")", ":", "return", "left", "elif", "isBadVersion", "(", "right", ")", ":", "return", "right" ]
https://github.com/shichao-an/leetcode-python/blob/6c523ef4759a57433e10271b584eece16f9f05f3/first_bad_version/solution.py#L21-L37
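The record relies on Python 2's integer /; a Python 3 port uses // and takes the predicate as a parameter so the sketch is self-contained:

def first_bad_version(n, is_bad):
    # Invariant: the first bad version always lies in [left, right]; the gap closes to two.
    left, right = 1, n
    while left + 1 < right:
        mid = left + (right - left) // 2
        if is_bad(mid):
            right = mid
        else:
            left = mid
    return left if is_bad(left) else right

first_bad_version(10, lambda v: v >= 4)   # -> 4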
KoalixSwitzerland/koalixcrm
87d125379845d6ab990c19500d63cbed4051040a
koalixcrm/crm/inlinemixin.py
python
LimitedAdminInlineMixin.limit_inline_choices
(formset, field, empty=False, **filters)
This function fetches the queryset with available choices for a given `field` and filters it based on the criteria specified in filters, unless `empty=True`. In this case, no choices will be made available.
This function fetches the queryset with available choices for a given `field` and filters it based on the criteria specified in filters, unless `empty=True`. In this case, no choices will be made available.
[ "This", "function", "fetches", "the", "queryset", "with", "available", "choices", "for", "a", "given", "field", "and", "filters", "it", "based", "on", "the", "criteria", "specified", "in", "filters", "unless", "empty", "=", "True", ".", "In", "this", "case", "no", "choices", "will", "be", "made", "available", "." ]
def limit_inline_choices(formset, field, empty=False, **filters): """ This function fetches the queryset with available choices for a given `field` and filters it based on the criteria specified in filters, unless `empty=True`. In this case, no choices will be made available. """ assert field in formset.form.base_fields qs = formset.form.base_fields[field].queryset if empty: formset.form.base_fields[field].queryset = qs.none() else: qs = qs.filter(**filters) formset.form.base_fields[field].queryset = qs
[ "def", "limit_inline_choices", "(", "formset", ",", "field", ",", "empty", "=", "False", ",", "*", "*", "filters", ")", ":", "assert", "field", "in", "formset", ".", "form", ".", "base_fields", "qs", "=", "formset", ".", "form", ".", "base_fields", "[", "field", "]", ".", "queryset", "if", "empty", ":", "formset", ".", "form", ".", "base_fields", "[", "field", "]", ".", "queryset", "=", "qs", ".", "none", "(", ")", "else", ":", "qs", "=", "qs", ".", "filter", "(", "*", "*", "filters", ")", "formset", ".", "form", ".", "base_fields", "[", "field", "]", ".", "queryset", "=", "qs" ]
https://github.com/KoalixSwitzerland/koalixcrm/blob/87d125379845d6ab990c19500d63cbed4051040a/koalixcrm/crm/inlinemixin.py#L25-L37
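A framework-free sketch of what limit_inline_choices does to a formset. The Fake* stand-ins below are assumptions used only to make the queryset-narrowing mechanics visible without a Django project:

    class Row:
        def __init__(self, name, project):
            self.name, self.project = name, project

    class FakeQuerySet:
        # Mimics only the two queryset methods the function touches.
        def __init__(self, items):
            self.items = list(items)
        def filter(self, **filters):
            return FakeQuerySet(o for o in self.items
                                if all(getattr(o, k) == v for k, v in filters.items()))
        def none(self):
            return FakeQuerySet([])

    class FakeForm:
        base_fields = {"task": type("F", (), {})()}

    FakeForm.base_fields["task"].queryset = FakeQuerySet([Row("a", 1), Row("b", 2)])

    class FakeFormset:
        form = FakeForm

    # Same logic as the function above, applied to the stand-ins:
    qs = FakeFormset.form.base_fields["task"].queryset
    FakeFormset.form.base_fields["task"].queryset = qs.filter(project=1)
    print([r.name for r in FakeForm.base_fields["task"].queryset.items])  # -> ['a']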
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/fractions.py
python
Fraction.__rfloordiv__
(b, a)
a // b
a // b
[ "a", "//", "b" ]
def __rfloordiv__(b, a): """a // b""" # Will be math.floor(a / b) in 3.0. div = a / b if isinstance(div, Rational): # trunc(math.floor(div)) doesn't work if the rational is # more precise than a float because the intermediate # rounding may cross an integer boundary. return div.numerator // div.denominator else: return math.floor(div)
[ "def", "__rfloordiv__", "(", "b", ",", "a", ")", ":", "# Will be math.floor(a / b) in 3.0.", "div", "=", "a", "/", "b", "if", "isinstance", "(", "div", ",", "Rational", ")", ":", "# trunc(math.floor(div)) doesn't work if the rational is", "# more precise than a float because the intermediate", "# rounding may cross an integer boundary.", "return", "div", ".", "numerator", "//", "div", ".", "denominator", "else", ":", "return", "math", ".", "floor", "(", "div", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/bin/x86/Debug/Lib/fractions.py#L429-L439
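A worked example of the delegation above: when the left operand is not a Fraction, Python calls __rfloordiv__ on the right operand, and the result is floor(a / b), as the docstring notes.

    from fractions import Fraction

    print(7 // Fraction(2, 3))    # 7 / (2/3) = 21/2; 21 // 2 -> 10
    print(-7 // Fraction(2, 3))   # -21/2 = -10.5 floors to -11, not -10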
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_image.py
python
Utils.filter_versions
(stdout)
return version_dict
filter the oc version output
filter the oc version output
[ "filter", "the", "oc", "version", "output" ]
def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict
[ "def", "filter_versions", "(", "stdout", ")", ":", "version_dict", "=", "{", "}", "version_search", "=", "[", "'oc'", ",", "'openshift'", ",", "'kubernetes'", "]", "for", "line", "in", "stdout", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "for", "term", "in", "version_search", ":", "if", "not", "line", ":", "continue", "if", "line", ".", "startswith", "(", "term", ")", ":", "version_dict", "[", "term", "]", "=", "line", ".", "split", "(", ")", "[", "-", "1", "]", "# horrible hack to get openshift version in Openshift 3.2", "# By default \"oc version in 3.2 does not return an \"openshift\" version", "if", "\"openshift\"", "not", "in", "version_dict", ":", "version_dict", "[", "\"openshift\"", "]", "=", "version_dict", "[", "\"oc\"", "]", "return", "version_dict" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_image.py#L1279-L1297
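Running the same parsing rules over a sample `oc version` output shows the 3.2 fallback kicking in. The sample text below is illustrative, not captured from a real cluster:

    sample = "oc v3.2.1\nkubernetes v1.2.0\nfeatures: Basic-Auth"

    version_dict = {}
    for line in sample.strip().split('\n'):
        for term in ['oc', 'openshift', 'kubernetes']:
            if line.startswith(term):
                version_dict[term] = line.split()[-1]
    if 'openshift' not in version_dict:          # the 3.2 fallback from above
        version_dict['openshift'] = version_dict['oc']
    print(version_dict)
    # -> {'oc': 'v3.2.1', 'kubernetes': 'v1.2.0', 'openshift': 'v3.2.1'}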
PaddlePaddle/PGL
e48545f2814523c777b8a9a9188bf5a7f00d6e52
ogb_examples/graphproppred/ogbg_molpcba/utils/util.py
python
make_dir
(path)
Build directory
Build directory
[ "Build", "directory" ]
def make_dir(path): """Build directory""" if not os.path.exists(path): os.makedirs(path)
[ "def", "make_dir", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
https://github.com/PaddlePaddle/PGL/blob/e48545f2814523c777b8a9a9188bf5a7f00d6e52/ogb_examples/graphproppred/ogbg_molpcba/utils/util.py#L42-L45
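On Python 3 the same effect is available in one call, without the window between the existence check and the creation:

    import os

    os.makedirs("output/run1", exist_ok=True)   # the path here is only an example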
liwanlei/FXTest
414a20024ae164035ec31982cda252eaa6b129b8
common/opearexcel.py
python
create_interface
(filename: str, interfacelist: list)
[]
def create_interface(filename: str, interfacelist: list): try: file = Workbook(filename) table = file.add_sheet('接口', cell_overwrite_ok=True) for i in range(1, 9): table.col(i).width = 300 * 25 style = yangshi1() table.write(0, 0, '编号', style=style) table.write(0, 1, '项目名字', style=style) table.write(0, 2, '模块名字', style=style) table.write(0, 3, '接口名字', style=style) table.write(0, 4, '接口url', style=style) table.write(0, 5, '接口协议', style=style) table.write(0, 6, '请求头', style=style) table.write(0, 7, '请求方式', style=style) # table.write(0, 8, '请求示例', style=style) # table.write(0, 9, '请求返回示例', style=style) table.write(0, 8, '添加人', style=style) stylen = yangshi2() for i in range(len(interfacelist)): table.write(i + 1, 0, str(interfacelist[i].id), style=stylen) table.write(i + 1, 1, str(interfacelist[i].projects), style=stylen) table.write(i + 1, 2, str(interfacelist[i].models), style=stylen) table.write(i + 1, 3, interfacelist[i].Interface_name, style=stylen) table.write(i + 1, 4, interfacelist[i].Interface_url, style=stylen) table.write(i + 1, 5, interfacelist[i].interfacetype, style=stylen) table.write(i + 1, 6, interfacelist[i].Interface_headers, style=stylen) table.write(i + 1, 7, interfacelist[i].Interface_meth, style=stylen) # table.write(i + 1, 8, interfacelist[i].Interface_par, style=stylen) # table.write(i + 1, 9, interfacelist[i].Interface_back, style=stylen) table.write(i + 1, 8, str(interfacelist[i].users), style=stylen) i += 1 file.save(filename) return {'code': 0, 'message': filename} except Exception as e: return {'code': 1, 'error': e}
[ "def", "create_interface", "(", "filename", ":", "str", ",", "interfacelist", ":", "list", ")", ":", "try", ":", "file", "=", "Workbook", "(", "filename", ")", "table", "=", "file", ".", "add_sheet", "(", "'接口', ce", "l", "_overwrite_ok=Tru", "e", ")", "", "for", "i", "in", "range", "(", "1", ",", "9", ")", ":", "table", ".", "col", "(", "i", ")", ".", "width", "=", "300", "*", "25", "style", "=", "yangshi1", "(", ")", "table", ".", "write", "(", "0", ",", "0", ",", "'编号', st", "y", "e=sty", "l", "e)", "", "table", ".", "write", "(", "0", ",", "1", ",", "'项目名字', style=", "s", "yle)", "", "", "", "table", ".", "write", "(", "0", ",", "2", ",", "'模块名字', style=", "s", "yle)", "", "", "", "table", ".", "write", "(", "0", ",", "3", ",", "'接口名字', style=", "s", "yle)", "", "", "", "table", ".", "write", "(", "0", ",", "4", ",", "'接口url', st", "y", "e=sty", "l", "e)", "", "table", ".", "write", "(", "0", ",", "5", ",", "'接口协议', style=", "s", "yle)", "", "", "", "table", ".", "write", "(", "0", ",", "6", ",", "'请求头', styl", "e", "style", ")", "", "", "table", ".", "write", "(", "0", ",", "7", ",", "'请求方式', style=", "s", "yle)", "", "", "", "# table.write(0, 8, '请求示例', style=style)", "# table.write(0, 9, '请求返回示例', style=style)", "table", ".", "write", "(", "0", ",", "8", ",", "'添加人', styl", "e", "style", ")", "", "", "stylen", "=", "yangshi2", "(", ")", "for", "i", "in", "range", "(", "len", "(", "interfacelist", ")", ")", ":", "table", ".", "write", "(", "i", "+", "1", ",", "0", ",", "str", "(", "interfacelist", "[", "i", "]", ".", "id", ")", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "1", ",", "str", "(", "interfacelist", "[", "i", "]", ".", "projects", ")", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "2", ",", "str", "(", "interfacelist", "[", "i", "]", ".", "models", ")", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "3", ",", "interfacelist", "[", "i", "]", ".", "Interface_name", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "4", ",", "interfacelist", "[", "i", "]", ".", "Interface_url", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "5", ",", "interfacelist", "[", "i", "]", ".", "interfacetype", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "6", ",", "interfacelist", "[", "i", "]", ".", "Interface_headers", ",", "style", "=", "stylen", ")", "table", ".", "write", "(", "i", "+", "1", ",", "7", ",", "interfacelist", "[", "i", "]", ".", "Interface_meth", ",", "style", "=", "stylen", ")", "# table.write(i + 1, 8, interfacelist[i].Interface_par, style=stylen)", "# table.write(i + 1, 9, interfacelist[i].Interface_back, style=stylen)", "table", ".", "write", "(", "i", "+", "1", ",", "8", ",", "str", "(", "interfacelist", "[", "i", "]", ".", "users", ")", ",", "style", "=", "stylen", ")", "i", "+=", "1", "file", ".", "save", "(", "filename", ")", "return", "{", "'code'", ":", "0", ",", "'message'", ":", "filename", "}", "except", "Exception", "as", "e", ":", "return", "{", "'code'", ":", "1", ",", "'error'", ":", "e", "}" ]
https://github.com/liwanlei/FXTest/blob/414a20024ae164035ec31982cda252eaa6b129b8/common/opearexcel.py#L37-L72
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/gdata/src/gdata/tlslite/integration/AsyncStateMachine.py
python
AsyncStateMachine.inReadEvent
(self)
Tell the state machine it can read from the socket.
Tell the state machine it can read from the socket.
[ "Tell", "the", "state", "machine", "it", "can", "read", "from", "the", "socket", "." ]
def inReadEvent(self): """Tell the state machine it can read from the socket.""" try: self._checkAssert() if self.handshaker: self._doHandshakeOp() elif self.closer: self._doCloseOp() elif self.reader: self._doReadOp() elif self.writer: self._doWriteOp() else: self.reader = self.tlsConnection.readAsync(16384) self._doReadOp() except: self._clear() raise
[ "def", "inReadEvent", "(", "self", ")", ":", "try", ":", "self", ".", "_checkAssert", "(", ")", "if", "self", ".", "handshaker", ":", "self", ".", "_doHandshakeOp", "(", ")", "elif", "self", ".", "closer", ":", "self", ".", "_doCloseOp", "(", ")", "elif", "self", ".", "reader", ":", "self", ".", "_doReadOp", "(", ")", "elif", "self", ".", "writer", ":", "self", ".", "_doWriteOp", "(", ")", "else", ":", "self", ".", "reader", "=", "self", ".", "tlsConnection", ".", "readAsync", "(", "16384", ")", "self", ".", "_doReadOp", "(", ")", "except", ":", "self", ".", "_clear", "(", ")", "raise" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/gdata/src/gdata/tlslite/integration/AsyncStateMachine.py#L118-L135
iTechArt/convtools-ita
25c1057e20581d957bec1339758325dc98fec43e
src/convtools/base.py
python
FilterConversion._gen_code_and_update_ctx
(self, code_input, ctx)
return self.conversion.gen_code_and_update_ctx(code_input, ctx)
[]
def _gen_code_and_update_ctx(self, code_input, ctx): return self.conversion.gen_code_and_update_ctx(code_input, ctx)
[ "def", "_gen_code_and_update_ctx", "(", "self", ",", "code_input", ",", "ctx", ")", ":", "return", "self", ".", "conversion", ".", "gen_code_and_update_ctx", "(", "code_input", ",", "ctx", ")" ]
https://github.com/iTechArt/convtools-ita/blob/25c1057e20581d957bec1339758325dc98fec43e/src/convtools/base.py#L1319-L1320
nuxeo/FunkLoad
8a3a44c20398098d03197baeef27a4177858df1b
src/funkload/ReportRenderDiff.py
python
getRPath
(a, b)
return '../' * len(a_path[i:]) + '/'.join(b_path[i:])
Return a relative path of b from a.
Return a relative path of b from a.
[ "Return", "a", "relative", "path", "of", "b", "from", "a", "." ]
def getRPath(a, b): """Return a relative path of b from a.""" a_path = a.split('/') b_path = b.split('/') for i in range(min(len(a_path), len(b_path))): if a_path[i] != b_path[i]: break return '../' * len(a_path[i:]) + '/'.join(b_path[i:])
[ "def", "getRPath", "(", "a", ",", "b", ")", ":", "a_path", "=", "a", ".", "split", "(", "'/'", ")", "b_path", "=", "b", ".", "split", "(", "'/'", ")", "for", "i", "in", "range", "(", "min", "(", "len", "(", "a_path", ")", ",", "len", "(", "b_path", ")", ")", ")", ":", "if", "a_path", "[", "i", "]", "!=", "b_path", "[", "i", "]", ":", "break", "return", "'../'", "*", "len", "(", "a_path", "[", "i", ":", "]", ")", "+", "'/'", ".", "join", "(", "b_path", "[", "i", ":", "]", ")" ]
https://github.com/nuxeo/FunkLoad/blob/8a3a44c20398098d03197baeef27a4177858df1b/src/funkload/ReportRenderDiff.py#L52-L59
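A worked example, assuming the function is applied to report file paths as the renderer does; note that the final component of `a` also counts as a level to climb out of:

    def get_rpath(a, b):                         # same logic as getRPath above
        a_path, b_path = a.split('/'), b.split('/')
        for i in range(min(len(a_path), len(b_path))):
            if a_path[i] != b_path[i]:
                break
        return '../' * len(a_path[i:]) + '/'.join(b_path[i:])

    print(get_rpath('reports/2021/index.html', 'reports/2022/summary.html'))
    # -> '../../2022/summary.html'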
google/ci_edit
ffaa52473673cc7ec2080bc59996d61414d662c9
app/curses_util.py
python
column_to_index
(column, string)
return index
If the visual cursor is on |column|, which index of the string is the cursor on?
If the visual cursor is on |column|, which index of the string is the cursor on?
[ "If", "the", "visual", "cursor", "is", "on", "|column|", "which", "index", "of", "the", "string", "is", "the", "cursor", "on?" ]
def column_to_index(column, string): """If the visual cursor is on |column|, which index of the string is the cursor on?""" if app.config.strict_debug: assert isinstance(column, int) assert isinstance(string, unicode) if not string: return None indexLimit = len(string) - 1 colCursor = 0 index = 0 for ch in string: colCursor += char_width(ch, colCursor) if colCursor > column: break index += 1 if index > indexLimit: return None return index
[ "def", "column_to_index", "(", "column", ",", "string", ")", ":", "if", "app", ".", "config", ".", "strict_debug", ":", "assert", "isinstance", "(", "column", ",", "int", ")", "assert", "isinstance", "(", "string", ",", "unicode", ")", "if", "not", "string", ":", "return", "None", "indexLimit", "=", "len", "(", "string", ")", "-", "1", "colCursor", "=", "0", "index", "=", "0", "for", "ch", "in", "string", ":", "colCursor", "+=", "char_width", "(", "ch", ",", "colCursor", ")", "if", "colCursor", ">", "column", ":", "break", "index", "+=", "1", "if", "index", ">", "indexLimit", ":", "return", "None", "return", "index" ]
https://github.com/google/ci_edit/blob/ffaa52473673cc7ec2080bc59996d61414d662c9/app/curses_util.py#L236-L254
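A standalone sketch of the column-to-index walk, assuming a simplified char_width in which East Asian wide characters take two columns and everything else one (the real ci_edit char_width also handles tabs and control characters):

    import unicodedata

    def char_width(ch, col):
        return 2 if unicodedata.east_asian_width(ch) in ("W", "F") else 1

    def column_to_index(column, string):
        if not string:
            return None
        col_cursor = index = 0
        for ch in string:
            col_cursor += char_width(ch, col_cursor)
            if col_cursor > column:
                break
            index += 1
        return None if index > len(string) - 1 else index

    s = u"ab\u5b57c"              # 'ab' + one wide CJK char + 'c'
    print(column_to_index(0, s))  # 0 ('a')
    print(column_to_index(2, s))  # 2 (the wide char starts at column 2)
    print(column_to_index(3, s))  # 2 (still inside the wide char)
    print(column_to_index(4, s))  # 3 ('c')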
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/sybase/base.py
python
SybaseDialect.has_table
(self, connection, table_name, schema=None)
[]
def has_table(self, connection, table_name, schema=None): try: self.get_table_id(connection, table_name, schema) except exc.NoSuchTableError: return False else: return True
[ "def", "has_table", "(", "self", ",", "connection", ",", "table_name", ",", "schema", "=", "None", ")", ":", "try", ":", "self", ".", "get_table_id", "(", "connection", ",", "table_name", ",", "schema", ")", "except", "exc", ".", "NoSuchTableError", ":", "return", "False", "else", ":", "return", "True" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/sybase/base.py#L833-L839
kamalgill/flask-appengine-template
11760f83faccbb0d0afe416fc58e67ecfb4643c2
src/lib/flask/ctx.py
python
RequestContext.auto_pop
(self, exc)
[]
def auto_pop(self, exc): if self.request.environ.get('flask._preserve_context') or \ (exc is not None and self.app.preserve_context_on_exception): self.preserved = True self._preserved_exc = exc else: self.pop(exc)
[ "def", "auto_pop", "(", "self", ",", "exc", ")", ":", "if", "self", ".", "request", ".", "environ", ".", "get", "(", "'flask._preserve_context'", ")", "or", "(", "exc", "is", "not", "None", "and", "self", ".", "app", ".", "preserve_context_on_exception", ")", ":", "self", ".", "preserved", "=", "True", "self", ".", "_preserved_exc", "=", "exc", "else", ":", "self", ".", "pop", "(", "exc", ")" ]
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/flask/ctx.py#L381-L387
pulp/pulp
a0a28d804f997b6f81c391378aff2e4c90183df9
nodes/child/pulp_node/importers/strategies.py
python
ImporterStrategy.synchronize
(self, request)
Synchronize the content units associated with the specified repository. :param request: A synchronization request. :type request: SyncRequest
Synchronize the content units associated with the specified repository. :param request: A synchronization request. :type request: SyncRequest
[ "Synchronize", "the", "content", "units", "associated", "with", "the", "specified", "repository", ".", ":", "param", "request", ":", "A", "synchronization", "request", ".", ":", "type", "request", ":", "SyncRequest" ]
def synchronize(self, request): """ Synchronize the content units associated with the specified repository. :param request: A synchronization request. :type request: SyncRequest """ request.started() try: self._synchronize(request) except NodeError, ne: request.summary.errors.append(ne) except Exception, e: _log.exception(request.repo_id) request.summary.errors.append(CaughtException(e, request.repo_id))
[ "def", "synchronize", "(", "self", ",", "request", ")", ":", "request", ".", "started", "(", ")", "try", ":", "self", ".", "_synchronize", "(", "request", ")", "except", "NodeError", ",", "ne", ":", "request", ".", "summary", ".", "errors", ".", "append", "(", "ne", ")", "except", "Exception", ",", "e", ":", "_log", ".", "exception", "(", "request", ".", "repo_id", ")", "request", ".", "summary", ".", "errors", ".", "append", "(", "CaughtException", "(", "e", ",", "request", ".", "repo_id", ")", ")" ]
https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/nodes/child/pulp_node/importers/strategies.py#L106-L120
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/torch/data/structures/instances.py
python
Instances.remove
(self, name: str)
Remove the field called `name`.
Remove the field called `name`.
[ "Remove", "the", "field", "called", "name", "." ]
def remove(self, name: str) -> None: """ Remove the field called `name`. """ del self._fields[name]
[ "def", "remove", "(", "self", ",", "name", ":", "str", ")", "->", "None", ":", "del", "self", ".", "_fields", "[", "name", "]" ]
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/torch/data/structures/instances.py#L82-L86
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/docutils/statemachine.py
python
StateWS.first_known_indent
(self, match, context, next_state)
return context, next_state, results
Handle an indented text block (first line's indent known). Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``.
Handle an indented text block (first line's indent known).
[ "Handle", "an", "indented", "text", "block", "(", "first", "line", "s", "indent", "known", ")", "." ]
def first_known_indent(self, match, context, next_state): """ Handle an indented text block (first line's indent known). Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``. """ indented, line_offset, blank_finish = \ self.state_machine.get_first_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results
[ "def", "first_known_indent", "(", "self", ",", "match", ",", "context", ",", "next_state", ")", ":", "indented", ",", "line_offset", ",", "blank_finish", "=", "self", ".", "state_machine", ".", "get_first_known_indented", "(", "match", ".", "end", "(", ")", ")", "sm", "=", "self", ".", "known_indent_sm", "(", "debug", "=", "self", ".", "debug", ",", "*", "*", "self", ".", "known_indent_sm_kwargs", ")", "results", "=", "sm", ".", "run", "(", "indented", ",", "input_offset", "=", "line_offset", ")", "return", "context", ",", "next_state", ",", "results" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/docutils/statemachine.py#L1019-L1034
TRI-ML/packnet-sfm
f59b1d615777a9987285a10e45b5d87b0369fa7d
packnet_sfm/networks/layers/resnet/depth_decoder.py
python
DepthDecoder.forward
(self, input_features)
return self.outputs
[]
def forward(self, input_features): self.outputs = {} # decoder x = input_features[-1] for i in range(4, -1, -1): x = self.convs[("upconv", i, 0)](x) x = [upsample(x)] if self.use_skips and i > 0: x += [input_features[i - 1]] x = torch.cat(x, 1) x = self.convs[("upconv", i, 1)](x) if i in self.scales: self.outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv", i)](x)) return self.outputs
[ "def", "forward", "(", "self", ",", "input_features", ")", ":", "self", ".", "outputs", "=", "{", "}", "# decoder", "x", "=", "input_features", "[", "-", "1", "]", "for", "i", "in", "range", "(", "4", ",", "-", "1", ",", "-", "1", ")", ":", "x", "=", "self", ".", "convs", "[", "(", "\"upconv\"", ",", "i", ",", "0", ")", "]", "(", "x", ")", "x", "=", "[", "upsample", "(", "x", ")", "]", "if", "self", ".", "use_skips", "and", "i", ">", "0", ":", "x", "+=", "[", "input_features", "[", "i", "-", "1", "]", "]", "x", "=", "torch", ".", "cat", "(", "x", ",", "1", ")", "x", "=", "self", ".", "convs", "[", "(", "\"upconv\"", ",", "i", ",", "1", ")", "]", "(", "x", ")", "if", "i", "in", "self", ".", "scales", ":", "self", ".", "outputs", "[", "(", "\"disp\"", ",", "i", ")", "]", "=", "self", ".", "sigmoid", "(", "self", ".", "convs", "[", "(", "\"dispconv\"", ",", "i", ")", "]", "(", "x", ")", ")", "return", "self", ".", "outputs" ]
https://github.com/TRI-ML/packnet-sfm/blob/f59b1d615777a9987285a10e45b5d87b0369fa7d/packnet_sfm/networks/layers/resnet/depth_decoder.py#L49-L64
sanic-org/sanic
8b0eaa097cf4ef13a6b52dce24ae93fb20f73947
sanic/asgi.py
python
ASGIApp.__call__
(self)
Handle the incoming request.
Handle the incoming request.
[ "Handle", "the", "incoming", "request", "." ]
async def __call__(self) -> None: """ Handle the incoming request. """ try: self.stage = Stage.HANDLER await self.sanic_app.handle_request(self.request) except Exception as e: await self.sanic_app.handle_exception(self.request, e)
[ "async", "def", "__call__", "(", "self", ")", "->", "None", ":", "try", ":", "self", ".", "stage", "=", "Stage", ".", "HANDLER", "await", "self", ".", "sanic_app", ".", "handle_request", "(", "self", ".", "request", ")", "except", "Exception", "as", "e", ":", "await", "self", ".", "sanic_app", ".", "handle_exception", "(", "self", ".", "request", ",", "e", ")" ]
https://github.com/sanic-org/sanic/blob/8b0eaa097cf4ef13a6b52dce24ae93fb20f73947/sanic/asgi.py#L215-L223
evennia/evennia
fa79110ba6b219932f22297838e8ac72ebc0be0e
evennia/contrib/ingame_python/utils.py
python
register_events
(path_or_typeclass)
return typeclass
Register the events in this typeclass. Args: path_or_typeclass (str or type): the Python path leading to the class containing events, or the class itself. Returns: The typeclass itself. Notes: This function will read events from the `_events` class variable defined in the typeclass given in parameters. It will add the events, either to the script if it exists, or to some temporary storage, waiting for the script to be initialized.
Register the events in this typeclass.
[ "Register", "the", "events", "in", "this", "typeclass", "." ]
def register_events(path_or_typeclass): """ Register the events in this typeclass. Args: path_or_typeclass (str or type): the Python path leading to the class containing events, or the class itself. Returns: The typeclass itself. Notes: This function will read events from the `_events` class variable defined in the typeclass given in parameters. It will add the events, either to the script if it exists, or to some temporary storage, waiting for the script to be initialized. """ if isinstance(path_or_typeclass, str): typeclass = class_from_module(path_or_typeclass) else: typeclass = path_or_typeclass typeclass_name = typeclass.__module__ + "." + typeclass.__name__ try: storage = ScriptDB.objects.get(db_key="event_handler") assert storage.is_active assert storage.ndb.events is not None except (ScriptDB.DoesNotExist, AssertionError): storage = EVENTS # If the script is started, add the event directly. # Otherwise, add it to the temporary storage. for name, tup in getattr(typeclass, "_events", {}).items(): if len(tup) == 4: variables, help_text, custom_call, custom_add = tup elif len(tup) == 3: variables, help_text, custom_call = tup custom_add = None elif len(tup) == 2: variables, help_text = tup custom_call = None custom_add = None else: variables = help_text = custom_call = custom_add = None if isinstance(storage, list): storage.append((typeclass_name, name, variables, help_text, custom_call, custom_add)) else: storage.add_event(typeclass_name, name, variables, help_text, custom_call, custom_add) return typeclass
[ "def", "register_events", "(", "path_or_typeclass", ")", ":", "if", "isinstance", "(", "path_or_typeclass", ",", "str", ")", ":", "typeclass", "=", "class_from_module", "(", "path_or_typeclass", ")", "else", ":", "typeclass", "=", "path_or_typeclass", "typeclass_name", "=", "typeclass", ".", "__module__", "+", "\".\"", "+", "typeclass", ".", "__name__", "try", ":", "storage", "=", "ScriptDB", ".", "objects", ".", "get", "(", "db_key", "=", "\"event_handler\"", ")", "assert", "storage", ".", "is_active", "assert", "storage", ".", "ndb", ".", "events", "is", "not", "None", "except", "(", "ScriptDB", ".", "DoesNotExist", ",", "AssertionError", ")", ":", "storage", "=", "EVENTS", "# If the script is started, add the event directly.", "# Otherwise, add it to the temporary storage.", "for", "name", ",", "tup", "in", "getattr", "(", "typeclass", ",", "\"_events\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "len", "(", "tup", ")", "==", "4", ":", "variables", ",", "help_text", ",", "custom_call", ",", "custom_add", "=", "tup", "elif", "len", "(", "tup", ")", "==", "3", ":", "variables", ",", "help_text", ",", "custom_call", "=", "tup", "custom_add", "=", "None", "elif", "len", "(", "tup", ")", "==", "2", ":", "variables", ",", "help_text", "=", "tup", "custom_call", "=", "None", "custom_add", "=", "None", "else", ":", "variables", "=", "help_text", "=", "custom_call", "=", "custom_add", "=", "None", "if", "isinstance", "(", "storage", ",", "list", ")", ":", "storage", ".", "append", "(", "(", "typeclass_name", ",", "name", ",", "variables", ",", "help_text", ",", "custom_call", ",", "custom_add", ")", ")", "else", ":", "storage", ".", "add_event", "(", "typeclass_name", ",", "name", ",", "variables", ",", "help_text", ",", "custom_call", ",", "custom_add", ")", "return", "typeclass" ]
https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/contrib/ingame_python/utils.py#L35-L86
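A sketch of the `_events` class variable the loop above consumes; the event names and variables below are made up, and Evennia itself is not imported, so only the tuple-length dispatch is shown:

    _events = {
        "can_delete": (["character"], "Called before deletion.", None, None),  # 4-tuple
        "can_say":    (["character", "message"], "Called on say."),            # 2-tuple
    }

    for name, tup in _events.items():
        if len(tup) == 4:
            variables, help_text, custom_call, custom_add = tup
        elif len(tup) == 2:
            variables, help_text = tup
            custom_call = custom_add = None
        print(name, variables, help_text, custom_call, custom_add)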
pantsbuild/pants
2e126e78ffc40cb108408316b90e8beebee1df9e
src/python/pants/option/parser.py
python
Parser.register
(self, *args, **kwargs)
Register an option.
Register an option.
[ "Register", "an", "option", "." ]
def register(self, *args, **kwargs) -> None: """Register an option.""" if args: dest = self.parse_dest(*args, **kwargs) self._check_deprecated(dest, kwargs, print_warning=False) if self.is_bool(kwargs): default = kwargs.get("default") if default is None: # Unless a tri-state bool is explicitly opted into with the `UnsetBool` default value, # boolean options always have an implicit boolean-typed default. We make that default # explicit here. kwargs["default"] = not self.ensure_bool(kwargs.get("implicit_value", True)) elif default is UnsetBool: kwargs["default"] = None # Record the args. We'll do the underlying parsing on-demand. self._option_registrations.append((args, kwargs)) # Look for direct conflicts. for arg in args: if arg in self._known_args: raise OptionAlreadyRegistered(self.scope, arg) self._known_args.update(args)
[ "def", "register", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "None", ":", "if", "args", ":", "dest", "=", "self", ".", "parse_dest", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_check_deprecated", "(", "dest", ",", "kwargs", ",", "print_warning", "=", "False", ")", "if", "self", ".", "is_bool", "(", "kwargs", ")", ":", "default", "=", "kwargs", ".", "get", "(", "\"default\"", ")", "if", "default", "is", "None", ":", "# Unless a tri-state bool is explicitly opted into with the `UnsetBool` default value,", "# boolean options always have an implicit boolean-typed default. We make that default", "# explicit here.", "kwargs", "[", "\"default\"", "]", "=", "not", "self", ".", "ensure_bool", "(", "kwargs", ".", "get", "(", "\"implicit_value\"", ",", "True", ")", ")", "elif", "default", "is", "UnsetBool", ":", "kwargs", "[", "\"default\"", "]", "=", "None", "# Record the args. We'll do the underlying parsing on-demand.", "self", ".", "_option_registrations", ".", "append", "(", "(", "args", ",", "kwargs", ")", ")", "# Look for direct conflicts.", "for", "arg", "in", "args", ":", "if", "arg", "in", "self", ".", "_known_args", ":", "raise", "OptionAlreadyRegistered", "(", "self", ".", "scope", ",", "arg", ")", "self", ".", "_known_args", ".", "update", "(", "args", ")" ]
https://github.com/pantsbuild/pants/blob/2e126e78ffc40cb108408316b90e8beebee1df9e/src/python/pants/option/parser.py#L317-L340
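The boolean-default branch above reduces to a small rule: unless UnsetBool opts into a tri-state, a bool option's default is the negation of its implicit_value. In isolation (ignoring ensure_bool's string coercion, which the real parser also applies):

    def implied_bool_default(kwargs):
        return not kwargs.get("implicit_value", True)

    print(implied_bool_default({}))                         # False: a plain --flag is off by default
    print(implied_bool_default({"implicit_value": False}))  # True: an inverted flag is on by default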
CaptainEven/RepNet-MDNet-VehicleReID
d3d184331206ca4bdb5ea399e5b90a9ccc53b400
RepNet.py
python
test_car_match_data
(resume, pair_set_txt, img_root, batch_size=16)
return acc, th
:param resume: :param pair_set_txt: :param batch_size: :return:
:param resume: :param pair_set_txt: :param batch_size: :return:
[ ":", "param", "resume", ":", ":", "param", "pair_set_txt", ":", ":", "param", "batch_size", ":", ":", "return", ":" ]
def test_car_match_data(resume, pair_set_txt, img_root, batch_size=16): """ :param resume: :param pair_set_txt: :param batch_size: :return: """ if not os.path.isfile(pair_set_txt): print('=> [Err]: invalid file.') return pairs, imgs_path = [], [] with open(pair_set_txt, 'r', encoding='utf-8') as fh: for line in fh.readlines(): line = line.strip().split() imgs_path.append(img_root + '/' + line[0]) imgs_path.append(img_root + '/' + line[1]) pairs.append(line) print('=> total %d pairs.' % (len(pairs))) print('=> total %d image samples.' % (len(imgs_path))) imgs_path.sort() # compute the feature-vector dictionary feature_map = gen_feature_map(resume=resume, imgs_path=imgs_path, batch_size=batch_size) # compute the similarity of every pair sims, labels = [], [] for pair in pairs: img_path_1 = img_root + '/' + pair[0] img_path_2 = img_root + '/' + pair[1] sim = cosin_metric(feature_map[img_path_1], feature_map[img_path_2]) label = int(pair[2]) sims.append(sim) labels.append(label) # find the best threshold and its corresponding accuracy acc, th = cal_accuracy(sims, labels) print('=> best threshold: %.3f, accuracy: %.3f%%' % (th, acc * 100.0)) return acc, th
[ "def", "test_car_match_data", "(", "resume", ",", "pair_set_txt", ",", "img_root", ",", "batch_size", "=", "16", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "pair_set_txt", ")", ":", "print", "(", "'=> [Err]: invalid file.'", ")", "return", "pairs", ",", "imgs_path", "=", "[", "]", ",", "[", "]", "with", "open", "(", "pair_set_txt", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "for", "line", "in", "fh", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "imgs_path", ".", "append", "(", "img_root", "+", "'/'", "+", "line", "[", "0", "]", ")", "imgs_path", ".", "append", "(", "img_root", "+", "'/'", "+", "line", "[", "1", "]", ")", "pairs", ".", "append", "(", "line", ")", "print", "(", "'=> total %d pairs.'", "%", "(", "len", "(", "pairs", ")", ")", ")", "print", "(", "'=> total %d image samples.'", "%", "(", "len", "(", "imgs_path", ")", ")", ")", "imgs_path", ".", "sort", "(", ")", "# 计算特征向量字典", "feature_map", "=", "gen_feature_map", "(", "resume", "=", "resume", ",", "imgs_path", "=", "imgs_path", ",", "batch_size", "=", "batch_size", ")", "# 计算所有pair的sim", "sims", ",", "labels", "=", "[", "]", ",", "[", "]", "for", "pair", "in", "pairs", ":", "img_path_1", "=", "img_root", "+", "'/'", "+", "pair", "[", "0", "]", "img_path_2", "=", "img_root", "+", "'/'", "+", "pair", "[", "1", "]", "sim", "=", "cosin_metric", "(", "feature_map", "[", "img_path_1", "]", ",", "feature_map", "[", "img_path_2", "]", ")", "label", "=", "int", "(", "pair", "[", "2", "]", ")", "sims", ".", "append", "(", "sim", ")", "labels", ".", "append", "(", "label", ")", "# 统计最佳阈值及其对应的准确率", "acc", ",", "th", "=", "cal_accuracy", "(", "sims", ",", "labels", ")", "print", "(", "'=> best threshold: %.3f, accuracy: %.3f%%'", "%", "(", "th", ",", "acc", "*", "100.0", ")", ")", "return", "acc", ",", "th" ]
https://github.com/CaptainEven/RepNet-MDNet-VehicleReID/blob/d3d184331206ca4bdb5ea399e5b90a9ccc53b400/RepNet.py#L1178-L1225
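The pair scoring above depends on cosin_metric, which is defined elsewhere in RepNet.py; a standard cosine-similarity implementation of the same shape, with a tiny worked example:

    import numpy as np

    def cosine_metric(x1, x2):
        # Cosine of the angle between two feature vectors.
        return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))

    a = np.array([1.0, 0.0, 1.0])
    b = np.array([1.0, 1.0, 0.0])
    print(cosine_metric(a, b))   # 1 / (sqrt(2) * sqrt(2)) = 0.5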
deepinsight/insightface
c0b25f998a649f662c7136eb389abcacd7900e9d
detection/scrfd/mmdet/core/export/pytorch2onnx.py
python
preprocess_example_input
(input_config)
return one_img, one_meta
Prepare an example input image for ``generate_inputs_and_wrap_model``. Args: input_config (dict): customized config describing the example input. Returns: tuple: (one_img, one_meta), tensor of the example input image and \ meta information for the example input image. Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False}
Prepare an example input image for ``generate_inputs_and_wrap_model``.
[ "Prepare", "an", "example", "input", "image", "for", "generate_inputs_and_wrap_model", "." ]
def preprocess_example_input(input_config): """Prepare an example input image for ``generate_inputs_and_wrap_model``. Args: input_config (dict): customized config describing the example input. Returns: tuple: (one_img, one_meta), tensor of the example input image and \ meta information for the example input image. Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} """ input_path = input_config['input_path'] input_shape = input_config['input_shape'] one_img = mmcv.imread(input_path) one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) show_img = one_img.copy() if 'normalize_cfg' in input_config.keys(): normalize_cfg = input_config['normalize_cfg'] mean = np.array(normalize_cfg['mean'], dtype=np.float32) std = np.array(normalize_cfg['std'], dtype=np.float32) one_img = mmcv.imnormalize(one_img, mean, std) one_img = one_img.transpose(2, 0, 1) one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( True) (_, C, H, W) = input_shape one_meta = { 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False, 'show_img': show_img, } return one_img, one_meta
[ "def", "preprocess_example_input", "(", "input_config", ")", ":", "input_path", "=", "input_config", "[", "'input_path'", "]", "input_shape", "=", "input_config", "[", "'input_shape'", "]", "one_img", "=", "mmcv", ".", "imread", "(", "input_path", ")", "one_img", "=", "mmcv", ".", "imresize", "(", "one_img", ",", "input_shape", "[", "2", ":", "]", "[", ":", ":", "-", "1", "]", ")", "show_img", "=", "one_img", ".", "copy", "(", ")", "if", "'normalize_cfg'", "in", "input_config", ".", "keys", "(", ")", ":", "normalize_cfg", "=", "input_config", "[", "'normalize_cfg'", "]", "mean", "=", "np", ".", "array", "(", "normalize_cfg", "[", "'mean'", "]", ",", "dtype", "=", "np", ".", "float32", ")", "std", "=", "np", ".", "array", "(", "normalize_cfg", "[", "'std'", "]", ",", "dtype", "=", "np", ".", "float32", ")", "one_img", "=", "mmcv", ".", "imnormalize", "(", "one_img", ",", "mean", ",", "std", ")", "one_img", "=", "one_img", ".", "transpose", "(", "2", ",", "0", ",", "1", ")", "one_img", "=", "torch", ".", "from_numpy", "(", "one_img", ")", ".", "unsqueeze", "(", "0", ")", ".", "float", "(", ")", ".", "requires_grad_", "(", "True", ")", "(", "_", ",", "C", ",", "H", ",", "W", ")", "=", "input_shape", "one_meta", "=", "{", "'img_shape'", ":", "(", "H", ",", "W", ",", "C", ")", ",", "'ori_shape'", ":", "(", "H", ",", "W", ",", "C", ")", ",", "'pad_shape'", ":", "(", "H", ",", "W", ",", "C", ")", ",", "'filename'", ":", "'<demo>.png'", ",", "'scale_factor'", ":", "1.0", ",", "'flip'", ":", "False", ",", "'show_img'", ":", "show_img", ",", "}", "return", "one_img", ",", "one_meta" ]
https://github.com/deepinsight/insightface/blob/c0b25f998a649f662c7136eb389abcacd7900e9d/detection/scrfd/mmdet/core/export/pytorch2onnx.py#L88-L143
learningequality/kolibri
d056dbc477aaf651ab843caa141a6a1e0a491046
kolibri/plugins/registry.py
python
__initialize
()
return registry
Called once to register hook callbacks.
Called once to register hook callbacks.
[ "Called", "once", "to", "register", "hook", "callbacks", "." ]
def __initialize(): """ Called once to register hook callbacks. """ global __initialized registry = Registry() logger.debug("Loading kolibri plugin registry...") was_configured = settings.configured if was_configured: raise RuntimeError( "Django settings already configured when plugin registry initialized" ) registry.register_plugins(config.ACTIVE_PLUGINS) __initialized = True return registry
[ "def", "__initialize", "(", ")", ":", "global", "__initialized", "registry", "=", "Registry", "(", ")", "logger", ".", "debug", "(", "\"Loading kolibri plugin registry...\"", ")", "was_configured", "=", "settings", ".", "configured", "if", "was_configured", ":", "raise", "RuntimeError", "(", "\"Django settings already configured when plugin registry initialized\"", ")", "registry", ".", "register_plugins", "(", "config", ".", "ACTIVE_PLUGINS", ")", "__initialized", "=", "True", "return", "registry" ]
https://github.com/learningequality/kolibri/blob/d056dbc477aaf651ab843caa141a6a1e0a491046/kolibri/plugins/registry.py#L165-L179
TencentCloud/tencentcloud-sdk-python
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
tencentcloud/ckafka/v20190819/models.py
python
ModifyInstanceAttributesConfig.__init__
(self)
r""" :param AutoCreateTopicEnable: 自动创建 true 表示开启,false 表示不开启 :type AutoCreateTopicEnable: bool :param DefaultNumPartitions: 可选,如果auto.create.topic.enable设置为true没有设置该值时,默认设置为3 :type DefaultNumPartitions: int :param DefaultReplicationFactor: 如歌auto.create.topic.enable设置为true没有指定该值时默认设置为2 :type DefaultReplicationFactor: int
r""" :param AutoCreateTopicEnable: 自动创建 true 表示开启,false 表示不开启 :type AutoCreateTopicEnable: bool :param DefaultNumPartitions: 可选,如果auto.create.topic.enable设置为true没有设置该值时,默认设置为3 :type DefaultNumPartitions: int :param DefaultReplicationFactor: 如歌auto.create.topic.enable设置为true没有指定该值时默认设置为2 :type DefaultReplicationFactor: int
[ "r", ":", "param", "AutoCreateTopicEnable", ":", "自动创建", "true", "表示开启,false", "表示不开启", ":", "type", "AutoCreateTopicEnable", ":", "bool", ":", "param", "DefaultNumPartitions", ":", "可选,如果auto", ".", "create", ".", "topic", ".", "enable设置为true没有设置该值时,默认设置为3", ":", "type", "DefaultNumPartitions", ":", "int", ":", "param", "DefaultReplicationFactor", ":", "如歌auto", ".", "create", ".", "topic", ".", "enable设置为true没有指定该值时默认设置为2", ":", "type", "DefaultReplicationFactor", ":", "int" ]
def __init__(self): r""" :param AutoCreateTopicEnable: whether topics are auto-created: true means enabled, false means disabled :type AutoCreateTopicEnable: bool :param DefaultNumPartitions: optional; if auto.create.topic.enable is set to true and this value is not set, it defaults to 3 :type DefaultNumPartitions: int :param DefaultReplicationFactor: if auto.create.topic.enable is set to true and this value is not specified, it defaults to 2 :type DefaultReplicationFactor: int """ self.AutoCreateTopicEnable = None self.DefaultNumPartitions = None self.DefaultReplicationFactor = None
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "AutoCreateTopicEnable", "=", "None", "self", ".", "DefaultNumPartitions", "=", "None", "self", ".", "DefaultReplicationFactor", "=", "None" ]
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/ckafka/v20190819/models.py#L3967-L3978
sql-machine-learning/elasticdl
f7ded492965791d09d6c2b7dae13134d68af903d
elasticdl/python/common/model_handler.py
python
ParameterServerModelHandler._replace_attr_with_keras_embedding
(model)
return model
Replace the elasticdl.layers.Embedding attributes in the model with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.
Replace the elasticdl.layers.Embedding attributes in the model with `tf.keras.layers.Embedding` or `SparseEmbedding` layers.
[ "Replace", "the", "elasticdl", ".", "layers", ".", "Embedding", "attributes", "in", "the", "model", "with", "tf", ".", "keras", ".", "layers", ".", "Embedding", "or", "SparseEmbedding", "layers", "." ]
def _replace_attr_with_keras_embedding(model): """Replace the elasticdl.layers.Embedding attributes in the model with `tf.keras.layers.Embedding` or `SparseEmbedding` layers. """ for name, value in model.__dict__.items(): if type(value) == Embedding: # The combiner is not None only for SparseEmbedding, if value.combiner is not None: logger.info("Replace elasticdl with SparseEmbedding") embedding_layer = SparseEmbedding( output_dim=value.output_dim, input_dim=value.input_dim, embeddings_initializer=value.embeddings_initializer, combiner=value.combiner, ) else: logger.info( "Replace elasticdl with tf.keras.layers.Embedding" ) embedding_layer = tf.keras.layers.Embedding( output_dim=value.output_dim, input_dim=value.input_dim, embeddings_initializer=value.embeddings_initializer, mask_zero=value.mask_zero, input_length=value.input_length, ) setattr(model, name, embedding_layer) elif type(value) == tf.keras.layers.DenseFeatures: feature_layer = _replace_edl_embedding_column_with_tf(value) setattr(model, name, feature_layer) return model
[ "def", "_replace_attr_with_keras_embedding", "(", "model", ")", ":", "for", "name", ",", "value", "in", "model", ".", "__dict__", ".", "items", "(", ")", ":", "if", "type", "(", "value", ")", "==", "Embedding", ":", "# The combiner is not None only for SparseEmbedding,", "if", "value", ".", "combiner", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Replace elasticdl with SparseEmbedding\"", ")", "embedding_layer", "=", "SparseEmbedding", "(", "output_dim", "=", "value", ".", "output_dim", ",", "input_dim", "=", "value", ".", "input_dim", ",", "embeddings_initializer", "=", "value", ".", "embeddings_initializer", ",", "combiner", "=", "value", ".", "combiner", ",", ")", "else", ":", "logger", ".", "info", "(", "\"Replace elasticdl with tf.kerasl.layers.Embedding\"", ")", "embedding_layer", "=", "tf", ".", "keras", ".", "layers", ".", "Embedding", "(", "output_dim", "=", "value", ".", "output_dim", ",", "input_dim", "=", "value", ".", "input_dim", ",", "embeddings_initializer", "=", "value", ".", "embeddings_initializer", ",", "mask_zero", "=", "value", ".", "mask_zero", ",", "input_length", "=", "value", ".", "input_length", ",", ")", "setattr", "(", "model", ",", "name", ",", "embedding_layer", ")", "elif", "type", "(", "value", ")", "==", "tf", ".", "keras", ".", "layers", ".", "DenseFeatures", ":", "feature_layer", "=", "_replace_edl_embedding_column_with_tf", "(", "value", ")", "setattr", "(", "model", ",", "name", ",", "feature_layer", ")", "return", "model" ]
https://github.com/sql-machine-learning/elasticdl/blob/f7ded492965791d09d6c2b7dae13134d68af903d/elasticdl/python/common/model_handler.py#L431-L461
YosaiProject/yosai
7f96aa6b837ceae9bf3d7387cd7e35f5ab032575
yosai/core/subject/subject.py
python
DelegatingSubject.login
(self, authc_token)
:type authc_token: authc_abcs.AuthenticationToken authc_token's password is cleartext that is stored as a bytearray. The authc_token password is cleared in memory, within the authc_token, when authentication is successful.
:type authc_token: authc_abcs.AuthenticationToken
[ ":", "type", "authc_token", ":", "authc_abcs", ".", "AuthenticationToken" ]
def login(self, authc_token): """ :type authc_token: authc_abcs.AuthenticationToken authc_token's password is cleartext that is stored as a bytearray. The authc_token password is cleared in memory, within the authc_token, when authentication is successful. """ self.clear_run_as_identities_internal() # login raises an AuthenticationException if it fails to authenticate: subject = self.security_manager.login(subject=self, authc_token=authc_token) identifiers = None host = None if isinstance(subject, DelegatingSubject): # directly reference the attributes in case there are assumed # identities (Run-As) -- we don't want to lose the 'real' identifiers identifiers = subject._identifiers host = subject.host else: identifiers = subject.identifiers # use the property accessor if not identifiers: msg = ("Identifiers returned from security_manager.login(authc_token" + ") returned None or empty value. This value must be" + " non-None and populated with one or more elements.") raise ValueError(msg) self._identifiers = identifiers self.authenticated = True if not host: try: host = authc_token.host except AttributeError: # likely not using a HostAuthenticationToken host = None self.host = host session = subject.get_session(False) if session: session.stop_session_callback = self.session_stopped self.session = session else: self.session = None
[ "def", "login", "(", "self", ",", "authc_token", ")", ":", "self", ".", "clear_run_as_identities_internal", "(", ")", "# login raises an AuthenticationException if it fails to authenticate:", "subject", "=", "self", ".", "security_manager", ".", "login", "(", "subject", "=", "self", ",", "authc_token", "=", "authc_token", ")", "identifiers", "=", "None", "host", "=", "None", "if", "isinstance", "(", "subject", ",", "DelegatingSubject", ")", ":", "# directly reference the attributes in case there are assumed", "# identities (Run-As) -- we don't want to lose the 'real' identifiers", "identifiers", "=", "subject", ".", "_identifiers", "host", "=", "subject", ".", "host", "else", ":", "identifiers", "=", "subject", ".", "identifiers", "# use the property accessor", "if", "not", "identifiers", ":", "msg", "=", "(", "\"Identifiers returned from security_manager.login(authc_token\"", "+", "\") returned None or empty value. This value must be\"", "+", "\" non-None and populated with one or more elements.\"", ")", "raise", "ValueError", "(", "msg", ")", "self", ".", "_identifiers", "=", "identifiers", "self", ".", "authenticated", "=", "True", "if", "not", "host", ":", "try", ":", "host", "=", "authc_token", ".", "host", "except", "AttributeError", ":", "# likely not using a HostAuthenticationToken", "host", "=", "None", "self", ".", "host", "=", "host", "session", "=", "subject", ".", "get_session", "(", "False", ")", "if", "session", ":", "session", ".", "stop_session_callback", "=", "self", ".", "session_stopped", "self", ".", "session", "=", "session", "else", ":", "self", ".", "session", "=", "None" ]
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L386-L429
bikalims/bika.lims
35e4bbdb5a3912cae0b5eb13e51097c8b0486349
bika/lims/validators.py
python
NIBvalidator.__call__
(self, value, *args, **kwargs)
return nib[-2] * 10 + nib[-1] == 98 - _sumLists(table, nib[:-2]) % 97
Check the NIB number value:: string with NIB.
Check the NIB number value:: string with NIB.
[ "Check", "the", "NIB", "number", "value", "::", "string", "with", "NIB", "." ]
def __call__(self, value, *args, **kwargs): """ Check the NIB number value:: string with NIB. """ instance = kwargs['instance'] translate = getToolByName(instance, 'translation_service').translate LEN_NIB = 21 table = (73, 17, 89, 38, 62, 45, 53, 15, 50, 5, 49, 34, 81, 76, 27, 90, 9, 30, 3) # convert to entire numbers list nib = _toIntList(value) # checking the length of the number if len(nib) != LEN_NIB: msg = _('Incorrect NIB number: %s' % value) return to_utf8(translate(msg)) # last numbers algorithm validator return nib[-2] * 10 + nib[-1] == 98 - _sumLists(table, nib[:-2]) % 97
[ "def", "__call__", "(", "self", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", "=", "kwargs", "[", "'instance'", "]", "translate", "=", "getToolByName", "(", "instance", ",", "'translation_service'", ")", ".", "translate", "LEN_NIB", "=", "21", "table", "=", "(", "73", ",", "17", ",", "89", ",", "38", ",", "62", ",", "45", ",", "53", ",", "15", ",", "50", ",", "5", ",", "49", ",", "34", ",", "81", ",", "76", ",", "27", ",", "90", ",", "9", ",", "30", ",", "3", ")", "# convert to entire numbers list", "nib", "=", "_toIntList", "(", "value", ")", "# checking the length of the number", "if", "len", "(", "nib", ")", "!=", "LEN_NIB", ":", "msg", "=", "_", "(", "'Incorrect NIB number: %s'", "%", "value", ")", "return", "to_utf8", "(", "translate", "(", "msg", ")", ")", "# last numbers algorithm validator", "return", "nib", "[", "-", "2", "]", "*", "10", "+", "nib", "[", "-", "1", "]", "==", "98", "-", "_sumLists", "(", "table", ",", "nib", "[", ":", "-", "2", "]", ")", "%", "97" ]
https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/validators.py#L984-L1003
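The mod-97 check above, made concrete. The 19 leading digits below are made up; rather than asserting a real NIB, the snippet derives the two check digits the validator would accept:

    table = (73, 17, 89, 38, 62, 45, 53, 15, 50, 5, 49, 34, 81, 76, 27, 90, 9, 30, 3)
    body = [int(d) for d in "0035012345678901234"]       # 19 hypothetical digits
    check = 98 - sum(w * d for w, d in zip(table, body)) % 97
    nib = body + [check // 10, check % 10]               # append the derived check digits
    # This is exactly the condition the validator's final return evaluates:
    assert nib[-2] * 10 + nib[-1] == 98 - sum(w * d for w, d in zip(table, nib[:-2])) % 97
    print(check)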
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/trace.py
python
_Ignore.__init__
(self, modules=None, dirs=None)
[]
def __init__(self, modules=None, dirs=None): self._mods = set() if not modules else set(modules) self._dirs = [] if not dirs else [os.path.normpath(d) for d in dirs] self._ignore = { '<string>': 1 }
[ "def", "__init__", "(", "self", ",", "modules", "=", "None", ",", "dirs", "=", "None", ")", ":", "self", ".", "_mods", "=", "set", "(", ")", "if", "not", "modules", "else", "set", "(", "modules", ")", "self", ".", "_dirs", "=", "[", "]", "if", "not", "dirs", "else", "[", "os", ".", "path", ".", "normpath", "(", "d", ")", "for", "d", "in", "dirs", "]", "self", ".", "_ignore", "=", "{", "'<string>'", ":", "1", "}" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/trace.py#L69-L73
edisonlz/fastor
342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3
base/site-packages/docutils/parsers/rst/states.py
python
RFC2822List.rfc2822
(self, match, context, next_state)
return [], 'RFC2822List', []
RFC2822-style field list item.
RFC2822-style field list item.
[ "RFC2822", "-", "style", "field", "list", "item", "." ]
def rfc2822(self, match, context, next_state): """RFC2822-style field list item.""" field, blank_finish = self.rfc2822_field(match) self.parent += field self.blank_finish = blank_finish return [], 'RFC2822List', []
[ "def", "rfc2822", "(", "self", ",", "match", ",", "context", ",", "next_state", ")", ":", "field", ",", "blank_finish", "=", "self", ".", "rfc2822_field", "(", "match", ")", "self", ".", "parent", "+=", "field", "self", ".", "blank_finish", "=", "blank_finish", "return", "[", "]", ",", "'RFC2822List'", ",", "[", "]" ]
https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/docutils/parsers/rst/states.py#L2510-L2515
SFDO-Tooling/CumulusCI
825ae1f122b25dc41761c52a4ddfa1938d2a4b6e
cumulusci/tasks/bulkdata/snowfakery.py
python
Snowfakery.update_running_totals
(self)
Read and collate result reports from sub-processes/sub-threads
Read and collate result reports from sub-processes/sub-threads
[ "Read", "and", "collate", "result", "reports", "from", "sub", "-", "processes", "/", "sub", "-", "threads" ]
def update_running_totals(self) -> None: """Read and collate result reports from sub-processes/sub-threads""" while True: try: results = self.queue_manager.get_results_report() except Empty: break if "results" in results: self.update_running_totals_from_load_step_results(results["results"]) elif "error" in results: self.logger.warning(f"Error in load: {results}") else: # pragma: no cover self.logger.warning(f"Unexpected message from subtask: {results}")
[ "def", "update_running_totals", "(", "self", ")", "->", "None", ":", "while", "True", ":", "try", ":", "results", "=", "self", ".", "queue_manager", ".", "get_results_report", "(", ")", "except", "Empty", ":", "break", "if", "\"results\"", "in", "results", ":", "self", ".", "update_running_totals_from_load_step_results", "(", "results", "[", "\"results\"", "]", ")", "elif", "\"error\"", "in", "results", ":", "self", ".", "logger", ".", "warning", "(", "f\"Error in load: {results}\"", ")", "else", ":", "# pragma: no cover", "self", ".", "logger", ".", "warning", "(", "f\"Unexpected message from subtask: {results}\"", ")" ]
https://github.com/SFDO-Tooling/CumulusCI/blob/825ae1f122b25dc41761c52a4ddfa1938d2a4b6e/cumulusci/tasks/bulkdata/snowfakery.py#L399-L411
ctxis/CAPE
dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82
lib/cuckoo/common/objects.py
python
File.get_md5
(self)
return self._md5
Get MD5. @return: MD5.
Get MD5.
[ "Get", "MD5", "." ]
def get_md5(self): """Get MD5. @return: MD5. """ if not self._md5: self.calc_hashes() return self._md5
[ "def", "get_md5", "(", "self", ")", ":", "if", "not", "self", ".", "_md5", ":", "self", ".", "calc_hashes", "(", ")", "return", "self", ".", "_md5" ]
https://github.com/ctxis/CAPE/blob/dae9fa6a254ecdbabeb7eb0d2389fa63722c1e82/lib/cuckoo/common/objects.py#L230-L235
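The same compute-once pattern, reduced to a self-contained sketch; hashlib stands in for the hashing that calc_hashes performs in the real class:

    import hashlib

    class LazyFile:
        def __init__(self, data):
            self.data = data
            self._md5 = None
        def calc_hashes(self):
            self._md5 = hashlib.md5(self.data).hexdigest()
        def get_md5(self):
            if not self._md5:                 # hash only on first access
                self.calc_hashes()
            return self._md5

    print(LazyFile(b"sample").get_md5())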
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/importlib/_bootstrap.py
python
FrozenImporter.is_package
(cls, fullname)
return _imp.is_frozen_package(fullname)
Return True if the frozen module is a package.
Return True if the frozen module is a package.
[ "Return", "True", "if", "the", "frozen", "module", "is", "a", "package", "." ]
def is_package(cls, fullname): """Return True if the frozen module is a package.""" return _imp.is_frozen_package(fullname)
[ "def", "is_package", "(", "cls", ",", "fullname", ")", ":", "return", "_imp", ".", "is_frozen_package", "(", "fullname", ")" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/importlib/_bootstrap.py#L734-L736
CouchPotato/CouchPotatoV1
135b3331d1b88ef645e29b76f2d4cc4a732c9232
cherrypy/lib/sessions.py
python
FileSession.setup
(cls, **kwargs)
Set up the storage system for file-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does).
Set up the storage system for file-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does).
[ "Set", "up", "the", "storage", "system", "for", "file", "-", "based", "sessions", ".", "This", "should", "only", "be", "called", "once", "per", "process", ";", "this", "will", "be", "done", "automatically", "when", "using", "sessions", ".", "init", "(", "as", "the", "built", "-", "in", "Tool", "does", ")", "." ]
def setup(cls, **kwargs): """Set up the storage system for file-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does). """ # The 'storage_path' arg is required for file-based sessions. kwargs['storage_path'] = os.path.abspath(kwargs['storage_path']) for k, v in kwargs.items(): setattr(cls, k, v) # Warn if any lock files exist at startup. lockfiles = [fname for fname in os.listdir(cls.storage_path) if (fname.startswith(cls.SESSION_PREFIX) and fname.endswith(cls.LOCK_SUFFIX))] if lockfiles: plural = ('', 's')[len(lockfiles) > 1] warn("%s session lockfile%s found at startup. If you are " "only running one process, then you may need to " "manually delete the lockfiles found at %r." % (len(lockfiles), plural, cls.storage_path))
[ "def", "setup", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "# The 'storage_path' arg is required for file-based sessions.", "kwargs", "[", "'storage_path'", "]", "=", "os", ".", "path", ".", "abspath", "(", "kwargs", "[", "'storage_path'", "]", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "cls", ",", "k", ",", "v", ")", "# Warn if any lock files exist at startup.", "lockfiles", "=", "[", "fname", "for", "fname", "in", "os", ".", "listdir", "(", "cls", ".", "storage_path", ")", "if", "(", "fname", ".", "startswith", "(", "cls", ".", "SESSION_PREFIX", ")", "and", "fname", ".", "endswith", "(", "cls", ".", "LOCK_SUFFIX", ")", ")", "]", "if", "lockfiles", ":", "plural", "=", "(", "''", ",", "'s'", ")", "[", "len", "(", "lockfiles", ")", ">", "1", "]", "warn", "(", "\"%s session lockfile%s found at startup. If you are \"", "\"only running one process, then you may need to \"", "\"manually delete the lockfiles found at %r.\"", "%", "(", "len", "(", "lockfiles", ")", ",", "plural", ",", "cls", ".", "storage_path", ")", ")" ]
https://github.com/CouchPotato/CouchPotatoV1/blob/135b3331d1b88ef645e29b76f2d4cc4a732c9232/cherrypy/lib/sessions.py#L401-L422
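The lockfile warning uses a compact pluralization idiom, ('', 's')[condition], which indexes a 2-tuple with a boolean. A standalone sketch of the same scan, with hypothetical prefix/suffix values in place of cherrypy's class constants:

import os
from warnings import warn

SESSION_PREFIX = 'session-'  # hypothetical values; cherrypy defines its own
LOCK_SUFFIX = '.lock'
storage_path = os.path.abspath('sessions')
os.makedirs(storage_path, exist_ok=True)

lockfiles = [fname for fname in os.listdir(storage_path)
             if fname.startswith(SESSION_PREFIX) and fname.endswith(LOCK_SUFFIX)]
if lockfiles:
    # the bool indexes the tuple: False -> '', True -> 's'
    plural = ('', 's')[len(lockfiles) > 1]
    warn("%d session lockfile%s found at %r." % (len(lockfiles), plural, storage_path))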
zach-morris/plugin.program.iagl
482071af023c826804e3201343543efc266119a1
resources/lib/launch.py
python
iagl_launch.post_launch_check
(self,game_launch_status=None,**kwargs)
[]
def post_launch_check(self, game_launch_status=None, **kwargs):
    if game_launch_status and game_launch_status['launch_process'] and not self.settings.get('ext_launchers').get('close_kodi'):
        from subprocess import TimeoutExpired
        dp = xbmcgui.DialogProgress()
        dp.create(loc_str(30377), loc_str(30380))
        dp.update(0, loc_str(30380))
        perc = 0
        finished = False
        check = None
        while not finished:
            try:
                check = game_launch_status['launch_process'].wait(timeout=WAIT_FOR_PROCESS_EXIT)
            except TimeoutExpired:
                perc = perc + 10
                dp.update(perc % 100, loc_str(30380))
                finished = False
                check = None
            if dp.iscanceled():
                finished = True
                dp.close()
                break
            if isinstance(check, int):
                finished = True
                dp.close()
                break
            if finished:
                dp.close()
                break
        del dp
    if not self.settings.get('ext_launchers').get('close_kodi') and self.settings.get('ext_launchers').get('stop_audio_controller') and self.settings.get('ext_launchers').get('environment') not in ['android', 'android_ra32', 'android_aarch64']:
        xbmc.log(msg='IAGL: Re-Enabling Audio and Controller Input', level=xbmc.LOGDEBUG)
        xbmc.audioResume()
        xbmc.enableNavSounds(True)
        xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Settings.SetSettingValue","params":{"setting":"input.enablejoystick","value":true},"id":"1"}')
[ "def", "post_launch_check", "(", "self", ",", "game_launch_status", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "game_launch_status", "and", "game_launch_status", "[", "'launch_process'", "]", "and", "not", "self", ".", "settings", ".", "get", "(", "'ext_launchers'", ")", ".", "get", "(", "'close_kodi'", ")", ":", "from", "subprocess", "import", "TimeoutExpired", "dp", "=", "xbmcgui", ".", "DialogProgress", "(", ")", "dp", ".", "create", "(", "loc_str", "(", "30377", ")", ",", "loc_str", "(", "30380", ")", ")", "dp", ".", "update", "(", "0", ",", "loc_str", "(", "30380", ")", ")", "perc", "=", "0", "finished", "=", "False", "check", "=", "None", "while", "not", "finished", ":", "try", ":", "check", "=", "game_launch_status", "[", "'launch_process'", "]", ".", "wait", "(", "timeout", "=", "WAIT_FOR_PROCESS_EXIT", ")", "except", "TimeoutExpired", ":", "perc", "=", "perc", "+", "10", "dp", ".", "update", "(", "perc", "%", "100", ",", "loc_str", "(", "30380", ")", ")", "finished", "=", "False", "check", "=", "None", "if", "dp", ".", "iscanceled", "(", ")", ":", "finished", "=", "True", "dp", ".", "close", "(", ")", "break", "if", "isinstance", "(", "check", ",", "int", ")", ":", "finished", "=", "True", "dp", ".", "close", "(", ")", "break", "if", "finished", ":", "dp", ".", "close", "(", ")", "break", "del", "dp", "if", "not", "self", ".", "settings", ".", "get", "(", "'ext_launchers'", ")", ".", "get", "(", "'close_kodi'", ")", "and", "self", ".", "settings", ".", "get", "(", "'ext_launchers'", ")", ".", "get", "(", "'stop_audio_controller'", ")", "and", "self", ".", "settings", ".", "get", "(", "'ext_launchers'", ")", ".", "get", "(", "'environment'", ")", "not", "in", "[", "'android'", ",", "'android_ra32'", ",", "'android_aarch64'", "]", ":", "xbmc", ".", "log", "(", "msg", "=", "'IAGL: Re-Enabling Audio and Controller Input'", ",", "level", "=", "xbmc", ".", "LOGDEBUG", ")", "xbmc", ".", "audioResume", "(", ")", "xbmc", ".", "enableNavSounds", "(", "True", ")", "xbmc", ".", "executeJSONRPC", "(", "'{\"jsonrpc\":\"2.0\",\"method\":\"Settings.SetSettingValue\",\"params\":{\"setting\":\"input.enablejoystick\",\"value\":true},\"id\":\"1\"}'", ")" ]
https://github.com/zach-morris/plugin.program.iagl/blob/482071af023c826804e3201343543efc266119a1/resources/lib/launch.py#L67-L100
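The core of the loop is polling a subprocess in short slices so the progress dialog can be refreshed between checks. The same pattern outside Kodi, as a runnable sketch (the 0.5s slice stands in for WAIT_FOR_PROCESS_EXIT, and the child process is a hypothetical stand-in for the launcher):

import sys
import subprocess

# Stand-in for the external launcher: a child that runs for ~2 seconds.
proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(2)'])
check = None
while check is None:
    try:
        check = proc.wait(timeout=0.5)  # returns the exit code once done
    except subprocess.TimeoutExpired:
        pass  # a UI would update its progress dialog here
print('exit code:', check)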
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/oauthlib/openid/connect/core/request_validator.py
python
RequestValidator.validate_silent_login
(self, request)
Ensure session user has authorized silent OpenID login. If no user is logged in or has not authorized silent login, this method should return False. If the user is logged in but associated with multiple accounts and not selected which one to link to the token then this method should raise an oauthlib.oauth2.AccountSelectionRequired error. :param request: OAuthlib request. :type request: oauthlib.common.Request :rtype: True or False Method is used by: - OpenIDConnectAuthCode - OpenIDConnectImplicit - OpenIDConnectHybrid
Ensure session user has authorized silent OpenID login.
[ "Ensure", "session", "user", "has", "authorized", "silent", "OpenID", "login", "." ]
def validate_silent_login(self, request):
    """Ensure session user has authorized silent OpenID login.

    If no user is logged in or has not authorized silent login, this
    method should return False.

    If the user is logged in but associated with multiple accounts and
    not selected which one to link to the token then this method should
    raise an oauthlib.oauth2.AccountSelectionRequired error.

    :param request: OAuthlib request.
    :type request: oauthlib.common.Request
    :rtype: True or False

    Method is used by:
        - OpenIDConnectAuthCode
        - OpenIDConnectImplicit
        - OpenIDConnectHybrid
    """
    raise NotImplementedError('Subclasses must implement this method.')
[ "def", "validate_silent_login", "(", "self", ",", "request", ")", ":", "raise", "NotImplementedError", "(", "'Subclasses must implement this method.'", ")" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/oauthlib/openid/connect/core/request_validator.py#L156-L175
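Since this is an abstract hook, applications override it in their own validator. A hedged sketch, assuming oauthlib's public re-export of RequestValidator and a hypothetical user object carried on the request:

from oauthlib.openid import RequestValidator

class MyValidator(RequestValidator):
    def validate_silent_login(self, request):
        # Hypothetical session lookup; a real validator would consult
        # its web framework's session store instead.
        user = getattr(request, 'user', None)
        return user is not None and getattr(user, 'allows_silent_login', False)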
openstack/ironic
b392dc19bcd29cef5a69ec00d2f18a7a19a679e5
ironic/api/controllers/v1/utils.py
python
is_valid_logical_name
(name)
Determine if the provided name is a valid hostname.
Determine if the provided name is a valid hostname.
[ "Determine", "if", "the", "provided", "name", "is", "a", "valid", "hostname", "." ]
def is_valid_logical_name(name):
    """Determine if the provided name is a valid hostname."""
    if api.request.version.minor < versions.MINOR_10_UNRESTRICTED_NODE_NAME:
        return utils.is_hostname_safe(name)
    else:
        return utils.is_valid_logical_name(name)
[ "def", "is_valid_logical_name", "(", "name", ")", ":", "if", "api", ".", "request", ".", "version", ".", "minor", "<", "versions", ".", "MINOR_10_UNRESTRICTED_NODE_NAME", ":", "return", "utils", ".", "is_hostname_safe", "(", "name", ")", "else", ":", "return", "utils", ".", "is_valid_logical_name", "(", "name", ")" ]
https://github.com/openstack/ironic/blob/b392dc19bcd29cef5a69ec00d2f18a7a19a679e5/ironic/api/controllers/v1/utils.py#L686-L691
saulpw/visidata
577f34127c09116e3cbe1fcb3f67d54484785ae7
visidata/loaders/rec.py
python
decode_multiline
(line, fp)
Parse *line* and lookahead into *fp* as iterator for continuing lines. Return (multiline, next_line) where *multiline* can contain newlines and *next_line* is the line after the combined *multiline*. Handle "\\" at end and "+" at beginning of lines. *next_line* will be None iff iterator is exhausted.
Parse *line* and lookahead into *fp* as iterator for continuing lines. Return (multiline, next_line) where *multiline* can contain newlines and *next_line* is the line after the combined *multiline*. Handle "\\" at end and "+" at beginning of lines. *next_line* will be None iff iterator is exhausted.
[ "Parse", "*", "line", "*", "and", "lookahead", "into", "*", "fp", "*", "as", "iterator", "for", "continuing", "lines", ".", "Return", "(", "multiline", "next_line", ")", "where", "*", "multiline", "*", "can", "contain", "newlines", "and", "*", "next_line", "is", "the", "line", "after", "the", "combined", "*", "multiline", "*", ".", "Handle", "\\\\", "at", "end", "and", "+", "at", "beginning", "of", "lines", ".", "*", "next_line", "*", "will", "be", "None", "iff", "iterator", "is", "exhausted", "." ]
def decode_multiline(line, fp):
    'Parse *line* and lookahead into *fp* as iterator for continuing lines. Return (multiline, next_line) where *multiline* can contain newlines and *next_line* is the line after the combined *multiline*. Handle "\\" at end and "+" at beginning of lines. *next_line* will be None iff iterator is exhausted.'
    while True:
        try:
            next_line = next(fp)
        except StopIteration:
            return line, None

        if line.endswith('\\'):
            line = line[:-1] + next_line
        elif next_line.startswith('+'):
            # strip leading r'+ ?'
            next_line = next_line[2:] if next_line.startswith('+ ') else next_line[1:]
            line += '\n' + next_line
        else:
            return line, next_line
[ "def", "decode_multiline", "(", "line", ",", "fp", ")", ":", "while", "True", ":", "try", ":", "next_line", "=", "next", "(", "fp", ")", "except", "StopIteration", ":", "return", "line", ",", "None", "if", "line", ".", "endswith", "(", "'\\\\'", ")", ":", "line", "=", "line", "[", ":", "-", "1", "]", "+", "next_line", "elif", "next_line", ".", "startswith", "(", "'+'", ")", ":", "# strip leading r'+ ?'", "next_line", "=", "next_line", "[", "2", ":", "]", "if", "next_line", ".", "startswith", "(", "'+ '", ")", "else", "next_line", "[", "1", ":", "]", "line", "+=", "'\\n'", "+", "next_line", "else", ":", "return", "line", ",", "next_line" ]
https://github.com/saulpw/visidata/blob/577f34127c09116e3cbe1fcb3f67d54484785ae7/visidata/loaders/rec.py#L10-L25
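A worked example of the two continuation styles, assuming decode_multiline is in scope (e.g. imported from visidata.loaders.rec): a trailing backslash splices the next physical line directly, while a leading '+' appends it after a newline.

first = 'note: first physical line \\'  # trailing backslash joins the next line
lines = iter([
    'and the second physical line',
    '+ a plus-continued line',          # '+ ' prefix stripped, joined with '\n'
    'next: another field',
])
multiline, next_line = decode_multiline(first, lines)
# multiline == 'note: first physical line and the second physical line\na plus-continued line'
# next_line == 'next: another field'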
cackharot/suds-py3
1d92cc6297efee31bfd94b50b99c431505d7de21
suds/sax/element.py
python
PrefixNormalizer.genPrefixes
(self)
return prefixes
Generate a I{reverse} mapping of unique prefixes for all namespaces. @return: A reverse dict of prefixes. @rtype: {u, p}
Generate a I{reverse} mapping of unique prefixes for all namespaces.
[ "Generate", "a", "I", "{", "reverse", "}", "mapping", "of", "unique", "prefixes", "for", "all", "namespaces", "." ]
def genPrefixes(self):
    """
    Generate a I{reverse} mapping of unique prefixes for all namespaces.
    @return: A reverse dict of prefixes.
    @rtype: {u, p}
    """
    prefixes = {}
    n = 0
    for u in self.namespaces:
        p = 'ns%d' % n
        prefixes[u] = p
        n += 1
    return prefixes
[ "def", "genPrefixes", "(", "self", ")", ":", "prefixes", "=", "{", "}", "n", "=", "0", "for", "u", "in", "self", ".", "namespaces", ":", "p", "=", "'ns%d'", "%", "n", "prefixes", "[", "u", "]", "=", "p", "n", "+=", "1", "return", "prefixes" ]
https://github.com/cackharot/suds-py3/blob/1d92cc6297efee31bfd94b50b99c431505d7de21/suds/sax/element.py#L1050-L1062
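The hand-maintained counter can be expressed with enumerate; a standalone sketch of the same ns0, ns1, ... assignment (not suds API, just the idea):

namespaces = ['http://schemas.xmlsoap.org/soap/envelope/', 'http://example.org/ns']
prefixes = {u: 'ns%d' % n for n, u in enumerate(namespaces)}
# {'http://schemas.xmlsoap.org/soap/envelope/': 'ns0', 'http://example.org/ns': 'ns1'}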
moinwiki/moin
568f223231aadecbd3b21a701ec02271f8d8021d
src/moin/storage/middleware/indexing.py
python
PropertiesMixin.fqname
(self)
return self._fqname(self.name)
return the fully qualified name including the namespace: NS:NAME
return the fully qualified name including the namespace: NS:NAME
[ "return", "the", "fully", "qualified", "name", "including", "the", "namespace", ":", "NS", ":", "NAME" ]
def fqname(self):
    """
    return the fully qualified name including the namespace: NS:NAME
    """
    return self._fqname(self.name)
[ "def", "fqname", "(", "self", ")", ":", "return", "self", ".", "_fqname", "(", "self", ".", "name", ")" ]
https://github.com/moinwiki/moin/blob/568f223231aadecbd3b21a701ec02271f8d8021d/src/moin/storage/middleware/indexing.py#L970-L974
Axelrod-Python/Axelrod
00e18323c1b1af74df873773e44f31e1b9a299c6
axelrod/strategies/finite_state_machines.py
python
EvolvableFSMPlayer.__init__
( self, transitions: tuple = None, initial_state: int = None, initial_action: Action = None, num_states: int = None, mutation_probability: float = 0.1, seed: int = None, )
If transitions, initial_state, and initial_action are None then generate random parameters using num_states.
If transitions, initial_state, and initial_action are None then generate random parameters using num_states.
[ "If", "transitions", "initial_state", "and", "initial_action", "are", "None", "then", "generate", "random", "parameters", "using", "num_states", "." ]
def __init__(
    self,
    transitions: tuple = None,
    initial_state: int = None,
    initial_action: Action = None,
    num_states: int = None,
    mutation_probability: float = 0.1,
    seed: int = None,
) -> None:
    """If transitions, initial_state, and initial_action are None
    then generate random parameters using num_states."""
    EvolvablePlayer.__init__(self, seed=seed)
    (
        transitions,
        initial_state,
        initial_action,
        num_states,
    ) = self._normalize_parameters(
        transitions, initial_state, initial_action, num_states
    )
    FSMPlayer.__init__(
        self,
        transitions=transitions,
        initial_state=initial_state,
        initial_action=initial_action,
    )
    self.mutation_probability = mutation_probability
    self.overwrite_init_kwargs(
        transitions=transitions,
        initial_state=initial_state,
        initial_action=initial_action,
        num_states=self.num_states,
    )
[ "def", "__init__", "(", "self", ",", "transitions", ":", "tuple", "=", "None", ",", "initial_state", ":", "int", "=", "None", ",", "initial_action", ":", "Action", "=", "None", ",", "num_states", ":", "int", "=", "None", ",", "mutation_probability", ":", "float", "=", "0.1", ",", "seed", ":", "int", "=", "None", ",", ")", "->", "None", ":", "EvolvablePlayer", ".", "__init__", "(", "self", ",", "seed", "=", "seed", ")", "(", "transitions", ",", "initial_state", ",", "initial_action", ",", "num_states", ",", ")", "=", "self", ".", "_normalize_parameters", "(", "transitions", ",", "initial_state", ",", "initial_action", ",", "num_states", ")", "FSMPlayer", ".", "__init__", "(", "self", ",", "transitions", "=", "transitions", ",", "initial_state", "=", "initial_state", ",", "initial_action", "=", "initial_action", ",", ")", "self", ".", "mutation_probability", "=", "mutation_probability", "self", ".", "overwrite_init_kwargs", "(", "transitions", "=", "transitions", ",", "initial_state", "=", "initial_state", ",", "initial_action", "=", "initial_action", ",", "num_states", "=", "self", ".", "num_states", ",", ")" ]
https://github.com/Axelrod-Python/Axelrod/blob/00e18323c1b1af74df873773e44f31e1b9a299c6/axelrod/strategies/finite_state_machines.py#L146-L178
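A hedged usage sketch, assuming the axelrod package is installed and exports the class at top level: with the machine parameters left as None, passing only num_states and a seed triggers the random-generation path described in the docstring.

import axelrod as axl

# transitions/initial_state/initial_action default to None, so a random
# 4-state machine is drawn deterministically from the seed.
player = axl.EvolvableFSMPlayer(num_states=4, mutation_probability=0.1, seed=1)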
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/op2/tables/oes_stressStrain/complex/oes_rods.py
python
ComplexRodArray.add_sort1
(self, dt, eid, axial, torsion)
unvectorized method for adding SORT1 transient data
unvectorized method for adding SORT1 transient data
[ "unvectorized", "method", "for", "adding", "SORT1", "transient", "data" ]
def add_sort1(self, dt, eid, axial, torsion):
    """unvectorized method for adding SORT1 transient data"""
    assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
    self._times[self.itime] = dt
    self.element[self.ielement] = eid
    self.data[self.itime, self.ielement, :] = [axial, torsion]
    self.ielement += 1
[ "def", "add_sort1", "(", "self", ",", "dt", ",", "eid", ",", "axial", ",", "torsion", ")", ":", "assert", "isinstance", "(", "eid", ",", "integer_types", ")", "and", "eid", ">", "0", ",", "'dt=%s eid=%s'", "%", "(", "dt", ",", "eid", ")", "self", ".", "_times", "[", "self", ".", "itime", "]", "=", "dt", "self", ".", "element", "[", "self", ".", "ielement", "]", "=", "eid", "self", ".", "data", "[", "self", ".", "itime", ",", "self", ".", "ielement", ",", ":", "]", "=", "[", "axial", ",", "torsion", "]", "self", ".", "ielement", "+=", "1" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/op2/tables/oes_stressStrain/complex/oes_rods.py#L117-L123
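The method fills preallocated arrays indexed by (time, element) instead of appending to Python lists. The storage pattern in isolation, as a runnable numpy sketch (array shapes and dtypes are illustrative assumptions):

import numpy as np

ntimes, nelements = 2, 3
_times = np.zeros(ntimes)
element = np.zeros(nelements, dtype='int32')
data = np.zeros((ntimes, nelements, 2), dtype='complex64')  # [axial, torsion]

itime, ielement = 0, 0
_times[itime] = 0.1
element[ielement] = 42
data[itime, ielement, :] = [1 + 2j, 3 + 4j]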
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_storageclass.py
python
OpenShiftCLI._delete
(self, resource, name=None, selector=None)
return self.openshift_cmd(cmd)
call oc delete on a resource
call oc delete on a resource
[ "call", "oc", "delete", "on", "a", "resource" ]
def _delete(self, resource, name=None, selector=None):
    '''call oc delete on a resource'''
    cmd = ['delete', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    else:
        raise OpenShiftCLIError('Either name or selector is required when calling delete.')

    return self.openshift_cmd(cmd)
[ "def", "_delete", "(", "self", ",", "resource", ",", "name", "=", "None", ",", "selector", "=", "None", ")", ":", "cmd", "=", "[", "'delete'", ",", "resource", "]", "if", "selector", "is", "not", "None", ":", "cmd", ".", "append", "(", "'--selector={}'", ".", "format", "(", "selector", ")", ")", "elif", "name", "is", "not", "None", ":", "cmd", ".", "append", "(", "name", ")", "else", ":", "raise", "OpenShiftCLIError", "(", "'Either name or selector is required when calling delete.'", ")", "return", "self", ".", "openshift_cmd", "(", "cmd", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_storageclass.py#L960-L970
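Note that selector takes precedence over name when both are given. The argument-building logic in isolation, as a hypothetical standalone helper (build_delete_cmd is not part of the module above):

def build_delete_cmd(resource, name=None, selector=None):
    cmd = ['delete', resource]
    if selector is not None:
        cmd.append('--selector={}'.format(selector))
    elif name is not None:
        cmd.append(name)
    else:
        raise ValueError('Either name or selector is required.')
    return cmd

build_delete_cmd('storageclass', selector='tier=gold')
# ['delete', 'storageclass', '--selector=tier=gold']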
ChenglongChen/tensorflow-DSMM
52a499a162f3837aa11bb1bb4c1029accfe5743d
src/tf_common/optimizer.py
python
LazyAddSignOptimizer._apply_dense
(self, grad, var)
return control_flow_ops.group(*[var_update, m_t])
[]
def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
    alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
    eps = 1e-7  # cap for moving average

    m = self.get_slot(var, "m")
    m_t = m.assign(tf.maximum(beta_t * m + eps, tf.abs(grad)))

    var_update = state_ops.assign_sub(var, lr_t * grad * (1.0 + alpha_t * tf.sign(grad) * tf.sign(m_t)))
    # Create an op that groups multiple operations
    # When this op finishes, all ops in input have finished
    return control_flow_ops.group(*[var_update, m_t])
[ "def", "_apply_dense", "(", "self", ",", "grad", ",", "var", ")", ":", "lr_t", "=", "math_ops", ".", "cast", "(", "self", ".", "_lr_t", ",", "var", ".", "dtype", ".", "base_dtype", ")", "beta_t", "=", "math_ops", ".", "cast", "(", "self", ".", "_beta_t", ",", "var", ".", "dtype", ".", "base_dtype", ")", "alpha_t", "=", "math_ops", ".", "cast", "(", "self", ".", "_alpha_t", ",", "var", ".", "dtype", ".", "base_dtype", ")", "eps", "=", "1e-7", "# cap for moving average", "m", "=", "self", ".", "get_slot", "(", "var", ",", "\"m\"", ")", "m_t", "=", "m", ".", "assign", "(", "tf", ".", "maximum", "(", "beta_t", "*", "m", "+", "eps", ",", "tf", ".", "abs", "(", "grad", ")", ")", ")", "var_update", "=", "state_ops", ".", "assign_sub", "(", "var", ",", "lr_t", "*", "grad", "*", "(", "1.0", "+", "alpha_t", "*", "tf", ".", "sign", "(", "grad", ")", "*", "tf", ".", "sign", "(", "m_t", ")", ")", ")", "# Create an op that groups multiple operations", "# When this op finishes, all ops in input have finished", "return", "control_flow_ops", ".", "group", "(", "*", "[", "var_update", ",", "m_t", "]", ")" ]
https://github.com/ChenglongChen/tensorflow-DSMM/blob/52a499a162f3837aa11bb1bb4c1029accfe5743d/src/tf_common/optimizer.py#L110-L123
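The update rule above, written out in plain numpy as a sketch (the hyperparameter values are illustrative): the slot m is capped at max(beta*m + eps, |g|), then the step is scaled by 1 + alpha*sign(g)*sign(m).

import numpy as np

lr, beta, alpha, eps = 0.01, 0.9, 1.0, 1e-7
w = np.array([0.5, -0.3])
m = np.zeros_like(w)
g = np.array([0.2, -0.1])  # hypothetical gradient

m = np.maximum(beta * m + eps, np.abs(g))
w -= lr * g * (1.0 + alpha * np.sign(g) * np.sign(m))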
INK-USC/KagNet
b386661ac5841774b9d17cc132e991a7bef3c5ef
grounding/grounding_concepts.py
python
test
()
[]
def test():
    nlp = spacy.load('en_core_web_sm', disable=['ner', 'parser', 'textcat'])
    nlp.add_pipe(nlp.create_pipe('sentencizer'))
    res = match_mentioned_concepts(nlp, sents=["Sometimes people say that someone stupid has no swimming pool."], answers=["swimming pool"])
    print(res)
[ "def", "test", "(", ")", ":", "nlp", "=", "spacy", ".", "load", "(", "'en_core_web_sm'", ",", "disable", "=", "[", "'ner'", ",", "'parser'", ",", "'textcat'", "]", ")", "nlp", ".", "add_pipe", "(", "nlp", ".", "create_pipe", "(", "'sentencizer'", ")", ")", "res", "=", "match_mentioned_concepts", "(", "nlp", ",", "sents", "=", "[", "\"Sometimes people say that someone stupid has no swimming pool.\"", "]", ",", "answers", "=", "[", "\"swimming pool\"", "]", ")", "print", "(", "res", ")" ]
https://github.com/INK-USC/KagNet/blob/b386661ac5841774b9d17cc132e991a7bef3c5ef/grounding/grounding_concepts.py#L171-L175
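This uses the spaCy 2.x pipeline API; nlp.create_pipe was removed in spaCy 3.x, where components are added by string name instead. The equivalent setup under 3.x, assuming the en_core_web_sm model is installed:

import spacy

nlp = spacy.load('en_core_web_sm', disable=['ner', 'parser'])
nlp.add_pipe('sentencizer')  # spaCy 3.x registers components by name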
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
ext/tornado/routing.py
python
ReversibleRouter.reverse_url
(self, name: str, *args: Any)
Returns url string for a given route name and arguments or ``None`` if no match is found. :arg str name: route name. :arg args: url parameters. :returns: parametrized url string for a given route name (or ``None``).
Returns url string for a given route name and arguments or ``None`` if no match is found.
[ "Returns", "url", "string", "for", "a", "given", "route", "name", "and", "arguments", "or", "None", "if", "no", "match", "is", "found", "." ]
def reverse_url(self, name: str, *args: Any) -> Optional[str]:
    """Returns url string for a given route name and arguments
    or ``None`` if no match is found.

    :arg str name: route name.
    :arg args: url parameters.

    :returns: parametrized url string for a given route name (or ``None``).
    """
    raise NotImplementedError()
[ "def", "reverse_url", "(", "self", ",", "name", ":", "str", ",", "*", "args", ":", "Any", ")", "->", "Optional", "[", "str", "]", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/tornado/routing.py#L218-L226
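Tornado's own Application is a ReversibleRouter, resolving named URLSpecs; a small sketch of the interface in use (handler and route names are illustrative):

import tornado.web

class EntryHandler(tornado.web.RequestHandler):
    def get(self, slug):
        self.write(slug)

app = tornado.web.Application([
    tornado.web.url(r'/entry/([^/]+)', EntryHandler, name='entry'),
])
print(app.reverse_url('entry', 'hello'))  # /entry/hello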
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/distlib/database.py
python
Distribution.__init__
(self, metadata)
Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution.
Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution.
[ "Initialise", "an", "instance", ".", ":", "param", "metadata", ":", "The", "instance", "of", ":", "class", ":", "Metadata", "describing", "this", "distribution", "." ]
def __init__(self, metadata):
    """
    Initialise an instance.
    :param metadata: The instance of :class:`Metadata` describing this
                     distribution.
    """
    self.metadata = metadata
    self.name = metadata.name
    self.key = self.name.lower()  # for case-insensitive comparisons
    self.version = metadata.version
    self.locator = None
    self.digest = None
    self.extras = None    # additional features requested
    self.context = None   # environment marker overrides
    self.download_urls = set()
    self.digests = {}
[ "def", "__init__", "(", "self", ",", "metadata", ")", ":", "self", ".", "metadata", "=", "metadata", "self", ".", "name", "=", "metadata", ".", "name", "self", ".", "key", "=", "self", ".", "name", ".", "lower", "(", ")", "# for case-insensitive comparisons", "self", ".", "version", "=", "metadata", ".", "version", "self", ".", "locator", "=", "None", "self", ".", "digest", "=", "None", "self", ".", "extras", "=", "None", "# additional features requested", "self", ".", "context", "=", "None", "# environment marker overrides", "self", ".", "download_urls", "=", "set", "(", ")", "self", ".", "digests", "=", "{", "}" ]
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/pip/_vendor/distlib/database.py#L332-L347
JoneXiong/YouPBX
e24a8b74814761bc90fd86f4217c92ec7238874b
pbx/db/operates/plivohelper.py
python
REST.conference_hangup
(self, call_params)
return self.request(path, method, call_params)
REST Conference Hangup helper
REST Conference Hangup helper
[ "REST", "Conference", "Hangup", "helper" ]
def conference_hangup(self, call_params):
    """REST Conference Hangup helper
    """
    path = '/' + self.api_version + '/ConferenceHangup/'
    method = 'POST'
    return self.request(path, method, call_params)
[ "def", "conference_hangup", "(", "self", ",", "call_params", ")", ":", "path", "=", "'/'", "+", "self", ".", "api_version", "+", "'/ConferenceHangup/'", "method", "=", "'POST'", "return", "self", ".", "request", "(", "path", ",", "method", ",", "call_params", ")" ]
https://github.com/JoneXiong/YouPBX/blob/e24a8b74814761bc90fd86f4217c92ec7238874b/pbx/db/operates/plivohelper.py#L293-L298
snakemake/snakemake
987282dde8a2db5174414988c134a39ae8836a61
snakemake/remote/__init__.py
python
PooledDomainObject.connection_pool
(self)
return self.connection_pools[conn_pool_label_tuple]
set up a pool of re-usable active connections
set up a pool of re-usable active connections
[ "set", "up", "a", "pool", "of", "re", "-", "usable", "active", "connections" ]
def connection_pool(self):
    """set up a pool of re-usable active connections"""
    # merge this object's values with those of its parent provider
    args_to_use, kwargs_to_use = self.get_args_to_use()

    # hashing connection pool on tuple of relevant arguments. There
    # may be a better way to do this
    conn_pool_label_tuple = (
        type(self),
        *args_to_use,
        *[kwargs_to_use.get(k, None) for k in self.conn_keywords],
    )

    if conn_pool_label_tuple not in self.connection_pools:
        create_callback = partial(
            self.create_connection, *args_to_use, **kwargs_to_use
        )
        self.connection_pools[conn_pool_label_tuple] = ConnectionPool(
            create_callback, close=self.close_connection, max_size=self.pool_size
        )
    return self.connection_pools[conn_pool_label_tuple]
[ "def", "connection_pool", "(", "self", ")", ":", "# merge this object's values with those of its parent provider", "args_to_use", ",", "kwargs_to_use", "=", "self", ".", "get_args_to_use", "(", ")", "# hashing connection pool on tuple of relevant arguments. There", "# may be a better way to do this", "conn_pool_label_tuple", "=", "(", "type", "(", "self", ")", ",", "*", "args_to_use", ",", "*", "[", "kwargs_to_use", ".", "get", "(", "k", ",", "None", ")", "for", "k", "in", "self", ".", "conn_keywords", "]", ",", ")", "if", "conn_pool_label_tuple", "not", "in", "self", ".", "connection_pools", ":", "create_callback", "=", "partial", "(", "self", ".", "create_connection", ",", "*", "args_to_use", ",", "*", "*", "kwargs_to_use", ")", "self", ".", "connection_pools", "[", "conn_pool_label_tuple", "]", "=", "ConnectionPool", "(", "create_callback", ",", "close", "=", "self", ".", "close_connection", ",", "max_size", "=", "self", ".", "pool_size", ")", "return", "self", ".", "connection_pools", "[", "conn_pool_label_tuple", "]" ]
https://github.com/snakemake/snakemake/blob/987282dde8a2db5174414988c134a39ae8836a61/snakemake/remote/__init__.py#L375-L396
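The essential pattern is one pool per distinct connection configuration, keyed by a hashable tuple of the parameters. A self-contained sketch with a toy stand-in for snakemake's ConnectionPool (names and signatures here are illustrative, not snakemake API):

from functools import partial

class ToyPool:
    """Toy stand-in: hands out connections created lazily via a callback."""
    def __init__(self, create, max_size=8):
        self.create, self.max_size = create, max_size
        self._free = []
    def acquire(self):
        return self._free.pop() if self._free else self.create()
    def release(self, conn):
        self._free.append(conn)

_pools = {}

def pool_for(create_fn, *args, **kwargs):
    # kwargs are sorted so equivalent configs hash to the same key
    key = (create_fn, args, tuple(sorted(kwargs.items())))
    if key not in _pools:
        _pools[key] = ToyPool(partial(create_fn, *args, **kwargs))
    return _pools[key]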