Dataset columns:
repo: string (length 7 to 55)
path: string (length 4 to 223)
url: string (length 87 to 315)
code: string (length 75 to 104k)
code_tokens: list
docstring: string (length 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91 to 980)
Neurosim-lab/netpyne
netpyne/cell/compartCell.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/cell/compartCell.py#L1231-L1243
def calcAbsSegCoords(self):
    ''' Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc'''
    from .. import sim

    p3dsoma = self.getSomaPos()
    pop = self.tags['pop']
    morphSegCoords = sim.net.pops[pop]._morphSegCoords

    # rotated coordinates around z axis first then shift relative to the soma
    self._segCoords = {}
    p3dsoma = p3dsoma[np.newaxis].T  # trasnpose 1d array to enable matrix calculation
    self._segCoords['p0'] = p3dsoma + morphSegCoords['p0']
    self._segCoords['p1'] = p3dsoma + morphSegCoords['p1']
[ "def", "calcAbsSegCoords", "(", "self", ")", ":", "from", ".", ".", "import", "sim", "p3dsoma", "=", "self", ".", "getSomaPos", "(", ")", "pop", "=", "self", ".", "tags", "[", "'pop'", "]", "morphSegCoords", "=", "sim", ".", "net", ".", "pops", "[", "pop", "]", ".", "_morphSegCoords", "# rotated coordinates around z axis first then shift relative to the soma", "self", ".", "_segCoords", "=", "{", "}", "p3dsoma", "=", "p3dsoma", "[", "np", ".", "newaxis", "]", ".", "T", "# trasnpose 1d array to enable matrix calculation", "self", ".", "_segCoords", "[", "'p0'", "]", "=", "p3dsoma", "+", "morphSegCoords", "[", "'p0'", "]", "self", ".", "_segCoords", "[", "'p1'", "]", "=", "p3dsoma", "+", "morphSegCoords", "[", "'p1'", "]" ]
Calculate absolute seg coords by translating the relative seg coords -- used for LFP calc
[ "Calculate", "absolute", "seg", "coords", "by", "translating", "the", "relative", "seg", "coords", "--", "used", "for", "LFP", "calc" ]
python
train
46.384615
soldag/python-pwmled
pwmled/driver/gpio.py
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/driver/gpio.py#L23-L30
def _set_pwm(self, raw_values):
    """
    Set pwm values on the controlled pins.

    :param raw_values: Raw values to set (0-255).
    """
    for i in range(len(self._pins)):
        self._pi.set_PWM_dutycycle(self._pins[i], raw_values[i])
[ "def", "_set_pwm", "(", "self", ",", "raw_values", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_pins", ")", ")", ":", "self", ".", "_pi", ".", "set_PWM_dutycycle", "(", "self", ".", "_pins", "[", "i", "]", ",", "raw_values", "[", "i", "]", ")" ]
Set pwm values on the controlled pins. :param raw_values: Raw values to set (0-255).
[ "Set", "pwm", "values", "on", "the", "controlled", "pins", "." ]
python
train
32.5
tensorflow/tensor2tensor
tensor2tensor/data_generators/text_encoder.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L369-L384
def _init_vocab(self, token_generator, add_reserved_tokens=True):
    """Initialize vocabulary with tokens from token_generator."""
    self._id_to_token = {}
    non_reserved_start_index = 0

    if add_reserved_tokens:
        self._id_to_token.update(enumerate(RESERVED_TOKENS))
        non_reserved_start_index = len(RESERVED_TOKENS)

    self._id_to_token.update(
        enumerate(token_generator, start=non_reserved_start_index))

    # _token_to_id is the reverse of _id_to_token
    self._token_to_id = dict((v, k) for k, v in six.iteritems(self._id_to_token))
[ "def", "_init_vocab", "(", "self", ",", "token_generator", ",", "add_reserved_tokens", "=", "True", ")", ":", "self", ".", "_id_to_token", "=", "{", "}", "non_reserved_start_index", "=", "0", "if", "add_reserved_tokens", ":", "self", ".", "_id_to_token", ".", "update", "(", "enumerate", "(", "RESERVED_TOKENS", ")", ")", "non_reserved_start_index", "=", "len", "(", "RESERVED_TOKENS", ")", "self", ".", "_id_to_token", ".", "update", "(", "enumerate", "(", "token_generator", ",", "start", "=", "non_reserved_start_index", ")", ")", "# _token_to_id is the reverse of _id_to_token", "self", ".", "_token_to_id", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "self", ".", "_id_to_token", ")", ")" ]
Initialize vocabulary with tokens from token_generator.
[ "Initialize", "vocabulary", "with", "tokens", "from", "token_generator", "." ]
python
train
36.25
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L1029-L1061
def typewrite(message, interval=0.0, pause=None, _pause=True):
    """Performs a keyboard key press down, followed by a release, for each of
    the characters in message.

    The message argument can also be list of strings, in which case any valid
    keyboard name can be used.

    Since this performs a sequence of keyboard presses and does not hold down
    keys, it cannot be used to perform keyboard shortcuts. Use the hotkey()
    function for that.

    Args:
      message (str, list): If a string, then the characters to be pressed. If
        a list, then the key names of the keys to press in order. The valid
        names are listed in KEYBOARD_KEYS.
      interval (float, optional): The number of seconds in between each press.
        0.0 by default, for no pause in between presses.

    Returns:
      None
    """
    interval = float(interval)

    _failSafeCheck()

    for c in message:
        if len(c) > 1:
            c = c.lower()
        press(c, _pause=False)
        time.sleep(interval)
        _failSafeCheck()

    _autoPause(pause, _pause)
[ "def", "typewrite", "(", "message", ",", "interval", "=", "0.0", ",", "pause", "=", "None", ",", "_pause", "=", "True", ")", ":", "interval", "=", "float", "(", "interval", ")", "_failSafeCheck", "(", ")", "for", "c", "in", "message", ":", "if", "len", "(", "c", ")", ">", "1", ":", "c", "=", "c", ".", "lower", "(", ")", "press", "(", "c", ",", "_pause", "=", "False", ")", "time", ".", "sleep", "(", "interval", ")", "_failSafeCheck", "(", ")", "_autoPause", "(", "pause", ",", "_pause", ")" ]
Performs a keyboard key press down, followed by a release, for each of the characters in message. The message argument can also be list of strings, in which case any valid keyboard name can be used. Since this performs a sequence of keyboard presses and does not hold down keys, it cannot be used to perform keyboard shortcuts. Use the hotkey() function for that. Args: message (str, list): If a string, then the characters to be pressed. If a list, then the key names of the keys to press in order. The valid names are listed in KEYBOARD_KEYS. interval (float, optional): The number of seconds in between each press. 0.0 by default, for no pause in between presses. Returns: None
[ "Performs", "a", "keyboard", "key", "press", "down", "followed", "by", "a", "release", "for", "each", "of", "the", "characters", "in", "message", "." ]
python
train
31.666667
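For orientation, a minimal usage sketch of the typewrite function recorded above; it assumes pyautogui is installed and a desktop session with a focused text field is available (the 'left' key name is one of the entries in KEYBOARD_KEYS):

import pyautogui

# Type a plain string, pausing 0.1 s between key presses.
pyautogui.typewrite('Hello world!', interval=0.1)

# Pass a list to use named keys as well as characters.
pyautogui.typewrite(['a', 'b', 'left', 'left', 'X', 'Y'])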
numenta/htmresearch
projects/l2_pooling/topology_experiments.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/topology_experiments.py#L186-L247
def plotConvergenceByDistantConnectionChance(results, featureRange, columnRange,
                                             longDistanceConnectionsRange, numTrials):
  """
  Plots the convergence graph: iterations vs number of columns.
  Each curve shows the convergence for a given number of unique features.
  """
  ########################################################################
  #
  # Accumulate all the results per column in a convergence array.
  #
  # Convergence[f, c, t] = how long it took it to converge with f unique
  # features, c columns and topology t.
  convergence = numpy.zeros((len(featureRange), len(longDistanceConnectionsRange), len(columnRange)))
  for r in results:
    print longDistanceConnectionsRange.index(r["longDistanceConnections"])
    print columnRange.index(r["numColumns"])
    convergence[featureRange.index(r["numFeatures"]),
        longDistanceConnectionsRange.index(r["longDistanceConnections"]),
        columnRange.index(r["numColumns"])] += r["convergencePoint"]

  convergence /= numTrials

  # For each column, print convergence as fct of number of unique features
  for i, c in enumerate(columnRange):
    for j, r in enumerate(longDistanceConnectionsRange):
      print c, r, convergence[:, j, i]

  # Print everything anyway for debugging
  print "Average convergence array=", convergence

  ########################################################################
  #
  # Create the plot. x-axis=
  plt.figure(figsize=(8, 6), dpi=80)
  plotPath = os.path.join("plots", "convergence_by_random_connection_chance.pdf")

  # Plot each curve
  legendList = []
  colormap = plt.get_cmap("jet")
  colorList = [colormap(x) for x in numpy.linspace(0., 1.,
      len(featureRange) * len(longDistanceConnectionsRange))]

  for i, r in enumerate(longDistanceConnectionsRange):
    for j, f in enumerate(featureRange):
      currentColor = i * len(featureRange) + j
      print columnRange
      print convergence[j, i, :]
      legendList.append('Connection_prob = {}, num features = {}'.format(r, f))
      plt.plot(columnRange, convergence[j, i, :], color=colorList[currentColor])

  # format
  plt.legend(legendList, loc="lower left")
  plt.xlabel("Number of columns")
  plt.xticks(columnRange)
  plt.yticks(range(0, int(convergence.max()) + 1))
  plt.ylabel("Average number of touches")
  plt.title("Number of touches to recognize one object (multiple columns)")

  # save
  plt.show()
  plt.savefig(plotPath)
  plt.close()
[ "def", "plotConvergenceByDistantConnectionChance", "(", "results", ",", "featureRange", ",", "columnRange", ",", "longDistanceConnectionsRange", ",", "numTrials", ")", ":", "########################################################################", "#", "# Accumulate all the results per column in a convergence array.", "#", "# Convergence[f, c, t] = how long it took it to converge with f unique", "# features, c columns and topology t.", "convergence", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "featureRange", ")", ",", "len", "(", "longDistanceConnectionsRange", ")", ",", "len", "(", "columnRange", ")", ")", ")", "for", "r", "in", "results", ":", "print", "longDistanceConnectionsRange", ".", "index", "(", "r", "[", "\"longDistanceConnections\"", "]", ")", "print", "columnRange", ".", "index", "(", "r", "[", "\"numColumns\"", "]", ")", "convergence", "[", "featureRange", ".", "index", "(", "r", "[", "\"numFeatures\"", "]", ")", ",", "longDistanceConnectionsRange", ".", "index", "(", "r", "[", "\"longDistanceConnections\"", "]", ")", ",", "columnRange", ".", "index", "(", "r", "[", "\"numColumns\"", "]", ")", "]", "+=", "r", "[", "\"convergencePoint\"", "]", "convergence", "/=", "numTrials", "# For each column, print convergence as fct of number of unique features", "for", "i", ",", "c", "in", "enumerate", "(", "columnRange", ")", ":", "for", "j", ",", "r", "in", "enumerate", "(", "longDistanceConnectionsRange", ")", ":", "print", "c", ",", "r", ",", "convergence", "[", ":", ",", "j", ",", "i", "]", "# Print everything anyway for debugging", "print", "\"Average convergence array=\"", ",", "convergence", "########################################################################", "#", "# Create the plot. x-axis=", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "6", ")", ",", "dpi", "=", "80", ")", "plotPath", "=", "os", ".", "path", ".", "join", "(", "\"plots\"", ",", "\"convergence_by_random_connection_chance.pdf\"", ")", "# Plot each curve", "legendList", "=", "[", "]", "colormap", "=", "plt", ".", "get_cmap", "(", "\"jet\"", ")", "colorList", "=", "[", "colormap", "(", "x", ")", "for", "x", "in", "numpy", ".", "linspace", "(", "0.", ",", "1.", ",", "len", "(", "featureRange", ")", "*", "len", "(", "longDistanceConnectionsRange", ")", ")", "]", "for", "i", ",", "r", "in", "enumerate", "(", "longDistanceConnectionsRange", ")", ":", "for", "j", ",", "f", "in", "enumerate", "(", "featureRange", ")", ":", "currentColor", "=", "i", "*", "len", "(", "featureRange", ")", "+", "j", "print", "columnRange", "print", "convergence", "[", "j", ",", "i", ",", ":", "]", "legendList", ".", "append", "(", "'Connection_prob = {}, num features = {}'", ".", "format", "(", "r", ",", "f", ")", ")", "plt", ".", "plot", "(", "columnRange", ",", "convergence", "[", "j", ",", "i", ",", ":", "]", ",", "color", "=", "colorList", "[", "currentColor", "]", ")", "# format", "plt", ".", "legend", "(", "legendList", ",", "loc", "=", "\"lower left\"", ")", "plt", ".", "xlabel", "(", "\"Number of columns\"", ")", "plt", ".", "xticks", "(", "columnRange", ")", "plt", ".", "yticks", "(", "range", "(", "0", ",", "int", "(", "convergence", ".", "max", "(", ")", ")", "+", "1", ")", ")", "plt", ".", "ylabel", "(", "\"Average number of touches\"", ")", "plt", ".", "title", "(", "\"Number of touches to recognize one object (multiple columns)\"", ")", "# save", "plt", ".", "show", "(", ")", "plt", ".", "savefig", "(", "plotPath", ")", "plt", ".", "close", "(", ")" ]
Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features.
[ "Plots", "the", "convergence", "graph", ":", "iterations", "vs", "number", "of", "columns", ".", "Each", "curve", "shows", "the", "convergence", "for", "a", "given", "number", "of", "unique", "features", "." ]
python
train
38.387097
cltk/cltk
cltk/utils/matrix_corpus_fun.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/utils/matrix_corpus_fun.py#L301-L318
def divide_separate_words(string_matrix: List[List[str]]) -> List[List[str]]:
    """
    As part of processing, some words obviously need to be separated.
    :param string_matrix: a data matrix: a list wrapping a list of strings,
    with each sublist being a sentence.
    :return:

    >>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']])
    [['ita', 'vero'], ['quid', 'est', 'veritas']]
    """
    new_X = []
    for sentence in string_matrix:
        data_row = []  # type: List[str]
        for word in sentence:
            if ' ' in word:
                data_row += word.split()
            else:
                data_row.append(word)
        new_X.append(data_row)
    return new_X
[ "def", "divide_separate_words", "(", "string_matrix", ":", "List", "[", "List", "[", "str", "]", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "new_X", "=", "[", "]", "for", "sentence", "in", "string_matrix", ":", "data_row", "=", "[", "]", "# type: List[str]", "for", "word", "in", "sentence", ":", "if", "' '", "in", "word", ":", "data_row", "+=", "word", ".", "split", "(", ")", "else", ":", "data_row", ".", "append", "(", "word", ")", "new_X", ".", "append", "(", "data_row", ")", "return", "new_X" ]
As part of processing, some words obviously need to be separated. :param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence. :return: >>> divide_separate_words([['ita vero'], ['quid', 'est', 'veritas']]) [['ita', 'vero'], ['quid', 'est', 'veritas']]
[ "As", "part", "of", "processing", "some", "words", "obviously", "need", "to", "be", "separated", ".", ":", "param", "string_matrix", ":", "a", "data", "matrix", ":", "a", "list", "wrapping", "a", "list", "of", "strings", "with", "each", "sublist", "being", "a", "sentence", ".", ":", "return", ":", ">>>", "divide_separate_words", "(", "[[", "ita", "vero", "]", "[", "quid", "est", "veritas", "]]", ")", "[[", "ita", "vero", "]", "[", "quid", "est", "veritas", "]]" ]
python
train
38.277778
MycroftAI/mycroft-skills-manager
msm/mycroft_skills_manager.py
https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L163-L168
def write_skills_data(self, data=None):
    """ Write skills data hash if it has been modified. """
    data = data or self.skills_data
    if skills_data_hash(data) != self.skills_data_hash:
        write_skills_data(data)
        self.skills_data_hash = skills_data_hash(data)
[ "def", "write_skills_data", "(", "self", ",", "data", "=", "None", ")", ":", "data", "=", "data", "or", "self", ".", "skills_data", "if", "skills_data_hash", "(", "data", ")", "!=", "self", ".", "skills_data_hash", ":", "write_skills_data", "(", "data", ")", "self", ".", "skills_data_hash", "=", "skills_data_hash", "(", "data", ")" ]
Write skills data hash if it has been modified.
[ "Write", "skills", "data", "hash", "if", "it", "has", "been", "modified", "." ]
python
train
48.833333
gamechanger/dusty
dusty/compiler/spec_assembler.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/compiler/spec_assembler.py#L112-L123
def _get_expanded_active_specs(specs):
    """
    This function removes any unnecessary bundles, apps, libs, and services
    that aren't needed by the activated_bundles. It also expands inside
    specs.apps.depends.libs all libs that are needed indirectly by each app
    """
    _filter_active(constants.CONFIG_BUNDLES_KEY, specs)
    _filter_active('apps', specs)
    _expand_libs_in_apps(specs)
    _filter_active('libs', specs)
    _filter_active('services', specs)
    _add_active_assets(specs)
[ "def", "_get_expanded_active_specs", "(", "specs", ")", ":", "_filter_active", "(", "constants", ".", "CONFIG_BUNDLES_KEY", ",", "specs", ")", "_filter_active", "(", "'apps'", ",", "specs", ")", "_expand_libs_in_apps", "(", "specs", ")", "_filter_active", "(", "'libs'", ",", "specs", ")", "_filter_active", "(", "'services'", ",", "specs", ")", "_add_active_assets", "(", "specs", ")" ]
This function removes any unnecessary bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed indirectly by each app
[ "This", "function", "removes", "any", "unnecessary", "bundles", "apps", "libs", "and", "services", "that", "aren", "t", "needed", "by", "the", "activated_bundles", ".", "It", "also", "expands", "inside", "specs", ".", "apps", ".", "depends", ".", "libs", "all", "libs", "that", "are", "needed", "indirectly", "by", "each", "app" ]
python
valid
41
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2313-L2324
def destroy(self, uuid):
    """
    Destroy a kvm domain by uuid

    :param uuid: uuid of the kvm container (same as the used in create)
    :return:
    """
    args = {
        'uuid': uuid,
    }
    self._domain_action_chk.check(args)

    self._client.sync('kvm.destroy', args)
[ "def", "destroy", "(", "self", ",", "uuid", ")", ":", "args", "=", "{", "'uuid'", ":", "uuid", ",", "}", "self", ".", "_domain_action_chk", ".", "check", "(", "args", ")", "self", ".", "_client", ".", "sync", "(", "'kvm.destroy'", ",", "args", ")" ]
Destroy a kvm domain by uuid :param uuid: uuid of the kvm container (same as the used in create) :return:
[ "Destroy", "a", "kvm", "domain", "by", "uuid", ":", "param", "uuid", ":", "uuid", "of", "the", "kvm", "container", "(", "same", "as", "the", "used", "in", "create", ")", ":", "return", ":" ]
python
train
26
pystorm/pystorm
pystorm/component.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L50-L63
def remote_pdb_handler(signum, frame):
    """ Handler to drop us into a remote debugger upon receiving SIGUSR1 """
    try:
        from remote_pdb import RemotePdb

        rdb = RemotePdb(host="127.0.0.1", port=0)
        rdb.set_trace(frame=frame)
    except ImportError:
        log.warning(
            "remote_pdb unavailable. Please install remote_pdb to "
            "allow remote debugging."
        )
    # Restore signal handler for later
    signal.signal(signum, remote_pdb_handler)
[ "def", "remote_pdb_handler", "(", "signum", ",", "frame", ")", ":", "try", ":", "from", "remote_pdb", "import", "RemotePdb", "rdb", "=", "RemotePdb", "(", "host", "=", "\"127.0.0.1\"", ",", "port", "=", "0", ")", "rdb", ".", "set_trace", "(", "frame", "=", "frame", ")", "except", "ImportError", ":", "log", ".", "warning", "(", "\"remote_pdb unavailable. Please install remote_pdb to \"", "\"allow remote debugging.\"", ")", "# Restore signal handler for later", "signal", ".", "signal", "(", "signum", ",", "remote_pdb_handler", ")" ]
Handler to drop us into a remote debugger upon receiving SIGUSR1
[ "Handler", "to", "drop", "us", "into", "a", "remote", "debugger", "upon", "receiving", "SIGUSR1" ]
python
train
34.642857
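As a hedged sketch of how a handler like the one above is typically registered (the SIGUSR1 wiring is an assumption drawn from the docstring and assumes remote_pdb_handler from this entry is in scope on a Unix-like system):

import signal

# Install the handler so that sending SIGUSR1 to the process drops it
# into a RemotePdb session, provided remote_pdb is installed.
signal.signal(signal.SIGUSR1, remote_pdb_handler)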
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/schedulermanagementservice.py#L204-L219
def get_job_collection(self, cloud_service_id, job_collection_id):
    '''
    The Get Job Collection operation gets the details of a job collection

    cloud_service_id: The cloud service id
    job_collection_id: Name of the hosted service.
    '''
    _validate_not_none('cloud_service_id', cloud_service_id)
    _validate_not_none('job_collection_id', job_collection_id)

    path = self._get_job_collection_path(
        cloud_service_id, job_collection_id)
    return self._perform_get(path, Resource)
[ "def", "get_job_collection", "(", "self", ",", "cloud_service_id", ",", "job_collection_id", ")", ":", "_validate_not_none", "(", "'cloud_service_id'", ",", "cloud_service_id", ")", "_validate_not_none", "(", "'job_collection_id'", ",", "job_collection_id", ")", "path", "=", "self", ".", "_get_job_collection_path", "(", "cloud_service_id", ",", "job_collection_id", ")", "return", "self", ".", "_perform_get", "(", "path", ",", "Resource", ")" ]
The Get Job Collection operation gets the details of a job collection cloud_service_id: The cloud service id job_collection_id: Name of the hosted service.
[ "The", "Get", "Job", "Collection", "operation", "gets", "the", "details", "of", "a", "job", "collection" ]
python
test
34.875
coleifer/walrus
walrus/containers.py
https://github.com/coleifer/walrus/blob/82bf15a6613487b5b5fefeb488f186d7e0106547/walrus/containers.py#L842-L852
def bpopmax(self, timeout=0):
    """
    Atomically remove the highest-scoring item from the set, blocking until
    an item becomes available or timeout is reached (0 for no timeout,
    default).

    Returns a 2-tuple of (item, score).
    """
    res = self.database.bzpopmax(self.key, timeout)
    if res is not None:
        return (res[1], res[2])
[ "def", "bpopmax", "(", "self", ",", "timeout", "=", "0", ")", ":", "res", "=", "self", ".", "database", ".", "bzpopmax", "(", "self", ".", "key", ",", "timeout", ")", "if", "res", "is", "not", "None", ":", "return", "(", "res", "[", "1", "]", ",", "res", "[", "2", "]", ")" ]
Atomically remove the highest-scoring item from the set, blocking until an item becomes available or timeout is reached (0 for no timeout, default). Returns a 2-tuple of (item, score).
[ "Atomically", "remove", "the", "highest", "-", "scoring", "item", "from", "the", "set", "blocking", "until", "an", "item", "becomes", "available", "or", "timeout", "is", "reached", "(", "0", "for", "no", "timeout", "default", ")", "." ]
python
train
34.636364
googleapis/google-cloud-python
api_core/google/api_core/exceptions.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/exceptions.py#L362-L381
def from_http_status(status_code, message, **kwargs):
    """Create a :class:`GoogleAPICallError` from an HTTP status code.

    Args:
        status_code (int): The HTTP status code.
        message (str): The exception message.
        kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
            constructor.

    Returns:
        GoogleAPICallError: An instance of the appropriate subclass of
            :class:`GoogleAPICallError`.
    """
    error_class = exception_class_for_http_status(status_code)
    error = error_class(message, **kwargs)

    if error.code is None:
        error.code = status_code

    return error
[ "def", "from_http_status", "(", "status_code", ",", "message", ",", "*", "*", "kwargs", ")", ":", "error_class", "=", "exception_class_for_http_status", "(", "status_code", ")", "error", "=", "error_class", "(", "message", ",", "*", "*", "kwargs", ")", "if", "error", ".", "code", "is", "None", ":", "error", ".", "code", "=", "status_code", "return", "error" ]
Create a :class:`GoogleAPICallError` from an HTTP status code. Args: status_code (int): The HTTP status code. message (str): The exception message. kwargs: Additional arguments passed to the :class:`GoogleAPICallError` constructor. Returns: GoogleAPICallError: An instance of the appropriate subclass of :class:`GoogleAPICallError`.
[ "Create", "a", ":", "class", ":", "GoogleAPICallError", "from", "an", "HTTP", "status", "code", "." ]
python
train
31.65
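A short usage sketch for from_http_status; it assumes the helper is called through the public google.api_core.exceptions module named in the path above:

from google.api_core import exceptions

# Map an HTTP 404 onto the matching GoogleAPICallError subclass.
error = exceptions.from_http_status(404, 'Resource missing')
print(type(error).__name__, error.code)  # expected: NotFound 404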
inveniosoftware/invenio-pidstore
invenio_pidstore/providers/datacite.py
https://github.com/inveniosoftware/invenio-pidstore/blob/8bf35f4e62d5dcaf1a2cfe5803245ba5220a9b78/invenio_pidstore/providers/datacite.py#L171-L211
def sync_status(self):
    """Synchronize DOI status DataCite MDS.

    :returns: `True` if is sync successfully.
    """
    status = None

    try:
        try:
            self.api.doi_get(self.pid.pid_value)
            status = PIDStatus.REGISTERED
        except DataCiteGoneError:
            status = PIDStatus.DELETED
        except DataCiteNoContentError:
            status = PIDStatus.REGISTERED
        except DataCiteNotFoundError:
            pass

        if status is None:
            try:
                self.api.metadata_get(self.pid.pid_value)
                status = PIDStatus.RESERVED
            except DataCiteGoneError:
                status = PIDStatus.DELETED
            except DataCiteNoContentError:
                status = PIDStatus.REGISTERED
            except DataCiteNotFoundError:
                pass
    except (DataCiteError, HttpError):
        logger.exception("Failed to sync status from DataCite",
                         extra=dict(pid=self.pid))
        raise

    if status is None:
        status = PIDStatus.NEW

    self.pid.sync_status(status)

    logger.info("Successfully synced status from DataCite",
                extra=dict(pid=self.pid))
    return True
[ "def", "sync_status", "(", "self", ")", ":", "status", "=", "None", "try", ":", "try", ":", "self", ".", "api", ".", "doi_get", "(", "self", ".", "pid", ".", "pid_value", ")", "status", "=", "PIDStatus", ".", "REGISTERED", "except", "DataCiteGoneError", ":", "status", "=", "PIDStatus", ".", "DELETED", "except", "DataCiteNoContentError", ":", "status", "=", "PIDStatus", ".", "REGISTERED", "except", "DataCiteNotFoundError", ":", "pass", "if", "status", "is", "None", ":", "try", ":", "self", ".", "api", ".", "metadata_get", "(", "self", ".", "pid", ".", "pid_value", ")", "status", "=", "PIDStatus", ".", "RESERVED", "except", "DataCiteGoneError", ":", "status", "=", "PIDStatus", ".", "DELETED", "except", "DataCiteNoContentError", ":", "status", "=", "PIDStatus", ".", "REGISTERED", "except", "DataCiteNotFoundError", ":", "pass", "except", "(", "DataCiteError", ",", "HttpError", ")", ":", "logger", ".", "exception", "(", "\"Failed to sync status from DataCite\"", ",", "extra", "=", "dict", "(", "pid", "=", "self", ".", "pid", ")", ")", "raise", "if", "status", "is", "None", ":", "status", "=", "PIDStatus", ".", "NEW", "self", ".", "pid", ".", "sync_status", "(", "status", ")", "logger", ".", "info", "(", "\"Successfully synced status from DataCite\"", ",", "extra", "=", "dict", "(", "pid", "=", "self", ".", "pid", ")", ")", "return", "True" ]
Synchronize DOI status DataCite MDS. :returns: `True` if is sync successfully.
[ "Synchronize", "DOI", "status", "DataCite", "MDS", "." ]
python
train
32.04878
bitesofcode/projexui
projexui/dialogs/xshortcutdialog/xshortcutdialog.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xshortcutdialog/xshortcutdialog.py#L163-L179
def setActions( self, actions ):
    """
    Sets the list of actions that will be used for this shortcut dialog \
    when editing.

    :param      actions | [<QAction>, ..]
    """
    self.uiActionTREE.blockSignals(True)
    self.uiActionTREE.setUpdatesEnabled(False)

    self.uiActionTREE.clear()
    for action in actions:
        self.uiActionTREE.addTopLevelItem(ActionItem(action))
    self.uiActionTREE.sortByColumn(0, Qt.AscendingOrder)

    self.uiActionTREE.blockSignals(False)
    self.uiActionTREE.setUpdatesEnabled(True)
[ "def", "setActions", "(", "self", ",", "actions", ")", ":", "self", ".", "uiActionTREE", ".", "blockSignals", "(", "True", ")", "self", ".", "uiActionTREE", ".", "setUpdatesEnabled", "(", "False", ")", "self", ".", "uiActionTREE", ".", "clear", "(", ")", "for", "action", "in", "actions", ":", "self", ".", "uiActionTREE", ".", "addTopLevelItem", "(", "ActionItem", "(", "action", ")", ")", "self", ".", "uiActionTREE", ".", "sortByColumn", "(", "0", ",", "Qt", ".", "AscendingOrder", ")", "self", ".", "uiActionTREE", ".", "blockSignals", "(", "False", ")", "self", ".", "uiActionTREE", ".", "setUpdatesEnabled", "(", "True", ")" ]
Sets the list of actions that will be used for this shortcut dialog \ when editing. :param actions | [<QAction>, ..]
[ "Sets", "the", "list", "of", "actions", "that", "will", "be", "used", "for", "this", "shortcut", "dialog", "\\", "when", "editing", ".", ":", "param", "actions", "|", "[", "<QAction", ">", "..", "]" ]
python
train
35.117647
Kozea/cairocffi
cairocffi/surfaces.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/surfaces.py#L342-L382
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
    """ Set the horizontal and vertical resolution for image fallbacks.

    When certain operations aren't supported natively by a backend,
    cairo will fallback by rendering operations to an image
    and then overlaying that image onto the output.
    For backends that are natively vector-oriented,
    this method can be used to set the resolution
    used for these image fallbacks,
    (larger values will result in more detailed images,
    but also larger file sizes).

    Some examples of natively vector-oriented backends are
    the ps, pdf, and svg backends.

    For backends that are natively raster-oriented,
    image fallbacks are still possible,
    but they are always performed at the native device resolution.
    So this method has no effect on those backends.

    .. note::

        The fallback resolution only takes effect
        at the time of completing a page
        (with :meth:`show_page` or :meth:`copy_page`)
        so there is currently no way to have
        more than one fallback resolution in effect on a single page.

    The default fallback resoultion is
    300 pixels per inch in both dimensions.

    :param x_pixels_per_inch: horizontal resolution in pixels per inch
    :type x_pixels_per_inch: float
    :param y_pixels_per_inch: vertical resolution in pixels per inch
    :type y_pixels_per_inch: float

    """
    cairo.cairo_surface_set_fallback_resolution(
        self._pointer, x_pixels_per_inch, y_pixels_per_inch)
    self._check_status()
[ "def", "set_fallback_resolution", "(", "self", ",", "x_pixels_per_inch", ",", "y_pixels_per_inch", ")", ":", "cairo", ".", "cairo_surface_set_fallback_resolution", "(", "self", ".", "_pointer", ",", "x_pixels_per_inch", ",", "y_pixels_per_inch", ")", "self", ".", "_check_status", "(", ")" ]
Set the horizontal and vertical resolution for image fallbacks. When certain operations aren't supported natively by a backend, cairo will fallback by rendering operations to an image and then overlaying that image onto the output. For backends that are natively vector-oriented, this method can be used to set the resolution used for these image fallbacks, (larger values will result in more detailed images, but also larger file sizes). Some examples of natively vector-oriented backends are the ps, pdf, and svg backends. For backends that are natively raster-oriented, image fallbacks are still possible, but they are always performed at the native device resolution. So this method has no effect on those backends. .. note:: The fallback resolution only takes effect at the time of completing a page (with :meth:`show_page` or :meth:`copy_page`) so there is currently no way to have more than one fallback resolution in effect on a single page. The default fallback resoultion is 300 pixels per inch in both dimensions. :param x_pixels_per_inch: horizontal resolution in pixels per inch :type x_pixels_per_inch: float :param y_pixels_per_inch: vertical resolution in pixels per inch :type y_pixels_per_inch: float
[ "Set", "the", "horizontal", "and", "vertical", "resolution", "for", "image", "fallbacks", "." ]
python
train
40.634146
Danielhiversen/pymill
mill/__init__.py
https://github.com/Danielhiversen/pymill/blob/f091385914b53682012d0948c549beb4a5a96794/mill/__init__.py#L193-L197
def sync_request(self, command, payload, retry=2):
    """Request data."""
    loop = asyncio.get_event_loop()
    task = loop.create_task(self.request(command, payload, retry))
    return loop.run_until_complete(task)
[ "def", "sync_request", "(", "self", ",", "command", ",", "payload", ",", "retry", "=", "2", ")", ":", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "task", "=", "loop", ".", "create_task", "(", "self", ".", "request", "(", "command", ",", "payload", ",", "retry", ")", ")", "return", "loop", ".", "run_until_complete", "(", "task", ")" ]
Request data.
[ "Request", "data", "." ]
python
train
46
dwavesystems/dwave-system
dwave/embedding/polynomialembedder.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/polynomialembedder.py#L188-L210
def _compute_vline_scores(self):
    """Does the hard work to prepare ``vline_score``. """
    M, N, L = self.M, self.N, self.L
    vline_score = {}
    for x in range(M):
        laststart = [0 if (x, 0, 1, k) in self else None for k in range(L)]
        for y in range(N):
            block = [0] * (y + 1)
            for k in range(L):
                if (x, y, 1, k) not in self:
                    laststart[k] = None
                elif laststart[k] is None:
                    laststart[k] = y
                    block[y] += 1
                elif y and (x, y, 1, k) not in self[x, y - 1, 1, k]:
                    laststart[k] = y
                else:
                    for y1 in range(laststart[k], y + 1):
                        block[y1] += 1
            for y1 in range(y + 1):
                vline_score[x, y1, y] = block[y1]
    self._vline_score = vline_score
[ "def", "_compute_vline_scores", "(", "self", ")", ":", "M", ",", "N", ",", "L", "=", "self", ".", "M", ",", "self", ".", "N", ",", "self", ".", "L", "vline_score", "=", "{", "}", "for", "x", "in", "range", "(", "M", ")", ":", "laststart", "=", "[", "0", "if", "(", "x", ",", "0", ",", "1", ",", "k", ")", "in", "self", "else", "None", "for", "k", "in", "range", "(", "L", ")", "]", "for", "y", "in", "range", "(", "N", ")", ":", "block", "=", "[", "0", "]", "*", "(", "y", "+", "1", ")", "for", "k", "in", "range", "(", "L", ")", ":", "if", "(", "x", ",", "y", ",", "1", ",", "k", ")", "not", "in", "self", ":", "laststart", "[", "k", "]", "=", "None", "elif", "laststart", "[", "k", "]", "is", "None", ":", "laststart", "[", "k", "]", "=", "y", "block", "[", "y", "]", "+=", "1", "elif", "y", "and", "(", "x", ",", "y", ",", "1", ",", "k", ")", "not", "in", "self", "[", "x", ",", "y", "-", "1", ",", "1", ",", "k", "]", ":", "laststart", "[", "k", "]", "=", "y", "else", ":", "for", "y1", "in", "range", "(", "laststart", "[", "k", "]", ",", "y", "+", "1", ")", ":", "block", "[", "y1", "]", "+=", "1", "for", "y1", "in", "range", "(", "y", "+", "1", ")", ":", "vline_score", "[", "x", ",", "y1", ",", "y", "]", "=", "block", "[", "y1", "]", "self", ".", "_vline_score", "=", "vline_score" ]
Does the hard work to prepare ``vline_score``.
[ "Does", "the", "hard", "work", "to", "prepare", "vline_score", "." ]
python
train
41.521739
paylogic/pip-accel
pip_accel/caches/s3.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/caches/s3.py#L173-L203
def put(self, filename, handle):
    """
    Upload a distribution archive to the configured Amazon S3 bucket.

    If the :attr:`~.Config.s3_cache_readonly` configuration option is
    enabled this method does nothing.

    :param filename: The filename of the distribution archive (a string).
    :param handle: A file-like object that provides access to the
                   distribution archive.
    :raises: :exc:`.CacheBackendError` when any underlying method fails.
    """
    if self.config.s3_cache_readonly:
        logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
    else:
        timer = Timer()
        self.check_prerequisites()
        with PatchedBotoConfig():
            from boto.s3.key import Key
            raw_key = self.get_cache_key(filename)
            logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
            key = Key(self.s3_bucket)
            key.key = raw_key
            try:
                key.set_contents_from_file(handle)
            except Exception as e:
                logger.info("Encountered error writing to S3 bucket, "
                            "falling back to read only mode (exception: %s)", e)
                self.config.s3_cache_readonly = True
            else:
                logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
[ "def", "put", "(", "self", ",", "filename", ",", "handle", ")", ":", "if", "self", ".", "config", ".", "s3_cache_readonly", ":", "logger", ".", "info", "(", "'Skipping upload to S3 bucket (using S3 in read only mode).'", ")", "else", ":", "timer", "=", "Timer", "(", ")", "self", ".", "check_prerequisites", "(", ")", "with", "PatchedBotoConfig", "(", ")", ":", "from", "boto", ".", "s3", ".", "key", "import", "Key", "raw_key", "=", "self", ".", "get_cache_key", "(", "filename", ")", "logger", ".", "info", "(", "\"Uploading distribution archive to S3 bucket: %s\"", ",", "raw_key", ")", "key", "=", "Key", "(", "self", ".", "s3_bucket", ")", "key", ".", "key", "=", "raw_key", "try", ":", "key", ".", "set_contents_from_file", "(", "handle", ")", "except", "Exception", "as", "e", ":", "logger", ".", "info", "(", "\"Encountered error writing to S3 bucket, \"", "\"falling back to read only mode (exception: %s)\"", ",", "e", ")", "self", ".", "config", ".", "s3_cache_readonly", "=", "True", "else", ":", "logger", ".", "info", "(", "\"Finished uploading distribution archive to S3 bucket in %s.\"", ",", "timer", ")" ]
Upload a distribution archive to the configured Amazon S3 bucket. If the :attr:`~.Config.s3_cache_readonly` configuration option is enabled this method does nothing. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive. :raises: :exc:`.CacheBackendError` when any underlying method fails.
[ "Upload", "a", "distribution", "archive", "to", "the", "configured", "Amazon", "S3", "bucket", "." ]
python
train
46.903226
BernardFW/bernard
src/bernard/platforms/telegram/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/telegram/platform.py#L670-L676
async def _send_sleep(self, request: Request, stack: Stack):
    """
    Sleep for the amount of time specified in the Sleep layer
    """
    duration = stack.get_layer(lyr.Sleep).duration
    await sleep(duration)
[ "async", "def", "_send_sleep", "(", "self", ",", "request", ":", "Request", ",", "stack", ":", "Stack", ")", ":", "duration", "=", "stack", ".", "get_layer", "(", "lyr", ".", "Sleep", ")", ".", "duration", "await", "sleep", "(", "duration", ")" ]
Sleep for the amount of time specified in the Sleep layer
[ "Sleep", "for", "the", "amount", "of", "time", "specified", "in", "the", "Sleep", "layer" ]
python
train
32.857143
GeospatialPython/pyshp
shapefile.py
https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L635-L650
def load(self, shapefile=None):
    """Opens a shapefile from a filename or file-like
    object. Normally this method would be called by the
    constructor with the file name as an argument."""
    if shapefile:
        (shapeName, ext) = os.path.splitext(shapefile)
        self.shapeName = shapeName
        self.load_shp(shapeName)
        self.load_shx(shapeName)
        self.load_dbf(shapeName)
        if not (self.shp or self.dbf):
            raise ShapefileException("Unable to open %s.dbf or %s.shp." % (shapeName, shapeName))
    if self.shp:
        self.__shpHeader()
    if self.dbf:
        self.__dbfHeader()
[ "def", "load", "(", "self", ",", "shapefile", "=", "None", ")", ":", "if", "shapefile", ":", "(", "shapeName", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "shapefile", ")", "self", ".", "shapeName", "=", "shapeName", "self", ".", "load_shp", "(", "shapeName", ")", "self", ".", "load_shx", "(", "shapeName", ")", "self", ".", "load_dbf", "(", "shapeName", ")", "if", "not", "(", "self", ".", "shp", "or", "self", ".", "dbf", ")", ":", "raise", "ShapefileException", "(", "\"Unable to open %s.dbf or %s.shp.\"", "%", "(", "shapeName", ",", "shapeName", ")", ")", "if", "self", ".", "shp", ":", "self", ".", "__shpHeader", "(", ")", "if", "self", ".", "dbf", ":", "self", ".", "__dbfHeader", "(", ")" ]
Opens a shapefile from a filename or file-like object. Normally this method would be called by the constructor with the file name as an argument.
[ "Opens", "a", "shapefile", "from", "a", "filename", "or", "file", "-", "like", "object", ".", "Normally", "this", "method", "would", "be", "called", "by", "the", "constructor", "with", "the", "file", "name", "as", "an", "argument", "." ]
python
train
42.9375
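A minimal usage sketch for the loader above; the filename is a placeholder, and the call normally happens implicitly through the Reader constructor:

import shapefile

# Reader() calls load() with the given base name, opening the .shp/.shx/.dbf trio.
sf = shapefile.Reader("blockgroups")
print(sf.shapeType, len(sf.shapes()))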
RudolfCardinal/pythonlib
cardinal_pythonlib/django/function_cache.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/function_cache.py#L47-L70
def get_call_signature(fn: FunctionType,
                       args: ArgsType,
                       kwargs: KwargsType,
                       debug_cache: bool = False) -> str:
    """
    Takes a function and its args/kwargs, and produces a string description
    of the function call (the call signature) suitable for use indirectly as
    a cache key. The string is a JSON representation.
    See ``make_cache_key`` for a more suitable actual cache key.
    """
    # Note that the function won't have the __self__ argument (as in
    # fn.__self__), at this point, even if it's a member function.
    try:
        call_sig = json_encode((fn.__qualname__, args, kwargs))
    except TypeError:
        log.critical(
            "\nTo decorate using @django_cache_function without specifying "
            "cache_key, the decorated function's owning class and its "
            "parameters must be JSON-serializable (see jsonfunc.py, "
            "django_cache_fn.py).\n")
        raise
    if debug_cache:
        log.debug("Making call signature {!r}", call_sig)
    return call_sig
[ "def", "get_call_signature", "(", "fn", ":", "FunctionType", ",", "args", ":", "ArgsType", ",", "kwargs", ":", "KwargsType", ",", "debug_cache", ":", "bool", "=", "False", ")", "->", "str", ":", "# Note that the function won't have the __self__ argument (as in", "# fn.__self__), at this point, even if it's a member function.", "try", ":", "call_sig", "=", "json_encode", "(", "(", "fn", ".", "__qualname__", ",", "args", ",", "kwargs", ")", ")", "except", "TypeError", ":", "log", ".", "critical", "(", "\"\\nTo decorate using @django_cache_function without specifying \"", "\"cache_key, the decorated function's owning class and its \"", "\"parameters must be JSON-serializable (see jsonfunc.py, \"", "\"django_cache_fn.py).\\n\"", ")", "raise", "if", "debug_cache", ":", "log", ".", "debug", "(", "\"Making call signature {!r}\"", ",", "call_sig", ")", "return", "call_sig" ]
Takes a function and its args/kwargs, and produces a string description of the function call (the call signature) suitable for use indirectly as a cache key. The string is a JSON representation. See ``make_cache_key`` for a more suitable actual cache key.
[ "Takes", "a", "function", "and", "its", "args", "/", "kwargs", "and", "produces", "a", "string", "description", "of", "the", "function", "call", "(", "the", "call", "signature", ")", "suitable", "for", "use", "indirectly", "as", "a", "cache", "key", ".", "The", "string", "is", "a", "JSON", "representation", ".", "See", "make_cache_key", "for", "a", "more", "suitable", "actual", "cache", "key", "." ]
python
train
44.458333
chrisspen/burlap
burlap/mysql.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/mysql.py#L518-L541
def query(query, use_sudo=True, **kwargs):
    """
    Run a MySQL query.
    """
    func = use_sudo and run_as_root or run
    user = kwargs.get('mysql_user') or env.get('mysql_user')
    password = kwargs.get('mysql_password') or env.get('mysql_password')

    options = [
        '--batch',
        '--raw',
        '--skip-column-names',
    ]
    if user:
        options.append('--user=%s' % quote(user))
    if password:
        options.append('--password=%s' % quote(password))
    options = ' '.join(options)

    return func('mysql %(options)s --execute=%(query)s' % {
        'options': options,
        'query': quote(query),
    })
[ "def", "query", "(", "query", ",", "use_sudo", "=", "True", ",", "*", "*", "kwargs", ")", ":", "func", "=", "use_sudo", "and", "run_as_root", "or", "run", "user", "=", "kwargs", ".", "get", "(", "'mysql_user'", ")", "or", "env", ".", "get", "(", "'mysql_user'", ")", "password", "=", "kwargs", ".", "get", "(", "'mysql_password'", ")", "or", "env", ".", "get", "(", "'mysql_password'", ")", "options", "=", "[", "'--batch'", ",", "'--raw'", ",", "'--skip-column-names'", ",", "]", "if", "user", ":", "options", ".", "append", "(", "'--user=%s'", "%", "quote", "(", "user", ")", ")", "if", "password", ":", "options", ".", "append", "(", "'--password=%s'", "%", "quote", "(", "password", ")", ")", "options", "=", "' '", ".", "join", "(", "options", ")", "return", "func", "(", "'mysql %(options)s --execute=%(query)s'", "%", "{", "'options'", ":", "options", ",", "'query'", ":", "quote", "(", "query", ")", ",", "}", ")" ]
Run a MySQL query.
[ "Run", "a", "MySQL", "query", "." ]
python
valid
25.958333
yamcs/yamcs-python
yamcs-client/yamcs/tmtc/model.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/tmtc/model.py#L173-L182
def generation_time(self):
    """
    The generation time as set by Yamcs.

    :type: :class:`~datetime.datetime`
    """
    entry = self._proto.commandQueueEntry
    if entry.HasField('generationTimeUTC'):
        return parse_isostring(entry.generationTimeUTC)
    return None
[ "def", "generation_time", "(", "self", ")", ":", "entry", "=", "self", ".", "_proto", ".", "commandQueueEntry", "if", "entry", ".", "HasField", "(", "'generationTimeUTC'", ")", ":", "return", "parse_isostring", "(", "entry", ".", "generationTimeUTC", ")", "return", "None" ]
The generation time as set by Yamcs. :type: :class:`~datetime.datetime`
[ "The", "generation", "time", "as", "set", "by", "Yamcs", "." ]
python
train
30.4
boriel/zxbasic
zxbpp.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbpp.py#L89-L102
def get_include_path():
    """ Default include path using a tricky sys calls.
    """
    f1 = os.path.basename(sys.argv[0]).lower()  # script filename
    f2 = os.path.basename(sys.executable).lower()  # Executable filename

    # If executable filename and script name are the same, we are
    if f1 == f2 or f2 == f1 + '.exe':  # under a "compiled" python binary
        result = os.path.dirname(os.path.realpath(sys.executable))
    else:
        result = os.path.dirname(os.path.realpath(__file__))

    return result
[ "def", "get_include_path", "(", ")", ":", "f1", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "0", "]", ")", ".", "lower", "(", ")", "# script filename", "f2", "=", "os", ".", "path", ".", "basename", "(", "sys", ".", "executable", ")", ".", "lower", "(", ")", "# Executable filename", "# If executable filename and script name are the same, we are", "if", "f1", "==", "f2", "or", "f2", "==", "f1", "+", "'.exe'", ":", "# under a \"compiled\" python binary", "result", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "sys", ".", "executable", ")", ")", "else", ":", "result", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "return", "result" ]
Default include path using a tricky sys calls.
[ "Default", "include", "path", "using", "a", "tricky", "sys", "calls", "." ]
python
train
36.714286
postlund/pyatv
pyatv/net.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/net.py#L42-L65
async def post_data(self, path, data=None, headers=None, timeout=None):
    """Perform a POST request."""
    url = self.base_url + path
    _LOGGER.debug('POST URL: %s', url)
    self._log_data(data, False)

    resp = None
    try:
        resp = await self._session.post(
            url, headers=headers, data=data,
            timeout=DEFAULT_TIMEOUT if timeout is None else timeout)
        if resp.content_length is not None:
            resp_data = await resp.read()
        else:
            resp_data = None
        self._log_data(resp_data, True)
        return resp_data, resp.status
    except Exception as ex:
        if resp is not None:
            resp.close()
        raise ex
    finally:
        if resp is not None:
            await resp.release()
[ "async", "def", "post_data", "(", "self", ",", "path", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "timeout", "=", "None", ")", ":", "url", "=", "self", ".", "base_url", "+", "path", "_LOGGER", ".", "debug", "(", "'POST URL: %s'", ",", "url", ")", "self", ".", "_log_data", "(", "data", ",", "False", ")", "resp", "=", "None", "try", ":", "resp", "=", "await", "self", ".", "_session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "timeout", "=", "DEFAULT_TIMEOUT", "if", "timeout", "is", "None", "else", "timeout", ")", "if", "resp", ".", "content_length", "is", "not", "None", ":", "resp_data", "=", "await", "resp", ".", "read", "(", ")", "else", ":", "resp_data", "=", "None", "self", ".", "_log_data", "(", "resp_data", ",", "True", ")", "return", "resp_data", ",", "resp", ".", "status", "except", "Exception", "as", "ex", ":", "if", "resp", "is", "not", "None", ":", "resp", ".", "close", "(", ")", "raise", "ex", "finally", ":", "if", "resp", "is", "not", "None", ":", "await", "resp", ".", "release", "(", ")" ]
Perform a POST request.
[ "Perform", "a", "POST", "request", "." ]
python
train
34.75
JukeboxPipeline/jukebox-core
src/jukeboxcore/reftrack.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L1634-L1643
def fetch_replace_restriction(self, ):
    """Fetch whether unloading is restricted

    :returns: True, if unloading is restricted
    :rtype: :class:`bool`
    :raises: None
    """
    inter = self.get_refobjinter()
    restricted = self.status() is None
    return restricted or inter.fetch_action_restriction(self, 'replace')
[ "def", "fetch_replace_restriction", "(", "self", ",", ")", ":", "inter", "=", "self", ".", "get_refobjinter", "(", ")", "restricted", "=", "self", ".", "status", "(", ")", "is", "None", "return", "restricted", "or", "inter", ".", "fetch_action_restriction", "(", "self", ",", "'replace'", ")" ]
Fetch whether unloading is restricted :returns: True, if unloading is restricted :rtype: :class:`bool` :raises: None
[ "Fetch", "whether", "unloading", "is", "restricted" ]
python
train
35.3
KoffeinFlummi/Chronyk
chronyk/chronyk.py
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L554-L572
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
    """Returns a time string.

    :param pattern = "%Y-%m-%d %H:%M:%S"
        The format used. By default, an ISO-type format is used. The
        syntax here is identical to the one used by time.strftime() and
        time.strptime().

    :param timezone = self.timezone
        The timezone (in seconds west of UTC) to return the value in. By
        default, the timezone used when constructing the class is used
        (local one by default). To use UTC, use timezone = 0. To use the
        local tz, use timezone = chronyk.LOCALTZ.
    """
    if timezone is None:
        timezone = self.timezone
    timestamp = self.__timestamp__ - timezone
    timestamp -= LOCALTZ
    return _strftime(pattern, _gmtime(timestamp))
[ "def", "timestring", "(", "self", ",", "pattern", "=", "\"%Y-%m-%d %H:%M:%S\"", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "timestamp", "=", "self", ".", "__timestamp__", "-", "timezone", "timestamp", "-=", "LOCALTZ", "return", "_strftime", "(", "pattern", ",", "_gmtime", "(", "timestamp", ")", ")" ]
Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "time", "string", "." ]
python
train
44.210526
pyvisa/pyvisa
pyvisa/thirdparty/prettytable.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/thirdparty/prettytable.py#L1440-L1451
def from_html_one(html_code, **kwargs):
    """
    Generates a PrettyTables from a string of HTML code which contains only a
    single <table>
    """
    tables = from_html(html_code, **kwargs)
    try:
        assert len(tables) == 1
    except AssertionError:
        raise Exception("More than one <table> in provided HTML code! Use from_html instead.")
    return tables[0]
[ "def", "from_html_one", "(", "html_code", ",", "*", "*", "kwargs", ")", ":", "tables", "=", "from_html", "(", "html_code", ",", "*", "*", "kwargs", ")", "try", ":", "assert", "len", "(", "tables", ")", "==", "1", "except", "AssertionError", ":", "raise", "Exception", "(", "\"More than one <table> in provided HTML code! Use from_html instead.\"", ")", "return", "tables", "[", "0", "]" ]
Generates a PrettyTables from a string of HTML code which contains only a single <table>
[ "Generates", "a", "PrettyTables", "from", "a", "string", "of", "HTML", "code", "which", "contains", "only", "a", "single", "<table", ">" ]
python
train
30.916667
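A hedged usage sketch for from_html_one; the import path assumes the standalone prettytable package rather than the vendored copy under pyvisa.thirdparty, and the HTML string is illustrative:

from prettytable import from_html_one

html = "<table><tr><th>City</th><th>Pop</th></tr><tr><td>Oslo</td><td>697k</td></tr></table>"
table = from_html_one(html)  # raises if the HTML holds more than one <table>
print(table)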
materialsproject/pymatgen
pymatgen/entries/compatibility.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/entries/compatibility.py#L425-L440
def get_corrections_dict(self, entry):
    """
    Returns the corrections applied to a particular entry.

    Args:
        entry: A ComputedEntry object.

    Returns:
        ({correction_name: value})
    """
    corrections = {}
    for c in self.corrections:
        val = c.get_correction(entry)
        if val != 0:
            corrections[str(c)] = val
    return corrections
[ "def", "get_corrections_dict", "(", "self", ",", "entry", ")", ":", "corrections", "=", "{", "}", "for", "c", "in", "self", ".", "corrections", ":", "val", "=", "c", ".", "get_correction", "(", "entry", ")", "if", "val", "!=", "0", ":", "corrections", "[", "str", "(", "c", ")", "]", "=", "val", "return", "corrections" ]
Returns the corrections applied to a particular entry. Args: entry: A ComputedEntry object. Returns: ({correction_name: value})
[ "Returns", "the", "corrections", "applied", "to", "a", "particular", "entry", "." ]
python
train
26.3125
bennylope/django-organizations
organizations/abstract.py
https://github.com/bennylope/django-organizations/blob/85f753a8f7a8f0f31636c9209fb69e7030a5c79a/organizations/abstract.py#L178-L187
def change_owner(self, new_owner):
    """
    Changes ownership of an organization.
    """
    old_owner = self.owner.organization_user
    self.owner.organization_user = new_owner
    self.owner.save()

    # Owner changed signal
    owner_changed.send(sender=self, old=old_owner, new=new_owner)
[ "def", "change_owner", "(", "self", ",", "new_owner", ")", ":", "old_owner", "=", "self", ".", "owner", ".", "organization_user", "self", ".", "owner", ".", "organization_user", "=", "new_owner", "self", ".", "owner", ".", "save", "(", ")", "# Owner changed signal", "owner_changed", ".", "send", "(", "sender", "=", "self", ",", "old", "=", "old_owner", ",", "new", "=", "new_owner", ")" ]
Changes ownership of an organization.
[ "Changes", "ownership", "of", "an", "organization", "." ]
python
train
32.1
rbarrois/mpdlcd
mpdlcd/display_pattern.py
https://github.com/rbarrois/mpdlcd/blob/85f16c8cc0883f8abb4c2cc7f69729c3e2f857da/mpdlcd/display_pattern.py#L446-L464
def add(self, pattern_txt):
    """Add a pattern to the list.

    Args:
        pattern_txt (str list): the pattern, as a list of lines.
    """
    self.patterns[len(pattern_txt)] = pattern_txt

    low = 0
    high = len(pattern_txt) - 1

    while not pattern_txt[low]:
        low += 1
    while not pattern_txt[high]:
        high -= 1

    min_pattern = pattern_txt[low:high + 1]
    self.min_patterns[len(min_pattern)] = min_pattern
[ "def", "add", "(", "self", ",", "pattern_txt", ")", ":", "self", ".", "patterns", "[", "len", "(", "pattern_txt", ")", "]", "=", "pattern_txt", "low", "=", "0", "high", "=", "len", "(", "pattern_txt", ")", "-", "1", "while", "not", "pattern_txt", "[", "low", "]", ":", "low", "+=", "1", "while", "not", "pattern_txt", "[", "high", "]", ":", "high", "-=", "1", "min_pattern", "=", "pattern_txt", "[", "low", ":", "high", "+", "1", "]", "self", ".", "min_patterns", "[", "len", "(", "min_pattern", ")", "]", "=", "min_pattern" ]
Add a pattern to the list. Args: pattern_txt (str list): the pattern, as a list of lines.
[ "Add", "a", "pattern", "to", "the", "list", "." ]
python
train
25
modin-project/modin
modin/pandas/base.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1289-L1300
def gt(self, other, axis="columns", level=None):
    """Checks element-wise that this is greater than other.

    Args:
        other: A DataFrame or Series or scalar to compare to.
        axis: The axis to perform the gt over.
        level: The Multilevel index level to apply gt over.

    Returns:
        A new DataFrame filled with Booleans.
    """
    return self._binary_op("gt", other, axis=axis, level=level)
[ "def", "gt", "(", "self", ",", "other", ",", "axis", "=", "\"columns\"", ",", "level", "=", "None", ")", ":", "return", "self", ".", "_binary_op", "(", "\"gt\"", ",", "other", ",", "axis", "=", "axis", ",", "level", "=", "level", ")" ]
Checks element-wise that this is greater than other. Args: other: A DataFrame or Series or scalar to compare to. axis: The axis to perform the gt over. level: The Multilevel index level to apply gt over. Returns: A new DataFrame filled with Booleans.
[ "Checks", "element", "-", "wise", "that", "this", "is", "greater", "than", "other", ".", "Args", ":", "other", ":", "A", "DataFrame", "or", "Series", "or", "scalar", "to", "compare", "to", ".", "axis", ":", "The", "axis", "to", "perform", "the", "gt", "over", ".", "level", ":", "The", "Multilevel", "index", "level", "to", "apply", "gt", "over", ".", "Returns", ":", "A", "new", "DataFrame", "filled", "with", "Booleans", "." ]
python
train
38
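A small usage sketch for DataFrame.gt, assuming a working Modin installation with one of its execution engines configured; the data values are made up.
import modin.pandas as pd

df = pd.DataFrame({"a": [1, 5, 3], "b": [4, 2, 6]})
mask = df.gt(3)        # element-wise "greater than" against a scalar
print(mask)            # boolean DataFrame: True where the value exceeds 3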
newville/wxmplot
wxmplot/baseframe.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/baseframe.py#L104-L108
def save_figure(self,event=None, transparent=False, dpi=600): """ save figure image to file""" if self.panel is not None: self.panel.save_figure(event=event, transparent=transparent, dpi=dpi)
[ "def", "save_figure", "(", "self", ",", "event", "=", "None", ",", "transparent", "=", "False", ",", "dpi", "=", "600", ")", ":", "if", "self", ".", "panel", "is", "not", "None", ":", "self", ".", "panel", ".", "save_figure", "(", "event", "=", "event", ",", "transparent", "=", "transparent", ",", "dpi", "=", "dpi", ")" ]
save figure image to file
[ "save", "figure", "image", "to", "file" ]
python
train
50
rackerlabs/fleece
fleece/cli/run/run.py
https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/cli/run/run.py#L120-L135
def get_account(config, environment, stage=None): """Find environment name in config object and return AWS account.""" if environment is None and stage: environment = get_environment(config, stage) account = None for env in config.get('environments', []): if env.get('name') == environment: account = env.get('account') role = env.get('role') username = os.environ.get(env.get('rs_username_var')) \ if env.get('rs_username_var') else None apikey = os.environ.get(env.get('rs_apikey_var')) \ if env.get('rs_apikey_var') else None if not account: sys.exit(ACCT_NOT_FOUND_ERROR.format(environment)) return account, role, username, apikey
[ "def", "get_account", "(", "config", ",", "environment", ",", "stage", "=", "None", ")", ":", "if", "environment", "is", "None", "and", "stage", ":", "environment", "=", "get_environment", "(", "config", ",", "stage", ")", "account", "=", "None", "for", "env", "in", "config", ".", "get", "(", "'environments'", ",", "[", "]", ")", ":", "if", "env", ".", "get", "(", "'name'", ")", "==", "environment", ":", "account", "=", "env", ".", "get", "(", "'account'", ")", "role", "=", "env", ".", "get", "(", "'role'", ")", "username", "=", "os", ".", "environ", ".", "get", "(", "env", ".", "get", "(", "'rs_username_var'", ")", ")", "if", "env", ".", "get", "(", "'rs_username_var'", ")", "else", "None", "apikey", "=", "os", ".", "environ", ".", "get", "(", "env", ".", "get", "(", "'rs_apikey_var'", ")", ")", "if", "env", ".", "get", "(", "'rs_apikey_var'", ")", "else", "None", "if", "not", "account", ":", "sys", ".", "exit", "(", "ACCT_NOT_FOUND_ERROR", ".", "format", "(", "environment", ")", ")", "return", "account", ",", "role", ",", "username", ",", "apikey" ]
Find environment name in config object and return AWS account.
[ "Find", "environment", "name", "in", "config", "object", "and", "return", "AWS", "account", "." ]
python
train
46.6875
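A usage sketch for get_account. The config dict below is hypothetical and only mirrors the keys the function body reads (environments, name, account, role, rs_username_var, rs_apikey_var); the account id and credentials are placeholders.
import os

config = {
    'environments': [
        {'name': 'prod', 'account': '123456789012', 'role': 'deploy',
         'rs_username_var': 'RS_USER', 'rs_apikey_var': 'RS_APIKEY'},
    ],
}
os.environ['RS_USER'] = 'example-user'    # placeholder values for the env-var lookups
os.environ['RS_APIKEY'] = 'example-key'

account, role, username, apikey = get_account(config, 'prod')
print(account, role)                      # 123456789012 deploy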
HDI-Project/MLBlocks
mlblocks/datasets.py
https://github.com/HDI-Project/MLBlocks/blob/e1ca77bce3c4537c0800a4c1395e1b6bbde5465d/mlblocks/datasets.py#L357-L373
def load_amazon(): """Amazon product co-purchasing network and ground-truth communities. Network was collected by crawling Amazon website. It is based on Customers Who Bought This Item Also Bought feature of the Amazon website. If a product i is frequently co-purchased with product j, the graph contains an undirected edge from i to j. Each product category provided by Amazon defines each ground-truth community. """ dataset_path = _load('amazon') X = _load_csv(dataset_path, 'data') y = X.pop('label').values graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml'))) return Dataset(load_amazon.__doc__, X, y, normalized_mutual_info_score, graph=graph)
[ "def", "load_amazon", "(", ")", ":", "dataset_path", "=", "_load", "(", "'amazon'", ")", "X", "=", "_load_csv", "(", "dataset_path", ",", "'data'", ")", "y", "=", "X", ".", "pop", "(", "'label'", ")", ".", "values", "graph", "=", "nx", ".", "Graph", "(", "nx", ".", "read_gml", "(", "os", ".", "path", ".", "join", "(", "dataset_path", ",", "'graph.gml'", ")", ")", ")", "return", "Dataset", "(", "load_amazon", ".", "__doc__", ",", "X", ",", "y", ",", "normalized_mutual_info_score", ",", "graph", "=", "graph", ")" ]
Amazon product co-purchasing network and ground-truth communities. Network was collected by crawling Amazon website. It is based on Customers Who Bought This Item Also Bought feature of the Amazon website. If a product i is frequently co-purchased with product j, the graph contains an undirected edge from i to j. Each product category provided by Amazon defines each ground-truth community.
[ "Amazon", "product", "co", "-", "purchasing", "network", "and", "ground", "-", "truth", "communities", "." ]
python
train
41.176471
klavinslab/coral
coral/analysis/_structure/viennarna.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/viennarna.py#L18-L140
def cofold(self, strand1, strand2, temp=37.0, dangles=2, nolp=False, nogu=False, noclosinggu=False, constraints=None, canonicalbponly=False, partition=-1, pfscale=None, gquad=False): '''Run the RNAcofold command and retrieve the result in a dictionary. :param strand1: Strand 1 for running RNAcofold. :type strand1: coral.DNA or coral.RNA :param strand1: Strand 2 for running RNAcofold. :type strand2: coral.DNA or coral.RNA :param temp: Temperature at which to run the calculations. :type temp: float :param dangles: How to treat dangling end energies. Set to 0 to ignore dangling ends. Set to 1 to limit unpaired bases to at most one dangling end (default for MFE calc). Set to 2 (the default) to remove the limit in 1. Set to 3 to allow coaxial stacking of adjacent helices in .multi-loops :type dangles: int :param nolp: Produce structures without lonely pairs (isolated single base pairs). :type nolp: bool :param nogu: Do not allow GU pairs. :type nogu: bool :param noclosinggu: Do not allow GU pairs at the end of helices. :type noclosinggu: bool :param constraints: Any structural constraints to use. Format is defined at http://www.tbi.univie.ac.at/RNA/RNAfold.1.html :type constraints: str :param canonicalbponly: Remove non-canonical base pairs from the structure constraint (if applicable). :type canonicalbponly: bool :param partition: Calculates the partition function for the sequence. :type partition: int :param pfscale: Scaling factor for the partition function. :type pfScale: float :param gquad: Incorporate G-Quadruplex formation into the structure prediction. :type gquad: bool :returns: Dictionary of calculated values, defaulting to values of MFE ('mfe': float) and dotbracket structure ('dotbracket': str). More keys are added depending on keyword arguments. 
:rtype: dict ''' cmd_args = [] cmd_kwargs = {'--temp=': str(temp)} cmd_kwargs['--dangles='] = dangles if nolp: cmd_args.append('--noLP') if nogu: cmd_args.append('--noGU') if noclosinggu: cmd_args.append('--noClosingGU') if constraints is not None: cmd_args.append('--constraint') if canonicalbponly: cmd_args.append('--canonicalBPonly') if partition: cmd_args.append('--partfunc') if pfscale is not None: cmd_kwargs['pfScale'] = float(pfscale) if gquad: cmd_args.append('--gquad') inputs = ['>strands\n{}&{}'.format(str(strand1), str(strand2))] if constraints is not None: inputs.append(constraints) rnafold_output = self._run('RNAcofold', inputs, cmd_args, cmd_kwargs) # Process the output output = {} lines = rnafold_output.splitlines() # Line 1 is the name of the sequence input, line 2 is the sequence lines.pop(0) lines.pop(0) # Line 3 is the dotbracket + mfe for strand1 line3 = lines.pop(0) output['dotbracket'] = self._lparse(line3, '^(.*) \(') output['mfe'] = float(self._lparse(line3, ' \((.*)\)$')) # Optional outputs if partition: # Line 4 is 'a coarse representation of the pair probabilities' and # the ensemble free energy line4 = lines.pop(0) output['coarse'] = self._lparse(line4, '^(.*) \[') output['ensemble'] = float(self._lparse(line4, ' \[(.*)\]$')) # Line 5 is the centroid structure, its free energy, and distance # to the ensemble line5 = lines.pop(0) 'ensemble (.*),' output['frequency'] = float(self._lparse(line5, 'ensemble (.*),')) output['deltaG'] = float(self._lparse(line5, 'binding=(.*)$')) # Parse the postscript file (the only place the probability matrix # is) with open(os.path.join(self._tempdir, 'strands_dp.ps')) as f: pattern = 'start of base pair probability data\n(.*)\nshowpage' dotplot_file = f.read() dotplot_data = re.search(pattern, dotplot_file, flags=re.DOTALL).group(1).split('\n') # Dimension of the dotplot - compares seq1, seq2 to self and # to each other (concatenation of seq1 and seq2 = axis) dim = len(strand1) + len(strand2) ensemble_probs = np.zeros((dim, dim)) optimal_probs = np.zeros((dim, dim)) for point in dotplot_data: point_split = point.split(' ') # Use zero indexing i = int(point_split[0]) - 1 j = int(point_split[1]) - 1 sqprob = float(point_split[2]) probtype = point_split[3] if probtype == 'ubox': ensemble_probs[i][j] = sqprob**2 else: optimal_probs[i][j] = sqprob**2 output['ensemble_matrix'] = ensemble_probs output['optimal_matrix'] = optimal_probs return output
[ "def", "cofold", "(", "self", ",", "strand1", ",", "strand2", ",", "temp", "=", "37.0", ",", "dangles", "=", "2", ",", "nolp", "=", "False", ",", "nogu", "=", "False", ",", "noclosinggu", "=", "False", ",", "constraints", "=", "None", ",", "canonicalbponly", "=", "False", ",", "partition", "=", "-", "1", ",", "pfscale", "=", "None", ",", "gquad", "=", "False", ")", ":", "cmd_args", "=", "[", "]", "cmd_kwargs", "=", "{", "'--temp='", ":", "str", "(", "temp", ")", "}", "cmd_kwargs", "[", "'--dangles='", "]", "=", "dangles", "if", "nolp", ":", "cmd_args", ".", "append", "(", "'--noLP'", ")", "if", "nogu", ":", "cmd_args", ".", "append", "(", "'--noGU'", ")", "if", "noclosinggu", ":", "cmd_args", ".", "append", "(", "'--noClosingGU'", ")", "if", "constraints", "is", "not", "None", ":", "cmd_args", ".", "append", "(", "'--constraint'", ")", "if", "canonicalbponly", ":", "cmd_args", ".", "append", "(", "'--canonicalBPonly'", ")", "if", "partition", ":", "cmd_args", ".", "append", "(", "'--partfunc'", ")", "if", "pfscale", "is", "not", "None", ":", "cmd_kwargs", "[", "'pfScale'", "]", "=", "float", "(", "pfscale", ")", "if", "gquad", ":", "cmd_args", ".", "append", "(", "'--gquad'", ")", "inputs", "=", "[", "'>strands\\n{}&{}'", ".", "format", "(", "str", "(", "strand1", ")", ",", "str", "(", "strand2", ")", ")", "]", "if", "constraints", "is", "not", "None", ":", "inputs", ".", "append", "(", "constraints", ")", "rnafold_output", "=", "self", ".", "_run", "(", "'RNAcofold'", ",", "inputs", ",", "cmd_args", ",", "cmd_kwargs", ")", "# Process the output", "output", "=", "{", "}", "lines", "=", "rnafold_output", ".", "splitlines", "(", ")", "# Line 1 is the name of the sequence input, line 2 is the sequence", "lines", ".", "pop", "(", "0", ")", "lines", ".", "pop", "(", "0", ")", "# Line 3 is the dotbracket + mfe for strand1", "line3", "=", "lines", ".", "pop", "(", "0", ")", "output", "[", "'dotbracket'", "]", "=", "self", ".", "_lparse", "(", "line3", ",", "'^(.*) \\('", ")", "output", "[", "'mfe'", "]", "=", "float", "(", "self", ".", "_lparse", "(", "line3", ",", "' \\((.*)\\)$'", ")", ")", "# Optional outputs", "if", "partition", ":", "# Line 4 is 'a coarse representation of the pair probabilities' and", "# the ensemble free energy", "line4", "=", "lines", ".", "pop", "(", "0", ")", "output", "[", "'coarse'", "]", "=", "self", ".", "_lparse", "(", "line4", ",", "'^(.*) \\['", ")", "output", "[", "'ensemble'", "]", "=", "float", "(", "self", ".", "_lparse", "(", "line4", ",", "' \\[(.*)\\]$'", ")", ")", "# Line 5 is the centroid structure, its free energy, and distance", "# to the ensemble", "line5", "=", "lines", ".", "pop", "(", "0", ")", "'ensemble (.*),'", "output", "[", "'frequency'", "]", "=", "float", "(", "self", ".", "_lparse", "(", "line5", ",", "'ensemble (.*),'", ")", ")", "output", "[", "'deltaG'", "]", "=", "float", "(", "self", ".", "_lparse", "(", "line5", ",", "'binding=(.*)$'", ")", ")", "# Parse the postscript file (the only place the probability matrix", "# is)", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_tempdir", ",", "'strands_dp.ps'", ")", ")", "as", "f", ":", "pattern", "=", "'start of base pair probability data\\n(.*)\\nshowpage'", "dotplot_file", "=", "f", ".", "read", "(", ")", "dotplot_data", "=", "re", ".", "search", "(", "pattern", ",", "dotplot_file", ",", "flags", "=", "re", ".", "DOTALL", ")", ".", "group", "(", "1", ")", ".", "split", "(", "'\\n'", ")", "# Dimension of the dotplot - compares seq1, seq2 to self and", "# to each 
other (concatenation of seq1 and seq2 = axis)", "dim", "=", "len", "(", "strand1", ")", "+", "len", "(", "strand2", ")", "ensemble_probs", "=", "np", ".", "zeros", "(", "(", "dim", ",", "dim", ")", ")", "optimal_probs", "=", "np", ".", "zeros", "(", "(", "dim", ",", "dim", ")", ")", "for", "point", "in", "dotplot_data", ":", "point_split", "=", "point", ".", "split", "(", "' '", ")", "# Use zero indexing", "i", "=", "int", "(", "point_split", "[", "0", "]", ")", "-", "1", "j", "=", "int", "(", "point_split", "[", "1", "]", ")", "-", "1", "sqprob", "=", "float", "(", "point_split", "[", "2", "]", ")", "probtype", "=", "point_split", "[", "3", "]", "if", "probtype", "==", "'ubox'", ":", "ensemble_probs", "[", "i", "]", "[", "j", "]", "=", "sqprob", "**", "2", "else", ":", "optimal_probs", "[", "i", "]", "[", "j", "]", "=", "sqprob", "**", "2", "output", "[", "'ensemble_matrix'", "]", "=", "ensemble_probs", "output", "[", "'optimal_matrix'", "]", "=", "optimal_probs", "return", "output" ]
Run the RNAcofold command and retrieve the result in a dictionary. :param strand1: Strand 1 for running RNAcofold. :type strand1: coral.DNA or coral.RNA :param strand1: Strand 2 for running RNAcofold. :type strand2: coral.DNA or coral.RNA :param temp: Temperature at which to run the calculations. :type temp: float :param dangles: How to treat dangling end energies. Set to 0 to ignore dangling ends. Set to 1 to limit unpaired bases to at most one dangling end (default for MFE calc). Set to 2 (the default) to remove the limit in 1. Set to 3 to allow coaxial stacking of adjacent helices in .multi-loops :type dangles: int :param nolp: Produce structures without lonely pairs (isolated single base pairs). :type nolp: bool :param nogu: Do not allow GU pairs. :type nogu: bool :param noclosinggu: Do not allow GU pairs at the end of helices. :type noclosinggu: bool :param constraints: Any structural constraints to use. Format is defined at http://www.tbi.univie.ac.at/RNA/RNAfold.1.html :type constraints: str :param canonicalbponly: Remove non-canonical base pairs from the structure constraint (if applicable). :type canonicalbponly: bool :param partition: Calculates the partition function for the sequence. :type partition: int :param pfscale: Scaling factor for the partition function. :type pfScale: float :param gquad: Incorporate G-Quadruplex formation into the structure prediction. :type gquad: bool :returns: Dictionary of calculated values, defaulting to values of MFE ('mfe': float) and dotbracket structure ('dotbracket': str). More keys are added depending on keyword arguments. :rtype: dict
[ "Run", "the", "RNAcofold", "command", "and", "retrieve", "the", "result", "in", "a", "dictionary", "." ]
python
train
45.349593
funilrys/PyFunceble
PyFunceble/database.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/database.py#L468-L550
def add(self): """ Save the current :code.`PyFunceble.CONFIGURATION['to_test']` into the current timestamp. """ if PyFunceble.CONFIGURATION["inactive_database"]: # The database subsystem is activated. # We get the timestamp to use as index. timestamp = str(self._timestamp()) if ( "inactive_db" in PyFunceble.INTERN and PyFunceble.INTERN["file_to_test"] in PyFunceble.INTERN["inactive_db"] ): # * The file path is not into the database. if ( timestamp in PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ] ): # The timetamp is already into the database related to the file we # are testing. if ( PyFunceble.INTERN["to_test"] not in PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ][timestamp] ): # The currently tested element is not into the database related # to the file we are testing. # We append the currently tested element into the database. PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ][timestamp].append(PyFunceble.INTERN["to_test"]) else: # The timetamp is not into the database related to the file we # are testing. # We append the index and the database element into the databse # related to the file we are testing. PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ].update({timestamp: [PyFunceble.INTERN["to_test"]]}) if ( "to_test" in PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ] and PyFunceble.INTERN["to_test"] in PyFunceble.INTERN["inactive_db"][ PyFunceble.INTERN["file_to_test"] ]["to_test"] ): # * The `to_test` index is into the database related to the file we # are testing. # and # * The element we are testing is into the `to_test` index related to # the file we are testing. # We remove the element from the list of element to test. PyFunceble.INTERN["inactive_db"][PyFunceble.INTERN["file_to_test"]][ "to_test" ].remove(PyFunceble.INTERN["to_test"]) else: # The file path is not into the database. # We initiate the file path and its content into the database. PyFunceble.INTERN["inactive_db"] = { PyFunceble.INTERN["file_to_test"]: { timestamp: [PyFunceble.INTERN["to_test"]] } } # And we save the data into the database. self._backup()
[ "def", "add", "(", "self", ")", ":", "if", "PyFunceble", ".", "CONFIGURATION", "[", "\"inactive_database\"", "]", ":", "# The database subsystem is activated.", "# We get the timestamp to use as index.", "timestamp", "=", "str", "(", "self", ".", "_timestamp", "(", ")", ")", "if", "(", "\"inactive_db\"", "in", "PyFunceble", ".", "INTERN", "and", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "in", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", ")", ":", "# * The file path is not into the database.", "if", "(", "timestamp", "in", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", ")", ":", "# The timetamp is already into the database related to the file we", "# are testing.", "if", "(", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", "not", "in", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "[", "timestamp", "]", ")", ":", "# The currently tested element is not into the database related", "# to the file we are testing.", "# We append the currently tested element into the database.", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "[", "timestamp", "]", ".", "append", "(", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", ")", "else", ":", "# The timetamp is not into the database related to the file we", "# are testing.", "# We append the index and the database element into the databse", "# related to the file we are testing.", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", ".", "update", "(", "{", "timestamp", ":", "[", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", "]", "}", ")", "if", "(", "\"to_test\"", "in", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "and", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", "in", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "[", "\"to_test\"", "]", ")", ":", "# * The `to_test` index is into the database related to the file we", "# are testing.", "# and", "# * The element we are testing is into the `to_test` index related to", "# the file we are testing.", "# We remove the element from the list of element to test.", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "[", "\"to_test\"", "]", ".", "remove", "(", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", ")", "else", ":", "# The file path is not into the database.", "# We initiate the file path and its content into the database.", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", "=", "{", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", ":", "{", "timestamp", ":", "[", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", "]", "}", "}", "# And we save the data into the database.", "self", ".", "_backup", "(", ")" ]
Save the current :code:`PyFunceble.CONFIGURATION['to_test']` into the current timestamp.
[ "Save", "the", "current", ":", "code", ".", "PyFunceble", ".", "CONFIGURATION", "[", "to_test", "]", "into", "the", "current", "timestamp", "." ]
python
test
41.216867
log2timeline/plaso
plaso/multi_processing/engine.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/multi_processing/engine.py#L62-L75
def _AbortJoin(self, timeout=None): """Aborts all registered processes by joining with the parent process. Args: timeout (int): number of seconds to wait for processes to join, where None represents no timeout. """ for pid, process in iter(self._processes_per_pid.items()): logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format( process.name, pid)) process.join(timeout=timeout) if not process.is_alive(): logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format( process.name, pid))
[ "def", "_AbortJoin", "(", "self", ",", "timeout", "=", "None", ")", ":", "for", "pid", ",", "process", "in", "iter", "(", "self", ".", "_processes_per_pid", ".", "items", "(", ")", ")", ":", "logger", ".", "debug", "(", "'Waiting for process: {0:s} (PID: {1:d}).'", ".", "format", "(", "process", ".", "name", ",", "pid", ")", ")", "process", ".", "join", "(", "timeout", "=", "timeout", ")", "if", "not", "process", ".", "is_alive", "(", ")", ":", "logger", ".", "debug", "(", "'Process {0:s} (PID: {1:d}) stopped.'", ".", "format", "(", "process", ".", "name", ",", "pid", ")", ")" ]
Aborts all registered processes by joining with the parent process. Args: timeout (int): number of seconds to wait for processes to join, where None represents no timeout.
[ "Aborts", "all", "registered", "processes", "by", "joining", "with", "the", "parent", "process", "." ]
python
train
40.071429
scikit-learn-contrib/forest-confidence-interval
forestci/forestci.py
https://github.com/scikit-learn-contrib/forest-confidence-interval/blob/401c63a74a27d775eff0f72b6c20ffd568491fe0/forestci/forestci.py#L32-L67
def calc_inbag(n_samples, forest): """ Derive samples used to create trees in scikit-learn RandomForest objects. Recovers the samples in each tree from the random state of that tree using :func:`forest._generate_sample_indices`. Parameters ---------- n_samples : int The number of samples used to fit the scikit-learn RandomForest object. forest : RandomForest Regressor or Classifier object that is already fit by scikit-learn. Returns ------- Array that records how many times a data point was placed in a tree. Columns are individual trees. Rows are the number of times a sample was used in a tree. """ if not forest.bootstrap: e_s = "Cannot calculate the inbag from a forest that has " e_s = " bootstrap=False" raise ValueError(e_s) n_trees = forest.n_estimators inbag = np.zeros((n_samples, n_trees)) sample_idx = [] for t_idx in range(n_trees): sample_idx.append( _generate_sample_indices(forest.estimators_[t_idx].random_state, n_samples)) inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples) return inbag
[ "def", "calc_inbag", "(", "n_samples", ",", "forest", ")", ":", "if", "not", "forest", ".", "bootstrap", ":", "e_s", "=", "\"Cannot calculate the inbag from a forest that has \"", "e_s", "=", "\" bootstrap=False\"", "raise", "ValueError", "(", "e_s", ")", "n_trees", "=", "forest", ".", "n_estimators", "inbag", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_trees", ")", ")", "sample_idx", "=", "[", "]", "for", "t_idx", "in", "range", "(", "n_trees", ")", ":", "sample_idx", ".", "append", "(", "_generate_sample_indices", "(", "forest", ".", "estimators_", "[", "t_idx", "]", ".", "random_state", ",", "n_samples", ")", ")", "inbag", "[", ":", ",", "t_idx", "]", "=", "np", ".", "bincount", "(", "sample_idx", "[", "-", "1", "]", ",", "minlength", "=", "n_samples", ")", "return", "inbag" ]
Derive samples used to create trees in scikit-learn RandomForest objects. Recovers the samples in each tree from the random state of that tree using :func:`forest._generate_sample_indices`. Parameters ---------- n_samples : int The number of samples used to fit the scikit-learn RandomForest object. forest : RandomForest Regressor or Classifier object that is already fit by scikit-learn. Returns ------- Array that records how many times a data point was placed in a tree. Columns are individual trees. Rows are the number of times a sample was used in a tree.
[ "Derive", "samples", "used", "to", "create", "trees", "in", "scikit", "-", "learn", "RandomForest", "objects", "." ]
python
valid
32.833333
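A hedged usage sketch for calc_inbag, assuming the function is importable from the forestci package and that scikit-learn is installed; the toy data is random.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from forestci import calc_inbag   # assumed import path for the function shown above

rng = np.random.RandomState(0)
X = rng.rand(100, 4)
y = rng.rand(100)
forest = RandomForestRegressor(n_estimators=10, bootstrap=True).fit(X, y)

inbag = calc_inbag(X.shape[0], forest)
print(inbag.shape)                 # (100, 10): rows are samples, columns are trees
# inbag[i, t] is how many times sample i appeared in tree t's bootstrap sample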
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/bases.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L848-L878
def _get_ID2position_mapper(self, position_mapper): ''' Defines a position parser that is used to map between sample IDs and positions. Parameters -------------- {_bases_position_mapper} TODO: Fix the name to work with more than 26 letters of the alphabet. ''' def num_parser(x, order): i, j = unravel_index(int(x - 1), self.shape, order=order) return (self.row_labels[i], self.col_labels[j]) if hasattr(position_mapper, '__call__'): mapper = position_mapper elif isinstance(position_mapper, collections.Mapping): mapper = lambda x: position_mapper[x] elif position_mapper == 'name': mapper = lambda x: (x[0], int(x[1:])) elif position_mapper in ('row_first_enumerator', 'number'): mapper = lambda x: num_parser(x, 'F') elif position_mapper == 'col_first_enumerator': mapper = lambda x: num_parser(x, 'C') else: msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper) raise ValueError(msg) return mapper
[ "def", "_get_ID2position_mapper", "(", "self", ",", "position_mapper", ")", ":", "def", "num_parser", "(", "x", ",", "order", ")", ":", "i", ",", "j", "=", "unravel_index", "(", "int", "(", "x", "-", "1", ")", ",", "self", ".", "shape", ",", "order", "=", "order", ")", "return", "(", "self", ".", "row_labels", "[", "i", "]", ",", "self", ".", "col_labels", "[", "j", "]", ")", "if", "hasattr", "(", "position_mapper", ",", "'__call__'", ")", ":", "mapper", "=", "position_mapper", "elif", "isinstance", "(", "position_mapper", ",", "collections", ".", "Mapping", ")", ":", "mapper", "=", "lambda", "x", ":", "position_mapper", "[", "x", "]", "elif", "position_mapper", "==", "'name'", ":", "mapper", "=", "lambda", "x", ":", "(", "x", "[", "0", "]", ",", "int", "(", "x", "[", "1", ":", "]", ")", ")", "elif", "position_mapper", "in", "(", "'row_first_enumerator'", ",", "'number'", ")", ":", "mapper", "=", "lambda", "x", ":", "num_parser", "(", "x", ",", "'F'", ")", "elif", "position_mapper", "==", "'col_first_enumerator'", ":", "mapper", "=", "lambda", "x", ":", "num_parser", "(", "x", ",", "'C'", ")", "else", ":", "msg", "=", "'\"{}\" is not a known key_to_position_parser.'", ".", "format", "(", "position_mapper", ")", "raise", "ValueError", "(", "msg", ")", "return", "mapper" ]
Defines a position parser that is used to map between sample IDs and positions. Parameters -------------- {_bases_position_mapper} TODO: Fix the name to work with more than 26 letters of the alphabet.
[ "Defines", "a", "position", "parser", "that", "is", "used", "to", "map", "between", "sample", "IDs", "and", "positions", "." ]
python
train
36.806452
jingw/pyhdfs
pyhdfs.py
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L690-L693
def delete_snapshot(self, path, snapshotname, **kwargs): """Delete a snapshot of a directory""" response = self._delete(path, 'DELETESNAPSHOT', snapshotname=snapshotname, **kwargs) assert not response.content
[ "def", "delete_snapshot", "(", "self", ",", "path", ",", "snapshotname", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_delete", "(", "path", ",", "'DELETESNAPSHOT'", ",", "snapshotname", "=", "snapshotname", ",", "*", "*", "kwargs", ")", "assert", "not", "response", ".", "content" ]
Delete a snapshot of a directory
[ "Delete", "a", "snapshot", "of", "a", "directory" ]
python
train
57.25
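A short usage sketch for delete_snapshot, assuming the usual pyhdfs.HdfsClient entry point; the host, user and paths are placeholders, and '/data' is assumed to be a snapshottable directory that already has the named snapshot.
import pyhdfs

client = pyhdfs.HdfsClient(hosts='namenode.example.com:50070', user_name='hdfs')
client.delete_snapshot('/data', 'nightly-2019-01-01')   # removes the snapshot, returns nothing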
lxc/python2-lxc
lxc/__init__.py
https://github.com/lxc/python2-lxc/blob/b7ec757d2bea1e5787c3e65b1359b8893491ef90/lxc/__init__.py#L426-L449
def list_containers(active=True, defined=True, as_object=False, config_path=None): """ List the containers on the system. """ if config_path: if not os.path.exists(config_path): return tuple() try: entries = _lxc.list_containers(active=active, defined=defined, config_path=config_path) except ValueError: return tuple() else: try: entries = _lxc.list_containers(active=active, defined=defined) except ValueError: return tuple() if as_object: return tuple([Container(name, config_path) for name in entries]) else: return entries
[ "def", "list_containers", "(", "active", "=", "True", ",", "defined", "=", "True", ",", "as_object", "=", "False", ",", "config_path", "=", "None", ")", ":", "if", "config_path", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "return", "tuple", "(", ")", "try", ":", "entries", "=", "_lxc", ".", "list_containers", "(", "active", "=", "active", ",", "defined", "=", "defined", ",", "config_path", "=", "config_path", ")", "except", "ValueError", ":", "return", "tuple", "(", ")", "else", ":", "try", ":", "entries", "=", "_lxc", ".", "list_containers", "(", "active", "=", "active", ",", "defined", "=", "defined", ")", "except", "ValueError", ":", "return", "tuple", "(", ")", "if", "as_object", ":", "return", "tuple", "(", "[", "Container", "(", "name", ",", "config_path", ")", "for", "name", "in", "entries", "]", ")", "else", ":", "return", "entries" ]
List the containers on the system.
[ "List", "the", "containers", "on", "the", "system", "." ]
python
train
29.875
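A usage sketch for list_containers based on the signature shown above; it assumes the Python lxc bindings are installed on a host that has containers.
import lxc

names = lxc.list_containers()                      # tuple of names: active and defined
active_only = lxc.list_containers(defined=False)   # restrict to running containers
containers = lxc.list_containers(as_object=True)   # tuple of lxc.Container objects
for c in containers:
    print(c.name)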
taborlab/FlowCal
FlowCal/plot.py
https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/FlowCal/plot.py#L130-L154
def transform_non_affine(self, x, mask_out_of_range=True): """ Transform a Nx1 numpy array. Parameters ---------- x : array Data to be transformed. mask_out_of_range : bool, optional Whether to mask input values out of range. Return ------ array or masked array Transformed data. """ # Mask out-of-range values if mask_out_of_range: x_masked = np.ma.masked_where((x < self._xmin) | (x > self._xmax), x) else: x_masked = x # Calculate s and return return np.interp(x_masked, self._x_range, self._s_range)
[ "def", "transform_non_affine", "(", "self", ",", "x", ",", "mask_out_of_range", "=", "True", ")", ":", "# Mask out-of-range values", "if", "mask_out_of_range", ":", "x_masked", "=", "np", ".", "ma", ".", "masked_where", "(", "(", "x", "<", "self", ".", "_xmin", ")", "|", "(", "x", ">", "self", ".", "_xmax", ")", ",", "x", ")", "else", ":", "x_masked", "=", "x", "# Calculate s and return", "return", "np", ".", "interp", "(", "x_masked", ",", "self", ".", "_x_range", ",", "self", ".", "_s_range", ")" ]
Transform a Nx1 numpy array. Parameters ---------- x : array Data to be transformed. mask_out_of_range : bool, optional Whether to mask input values out of range. Return ------ array or masked array Transformed data.
[ "Transform", "a", "Nx1", "numpy", "array", "." ]
python
train
28.16
erinxocon/requests-xml
requests_xml.py
https://github.com/erinxocon/requests-xml/blob/923571ceae4ddd4f2f57a2fc8780d89b50f3e7a1/requests_xml.py#L235-L243
def search(self, template: str, first: bool = False) -> _Result: """Search the :class:`Element <Element>` for the given parse template. :param template: The Parse template to use. """ elements = [r for r in findall(template, self.xml)] return _get_first_or_list(elements, first)
[ "def", "search", "(", "self", ",", "template", ":", "str", ",", "first", ":", "bool", "=", "False", ")", "->", "_Result", ":", "elements", "=", "[", "r", "for", "r", "in", "findall", "(", "template", ",", "self", ".", "xml", ")", "]", "return", "_get_first_or_list", "(", "elements", ",", "first", ")" ]
Search the :class:`Element <Element>` for the given parse template. :param template: The Parse template to use.
[ "Search", "the", ":", "class", ":", "Element", "<Element", ">", "for", "the", "given", "parse", "template", "." ]
python
train
35.555556
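A hedged sketch of Element.search. XMLSession and the response's .xml attribute are assumptions about the package's requests-html-style entry points; the URL is a placeholder. The template syntax itself comes from the parse library, where each {} captures the text between the literal parts.
from requests_xml import XMLSession   # assumed session class

session = XMLSession()
r = session.get('https://example.com/feed.xml')        # placeholder URL
# parse-style template: {} captures whatever sits between the literals
title = r.xml.search('<title>{}</title>', first=True)
print(title)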
anoroozian/pyvt
pyvt/__init__.py
https://github.com/anoroozian/pyvt/blob/bf36f833f1f1b7d93169fd9ad451e06b7d46afc1/pyvt/__init__.py#L149-L269
def retrieve(self, thing, thing_type=None): """ Retrieve a report from VirusTotal based on a hash, IP, domain, file or URL or ScanID. NOTE: URLs must include the scheme (e.g. http://)\n :param thing: a file name on the local system, a URL or list of URLs, an IP or list of IPs, a domain or list of domains, a hash or list of hashes :param thing_type: Optional, a hint to the function as to what you are sending it :return: Returns a a dictionary with thing as key and the API json response as the value If thing was a list of things to query the results will be a dictionary with every thing in the list as a key :raises TypeError: if it gets something other than a URL, IP domain, hash or ScanID :raises TypeError: if VirusTotal returns something we can't parse. """ # trust the user-supplied type over the automatic identification thing_id = self._whatis(thing) if thing_type is None: thing_type = thing_id query_parameters = {} # Query API for URL(s) if thing_type == API_Constants.URL: # Get the scan results for a given URL or list of URLs. query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT if not isinstance(thing, list): thing = [thing] grouped_urls = self._grouped(thing, self._urls_per_retrieve) # break list of URLS down to API limits results = {} for group in grouped_urls: query_parameters = {"resource": "\n".join([url for url in group])} self._limit_call_handler() try: response = self._post_query(query, query_parameters) except: raise TypeError # If we get a list of URLs that has N urls and N mod '_url_per_retrieve' is 1 # for example [url, url, url], when limit is 2, the last query will not return a list if not isinstance(response, list): response = [response] for index, scanid in enumerate(group): results[scanid] = response[index] result = results # Query API for domain(s) elif thing_type == API_Constants.DOMAIN: query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_DOMAIN_REPORT if not isinstance(thing, list): thing = [thing] results = {} for domain in thing: query_parameters["domain"] = domain self._limit_call_handler() response = self._get_query(query, query_parameters) results[domain] = response result = results # Query API for IP(s) elif thing_type == API_Constants.IP: query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_IP_REPORT if not isinstance(thing, list): thing = [thing] results = {} for ip in thing: query_parameters["ip"] = ip self._limit_call_handler() try: response = self._get_query(query, query_parameters) except: raise TypeError results[ip] = response result = results # Query API for HASH, bulk HASH queries not possible elif thing_type == API_Constants.HASH: query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_FILE_REPORT results = {} if not isinstance(thing, list): thing = [thing] query_parameters["resource"] = ", ".join(thing) self._limit_call_handler() response = self._get_query(query, query_parameters) if not isinstance(response, list): response = [response] for index, hash in enumerate(thing): results[hash] = response[index] result = results elif thing_type == "scanid": query = API_Constants.CONST_API_URL + API_Constants.API_ACTION_GET_URL_REPORT if not isinstance(thing, list): thing = [thing] results = {} for scanid in thing: query_parameters["resource"] = scanid self._limit_call_handler() try: response = self._post_query(query, query_parameters) except: raise TypeError results[scanid] = response result = results else: raise TypeError("Unimplemented '%s'." % thing_type) return result
[ "def", "retrieve", "(", "self", ",", "thing", ",", "thing_type", "=", "None", ")", ":", "# trust the user-supplied type over the automatic identification", "thing_id", "=", "self", ".", "_whatis", "(", "thing", ")", "if", "thing_type", "is", "None", ":", "thing_type", "=", "thing_id", "query_parameters", "=", "{", "}", "# Query API for URL(s)", "if", "thing_type", "==", "API_Constants", ".", "URL", ":", "# Get the scan results for a given URL or list of URLs.", "query", "=", "API_Constants", ".", "CONST_API_URL", "+", "API_Constants", ".", "API_ACTION_GET_URL_REPORT", "if", "not", "isinstance", "(", "thing", ",", "list", ")", ":", "thing", "=", "[", "thing", "]", "grouped_urls", "=", "self", ".", "_grouped", "(", "thing", ",", "self", ".", "_urls_per_retrieve", ")", "# break list of URLS down to API limits", "results", "=", "{", "}", "for", "group", "in", "grouped_urls", ":", "query_parameters", "=", "{", "\"resource\"", ":", "\"\\n\"", ".", "join", "(", "[", "url", "for", "url", "in", "group", "]", ")", "}", "self", ".", "_limit_call_handler", "(", ")", "try", ":", "response", "=", "self", ".", "_post_query", "(", "query", ",", "query_parameters", ")", "except", ":", "raise", "TypeError", "# If we get a list of URLs that has N urls and N mod '_url_per_retrieve' is 1", "# for example [url, url, url], when limit is 2, the last query will not return a list", "if", "not", "isinstance", "(", "response", ",", "list", ")", ":", "response", "=", "[", "response", "]", "for", "index", ",", "scanid", "in", "enumerate", "(", "group", ")", ":", "results", "[", "scanid", "]", "=", "response", "[", "index", "]", "result", "=", "results", "# Query API for domain(s)", "elif", "thing_type", "==", "API_Constants", ".", "DOMAIN", ":", "query", "=", "API_Constants", ".", "CONST_API_URL", "+", "API_Constants", ".", "API_ACTION_GET_DOMAIN_REPORT", "if", "not", "isinstance", "(", "thing", ",", "list", ")", ":", "thing", "=", "[", "thing", "]", "results", "=", "{", "}", "for", "domain", "in", "thing", ":", "query_parameters", "[", "\"domain\"", "]", "=", "domain", "self", ".", "_limit_call_handler", "(", ")", "response", "=", "self", ".", "_get_query", "(", "query", ",", "query_parameters", ")", "results", "[", "domain", "]", "=", "response", "result", "=", "results", "# Query API for IP(s)", "elif", "thing_type", "==", "API_Constants", ".", "IP", ":", "query", "=", "API_Constants", ".", "CONST_API_URL", "+", "API_Constants", ".", "API_ACTION_GET_IP_REPORT", "if", "not", "isinstance", "(", "thing", ",", "list", ")", ":", "thing", "=", "[", "thing", "]", "results", "=", "{", "}", "for", "ip", "in", "thing", ":", "query_parameters", "[", "\"ip\"", "]", "=", "ip", "self", ".", "_limit_call_handler", "(", ")", "try", ":", "response", "=", "self", ".", "_get_query", "(", "query", ",", "query_parameters", ")", "except", ":", "raise", "TypeError", "results", "[", "ip", "]", "=", "response", "result", "=", "results", "# Query API for HASH, bulk HASH queries not possible", "elif", "thing_type", "==", "API_Constants", ".", "HASH", ":", "query", "=", "API_Constants", ".", "CONST_API_URL", "+", "API_Constants", ".", "API_ACTION_GET_FILE_REPORT", "results", "=", "{", "}", "if", "not", "isinstance", "(", "thing", ",", "list", ")", ":", "thing", "=", "[", "thing", "]", "query_parameters", "[", "\"resource\"", "]", "=", "\", \"", ".", "join", "(", "thing", ")", "self", ".", "_limit_call_handler", "(", ")", "response", "=", "self", ".", "_get_query", "(", "query", ",", "query_parameters", ")", "if", "not", "isinstance", "(", 
"response", ",", "list", ")", ":", "response", "=", "[", "response", "]", "for", "index", ",", "hash", "in", "enumerate", "(", "thing", ")", ":", "results", "[", "hash", "]", "=", "response", "[", "index", "]", "result", "=", "results", "elif", "thing_type", "==", "\"scanid\"", ":", "query", "=", "API_Constants", ".", "CONST_API_URL", "+", "API_Constants", ".", "API_ACTION_GET_URL_REPORT", "if", "not", "isinstance", "(", "thing", ",", "list", ")", ":", "thing", "=", "[", "thing", "]", "results", "=", "{", "}", "for", "scanid", "in", "thing", ":", "query_parameters", "[", "\"resource\"", "]", "=", "scanid", "self", ".", "_limit_call_handler", "(", ")", "try", ":", "response", "=", "self", ".", "_post_query", "(", "query", ",", "query_parameters", ")", "except", ":", "raise", "TypeError", "results", "[", "scanid", "]", "=", "response", "result", "=", "results", "else", ":", "raise", "TypeError", "(", "\"Unimplemented '%s'.\"", "%", "thing_type", ")", "return", "result" ]
Retrieve a report from VirusTotal based on a hash, IP, domain, file, URL or ScanID. NOTE: URLs must include the scheme (e.g. http://)\n :param thing: a file name on the local system, a URL or list of URLs, an IP or list of IPs, a domain or list of domains, a hash or list of hashes :param thing_type: Optional, a hint to the function as to what you are sending it :return: Returns a dictionary with thing as key and the API json response as the value. If thing was a list of things to query, the results will be a dictionary with every thing in the list as a key :raises TypeError: if it gets something other than a URL, IP, domain, hash or ScanID :raises TypeError: if VirusTotal returns something we can't parse.
[ "Retrieve", "a", "report", "from", "VirusTotal", "based", "on", "a", "hash", "IP", "domain", "file", "or", "URL", "or", "ScanID", ".", "NOTE", ":", "URLs", "must", "include", "the", "scheme", "(", "e", ".", "g", ".", "http", ":", "//", ")", "\\", "n" ]
python
train
38.801653
annoviko/pyclustering
pyclustering/cluster/optics.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/optics.py#L478-L490
def __allocate_clusters(self): """! @brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances. """ self.__initialize(self.__sample_pointer) for optic_object in self.__optics_objects: if optic_object.processed is False: self.__expand_cluster_order(optic_object) self.__extract_clusters()
[ "def", "__allocate_clusters", "(", "self", ")", ":", "self", ".", "__initialize", "(", "self", ".", "__sample_pointer", ")", "for", "optic_object", "in", "self", ".", "__optics_objects", ":", "if", "optic_object", ".", "processed", "is", "False", ":", "self", ".", "__expand_cluster_order", "(", "optic_object", ")", "self", ".", "__extract_clusters", "(", ")" ]
! @brief Performs cluster allocation and builds ordering diagram that is based on reachability-distances.
[ "!" ]
python
valid
34.076923
RedFantom/ttkthemes
ttkthemes/_widget.py
https://github.com/RedFantom/ttkthemes/blob/e7fc354c02faf0e3eb4842d7f44131a1c43dd299/ttkthemes/_widget.py#L199-L225
def _setup_images(directory, brightness, saturation, hue, preserve_transparency): """ Apply modifiers to the images of a theme Modifies the images using the PIL.ImageEnhance module. Using this function, theme images are modified to given them a unique look and feel. Works best with PNG-based images. """ for file_name in os.listdir(directory): with open(os.path.join(directory, file_name), "rb") as fi: image = Image.open(fi).convert("RGBA") # Only perform required operations if brightness != 1.0: enhancer = ImageEnhance.Brightness(image) image = enhancer.enhance(brightness) if saturation != 1.0: enhancer = ImageEnhance.Color(image) image = enhancer.enhance(saturation) if hue != 1.0: image = imgops.shift_hue(image, hue) if preserve_transparency is True: image = imgops.make_transparent(image) # Save the new image image.save(os.path.join(directory, file_name.replace("gif", "png"))) image.close() for file_name in (item for item in os.listdir(directory) if item.endswith(".gif")): os.remove(os.path.join(directory, file_name))
[ "def", "_setup_images", "(", "directory", ",", "brightness", ",", "saturation", ",", "hue", ",", "preserve_transparency", ")", ":", "for", "file_name", "in", "os", ".", "listdir", "(", "directory", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "file_name", ")", ",", "\"rb\"", ")", "as", "fi", ":", "image", "=", "Image", ".", "open", "(", "fi", ")", ".", "convert", "(", "\"RGBA\"", ")", "# Only perform required operations", "if", "brightness", "!=", "1.0", ":", "enhancer", "=", "ImageEnhance", ".", "Brightness", "(", "image", ")", "image", "=", "enhancer", ".", "enhance", "(", "brightness", ")", "if", "saturation", "!=", "1.0", ":", "enhancer", "=", "ImageEnhance", ".", "Color", "(", "image", ")", "image", "=", "enhancer", ".", "enhance", "(", "saturation", ")", "if", "hue", "!=", "1.0", ":", "image", "=", "imgops", ".", "shift_hue", "(", "image", ",", "hue", ")", "if", "preserve_transparency", "is", "True", ":", "image", "=", "imgops", ".", "make_transparent", "(", "image", ")", "# Save the new image", "image", ".", "save", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "file_name", ".", "replace", "(", "\"gif\"", ",", "\"png\"", ")", ")", ")", "image", ".", "close", "(", ")", "for", "file_name", "in", "(", "item", "for", "item", "in", "os", ".", "listdir", "(", "directory", ")", "if", "item", ".", "endswith", "(", "\".gif\"", ")", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "file_name", ")", ")" ]
Apply modifiers to the images of a theme Modifies the images using the PIL.ImageEnhance module. Using this function, theme images are modified to give them a unique look and feel. Works best with PNG-based images.
[ "Apply", "modifiers", "to", "the", "images", "of", "a", "theme" ]
python
train
48.296296
OLC-Bioinformatics/sipprverse
pointfinder/PointFinder.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointfinder/PointFinder.py#L603-L611
def get_codon(seq, codon_no, start_offset): """ This function takes a sequence and a codon number and returns the codon found in the sequence at that position """ seq = seq.replace("-","") codon_start_pos = int(codon_no - 1)*3 - start_offset codon = seq[codon_start_pos:codon_start_pos + 3] return codon
[ "def", "get_codon", "(", "seq", ",", "codon_no", ",", "start_offset", ")", ":", "seq", "=", "seq", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "codon_start_pos", "=", "int", "(", "codon_no", "-", "1", ")", "*", "3", "-", "start_offset", "codon", "=", "seq", "[", "codon_start_pos", ":", "codon_start_pos", "+", "3", "]", "return", "codon" ]
This function takes a sequence and a codon number and returns the codon found in the sequence at that position
[ "This", "function", "takes", "a", "sequence", "and", "a", "codon", "number", "and", "returns", "the", "codon", "found", "in", "the", "sequence", "at", "that", "position" ]
python
train
36.333333
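A worked call of get_codon as defined above; the toy sequence is made up. Gap characters are stripped before indexing, and codon numbering is 1-based.
seq = "ATG-GCA-TTT"                         # gaps ('-') are removed first -> "ATGGCATTT"
codon = get_codon(seq, codon_no=2, start_offset=0)
print(codon)                                # "GCA": codon 2 starts at ungapped position 3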
jobovy/galpy
galpy/actionAngle/actionAngleTorus_c.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleTorus_c.py#L204-L272
def actionAngleTorus_hessian_c(pot,jr,jphi,jz, tol=0.003,dJ=0.001): """ NAME: actionAngleTorus_hessian_c PURPOSE: compute dO/dJ on a single torus INPUT: pot - Potential object or list thereof jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (0.003) goal for |dJ|/|J| along the torus dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian) OUTPUT: (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag) Note: dO/dJ is *not* symmetrized here HISTORY: 2016-07-15 - Written - Bovy (UofT) """ #Parse the potential from galpy.orbit.integrateFullOrbit import _parse_pot npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True) #Set up result dOdJT= numpy.empty(9) Omegar= numpy.empty(1) Omegaphi= numpy.empty(1) Omegaz= numpy.empty(1) flag= ctypes.c_int(0) #Set up the C code ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE') actionAngleTorus_HessFunc= _lib.actionAngleTorus_hessianFreqs actionAngleTorus_HessFunc.argtypes=\ [ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ndpointer(dtype=numpy.int32,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.c_double, ctypes.c_double, ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ndpointer(dtype=numpy.float64,flags=ndarrayFlags), ctypes.POINTER(ctypes.c_int)] #Array requirements dOdJT= numpy.require(dOdJT,dtype=numpy.float64,requirements=['C','W']) Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W']) Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W']) Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W']) #Run the C code actionAngleTorus_HessFunc(ctypes.c_double(jr), ctypes.c_double(jphi), ctypes.c_double(jz), ctypes.c_int(npot), pot_type, pot_args, ctypes.c_double(tol), ctypes.c_double(dJ), dOdJT, Omegar,Omegaphi,Omegaz, ctypes.byref(flag)) return (dOdJT.reshape((3,3)).T,Omegar[0],Omegaphi[0],Omegaz[0],flag.value)
[ "def", "actionAngleTorus_hessian_c", "(", "pot", ",", "jr", ",", "jphi", ",", "jz", ",", "tol", "=", "0.003", ",", "dJ", "=", "0.001", ")", ":", "#Parse the potential", "from", "galpy", ".", "orbit", ".", "integrateFullOrbit", "import", "_parse_pot", "npot", ",", "pot_type", ",", "pot_args", "=", "_parse_pot", "(", "pot", ",", "potfortorus", "=", "True", ")", "#Set up result", "dOdJT", "=", "numpy", ".", "empty", "(", "9", ")", "Omegar", "=", "numpy", ".", "empty", "(", "1", ")", "Omegaphi", "=", "numpy", ".", "empty", "(", "1", ")", "Omegaz", "=", "numpy", ".", "empty", "(", "1", ")", "flag", "=", "ctypes", ".", "c_int", "(", "0", ")", "#Set up the C code", "ndarrayFlags", "=", "(", "'C_CONTIGUOUS'", ",", "'WRITEABLE'", ")", "actionAngleTorus_HessFunc", "=", "_lib", ".", "actionAngleTorus_hessianFreqs", "actionAngleTorus_HessFunc", ".", "argtypes", "=", "[", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_int", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "int32", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ctypes", ".", "c_double", ",", "ctypes", ".", "c_double", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ndpointer", "(", "dtype", "=", "numpy", ".", "float64", ",", "flags", "=", "ndarrayFlags", ")", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_int", ")", "]", "#Array requirements", "dOdJT", "=", "numpy", ".", "require", "(", "dOdJT", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegar", "=", "numpy", ".", "require", "(", "Omegar", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegaphi", "=", "numpy", ".", "require", "(", "Omegaphi", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "Omegaz", "=", "numpy", ".", "require", "(", "Omegaz", ",", "dtype", "=", "numpy", ".", "float64", ",", "requirements", "=", "[", "'C'", ",", "'W'", "]", ")", "#Run the C code", "actionAngleTorus_HessFunc", "(", "ctypes", ".", "c_double", "(", "jr", ")", ",", "ctypes", ".", "c_double", "(", "jphi", ")", ",", "ctypes", ".", "c_double", "(", "jz", ")", ",", "ctypes", ".", "c_int", "(", "npot", ")", ",", "pot_type", ",", "pot_args", ",", "ctypes", ".", "c_double", "(", "tol", ")", ",", "ctypes", ".", "c_double", "(", "dJ", ")", ",", "dOdJT", ",", "Omegar", ",", "Omegaphi", ",", "Omegaz", ",", "ctypes", ".", "byref", "(", "flag", ")", ")", "return", "(", "dOdJT", ".", "reshape", "(", "(", "3", ",", "3", ")", ")", ".", "T", ",", "Omegar", "[", "0", "]", ",", "Omegaphi", "[", "0", "]", ",", "Omegaz", "[", "0", "]", ",", "flag", ".", "value", ")" ]
NAME: actionAngleTorus_hessian_c PURPOSE: compute dO/dJ on a single torus INPUT: pot - Potential object or list thereof jr - radial action (scalar) jphi - azimuthal action (scalar) jz - vertical action (scalar) tol= (0.003) goal for |dJ|/|J| along the torus dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian) OUTPUT: (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag) Note: dO/dJ is *not* symmetrized here HISTORY: 2016-07-15 - Written - Bovy (UofT)
[ "NAME", ":", "actionAngleTorus_hessian_c", "PURPOSE", ":", "compute", "dO", "/", "dJ", "on", "a", "single", "torus", "INPUT", ":", "pot", "-", "Potential", "object", "or", "list", "thereof", "jr", "-", "radial", "action", "(", "scalar", ")", "jphi", "-", "azimuthal", "action", "(", "scalar", ")", "jz", "-", "vertical", "action", "(", "scalar", ")", "tol", "=", "(", "0", ".", "003", ")", "goal", "for", "|dJ|", "/", "|J|", "along", "the", "torus", "dJ", "=", "(", "0", ".", "001", ")", "action", "difference", "when", "computing", "derivatives", "(", "Hessian", "or", "Jacobian", ")", "OUTPUT", ":", "(", "dO", "/", "dJ", "Omegar", "Omegaphi", "Omegaz", "Autofit", "error", "flag", ")", "Note", ":", "dO", "/", "dJ", "is", "*", "not", "*", "symmetrized", "here", "HISTORY", ":", "2016", "-", "07", "-", "15", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
37.942029
portfors-lab/sparkle
sparkle/stim/stimulus_model.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/stimulus_model.py#L355-L366
def contains(self, stimtype): """Returns whether the specified stimulus type is a component in this stimulus :param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus :type stimtype: str :returns: bool -- if the stimtype is in the model """ for track in self._segments: for component in track: if component.__class__.__name__ == stimtype: return True return False
[ "def", "contains", "(", "self", ",", "stimtype", ")", ":", "for", "track", "in", "self", ".", "_segments", ":", "for", "component", "in", "track", ":", "if", "component", ".", "__class__", ".", "__name__", "==", "stimtype", ":", "return", "True", "return", "False" ]
Returns whether the specified stimulus type is a component in this stimulus :param stimtype: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>` subclass class name to test for membership in the components of this stimulus :type stimtype: str :returns: bool -- if the stimtype is in the model
[ "Returns", "whether", "the", "specified", "stimulus", "type", "is", "a", "component", "in", "this", "stimulus" ]
python
train
48.916667
tanghaibao/jcvi
jcvi/assembly/hic.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L1149-L1165
def iter_last_tour(tourfile, clm): """ Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm. """ row = open(tourfile).readlines()[-1] _tour, _tour_o = separate_tour_and_o(row) tour = [] tour_o = [] for tc, to in zip(_tour, _tour_o): if tc not in clm.contigs: logging.debug("Contig `{}` in file `{}` not found in `{}`" .format(tc, tourfile, clm.idsfile)) continue tour.append(tc) tour_o.append(to) return tour, tour_o
[ "def", "iter_last_tour", "(", "tourfile", ",", "clm", ")", ":", "row", "=", "open", "(", "tourfile", ")", ".", "readlines", "(", ")", "[", "-", "1", "]", "_tour", ",", "_tour_o", "=", "separate_tour_and_o", "(", "row", ")", "tour", "=", "[", "]", "tour_o", "=", "[", "]", "for", "tc", ",", "to", "in", "zip", "(", "_tour", ",", "_tour_o", ")", ":", "if", "tc", "not", "in", "clm", ".", "contigs", ":", "logging", ".", "debug", "(", "\"Contig `{}` in file `{}` not found in `{}`\"", ".", "format", "(", "tc", ",", "tourfile", ",", "clm", ".", "idsfile", ")", ")", "continue", "tour", ".", "append", "(", "tc", ")", "tour_o", ".", "append", "(", "to", ")", "return", "tour", ",", "tour_o" ]
Extract last tour from tourfile. The clm instance is also passed in to see if any contig is covered in the clm.
[ "Extract", "last", "tour", "from", "tourfile", ".", "The", "clm", "instance", "is", "also", "passed", "in", "to", "see", "if", "any", "contig", "is", "covered", "in", "the", "clm", "." ]
python
train
33.647059
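Hedged usage sketch for iter_last_tour; the tour file path is a placeholder and `clm` is assumed to be an already-built CLM object exposing .contigs and .idsfile as the function expects:

from jcvi.assembly.hic import iter_last_tour
tour, tour_o = iter_last_tour('final.tour', clm)  # clm assumed to exist
print(len(tour), 'contigs kept from the last tour line')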
nephila/python-taiga
taiga/models/models.py
https://github.com/nephila/python-taiga/blob/5b471d6b8b59e5d410162a6f1c2f0d4188445a56/taiga/models/models.py#L472-L480
def stats(self): """ Get the stats for the current :class:`Milestone` """ response = self.requester.get( '/{endpoint}/{id}/stats', endpoint=self.endpoint, id=self.id ) return response.json()
[ "def", "stats", "(", "self", ")", ":", "response", "=", "self", ".", "requester", ".", "get", "(", "'/{endpoint}/{id}/stats'", ",", "endpoint", "=", "self", ".", "endpoint", ",", "id", "=", "self", ".", "id", ")", "return", "response", ".", "json", "(", ")" ]
Get the stats for the current :class:`Milestone`
[ "Get", "the", "stats", "for", "the", "current", ":", "class", ":", "Milestone" ]
python
train
28.222222
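Illustrative call of Milestone.stats(); the host, credentials and milestone id are placeholders, and the attribute layout of the python-taiga client is assumed:

from taiga import TaigaAPI
api = TaigaAPI(host='https://taiga.example.com')
api.auth(username='user', password='secret')
milestone = api.milestones.get(42)   # 42 is a placeholder id
print(milestone.stats())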
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L979-L993
def retrieve(self, order_id, id) : """ Retrieve a single line item Returns a single line item of an order, according to the unique line item ID provided :calls: ``get /orders/{order_id}/line_items/{id}`` :param int order_id: Unique identifier of an Order. :param int id: Unique identifier of a LineItem. :return: Dictionary that supports attribute-style access and represents LineItem resource. :rtype: dict """ _, _, line_item = self.http_client.get("/orders/{order_id}/line_items/{id}".format(order_id=order_id, id=id)) return line_item
[ "def", "retrieve", "(", "self", ",", "order_id", ",", "id", ")", ":", "_", ",", "_", ",", "line_item", "=", "self", ".", "http_client", ".", "get", "(", "\"/orders/{order_id}/line_items/{id}\"", ".", "format", "(", "order_id", "=", "order_id", ",", "id", "=", "id", ")", ")", "return", "line_item" ]
Retrieve a single line item Returns a single line item of an order, according to the unique line item ID provided :calls: ``get /orders/{order_id}/line_items/{id}`` :param int order_id: Unique identifier of an Order. :param int id: Unique identifier of a LineItem. :return: Dictionary that supports attribute-style access and represents LineItem resource. :rtype: dict
[ "Retrieve", "a", "single", "line", "item" ]
python
train
40.8
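Hedged sketch of LineItemsService.retrieve via the top-level client; the access token and ids are placeholders, and `client.line_items` is assumed to be the service accessor exposed by basecrm.Client:

import basecrm
client = basecrm.Client(access_token='<your token>')
line_item = client.line_items.retrieve(order_id=123, id=456)
print(line_item)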
modin-project/modin
modin/backends/pandas/query_compiler.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1233-L1249
def last_valid_index(self): """Returns index of last non-NaN/NULL value. Return: Scalar of index name. """ def last_valid_index_builder(df): df.index = pandas.RangeIndex(len(df.index)) return df.apply(lambda df: df.last_valid_index()) func = self._build_mapreduce_func(last_valid_index_builder) # We get the maximum from each column, then take the max of that to get # last_valid_index. The `to_pandas()` here is just for a single value and # `squeeze` will convert it to a scalar. first_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze() return self.index[first_result]
[ "def", "last_valid_index", "(", "self", ")", ":", "def", "last_valid_index_builder", "(", "df", ")", ":", "df", ".", "index", "=", "pandas", ".", "RangeIndex", "(", "len", "(", "df", ".", "index", ")", ")", "return", "df", ".", "apply", "(", "lambda", "df", ":", "df", ".", "last_valid_index", "(", ")", ")", "func", "=", "self", ".", "_build_mapreduce_func", "(", "last_valid_index_builder", ")", "# We get the maximum from each column, then take the max of that to get", "# last_valid_index. The `to_pandas()` here is just for a single value and", "# `squeeze` will convert it to a scalar.", "first_result", "=", "self", ".", "_full_axis_reduce", "(", "0", ",", "func", ")", ".", "max", "(", "axis", "=", "1", ")", ".", "to_pandas", "(", ")", ".", "squeeze", "(", ")", "return", "self", ".", "index", "[", "first_result", "]" ]
Returns index of last non-NaN/NULL value. Return: Scalar of index name.
[ "Returns", "index", "of", "last", "non", "-", "NaN", "/", "NULL", "value", "." ]
python
train
41
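The query compiler above is internal, but its behaviour surfaces through the public DataFrame API; a small sketch (values chosen so the answer is 1 regardless of the NaN convention used):

import modin.pandas as pd
df = pd.DataFrame({'a': [1.0, 2.0, None], 'b': [None, 5.0, None]})
print(df.last_valid_index())  # 1 -- the last row that still holds data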
mpapi/lazylights
lazylights.py
https://github.com/mpapi/lazylights/blob/536dbd3ce75c28b3545cf66f25fc72589488063f/lazylights.py#L502-L512
def set_light_state_raw(self, hue, saturation, brightness, kelvin, bulb=ALL_BULBS, timeout=None): """ Sets the (low-level) light state of one or more bulbs. """ with _blocking(self.lock, self.light_state, self.light_state_event, timeout): self.send(REQ_SET_LIGHT_STATE, bulb, 'xHHHHI', hue, saturation, brightness, kelvin, 0) self.send(REQ_GET_LIGHT_STATE, ALL_BULBS, '') return self.light_state
[ "def", "set_light_state_raw", "(", "self", ",", "hue", ",", "saturation", ",", "brightness", ",", "kelvin", ",", "bulb", "=", "ALL_BULBS", ",", "timeout", "=", "None", ")", ":", "with", "_blocking", "(", "self", ".", "lock", ",", "self", ".", "light_state", ",", "self", ".", "light_state_event", ",", "timeout", ")", ":", "self", ".", "send", "(", "REQ_SET_LIGHT_STATE", ",", "bulb", ",", "'xHHHHI'", ",", "hue", ",", "saturation", ",", "brightness", ",", "kelvin", ",", "0", ")", "self", ".", "send", "(", "REQ_GET_LIGHT_STATE", ",", "ALL_BULBS", ",", "''", ")", "return", "self", ".", "light_state" ]
Sets the (low-level) light state of one or more bulbs.
[ "Sets", "the", "(", "low", "-", "level", ")", "light", "state", "of", "one", "or", "more", "bulbs", "." ]
python
train
47.454545
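Hedged sketch only: `client` stands for an already-connected instance of the enclosing lazylights client class (not named in this record), and the raw values are 16-bit quantities:

state = client.set_light_state_raw(hue=0, saturation=0, brightness=65535, kelvin=3500)
print(state)  # light state reported back after the set/get round trip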
DistrictDataLabs/yellowbrick
yellowbrick/regressor/residuals.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/regressor/residuals.py#L136-L161
def score(self, X, y=None, **kwargs): """ The score function is the hook for visual interaction. Pass in test data and the visualizer will create predictions on the data and evaluate them with respect to the test values. The evaluation will then be passed to draw() and the result of the estimator score will be returned. Parameters ---------- X : array-like X (also X_test) are the dependent variables of test set to predict y : array-like y (also y_test) is the independent actual variables to score against Returns ------- score : float """ self.score_ = self.estimator.score(X, y, **kwargs) y_pred = self.predict(X) self.draw(y, y_pred) return self.score_
[ "def", "score", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "score_", "=", "self", ".", "estimator", ".", "score", "(", "X", ",", "y", ",", "*", "*", "kwargs", ")", "y_pred", "=", "self", ".", "predict", "(", "X", ")", "self", ".", "draw", "(", "y", ",", "y_pred", ")", "return", "self", ".", "score_" ]
The score function is the hook for visual interaction. Pass in test data and the visualizer will create predictions on the data and evaluate them with respect to the test values. The evaluation will then be passed to draw() and the result of the estimator score will be returned. Parameters ---------- X : array-like X (also X_test) are the dependent variables of test set to predict y : array-like y (also y_test) is the independent actual variables to score against Returns ------- score : float
[ "The", "score", "function", "is", "the", "hook", "for", "visual", "interaction", ".", "Pass", "in", "test", "data", "and", "the", "visualizer", "will", "create", "predictions", "on", "the", "data", "and", "evaluate", "them", "with", "respect", "to", "the", "test", "values", ".", "The", "evaluation", "will", "then", "be", "passed", "to", "draw", "()", "and", "the", "result", "of", "the", "estimator", "score", "will", "be", "returned", "." ]
python
train
31.076923
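Typical use of this score() hook through one of the visualizers defined in the module; Ridge and the train/test arrays are placeholders assumed to exist:

from sklearn.linear_model import Ridge
from yellowbrick.regressor import ResidualsPlot
visualizer = ResidualsPlot(Ridge())
visualizer.fit(X_train, y_train)       # X_train/y_train assumed to exist
r2 = visualizer.score(X_test, y_test)  # draws the residuals and returns the R^2 score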
TeamHG-Memex/eli5
eli5/lime/textutils.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/lime/textutils.py#L112-L144
def replace_random_tokens_bow(self, n_samples, # type: int replacement='', # type: str random_state=None, min_replace=1, # type: Union[int, float] max_replace=1.0, # type: Union[int, float] ): # type: (...) -> List[Tuple[str, int, np.ndarray]] """ Return a list of ``(text, replaced_words_count, mask)`` tuples with n_samples versions of text with some words replaced. If a word is replaced, all duplicate words are also replaced from the text. By default words are replaced with '', i.e. removed. """ if not self.vocab: nomask = np.array([], dtype=int) return [('', 0, nomask)] * n_samples min_replace, max_replace = self._get_min_max(min_replace, max_replace, len(self.vocab)) rng = check_random_state(random_state) replace_sizes = rng.randint(low=min_replace, high=max_replace + 1, size=n_samples) res = [] for num_to_replace in replace_sizes: tokens_to_replace = set(rng.choice(self.vocab, num_to_replace, replace=False)) idx_to_replace = [idx for idx, token in enumerate(self.tokens) if token in tokens_to_replace] mask = indices_to_bool_mask(idx_to_replace, len(self.tokens)) s = self.split.masked(idx_to_replace, replacement) res.append((s.text, num_to_replace, mask)) return res
[ "def", "replace_random_tokens_bow", "(", "self", ",", "n_samples", ",", "# type: int", "replacement", "=", "''", ",", "# type: str", "random_state", "=", "None", ",", "min_replace", "=", "1", ",", "# type: Union[int, float]", "max_replace", "=", "1.0", ",", "# type: Union[int, float]", ")", ":", "# type: (...) -> List[Tuple[str, int, np.ndarray]]", "if", "not", "self", ".", "vocab", ":", "nomask", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "int", ")", "return", "[", "(", "''", ",", "0", ",", "nomask", ")", "]", "*", "n_samples", "min_replace", ",", "max_replace", "=", "self", ".", "_get_min_max", "(", "min_replace", ",", "max_replace", ",", "len", "(", "self", ".", "vocab", ")", ")", "rng", "=", "check_random_state", "(", "random_state", ")", "replace_sizes", "=", "rng", ".", "randint", "(", "low", "=", "min_replace", ",", "high", "=", "max_replace", "+", "1", ",", "size", "=", "n_samples", ")", "res", "=", "[", "]", "for", "num_to_replace", "in", "replace_sizes", ":", "tokens_to_replace", "=", "set", "(", "rng", ".", "choice", "(", "self", ".", "vocab", ",", "num_to_replace", ",", "replace", "=", "False", ")", ")", "idx_to_replace", "=", "[", "idx", "for", "idx", ",", "token", "in", "enumerate", "(", "self", ".", "tokens", ")", "if", "token", "in", "tokens_to_replace", "]", "mask", "=", "indices_to_bool_mask", "(", "idx_to_replace", ",", "len", "(", "self", ".", "tokens", ")", ")", "s", "=", "self", ".", "split", ".", "masked", "(", "idx_to_replace", ",", "replacement", ")", "res", ".", "append", "(", "(", "s", ".", "text", ",", "num_to_replace", ",", "mask", ")", ")", "return", "res" ]
Return a list of ``(text, replaced_words_count, mask)`` tuples with n_samples versions of text with some words replaced. If a word is replaced, all duplicate words are also replaced from the text. By default words are replaced with '', i.e. removed.
[ "Return", "a", "list", "of", "(", "text", "replaced_words_count", "mask", ")", "tuples", "with", "n_samples", "versions", "of", "text", "with", "some", "words", "replaced", ".", "If", "a", "word", "is", "replaced", "all", "duplicate", "words", "are", "also", "replaced", "from", "the", "text", ".", "By", "default", "words", "are", "replaced", "with", "i", ".", "e", ".", "removed", "." ]
python
train
52.242424
gwww/elkm1
elkm1_lib/proto.py
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/proto.py#L82-L108
def write_data(self, data, response_required=None, timeout=5.0, raw=False): """Write data on the asyncio Protocol""" if self._transport is None: return if self._paused: return if self._waiting_for_response: LOG.debug("queueing write %s", data) self._queued_writes.append((data, response_required, timeout)) return if response_required: self._waiting_for_response = response_required if timeout > 0: self._timeout_task = self.loop.call_later( timeout, self._response_required_timeout) if not raw: cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256 data = data + '{:02X}'.format(cksum) if int(data[0:2], 16) != len(data)-2: LOG.debug("message length wrong: %s", data) LOG.debug("write_data '%s'", data) self._transport.write((data + '\r\n').encode())
[ "def", "write_data", "(", "self", ",", "data", ",", "response_required", "=", "None", ",", "timeout", "=", "5.0", ",", "raw", "=", "False", ")", ":", "if", "self", ".", "_transport", "is", "None", ":", "return", "if", "self", ".", "_paused", ":", "return", "if", "self", ".", "_waiting_for_response", ":", "LOG", ".", "debug", "(", "\"queueing write %s\"", ",", "data", ")", "self", ".", "_queued_writes", ".", "append", "(", "(", "data", ",", "response_required", ",", "timeout", ")", ")", "return", "if", "response_required", ":", "self", ".", "_waiting_for_response", "=", "response_required", "if", "timeout", ">", "0", ":", "self", ".", "_timeout_task", "=", "self", ".", "loop", ".", "call_later", "(", "timeout", ",", "self", ".", "_response_required_timeout", ")", "if", "not", "raw", ":", "cksum", "=", "256", "-", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "map", "(", "ord", ",", "data", ")", ")", "%", "256", "data", "=", "data", "+", "'{:02X}'", ".", "format", "(", "cksum", ")", "if", "int", "(", "data", "[", "0", ":", "2", "]", ",", "16", ")", "!=", "len", "(", "data", ")", "-", "2", ":", "LOG", ".", "debug", "(", "\"message length wrong: %s\"", ",", "data", ")", "LOG", ".", "debug", "(", "\"write_data '%s'\"", ",", "data", ")", "self", ".", "_transport", ".", "write", "(", "(", "data", "+", "'\\r\\n'", ")", ".", "encode", "(", ")", ")" ]
Write data on the asyncio Protocol
[ "Write", "data", "on", "the", "asyncio", "Protocol" ]
python
train
36.037037
PyProphet/pyprophet
pyprophet/stats.py
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/stats.py#L133-L159
def pemp(stat, stat0): """ Computes empirical values identically to bioconductor/qvalue empPvals """ assert len(stat0) > 0 assert len(stat) > 0 stat = np.array(stat) stat0 = np.array(stat0) m = len(stat) m0 = len(stat0) statc = np.concatenate((stat, stat0)) v = np.array([True] * m + [False] * m0) perm = np.argsort(-statc, kind="mergesort") # reversed sort, mergesort is stable v = v[perm] u = np.where(v)[0] p = (u - np.arange(m)) / float(m0) # ranks can be fractional, we round down to the next integer, ranking returns values starting # with 1, not 0: ranks = np.floor(scipy.stats.rankdata(-stat)).astype(int) - 1 p = p[ranks] p[p <= 1.0 / m0] = 1.0 / m0 return p
[ "def", "pemp", "(", "stat", ",", "stat0", ")", ":", "assert", "len", "(", "stat0", ")", ">", "0", "assert", "len", "(", "stat", ")", ">", "0", "stat", "=", "np", ".", "array", "(", "stat", ")", "stat0", "=", "np", ".", "array", "(", "stat0", ")", "m", "=", "len", "(", "stat", ")", "m0", "=", "len", "(", "stat0", ")", "statc", "=", "np", ".", "concatenate", "(", "(", "stat", ",", "stat0", ")", ")", "v", "=", "np", ".", "array", "(", "[", "True", "]", "*", "m", "+", "[", "False", "]", "*", "m0", ")", "perm", "=", "np", ".", "argsort", "(", "-", "statc", ",", "kind", "=", "\"mergesort\"", ")", "# reversed sort, mergesort is stable", "v", "=", "v", "[", "perm", "]", "u", "=", "np", ".", "where", "(", "v", ")", "[", "0", "]", "p", "=", "(", "u", "-", "np", ".", "arange", "(", "m", ")", ")", "/", "float", "(", "m0", ")", "# ranks can be fractional, we round down to the next integer, ranking returns values starting", "# with 1, not 0:", "ranks", "=", "np", ".", "floor", "(", "scipy", ".", "stats", ".", "rankdata", "(", "-", "stat", ")", ")", ".", "astype", "(", "int", ")", "-", "1", "p", "=", "p", "[", "ranks", "]", "p", "[", "p", "<=", "1.0", "/", "m0", "]", "=", "1.0", "/", "m0", "return", "p" ]
Computes empirical values identically to bioconductor/qvalue empPvals
[ "Computes", "empirical", "values", "identically", "to", "bioconductor", "/", "qvalue", "empPvals" ]
python
test
26.851852
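Self-contained sketch of pemp with made-up statistics (the null sample is random, so exact p-values vary run to run):

import numpy as np
from pyprophet.stats import pemp
stat = np.array([3.2, 1.1, 0.4])   # observed statistics
stat0 = np.random.randn(1000)      # null/decoy statistics
print(pemp(stat, stat0))           # one empirical p-value per observed statistic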
tarmstrong/nbdiff
nbdiff/notebook_diff.py
https://github.com/tarmstrong/nbdiff/blob/3fdfb89f94fc0f4821bc04999ddf53b34d882ab9/nbdiff/notebook_diff.py#L61-L74
def diff_result_to_cell(item): '''diff.diff returns a dictionary with all the information we need, but we want to extract the cell and change its metadata.''' state = item['state'] if state == 'modified': new_cell = item['modifiedvalue'].data old_cell = item['originalvalue'].data new_cell['metadata']['state'] = state new_cell['metadata']['original'] = old_cell cell = new_cell else: cell = item['value'].data cell['metadata']['state'] = state return cell
[ "def", "diff_result_to_cell", "(", "item", ")", ":", "state", "=", "item", "[", "'state'", "]", "if", "state", "==", "'modified'", ":", "new_cell", "=", "item", "[", "'modifiedvalue'", "]", ".", "data", "old_cell", "=", "item", "[", "'originalvalue'", "]", ".", "data", "new_cell", "[", "'metadata'", "]", "[", "'state'", "]", "=", "state", "new_cell", "[", "'metadata'", "]", "[", "'original'", "]", "=", "old_cell", "cell", "=", "new_cell", "else", ":", "cell", "=", "item", "[", "'value'", "]", ".", "data", "cell", "[", "'metadata'", "]", "[", "'state'", "]", "=", "state", "return", "cell" ]
diff.diff returns a dictionary with all the information we need, but we want to extract the cell and change its metadata.
[ "diff", ".", "diff", "returns", "a", "dictionary", "with", "all", "the", "information", "we", "need", "but", "we", "want", "to", "extract", "the", "cell", "and", "change", "its", "metadata", "." ]
python
train
37.357143
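Purely illustrative call with a minimal stand-in for the wrapper objects diff.diff() normally produces (only a .data attribute is needed here):

from nbdiff.notebook_diff import diff_result_to_cell

class FakeWrapper(object):
    def __init__(self, data):
        self.data = data

item = {'state': 'added', 'value': FakeWrapper({'cell_type': 'code', 'metadata': {}})}
cell = diff_result_to_cell(item)
print(cell['metadata']['state'])  # 'added'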
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/ligolw_add.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/ligolw_add.py#L184-L207
def ligolw_add(xmldoc, urls, non_lsc_tables_ok = False, verbose = False, contenthandler = DefaultContentHandler): """ An implementation of the LIGO LW add algorithm. urls is a list of URLs (or filenames) to load, xmldoc is the XML document tree to which they should be added. """ # Input for n, url in enumerate(urls): if verbose: print >>sys.stderr, "%d/%d:" % (n + 1, len(urls)), utils.load_url(url, verbose = verbose, xmldoc = xmldoc, contenthandler = contenthandler) # ID reassignment if not non_lsc_tables_ok and lsctables.HasNonLSCTables(xmldoc): raise ValueError("non-LSC tables found. Use --non-lsc-tables-ok to force") reassign_ids(xmldoc, verbose = verbose) # Document merge if verbose: print >>sys.stderr, "merging elements ..." merge_ligolws(xmldoc) merge_compatible_tables(xmldoc) return xmldoc
[ "def", "ligolw_add", "(", "xmldoc", ",", "urls", ",", "non_lsc_tables_ok", "=", "False", ",", "verbose", "=", "False", ",", "contenthandler", "=", "DefaultContentHandler", ")", ":", "# Input", "for", "n", ",", "url", "in", "enumerate", "(", "urls", ")", ":", "if", "verbose", ":", "print", ">>", "sys", ".", "stderr", ",", "\"%d/%d:\"", "%", "(", "n", "+", "1", ",", "len", "(", "urls", ")", ")", ",", "utils", ".", "load_url", "(", "url", ",", "verbose", "=", "verbose", ",", "xmldoc", "=", "xmldoc", ",", "contenthandler", "=", "contenthandler", ")", "# ID reassignment", "if", "not", "non_lsc_tables_ok", "and", "lsctables", ".", "HasNonLSCTables", "(", "xmldoc", ")", ":", "raise", "ValueError", "(", "\"non-LSC tables found. Use --non-lsc-tables-ok to force\"", ")", "reassign_ids", "(", "xmldoc", ",", "verbose", "=", "verbose", ")", "# Document merge", "if", "verbose", ":", "print", ">>", "sys", ".", "stderr", ",", "\"merging elements ...\"", "merge_ligolws", "(", "xmldoc", ")", "merge_compatible_tables", "(", "xmldoc", ")", "return", "xmldoc" ]
An implementation of the LIGO LW add algorithm. urls is a list of URLs (or filenames) to load, xmldoc is the XML document tree to which they should be added.
[ "An", "implementation", "of", "the", "LIGO", "LW", "add", "algorithm", ".", "urls", "is", "a", "list", "of", "URLs", "(", "or", "filenames", ")", "to", "load", "xmldoc", "is", "the", "XML", "document", "tree", "to", "which", "they", "should", "be", "added", "." ]
python
train
33.958333
saltstack/salt
salt/states/boto_datapipeline.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_datapipeline.py#L72-L251
def present(name, pipeline_objects=None, pipeline_objects_from_pillars='boto_datapipeline_pipeline_objects', parameter_objects=None, parameter_objects_from_pillars='boto_datapipeline_parameter_objects', parameter_values=None, parameter_values_from_pillars='boto_datapipeline_parameter_values', region=None, key=None, keyid=None, profile=None): ''' Ensure the data pipeline exists with matching definition. name Name of the service to ensure a data pipeline exists for. pipeline_objects Pipeline objects to use. Will override objects read from pillars. pipeline_objects_from_pillars The pillar key to use for lookup. parameter_objects Parameter objects to use. Will override objects read from pillars. parameter_objects_from_pillars The pillar key to use for lookup. parameter_values Parameter values to use. Will override values read from pillars. parameter_values_from_pillars The pillar key to use for lookup. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} pipeline_objects = pipeline_objects or {} parameter_objects = parameter_objects or {} parameter_values = parameter_values or {} present, old_pipeline_definition = _pipeline_present_with_definition( name, _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects), _parameter_objects(parameter_objects_from_pillars, parameter_objects), _parameter_values(parameter_values_from_pillars, parameter_values), region=region, key=key, keyid=keyid, profile=profile, ) if present: ret['comment'] = 'AWS data pipeline {0} present'.format(name) return ret if __opts__['test']: ret['comment'] = 'Data pipeline {0} is set to be created or updated'.format(name) ret['result'] = None return ret result_create_pipeline = __salt__['boto_datapipeline.create_pipeline']( name, name, region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_create_pipeline: ret['result'] = False ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format( name, result_create_pipeline['error']) return ret pipeline_id = result_create_pipeline['result'] result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition']( pipeline_id, _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects), parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects), parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values), region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_pipeline_definition: if _immutable_fields_error(result_pipeline_definition): # If update not possible, delete and retry result_delete_pipeline = __salt__['boto_datapipeline.delete_pipeline']( pipeline_id, region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_delete_pipeline: ret['result'] = False ret['comment'] = 'Failed to delete data pipeline {0}: {1}'.format( pipeline_id, result_delete_pipeline['error']) return ret result_create_pipeline = __salt__['boto_datapipeline.create_pipeline']( name, name, region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_create_pipeline: ret['result'] = False ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format( name, result_create_pipeline['error']) return ret pipeline_id = result_create_pipeline['result'] result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition']( 
pipeline_id, _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects), parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects), parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values), region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_pipeline_definition: # Still erroring after possible retry ret['result'] = False ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format( name, result_pipeline_definition['error']) return ret result_activate_pipeline = __salt__['boto_datapipeline.activate_pipeline']( pipeline_id, region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in result_activate_pipeline: ret['result'] = False ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format( name, result_pipeline_definition['error']) return ret pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition']( pipeline_id, version='active', region=region, key=key, keyid=keyid, profile=profile, ) if 'error' in pipeline_definition_result: new_pipeline_definition = {} else: new_pipeline_definition = _standardize(pipeline_definition_result['result']) if not old_pipeline_definition: ret['changes']['new'] = 'Pipeline created.' ret['comment'] = 'Data pipeline {0} created'.format(name) else: ret['changes']['diff'] = _diff(old_pipeline_definition, new_pipeline_definition) ret['comment'] = 'Data pipeline {0} updated'.format(name) return ret
[ "def", "present", "(", "name", ",", "pipeline_objects", "=", "None", ",", "pipeline_objects_from_pillars", "=", "'boto_datapipeline_pipeline_objects'", ",", "parameter_objects", "=", "None", ",", "parameter_objects_from_pillars", "=", "'boto_datapipeline_parameter_objects'", ",", "parameter_values", "=", "None", ",", "parameter_values_from_pillars", "=", "'boto_datapipeline_parameter_values'", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "pipeline_objects", "=", "pipeline_objects", "or", "{", "}", "parameter_objects", "=", "parameter_objects", "or", "{", "}", "parameter_values", "=", "parameter_values", "or", "{", "}", "present", ",", "old_pipeline_definition", "=", "_pipeline_present_with_definition", "(", "name", ",", "_pipeline_objects", "(", "pipeline_objects_from_pillars", ",", "pipeline_objects", ")", ",", "_parameter_objects", "(", "parameter_objects_from_pillars", ",", "parameter_objects", ")", ",", "_parameter_values", "(", "parameter_values_from_pillars", ",", "parameter_values", ")", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "present", ":", "ret", "[", "'comment'", "]", "=", "'AWS data pipeline {0} present'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Data pipeline {0} is set to be created or updated'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "result_create_pipeline", "=", "__salt__", "[", "'boto_datapipeline.create_pipeline'", "]", "(", "name", ",", "name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_create_pipeline", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create data pipeline {0}: {1}'", ".", "format", "(", "name", ",", "result_create_pipeline", "[", "'error'", "]", ")", "return", "ret", "pipeline_id", "=", "result_create_pipeline", "[", "'result'", "]", "result_pipeline_definition", "=", "__salt__", "[", "'boto_datapipeline.put_pipeline_definition'", "]", "(", "pipeline_id", ",", "_pipeline_objects", "(", "pipeline_objects_from_pillars", ",", "pipeline_objects", ")", ",", "parameter_objects", "=", "_parameter_objects", "(", "parameter_objects_from_pillars", ",", "parameter_objects", ")", ",", "parameter_values", "=", "_parameter_values", "(", "parameter_values_from_pillars", ",", "parameter_values", ")", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_pipeline_definition", ":", "if", "_immutable_fields_error", "(", "result_pipeline_definition", ")", ":", "# If update not possible, delete and retry", "result_delete_pipeline", "=", "__salt__", "[", "'boto_datapipeline.delete_pipeline'", "]", "(", "pipeline_id", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_delete_pipeline", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete data pipeline {0}: {1}'", ".", "format", "(", "pipeline_id", ",", "result_delete_pipeline", "[", 
"'error'", "]", ")", "return", "ret", "result_create_pipeline", "=", "__salt__", "[", "'boto_datapipeline.create_pipeline'", "]", "(", "name", ",", "name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_create_pipeline", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create data pipeline {0}: {1}'", ".", "format", "(", "name", ",", "result_create_pipeline", "[", "'error'", "]", ")", "return", "ret", "pipeline_id", "=", "result_create_pipeline", "[", "'result'", "]", "result_pipeline_definition", "=", "__salt__", "[", "'boto_datapipeline.put_pipeline_definition'", "]", "(", "pipeline_id", ",", "_pipeline_objects", "(", "pipeline_objects_from_pillars", ",", "pipeline_objects", ")", ",", "parameter_objects", "=", "_parameter_objects", "(", "parameter_objects_from_pillars", ",", "parameter_objects", ")", ",", "parameter_values", "=", "_parameter_values", "(", "parameter_values_from_pillars", ",", "parameter_values", ")", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_pipeline_definition", ":", "# Still erroring after possible retry", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create data pipeline {0}: {1}'", ".", "format", "(", "name", ",", "result_pipeline_definition", "[", "'error'", "]", ")", "return", "ret", "result_activate_pipeline", "=", "__salt__", "[", "'boto_datapipeline.activate_pipeline'", "]", "(", "pipeline_id", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "result_activate_pipeline", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to create data pipeline {0}: {1}'", ".", "format", "(", "name", ",", "result_pipeline_definition", "[", "'error'", "]", ")", "return", "ret", "pipeline_definition_result", "=", "__salt__", "[", "'boto_datapipeline.get_pipeline_definition'", "]", "(", "pipeline_id", ",", "version", "=", "'active'", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", ")", "if", "'error'", "in", "pipeline_definition_result", ":", "new_pipeline_definition", "=", "{", "}", "else", ":", "new_pipeline_definition", "=", "_standardize", "(", "pipeline_definition_result", "[", "'result'", "]", ")", "if", "not", "old_pipeline_definition", ":", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "'Pipeline created.'", "ret", "[", "'comment'", "]", "=", "'Data pipeline {0} created'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'changes'", "]", "[", "'diff'", "]", "=", "_diff", "(", "old_pipeline_definition", ",", "new_pipeline_definition", ")", "ret", "[", "'comment'", "]", "=", "'Data pipeline {0} updated'", ".", "format", "(", "name", ")", "return", "ret" ]
Ensure the data pipeline exists with matching definition. name Name of the service to ensure a data pipeline exists for. pipeline_objects Pipeline objects to use. Will override objects read from pillars. pipeline_objects_from_pillars The pillar key to use for lookup. parameter_objects Parameter objects to use. Will override objects read from pillars. parameter_objects_from_pillars The pillar key to use for lookup. parameter_values Parameter values to use. Will override values read from pillars. parameter_values_from_pillars The pillar key to use for lookup. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
[ "Ensure", "the", "data", "pipeline", "exists", "with", "matching", "definition", "." ]
python
train
34.816667
wummel/linkchecker
linkcheck/parser/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/__init__.py#L101-L106
def parse_swf (url_data): """Parse a SWF file for URLs.""" linkfinder = linkparse.swf_url_re.finditer for mo in linkfinder(url_data.get_content()): url = mo.group() url_data.add_url(url)
[ "def", "parse_swf", "(", "url_data", ")", ":", "linkfinder", "=", "linkparse", ".", "swf_url_re", ".", "finditer", "for", "mo", "in", "linkfinder", "(", "url_data", ".", "get_content", "(", ")", ")", ":", "url", "=", "mo", ".", "group", "(", ")", "url_data", ".", "add_url", "(", "url", ")" ]
Parse a SWF file for URLs.
[ "Parse", "a", "SWF", "file", "for", "URLs", "." ]
python
train
34.833333
saltstack/salt
salt/cache/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L158-L180
def fetch(self, bank, key): ''' Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key is not found. :raises SaltCacheError: Raises an exception if the cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.fetch'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs)
[ "def", "fetch", "(", "self", ",", "bank", ",", "key", ")", ":", "fun", "=", "'{0}.fetch'", ".", "format", "(", "self", ".", "driver", ")", "return", "self", ".", "modules", "[", "fun", "]", "(", "bank", ",", "key", ",", "*", "*", "self", ".", "_kwargs", ")" ]
Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key is not found. :raises SaltCacheError: Raises an exception if the cache driver detected an error accessing data in the cache backend (auth, permissions, etc).
[ "Fetch", "data", "using", "the", "specified", "module" ]
python
train
36.869565
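Hedged sketch of Cache.fetch; the config path, bank and key names are placeholders and assume data was stored under them earlier:

import salt.config
import salt.cache
opts = salt.config.minion_config('/etc/salt/minion')  # default minion config path
cache = salt.cache.Cache(opts)
data = cache.fetch('grains/minion1', 'data')           # {} if the bank/key does not exist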
cloudbase/python-hnvclient
hnv/client.py
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L1862-L1902
def process_raw_data(cls, raw_data): """Create a new model using raw API response.""" properties = raw_data.get("properties", {}) raw_content = properties.get("ipSecConfiguration", None) if raw_content is not None: ip_sec = IPSecConfiguration.from_raw_data(raw_content) properties["ipSecConfiguration"] = ip_sec ip_addresses = [] for raw_content in properties.get("ipAddresses", []): ip_addresses.append(IPAddress.from_raw_data(raw_content)) properties["ipAddresses"] = ip_addresses routes = [] for raw_content in properties.get("routes", []): routes.append(NetworkInterfaceRoute.from_raw_data(raw_content)) properties["routes"] = routes raw_content = properties.get("statistics", None) if raw_content is not None: statistics = NetworkInterfaceStatistics.from_raw_data( raw_content) properties["statistics"] = statistics raw_content = properties.get("greConfiguration", None) if raw_content is not None: gre_configuration = GREConfiguration.from_raw_data(raw_content) properties["greConfiguration"] = gre_configuration raw_content = properties.get("l3Configuration", None) if raw_content is not None: l3_configuration = L3Configuration.from_raw_data(raw_content) properties["l3Configuration"] = l3_configuration raw_content = properties.get("gateway", None) if raw_content is not None: gateway = Resource.from_raw_data(raw_content) properties["gateway"] = gateway return super(NetworkConnections, cls).process_raw_data(raw_data)
[ "def", "process_raw_data", "(", "cls", ",", "raw_data", ")", ":", "properties", "=", "raw_data", ".", "get", "(", "\"properties\"", ",", "{", "}", ")", "raw_content", "=", "properties", ".", "get", "(", "\"ipSecConfiguration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "ip_sec", "=", "IPSecConfiguration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"ipSecConfiguration\"", "]", "=", "ip_sec", "ip_addresses", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"ipAddresses\"", ",", "[", "]", ")", ":", "ip_addresses", ".", "append", "(", "IPAddress", ".", "from_raw_data", "(", "raw_content", ")", ")", "properties", "[", "\"ipAddresses\"", "]", "=", "ip_addresses", "routes", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"routes\"", ",", "[", "]", ")", ":", "routes", ".", "append", "(", "NetworkInterfaceRoute", ".", "from_raw_data", "(", "raw_content", ")", ")", "properties", "[", "\"routes\"", "]", "=", "routes", "raw_content", "=", "properties", ".", "get", "(", "\"statistics\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "statistics", "=", "NetworkInterfaceStatistics", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"statistics\"", "]", "=", "statistics", "raw_content", "=", "properties", ".", "get", "(", "\"greConfiguration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "gre_configuration", "=", "GREConfiguration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"greConfiguration\"", "]", "=", "gre_configuration", "raw_content", "=", "properties", ".", "get", "(", "\"l3Configuration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "l3_configuration", "=", "L3Configuration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"l3Configuration\"", "]", "=", "l3_configuration", "raw_content", "=", "properties", ".", "get", "(", "\"gateway\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "gateway", "=", "Resource", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"gateway\"", "]", "=", "gateway", "return", "super", "(", "NetworkConnections", ",", "cls", ")", ".", "process_raw_data", "(", "raw_data", ")" ]
Create a new model using raw API response.
[ "Create", "a", "new", "model", "using", "raw", "API", "response", "." ]
python
train
41.658537
oceanprotocol/squid-py
squid_py/agreements/storage.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/storage.py#L8-L39
def record_service_agreement(storage_path, service_agreement_id, did, service_definition_id, price, files, start_time, status='pending'): """ Records the given pending service agreement. :param storage_path: storage path for the internal db, str :param service_agreement_id: :param did: DID, str :param service_definition_id: identifier of the service inside the asset DDO, str :param price: Asset price, int :param files: :param start_time: :param status: :return: """ conn = sqlite3.connect(storage_path) try: cursor = conn.cursor() cursor.execute( '''CREATE TABLE IF NOT EXISTS service_agreements (id VARCHAR PRIMARY KEY, did VARCHAR, service_definition_id INTEGER, price INTEGER, files VARCHAR, start_time INTEGER, status VARCHAR(10));''' ) cursor.execute( 'INSERT OR REPLACE INTO service_agreements VALUES (?,?,?,?,?,?,?)', [service_agreement_id, did, service_definition_id, price, files, start_time, status], ) conn.commit() finally: conn.close()
[ "def", "record_service_agreement", "(", "storage_path", ",", "service_agreement_id", ",", "did", ",", "service_definition_id", ",", "price", ",", "files", ",", "start_time", ",", "status", "=", "'pending'", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "storage_path", ")", "try", ":", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'''CREATE TABLE IF NOT EXISTS service_agreements\n (id VARCHAR PRIMARY KEY, did VARCHAR, service_definition_id INTEGER, \n price INTEGER, files VARCHAR, start_time INTEGER, status VARCHAR(10));'''", ")", "cursor", ".", "execute", "(", "'INSERT OR REPLACE INTO service_agreements VALUES (?,?,?,?,?,?,?)'", ",", "[", "service_agreement_id", ",", "did", ",", "service_definition_id", ",", "price", ",", "files", ",", "start_time", ",", "status", "]", ",", ")", "conn", ".", "commit", "(", ")", "finally", ":", "conn", ".", "close", "(", ")" ]
Records the given pending service agreement. :param storage_path: storage path for the internal db, str :param service_agreement_id: :param did: DID, str :param service_definition_id: identifier of the service inside the asset DDO, str :param price: Asset price, int :param files: :param start_time: :param status: :return:
[ "Records", "the", "given", "pending", "service", "agreement", "." ]
python
train
36.78125
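Illustrative call with placeholder values; the sqlite file is created on first use:

from squid_py.agreements.storage import record_service_agreement
record_service_agreement(
    storage_path='/tmp/squid_agreements.db',
    service_agreement_id='0x1234',
    did='did:op:0xabc',
    service_definition_id='1',
    price=10,
    files='encrypted-files-blob',
    start_time=1554752855,
)  # status defaults to 'pending'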
ppb/pursuedpybear
ppb/scenes.py
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/ppb/scenes.py#L57-L84
def get(self, *, kind: Type=None, tag: Hashable=None, **_) -> Iterator: """ Get an iterator of objects by kind or tag. kind: Any type. Pass to get a subset of contained items with the given type. tag: Any Hashable object. Pass to get a subset of contained items with the given tag. Pass both kind and tag to get objects that are both that type and that tag. Examples: container.get(kind=MyObject) container.get(tag="red") container.get(kind=MyObject, tag="red") """ if kind is None and tag is None: raise TypeError("get() takes at least one keyword-only argument. 'kind' or 'tag'.") kinds = self.all tags = self.all if kind is not None: kinds = self.kinds[kind] if tag is not None: tags = self.tags[tag] return (x for x in kinds.intersection(tags))
[ "def", "get", "(", "self", ",", "*", ",", "kind", ":", "Type", "=", "None", ",", "tag", ":", "Hashable", "=", "None", ",", "*", "*", "_", ")", "->", "Iterator", ":", "if", "kind", "is", "None", "and", "tag", "is", "None", ":", "raise", "TypeError", "(", "\"get() takes at least one keyword-only argument. 'kind' or 'tag'.\"", ")", "kinds", "=", "self", ".", "all", "tags", "=", "self", ".", "all", "if", "kind", "is", "not", "None", ":", "kinds", "=", "self", ".", "kinds", "[", "kind", "]", "if", "tag", "is", "not", "None", ":", "tags", "=", "self", ".", "tags", "[", "tag", "]", "return", "(", "x", "for", "x", "in", "kinds", ".", "intersection", "(", "tags", ")", ")" ]
Get an iterator of objects by kind or tag. kind: Any type. Pass to get a subset of contained items with the given type. tag: Any Hashable object. Pass to get a subset of contained items with the given tag. Pass both kind and tag to get objects that are both that type and that tag. Examples: container.get(kind=MyObject) container.get(tag="red") container.get(kind=MyObject, tag="red")
[ "Get", "an", "iterator", "of", "objects", "by", "kind", "or", "tag", "." ]
python
train
33.464286
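Sketch of the container API; `scene` is assumed to be an existing ppb scene (or other owner of this container) that already holds sprites, and Player is a hypothetical Sprite subclass:

reds = list(scene.get(tag='red'))
players = list(scene.get(kind=Player))
red_players = list(scene.get(kind=Player, tag='red'))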
facelessuser/backrefs
backrefs/_bregex_parse.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bregex_parse.py#L304-L341
def subgroup(self, t, i): """Handle parenthesis.""" # (?flags) flags = self.get_flags(i, self.version == _regex.V0) if flags: self.flags(flags[2:-1]) return [flags] # (?#comment) comments = self.get_comments(i) if comments: return [comments] verbose = self.verbose # (?flags:pattern) flags = self.get_flags(i, (self.version == _regex.V0), True) if flags: t = flags self.flags(flags[2:-1], scoped=True) current = [] try: while t != ')': if not current: current.append(t) else: current.extend(self.normal(t, i)) t = next(i) except StopIteration: pass self.verbose = verbose if t == ")": current.append(t) return current
[ "def", "subgroup", "(", "self", ",", "t", ",", "i", ")", ":", "# (?flags)", "flags", "=", "self", ".", "get_flags", "(", "i", ",", "self", ".", "version", "==", "_regex", ".", "V0", ")", "if", "flags", ":", "self", ".", "flags", "(", "flags", "[", "2", ":", "-", "1", "]", ")", "return", "[", "flags", "]", "# (?#comment)", "comments", "=", "self", ".", "get_comments", "(", "i", ")", "if", "comments", ":", "return", "[", "comments", "]", "verbose", "=", "self", ".", "verbose", "# (?flags:pattern)", "flags", "=", "self", ".", "get_flags", "(", "i", ",", "(", "self", ".", "version", "==", "_regex", ".", "V0", ")", ",", "True", ")", "if", "flags", ":", "t", "=", "flags", "self", ".", "flags", "(", "flags", "[", "2", ":", "-", "1", "]", ",", "scoped", "=", "True", ")", "current", "=", "[", "]", "try", ":", "while", "t", "!=", "')'", ":", "if", "not", "current", ":", "current", ".", "append", "(", "t", ")", "else", ":", "current", ".", "extend", "(", "self", ".", "normal", "(", "t", ",", "i", ")", ")", "t", "=", "next", "(", "i", ")", "except", "StopIteration", ":", "pass", "self", ".", "verbose", "=", "verbose", "if", "t", "==", "\")\"", ":", "current", ".", "append", "(", "t", ")", "return", "current" ]
Handle parenthesis.
[ "Handle", "parenthesis", "." ]
python
train
23.868421
MicroPyramid/forex-python
forex_python/bitcoin.py
https://github.com/MicroPyramid/forex-python/blob/dc34868ec7c7eb49b3b963d6daa3897b7095ba09/forex_python/bitcoin.py#L75-L97
def convert_to_btc(self, amount, currency): """ Convert X amount to Bit Coins """ if isinstance(amount, Decimal): use_decimal = True else: use_decimal = self._force_decimal url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency) response = requests.get(url) if response.status_code == 200: data = response.json() price = data.get('bpi').get(currency, {}).get('rate_float', None) if price: if use_decimal: price = Decimal(price) try: converted_btc = amount/price return converted_btc except TypeError: raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True") raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
[ "def", "convert_to_btc", "(", "self", ",", "amount", ",", "currency", ")", ":", "if", "isinstance", "(", "amount", ",", "Decimal", ")", ":", "use_decimal", "=", "True", "else", ":", "use_decimal", "=", "self", ".", "_force_decimal", "url", "=", "'https://api.coindesk.com/v1/bpi/currentprice/{}.json'", ".", "format", "(", "currency", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "if", "response", ".", "status_code", "==", "200", ":", "data", "=", "response", ".", "json", "(", ")", "price", "=", "data", ".", "get", "(", "'bpi'", ")", ".", "get", "(", "currency", ",", "{", "}", ")", ".", "get", "(", "'rate_float'", ",", "None", ")", "if", "price", ":", "if", "use_decimal", ":", "price", "=", "Decimal", "(", "price", ")", "try", ":", "converted_btc", "=", "amount", "/", "price", "return", "converted_btc", "except", "TypeError", ":", "raise", "DecimalFloatMismatchError", "(", "\"convert_to_btc requires amount parameter is of type Decimal when force_decimal=True\"", ")", "raise", "RatesNotAvailableError", "(", "\"BitCoin Rates Source Not Ready For Given date\"", ")" ]
Convert X amount to Bit Coins
[ "Convert", "X", "amount", "to", "Bit", "Coins" ]
python
train
41.652174
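Usage sketch; note this performs a live HTTP request to the CoinDesk endpoint when executed:

from forex_python.bitcoin import BtcConverter
b = BtcConverter()
print(b.convert_to_btc(400, 'USD'))  # BTC equivalent of 400 USD at the current rate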
i3visio/deepify
deepify/zeronet.py
https://github.com/i3visio/deepify/blob/2af04e0bea3eaabe96b0565e10f7eeb29b042a2b/deepify/zeronet.py#L52-L81
def _grabContentFromUrl(self, url): """ Function that abstracts capturing a URL. This method rewrites the one from Wrapper. :param url: The URL to be processed. :return: The response in a Json format. """ # Defining an empty object for the response info = {} # This part has to be modified... try: # Configuring the socket queryURL = "http://" + self.info["host"] + ":" + self.info["port"] + "/" + url response = urllib2.urlopen(queryURL) # Rebuilding data to be processed data = str(response.headers) + "\n" data += response.read() # Processing data as expected info = self._createDataStructure(data) # Try to make the errors clear for other users except Exception, e: errMsg = "ERROR Exception. Something seems to be wrong with the Zeronet Bundler." raise Exception( errMsg + " " + str(e)) return info
[ "def", "_grabContentFromUrl", "(", "self", ",", "url", ")", ":", "# Defining an empty object for the response", "info", "=", "{", "}", "# This part has to be modified... ", "try", ":", "# Configuring the socket", "queryURL", "=", "\"http://\"", "+", "self", ".", "info", "[", "\"host\"", "]", "+", "\":\"", "+", "self", ".", "info", "[", "\"port\"", "]", "+", "\"/\"", "+", "url", "response", "=", "urllib2", ".", "urlopen", "(", "queryURL", ")", "# Rebuilding data to be processed", "data", "=", "str", "(", "response", ".", "headers", ")", "+", "\"\\n\"", "data", "+=", "response", ".", "read", "(", ")", "# Processing data as expected", "info", "=", "self", ".", "_createDataStructure", "(", "data", ")", "# Try to make the errors clear for other users", "except", "Exception", ",", "e", ":", "errMsg", "=", "\"ERROR Exception. Something seems to be wrong with the Zeronet Bundler.\"", "raise", "Exception", "(", "errMsg", "+", "\" \"", "+", "str", "(", "e", ")", ")", "return", "info" ]
Function that abstracts capturing a URL. This method rewrites the one from Wrapper. :param url: The URL to be processed. :return: The response in a Json format.
[ "Function", "that", "abstracts", "capturing", "a", "URL", ".", "This", "method", "rewrites", "the", "one", "from", "Wrapper", ".", ":", "param", "url", ":", "The", "URL", "to", "be", "processed", ".", ":", "return", ":", "The", "response", "in", "a", "Json", "format", "." ]
python
train
36.7
instaloader/instaloader
instaloader/instaloader.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L284-L321
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None: """Updates picture caption / Post metadata info""" def _elliptify(caption): pcaption = caption.replace('\n', ' ').strip() return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']' filename += '.txt' caption += '\n' pcaption = _elliptify(caption) caption = caption.encode("UTF-8") with suppress(FileNotFoundError): with open(filename, 'rb') as file: file_caption = file.read() if file_caption.replace(b'\r\n', b'\n') == caption.replace(b'\r\n', b'\n'): try: self.context.log(pcaption + ' unchanged', end=' ', flush=True) except UnicodeEncodeError: self.context.log('txt unchanged', end=' ', flush=True) return None else: def get_filename(index): return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index) i = 0 while os.path.isfile(get_filename(i)): i = i + 1 for index in range(i, 0, -1): os.rename(get_filename(index - 1), get_filename(index)) try: self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True) except UnicodeEncodeError: self.context.log('txt updated', end=' ', flush=True) try: self.context.log(pcaption, end=' ', flush=True) except UnicodeEncodeError: self.context.log('txt', end=' ', flush=True) with open(filename, 'wb') as text_file: shutil.copyfileobj(BytesIO(caption), text_file) os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
[ "def", "save_caption", "(", "self", ",", "filename", ":", "str", ",", "mtime", ":", "datetime", ",", "caption", ":", "str", ")", "->", "None", ":", "def", "_elliptify", "(", "caption", ")", ":", "pcaption", "=", "caption", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "strip", "(", ")", "return", "'['", "+", "(", "(", "pcaption", "[", ":", "29", "]", "+", "u\"\\u2026\"", ")", "if", "len", "(", "pcaption", ")", ">", "31", "else", "pcaption", ")", "+", "']'", "filename", "+=", "'.txt'", "caption", "+=", "'\\n'", "pcaption", "=", "_elliptify", "(", "caption", ")", "caption", "=", "caption", ".", "encode", "(", "\"UTF-8\"", ")", "with", "suppress", "(", "FileNotFoundError", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "file", ":", "file_caption", "=", "file", ".", "read", "(", ")", "if", "file_caption", ".", "replace", "(", "b'\\r\\n'", ",", "b'\\n'", ")", "==", "caption", ".", "replace", "(", "b'\\r\\n'", ",", "b'\\n'", ")", ":", "try", ":", "self", ".", "context", ".", "log", "(", "pcaption", "+", "' unchanged'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "except", "UnicodeEncodeError", ":", "self", ".", "context", ".", "log", "(", "'txt unchanged'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "return", "None", "else", ":", "def", "get_filename", "(", "index", ")", ":", "return", "filename", "if", "index", "==", "0", "else", "'{0}_old_{2:02}{1}'", ".", "format", "(", "*", "os", ".", "path", ".", "splitext", "(", "filename", ")", ",", "index", ")", "i", "=", "0", "while", "os", ".", "path", ".", "isfile", "(", "get_filename", "(", "i", ")", ")", ":", "i", "=", "i", "+", "1", "for", "index", "in", "range", "(", "i", ",", "0", ",", "-", "1", ")", ":", "os", ".", "rename", "(", "get_filename", "(", "index", "-", "1", ")", ",", "get_filename", "(", "index", ")", ")", "try", ":", "self", ".", "context", ".", "log", "(", "_elliptify", "(", "file_caption", ".", "decode", "(", "\"UTF-8\"", ")", ")", "+", "' updated'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "except", "UnicodeEncodeError", ":", "self", ".", "context", ".", "log", "(", "'txt updated'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "try", ":", "self", ".", "context", ".", "log", "(", "pcaption", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "except", "UnicodeEncodeError", ":", "self", ".", "context", ".", "log", "(", "'txt'", ",", "end", "=", "' '", ",", "flush", "=", "True", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "text_file", ":", "shutil", ".", "copyfileobj", "(", "BytesIO", "(", "caption", ")", ",", "text_file", ")", "os", ".", "utime", "(", "filename", ",", "(", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ",", "mtime", ".", "timestamp", "(", ")", ")", ")" ]
Updates picture caption / Post metadata info
[ "Updates", "picture", "caption", "/", "Post", "metadata", "info" ]
python
train
49.868421
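Hedged sketch: the filename stem and caption are placeholders; '.txt' is appended by the method and an existing caption file is rotated to *_old_NN when it changes:

from datetime import datetime
from instaloader import Instaloader
L = Instaloader()
L.save_caption('2019-04-01_12-00-00_UTC', datetime.now(), 'Example caption text')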
centralniak/py-raildriver
raildriver/library.py
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L67-L83
def get_controller_value(self, index_or_name, value_type): """ Returns current/min/max value of controller at given index or name. It is much more efficient to query using an integer index rather than string name. Name is fine for infrequent updates but it's not advised to be used every second or so. See `get_controller_list` for an example of how to cache a dictionary of {name: index} pairs. :param index_or_name integer index or string name :param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX :return float """ if not isinstance(index_or_name, int): index = self.get_controller_index(index_or_name) else: index = index_or_name return self.dll.GetControllerValue(index, value_type)
[ "def", "get_controller_value", "(", "self", ",", "index_or_name", ",", "value_type", ")", ":", "if", "not", "isinstance", "(", "index_or_name", ",", "int", ")", ":", "index", "=", "self", ".", "get_controller_index", "(", "index_or_name", ")", "else", ":", "index", "=", "index_or_name", "return", "self", ".", "dll", ".", "GetControllerValue", "(", "index", ",", "value_type", ")" ]
Returns current/min/max value of controller at given index or name. It is much more efficient to query using an integer index rather than string name. Name is fine for infrequent updates but it's not advised to be used every second or so. See `get_controller_list` for an example of how to cache a dictionary of {name: index} pairs. :param index_or_name integer index or string name :param value_type one of VALUE_CURRENT, VALUE_MIN, VALUE_MAX :return float
[ "Returns", "current", "/", "min", "/", "max", "value", "of", "controller", "at", "given", "index", "or", "name", "." ]
python
train
46.647059
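Sketch only: needs a running Train Simulator session; the controller name and the assumption that 0 selects the current value are illustrative:

from raildriver import RailDriver
rd = RailDriver()
speed = rd.get_controller_value('SpeedometerMPH', 0)  # 0 assumed to mean VALUE_CURRENT
print(speed)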
bluec0re/python-helperlib
helperlib/logging.py
https://github.com/bluec0re/python-helperlib/blob/a2ac429668a6b86d3dc5e686978965c938f07d2c/helperlib/logging.py#L73-L124
def default_config(level=logging.INFO, auto_init=True, new_formatter=False, **kwargs): """ Returns the default config dictionary and inits the logging system if requested Keyword arguments: level -- loglevel of the console handler (Default: logging.INFO) auto_init -- initialize the logging system with the provided config (Default: True) **kwargs -- additional options for the logging system """ formatters = { 'color': { '()': __name__ + '.ColorFormatter', 'format': '[%(levelname)s] %(message)s' } } if new_formatter: formatters = { 'color': { '()': __name__ + '.NewColorFormatter', 'format': '[{levelname}] {message}' } } options = { 'version': 1, 'disable_existing_loggers': False, 'formatters': formatters, 'filters': {}, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'color', 'level': level,#logging.getLevelName(level), 'stream': 'ext://sys.stderr', } }, 'loggers': { }, 'root': { 'level': 'NOTSET', 'filters': [], 'handlers': ['console'], } } options.update(kwargs) if auto_init: logging.config.dictConfig(options) return options
[ "def", "default_config", "(", "level", "=", "logging", ".", "INFO", ",", "auto_init", "=", "True", ",", "new_formatter", "=", "False", ",", "*", "*", "kwargs", ")", ":", "formatters", "=", "{", "'color'", ":", "{", "'()'", ":", "__name__", "+", "'.ColorFormatter'", ",", "'format'", ":", "'[%(levelname)s] %(message)s'", "}", "}", "if", "new_formatter", ":", "formatters", "=", "{", "'color'", ":", "{", "'()'", ":", "__name__", "+", "'.NewColorFormatter'", ",", "'format'", ":", "'[{levelname}] {message}'", "}", "}", "options", "=", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "False", ",", "'formatters'", ":", "formatters", ",", "'filters'", ":", "{", "}", ",", "'handlers'", ":", "{", "'console'", ":", "{", "'class'", ":", "'logging.StreamHandler'", ",", "'formatter'", ":", "'color'", ",", "'level'", ":", "level", ",", "#logging.getLevelName(level),", "'stream'", ":", "'ext://sys.stderr'", ",", "}", "}", ",", "'loggers'", ":", "{", "}", ",", "'root'", ":", "{", "'level'", ":", "'NOTSET'", ",", "'filters'", ":", "[", "]", ",", "'handlers'", ":", "[", "'console'", "]", ",", "}", "}", "options", ".", "update", "(", "kwargs", ")", "if", "auto_init", ":", "logging", ".", "config", ".", "dictConfig", "(", "options", ")", "return", "options" ]
Returns the default config dictionary and inits the logging system if requested Keyword arguments: level -- loglevel of the console handler (Default: logging.INFO) auto_init -- initialize the logging system with the provided config (Default: True) **kwargs -- additional options for the logging system
[ "Returns", "the", "default", "config", "dictionary", "and", "inits", "the", "logging", "system", "if", "requested", "Keyword", "arguments", ":", "level", "--", "loglevel", "of", "the", "console", "handler", "(", "Default", ":", "logging", ".", "INFO", ")", "auto_init", "--", "initialize", "the", "logging", "system", "with", "the", "provided", "config", "(", "Default", ":", "True", ")", "**", "kwargs", "--", "additional", "options", "for", "the", "logging", "system" ]
python
train
27.596154
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxfile_functions.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L621-L708
def download_folder(project, destdir, folder="/", overwrite=False, chunksize=dxfile.DEFAULT_BUFFER_SIZE, show_progress=False, **kwargs): ''' :param project: Project ID to use as context for this download. :type project: string :param destdir: Local destination location :type destdir: string :param folder: Path to the remote folder to download :type folder: string :param overwrite: Overwrite existing files :type overwrite: boolean Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*. Example:: download_folder("project-xxxx", "/home/jsmith/input", folder="/input") ''' def ensure_local_dir(d): if not os.path.isdir(d): if os.path.exists(d): raise DXFileError("Destination location '{}' already exists and is not a directory".format(d)) logger.debug("Creating destination directory: '%s'", d) os.makedirs(d) def compose_local_dir(d, remote_folder, remote_subfolder): suffix = remote_subfolder[1:] if remote_folder == "/" else remote_subfolder[len(remote_folder) + 1:] if os.sep != '/': suffix = suffix.replace('/', os.sep) return os.path.join(d, suffix) if suffix != "" else d normalized_folder = folder.strip() if normalized_folder != "/" and normalized_folder.endswith("/"): normalized_folder = normalized_folder[:-1] if normalized_folder == "": raise DXFileError("Invalid remote folder name: '{}'".format(folder)) normalized_dest_dir = os.path.normpath(destdir).strip() if normalized_dest_dir == "": raise DXFileError("Invalid destination directory name: '{}'".format(destdir)) # Creating target directory tree remote_folders = list(list_subfolders(project, normalized_folder, recurse=True)) if len(remote_folders) <= 0: raise DXFileError("Remote folder '{}' not found".format(normalized_folder)) remote_folders.sort() for remote_subfolder in remote_folders: ensure_local_dir(compose_local_dir(normalized_dest_dir, normalized_folder, remote_subfolder)) # Downloading files describe_input = dict(fields=dict(folder=True, name=True, id=True, parts=True, size=True, drive=True, md5=True)) # A generator that returns the files one by one. We don't want to materialize it, because # there could be many files here. files_gen = dxpy.search.find_data_objects(classname='file', state='closed', project=project, folder=normalized_folder, recurse=True, describe=describe_input) if files_gen is None: # In python 3, the generator can be None, and iterating on it # will cause an error. return # Now it is safe, in both python 2 and 3, to iterate on the generator for remote_file in files_gen: local_filename = os.path.join(compose_local_dir(normalized_dest_dir, normalized_folder, remote_file['describe']['folder']), remote_file['describe']['name']) if os.path.exists(local_filename) and not overwrite: raise DXFileError( "Destination file '{}' already exists but no overwrite option is provided".format(local_filename) ) logger.debug("Downloading '%s/%s' remote file to '%s' location", ("" if remote_file['describe']['folder'] == "/" else remote_file['describe']['folder']), remote_file['describe']['name'], local_filename) download_dxfile(remote_file['describe']['id'], local_filename, chunksize=chunksize, project=project, show_progress=show_progress, describe_output=remote_file['describe'], **kwargs)
[ "def", "download_folder", "(", "project", ",", "destdir", ",", "folder", "=", "\"/\"", ",", "overwrite", "=", "False", ",", "chunksize", "=", "dxfile", ".", "DEFAULT_BUFFER_SIZE", ",", "show_progress", "=", "False", ",", "*", "*", "kwargs", ")", ":", "def", "ensure_local_dir", "(", "d", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "d", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "raise", "DXFileError", "(", "\"Destination location '{}' already exists and is not a directory\"", ".", "format", "(", "d", ")", ")", "logger", ".", "debug", "(", "\"Creating destination directory: '%s'\"", ",", "d", ")", "os", ".", "makedirs", "(", "d", ")", "def", "compose_local_dir", "(", "d", ",", "remote_folder", ",", "remote_subfolder", ")", ":", "suffix", "=", "remote_subfolder", "[", "1", ":", "]", "if", "remote_folder", "==", "\"/\"", "else", "remote_subfolder", "[", "len", "(", "remote_folder", ")", "+", "1", ":", "]", "if", "os", ".", "sep", "!=", "'/'", ":", "suffix", "=", "suffix", ".", "replace", "(", "'/'", ",", "os", ".", "sep", ")", "return", "os", ".", "path", ".", "join", "(", "d", ",", "suffix", ")", "if", "suffix", "!=", "\"\"", "else", "d", "normalized_folder", "=", "folder", ".", "strip", "(", ")", "if", "normalized_folder", "!=", "\"/\"", "and", "normalized_folder", ".", "endswith", "(", "\"/\"", ")", ":", "normalized_folder", "=", "normalized_folder", "[", ":", "-", "1", "]", "if", "normalized_folder", "==", "\"\"", ":", "raise", "DXFileError", "(", "\"Invalid remote folder name: '{}'\"", ".", "format", "(", "folder", ")", ")", "normalized_dest_dir", "=", "os", ".", "path", ".", "normpath", "(", "destdir", ")", ".", "strip", "(", ")", "if", "normalized_dest_dir", "==", "\"\"", ":", "raise", "DXFileError", "(", "\"Invalid destination directory name: '{}'\"", ".", "format", "(", "destdir", ")", ")", "# Creating target directory tree", "remote_folders", "=", "list", "(", "list_subfolders", "(", "project", ",", "normalized_folder", ",", "recurse", "=", "True", ")", ")", "if", "len", "(", "remote_folders", ")", "<=", "0", ":", "raise", "DXFileError", "(", "\"Remote folder '{}' not found\"", ".", "format", "(", "normalized_folder", ")", ")", "remote_folders", ".", "sort", "(", ")", "for", "remote_subfolder", "in", "remote_folders", ":", "ensure_local_dir", "(", "compose_local_dir", "(", "normalized_dest_dir", ",", "normalized_folder", ",", "remote_subfolder", ")", ")", "# Downloading files", "describe_input", "=", "dict", "(", "fields", "=", "dict", "(", "folder", "=", "True", ",", "name", "=", "True", ",", "id", "=", "True", ",", "parts", "=", "True", ",", "size", "=", "True", ",", "drive", "=", "True", ",", "md5", "=", "True", ")", ")", "# A generator that returns the files one by one. 
We don't want to materialize it, because", "# there could be many files here.", "files_gen", "=", "dxpy", ".", "search", ".", "find_data_objects", "(", "classname", "=", "'file'", ",", "state", "=", "'closed'", ",", "project", "=", "project", ",", "folder", "=", "normalized_folder", ",", "recurse", "=", "True", ",", "describe", "=", "describe_input", ")", "if", "files_gen", "is", "None", ":", "# In python 3, the generator can be None, and iterating on it", "# will cause an error.", "return", "# Now it is safe, in both python 2 and 3, to iterate on the generator", "for", "remote_file", "in", "files_gen", ":", "local_filename", "=", "os", ".", "path", ".", "join", "(", "compose_local_dir", "(", "normalized_dest_dir", ",", "normalized_folder", ",", "remote_file", "[", "'describe'", "]", "[", "'folder'", "]", ")", ",", "remote_file", "[", "'describe'", "]", "[", "'name'", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "local_filename", ")", "and", "not", "overwrite", ":", "raise", "DXFileError", "(", "\"Destination file '{}' already exists but no overwrite option is provided\"", ".", "format", "(", "local_filename", ")", ")", "logger", ".", "debug", "(", "\"Downloading '%s/%s' remote file to '%s' location\"", ",", "(", "\"\"", "if", "remote_file", "[", "'describe'", "]", "[", "'folder'", "]", "==", "\"/\"", "else", "remote_file", "[", "'describe'", "]", "[", "'folder'", "]", ")", ",", "remote_file", "[", "'describe'", "]", "[", "'name'", "]", ",", "local_filename", ")", "download_dxfile", "(", "remote_file", "[", "'describe'", "]", "[", "'id'", "]", ",", "local_filename", ",", "chunksize", "=", "chunksize", ",", "project", "=", "project", ",", "show_progress", "=", "show_progress", ",", "describe_output", "=", "remote_file", "[", "'describe'", "]", ",", "*", "*", "kwargs", ")" ]
:param project: Project ID to use as context for this download. :type project: string :param destdir: Local destination location :type destdir: string :param folder: Path to the remote folder to download :type folder: string :param overwrite: Overwrite existing files :type overwrite: boolean Downloads the contents of the remote *folder* of the *project* into the local directory specified by *destdir*. Example:: download_folder("project-xxxx", "/home/jsmith/input", folder="/input")
[ ":", "param", "project", ":", "Project", "ID", "to", "use", "as", "context", "for", "this", "download", ".", ":", "type", "project", ":", "string", ":", "param", "destdir", ":", "Local", "destination", "location", ":", "type", "destdir", ":", "string", ":", "param", "folder", ":", "Path", "to", "the", "remote", "folder", "to", "download", ":", "type", "folder", ":", "string", ":", "param", "overwrite", ":", "Overwrite", "existing", "files", ":", "type", "overwrite", ":", "boolean" ]
python
train
47.727273
abilian/abilian-core
abilian/core/models/blob.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/core/models/blob.py#L112-L118
def md5(self): """Return md5 from meta, or compute it if absent.""" md5 = self.meta.get("md5") if md5 is None: md5 = str(hashlib.md5(self.value).hexdigest()) return md5
[ "def", "md5", "(", "self", ")", ":", "md5", "=", "self", ".", "meta", ".", "get", "(", "\"md5\"", ")", "if", "md5", "is", "None", ":", "md5", "=", "str", "(", "hashlib", ".", "md5", "(", "self", ".", "value", ")", ".", "hexdigest", "(", ")", ")", "return", "md5" ]
Return md5 from meta, or compute it if absent.
[ "Return", "md5", "from", "meta", "or", "compute", "it", "if", "absent", "." ]
python
train
29.571429
esheldon/fitsio
fitsio/fitslib.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L120-L190
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys): """ Convenience function to read the header from the specified FITS HDU The FITSHDR allows access to the values and comments by name and number. parameters ---------- filename: string A filename. ext: number or string, optional The extension. Either the numerical extension from zero or a string extension name. Default read primary header. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to select a particular version. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. case_sensitive: bool, optional Match extension names with case-sensitivity. Default is False. """ dont_create = 0 try: hdunum = ext+1 except TypeError: hdunum = None _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create) if hdunum is None: extname = mks(ext) if extver is None: extver_num = 0 else: extver_num = extver if not case_sensitive: # the builtin movnam_hdu is not case sensitive hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num) else: # for case sensitivity we'll need to run through # all the hdus found = False current_ext = 0 while True: hdunum = current_ext+1 try: hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used name, vers = _fits.get_hdu_name_version(hdunum) if name == extname: if extver is None: # take the first match found = True break else: if extver_num == vers: found = True break except OSError: break current_ext += 1 if not found: raise IOError( 'hdu not found: %s (extver %s)' % (extname, extver)) return FITSHDR(_fits.read_header(hdunum))
[ "def", "read_header", "(", "filename", ",", "ext", "=", "0", ",", "extver", "=", "None", ",", "case_sensitive", "=", "False", ",", "*", "*", "keys", ")", ":", "dont_create", "=", "0", "try", ":", "hdunum", "=", "ext", "+", "1", "except", "TypeError", ":", "hdunum", "=", "None", "_fits", "=", "_fitsio_wrap", ".", "FITS", "(", "filename", ",", "READONLY", ",", "dont_create", ")", "if", "hdunum", "is", "None", ":", "extname", "=", "mks", "(", "ext", ")", "if", "extver", "is", "None", ":", "extver_num", "=", "0", "else", ":", "extver_num", "=", "extver", "if", "not", "case_sensitive", ":", "# the builtin movnam_hdu is not case sensitive", "hdunum", "=", "_fits", ".", "movnam_hdu", "(", "ANY_HDU", ",", "extname", ",", "extver_num", ")", "else", ":", "# for case sensitivity we'll need to run through", "# all the hdus", "found", "=", "False", "current_ext", "=", "0", "while", "True", ":", "hdunum", "=", "current_ext", "+", "1", "try", ":", "hdu_type", "=", "_fits", ".", "movabs_hdu", "(", "hdunum", ")", "# noqa - not used", "name", ",", "vers", "=", "_fits", ".", "get_hdu_name_version", "(", "hdunum", ")", "if", "name", "==", "extname", ":", "if", "extver", "is", "None", ":", "# take the first match", "found", "=", "True", "break", "else", ":", "if", "extver_num", "==", "vers", ":", "found", "=", "True", "break", "except", "OSError", ":", "break", "current_ext", "+=", "1", "if", "not", "found", ":", "raise", "IOError", "(", "'hdu not found: %s (extver %s)'", "%", "(", "extname", ",", "extver", ")", ")", "return", "FITSHDR", "(", "_fits", ".", "read_header", "(", "hdunum", ")", ")" ]
Convenience function to read the header from the specified FITS HDU The FITSHDR allows access to the values and comments by name and number. parameters ---------- filename: string A filename. ext: number or string, optional The extension. Either the numerical extension from zero or a string extension name. Default read primary header. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to select a particular version. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. case_sensitive: bool, optional Match extension names with case-sensitivity. Default is False.
[ "Convenience", "function", "to", "read", "the", "header", "from", "the", "specified", "FITS", "HDU" ]
python
train
33.549296
jaraco/jaraco.itertools
jaraco/itertools.py
https://github.com/jaraco/jaraco.itertools/blob/0dc47c8924fa3d9ab676c3a6e195f03f728b72c6/jaraco/itertools.py#L1218-L1252
def partition_dict(items, key): """ Given an ordered dictionary of items and a key in that dict, return an ordered dict of items before, the keyed item, and an ordered dict of items after. >>> od = collections.OrderedDict(zip(range(5), 'abcde')) >>> before, item, after = partition_dict(od, 3) >>> before OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')]) >>> item 'd' >>> after OrderedDict([(4, 'e')]) Like string.partition, if the key is not found in the items, the before will contain all items, item will be None, and after will be an empty iterable. >>> before, item, after = partition_dict(od, -1) >>> before OrderedDict([(0, 'a'), ..., (4, 'e')]) >>> item >>> list(after) [] """ def unmatched(pair): test_key, item, = pair return test_key != key items_iter = iter(items.items()) item = items.get(key) left = collections.OrderedDict(itertools.takewhile(unmatched, items_iter)) right = collections.OrderedDict(items_iter) return left, item, right
[ "def", "partition_dict", "(", "items", ",", "key", ")", ":", "def", "unmatched", "(", "pair", ")", ":", "test_key", ",", "item", ",", "=", "pair", "return", "test_key", "!=", "key", "items_iter", "=", "iter", "(", "items", ".", "items", "(", ")", ")", "item", "=", "items", ".", "get", "(", "key", ")", "left", "=", "collections", ".", "OrderedDict", "(", "itertools", ".", "takewhile", "(", "unmatched", ",", "items_iter", ")", ")", "right", "=", "collections", ".", "OrderedDict", "(", "items_iter", ")", "return", "left", ",", "item", ",", "right" ]
Given an ordered dictionary of items and a key in that dict, return an ordered dict of items before, the keyed item, and an ordered dict of items after. >>> od = collections.OrderedDict(zip(range(5), 'abcde')) >>> before, item, after = partition_dict(od, 3) >>> before OrderedDict([(0, 'a'), (1, 'b'), (2, 'c')]) >>> item 'd' >>> after OrderedDict([(4, 'e')]) Like string.partition, if the key is not found in the items, the before will contain all items, item will be None, and after will be an empty iterable. >>> before, item, after = partition_dict(od, -1) >>> before OrderedDict([(0, 'a'), ..., (4, 'e')]) >>> item >>> list(after) []
[ "Given", "an", "ordered", "dictionary", "of", "items", "and", "a", "key", "in", "that", "dict", "return", "an", "ordered", "dict", "of", "items", "before", "the", "keyed", "item", "and", "an", "ordered", "dict", "of", "items", "after", "." ]
python
test
27.028571
pyamg/pyamg
pyamg/aggregation/rootnode.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/rootnode.py#L313-L466
def extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates, diagonal_dominance=False, keep=True): """Extend the multigrid hierarchy. Service routine to implement the strength of connection, aggregation, tentative prolongation construction, and prolongation smoothing. Called by smoothed_aggregation_solver. """ def unpack_arg(v): if isinstance(v, tuple): return v[0], v[1] else: return v, {} A = levels[-1].A B = levels[-1].B if A.symmetry == "nonsymmetric": AH = A.H.asformat(A.format) BH = levels[-1].BH # Compute the strength-of-connection matrix C, where larger # C[i, j] denote stronger couplings between i and j. fn, kwargs = unpack_arg(strength[len(levels)-1]) if fn == 'symmetric': C = symmetric_strength_of_connection(A, **kwargs) elif fn == 'classical': C = classical_strength_of_connection(A, **kwargs) elif fn == 'distance': C = distance_strength_of_connection(A, **kwargs) elif (fn == 'ode') or (fn == 'evolution'): if 'B' in kwargs: C = evolution_strength_of_connection(A, **kwargs) else: C = evolution_strength_of_connection(A, B, **kwargs) elif fn == 'energy_based': C = energy_based_strength_of_connection(A, **kwargs) elif fn == 'predefined': C = kwargs['C'].tocsr() elif fn == 'algebraic_distance': C = algebraic_distance(A, **kwargs) elif fn == 'affinity': C = affinity_distance(A, **kwargs) elif fn is None: C = A.tocsr() else: raise ValueError('unrecognized strength of connection method: %s' % str(fn)) # Avoid coarsening diagonally dominant rows flag, kwargs = unpack_arg(diagonal_dominance) if flag: C = eliminate_diag_dom_nodes(A, C, **kwargs) # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A). # AggOp is a boolean matrix, where the sparsity pattern for the k-th column # denotes the fine-grid nodes agglomerated into k-th coarse-grid node. fn, kwargs = unpack_arg(aggregate[len(levels)-1]) if fn == 'standard': AggOp, Cnodes = standard_aggregation(C, **kwargs) elif fn == 'naive': AggOp, Cnodes = naive_aggregation(C, **kwargs) elif fn == 'lloyd': AggOp, Cnodes = lloyd_aggregation(C, **kwargs) elif fn == 'predefined': AggOp = kwargs['AggOp'].tocsr() Cnodes = kwargs['Cnodes'] else: raise ValueError('unrecognized aggregation method %s' % str(fn)) # Improve near nullspace candidates by relaxing on A B = 0 fn, kwargs = unpack_arg(improve_candidates[len(levels)-1]) if fn is not None: b = np.zeros((A.shape[0], 1), dtype=A.dtype) B = relaxation_as_linear_operator((fn, kwargs), A, b) * B levels[-1].B = B if A.symmetry == "nonsymmetric": BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH levels[-1].BH = BH # Compute the tentative prolongator, T, which is a tentative interpolation # matrix from the coarse-grid to the fine-grid. T exactly interpolates # B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)]. T, dummy = fit_candidates(AggOp, B[:, 0:blocksize(A)]) del dummy if A.symmetry == "nonsymmetric": TH, dummyH = fit_candidates(AggOp, BH[:, 0:blocksize(A)]) del dummyH # Create necessary root node matrices Cpt_params = (True, get_Cpt_params(A, Cnodes, AggOp, T)) T = scale_T(T, Cpt_params[1]['P_I'], Cpt_params[1]['I_F']) if A.symmetry == "nonsymmetric": TH = scale_T(TH, Cpt_params[1]['P_I'], Cpt_params[1]['I_F']) # Set coarse grid near nullspace modes as injected fine grid near # null-space modes B = Cpt_params[1]['P_I'].T*levels[-1].B if A.symmetry == "nonsymmetric": BH = Cpt_params[1]['P_I'].T*levels[-1].BH # Smooth the tentative prolongator, so that it's accuracy is greatly # improved for algebraically smooth error. 
fn, kwargs = unpack_arg(smooth[len(levels)-1]) if fn == 'energy': P = energy_prolongation_smoother(A, T, C, B, levels[-1].B, Cpt_params=Cpt_params, **kwargs) elif fn is None: P = T else: raise ValueError('unrecognized prolongation smoother \ method %s' % str(fn)) # Compute the restriction matrix R, which interpolates from the fine-grid # to the coarse-grid. If A is nonsymmetric, then R must be constructed # based on A.H. Otherwise R = P.H or P.T. symmetry = A.symmetry if symmetry == 'hermitian': R = P.H elif symmetry == 'symmetric': R = P.T elif symmetry == 'nonsymmetric': fn, kwargs = unpack_arg(smooth[len(levels)-1]) if fn == 'energy': R = energy_prolongation_smoother(AH, TH, C, BH, levels[-1].BH, Cpt_params=Cpt_params, **kwargs) R = R.H elif fn is None: R = T.H else: raise ValueError('unrecognized prolongation smoother \ method %s' % str(fn)) if keep: levels[-1].C = C # strength of connection matrix levels[-1].AggOp = AggOp # aggregation operator levels[-1].T = T # tentative prolongator levels[-1].Fpts = Cpt_params[1]['Fpts'] # Fpts levels[-1].P_I = Cpt_params[1]['P_I'] # Injection operator levels[-1].I_F = Cpt_params[1]['I_F'] # Identity on F-pts levels[-1].I_C = Cpt_params[1]['I_C'] # Identity on C-pts levels[-1].P = P # smoothed prolongator levels[-1].R = R # restriction operator levels[-1].Cpts = Cpt_params[1]['Cpts'] # Cpts (i.e., rootnodes) levels.append(multilevel_solver.level()) A = R * A * P # Galerkin operator A.symmetry = symmetry levels[-1].A = A levels[-1].B = B # right near nullspace candidates if A.symmetry == "nonsymmetric": levels[-1].BH = BH
[ "def", "extend_hierarchy", "(", "levels", ",", "strength", ",", "aggregate", ",", "smooth", ",", "improve_candidates", ",", "diagonal_dominance", "=", "False", ",", "keep", "=", "True", ")", ":", "def", "unpack_arg", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "tuple", ")", ":", "return", "v", "[", "0", "]", ",", "v", "[", "1", "]", "else", ":", "return", "v", ",", "{", "}", "A", "=", "levels", "[", "-", "1", "]", ".", "A", "B", "=", "levels", "[", "-", "1", "]", ".", "B", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "AH", "=", "A", ".", "H", ".", "asformat", "(", "A", ".", "format", ")", "BH", "=", "levels", "[", "-", "1", "]", ".", "BH", "# Compute the strength-of-connection matrix C, where larger", "# C[i, j] denote stronger couplings between i and j.", "fn", ",", "kwargs", "=", "unpack_arg", "(", "strength", "[", "len", "(", "levels", ")", "-", "1", "]", ")", "if", "fn", "==", "'symmetric'", ":", "C", "=", "symmetric_strength_of_connection", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'classical'", ":", "C", "=", "classical_strength_of_connection", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'distance'", ":", "C", "=", "distance_strength_of_connection", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "(", "fn", "==", "'ode'", ")", "or", "(", "fn", "==", "'evolution'", ")", ":", "if", "'B'", "in", "kwargs", ":", "C", "=", "evolution_strength_of_connection", "(", "A", ",", "*", "*", "kwargs", ")", "else", ":", "C", "=", "evolution_strength_of_connection", "(", "A", ",", "B", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'energy_based'", ":", "C", "=", "energy_based_strength_of_connection", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'predefined'", ":", "C", "=", "kwargs", "[", "'C'", "]", ".", "tocsr", "(", ")", "elif", "fn", "==", "'algebraic_distance'", ":", "C", "=", "algebraic_distance", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'affinity'", ":", "C", "=", "affinity_distance", "(", "A", ",", "*", "*", "kwargs", ")", "elif", "fn", "is", "None", ":", "C", "=", "A", ".", "tocsr", "(", ")", "else", ":", "raise", "ValueError", "(", "'unrecognized strength of connection method: %s'", "%", "str", "(", "fn", ")", ")", "# Avoid coarsening diagonally dominant rows", "flag", ",", "kwargs", "=", "unpack_arg", "(", "diagonal_dominance", ")", "if", "flag", ":", "C", "=", "eliminate_diag_dom_nodes", "(", "A", ",", "C", ",", "*", "*", "kwargs", ")", "# Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).", "# AggOp is a boolean matrix, where the sparsity pattern for the k-th column", "# denotes the fine-grid nodes agglomerated into k-th coarse-grid node.", "fn", ",", "kwargs", "=", "unpack_arg", "(", "aggregate", "[", "len", "(", "levels", ")", "-", "1", "]", ")", "if", "fn", "==", "'standard'", ":", "AggOp", ",", "Cnodes", "=", "standard_aggregation", "(", "C", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'naive'", ":", "AggOp", ",", "Cnodes", "=", "naive_aggregation", "(", "C", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'lloyd'", ":", "AggOp", ",", "Cnodes", "=", "lloyd_aggregation", "(", "C", ",", "*", "*", "kwargs", ")", "elif", "fn", "==", "'predefined'", ":", "AggOp", "=", "kwargs", "[", "'AggOp'", "]", ".", "tocsr", "(", ")", "Cnodes", "=", "kwargs", "[", "'Cnodes'", "]", "else", ":", "raise", "ValueError", "(", "'unrecognized aggregation method %s'", "%", "str", "(", "fn", ")", ")", "# Improve near nullspace candidates by relaxing on A B = 0", "fn", ",", 
"kwargs", "=", "unpack_arg", "(", "improve_candidates", "[", "len", "(", "levels", ")", "-", "1", "]", ")", "if", "fn", "is", "not", "None", ":", "b", "=", "np", ".", "zeros", "(", "(", "A", ".", "shape", "[", "0", "]", ",", "1", ")", ",", "dtype", "=", "A", ".", "dtype", ")", "B", "=", "relaxation_as_linear_operator", "(", "(", "fn", ",", "kwargs", ")", ",", "A", ",", "b", ")", "*", "B", "levels", "[", "-", "1", "]", ".", "B", "=", "B", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "BH", "=", "relaxation_as_linear_operator", "(", "(", "fn", ",", "kwargs", ")", ",", "AH", ",", "b", ")", "*", "BH", "levels", "[", "-", "1", "]", ".", "BH", "=", "BH", "# Compute the tentative prolongator, T, which is a tentative interpolation", "# matrix from the coarse-grid to the fine-grid. T exactly interpolates", "# B_fine[:, 0:blocksize(A)] = T B_coarse[:, 0:blocksize(A)].", "T", ",", "dummy", "=", "fit_candidates", "(", "AggOp", ",", "B", "[", ":", ",", "0", ":", "blocksize", "(", "A", ")", "]", ")", "del", "dummy", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "TH", ",", "dummyH", "=", "fit_candidates", "(", "AggOp", ",", "BH", "[", ":", ",", "0", ":", "blocksize", "(", "A", ")", "]", ")", "del", "dummyH", "# Create necessary root node matrices", "Cpt_params", "=", "(", "True", ",", "get_Cpt_params", "(", "A", ",", "Cnodes", ",", "AggOp", ",", "T", ")", ")", "T", "=", "scale_T", "(", "T", ",", "Cpt_params", "[", "1", "]", "[", "'P_I'", "]", ",", "Cpt_params", "[", "1", "]", "[", "'I_F'", "]", ")", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "TH", "=", "scale_T", "(", "TH", ",", "Cpt_params", "[", "1", "]", "[", "'P_I'", "]", ",", "Cpt_params", "[", "1", "]", "[", "'I_F'", "]", ")", "# Set coarse grid near nullspace modes as injected fine grid near", "# null-space modes", "B", "=", "Cpt_params", "[", "1", "]", "[", "'P_I'", "]", ".", "T", "*", "levels", "[", "-", "1", "]", ".", "B", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "BH", "=", "Cpt_params", "[", "1", "]", "[", "'P_I'", "]", ".", "T", "*", "levels", "[", "-", "1", "]", ".", "BH", "# Smooth the tentative prolongator, so that it's accuracy is greatly", "# improved for algebraically smooth error.", "fn", ",", "kwargs", "=", "unpack_arg", "(", "smooth", "[", "len", "(", "levels", ")", "-", "1", "]", ")", "if", "fn", "==", "'energy'", ":", "P", "=", "energy_prolongation_smoother", "(", "A", ",", "T", ",", "C", ",", "B", ",", "levels", "[", "-", "1", "]", ".", "B", ",", "Cpt_params", "=", "Cpt_params", ",", "*", "*", "kwargs", ")", "elif", "fn", "is", "None", ":", "P", "=", "T", "else", ":", "raise", "ValueError", "(", "'unrecognized prolongation smoother \\\n method %s'", "%", "str", "(", "fn", ")", ")", "# Compute the restriction matrix R, which interpolates from the fine-grid", "# to the coarse-grid. If A is nonsymmetric, then R must be constructed", "# based on A.H. 
Otherwise R = P.H or P.T.", "symmetry", "=", "A", ".", "symmetry", "if", "symmetry", "==", "'hermitian'", ":", "R", "=", "P", ".", "H", "elif", "symmetry", "==", "'symmetric'", ":", "R", "=", "P", ".", "T", "elif", "symmetry", "==", "'nonsymmetric'", ":", "fn", ",", "kwargs", "=", "unpack_arg", "(", "smooth", "[", "len", "(", "levels", ")", "-", "1", "]", ")", "if", "fn", "==", "'energy'", ":", "R", "=", "energy_prolongation_smoother", "(", "AH", ",", "TH", ",", "C", ",", "BH", ",", "levels", "[", "-", "1", "]", ".", "BH", ",", "Cpt_params", "=", "Cpt_params", ",", "*", "*", "kwargs", ")", "R", "=", "R", ".", "H", "elif", "fn", "is", "None", ":", "R", "=", "T", ".", "H", "else", ":", "raise", "ValueError", "(", "'unrecognized prolongation smoother \\\n method %s'", "%", "str", "(", "fn", ")", ")", "if", "keep", ":", "levels", "[", "-", "1", "]", ".", "C", "=", "C", "# strength of connection matrix", "levels", "[", "-", "1", "]", ".", "AggOp", "=", "AggOp", "# aggregation operator", "levels", "[", "-", "1", "]", ".", "T", "=", "T", "# tentative prolongator", "levels", "[", "-", "1", "]", ".", "Fpts", "=", "Cpt_params", "[", "1", "]", "[", "'Fpts'", "]", "# Fpts", "levels", "[", "-", "1", "]", ".", "P_I", "=", "Cpt_params", "[", "1", "]", "[", "'P_I'", "]", "# Injection operator", "levels", "[", "-", "1", "]", ".", "I_F", "=", "Cpt_params", "[", "1", "]", "[", "'I_F'", "]", "# Identity on F-pts", "levels", "[", "-", "1", "]", ".", "I_C", "=", "Cpt_params", "[", "1", "]", "[", "'I_C'", "]", "# Identity on C-pts", "levels", "[", "-", "1", "]", ".", "P", "=", "P", "# smoothed prolongator", "levels", "[", "-", "1", "]", ".", "R", "=", "R", "# restriction operator", "levels", "[", "-", "1", "]", ".", "Cpts", "=", "Cpt_params", "[", "1", "]", "[", "'Cpts'", "]", "# Cpts (i.e., rootnodes)", "levels", ".", "append", "(", "multilevel_solver", ".", "level", "(", ")", ")", "A", "=", "R", "*", "A", "*", "P", "# Galerkin operator", "A", ".", "symmetry", "=", "symmetry", "levels", "[", "-", "1", "]", ".", "A", "=", "A", "levels", "[", "-", "1", "]", ".", "B", "=", "B", "# right near nullspace candidates", "if", "A", ".", "symmetry", "==", "\"nonsymmetric\"", ":", "levels", "[", "-", "1", "]", ".", "BH", "=", "BH" ]
Extend the multigrid hierarchy. Service routine to implement the strength of connection, aggregation, tentative prolongation construction, and prolongation smoothing. Called by smoothed_aggregation_solver.
[ "Extend", "the", "multigrid", "hierarchy", "." ]
python
train
39.837662
pymc-devs/pymc
pymc/Matplot.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L790-L894
def gof_plot( simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./', fontmap=None, verbose=0): """ Plots histogram of replicated data, indicating the location of the observed data :Arguments: simdata: array or PyMC object Trace of simulated data or the PyMC stochastic object containing trace. trueval: numeric True (observed) value of the data bins: int or string The number of bins, or a preferred binning method. Available methods include 'doanes', 'sturges' and 'sqrt' (defaults to 'doanes'). format (optional): string Graphic output format (defaults to png). suffix (optional): string Filename suffix. path (optional): string Specifies location for saving plots (defaults to local directory). fontmap (optional): dict Font map for plot. """ if fontmap is None: fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4} if not isinstance(simdata, ndarray): ## Can't just try and catch because ndarray objects also have ## `trace` method. simdata = simdata.trace() if ndim(trueval) == 1 and ndim(simdata == 2): # Iterate over more than one set of data for i in range(len(trueval)): n = name or 'MCMC' gof_plot( simdata[ :, i], trueval[ i], '%s[%i]' % ( n, i), bins=bins, format=format, suffix=suffix, path=path, fontmap=fontmap, verbose=verbose) return if verbose > 0: print_('Plotting', (name or 'MCMC') + suffix) figure() # Specify number of bins if bins is None: bins = 'sqrt' uniquevals = len(unique(simdata)) if bins == 'sturges': bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata)) elif bins == 'doanes': bins = uniquevals * ( uniquevals <= 25) or _doanes(simdata, len(simdata)) elif bins == 'sqrt': bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata)) elif isinstance(bins, int): bins = bins else: raise ValueError('Invalid bins argument in gof_plot') # Generate histogram hist(simdata, bins) # Plot options xlabel(name or 'Value', fontsize='x-small') ylabel("Frequency", fontsize='x-small') # Smaller tick labels tlabels = gca().get_xticklabels() setp(tlabels, 'fontsize', fontmap[1]) tlabels = gca().get_yticklabels() setp(tlabels, 'fontsize', fontmap[1]) # Plot vertical line at location of true data value axvline(x=trueval, linewidth=2, color='r', linestyle='dotted') if not os.path.exists(path): os.mkdir(path) if not path.endswith('/'): path += '/' # Save to file savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format))
[ "def", "gof_plot", "(", "simdata", ",", "trueval", ",", "name", "=", "None", ",", "bins", "=", "None", ",", "format", "=", "'png'", ",", "suffix", "=", "'-gof'", ",", "path", "=", "'./'", ",", "fontmap", "=", "None", ",", "verbose", "=", "0", ")", ":", "if", "fontmap", "is", "None", ":", "fontmap", "=", "{", "1", ":", "10", ",", "2", ":", "8", ",", "3", ":", "6", ",", "4", ":", "5", ",", "5", ":", "4", "}", "if", "not", "isinstance", "(", "simdata", ",", "ndarray", ")", ":", "## Can't just try and catch because ndarray objects also have", "## `trace` method.", "simdata", "=", "simdata", ".", "trace", "(", ")", "if", "ndim", "(", "trueval", ")", "==", "1", "and", "ndim", "(", "simdata", "==", "2", ")", ":", "# Iterate over more than one set of data", "for", "i", "in", "range", "(", "len", "(", "trueval", ")", ")", ":", "n", "=", "name", "or", "'MCMC'", "gof_plot", "(", "simdata", "[", ":", ",", "i", "]", ",", "trueval", "[", "i", "]", ",", "'%s[%i]'", "%", "(", "n", ",", "i", ")", ",", "bins", "=", "bins", ",", "format", "=", "format", ",", "suffix", "=", "suffix", ",", "path", "=", "path", ",", "fontmap", "=", "fontmap", ",", "verbose", "=", "verbose", ")", "return", "if", "verbose", ">", "0", ":", "print_", "(", "'Plotting'", ",", "(", "name", "or", "'MCMC'", ")", "+", "suffix", ")", "figure", "(", ")", "# Specify number of bins", "if", "bins", "is", "None", ":", "bins", "=", "'sqrt'", "uniquevals", "=", "len", "(", "unique", "(", "simdata", ")", ")", "if", "bins", "==", "'sturges'", ":", "bins", "=", "uniquevals", "*", "(", "uniquevals", "<=", "25", ")", "or", "_sturges", "(", "len", "(", "simdata", ")", ")", "elif", "bins", "==", "'doanes'", ":", "bins", "=", "uniquevals", "*", "(", "uniquevals", "<=", "25", ")", "or", "_doanes", "(", "simdata", ",", "len", "(", "simdata", ")", ")", "elif", "bins", "==", "'sqrt'", ":", "bins", "=", "uniquevals", "*", "(", "uniquevals", "<=", "25", ")", "or", "_sqrt_choice", "(", "len", "(", "simdata", ")", ")", "elif", "isinstance", "(", "bins", ",", "int", ")", ":", "bins", "=", "bins", "else", ":", "raise", "ValueError", "(", "'Invalid bins argument in gof_plot'", ")", "# Generate histogram", "hist", "(", "simdata", ",", "bins", ")", "# Plot options", "xlabel", "(", "name", "or", "'Value'", ",", "fontsize", "=", "'x-small'", ")", "ylabel", "(", "\"Frequency\"", ",", "fontsize", "=", "'x-small'", ")", "# Smaller tick labels", "tlabels", "=", "gca", "(", ")", ".", "get_xticklabels", "(", ")", "setp", "(", "tlabels", ",", "'fontsize'", ",", "fontmap", "[", "1", "]", ")", "tlabels", "=", "gca", "(", ")", ".", "get_yticklabels", "(", ")", "setp", "(", "tlabels", ",", "'fontsize'", ",", "fontmap", "[", "1", "]", ")", "# Plot vertical line at location of true data value", "axvline", "(", "x", "=", "trueval", ",", "linewidth", "=", "2", ",", "color", "=", "'r'", ",", "linestyle", "=", "'dotted'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "mkdir", "(", "path", ")", "if", "not", "path", ".", "endswith", "(", "'/'", ")", ":", "path", "+=", "'/'", "# Save to file", "savefig", "(", "\"%s%s%s.%s\"", "%", "(", "path", ",", "name", "or", "'MCMC'", ",", "suffix", ",", "format", ")", ")" ]
Plots histogram of replicated data, indicating the location of the observed data :Arguments: simdata: array or PyMC object Trace of simulated data or the PyMC stochastic object containing trace. trueval: numeric True (observed) value of the data bins: int or string The number of bins, or a preferred binning method. Available methods include 'doanes', 'sturges' and 'sqrt' (defaults to 'doanes'). format (optional): string Graphic output format (defaults to png). suffix (optional): string Filename suffix. path (optional): string Specifies location for saving plots (defaults to local directory). fontmap (optional): dict Font map for plot.
[ "Plots", "histogram", "of", "replicated", "data", "indicating", "the", "location", "of", "the", "observed", "data" ]
python
train
28.961905
erdewit/ib_insync
ib_insync/util.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/util.py#L343-L359
async def timeRangeAsync( start: datetime.time, end: datetime.time, step: float) -> AsyncIterator[datetime.datetime]: """ Async version of :meth:`timeRange`. """ assert step > 0 start = _fillDate(start) end = _fillDate(end) delta = datetime.timedelta(seconds=step) t = start while t < datetime.datetime.now(): t += delta while t <= end: await waitUntilAsync(t) yield t t += delta
[ "async", "def", "timeRangeAsync", "(", "start", ":", "datetime", ".", "time", ",", "end", ":", "datetime", ".", "time", ",", "step", ":", "float", ")", "->", "AsyncIterator", "[", "datetime", ".", "datetime", "]", ":", "assert", "step", ">", "0", "start", "=", "_fillDate", "(", "start", ")", "end", "=", "_fillDate", "(", "end", ")", "delta", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "step", ")", "t", "=", "start", "while", "t", "<", "datetime", ".", "datetime", ".", "now", "(", ")", ":", "t", "+=", "delta", "while", "t", "<=", "end", ":", "await", "waitUntilAsync", "(", "t", ")", "yield", "t", "t", "+=", "delta" ]
Async version of :meth:`timeRange`.
[ "Async", "version", "of", ":", "meth", ":", "timeRange", "." ]
python
train
26.529412
gwastro/pycbc-glue
pycbc_glue/iterutils.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/iterutils.py#L174-L190
def nonuniq(iterable): """ Yield the non-unique items of an iterable, preserving order. If an item occurs N > 0 times in the input sequence, it will occur N-1 times in the output sequence. Example: >>> x = nonuniq([0, 0, 2, 6, 2, 0, 5]) >>> list(x) [0, 2, 0] """ temp_dict = {} for e in iterable: if e in temp_dict: yield e temp_dict.setdefault(e, e)
[ "def", "nonuniq", "(", "iterable", ")", ":", "temp_dict", "=", "{", "}", "for", "e", "in", "iterable", ":", "if", "e", "in", "temp_dict", ":", "yield", "e", "temp_dict", ".", "setdefault", "(", "e", ",", "e", ")" ]
Yield the non-unique items of an iterable, preserving order. If an item occurs N > 0 times in the input sequence, it will occur N-1 times in the output sequence. Example: >>> x = nonuniq([0, 0, 2, 6, 2, 0, 5]) >>> list(x) [0, 2, 0]
[ "Yield", "the", "non", "-", "unique", "items", "of", "an", "iterable", "preserving", "order", ".", "If", "an", "item", "occurs", "N", ">", "0", "times", "in", "the", "input", "sequence", "it", "will", "occur", "N", "-", "1", "times", "in", "the", "output", "sequence", "." ]
python
train
20.882353
markfinger/python-js-host
js_host/manager.py
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/manager.py#L69-L83
def stop_host(self, config_file): """ Stops a managed host specified by `config_file`. """ res = self.send_json_request('host/stop', data={'config': config_file}) if res.status_code != 200: raise UnexpectedResponse( 'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format( res_code=res.status_code, res_text=res.text, ) ) return res.json()
[ "def", "stop_host", "(", "self", ",", "config_file", ")", ":", "res", "=", "self", ".", "send_json_request", "(", "'host/stop'", ",", "data", "=", "{", "'config'", ":", "config_file", "}", ")", "if", "res", ".", "status_code", "!=", "200", ":", "raise", "UnexpectedResponse", "(", "'Attempted to stop a JSHost. Response: {res_code}: {res_text}'", ".", "format", "(", "res_code", "=", "res", ".", "status_code", ",", "res_text", "=", "res", ".", "text", ",", ")", ")", "return", "res", ".", "json", "(", ")" ]
Stops a managed host specified by `config_file`.
[ "Stops", "a", "managed", "host", "specified", "by", "config_file", "." ]
python
train
32.333333
galactics/beyond
beyond/frames/stations.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/frames/stations.py#L135-L181
def create_station(name, latlonalt, parent_frame=WGS84, orientation='N', mask=None): """Create a ground station instance Args: name (str): Name of the station latlonalt (tuple of float): coordinates of the station, as follow: * Latitude in degrees * Longitude in degrees * Altitude to sea level in meters parent_frame (Frame): Planetocentric rotating frame of reference of coordinates. orientation (str or float): Heading of the station Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing. Second dimension is elevation. Both in radians Return: TopocentricFrame """ if isinstance(orientation, str): orient = {'N': np.pi, 'S': 0., 'E': np.pi / 2., 'W': 3 * np.pi / 2.} heading = orient[orientation] else: heading = orientation latlonalt = list(latlonalt) latlonalt[:2] = np.radians(latlonalt[:2]) coordinates = TopocentricFrame._geodetic_to_cartesian(*latlonalt) mtd = '_to_%s' % parent_frame.__name__ dct = { mtd: TopocentricFrame._to_parent_frame, 'latlonalt': latlonalt, 'coordinates': coordinates, 'parent_frame': parent_frame, 'heading': heading, 'orientation': orientation, 'mask': np.array(mask) if mask else None, } cls = _MetaFrame(name, (TopocentricFrame,), dct) cls + parent_frame return cls
[ "def", "create_station", "(", "name", ",", "latlonalt", ",", "parent_frame", "=", "WGS84", ",", "orientation", "=", "'N'", ",", "mask", "=", "None", ")", ":", "if", "isinstance", "(", "orientation", ",", "str", ")", ":", "orient", "=", "{", "'N'", ":", "np", ".", "pi", ",", "'S'", ":", "0.", ",", "'E'", ":", "np", ".", "pi", "/", "2.", ",", "'W'", ":", "3", "*", "np", ".", "pi", "/", "2.", "}", "heading", "=", "orient", "[", "orientation", "]", "else", ":", "heading", "=", "orientation", "latlonalt", "=", "list", "(", "latlonalt", ")", "latlonalt", "[", ":", "2", "]", "=", "np", ".", "radians", "(", "latlonalt", "[", ":", "2", "]", ")", "coordinates", "=", "TopocentricFrame", ".", "_geodetic_to_cartesian", "(", "*", "latlonalt", ")", "mtd", "=", "'_to_%s'", "%", "parent_frame", ".", "__name__", "dct", "=", "{", "mtd", ":", "TopocentricFrame", ".", "_to_parent_frame", ",", "'latlonalt'", ":", "latlonalt", ",", "'coordinates'", ":", "coordinates", ",", "'parent_frame'", ":", "parent_frame", ",", "'heading'", ":", "heading", ",", "'orientation'", ":", "orientation", ",", "'mask'", ":", "np", ".", "array", "(", "mask", ")", "if", "mask", "else", "None", ",", "}", "cls", "=", "_MetaFrame", "(", "name", ",", "(", "TopocentricFrame", ",", ")", ",", "dct", ")", "cls", "+", "parent_frame", "return", "cls" ]
Create a ground station instance Args: name (str): Name of the station latlonalt (tuple of float): coordinates of the station, as follow: * Latitude in degrees * Longitude in degrees * Altitude to sea level in meters parent_frame (Frame): Planetocentric rotating frame of reference of coordinates. orientation (str or float): Heading of the station Acceptable values are 'N', 'S', 'E', 'W' or any angle in radians mask: (2D array of float): First dimension is azimuth counterclockwise strictly increasing. Second dimension is elevation. Both in radians Return: TopocentricFrame
[ "Create", "a", "ground", "station", "instance" ]
python
train
32.510638
bokeh/bokeh
bokeh/document/events.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/document/events.py#L811-L820
def dispatch(self, receiver): ''' Dispatch handling of this event to a receiver. This method will invoke ``receiver._session_callback_removed`` if it exists. ''' super(SessionCallbackRemoved, self).dispatch(receiver) if hasattr(receiver, '_session_callback_removed'): receiver._session_callback_removed(self)
[ "def", "dispatch", "(", "self", ",", "receiver", ")", ":", "super", "(", "SessionCallbackRemoved", ",", "self", ")", ".", "dispatch", "(", "receiver", ")", "if", "hasattr", "(", "receiver", ",", "'_session_callback_removed'", ")", ":", "receiver", ".", "_session_callback_removed", "(", "self", ")" ]
Dispatch handling of this event to a receiver. This method will invoke ``receiver._session_callback_removed`` if it exists.
[ "Dispatch", "handling", "of", "this", "event", "to", "a", "receiver", "." ]
python
train
36.1
ralphje/imagemounter
imagemounter/disk.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/disk.py#L352-L358
def get_volumes(self): """Gets a list of all volumes in this disk, including volumes that are contained in other volumes.""" volumes = [] for v in self.volumes: volumes.extend(v.get_volumes()) return volumes
[ "def", "get_volumes", "(", "self", ")", ":", "volumes", "=", "[", "]", "for", "v", "in", "self", ".", "volumes", ":", "volumes", ".", "extend", "(", "v", ".", "get_volumes", "(", ")", ")", "return", "volumes" ]
Gets a list of all volumes in this disk, including volumes that are contained in other volumes.
[ "Gets", "a", "list", "of", "all", "volumes", "in", "this", "disk", "including", "volumes", "that", "are", "contained", "in", "other", "volumes", "." ]
python
train
35.142857
log2timeline/dfvfs
dfvfs/vfs/file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_entry.py#L497-L507
def IsFile(self): """Determines if the file entry is a file. Returns: bool: True if the file entry is a file. """ if self._stat_object is None: self._stat_object = self._GetStat() if self._stat_object is not None: self.entry_type = self._stat_object.type return self.entry_type == definitions.FILE_ENTRY_TYPE_FILE
[ "def", "IsFile", "(", "self", ")", ":", "if", "self", ".", "_stat_object", "is", "None", ":", "self", ".", "_stat_object", "=", "self", ".", "_GetStat", "(", ")", "if", "self", ".", "_stat_object", "is", "not", "None", ":", "self", ".", "entry_type", "=", "self", ".", "_stat_object", ".", "type", "return", "self", ".", "entry_type", "==", "definitions", ".", "FILE_ENTRY_TYPE_FILE" ]
Determines if the file entry is a file. Returns: bool: True if the file entry is a file.
[ "Determines", "if", "the", "file", "entry", "is", "a", "file", "." ]
python
train
31.454545
vkruoso/receita-tools
receita/tools/build.py
https://github.com/vkruoso/receita-tools/blob/fd62a252c76541c9feac6470b9048b31348ffe86/receita/tools/build.py#L144-L175
def run(self): """Reads data from disk and generates CSV files.""" # Try to create the directory if not os.path.exists(self.output): try: os.mkdir(self.output) except: print 'failed to create output directory %s' % self.output # Be sure it is a directory if not os.path.isdir(self.output): print 'invalid output directory %s' % self.output sys.exit(1) # Create the CSV handlers visitors = [ _CompaniesCSV(self.output), _ActivitiesCSV(self.output), _ActivitiesSeenCSV(self.output), _QSACSV(self.output), ] # Run by each company populating the CSV files for path in glob.glob(os.path.join(self.input, '*.json')): with open(path, 'r') as f: try: data = json.load(f, encoding='utf-8') except ValueError: continue for visitor in visitors: visitor.visit(data)
[ "def", "run", "(", "self", ")", ":", "# Try to create the directory", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "output", ")", ":", "try", ":", "os", ".", "mkdir", "(", "self", ".", "output", ")", "except", ":", "print", "'failed to create output directory %s'", "%", "self", ".", "output", "# Be sure it is a directory", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "output", ")", ":", "print", "'invalid output directory %s'", "%", "self", ".", "output", "sys", ".", "exit", "(", "1", ")", "# Create the CSV handlers", "visitors", "=", "[", "_CompaniesCSV", "(", "self", ".", "output", ")", ",", "_ActivitiesCSV", "(", "self", ".", "output", ")", ",", "_ActivitiesSeenCSV", "(", "self", ".", "output", ")", ",", "_QSACSV", "(", "self", ".", "output", ")", ",", "]", "# Run by each company populating the CSV files", "for", "path", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "input", ",", "'*.json'", ")", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "try", ":", "data", "=", "json", ".", "load", "(", "f", ",", "encoding", "=", "'utf-8'", ")", "except", "ValueError", ":", "continue", "for", "visitor", "in", "visitors", ":", "visitor", ".", "visit", "(", "data", ")" ]
Reads data from disk and generates CSV files.
[ "Reads", "data", "from", "disk", "and", "generates", "CSV", "files", "." ]
python
train
32.9375
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/python_breakpoint.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/python_breakpoint.py#L94-L102
def _StripCommonPathPrefix(paths): """Removes path common prefix from a list of path strings.""" # Find the longest common prefix in terms of characters. common_prefix = os.path.commonprefix(paths) # Truncate at last segment boundary. E.g. '/aa/bb1/x.py' and '/a/bb2/x.py' # have '/aa/bb' as the common prefix, but we should strip '/aa/' instead. # If there's no '/' found, returns -1+1=0. common_prefix_len = common_prefix.rfind('/') + 1 return [path[common_prefix_len:] for path in paths]
[ "def", "_StripCommonPathPrefix", "(", "paths", ")", ":", "# Find the longest common prefix in terms of characters.", "common_prefix", "=", "os", ".", "path", ".", "commonprefix", "(", "paths", ")", "# Truncate at last segment boundary. E.g. '/aa/bb1/x.py' and '/a/bb2/x.py'", "# have '/aa/bb' as the common prefix, but we should strip '/aa/' instead.", "# If there's no '/' found, returns -1+1=0.", "common_prefix_len", "=", "common_prefix", ".", "rfind", "(", "'/'", ")", "+", "1", "return", "[", "path", "[", "common_prefix_len", ":", "]", "for", "path", "in", "paths", "]" ]
Removes path common prefix from a list of path strings.
[ "Removes", "path", "common", "prefix", "from", "a", "list", "of", "path", "strings", "." ]
python
train
55.333333
Danielhiversen/flux_led
flux_led/__main__.py
https://github.com/Danielhiversen/flux_led/blob/13e87e06ff7589356c83e084a6be768ad1290557/flux_led/__main__.py#L544-L554
def brightness(self): """Return current brightness 0-255. For warm white return current led level. For RGB calculate the HSV and return the 'value'. """ if self.mode == "ww": return int(self.raw_state[9]) else: _, _, v = colorsys.rgb_to_hsv(*self.getRgb()) return v
[ "def", "brightness", "(", "self", ")", ":", "if", "self", ".", "mode", "==", "\"ww\"", ":", "return", "int", "(", "self", ".", "raw_state", "[", "9", "]", ")", "else", ":", "_", ",", "_", ",", "v", "=", "colorsys", ".", "rgb_to_hsv", "(", "*", "self", ".", "getRgb", "(", ")", ")", "return", "v" ]
Return current brightness 0-255. For warm white return current led level. For RGB calculate the HSV and return the 'value'.
[ "Return", "current", "brightness", "0", "-", "255", "." ]
python
train
30.909091
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1841-L1863
def rmultivariate_hypergeometric(n, m, size=None): """ Random multivariate hypergeometric variates. Parameters: - `n` : Number of draws. - `m` : Number of items in each categoy. """ N = len(m) urn = np.repeat(np.arange(N), m) if size: draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]] for j in range(size)]) r = [[np.sum(draw[j] == i) for i in range(len(m))] for j in range(size)] else: draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]]) r = [np.sum(draw == i) for i in range(len(m))] return np.asarray(r)
[ "def", "rmultivariate_hypergeometric", "(", "n", ",", "m", ",", "size", "=", "None", ")", ":", "N", "=", "len", "(", "m", ")", "urn", "=", "np", ".", "repeat", "(", "np", ".", "arange", "(", "N", ")", ",", "m", ")", "if", "size", ":", "draw", "=", "np", ".", "array", "(", "[", "[", "urn", "[", "i", "]", "for", "i", "in", "np", ".", "random", ".", "permutation", "(", "len", "(", "urn", ")", ")", "[", ":", "n", "]", "]", "for", "j", "in", "range", "(", "size", ")", "]", ")", "r", "=", "[", "[", "np", ".", "sum", "(", "draw", "[", "j", "]", "==", "i", ")", "for", "i", "in", "range", "(", "len", "(", "m", ")", ")", "]", "for", "j", "in", "range", "(", "size", ")", "]", "else", ":", "draw", "=", "np", ".", "array", "(", "[", "urn", "[", "i", "]", "for", "i", "in", "np", ".", "random", ".", "permutation", "(", "len", "(", "urn", ")", ")", "[", ":", "n", "]", "]", ")", "r", "=", "[", "np", ".", "sum", "(", "draw", "==", "i", ")", "for", "i", "in", "range", "(", "len", "(", "m", ")", ")", "]", "return", "np", ".", "asarray", "(", "r", ")" ]
Random multivariate hypergeometric variates.

Parameters:
  - `n` : Number of draws.
  - `m` : Number of items in each category.
[ "Random", "multivariate", "hypergeometric", "variates", "." ]
python
train
28.130435
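A usage sketch for rmultivariate_hypergeometric, assuming numpy is imported as np in the module (as the body implies); the draws are random, so only their invariants are noted:

import numpy as np

m = [3, 4, 3]                                       # items per category
one = rmultivariate_hypergeometric(5, m)            # e.g. array([2, 2, 1]); always sums to 5
many = rmultivariate_hypergeometric(5, m, size=2)   # two independent draws
print(one.sum(), many.shape)                        # 5 (2, 3)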
lebinh/aq
aq/engines.py
https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L151-L167
def convert_tags_to_dict(item):
    """
    Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs
    to a dict of {<key>: <value>} for easier querying.

    This returns a proxied object over given item to return a different tags format
    as the tags attribute is read-only and we cannot modify it directly.
    """
    if hasattr(item, 'tags'):
        tags = item.tags
        if isinstance(tags, list):
            tags_dict = {}
            for kv_dict in tags:
                if isinstance(kv_dict, dict) and 'Key' in kv_dict and 'Value' in kv_dict:
                    tags_dict[kv_dict['Key']] = kv_dict['Value']
            return ObjectProxy(item, tags=tags_dict)
    return item
[ "def", "convert_tags_to_dict", "(", "item", ")", ":", "if", "hasattr", "(", "item", ",", "'tags'", ")", ":", "tags", "=", "item", ".", "tags", "if", "isinstance", "(", "tags", ",", "list", ")", ":", "tags_dict", "=", "{", "}", "for", "kv_dict", "in", "tags", ":", "if", "isinstance", "(", "kv_dict", ",", "dict", ")", "and", "'Key'", "in", "kv_dict", "and", "'Value'", "in", "kv_dict", ":", "tags_dict", "[", "kv_dict", "[", "'Key'", "]", "]", "=", "kv_dict", "[", "'Value'", "]", "return", "ObjectProxy", "(", "item", ",", "tags", "=", "tags_dict", ")", "return", "item" ]
Convert AWS inconvenient tags model of a list of {"Key": <key>, "Value": <value>} pairs to a dict of {<key>: <value>} for easier querying. This returns a proxied object over given item to return a different tags format as the tags attribute is read-only and we cannot modify it directly.
[ "Convert", "AWS", "inconvenient", "tags", "model", "of", "a", "list", "of", "{", "Key", ":", "<key", ">", "Value", ":", "<value", ">", "}", "pairs", "to", "a", "dict", "of", "{", "<key", ">", ":", "<value", ">", "}", "for", "easier", "querying", "." ]
python
train
41.764706
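A hypothetical sketch for convert_tags_to_dict; ObjectProxy comes from the same aq module and is assumed to expose the overridden tags attribute, and SimpleNamespace stands in for a boto3 resource:

from types import SimpleNamespace

instance = SimpleNamespace(tags=[{'Key': 'Name', 'Value': 'web-1'},
                                 {'Key': 'env', 'Value': 'prod'}])
proxied = convert_tags_to_dict(instance)
print(proxied.tags)   # {'Name': 'web-1', 'env': 'prod'}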
partofthething/ace
ace/smoother.py
https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L54-L57
def add_data_point_xy(self, x, y):
    """Add a new data point to the data set to be smoothed."""
    self.x.append(x)
    self.y.append(y)
[ "def", "add_data_point_xy", "(", "self", ",", "x", ",", "y", ")", ":", "self", ".", "x", ".", "append", "(", "x", ")", "self", ".", "y", ".", "append", "(", "y", ")" ]
Add a new data point to the data set to be smoothed.
[ "Add", "a", "new", "data", "point", "to", "the", "data", "set", "to", "be", "smoothed", "." ]
python
train
37
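A minimal sketch for add_data_point_xy; _Demo is a stand-in for the real Smoother class, which is assumed to initialise x and y as lists:

class _Demo:                        # stand-in for a Smoother instance
    def __init__(self):
        self.x, self.y = [], []

s = _Demo()
add_data_point_xy(s, 0.5, 1.2)      # call the method above as a plain function
print(s.x, s.y)                     # [0.5] [1.2]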
tensorflow/datasets
tensorflow_datasets/core/lazy_imports.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/lazy_imports.py#L27-L36
def _try_import(module_name):
  """Try importing a module, with an informative error message on failure."""
  try:
    mod = importlib.import_module(module_name)
    return mod
  except ImportError:
    err_msg = ("Tried importing %s but failed. See setup.py extras_require. "
               "The dataset you are trying to use may have additional "
               "dependencies.")
    utils.reraise(err_msg)
[ "def", "_try_import", "(", "module_name", ")", ":", "try", ":", "mod", "=", "importlib", ".", "import_module", "(", "module_name", ")", "return", "mod", "except", "ImportError", ":", "err_msg", "=", "(", "\"Tried importing %s but failed. See setup.py extras_require. \"", "\"The dataset you are trying to use may have additional \"", "\"dependencies.\"", ")", "utils", ".", "reraise", "(", "err_msg", ")" ]
Try importing a module, with an informative error message on failure.
[ "Try", "importing", "a", "module", "with", "an", "informative", "error", "message", "on", "failure", "." ]
python
train
39.8
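A sketch of the success path of _try_import; the failure path hands err_msg (note the unformatted %s placeholder) to utils.reraise and therefore needs the surrounding module:

json_mod = _try_import("json")        # stdlib module, so the import succeeds
print(json_mod.dumps({"ok": True}))   # {"ok": true}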
pkgw/pwkit
pwkit/synphot.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/synphot.py#L887-L903
def _load_data(self, band):
    """In-flight effective areas for the Swift UVOT, as obtained from the CALDB.
    See Breeveld+ 2011. XXX: confirm that these are equal-energy, not
    quantum-efficiency.

    """
    d = bandpass_data_fits('sw' + self._band_map[band] + '_20041120v106.arf')[1].data

    # note:
    # data.WAVE_MIN[i] < data.WAVE_MIN[i+1], but
    # data.WAVE_MIN[i] > data.WAVE_MAX[i] (!)
    # data.WAVE_MIN[i] = data.WAVE_MAX[i+1] (!)

    wmid = 0.5 * (d.WAVE_MIN + d.WAVE_MAX)  # in Ångström
    df = pd.DataFrame({'wlen': wmid, 'resp': d.SPECRESP,
                       'wlo': d.WAVE_MAX, 'whi': d.WAVE_MIN})
    return df
[ "def", "_load_data", "(", "self", ",", "band", ")", ":", "d", "=", "bandpass_data_fits", "(", "'sw'", "+", "self", ".", "_band_map", "[", "band", "]", "+", "'_20041120v106.arf'", ")", "[", "1", "]", ".", "data", "# note:", "# data.WAVE_MIN[i] < data.WAVE_MIN[i+1], but", "# data.WAVE_MIN[i] > data.WAVE_MAX[i] (!)", "# data.WAVE_MIN[i] = data.WAVE_MAX[i+1] (!)", "wmid", "=", "0.5", "*", "(", "d", ".", "WAVE_MIN", "+", "d", ".", "WAVE_MAX", ")", "# in Ångström", "df", "=", "pd", ".", "DataFrame", "(", "{", "'wlen'", ":", "wmid", ",", "'resp'", ":", "d", ".", "SPECRESP", ",", "'wlo'", ":", "d", ".", "WAVE_MAX", ",", "'whi'", ":", "d", ".", "WAVE_MIN", "}", ")", "return", "df" ]
In-flight effective areas for the Swift UVOT, as obtained from the CALDB. See Breeveld+ 2011. XXX: confirm that these are equal-energy, not quantum-efficiency.
[ "In", "-", "flight", "effective", "areas", "for", "the", "Swift", "UVOT", "as", "obtained", "from", "the", "CALDB", ".", "See", "Breeveld", "+", "2011", ".", "XXX", ":", "confirm", "that", "these", "are", "equal", "-", "energy", "not", "quantum", "-", "efficiency", "." ]
python
train
40.352941
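_load_data needs the CALDB response file shipped with pwkit, so this sketch only replays the midpoint and column logic on made-up numbers that respect the WAVE_MIN/WAVE_MAX ordering noted in the comments:

import numpy as np
import pandas as pd

wave_min = np.array([2000.0, 2010.0])   # upper bin edges, per the note above
wave_max = np.array([1990.0, 2000.0])   # lower bin edges
wmid = 0.5 * (wave_min + wave_max)      # [1995., 2005.] Angstrom
df = pd.DataFrame({'wlen': wmid, 'resp': [0.10, 0.12],
                   'wlo': wave_max, 'whi': wave_min})
print(df)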
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L4581-L4600
def vdm_b(vdm, lat):
    """
    Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
    (VADM; input in units of Am^2) to a local magnetic field value (output in
    units of tesla)

    Parameters
    ----------
    vdm : V(A)DM in units of Am^2
    lat: latitude of site in degrees

    Returns
    -------
    B: local magnetic field strength in tesla
    """
    rad = old_div(np.pi, 180.)
    # changed radius of the earth from 3.367e6 3/12/2010
    fact = ((6.371e6)**3) * 1e7
    colat = (90. - lat) * rad
    return vdm * (np.sqrt(1 + 3 * (np.cos(colat)**2))) / fact
[ "def", "vdm_b", "(", "vdm", ",", "lat", ")", ":", "rad", "=", "old_div", "(", "np", ".", "pi", ",", "180.", ")", "# changed radius of the earth from 3.367e6 3/12/2010", "fact", "=", "(", "(", "6.371e6", ")", "**", "3", ")", "*", "1e7", "colat", "=", "(", "90.", "-", "lat", ")", "*", "rad", "return", "vdm", "*", "(", "np", ".", "sqrt", "(", "1", "+", "3", "*", "(", "np", ".", "cos", "(", "colat", ")", "**", "2", ")", ")", ")", "/", "fact" ]
Converts a virtual dipole moment (VDM) or a virtual axial dipole moment
(VADM; input in units of Am^2) to a local magnetic field value (output in
units of tesla)

Parameters
----------
vdm : V(A)DM in units of Am^2
lat: latitude of site in degrees

Returns
-------
B: local magnetic field strength in tesla
[ "Converts", "a", "virtual", "dipole", "moment", "(", "VDM", ")", "or", "a", "virtual", "axial", "dipole", "moment", "(", "VADM", ";", "input", "in", "units", "of", "Am^2", ")", "to", "a", "local", "magnetic", "field", "value", "(", "output", "in", "units", "of", "tesla", ")" ]
python
train
28.8
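A sketch of vdm_b with Earth-like numbers; old_div is assumed to come from past.utils, as used elsewhere in pmag.py:

import numpy as np
from past.utils import old_div   # assumption about the module's helper import

print(vdm_b(8e22, 45.0))   # ~4.9e-05 tesla, i.e. roughly 49 microtesla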
dead-beef/markovchain
markovchain/image/traversal.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/image/traversal.py#L228-L269
def _rspiral(width, height):
    """Reversed spiral generator.

    Parameters
    ----------
    width : `int`
        Spiral width.
    height : `int`
        Spiral height.

    Returns
    -------
    `generator` of (`int`, `int`)
        Points.
    """
    x0 = 0
    y0 = 0
    x1 = width - 1
    y1 = height - 1
    while x0 < x1 and y0 < y1:
        for x in range(x0, x1):
            yield x, y0
        for y in range(y0, y1):
            yield x1, y
        for x in range(x1, x0, -1):
            yield x, y1
        for y in range(y1, y0, -1):
            yield x0, y
        x0 += 1
        y0 += 1
        x1 -= 1
        y1 -= 1
    if x0 == x1:
        for y in range(y0, y1 + 1):
            yield x0, y
    elif y0 == y1:
        for x in range(x0, x1 + 1):
            yield x, y0
[ "def", "_rspiral", "(", "width", ",", "height", ")", ":", "x0", "=", "0", "y0", "=", "0", "x1", "=", "width", "-", "1", "y1", "=", "height", "-", "1", "while", "x0", "<", "x1", "and", "y0", "<", "y1", ":", "for", "x", "in", "range", "(", "x0", ",", "x1", ")", ":", "yield", "x", ",", "y0", "for", "y", "in", "range", "(", "y0", ",", "y1", ")", ":", "yield", "x1", ",", "y", "for", "x", "in", "range", "(", "x1", ",", "x0", ",", "-", "1", ")", ":", "yield", "x", ",", "y1", "for", "y", "in", "range", "(", "y1", ",", "y0", ",", "-", "1", ")", ":", "yield", "x0", ",", "y", "x0", "+=", "1", "y0", "+=", "1", "x1", "-=", "1", "y1", "-=", "1", "if", "x0", "==", "x1", ":", "for", "y", "in", "range", "(", "y0", ",", "y1", "+", "1", ")", ":", "yield", "x0", ",", "y", "elif", "y0", "==", "y1", ":", "for", "x", "in", "range", "(", "x0", ",", "x1", "+", "1", ")", ":", "yield", "x", ",", "y0" ]
Reversed spiral generator.

Parameters
----------
width : `int`
    Spiral width.
height : `int`
    Spiral height.

Returns
-------
`generator` of (`int`, `int`)
    Points.
[ "Reversed", "spiral", "generator", "." ]
python
train
21.547619
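A quick check of the traversal order of _rspiral on a 3x3 grid (the generator walks the outer ring first and finishes in the centre):

print(list(_rspiral(3, 3)))
# [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1), (1, 1)]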
rigetti/quantumflow
quantumflow/qubits.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/qubits.py#L248-L277
def outer_product(vec0: QubitVector, vec1: QubitVector) -> QubitVector:
    """Direct product of qubit vectors

    The tensor ranks must match and qubits must be disjoint.
    """
    R = vec0.rank
    R1 = vec1.rank
    N0 = vec0.qubit_nb
    N1 = vec1.qubit_nb

    if R != R1:
        raise ValueError('Incompatible vectors. Rank must match')

    if not set(vec0.qubits).isdisjoint(vec1.qubits):
        raise ValueError('Overlapping qubits')

    qubits: Qubits = tuple(vec0.qubits) + tuple(vec1.qubits)
    tensor = bk.outer(vec0.tensor, vec1.tensor)

    # Interleave (super)-operator axes
    # R = 1 perm = (0, 1)
    # R = 2 perm = (0, 2, 1, 3)
    # R = 4 perm = (0, 4, 1, 5, 2, 6, 3, 7)
    tensor = bk.reshape(tensor, ([2**N0] * R) + ([2**N1] * R))
    perm = [idx for ij in zip(range(0, R), range(R, 2*R)) for idx in ij]
    tensor = bk.transpose(tensor, perm)

    return QubitVector(tensor, qubits)
[ "def", "outer_product", "(", "vec0", ":", "QubitVector", ",", "vec1", ":", "QubitVector", ")", "->", "QubitVector", ":", "R", "=", "vec0", ".", "rank", "R1", "=", "vec1", ".", "rank", "N0", "=", "vec0", ".", "qubit_nb", "N1", "=", "vec1", ".", "qubit_nb", "if", "R", "!=", "R1", ":", "raise", "ValueError", "(", "'Incompatibly vectors. Rank must match'", ")", "if", "not", "set", "(", "vec0", ".", "qubits", ")", ".", "isdisjoint", "(", "vec1", ".", "qubits", ")", ":", "raise", "ValueError", "(", "'Overlapping qubits'", ")", "qubits", ":", "Qubits", "=", "tuple", "(", "vec0", ".", "qubits", ")", "+", "tuple", "(", "vec1", ".", "qubits", ")", "tensor", "=", "bk", ".", "outer", "(", "vec0", ".", "tensor", ",", "vec1", ".", "tensor", ")", "# Interleave (super)-operator axes", "# R = 1 perm = (0, 1)", "# R = 2 perm = (0, 2, 1, 3)", "# R = 4 perm = (0, 4, 1, 5, 2, 6, 3, 7)", "tensor", "=", "bk", ".", "reshape", "(", "tensor", ",", "(", "[", "2", "**", "N0", "]", "*", "R", ")", "+", "(", "[", "2", "**", "N1", "]", "*", "R", ")", ")", "perm", "=", "[", "idx", "for", "ij", "in", "zip", "(", "range", "(", "0", ",", "R", ")", ",", "range", "(", "R", ",", "2", "*", "R", ")", ")", "for", "idx", "in", "ij", "]", "tensor", "=", "bk", ".", "transpose", "(", "tensor", ",", "perm", ")", "return", "QubitVector", "(", "tensor", ",", "qubits", ")" ]
Direct product of qubit vectors

The tensor ranks must match and qubits must be disjoint.
[ "Direct", "product", "of", "qubit", "vectors" ]
python
train
29.7
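A hypothetical sketch for outer_product; it assumes QubitVector accepts a flat numpy array, consistent with the constructor call in the body, and uses rank-1 state vectors on disjoint qubits:

import numpy as np

ket0 = QubitVector(np.array([1.0, 0.0]), [0])   # |0> on qubit 0
ket1 = QubitVector(np.array([0.0, 1.0]), [1])   # |1> on qubit 1
joint = outer_product(ket0, ket1)
print(joint.qubits)   # (0, 1)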
tanghaibao/goatools
goatools/grouper/grprplt.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprplt.py#L77-L103
def _get_kws_plt(self, usrgos, **kws_usr):
    """Add go2color and go2bordercolor relevant to this grouping into plot."""
    kws_plt = kws_usr.copy()
    kws_dag = {}
    hdrgo = kws_plt.get('hdrgo', None)
    objcolor = GrouperColors(self.grprobj)
    # GO term colors
    if 'go2color' not in kws_usr:
        kws_plt['go2color'] = objcolor.get_go2color_users()
    elif hdrgo is not None:
        go2color = kws_plt.get('go2color').copy()
        go2color[hdrgo] = PltGroupedGosArgs.hdrgo_dflt_color
        kws_plt['go2color'] = go2color
    # GO term border colors
    if 'go2bordercolor' not in kws_usr:
        kws_plt['go2bordercolor'] = objcolor.get_bordercolor()
    prune = kws_usr.get('prune', None)
    if prune is True and hdrgo is not None:
        kws_dag['dst_srcs_list'] = [(hdrgo, usrgos), (None, set([hdrgo]))]
        kws_plt['parentcnt'] = True
    elif prune:
        kws_dag['dst_srcs_list'] = prune
        kws_plt['parentcnt'] = True
    # Group text
    kws_plt['go2txt'] = self.get_go2txt(self.grprobj,
                                        kws_plt.get('go2color'),
                                        kws_plt.get('go2bordercolor'))
    return kws_plt, kws_dag
[ "def", "_get_kws_plt", "(", "self", ",", "usrgos", ",", "*", "*", "kws_usr", ")", ":", "kws_plt", "=", "kws_usr", ".", "copy", "(", ")", "kws_dag", "=", "{", "}", "hdrgo", "=", "kws_plt", ".", "get", "(", "'hdrgo'", ",", "None", ")", "objcolor", "=", "GrouperColors", "(", "self", ".", "grprobj", ")", "# GO term colors", "if", "'go2color'", "not", "in", "kws_usr", ":", "kws_plt", "[", "'go2color'", "]", "=", "objcolor", ".", "get_go2color_users", "(", ")", "elif", "hdrgo", "is", "not", "None", ":", "go2color", "=", "kws_plt", ".", "get", "(", "'go2color'", ")", ".", "copy", "(", ")", "go2color", "[", "hdrgo", "]", "=", "PltGroupedGosArgs", ".", "hdrgo_dflt_color", "kws_plt", "[", "'go2color'", "]", "=", "go2color", "# GO term border colors", "if", "'go2bordercolor'", "not", "in", "kws_usr", ":", "kws_plt", "[", "'go2bordercolor'", "]", "=", "objcolor", ".", "get_bordercolor", "(", ")", "prune", "=", "kws_usr", ".", "get", "(", "'prune'", ",", "None", ")", "if", "prune", "is", "True", "and", "hdrgo", "is", "not", "None", ":", "kws_dag", "[", "'dst_srcs_list'", "]", "=", "[", "(", "hdrgo", ",", "usrgos", ")", ",", "(", "None", ",", "set", "(", "[", "hdrgo", "]", ")", ")", "]", "kws_plt", "[", "'parentcnt'", "]", "=", "True", "elif", "prune", ":", "kws_dag", "[", "'dst_srcs_list'", "]", "=", "prune", "kws_plt", "[", "'parentcnt'", "]", "=", "True", "# Group text", "kws_plt", "[", "'go2txt'", "]", "=", "self", ".", "get_go2txt", "(", "self", ".", "grprobj", ",", "kws_plt", ".", "get", "(", "'go2color'", ")", ",", "kws_plt", ".", "get", "(", "'go2bordercolor'", ")", ")", "return", "kws_plt", ",", "kws_dag" ]
Add go2color and go2bordercolor relevant to this grouping into plot.
[ "Add", "go2color", "and", "go2bordercolor", "relevant", "to", "this", "grouping", "into", "plot", "." ]
python
train
45.666667
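Only the call shape of _get_kws_plt is sketched here, since self.grprobj and GrouperColors need a full goatools grouping setup; obj and the GO IDs are placeholders:

usrgos = {'GO:0008150'}                                    # hypothetical user GO terms
kws_plt, kws_dag = obj._get_kws_plt(usrgos, prune=True, hdrgo='GO:0008150')
# kws_plt now carries go2color, go2bordercolor and go2txt; with prune=True and a
# hdrgo, kws_dag carries dst_srcs_list and kws_plt['parentcnt'] is set to True.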
neithere/argh
argh/io.py
https://github.com/neithere/argh/blob/dcd3253f2994400a6a58a700c118c53765bc50a4/argh/io.py#L32-L47
def safe_input(prompt):
    """
    Prompts user for input. Correctly handles prompt message encoding.
    """
    if sys.version_info < (3,0):
        if isinstance(prompt, compat.text_type):
            # Python 2.x: unicode → bytes
            encoding = locale.getpreferredencoding() or 'utf-8'
            prompt = prompt.encode(encoding)
    else:
        if not isinstance(prompt, compat.text_type):
            # Python 3.x: bytes → unicode
            prompt = prompt.decode()
    return _input(prompt)
[ "def", "safe_input", "(", "prompt", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "if", "isinstance", "(", "prompt", ",", "compat", ".", "text_type", ")", ":", "# Python 2.x: unicode → bytes", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "or", "'utf-8'", "prompt", "=", "prompt", ".", "encode", "(", "encoding", ")", "else", ":", "if", "not", "isinstance", "(", "prompt", ",", "compat", ".", "text_type", ")", ":", "# Python 3.x: bytes → unicode", "prompt", "=", "prompt", ".", "decode", "(", ")", "return", "_input", "(", "prompt", ")" ]
Prompts user for input. Correctly handles prompt message encoding.
[ "Prompts", "user", "for", "input", ".", "Correctly", "handles", "prompt", "message", "encoding", "." ]
python
test
31.25
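A minimal interactive sketch for safe_input; on Python 3 it behaves like input() with a text prompt, while on Python 2 the prompt is encoded first:

name = safe_input('Your name: ')
print('Hello, %s' % name)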