Dataset schema: each record below lists these fields in order.

repo           string (7-55 chars)
path           string (4-223 chars)
url            string (87-315 chars)
code           string (75-104k chars)
language       string (1 distinct value)
partition      string (3 distinct values)
avg_line_len   float64 (7.91-980)
dbrattli/OSlash
oslash/observable.py
https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/observable.py#L47-L53
def bind(self, fn: Callable[[Any], 'Observable']) -> 'Observable':
    r"""Chain continuation passing functions.

    Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
    """
    source = self
    return Observable(lambda on_next: source.subscribe(
        lambda a: fn(a).subscribe(on_next)))
python
train
45
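A minimal self-contained sketch of the continuation-passing `bind` pattern above. This stand-in `Observable` only models the `subscribe` interface the method relies on, and `unit` is a hypothetical helper, not part of the OSlash API shown here.

from typing import Any, Callable

class Observable:
    def __init__(self, subscribe: Callable[[Callable[[Any], None]], None]):
        self.subscribe = subscribe

    def bind(self, fn: 'Callable[[Any], Observable]') -> 'Observable':
        source = self
        return Observable(lambda on_next: source.subscribe(
            lambda a: fn(a).subscribe(on_next)))

def unit(value: Any) -> Observable:
    # Lift a plain value: subscribing simply delivers the value.
    return Observable(lambda on_next: on_next(value))

# Chain two continuation-passing steps: 21 -> 42 -> '42'
unit(21).bind(lambda x: unit(x * 2)).bind(lambda x: unit(str(x))).subscribe(print)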
astropy/photutils
photutils/segmentation/properties.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1061-L1078
def eccentricity(self):
    """
    The eccentricity of the 2D Gaussian function that has the same
    second-order moments as the source.

    The eccentricity is the fraction of the distance along the
    semimajor axis at which the focus lies.

    .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}}

    where :math:`a` and :math:`b` are the lengths of the semimajor and
    semiminor axes, respectively.
    """
    l1, l2 = self.covariance_eigvals
    if l1 == 0:
        return 0.  # pragma: no cover
    return np.sqrt(1. - (l2 / l1))
python
train
31.666667
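A quick numeric check of the formula above: with covariance eigenvalues l1 = 4 and l2 = 1 (axis lengths in ratio a : b = 2 : 1), the eccentricity should be sqrt(1 - 1/4) = sqrt(3)/2.

import numpy as np

l1, l2 = 4.0, 1.0            # eigenvalues, proportional to a**2 and b**2
e = np.sqrt(1. - (l2 / l1))  # e = sqrt(1 - b^2/a^2)
print(e)                     # 0.8660254..., i.e. sqrt(3)/2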
the01/python-floscraper
floscraper/webscraper.py
https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/webscraper.py#L642-L655
def shrink(self, shrink):
    """
    Remove unnecessary parts

    :param shrink: Object to shrink
    :type shrink: dict | list
    :return: Shrunk object
    :rtype: dict | list
    """
    if isinstance(shrink, list):
        return self._shrink_list(shrink)
    if isinstance(shrink, dict):
        return self._shrink_dict(shrink)
    return shrink
python
train
27.857143
googledatalab/pydatalab
google/datalab/ml/_job.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_job.py#L119-L151
def submit_batch_prediction(job_request, job_id=None):
    """Submit a batch prediction job.

    Args:
      job_request: the arguments of the batch prediction job in a dict. For example,
          {
            'version_name': 'projects/my-project/models/my-model/versions/my-version',
            'data_format': 'TEXT',
            'input_paths': ['gs://my_bucket/my_file.csv'],
            'output_path': 'gs://my_bucket/predict_output',
            'region': 'us-central1',
            'max_worker_count': 1,
          }
      job_id: id for the prediction job. If None, an id based on timestamp will be generated.

    Returns:
      A Job object representing the batch prediction job.
    """
    if job_id is None:
        job_id = 'prediction_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
    job = {
        'job_id': job_id,
        'prediction_input': job_request,
    }
    context = datalab.Context.default()
    cloudml = discovery.build('ml', 'v1', credentials=context.credentials)
    request = cloudml.projects().jobs().create(
        body=job, parent='projects/' + context.project_id)
    request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
    request.execute()
    return Job(job_id)
python
train
36.787879
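A usage sketch for the function above, reusing the request shape from its docstring; bucket, project, and model names are placeholders.

job_request = {
    'version_name': 'projects/my-project/models/my-model/versions/my-version',
    'data_format': 'TEXT',
    'input_paths': ['gs://my_bucket/my_file.csv'],
    'output_path': 'gs://my_bucket/predict_output',
    'region': 'us-central1',
    'max_worker_count': 1,
}
# With job_id=None an id such as 'prediction_240101_120000' is derived
# from the current timestamp.
job = submit_batch_prediction(job_request)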
chemlab/chemlab
chemlab/mviewer/api/selections.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/selections.py#L54-L69
def select_connected_bonds():
    '''Select the bonds connected to the currently selected atoms.'''
    s = current_system()
    start, end = s.bonds.transpose()
    selected = np.zeros(s.n_bonds, 'bool')
    for i in selected_atoms():
        selected |= (i == start) | (i == end)

    csel = current_selection()
    bsel = csel['bonds'].add(Selection(selected.nonzero()[0], s.n_bonds))

    ret = csel.copy()
    ret['bonds'] = bsel
    return select_selection(ret)
python
train
29.125
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5943-L5953
def setActionManifestPath(self, pchActionManifestPath):
    """
    Sets the path to the action manifest JSON file that is used by this application. If this information
    was set on the Steam partner site, calls to this function are ignored. If the Steam partner site
    setting and the path provided by this call are different, VRInputError_MismatchedActionManifest is returned.
    This call must be made before the first call to UpdateActionState or IVRSystem::PollNextEvent.
    """
    fn = self.function_table.setActionManifestPath
    result = fn(pchActionManifestPath)
    return result
python
train
56.818182
yunojuno/elasticsearch-django
elasticsearch_django/settings.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/settings.py#L29-L34
def get_setting(key, *default):
    """Return specific search setting from Django conf."""
    if default:
        return get_settings().get(key, default[0])
    else:
        return get_settings()[key]
python
train
32.833333
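The `*default` varargs above lets callers distinguish "no default supplied" (a missing key raises KeyError) from "a default was supplied, even if it is None". A self-contained sketch of the same pattern:

def lookup(settings, key, *default):
    if default:
        return settings.get(key, default[0])
    return settings[key]  # no default given: a missing key raises KeyError

conf = {'index': 'blog'}
print(lookup(conf, 'index'))          # 'blog'
print(lookup(conf, 'missing', None))  # None (explicit default)
# lookup(conf, 'missing')             # would raise KeyError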
XuShaohua/bcloud
bcloud/gutil.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/gutil.py#L329-L340
def tree_model_natsort(model, row1, row2, user_data=None):
    '''Sort a TreeModel column using the natural sorting algorithm.'''
    sort_column, sort_type = model.get_sort_column_id()
    value1 = model.get_value(row1, sort_column)
    value2 = model.get_value(row2, sort_column)
    sort_list1 = util.natsort(value1)
    sort_list2 = util.natsort(value2)
    if sort_list1 < sort_list2:
        return -1
    elif sort_list1 > sort_list2:
        return 1
    else:
        return 0
python
train
36.75
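The comparator above depends on `util.natsort` splitting strings into text and number runs so that numeric parts compare numerically. A minimal sketch of what such a key function might look like (an assumption about `util.natsort`'s behavior, not its actual code):

import re

def natsort(value):
    # Split into digit/non-digit runs; digit runs compare as integers.
    return [int(part) if part.isdigit() else part
            for part in re.split(r'(\d+)', value)]

names = ['file10', 'file2', 'file1']
print(sorted(names))               # ['file1', 'file10', 'file2'] (lexicographic)
print(sorted(names, key=natsort))  # ['file1', 'file2', 'file10'] (natural)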
dougalsutherland/skl-groups
skl_groups/kernels/transform.py
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L259-L290
def fit(self, X, y=None):
    '''
    Learn the linear transformation to clipped eigenvalues.

    Note that if min_eig isn't zero and any of the original eigenvalues
    were exactly zero, this will leave those eigenvalues as zero.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    # TODO: only get negative eigs somehow?
    memory = get_memory(self.memory)
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    vals = vals.reshape(-1, 1)

    if self.min_eig == 0:
        inner = vals > self.min_eig
    else:
        with np.errstate(divide='ignore'):
            inner = np.where(vals >= self.min_eig, 1,
                             np.where(vals == 0, 0, self.min_eig / vals))

    self.clip_ = np.dot(vecs, inner * vecs.T)
    return self
python
valid
36.1875
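A small self-contained illustration of the eigenvalue-clipping idea in `fit` above: eigendecompose a symmetric matrix, floor the eigenvalues, and rebuild. This simplified sketch uses a plain floor at zero rather than the `min_eig / vals` weighting used by the class.

import numpy as np
import scipy.linalg

X = np.array([[1., 2.],
              [2., 1.]])           # symmetric; eigenvalues are -1 and 3
vals, vecs = scipy.linalg.eigh(X)
clipped = np.clip(vals, 0., None)  # floor negative eigenvalues at zero
X_psd = (vecs * clipped).dot(vecs.T)
print(np.linalg.eigvalsh(X_psd))   # approximately [0., 3.]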
saltstack/salt
salt/utils/openstack/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L869-L875
def keypair_delete(self, name):
    '''
    Delete a keypair
    '''
    nt_ks = self.compute_conn
    nt_ks.keypairs.delete(name)
    return 'Keypair deleted: {0}'.format(name)
python
train
27.857143
TrafficSenseMSD/SumoTools
traci/_vehicle.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_vehicle.py#L896-L932
def setEffort(self, vehID, edgeID, effort=None, begTime=None, endTime=None):
    """setEffort(string, string, double, int, int) -> None

    Inserts the information about the effort of edge "edgeID" valid from
    begin time to end time into the vehicle's internal edge weights
    container.
    If the time is not specified, any previously set values for that edge
    are removed.
    If begTime or endTime are not specified the value is set for the whole
    simulation duration.
    """
    if type(edgeID) != str and type(begTime) == str:
        # legacy handling
        warnings.warn("Parameter order has changed for setEffort(). Attempting legacy ordering. Please update your code.", stacklevel=2)
        return self.setEffort(vehID, begTime, endTime, edgeID, effort)
    if effort is None:
        # reset
        self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EDGE_EFFORT,
                                       vehID, 1 + 4 + 1 + 4 + len(edgeID))
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 1)
        self._connection._packString(edgeID)
        self._connection._sendExact()
    elif begTime is None:
        # set value for the whole simulation
        self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EDGE_EFFORT,
                                       vehID, 1 + 4 + 1 + 4 + len(edgeID) + 1 + 8)
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 2)
        self._connection._packString(edgeID)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, effort)
        self._connection._sendExact()
    else:
        self._connection._beginMessage(tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EDGE_EFFORT,
                                       vehID, 1 + 4 + 1 + 4 + 1 + 4 + 1 + 4 + len(edgeID) + 1 + 8)
        self._connection._string += struct.pack("!BiBiBi", tc.TYPE_COMPOUND, 4,
                                                tc.TYPE_INTEGER, begTime,
                                                tc.TYPE_INTEGER, endTime)
        self._connection._packString(edgeID)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, effort)
        self._connection._sendExact()
python
train
61.243243
note35/sinon
sinon/lib/base.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/base.py#L207-L227
def wrap2stub(self, customfunc):
    """
    Wrapping the inspector as a stub based on the type

    Args:
        customfunc: function that replaces the original
    Returns:
        function, the spy wrapper around the customfunc
    """
    if self.args_type == "MODULE_FUNCTION":
        wrapper = Wrapper.wrap_spy(customfunc, self.obj)
        setattr(self.obj, self.prop, wrapper)
    elif self.args_type == "MODULE":
        wrapper = Wrapper.EmptyClass
        setattr(CPSCOPE, self.obj.__name__, wrapper)
    elif self.args_type == "FUNCTION":
        wrapper = Wrapper.wrap_spy(customfunc)
        setattr(CPSCOPE, self.obj.__name__, wrapper)
    elif self.args_type == "PURE":
        wrapper = Wrapper.wrap_spy(customfunc)
        setattr(self.pure, "func", wrapper)
    return wrapper
python
train
40.761905
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAARP/QARisk.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAARP/QARisk.py#L252-L266
def max_dropback(self):
    """Maximum drawdown"""
    return round(
        float(
            max(
                [
                    (self.assets.iloc[idx] - self.assets.iloc[idx::].min())
                    / self.assets.iloc[idx]
                    for idx in range(len(self.assets))
                ]
            )
        ),
        2
    )
python
train
25.8
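The comprehension above computes, for each index, the relative drop from that point to the lowest later value, then takes the worst case. The same calculation on a small series:

import pandas as pd

assets = pd.Series([100., 120., 90., 110., 80.])
drawdowns = [(assets.iloc[i] - assets.iloc[i:].min()) / assets.iloc[i]
             for i in range(len(assets))]
print(round(float(max(drawdowns)), 2))  # 0.33, the 120 -> 80 drop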
saltstack/salt
salt/states/tuned.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/tuned.py#L18-L86
def profile(name):
    '''
    This state module allows you to modify system tuned parameters

    Example tuned.sls file to set profile to virtual-guest

    tuned:
      tuned:
        - profile
        - name: virtual-guest

    name
        tuned profile name to set the system to

    To see a valid list of states call execution module:
        :py:func:`tuned.list <salt.modules.tuned.list_>`
    '''

    # create data-structure to return with default value
    ret = {'name': '', 'changes': {}, 'result': False, 'comment': ''}
    ret['name'] = name
    profile = name

    # get the current state of tuned-adm
    current_state = __salt__['tuned.active']()

    valid_profiles = __salt__['tuned.list']()

    # check valid profiles, and return error if profile name is not valid
    if profile not in valid_profiles:
        raise salt.exceptions.SaltInvocationError('Invalid Profile Name')

    # if current state is same as requested state, return without doing much
    if profile in current_state:
        ret['result'] = True
        ret['comment'] = 'System already in the correct state'
        return ret

    # test mode
    if __opts__['test'] is True:
        ret['comment'] = 'The state of "{0}" will be changed.'.format(
            current_state)
        ret['changes'] = {
            'old': current_state,
            'new': 'Profile will be set to {0}'.format(profile),
        }
        # return None when testing
        ret['result'] = None
        return ret

    # we reach this stage if the current state differs from the requested
    # state, so set the new state
    new_state = __salt__['tuned.profile'](profile)

    # create the comment data structure
    ret['comment'] = 'The state of "{0}" was changed!'.format(profile)

    # fill in the ret data structure
    ret['changes'] = {
        'old': current_state,
        'new': new_state,
    }

    ret['result'] = True

    # return with the dictionary data structure
    return ret
python
train
27.913043
bspaans/python-mingus
mingus/midi/pyfluidsynth.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L41-L48
def cfunc(name, result, *args):
    """Build and apply a ctypes prototype complete with parameter flags."""
    atypes = []
    aflags = []
    for arg in args:
        atypes.append(arg[1])
        aflags.append((arg[2], arg[0]) + arg[3:])
    return CFUNCTYPE(result, *atypes)((name, _fl), tuple(aflags))
python
train
37.375
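`cfunc` above instantiates a ctypes prototype from a (function name, library) tuple plus paramflags, where `_fl` is the already-loaded FluidSynth library. The same mechanism against libc, as a sketch; library lookup is platform-dependent and may fail, for example on Windows.

import ctypes
from ctypes import CFUNCTYPE, c_int
from ctypes.util import find_library

libc = ctypes.CDLL(find_library('c'))
# Prototype for int abs(int); the flag value 1 marks an input parameter.
prototype = CFUNCTYPE(c_int, c_int)
c_abs = prototype(('abs', libc), ((1, 'n'),))
print(c_abs(-7))  # 7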
slundberg/shap
shap/benchmark/metrics.py
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L225-L232
def keep_absolute_resample__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Keep Absolute (resample)
    xlabel = "Max fraction of features kept"
    ylabel = "ROC AUC"
    transform = "identity"
    sort_order = 12
    """
    return __run_measure(measures.keep_resample, X, y, model_generator,
                         method_name, 0, num_fcounts, sklearn.metrics.roc_auc_score)
python
train
46.125
saltstack/salt
salt/modules/pagerduty_util.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pagerduty_util.py#L106-L121
def _list_items(action, key, profile=None, subdomain=None, api_key=None):
    '''
    List items belonging to an API call.

    This method should be in utils.pagerduty.
    '''
    items = _query(
        profile=profile,
        subdomain=subdomain,
        api_key=api_key,
        action=action
    )
    ret = {}
    for item in items[action]:
        ret[item[key]] = item
    return ret
python
train
23.625
theodoregoetz/wernher
wernher/colorline.py
https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/wernher/colorline.py#L31-L40
def make_segments(x, y):
    """
    Create list of line segments from x and y coordinates, in the correct
    format for LineCollection: an array of the form
    numlines x (points per line) x 2 (x and y) array
    """
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    return segments
python
train
34.6
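A quick shape check for `make_segments` above: n points produce n - 1 segments, each a pair of (x, y) endpoints, which is the layout LineCollection expects.

import numpy as np

x = np.array([0., 1., 2., 3.])
y = x ** 2
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
print(segments.shape)  # (3, 2, 2): 3 segments, 2 endpoints, 2 coordinates
print(segments[0])     # [[0. 0.]
                       #  [1. 1.]]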
draperunner/fjlc
fjlc/utils/json_utils.py
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/utils/json_utils.py#L18-L27
def to_json_file(file, data, pretty):
    """
    Writes object instance in JSON formatted String to file

    :param file: File to write JSON string to
    :param data: Object to convert to JSON
    :param pretty: Use pretty formatting or not
    """
    json_string = to_json(data, pretty)
    file_utils.write_to_file(file, json_string)
python
train
33.5
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L954-L958
def clause_annotations(self):
    """The list of clause annotations in ``words`` layer."""
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]]
python
train
51.4
uwdata/draco
draco/helper.py
https://github.com/uwdata/draco/blob/b130b5ebdb369e18e046706c73dc9c29b8159f7f/draco/helper.py#L37-L56
def read_data_to_asp(file: str) -> List[str]:
    """ Reads the given JSON or CSV file and generates the ASP definition.
    Args:
        file: the json or csv data file
    Returns:
        the asp definition.
    """
    if file.endswith(".json"):
        with open(file) as f:
            data = json.load(f)
            return schema2asp(data2schema(data))
    elif file.endswith(".csv"):
        df = pd.read_csv(file)
        df = df.where((pd.notnull(df)), None)
        data = list(df.T.to_dict().values())
        schema = data2schema(data)
        asp = schema2asp(schema)
        return asp
    else:
        raise Exception("invalid file type")
python
train
32
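The CSV branch above turns a DataFrame into a list of per-row dicts with NaN mapped to None before schema inference. That conversion in isolation (exact scalar types in the output can vary with the pandas version):

import io
import pandas as pd

df = pd.read_csv(io.StringIO('a,b\n1,x\n2,\n'))
df = df.where((pd.notnull(df)), None)  # NaN -> None
data = list(df.T.to_dict().values())   # one dict per row
print(data)  # [{'a': 1, 'b': 'x'}, {'a': 2, 'b': None}]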
kkroening/ffmpeg-python
examples/tensorflow_stream.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/examples/tensorflow_stream.py#L199-L213
def _calc_grad_tiled(self, img, t_grad, tile_size=512):
    '''Compute the value of tensor t_grad over the image in a tiled way.
    Random shifts are applied to the image to blur tile boundaries over
    multiple iterations.'''
    sz = tile_size
    h, w = img.shape[:2]
    sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    grad = np.zeros_like(img)
    for y in range(0, max(h - sz // 2, sz), sz):
        for x in range(0, max(w - sz // 2, sz), sz):
            sub = img_shift[y:y + sz, x:x + sz]
            g = self._session.run(t_grad, {self._t_input: sub})
            grad[y:y + sz, x:x + sz] = g
    return np.roll(np.roll(grad, -sx, 1), -sy, 0)
python
train
48.266667
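The roll/unroll pair above shifts the image by a random offset before tiling and shifts the gradient back afterwards, so tile seams land in different places on each iteration. The inverse-shift identity in isolation:

import numpy as np

img = np.arange(12.).reshape(3, 4)
sx, sy = 2, 1
shifted = np.roll(np.roll(img, sx, 1), sy, 0)         # shift columns, then rows
restored = np.roll(np.roll(shifted, -sx, 1), -sy, 0)  # undo both shifts
print(np.array_equal(img, restored))                  # True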
eandersson/amqpstorm
amqpstorm/connection.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/connection.py#L145-L170
def channel(self, rpc_timeout=60, lazy=False):
    """Open Channel.

    :param int rpc_timeout: Timeout before we give up waiting for an RPC
                            response from the server.
    :param bool lazy: If True, do not open the channel on creation.

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.
    """
    LOGGER.debug('Opening a new Channel')
    if not compatibility.is_integer(rpc_timeout):
        raise AMQPInvalidArgument('rpc_timeout should be an integer')
    elif self.is_closed:
        raise AMQPConnectionError('socket/connection closed')

    with self.lock:
        channel_id = self._get_next_available_channel_id()
        channel = Channel(channel_id, self, rpc_timeout,
                          on_close_impl=self._cleanup_channel)
        self._channels[channel_id] = channel
        if not lazy:
            channel.open()
    LOGGER.debug('Channel #%d Opened', channel_id)
    return self._channels[channel_id]
python
train
43.730769
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/notification/notification_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/notification/notification_client.py#L186-L197
def query_subscriptions(self, subscription_query):
    """QuerySubscriptions.
    [Preview API] Query for subscriptions. A subscription is returned if it matches one or more of the specified conditions.
    :param :class:`<SubscriptionQuery> <azure.devops.v5_0.notification.models.SubscriptionQuery>` subscription_query:
    :rtype: [NotificationSubscription]
    """
    content = self._serialize.body(subscription_query, 'SubscriptionQuery')
    response = self._send(http_method='POST',
                          location_id='6864db85-08c0-4006-8e8e-cc1bebe31675',
                          version='5.0-preview.1',
                          content=content)
    return self._deserialize('[NotificationSubscription]', self._unwrap_collection(response))
python
train
65.666667
fake-name/ChromeController
ChromeController/manager.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/manager.py#L352-L378
def get_page_url_title(self):
    '''
    Get the title and current url from the remote session.

    Return is a 2-tuple: (page_title, page_url).
    '''
    cr_tab_id = self.transport._get_cr_tab_meta_for_key(self.tab_id)['id']
    targets = self.Target_getTargets()
    assert 'result' in targets
    assert 'targetInfos' in targets['result']
    for tgt in targets['result']['targetInfos']:
        if tgt['targetId'] == cr_tab_id:
            # {
            #     'title': 'Page Title 1',
            #     'targetId': '9d2c503c-e39e-42cc-b950-96db073918ee',
            #     'attached': True,
            #     'url': 'http://localhost:47181/with_title_1',
            #     'type': 'page'
            # }
            title = tgt['title']
            cur_url = tgt['url']
            return title, cur_url
python
train
25.037037
croach/django-simple-rest
simple_rest/utils/serializers.py
https://github.com/croach/django-simple-rest/blob/5f5904969d170ef3803a9fb735f814ef76f79427/simple_rest/utils/serializers.py#L53-L90
def to_html(data):
    """
    Serializes a python object as HTML

    This method uses the to_json method to turn the given data object into
    formatted JSON that is displayed in an HTML page. If pygments is
    installed, syntax highlighting will also be applied to the JSON.
    """
    base_html_template = Template('''
        <html>
        <head>
            {% if style %}
            <style type="text/css">
                {{ style }}
            </style>
            {% endif %}
        </head>
        <body>
            {% if style %}
            {{ body|safe }}
            {% else %}
            <pre><code>{{ body }}</code></pre>
            {% endif %}
        </body>
        </html>
    ''')

    code = to_json(data, indent=4)

    if PYGMENTS_INSTALLED:
        c = Context({
            'body': highlight(code, JSONLexer(), HtmlFormatter()),
            'style': HtmlFormatter().get_style_defs('.highlight')
        })
        html = base_html_template.render(c)
    else:
        c = Context({'body': code})
        html = base_html_template.render(c)

    return html
python
train
29.552632
CalebBell/thermo
thermo/thermal_conductivity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/thermal_conductivity.py#L2091-L2141
def calculate(self, T, method):
    r'''Method to calculate low-pressure gas thermal conductivity at
    temperature `T` with a given method.

    This method has no exception handling; see `T_dependent_property`
    for that.

    Parameters
    ----------
    T : float
        Temperature of the gas, [K]
    method : str
        Name of the method to use

    Returns
    -------
    kg : float
        Thermal conductivity of the gas at T and a low pressure, [W/m/K]
    '''
    if method == GHARAGHEIZI_G:
        kg = Gharagheizi_gas(T, self.MW, self.Tb, self.Pc, self.omega)
    elif method == DIPPR_9B:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = DIPPR9B(T, self.MW, Cvgm, mug, self.Tc)
    elif method == CHUNG:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Chung(T, self.MW, self.Tc, self.omega, Cvgm, mug)
    elif method == ELI_HANLEY:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        kg = eli_hanley(T, self.MW, self.Tc, self.Vc, self.Zc, self.omega, Cvgm)
    elif method == EUCKEN_MOD:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Eucken_modified(self.MW, Cvgm, mug)
    elif method == EUCKEN:
        Cvgm = self.Cvgm(T) if hasattr(self.Cvgm, '__call__') else self.Cvgm
        mug = self.mug(T) if hasattr(self.mug, '__call__') else self.mug
        kg = Eucken(self.MW, Cvgm, mug)
    elif method == DIPPR_PERRY_8E:
        kg = EQ102(T, *self.Perrys2_314_coeffs)
    elif method == VDI_PPDS:
        kg = horner(self.VDI_PPDS_coeffs, T)
    elif method == BAHADORI_G:
        kg = Bahadori_gas(T, self.MW)
    elif method == COOLPROP:
        kg = CoolProp_T_dependent_property(T, self.CASRN, 'L', 'g')
    elif method in self.tabular_data:
        kg = self.interpolate(T, method)
    return kg
python
valid
43.980392
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/job.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/job.py#L981-L994
def destination_encryption_configuration(self):
    """google.cloud.bigquery.table.EncryptionConfiguration: Custom
    encryption configuration for the destination table.

    Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
    if using default encryption.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration
    """
    prop = self._get_sub_prop("destinationEncryptionConfiguration")
    if prop is not None:
        prop = EncryptionConfiguration.from_api_repr(prop)
    return prop
python
train
43.928571
dcos/shakedown
shakedown/dcos/security.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/security.py#L117-L135
def remove_user_permission(rid, uid, action='full'):
    """ Removes user permission on a given resource.

    :param uid: user id
    :type uid: str
    :param rid: resource ID
    :type rid: str
    :param action: read, write, update, delete or full
    :type action: str
    """
    rid = rid.replace('/', '%252F')
    try:
        acl_url = urljoin(_acl_url(), 'acls/{}/users/{}/{}'.format(rid, uid, action))
        r = http.delete(acl_url)
        assert r.status_code == 204
    except DCOSHTTPException as e:
        if e.response.status_code != 400:
            raise
python
train
30.684211
saltstack/salt
salt/modules/shadow.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/shadow.py#L53-L95
def info(name, root=None):
    '''
    Return information for the specified user

    name
        User to get the information for

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.info root
    '''
    if root is not None:
        getspnam = functools.partial(_getspnam, root=root)
    else:
        getspnam = functools.partial(spwd.getspnam)

    try:
        data = getspnam(name)
        ret = {
            'name': data.sp_namp if hasattr(data, 'sp_namp') else data.sp_nam,
            'passwd': data.sp_pwdp if hasattr(data, 'sp_pwdp') else data.sp_pwd,
            'lstchg': data.sp_lstchg,
            'min': data.sp_min,
            'max': data.sp_max,
            'warn': data.sp_warn,
            'inact': data.sp_inact,
            'expire': data.sp_expire}
    except KeyError:
        return {
            'name': '',
            'passwd': '',
            'lstchg': '',
            'min': '',
            'max': '',
            'warn': '',
            'inact': '',
            'expire': ''}
    return ret
[ "def", "info", "(", "name", ",", "root", "=", "None", ")", ":", "if", "root", "is", "not", "None", ":", "getspnam", "=", "functools", ".", "partial", "(", "_getspnam", ",", "root", "=", "root", ")", "else", ":", "getspnam", "=", "functools", ".", "partial", "(", "spwd", ".", "getspnam", ")", "try", ":", "data", "=", "getspnam", "(", "name", ")", "ret", "=", "{", "'name'", ":", "data", ".", "sp_namp", "if", "hasattr", "(", "data", ",", "'sp_namp'", ")", "else", "data", ".", "sp_nam", ",", "'passwd'", ":", "data", ".", "sp_pwdp", "if", "hasattr", "(", "data", ",", "'sp_pwdp'", ")", "else", "data", ".", "sp_pwd", ",", "'lstchg'", ":", "data", ".", "sp_lstchg", ",", "'min'", ":", "data", ".", "sp_min", ",", "'max'", ":", "data", ".", "sp_max", ",", "'warn'", ":", "data", ".", "sp_warn", ",", "'inact'", ":", "data", ".", "sp_inact", ",", "'expire'", ":", "data", ".", "sp_expire", "}", "except", "KeyError", ":", "return", "{", "'name'", ":", "''", ",", "'passwd'", ":", "''", ",", "'lstchg'", ":", "''", ",", "'min'", ":", "''", ",", "'max'", ":", "''", ",", "'warn'", ":", "''", ",", "'inact'", ":", "''", ",", "'expire'", ":", "''", "}", "return", "ret" ]
Return information for the specified user name User to get the information for root Directory to chroot into CLI Example: .. code-block:: bash salt '*' shadow.info root
[ "Return", "information", "for", "the", "specified", "user" ]
python
train
24.186047
gwpy/gwpy
gwpy/timeseries/timeseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L175-L257
def average_fft(self, fftlength=None, overlap=0, window=None): """Compute the averaged one-dimensional DFT of this `TimeSeries`. This method computes a number of FFTs of duration ``fftlength`` and ``overlap`` (both given in seconds), and returns the mean average. This method is analogous to the Welch average method for power spectra. Parameters ---------- fftlength : `float` number of seconds in single FFT, default, use whole `TimeSeries` overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : complex-valued `~gwpy.frequencyseries.FrequencySeries` the transformed output, with populated frequencies array metadata See Also -------- :mod:`scipy.fftpack` for the definition of the DFT and conventions used. """ from gwpy.spectrogram import Spectrogram # format lengths if fftlength is None: fftlength = self.duration if isinstance(fftlength, units.Quantity): fftlength = fftlength.value nfft = int((fftlength * self.sample_rate).decompose().value) noverlap = int((overlap * self.sample_rate).decompose().value) navg = divmod(self.size-noverlap, (nfft-noverlap))[0] # format window if window is None: window = 'boxcar' if isinstance(window, (str, tuple)): win = signal.get_window(window, nfft) else: win = numpy.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') elif win.shape[0] != nfft: raise ValueError('Window is the wrong size.') win = win.astype(self.dtype) scaling = 1. / numpy.absolute(win).mean() if nfft % 2: nfreqs = (nfft + 1) // 2 else: nfreqs = nfft // 2 + 1 ffts = Spectrogram(numpy.zeros((navg, nfreqs), dtype=numpy.complex), channel=self.channel, epoch=self.epoch, f0=0, df=1 / fftlength, dt=1, copy=True) # stride through TimeSeries, recording FFTs as columns of Spectrogram idx = 0 for i in range(navg): # find step TimeSeries idx_end = idx + nfft if idx_end > self.size: continue stepseries = self[idx:idx_end].detrend() * win # calculated FFT, weight, and stack fft_ = stepseries.fft(nfft=nfft) * scaling ffts.value[i, :] = fft_.value idx += (nfft - noverlap) mean = ffts.mean(0) mean.name = self.name mean.epoch = self.epoch mean.channel = self.channel return mean
[ "def", "average_fft", "(", "self", ",", "fftlength", "=", "None", ",", "overlap", "=", "0", ",", "window", "=", "None", ")", ":", "from", "gwpy", ".", "spectrogram", "import", "Spectrogram", "# format lengths", "if", "fftlength", "is", "None", ":", "fftlength", "=", "self", ".", "duration", "if", "isinstance", "(", "fftlength", ",", "units", ".", "Quantity", ")", ":", "fftlength", "=", "fftlength", ".", "value", "nfft", "=", "int", "(", "(", "fftlength", "*", "self", ".", "sample_rate", ")", ".", "decompose", "(", ")", ".", "value", ")", "noverlap", "=", "int", "(", "(", "overlap", "*", "self", ".", "sample_rate", ")", ".", "decompose", "(", ")", ".", "value", ")", "navg", "=", "divmod", "(", "self", ".", "size", "-", "noverlap", ",", "(", "nfft", "-", "noverlap", ")", ")", "[", "0", "]", "# format window", "if", "window", "is", "None", ":", "window", "=", "'boxcar'", "if", "isinstance", "(", "window", ",", "(", "str", ",", "tuple", ")", ")", ":", "win", "=", "signal", ".", "get_window", "(", "window", ",", "nfft", ")", "else", ":", "win", "=", "numpy", ".", "asarray", "(", "window", ")", "if", "len", "(", "win", ".", "shape", ")", "!=", "1", ":", "raise", "ValueError", "(", "'window must be 1-D'", ")", "elif", "win", ".", "shape", "[", "0", "]", "!=", "nfft", ":", "raise", "ValueError", "(", "'Window is the wrong size.'", ")", "win", "=", "win", ".", "astype", "(", "self", ".", "dtype", ")", "scaling", "=", "1.", "/", "numpy", ".", "absolute", "(", "win", ")", ".", "mean", "(", ")", "if", "nfft", "%", "2", ":", "nfreqs", "=", "(", "nfft", "+", "1", ")", "//", "2", "else", ":", "nfreqs", "=", "nfft", "//", "2", "+", "1", "ffts", "=", "Spectrogram", "(", "numpy", ".", "zeros", "(", "(", "navg", ",", "nfreqs", ")", ",", "dtype", "=", "numpy", ".", "complex", ")", ",", "channel", "=", "self", ".", "channel", ",", "epoch", "=", "self", ".", "epoch", ",", "f0", "=", "0", ",", "df", "=", "1", "/", "fftlength", ",", "dt", "=", "1", ",", "copy", "=", "True", ")", "# stride through TimeSeries, recording FFTs as columns of Spectrogram", "idx", "=", "0", "for", "i", "in", "range", "(", "navg", ")", ":", "# find step TimeSeries", "idx_end", "=", "idx", "+", "nfft", "if", "idx_end", ">", "self", ".", "size", ":", "continue", "stepseries", "=", "self", "[", "idx", ":", "idx_end", "]", ".", "detrend", "(", ")", "*", "win", "# calculated FFT, weight, and stack", "fft_", "=", "stepseries", ".", "fft", "(", "nfft", "=", "nfft", ")", "*", "scaling", "ffts", ".", "value", "[", "i", ",", ":", "]", "=", "fft_", ".", "value", "idx", "+=", "(", "nfft", "-", "noverlap", ")", "mean", "=", "ffts", ".", "mean", "(", "0", ")", "mean", ".", "name", "=", "self", ".", "name", "mean", ".", "epoch", "=", "self", ".", "epoch", "mean", ".", "channel", "=", "self", ".", "channel", "return", "mean" ]
Compute the averaged one-dimensional DFT of this `TimeSeries`. This method computes a number of FFTs of duration ``fftlength`` and ``overlap`` (both given in seconds), and returns the mean average. This method is analogous to the Welch average method for power spectra. Parameters ---------- fftlength : `float` number of seconds in single FFT, default, use whole `TimeSeries` overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats Returns ------- out : complex-valued `~gwpy.frequencyseries.FrequencySeries` the transformed output, with populated frequencies array metadata See Also -------- :mod:`scipy.fftpack` for the definition of the DFT and conventions used.
[ "Compute", "the", "averaged", "one", "-", "dimensional", "DFT", "of", "this", "TimeSeries", "." ]
python
train
37
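A hedged usage sketch for average_fft with synthetic data, relying only on the public TimeSeries constructor:

import numpy
from gwpy.timeseries import TimeSeries

# 16 seconds of white noise sampled at 256 Hz
data = TimeSeries(numpy.random.normal(size=4096), sample_rate=256)
# average 4-second FFTs with 2 seconds of overlap and a Hann window
fft = data.average_fft(fftlength=4, overlap=2, window='hann')
print(fft.frequencies[:5])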
rene-aguirre/pywinusb
pywinusb/hid/core.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L1465-L1499
def send(self, raw_data = None): """Prepare HID raw report (unless raw_data is provided) and send it to HID device """ if self.__report_kind != HidP_Output \ and self.__report_kind != HidP_Feature: raise HIDError("Only for output or feature reports") #valid length if raw_data and (len(raw_data) != self.__raw_report_size): raise HIDError("Report size has to be %d elements (bytes)" \ % self.__raw_report_size) #should be valid report id if raw_data and raw_data[0] != self.__report_id.value: #hint, raw_data should be a plain list of integer values raise HIDError("Not matching report id") # if self.__report_kind != HidP_Output and \ self.__report_kind != HidP_Feature: raise HIDError("Can only send output or feature reports") # if not raw_data: # we'll construct the raw report self.__prepare_raw_data() elif not ( isinstance(raw_data, ctypes.Array) and \ issubclass(raw_data._type_, c_ubyte) ): # pre-memory allocation for performance self.__alloc_raw_data(raw_data) #reference proper object raw_data = self.__raw_data if self.__report_kind == HidP_Output: return self.__hid_object.send_output_report(raw_data) elif self.__report_kind == HidP_Feature: return self.__hid_object.send_feature_report(raw_data) else: pass
[ "def", "send", "(", "self", ",", "raw_data", "=", "None", ")", ":", "if", "self", ".", "__report_kind", "!=", "HidP_Output", "and", "self", ".", "__report_kind", "!=", "HidP_Feature", ":", "raise", "HIDError", "(", "\"Only for output or feature reports\"", ")", "#valid length\r", "if", "raw_data", "and", "(", "len", "(", "raw_data", ")", "!=", "self", ".", "__raw_report_size", ")", ":", "raise", "HIDError", "(", "\"Report size has to be %d elements (bytes)\"", "%", "self", ".", "__raw_report_size", ")", "#should be valid report id\r", "if", "raw_data", "and", "raw_data", "[", "0", "]", "!=", "self", ".", "__report_id", ".", "value", ":", "#hint, raw_data should be a plain list of integer values\r", "raise", "HIDError", "(", "\"Not matching report id\"", ")", "#\r", "if", "self", ".", "__report_kind", "!=", "HidP_Output", "and", "self", ".", "__report_kind", "!=", "HidP_Feature", ":", "raise", "HIDError", "(", "\"Can only send output or feature reports\"", ")", "#\r", "if", "not", "raw_data", ":", "# we'll construct the raw report\r", "self", ".", "__prepare_raw_data", "(", ")", "elif", "not", "(", "isinstance", "(", "raw_data", ",", "ctypes", ".", "Array", ")", "and", "issubclass", "(", "raw_data", ".", "_type_", ",", "c_ubyte", ")", ")", ":", "# pre-memory allocation for performance\r", "self", ".", "__alloc_raw_data", "(", "raw_data", ")", "#reference proper object\r", "raw_data", "=", "self", ".", "__raw_data", "if", "self", ".", "__report_kind", "==", "HidP_Output", ":", "return", "self", ".", "__hid_object", ".", "send_output_report", "(", "raw_data", ")", "elif", "self", ".", "__report_kind", "==", "HidP_Feature", ":", "return", "self", ".", "__hid_object", ".", "send_feature_report", "(", "raw_data", ")", "else", ":", "pass" ]
Prepare HID raw report (unless raw_data is provided) and send it to HID device
[ "Prepare", "HID", "raw", "report", "(", "unless", "raw_data", "is", "provided", ")", "and", "send", "it", "to", "HID", "device" ]
python
train
44.971429
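A minimal send sketch; it assumes a matching HID device is attached and uses the find_output_reports helper from pywinusb's documented device API (the vendor id is a placeholder):

from pywinusb import hid

device = hid.HidDeviceFilter(vendor_id=0x1234).get_devices()[0]  # hypothetical VID
device.open()
try:
    report = device.find_output_reports()[0]
    # with no raw_data argument, send() builds the raw report from the
    # report's current item values (the __prepare_raw_data branch above)
    report.send()
finally:
    device.close()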
ThreatConnect-Inc/tcex
tcex/tcex_resources.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_resources.py#L1045-L1100
def victim_assets(self, asset_type=None, asset_id=None):
        """Victim Asset endpoint for this resource with optional asset type.

        This method will set the resource endpoint for working with Victim
        Assets. The HTTP GET method will return all Victim Assets associated
        with this resource or if an asset type is provided it will return the
        provided asset type if it has been associated. The provided asset type
        can be associated with this resource using the HTTP POST method. The
        HTTP DELETE method will remove the provided victim asset from this
        resource.

        **Example Endpoints URI's**

        +---------+--------------------------------------------------------------------------------+
        | Method  | API Endpoint URI's                                                             |
        +=========+================================================================================+
        | GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets                              |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}                  |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets                          |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets/{assetType}              |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId} |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/victim/{uniqueId}/victimAssets/{assetType}                                 |
        +---------+--------------------------------------------------------------------------------+
        | GET     | /v2/victim/{uniqueId}/victimAssets/{assetType}/{resourceId}                    |
        +---------+--------------------------------------------------------------------------------+
        | DELETE  | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
        +---------+--------------------------------------------------------------------------------+
        | POST    | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
        +---------+--------------------------------------------------------------------------------+

        Args:
            asset_type (Optional [string]): The asset type.
            asset_id (Optional [string]): The asset id.
        """
        type_entity_map = {
            'emailAddresses': 'victimEmailAddress',
            'networkAccounts': 'victimNetworkAccount',
            'phoneNumbers': 'victimPhone',
            'socialNetworks': 'victimSocialNetwork',
            'webSites': 'victimWebSite',
        }
        resource = self.copy()
        resource._request_entity = 'victimAsset'
        resource._request_uri = '{}/victimAssets'.format(resource._request_uri)
        if asset_type is not None:
            resource._request_entity = type_entity_map.get(asset_type, 'victimAsset')
            resource._request_uri = '{}/{}'.format(resource._request_uri, asset_type)
        if asset_id is not None:
            resource._request_uri = '{}/{}'.format(resource._request_uri, asset_id)
        return resource
[ "def", "victim_assets", "(", "self", ",", "asset_type", "=", "None", ",", "asset_id", "=", "None", ")", ":", "type_entity_map", "=", "{", "'emailAddresses'", ":", "'victimEmailAddress'", ",", "'networkAccounts'", ":", "'victimNetworkAccount'", ",", "'phoneNumbers'", ":", "'victimPhone'", ",", "'socialNetworks'", ":", "'victimSocialNetwork'", ",", "'webSites'", ":", "'victimWebSite'", ",", "}", "resource", "=", "self", ".", "copy", "(", ")", "resource", ".", "_request_entity", "=", "'victimAsset'", "resource", ".", "_request_uri", "=", "'{}/victimAssets'", ".", "format", "(", "resource", ".", "_request_uri", ")", "if", "asset_type", "is", "not", "None", ":", "resource", ".", "_request_entity", "=", "type_entity_map", ".", "get", "(", "asset_type", ",", "'victimAsset'", ")", "resource", ".", "_request_uri", "=", "'{}/{}'", ".", "format", "(", "resource", ".", "_request_uri", ",", "asset_type", ")", "if", "asset_id", "is", "not", "None", ":", "resource", ".", "_request_uri", "=", "'{}/{}'", ".", "format", "(", "resource", ".", "_request_uri", ",", "asset_id", ")", "return", "resource" ]
Victim Asset endpoint for this resource with optional asset type.

This method will set the resource endpoint for working with Victim Assets. The HTTP GET method will return all Victim Assets associated with this resource or if an asset type is provided it will return the provided asset type if it has been associated. The provided asset type can be associated with this resource using the HTTP POST method. The HTTP DELETE method will remove the provided victim asset from this resource.

**Example Endpoints URI's**

+---------+--------------------------------------------------------------------------------+
| Method  | API Endpoint URI's                                                             |
+=========+================================================================================+
| GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets                              |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}                  |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets                          |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets/{assetType}              |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/indicators/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId} |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/victim/{uniqueId}/victimAssets/{assetType}                                 |
+---------+--------------------------------------------------------------------------------+
| GET     | /v2/victim/{uniqueId}/victimAssets/{assetType}/{resourceId}                    |
+---------+--------------------------------------------------------------------------------+
| DELETE  | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
+---------+--------------------------------------------------------------------------------+
| POST    | /v2/groups/{resourceType}/{uniqueId}/victimAssets/{assetType}/{resourceId}     |
+---------+--------------------------------------------------------------------------------+

Args:
    asset_type (Optional [string]): The asset type.
    asset_id (Optional [string]): The asset id.
[ "Victim", "Asset", "endpoint", "for", "this", "resource", "with", "optional", "asset", "type", "." ]
python
train
68.589286
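A hedged sketch of how the endpoint chaining above reads in practice; the resource factory call and the group id are assumptions, since only victim_assets itself is shown in this record:

# `tcex` is assumed to be an initialized TcEx instance
resource = tcex.resource('Adversary')  # hypothetical resource factory
resource.resource_id(123)              # hypothetical group id
assets = resource.victim_assets('emailAddresses')
# `assets` now targets .../victimAssets/emailAddresses for GET/POST/DELETE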
threeML/astromodels
astromodels/utils/angular_distance.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/utils/angular_distance.py#L61-L83
def spherical_angle( ra0, dec0, ra1, dec1, ra2, dec2 ): """ Returns the spherical angle distance between two sets of great circles defined by (ra0, dec0), (ra1, dec1) and (ra0, dec0), (ra2, dec2) :param ra0: array or float, longitude of intersection point(s) :param dec0: array or float, latitude of intersection point(s) :param ra1: array or float, longitude of first point(s) :param dec1: array or float, latitude of first point(s) :param ra2: array or float, longitude of second point(s) :param dec2: array or float, latitude of second point(s) :return: spherical angle in degrees """ a = np.deg2rad( angular_distance(ra0, dec0, ra1, dec1)) b = np.deg2rad( angular_distance(ra0, dec0, ra2, dec2)) c = np.deg2rad( angular_distance(ra2, dec2, ra1, dec1)) #use the spherical law of cosines: https://en.wikipedia.org/wiki/Spherical_law_of_cosines#Rearrangements numerator = np.atleast_1d( np.cos(c) - np.cos(a) * np.cos(b) ) denominator = np.atleast_1d( np.sin(a)*np.sin(b) ) return np.where( denominator == 0 , np.zeros( len(denominator)), np.rad2deg( np.arccos( numerator/denominator)) )
[ "def", "spherical_angle", "(", "ra0", ",", "dec0", ",", "ra1", ",", "dec1", ",", "ra2", ",", "dec2", ")", ":", "a", "=", "np", ".", "deg2rad", "(", "angular_distance", "(", "ra0", ",", "dec0", ",", "ra1", ",", "dec1", ")", ")", "b", "=", "np", ".", "deg2rad", "(", "angular_distance", "(", "ra0", ",", "dec0", ",", "ra2", ",", "dec2", ")", ")", "c", "=", "np", ".", "deg2rad", "(", "angular_distance", "(", "ra2", ",", "dec2", ",", "ra1", ",", "dec1", ")", ")", "#use the spherical law of cosines: https://en.wikipedia.org/wiki/Spherical_law_of_cosines#Rearrangements", "numerator", "=", "np", ".", "atleast_1d", "(", "np", ".", "cos", "(", "c", ")", "-", "np", ".", "cos", "(", "a", ")", "*", "np", ".", "cos", "(", "b", ")", ")", "denominator", "=", "np", ".", "atleast_1d", "(", "np", ".", "sin", "(", "a", ")", "*", "np", ".", "sin", "(", "b", ")", ")", "return", "np", ".", "where", "(", "denominator", "==", "0", ",", "np", ".", "zeros", "(", "len", "(", "denominator", ")", ")", ",", "np", ".", "rad2deg", "(", "np", ".", "arccos", "(", "numerator", "/", "denominator", ")", ")", ")" ]
Returns the spherical angle distance between two sets of great circles defined by (ra0, dec0), (ra1, dec1) and (ra0, dec0), (ra2, dec2) :param ra0: array or float, longitude of intersection point(s) :param dec0: array or float, latitude of intersection point(s) :param ra1: array or float, longitude of first point(s) :param dec1: array or float, latitude of first point(s) :param ra2: array or float, longitude of second point(s) :param dec2: array or float, latitude of second point(s) :return: spherical angle in degrees
[ "Returns", "the", "spherical", "angle", "distance", "between", "two", "sets", "of", "great", "circles", "defined", "by", "(", "ra0", "dec0", ")", "(", "ra1", "dec1", ")", "and", "(", "ra0", "dec0", ")", "(", "ra2", "dec2", ")" ]
python
train
50.217391
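A quick numeric check of the function above: the equator and a meridian meet at right angles, so the angle at (0, 0) between points at (10, 0) and (0, 10) should come out as 90 degrees:

from astromodels.utils.angular_distance import spherical_angle

# angle at (ra=0, dec=0) between great circles toward (10, 0) and (0, 10)
print(spherical_angle(0.0, 0.0, 10.0, 0.0, 0.0, 10.0))  # approximately [90.]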
wbond/certvalidator
certvalidator/context.py
https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L433-L475
def retrieve_crls(self, cert):
        """
        :param cert:
            An asn1crypto.x509.Certificate object

        :return:
            A list of asn1crypto.crl.CertificateList objects
        """

        if not self._allow_fetching:
            return self._crls

        if cert.issuer_serial not in self._fetched_crls:
            try:
                crls = crl_client.fetch(
                    cert,
                    **self._crl_fetch_params
                )
                self._fetched_crls[cert.issuer_serial] = crls
                for crl_ in crls:
                    try:
                        certs = crl_client.fetch_certs(
                            crl_,
                            user_agent=self._crl_fetch_params.get('user_agent'),
                            timeout=self._crl_fetch_params.get('timeout')
                        )
                        for cert_ in certs:
                            if self.certificate_registry.add_other_cert(cert_):
                                self._revocation_certs[cert_.issuer_serial] = cert_
                    except (URLError, socket.error):
                        pass

            except (URLError, socket.error) as e:
                self._fetched_crls[cert.issuer_serial] = []
                if self._revocation_mode == "soft-fail":
                    self._soft_fail_exceptions.append(e)
                    raise SoftFailError()
                else:
                    raise

        return self._fetched_crls[cert.issuer_serial]
[ "def", "retrieve_crls", "(", "self", ",", "cert", ")", ":", "if", "not", "self", ".", "_allow_fetching", ":", "return", "self", ".", "_crls", "if", "cert", ".", "issuer_serial", "not", "in", "self", ".", "_fetched_crls", ":", "try", ":", "crls", "=", "crl_client", ".", "fetch", "(", "cert", ",", "*", "*", "self", ".", "_crl_fetch_params", ")", "self", ".", "_fetched_crls", "[", "cert", ".", "issuer_serial", "]", "=", "crls", "for", "crl_", "in", "crls", ":", "try", ":", "certs", "=", "crl_client", ".", "fetch_certs", "(", "crl_", ",", "user_agent", "=", "self", ".", "_crl_fetch_params", ".", "get", "(", "'user_agent'", ")", ",", "timeout", "=", "self", ".", "_crl_fetch_params", ".", "get", "(", "'timeout'", ")", ")", "for", "cert_", "in", "certs", ":", "if", "self", ".", "certificate_registry", ".", "add_other_cert", "(", "cert_", ")", ":", "self", ".", "_revocation_certs", "[", "cert_", ".", "issuer_serial", "]", "=", "cert_", "except", "(", "URLError", ",", "socket", ".", "error", ")", ":", "pass", "except", "(", "URLError", ",", "socket", ".", "error", ")", "as", "e", ":", "self", ".", "_fetched_crls", "[", "cert", ".", "issuer_serial", "]", "=", "[", "]", "if", "self", ".", "_revocation_mode", "==", "\"soft-fail\"", ":", "self", ".", "_soft_fail_exceptions", ".", "append", "(", "e", ")", "raise", "SoftFailError", "(", ")", "else", ":", "raise", "return", "self", ".", "_fetched_crls", "[", "cert", ".", "issuer_serial", "]" ]
:param cert:
    An asn1crypto.x509.Certificate object

:return:
    A list of asn1crypto.crl.CertificateList objects
[ ":", "param", "cert", ":", "An", "asn1crypto", ".", "x509", ".", "Certificate", "object" ]
python
train
36.651163
chimera0/accel-brain-code
Reinforcement-Learning/demo/demo_maze_deep_q_network.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Reinforcement-Learning/demo/demo_maze_deep_q_network.py#L45-L79
def inference(self, state_arr, limit=1000):
        '''
        Inference.

        Args:
            state_arr:    `np.ndarray` of state.
            limit:        The number of inferencing.

        Returns:
            list of `np.ndarray` of an optimal route.
        '''
        agent_x, agent_y = np.where(state_arr[0] == 1)
        agent_x, agent_y = agent_x[0], agent_y[0]
        result_list = [(agent_x, agent_y, 0.0)]
        self.t = 1
        while self.t <= limit:
            next_action_arr = self.extract_possible_actions(state_arr)
            next_q_arr = self.function_approximator.inference_q(next_action_arr)
            action_arr, q = self.select_action(next_action_arr, next_q_arr)
            agent_x, agent_y = np.where(action_arr[0] == 1)
            agent_x, agent_y = agent_x[0], agent_y[0]
            result_list.append((agent_x, agent_y, q[0]))

            # Update State.
            state_arr = self.update_state(state_arr, action_arr)

            # Episode.
            self.t += 1

            # Check.
            end_flag = self.check_the_end_flag(state_arr)
            if end_flag is True:
                break

        return result_list
[ "def", "inference", "(", "self", ",", "state_arr", ",", "limit", "=", "1000", ")", ":", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "state_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "result_list", "=", "[", "(", "agent_x", ",", "agent_y", ",", "0.0", ")", "]", "self", ".", "t", "=", "1", "while", "self", ".", "t", "<=", "limit", ":", "next_action_arr", "=", "self", ".", "extract_possible_actions", "(", "state_arr", ")", "next_q_arr", "=", "self", ".", "function_approximator", ".", "inference_q", "(", "next_action_arr", ")", "action_arr", ",", "q", "=", "self", ".", "select_action", "(", "next_action_arr", ",", "next_q_arr", ")", "agent_x", ",", "agent_y", "=", "np", ".", "where", "(", "action_arr", "[", "0", "]", "==", "1", ")", "agent_x", ",", "agent_y", "=", "agent_x", "[", "0", "]", ",", "agent_y", "[", "0", "]", "result_list", ".", "append", "(", "(", "agent_x", ",", "agent_y", ",", "q", "[", "0", "]", ")", ")", "# Update State.", "state_arr", "=", "self", ".", "update_state", "(", "state_arr", ",", "action_arr", ")", "# Episode.", "self", ".", "t", "+=", "1", "# Check.", "end_flag", "=", "self", ".", "check_the_end_flag", "(", "state_arr", ")", "if", "end_flag", "is", "True", ":", "break", "return", "result_list" ]
Inference.

Args:
    state_arr:    `np.ndarray` of state.
    limit:        The number of inferencing.

Returns:
    list of `np.ndarray` of an optimal route.
[ "Inference", ".", "Args", ":", "state_arr", ":", "np", ".", "ndarray", "of", "state", ".", "limit", ":", "The", "number", "of", "inferencing", ".", "Returns", ":", "list", "of", "np", ".", "ndarray", "of", "an", "optimal", "route", "." ]
python
train
33.228571
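A hedged usage sketch; `agent` and `state_arr` are assumed to be prepared as in the rest of the demo script (trained function approximator, one-hot agent position in channel 0 of the state array):

# `agent` is an instance of the demo's agent class, `state_arr` its maze state
route = agent.inference(state_arr, limit=500)
for x, y, q in route:
    print('agent at (%d, %d), q=%.4f' % (x, y, q))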
astropy/photutils
photutils/utils/misc.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/utils/misc.py#L15-L32
def get_version_info(): """ Return astropy and photutils versions. Returns ------- result : str The astropy and photutils versions. """ from astropy import __version__ astropy_version = __version__ from photutils import __version__ photutils_version = __version__ return 'astropy: {0}, photutils: {1}'.format(astropy_version, photutils_version)
[ "def", "get_version_info", "(", ")", ":", "from", "astropy", "import", "__version__", "astropy_version", "=", "__version__", "from", "photutils", "import", "__version__", "photutils_version", "=", "__version__", "return", "'astropy: {0}, photutils: {1}'", ".", "format", "(", "astropy_version", ",", "photutils_version", ")" ]
Return astropy and photutils versions. Returns ------- result : str The astropy and photutils versions.
[ "Return", "astropy", "and", "photutils", "versions", "." ]
python
train
24
tensorflow/tensor2tensor
tensor2tensor/data_generators/text_encoder.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L338-L351
def _init_vocab_from_file(self, filename): """Load vocab from a file. Args: filename: The file to load vocabulary from. """ with tf.gfile.Open(filename) as f: tokens = [token.strip() for token in f.readlines()] def token_gen(): for token in tokens: yield token self._init_vocab(token_gen(), add_reserved_tokens=False)
[ "def", "_init_vocab_from_file", "(", "self", ",", "filename", ")", ":", "with", "tf", ".", "gfile", ".", "Open", "(", "filename", ")", "as", "f", ":", "tokens", "=", "[", "token", ".", "strip", "(", ")", "for", "token", "in", "f", ".", "readlines", "(", ")", "]", "def", "token_gen", "(", ")", ":", "for", "token", "in", "tokens", ":", "yield", "token", "self", ".", "_init_vocab", "(", "token_gen", "(", ")", ",", "add_reserved_tokens", "=", "False", ")" ]
Load vocab from a file. Args: filename: The file to load vocabulary from.
[ "Load", "vocab", "from", "a", "file", "." ]
python
train
25.5
Nachtfeuer/pipeline
spline/tools/report/collector.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/report/collector.py#L115-L122
def schema_event_items(): """Schema for event items.""" return { 'timestamp': And(int, lambda n: n > 0), Optional('information', default={}): { Optional(Regex(r'([a-z][_a-z]*)')): object } }
[ "def", "schema_event_items", "(", ")", ":", "return", "{", "'timestamp'", ":", "And", "(", "int", ",", "lambda", "n", ":", "n", ">", "0", ")", ",", "Optional", "(", "'information'", ",", "default", "=", "{", "}", ")", ":", "{", "Optional", "(", "Regex", "(", "r'([a-z][_a-z]*)'", ")", ")", ":", "object", "}", "}" ]
Schema for event items.
[ "Schema", "for", "event", "items", "." ]
python
train
32.375
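A validation sketch using the schema library that the And/Optional/Regex names come from:

from schema import Schema

event = {
    'timestamp': 1533931650,
    'information': {'stage': 'build'},  # keys must match [a-z][_a-z]*
}
Schema(schema_event_items()).validate(event)  # raises SchemaError on invalid input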
saltstack/salt
salt/utils/vsan.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vsan.py#L158-L184
def get_host_vsan_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's vsan system

    service_instance
        Service instance to the host or vCenter

    host_ref
        Reference to ESXi host

    hostname
        Name of ESXi host. Default value is None.
    '''
    if not hostname:
        hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.vsanSystem',
        type=vim.HostSystem,
        skip=False)
    objs = salt.utils.vmware.get_mors_with_properties(
        service_instance, vim.HostVsanSystem,
        property_list=['config.enabled'],
        container_ref=host_ref,
        traversal_spec=traversal_spec)
    if not objs:
        raise VMwareObjectRetrievalError('Host\'s \'{0}\' VSAN system was '
                                         'not retrieved'.format(hostname))
    log.trace('[%s] Retrieved VSAN system', hostname)
    return objs[0]['object']
[ "def", "get_host_vsan_system", "(", "service_instance", ",", "host_ref", ",", "hostname", "=", "None", ")", ":", "if", "not", "hostname", ":", "hostname", "=", "salt", ".", "utils", ".", "vmware", ".", "get_managed_object_name", "(", "host_ref", ")", "traversal_spec", "=", "vmodl", ".", "query", ".", "PropertyCollector", ".", "TraversalSpec", "(", "path", "=", "'configManager.vsanSystem'", ",", "type", "=", "vim", ".", "HostSystem", ",", "skip", "=", "False", ")", "objs", "=", "salt", ".", "utils", ".", "vmware", ".", "get_mors_with_properties", "(", "service_instance", ",", "vim", ".", "HostVsanSystem", ",", "property_list", "=", "[", "'config.enabled'", "]", ",", "container_ref", "=", "host_ref", ",", "traversal_spec", "=", "traversal_spec", ")", "if", "not", "objs", ":", "raise", "VMwareObjectRetrievalError", "(", "'Host\\'s \\'{0}\\' VSAN system was '", "'not retrieved'", ".", "format", "(", "hostname", ")", ")", "log", ".", "trace", "(", "'[%s] Retrieved VSAN system'", ",", "hostname", ")", "return", "objs", "[", "0", "]", "[", "'object'", "]" ]
Returns a host's vsan system

service_instance
    Service instance to the host or vCenter

host_ref
    Reference to ESXi host

hostname
    Name of ESXi host. Default value is None.
[ "Returns", "a", "host", "s", "vsan", "system" ]
python
train
35.777778
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L2318-L2375
def add_training_data(self, environment_id, collection_id, natural_language_query=None, filter=None, examples=None, **kwargs): """ Add query to training data. Adds a query to the training data for this collection. The query can contain a filter and natural language query. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param str natural_language_query: The natural text query for the new training query. :param str filter: The filter used on the collection before the **natural_language_query** is applied. :param list[TrainingExample] examples: Array of training examples. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if environment_id is None: raise ValueError('environment_id must be provided') if collection_id is None: raise ValueError('collection_id must be provided') if examples is not None: examples = [ self._convert_model(x, TrainingExample) for x in examples ] headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'add_training_data') headers.update(sdk_headers) params = {'version': self.version} data = { 'natural_language_query': natural_language_query, 'filter': filter, 'examples': examples } url = '/v1/environments/{0}/collections/{1}/training_data'.format( *self._encode_path_vars(environment_id, collection_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
[ "def", "add_training_data", "(", "self", ",", "environment_id", ",", "collection_id", ",", "natural_language_query", "=", "None", ",", "filter", "=", "None", ",", "examples", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "environment_id", "is", "None", ":", "raise", "ValueError", "(", "'environment_id must be provided'", ")", "if", "collection_id", "is", "None", ":", "raise", "ValueError", "(", "'collection_id must be provided'", ")", "if", "examples", "is", "not", "None", ":", "examples", "=", "[", "self", ".", "_convert_model", "(", "x", ",", "TrainingExample", ")", "for", "x", "in", "examples", "]", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'discovery'", ",", "'V1'", ",", "'add_training_data'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "data", "=", "{", "'natural_language_query'", ":", "natural_language_query", ",", "'filter'", ":", "filter", ",", "'examples'", ":", "examples", "}", "url", "=", "'/v1/environments/{0}/collections/{1}/training_data'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "environment_id", ",", "collection_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "json", "=", "data", ",", "accept_json", "=", "True", ")", "return", "response" ]
Add query to training data. Adds a query to the training data for this collection. The query can contain a filter and natural language query. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param str natural_language_query: The natural text query for the new training query. :param str filter: The filter used on the collection before the **natural_language_query** is applied. :param list[TrainingExample] examples: Array of training examples. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
[ "Add", "query", "to", "training", "data", "." ]
python
train
36.965517
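A hedged call sketch; the credentials and IDs are placeholders, and the authentication keyword follows the SDK generation shown here (it differs in later releases):

from ibm_watson import DiscoveryV1

discovery = DiscoveryV1(version='2018-12-03', iam_apikey='...')  # placeholder key
response = discovery.add_training_data(
    environment_id='...',  # placeholder
    collection_id='...',   # placeholder
    natural_language_query='how do I reset my password',
    filter='document_type:faq')
print(response.get_result())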
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/independence/graph/model.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/model.py#L83-L100
def run_feature_selection(self, df_data, target, idx=0, **kwargs):
        """Run feature selection for one node: wrapper around
        ``self.predict_features``.

        Args:
            df_data (pandas.DataFrame): All the observational data
            target (str): Name of the target variable
            idx (int): (optional) For printing purposes

        Returns:
            list: scores of each feature relative to the target
        """
        list_features = list(df_data.columns.values)
        list_features.remove(target)
        df_target = pd.DataFrame(df_data[target], columns=[target])
        df_features = df_data[list_features]

        return self.predict_features(df_features, df_target, idx=idx, **kwargs)
[ "def", "run_feature_selection", "(", "self", ",", "df_data", ",", "target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "list_features", "=", "list", "(", "df_data", ".", "columns", ".", "values", ")", "list_features", ".", "remove", "(", "target", ")", "df_target", "=", "pd", ".", "DataFrame", "(", "df_data", "[", "target", "]", ",", "columns", "=", "[", "target", "]", ")", "df_features", "=", "df_data", "[", "list_features", "]", "return", "self", ".", "predict_features", "(", "df_features", ",", "df_target", ",", "idx", "=", "idx", ",", "*", "*", "kwargs", ")" ]
Run feature selection for one node: wrapper around ``self.predict_features``.

Args:
    df_data (pandas.DataFrame): All the observational data
    target (str): Name of the target variable
    idx (int): (optional) For printing purposes

Returns:
    list: scores of each feature relative to the target
[ "Run", "feature", "selection", "for", "one", "node", ":", "wrapper", "around", "self", ".", "predict_features", "." ]
python
valid
39.888889
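A toy-data sketch for the method above; `model` stands in for any concrete subclass that implements predict_features:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
df = pd.DataFrame(rng.normal(size=(100, 3)), columns=['a', 'b', 'target'])
# `model` is assumed to be an instance of a concrete feature-selection class
scores = model.run_feature_selection(df, 'target')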
ARMmbed/mbed-connector-api-python
mbed_connector_api/mbed_connector_api.py
https://github.com/ARMmbed/mbed-connector-api-python/blob/a5024a01dc67cc192c8bf7a70b251fcf0a3f279b/mbed_connector_api/mbed_connector_api.py#L220-L245
def putResourceValue(self,ep,res,data,cbfn=""):
        """
        Put a value to a resource on an endpoint

        :param str ep: name of endpoint
        :param str res: name of resource
        :param str data: data to send via PUT
        :param fnptr cbfn: Optional - callback function to call when operation is completed
        :return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
        :rtype: asyncResult
        """
        result = asyncResult(callback=cbfn)
        result.endpoint = ep
        result.resource = res
        data = self._putURL("/endpoints/"+ep+res,payload=data)
        if data.status_code == 200: #immediate success
            result.error = False
            result.is_done = True
        elif data.status_code == 202:
            self.database['async-responses'][json.loads(data.content)["async-response-id"]]= result
        else:
            result.error = response_codes("resource",data.status_code)
            result.is_done = True
        result.raw_data = data.content
        result.status_code = data.status_code
        return result
[ "def", "putResourceValue", "(", "self", ",", "ep", ",", "res", ",", "data", ",", "cbfn", "=", "\"\"", ")", ":", "result", "=", "asyncResult", "(", "callback", "=", "cbfn", ")", "result", ".", "endpoint", "=", "ep", "result", ".", "resource", "=", "res", "data", "=", "self", ".", "_putURL", "(", "\"/endpoints/\"", "+", "ep", "+", "res", ",", "payload", "=", "data", ")", "if", "data", ".", "status_code", "==", "200", ":", "#immediate success", "result", ".", "error", "=", "False", "result", ".", "is_done", "=", "True", "elif", "data", ".", "status_code", "==", "202", ":", "self", ".", "database", "[", "'async-responses'", "]", "[", "json", ".", "loads", "(", "data", ".", "content", ")", "[", "\"async-response-id\"", "]", "]", "=", "result", "else", ":", "result", ".", "error", "=", "response_codes", "(", "\"resource\"", ",", "data", ".", "status_code", ")", "result", ".", "is_done", "=", "True", "result", ".", "raw_data", "=", "data", ".", "content", "result", ".", "status_code", "=", "data", ".", "status_code", "return", "result" ]
Put a value to a resource on an endpoint

:param str ep: name of endpoint
:param str res: name of resource
:param str data: data to send via PUT
:param fnptr cbfn: Optional - callback function to call when operation is completed
:return: successful ``.status_code`` / ``.is_done``. Check the ``.error``
:rtype: asyncResult
[ "Put", "a", "value", "to", "a", "resource", "on", "an", "endpoint", ":", "param", "str", "ep", ":", "name", "of", "endpoint", ":", "param", "str", "res", ":", "name", "of", "resource", ":", "param", "str", "data", ":", "data", "to", "send", "via", "PUT", ":", "param", "fnptr", "cbfn", ":", "Optional", "-", "callback", "function", "to", "call", "when", "operation", "is", "completed", ":", "return", ":", "successful", ".", "status_code", "/", ".", "is_done", ".", "Check", "the", ".", "error", ":", "rtype", ":", "asyncResult" ]
python
train
35.346154
estnltk/estnltk
estnltk/wordnet/wn.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/wordnet/wn.py#L258-L316
def all_synsets(pos=None): """Return all the synsets which have the provided pos. Notes ----- Returns thousands or tens of thousands of synsets - first time will take significant time. Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time. Parameters ---------- pos : str Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`. If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time. Returns ------- list of Synsets Lists the Synsets which have `pos` as part-of-speech. Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`]. """ def _get_unique_synset_idxes(pos): idxes = [] with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin: if pos == None: for line in fin: split_line = line.strip().split(':') idxes.extend([int(x) for x in split_line[2].split()]) else: for line in fin: split_line = line.strip().split(':') if split_line[1] == pos: idxes.extend([int(x) for x in split_line[2].split()]) idxes = list(set(idxes)) idxes.sort() return idxes if pos in LOADED_POS: return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]] else: synset_idxes = _get_unique_synset_idxes(pos) if len(synset_idxes) == 0: return [] stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT] unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT] synset_offsets = _get_synset_offsets(unstored_synset_idxes) synsets = _get_synsets(synset_offsets) for synset in synsets: for variant in synset.get_variants(): LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id) LOADED_POS.add(pos) return stored_synsets + synsets
[ "def", "all_synsets", "(", "pos", "=", "None", ")", ":", "def", "_get_unique_synset_idxes", "(", "pos", ")", ":", "idxes", "=", "[", "]", "with", "codecs", ".", "open", "(", "_LIT_POS_FILE", ",", "'rb'", ",", "'utf-8'", ")", "as", "fin", ":", "if", "pos", "==", "None", ":", "for", "line", "in", "fin", ":", "split_line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "idxes", ".", "extend", "(", "[", "int", "(", "x", ")", "for", "x", "in", "split_line", "[", "2", "]", ".", "split", "(", ")", "]", ")", "else", ":", "for", "line", "in", "fin", ":", "split_line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "if", "split_line", "[", "1", "]", "==", "pos", ":", "idxes", ".", "extend", "(", "[", "int", "(", "x", ")", "for", "x", "in", "split_line", "[", "2", "]", ".", "split", "(", ")", "]", ")", "idxes", "=", "list", "(", "set", "(", "idxes", ")", ")", "idxes", ".", "sort", "(", ")", "return", "idxes", "if", "pos", "in", "LOADED_POS", ":", "return", "[", "SYNSETS_DICT", "[", "idx", "]", "for", "lemma", "in", "LEM_POS_2_SS_IDX", "for", "idx", "in", "LEM_POS_2_SS_IDX", "[", "lemma", "]", "[", "pos", "]", "]", "else", ":", "synset_idxes", "=", "_get_unique_synset_idxes", "(", "pos", ")", "if", "len", "(", "synset_idxes", ")", "==", "0", ":", "return", "[", "]", "stored_synsets", "=", "[", "SYNSETS_DICT", "[", "synset_idxes", "[", "i", "]", "]", "for", "i", "in", "range", "(", "len", "(", "synset_idxes", ")", ")", "if", "synset_idxes", "[", "i", "]", "in", "SYNSETS_DICT", "]", "unstored_synset_idxes", "=", "[", "synset_idxes", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "synset_idxes", ")", ")", "if", "synset_idxes", "[", "i", "]", "not", "in", "SYNSETS_DICT", "]", "synset_offsets", "=", "_get_synset_offsets", "(", "unstored_synset_idxes", ")", "synsets", "=", "_get_synsets", "(", "synset_offsets", ")", "for", "synset", "in", "synsets", ":", "for", "variant", "in", "synset", ".", "get_variants", "(", ")", ":", "LEM_POS_2_SS_IDX", "[", "variant", ".", "literal", "]", "[", "synset", ".", "pos", "]", ".", "append", "(", "synset", ".", "id", ")", "LOADED_POS", ".", "add", "(", "pos", ")", "return", "stored_synsets", "+", "synsets" ]
Return all the synsets which have the provided pos. Notes ----- Returns thousands or tens of thousands of synsets - first time will take significant time. Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time. Parameters ---------- pos : str Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`. If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time. Returns ------- list of Synsets Lists the Synsets which have `pos` as part-of-speech. Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
[ "Return", "all", "the", "synsets", "which", "have", "the", "provided", "pos", "." ]
python
train
37.322034
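A hedged usage sketch following the module's own constants; note the docstring's warning that the first call is slow while later calls are served from the cache:

from estnltk.wordnet import wn

nouns = wn.all_synsets(pos=wn.NOUN)  # slow the first time, cached afterwards
print(len(nouns))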
llllllllll/codetransformer
codetransformer/decompiler/_343.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L1490-L1527
def make_defaults_and_annotations(make_function_instr, builders): """ Get the AST expressions corresponding to the defaults, kwonly defaults, and annotations for a function created by `make_function_instr`. """ # Integer counts. n_defaults, n_kwonlydefaults, n_annotations = unpack_make_function_arg( make_function_instr.arg ) if n_annotations: # TOS should be a tuple of annotation names. load_annotation_names = builders.pop() annotations = dict(zip( reversed(load_annotation_names.arg), (make_expr(builders) for _ in range(n_annotations - 1)) )) else: annotations = {} kwonlys = {} while n_kwonlydefaults: default_expr = make_expr(builders) key_instr = builders.pop() if not isinstance(key_instr, instrs.LOAD_CONST): raise DecompilationError( "kwonlydefault key is not a LOAD_CONST: %s" % key_instr ) if not isinstance(key_instr.arg, str): raise DecompilationError( "kwonlydefault key builder is not a " "'LOAD_CONST of a string: %s" % key_instr ) kwonlys[key_instr.arg] = default_expr n_kwonlydefaults -= 1 defaults = make_exprs(builders, n_defaults) return defaults, kwonlys, annotations
[ "def", "make_defaults_and_annotations", "(", "make_function_instr", ",", "builders", ")", ":", "# Integer counts.", "n_defaults", ",", "n_kwonlydefaults", ",", "n_annotations", "=", "unpack_make_function_arg", "(", "make_function_instr", ".", "arg", ")", "if", "n_annotations", ":", "# TOS should be a tuple of annotation names.", "load_annotation_names", "=", "builders", ".", "pop", "(", ")", "annotations", "=", "dict", "(", "zip", "(", "reversed", "(", "load_annotation_names", ".", "arg", ")", ",", "(", "make_expr", "(", "builders", ")", "for", "_", "in", "range", "(", "n_annotations", "-", "1", ")", ")", ")", ")", "else", ":", "annotations", "=", "{", "}", "kwonlys", "=", "{", "}", "while", "n_kwonlydefaults", ":", "default_expr", "=", "make_expr", "(", "builders", ")", "key_instr", "=", "builders", ".", "pop", "(", ")", "if", "not", "isinstance", "(", "key_instr", ",", "instrs", ".", "LOAD_CONST", ")", ":", "raise", "DecompilationError", "(", "\"kwonlydefault key is not a LOAD_CONST: %s\"", "%", "key_instr", ")", "if", "not", "isinstance", "(", "key_instr", ".", "arg", ",", "str", ")", ":", "raise", "DecompilationError", "(", "\"kwonlydefault key builder is not a \"", "\"'LOAD_CONST of a string: %s\"", "%", "key_instr", ")", "kwonlys", "[", "key_instr", ".", "arg", "]", "=", "default_expr", "n_kwonlydefaults", "-=", "1", "defaults", "=", "make_exprs", "(", "builders", ",", "n_defaults", ")", "return", "defaults", ",", "kwonlys", ",", "annotations" ]
Get the AST expressions corresponding to the defaults, kwonly defaults, and annotations for a function created by `make_function_instr`.
[ "Get", "the", "AST", "expressions", "corresponding", "to", "the", "defaults", "kwonly", "defaults", "and", "annotations", "for", "a", "function", "created", "by", "make_function_instr", "." ]
python
train
34.894737
awslabs/mxboard
python/mxboard/summary.py
https://github.com/awslabs/mxboard/blob/36057ff0f05325c9dc2fe046521325bf9d563a88/python/mxboard/summary.py#L169-L189
def image_summary(tag, image): """Outputs a `Summary` protocol buffer with image(s). Parameters ---------- tag : str A name for the generated summary. Will also serve as a series name in TensorBoard. image : MXNet `NDArray` or `numpy.ndarray` Image data that is one of the following layout: (H, W), (C, H, W), (N, C, H, W). The pixel values of the image are assumed to be normalized in the range [0, 1]. The image will be rescaled to the range [0, 255] and cast to `np.uint8` before creating the image protobuf. Returns ------- A `Summary` protobuf of the image. """ tag = _clean_tag(tag) image = _prepare_image(image) image = _make_image(image) return Summary(value=[Summary.Value(tag=tag, image=image)])
[ "def", "image_summary", "(", "tag", ",", "image", ")", ":", "tag", "=", "_clean_tag", "(", "tag", ")", "image", "=", "_prepare_image", "(", "image", ")", "image", "=", "_make_image", "(", "image", ")", "return", "Summary", "(", "value", "=", "[", "Summary", ".", "Value", "(", "tag", "=", "tag", ",", "image", "=", "image", ")", "]", ")" ]
Outputs a `Summary` protocol buffer with image(s). Parameters ---------- tag : str A name for the generated summary. Will also serve as a series name in TensorBoard. image : MXNet `NDArray` or `numpy.ndarray` Image data that is one of the following layout: (H, W), (C, H, W), (N, C, H, W). The pixel values of the image are assumed to be normalized in the range [0, 1]. The image will be rescaled to the range [0, 255] and cast to `np.uint8` before creating the image protobuf. Returns ------- A `Summary` protobuf of the image.
[ "Outputs", "a", "Summary", "protocol", "buffer", "with", "image", "(", "s", ")", "." ]
python
train
38.619048
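A small sketch with a synthetic (C, H, W) image normalized to [0, 1], as the docstring requires; the import path mirrors this file's location:

import numpy as np
from mxboard.summary import image_summary

img = np.random.uniform(size=(3, 64, 64))  # (C, H, W), values in [0, 1]
summ = image_summary('random_image', img)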
geophysics-ubonn/crtomo_tools
lib/crtomo/parManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/parManager.py#L171-L190
def load_model_from_file(self, filename):
        """Load one parameter set from a file which contains one value per
        line

        No row is skipped.

        Parameters
        ----------
        filename : string, file path
            Filename to load data from

        Returns
        -------
        pid : int
            ID of parameter set
        """
        assert os.path.isfile(filename)
        data = np.loadtxt(filename).squeeze()
        assert len(data.shape) == 1
        pid = self.add_data(data)
        return pid
[ "def", "load_model_from_file", "(", "self", ",", "filename", ")", ":", "assert", "os", ".", "path", ".", "isfile", "(", "filename", ")", "data", "=", "np", ".", "loadtxt", "(", "filename", ")", ".", "squeeze", "(", ")", "assert", "len", "(", "data", ".", "shape", ")", "==", "1", "pid", "=", "self", ".", "add_data", "(", "data", ")", "return", "pid" ]
Load one parameter set from a file which contains one value per line

No row is skipped.

Parameters
----------
filename : string, file path
    Filename to load data from

Returns
-------
pid : int
    ID of parameter set
[ "Load", "one", "parameter", "set", "from", "a", "file", "which", "contains", "one", "value", "per", "line" ]
python
train
25.85
dailymuse/oz
oz/sqlalchemy/middleware.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/middleware.py#L55-L73
def _sqlalchemy_on_connection_close(self):
        """
        Rolls back and closes the active session, since the client disconnected before
        the request could be completed.
        """

        if hasattr(self, "_db_conns"):
            try:
                for db_conn in self._db_conns.values():
                    db_conn.rollback()
            except:
                tornado.log.app_log.warning("Error occurred during database transaction cleanup: %s", str(sys.exc_info()[0]))
                raise
            finally:
                for db_conn in self._db_conns.values():
                    try:
                        db_conn.close()
                    except:
                        tornado.log.app_log.warning("Error occurred when closing the database connection", exc_info=True)
[ "def", "_sqlalchemy_on_connection_close", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "\"_db_conns\"", ")", ":", "try", ":", "for", "db_conn", "in", "self", ".", "_db_conns", ".", "values", "(", ")", ":", "db_conn", ".", "rollback", "(", ")", "except", ":", "tornado", ".", "log", ".", "app_log", ".", "warning", "(", "\"Error occurred during database transaction cleanup: %s\"", ",", "str", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", ")", "raise", "finally", ":", "for", "db_conn", "in", "self", ".", "_db_conns", ".", "values", "(", ")", ":", "try", ":", "db_conn", ".", "close", "(", ")", "except", ":", "tornado", ".", "log", ".", "app_log", ".", "warning", "(", "\"Error occurred when closing the database connection\"", ",", "exc_info", "=", "True", ")" ]
Rolls back and closes the active session, since the client disconnected before
the request could be completed.
[ "Rolls", "back", "and", "closes", "the", "active", "session", "since", "the", "client", "disconnected", "before", "the", "request", "could", "be", "completed", "." ]
python
train
41.368421
LLNL/scraper
scraper/github/queryManager.py
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L101-L139
def _readGQL(self, filePath, verbose=False): """Read a 'pretty' formatted GraphQL query file into a one-line string. Removes line breaks and comments. Condenses white space. Args: filePath (str): A relative or absolute path to a file containing a GraphQL query. File may use comments and multi-line formatting. .. _GitHub GraphQL Explorer: https://developer.github.com/v4/explorer/ verbose (Optional[bool]): If False, prints will be suppressed. Defaults to False. Returns: str: A single line GraphQL query. """ if not os.path.isfile(filePath): raise RuntimeError("Query file '%s' does not exist." % (filePath)) lastModified = os.path.getmtime(filePath) absPath = os.path.abspath(filePath) if absPath == self.__queryPath and lastModified == self.__queryTimestamp: _vPrint(verbose, "Using cached query '%s'" % (os.path.basename(self.__queryPath))) query_in = self.__query else: _vPrint(verbose, "Reading '%s' ... " % (filePath), end="", flush=True) with open(filePath, "r") as q: # Strip all comments and newlines. query_in = re.sub(r'#.*(\n|\Z)', '\n', q.read()) # Condense extra whitespace. query_in = re.sub(r'\s+', ' ', query_in) # Remove any leading or trailing whitespace. query_in = re.sub(r'(\A\s+)|(\s+\Z)', '', query_in) _vPrint(verbose, "File read!") self.__queryPath = absPath self.__queryTimestamp = lastModified self.__query = query_in return query_in
[ "def", "_readGQL", "(", "self", ",", "filePath", ",", "verbose", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "raise", "RuntimeError", "(", "\"Query file '%s' does not exist.\"", "%", "(", "filePath", ")", ")", "lastModified", "=", "os", ".", "path", ".", "getmtime", "(", "filePath", ")", "absPath", "=", "os", ".", "path", ".", "abspath", "(", "filePath", ")", "if", "absPath", "==", "self", ".", "__queryPath", "and", "lastModified", "==", "self", ".", "__queryTimestamp", ":", "_vPrint", "(", "verbose", ",", "\"Using cached query '%s'\"", "%", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "__queryPath", ")", ")", ")", "query_in", "=", "self", ".", "__query", "else", ":", "_vPrint", "(", "verbose", ",", "\"Reading '%s' ... \"", "%", "(", "filePath", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "with", "open", "(", "filePath", ",", "\"r\"", ")", "as", "q", ":", "# Strip all comments and newlines.", "query_in", "=", "re", ".", "sub", "(", "r'#.*(\\n|\\Z)'", ",", "'\\n'", ",", "q", ".", "read", "(", ")", ")", "# Condense extra whitespace.", "query_in", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "query_in", ")", "# Remove any leading or trailing whitespace.", "query_in", "=", "re", ".", "sub", "(", "r'(\\A\\s+)|(\\s+\\Z)'", ",", "''", ",", "query_in", ")", "_vPrint", "(", "verbose", ",", "\"File read!\"", ")", "self", ".", "__queryPath", "=", "absPath", "self", ".", "__queryTimestamp", "=", "lastModified", "self", ".", "__query", "=", "query_in", "return", "query_in" ]
Read a 'pretty' formatted GraphQL query file into a one-line string.

Removes line breaks and comments. Condenses white space.

Args:
    filePath (str): A relative or absolute path to a file containing
        a GraphQL query.
        File may use comments and multi-line formatting.
        .. _GitHub GraphQL Explorer:
            https://developer.github.com/v4/explorer/
    verbose (Optional[bool]): If False, prints will be suppressed.
        Defaults to False.

Returns:
    str: A single line GraphQL query.
[ "Read", "a", "pretty", "formatted", "GraphQL", "query", "file", "into", "a", "one", "-", "line", "string", "." ]
python
test
44.615385
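The three-pass comment-stripping pipeline in _readGQL can be checked in isolation; a minimal sketch, with a made-up query string:

import re

pretty = '''
# fetch the repository name
query {
  repository(owner: "LLNL", name: "scraper") {  # inline comment
    name
  }
}
'''

q = re.sub(r'#.*(\n|\Z)', '\n', pretty)  # strip comments through end of line
q = re.sub(r'\s+', ' ', q)               # condense whitespace runs to single spaces
q = re.sub(r'(\A\s+)|(\s+\Z)', '', q)    # trim leading/trailing whitespace
print(q)  # query { repository(owner: "LLNL", name: "scraper") { name } }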
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QATdx.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L1376-L1391
def QA_fetch_get_macroindex_list(ip=None, port=None):
    """宏观指标列表

    Keyword Arguments:
        ip {[type]} -- [description] (default: {None})
        port {[type]} -- [description] (default: {None})

    38 10 宏观指标 HG
    """
    global extension_market_list
    extension_market_list = QA_fetch_get_extensionmarket_list(
    ) if extension_market_list is None else extension_market_list
    return extension_market_list.query('market==38')
[ "def", "QA_fetch_get_macroindex_list", "(", "ip", "=", "None", ",", "port", "=", "None", ")", ":", "global", "extension_market_list", "extension_market_list", "=", "QA_fetch_get_extensionmarket_list", "(", ")", "if", "extension_market_list", "is", "None", "else", "extension_market_list", "return", "extension_market_list", ".", "query", "(", "'market==38'", ")" ]
宏观指标列表

Keyword Arguments:
    ip {[type]} -- [description] (default: {None})
    port {[type]} -- [description] (default: {None})

38 10 宏观指标 HG
[ "宏观指标列表" ]
python
train
28.5625
CodeLineFi/maclookup-python
src/maclookup/api_client.py
https://github.com/CodeLineFi/maclookup-python/blob/0d87dc6cb1a8c8583c9d242fbb3e98d70d83664f/src/maclookup/api_client.py#L32-L48
def get(self, mac):
    """Get data from API as instance of ResponseModel.

    Keyword arguments:
        mac -- MAC address or OUI for searching
    """
    data = {
        self._FORMAT_F: 'json',
        self._SEARCH_F: mac
    }

    response = self.__decode_str(self.__call_api(self.__url, data), 'utf-8')

    if len(response) > 0:
        return self.__parse(response)

    raise EmptyResponseException()
[ "def", "get", "(", "self", ",", "mac", ")", ":", "data", "=", "{", "self", ".", "_FORMAT_F", ":", "'json'", ",", "self", ".", "_SEARCH_F", ":", "mac", "}", "response", "=", "self", ".", "__decode_str", "(", "self", ".", "__call_api", "(", "self", ".", "__url", ",", "data", ")", ",", "'utf-8'", ")", "if", "len", "(", "response", ")", ">", "0", ":", "return", "self", ".", "__parse", "(", "response", ")", "raise", "EmptyResponseException", "(", ")" ]
Get data from API as instance of ResponseModel.

Keyword arguments:
    mac -- MAC address or OUI for searching
[ "Get", "data", "from", "API", "as", "instance", "of", "ResponseModel", "." ]
python
train
26.352941
jhorman/pledge
pledge/__init__.py
https://github.com/jhorman/pledge/blob/062ba5b788aeb15e68c85a329374a50b4618544d/pledge/__init__.py#L78-L103
def post(cond):
    """
    Add a postcondition check to the annotated method. The condition
    is passed the return value of the annotated method.
    """
    source = inspect.getsource(cond).strip()

    def inner(f):
        if enabled:
            # deal with the real function, not a wrapper
            f = getattr(f, 'wrapped_fn', f)

            def check_condition(result):
                if not cond(result):
                    raise AssertionError('Postcondition failure, %s' % source)

            # append to the rest of the postconditions attached to this method
            if not hasattr(f, 'postconditions'):
                f.postconditions = []
            f.postconditions.append(check_condition)

            return check(f)
        else:
            return f

    return inner
[ "def", "post", "(", "cond", ")", ":", "source", "=", "inspect", ".", "getsource", "(", "cond", ")", ".", "strip", "(", ")", "def", "inner", "(", "f", ")", ":", "if", "enabled", ":", "# deal with the real function, not a wrapper", "f", "=", "getattr", "(", "f", ",", "'wrapped_fn'", ",", "f", ")", "def", "check_condition", "(", "result", ")", ":", "if", "not", "cond", "(", "result", ")", ":", "raise", "AssertionError", "(", "'Postcondition failure, %s'", "%", "source", ")", "# append to the rest of the postconditions attached to this method", "if", "not", "hasattr", "(", "f", ",", "'postconditions'", ")", ":", "f", ".", "postconditions", "=", "[", "]", "f", ".", "postconditions", ".", "append", "(", "check_condition", ")", "return", "check", "(", "f", ")", "else", ":", "return", "f", "return", "inner" ]
Add a postcondition check to the annotated method. The condition is passed the return value of the annotated method.
[ "Add", "a", "postcondition", "check", "to", "the", "annotated", "method", ".", "The", "condition", "is", "passed", "the", "return", "value", "of", "the", "annotated", "method", "." ]
python
train
29.923077
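The decorator above is used by wrapping a function with a predicate over its return value; a hedged usage sketch (assuming post is importable from the pledge package and checks are enabled):

from pledge import post  # assumed import path

@post(lambda result: result >= 0)
def absolute_difference(a, b):
    return abs(a - b)

absolute_difference(3, 10)  # returns 7; the postcondition holds
# A buggy variant returning a negative number would raise
# AssertionError('Postcondition failure, ...') while checks are enabled.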
mixer/beam-interactive-python
beam_interactive/proto/varint.py
https://github.com/mixer/beam-interactive-python/blob/e035bc45515dea9315b77648a24b5ae8685aa5cf/beam_interactive/proto/varint.py#L62-L86
def _SignedVarintDecoder(mask):
    """Like _VarintDecoder() but decodes signed values."""

    def DecodeVarint(buffer, pos):
        result = 0
        shift = 0
        while 1:
            if pos > len(buffer) - 1:
                raise NotEnoughDataException(
                    "Not enough data to decode varint"
                )
            b = local_ord(buffer[pos])
            result |= ((b & 0x7f) << shift)
            pos += 1
            if not (b & 0x80):
                if result > 0x7fffffffffffffff:
                    result -= (1 << 64)
                    result |= ~mask
                else:
                    result &= mask
                return (result, pos)
            shift += 7
            if shift >= 64:
                raise _DecodeError('Too many bytes when decoding varint.')
    return DecodeVarint
[ "def", "_SignedVarintDecoder", "(", "mask", ")", ":", "def", "DecodeVarint", "(", "buffer", ",", "pos", ")", ":", "result", "=", "0", "shift", "=", "0", "while", "1", ":", "if", "pos", ">", "len", "(", "buffer", ")", "-", "1", ":", "raise", "NotEnoughDataException", "(", "\"Not enough data to decode varint\"", ")", "b", "=", "local_ord", "(", "buffer", "[", "pos", "]", ")", "result", "|=", "(", "(", "b", "&", "0x7f", ")", "<<", "shift", ")", "pos", "+=", "1", "if", "not", "(", "b", "&", "0x80", ")", ":", "if", "result", ">", "0x7fffffffffffffff", ":", "result", "-=", "(", "1", "<<", "64", ")", "result", "|=", "~", "mask", "else", ":", "result", "&=", "mask", "return", "(", "result", ",", "pos", ")", "shift", "+=", "7", "if", "shift", ">=", "64", ":", "raise", "_DecodeError", "(", "'Too many bytes when decoding varint.'", ")", "return", "DecodeVarint" ]
Like _VarintDecoder() but decodes signed values.
[ "Like", "_VarintDecoder", "()", "but", "decodes", "signed", "values", "." ]
python
train
26.08
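A standalone rerun of the decode loop makes the sign handling concrete; this sketch drops local_ord and the exception plumbing (Python 3 bytes indexing already yields ints) and uses the 64-bit mask:

def decode_signed_varint64(buf, pos, mask=(1 << 64) - 1):
    result, shift = 0, 0
    while True:
        b = buf[pos]                     # int in Python 3
        result |= (b & 0x7f) << shift    # accumulate 7-bit groups, little-endian
        pos += 1
        if not (b & 0x80):               # high bit clear: last byte
            if result > 0x7fffffffffffffff:
                result -= (1 << 64)      # reinterpret as two's-complement negative
                result |= ~mask
            else:
                result &= mask
            return result, pos
        shift += 7

print(decode_signed_varint64(b'\xac\x02', 0))                                  # (300, 2)
print(decode_signed_varint64(b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01', 0))  # (-1, 10)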
ReFirmLabs/binwalk
src/binwalk/plugins/unpfs.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/plugins/unpfs.py#L10-L13
def _make_short(self, data, endianness):
    """Returns a 2 byte integer."""
    data = binwalk.core.compat.str2bytes(data)
    return struct.unpack('%sH' % endianness, data)[0]
[ "def", "_make_short", "(", "self", ",", "data", ",", "endianness", ")", ":", "data", "=", "binwalk", ".", "core", ".", "compat", ".", "str2bytes", "(", "data", ")", "return", "struct", ".", "unpack", "(", "'%sH'", "%", "endianness", ",", "data", ")", "[", "0", "]" ]
Returns a 2 byte integer.
[ "Returns", "a", "2", "byte", "integer", "." ]
python
train
46.5
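The '%sH' format string simply prefixes struct's unsigned-short code with an endianness flag; two lines show the difference:

import struct

print(struct.unpack('<H', b'\x01\x02')[0])  # 513 (little-endian: 0x0201)
print(struct.unpack('>H', b'\x01\x02')[0])  # 258 (big-endian: 0x0102)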
pymupdf/PyMuPDF
fitz/utils.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/utils.py#L2136-L2203
def drawSector(self, center, point, beta, fullSector=True):
    """Draw a circle sector.
    """
    center = Point(center)
    point = Point(point)
    l3 = "%g %g m\n"
    l4 = "%g %g %g %g %g %g c\n"
    l5 = "%g %g l\n"
    betar = math.radians(-beta)
    w360 = math.radians(math.copysign(360, betar)) * (-1)
    w90 = math.radians(math.copysign(90, betar))
    w45 = w90 / 2
    while abs(betar) > 2 * math.pi:
        betar += w360                    # bring angle below 360 degrees
    if not (self.lastPoint == point):
        self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
        self.lastPoint = point
    Q = Point(0, 0)                      # just make sure it exists
    C = center
    P = point
    S = P - C                            # vector 'center' -> 'point'
    rad = abs(S)                         # circle radius
    if not rad > 1e-5:
        raise ValueError("radius must be positive")
    alfa = self.horizontal_angle(center, point)
    while abs(betar) > abs(w90):         # draw 90 degree arcs
        q1 = C.x + math.cos(alfa + w90) * rad
        q2 = C.y + math.sin(alfa + w90) * rad
        Q = Point(q1, q2)                # the arc's end point
        r1 = C.x + math.cos(alfa + w45) * rad / math.cos(w45)
        r2 = C.y + math.sin(alfa + w45) * rad / math.cos(w45)
        R = Point(r1, r2)                # crossing point of tangents
        kappah = (1 - math.cos(w45)) * 4 / 3 / abs(R - Q)
        kappa = kappah * abs(P - Q)
        cp1 = P + (R - P) * kappa        # control point 1
        cp2 = Q + (R - Q) * kappa        # control point 2
        self.draw_cont += l4 % JM_TUPLE(list(cp1 * self.ipctm) + \
                                        list(cp2 * self.ipctm) + \
                                        list(Q * self.ipctm))
        betar -= w90                     # reduce parm angle by 90 deg
        alfa += w90                      # advance start angle by 90 deg
        P = Q                            # advance to arc end point
    # draw (remaining) arc
    if abs(betar) > 1e-3:                # significant degrees left?
        beta2 = betar / 2
        q1 = C.x + math.cos(alfa + betar) * rad
        q2 = C.y + math.sin(alfa + betar) * rad
        Q = Point(q1, q2)                # the arc's end point
        r1 = C.x + math.cos(alfa + beta2) * rad / math.cos(beta2)
        r2 = C.y + math.sin(alfa + beta2) * rad / math.cos(beta2)
        R = Point(r1, r2)                # crossing point of tangents
        # kappa height is 4/3 of segment height
        kappah = (1 - math.cos(beta2)) * 4 / 3 / abs(R - Q)   # kappa height
        kappa = kappah * abs(P - Q) / (1 - math.cos(betar))
        cp1 = P + (R - P) * kappa        # control point 1
        cp2 = Q + (R - Q) * kappa        # control point 2
        self.draw_cont += l4 % JM_TUPLE(list(cp1 * self.ipctm) + \
                                        list(cp2 * self.ipctm) + \
                                        list(Q * self.ipctm))
    if fullSector:
        self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
        self.draw_cont += l5 % JM_TUPLE(center * self.ipctm)
        self.draw_cont += l5 % JM_TUPLE(Q * self.ipctm)
    self.lastPoint = Q
    return self.lastPoint
[ "def", "drawSector", "(", "self", ",", "center", ",", "point", ",", "beta", ",", "fullSector", "=", "True", ")", ":", "center", "=", "Point", "(", "center", ")", "point", "=", "Point", "(", "point", ")", "l3", "=", "\"%g %g m\\n\"", "l4", "=", "\"%g %g %g %g %g %g c\\n\"", "l5", "=", "\"%g %g l\\n\"", "betar", "=", "math", ".", "radians", "(", "-", "beta", ")", "w360", "=", "math", ".", "radians", "(", "math", ".", "copysign", "(", "360", ",", "betar", ")", ")", "*", "(", "-", "1", ")", "w90", "=", "math", ".", "radians", "(", "math", ".", "copysign", "(", "90", ",", "betar", ")", ")", "w45", "=", "w90", "/", "2", "while", "abs", "(", "betar", ")", ">", "2", "*", "math", ".", "pi", ":", "betar", "+=", "w360", "# bring angle below 360 degrees", "if", "not", "(", "self", ".", "lastPoint", "==", "point", ")", ":", "self", ".", "draw_cont", "+=", "l3", "%", "JM_TUPLE", "(", "point", "*", "self", ".", "ipctm", ")", "self", ".", "lastPoint", "=", "point", "Q", "=", "Point", "(", "0", ",", "0", ")", "# just make sure it exists", "C", "=", "center", "P", "=", "point", "S", "=", "P", "-", "C", "# vector 'center' -> 'point'", "rad", "=", "abs", "(", "S", ")", "# circle radius", "if", "not", "rad", ">", "1e-5", ":", "raise", "ValueError", "(", "\"radius must be positive\"", ")", "alfa", "=", "self", ".", "horizontal_angle", "(", "center", ",", "point", ")", "while", "abs", "(", "betar", ")", ">", "abs", "(", "w90", ")", ":", "# draw 90 degree arcs", "q1", "=", "C", ".", "x", "+", "math", ".", "cos", "(", "alfa", "+", "w90", ")", "*", "rad", "q2", "=", "C", ".", "y", "+", "math", ".", "sin", "(", "alfa", "+", "w90", ")", "*", "rad", "Q", "=", "Point", "(", "q1", ",", "q2", ")", "# the arc's end point", "r1", "=", "C", ".", "x", "+", "math", ".", "cos", "(", "alfa", "+", "w45", ")", "*", "rad", "/", "math", ".", "cos", "(", "w45", ")", "r2", "=", "C", ".", "y", "+", "math", ".", "sin", "(", "alfa", "+", "w45", ")", "*", "rad", "/", "math", ".", "cos", "(", "w45", ")", "R", "=", "Point", "(", "r1", ",", "r2", ")", "# crossing point of tangents", "kappah", "=", "(", "1", "-", "math", ".", "cos", "(", "w45", ")", ")", "*", "4", "/", "3", "/", "abs", "(", "R", "-", "Q", ")", "kappa", "=", "kappah", "*", "abs", "(", "P", "-", "Q", ")", "cp1", "=", "P", "+", "(", "R", "-", "P", ")", "*", "kappa", "# control point 1", "cp2", "=", "Q", "+", "(", "R", "-", "Q", ")", "*", "kappa", "# control point 2", "self", ".", "draw_cont", "+=", "l4", "%", "JM_TUPLE", "(", "list", "(", "cp1", "*", "self", ".", "ipctm", ")", "+", "list", "(", "cp2", "*", "self", ".", "ipctm", ")", "+", "list", "(", "Q", "*", "self", ".", "ipctm", ")", ")", "betar", "-=", "w90", "# reduce parm angle by 90 deg", "alfa", "+=", "w90", "# advance start angle by 90 deg", "P", "=", "Q", "# advance to arc end point", "# draw (remaining) arc", "if", "abs", "(", "betar", ")", ">", "1e-3", ":", "# significant degrees left?", "beta2", "=", "betar", "/", "2", "q1", "=", "C", ".", "x", "+", "math", ".", "cos", "(", "alfa", "+", "betar", ")", "*", "rad", "q2", "=", "C", ".", "y", "+", "math", ".", "sin", "(", "alfa", "+", "betar", ")", "*", "rad", "Q", "=", "Point", "(", "q1", ",", "q2", ")", "# the arc's end point", "r1", "=", "C", ".", "x", "+", "math", ".", "cos", "(", "alfa", "+", "beta2", ")", "*", "rad", "/", "math", ".", "cos", "(", "beta2", ")", "r2", "=", "C", ".", "y", "+", "math", ".", "sin", "(", "alfa", "+", "beta2", ")", "*", "rad", "/", "math", ".", "cos", "(", "beta2", ")", "R", "=", "Point", "(", "r1", ",", "r2", ")", "# crossing point of 
tangents", "# kappa height is 4/3 of segment height", "kappah", "=", "(", "1", "-", "math", ".", "cos", "(", "beta2", ")", ")", "*", "4", "/", "3", "/", "abs", "(", "R", "-", "Q", ")", "# kappa height", "kappa", "=", "kappah", "*", "abs", "(", "P", "-", "Q", ")", "/", "(", "1", "-", "math", ".", "cos", "(", "betar", ")", ")", "cp1", "=", "P", "+", "(", "R", "-", "P", ")", "*", "kappa", "# control point 1", "cp2", "=", "Q", "+", "(", "R", "-", "Q", ")", "*", "kappa", "# control point 2", "self", ".", "draw_cont", "+=", "l4", "%", "JM_TUPLE", "(", "list", "(", "cp1", "*", "self", ".", "ipctm", ")", "+", "list", "(", "cp2", "*", "self", ".", "ipctm", ")", "+", "list", "(", "Q", "*", "self", ".", "ipctm", ")", ")", "if", "fullSector", ":", "self", ".", "draw_cont", "+=", "l3", "%", "JM_TUPLE", "(", "point", "*", "self", ".", "ipctm", ")", "self", ".", "draw_cont", "+=", "l5", "%", "JM_TUPLE", "(", "center", "*", "self", ".", "ipctm", ")", "self", ".", "draw_cont", "+=", "l5", "%", "JM_TUPLE", "(", "Q", "*", "self", ".", "ipctm", ")", "self", ".", "lastPoint", "=", "Q", "return", "self", ".", "lastPoint" ]
Draw a circle sector.
[ "Draw", "a", "circle", "sector", "." ]
python
train
50.441176
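The (1 - cos) * 4 / 3 terms above come from the cubic-Bezier circular-arc approximation. As a standalone sanity check (plain math, not the PyMuPDF API), the related textbook control-point constant for a 90-degree arc is:

import math

theta = math.pi / 2              # a 90-degree arc
k = 4 / 3 * math.tan(theta / 4)  # classic Bezier circle constant
print(round(k, 5))               # 0.55228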
PythonCharmers/python-future
src/future/backports/xmlrpc/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/xmlrpc/server.py#L311-L340
def system_methodHelp(self, method_name):
    """system.methodHelp('add') => "Adds two integers together"

    Returns a string containing documentation for the specified method."""

    method = None
    if method_name in self.funcs:
        method = self.funcs[method_name]
    elif self.instance is not None:
        # Instance can implement _methodHelp to return help for a method
        if hasattr(self.instance, '_methodHelp'):
            return self.instance._methodHelp(method_name)
        # if the instance has a _dispatch method then we
        # don't have enough information to provide help
        elif not hasattr(self.instance, '_dispatch'):
            try:
                method = resolve_dotted_attribute(
                    self.instance,
                    method_name,
                    self.allow_dotted_names
                    )
            except AttributeError:
                pass

    # Note that we aren't checking that the method actually
    # be a callable object of some kind
    if method is None:
        return ""
    else:
        return pydoc.getdoc(method)
[ "def", "system_methodHelp", "(", "self", ",", "method_name", ")", ":", "method", "=", "None", "if", "method_name", "in", "self", ".", "funcs", ":", "method", "=", "self", ".", "funcs", "[", "method_name", "]", "elif", "self", ".", "instance", "is", "not", "None", ":", "# Instance can implement _methodHelp to return help for a method", "if", "hasattr", "(", "self", ".", "instance", ",", "'_methodHelp'", ")", ":", "return", "self", ".", "instance", ".", "_methodHelp", "(", "method_name", ")", "# if the instance has a _dispatch method then we", "# don't have enough information to provide help", "elif", "not", "hasattr", "(", "self", ".", "instance", ",", "'_dispatch'", ")", ":", "try", ":", "method", "=", "resolve_dotted_attribute", "(", "self", ".", "instance", ",", "method_name", ",", "self", ".", "allow_dotted_names", ")", "except", "AttributeError", ":", "pass", "# Note that we aren't checking that the method actually", "# be a callable object of some kind", "if", "method", "is", "None", ":", "return", "\"\"", "else", ":", "return", "pydoc", ".", "getdoc", "(", "method", ")" ]
system.methodHelp('add') => "Adds two integers together"

Returns a string containing documentation for the specified method.
[ "system", ".", "methodHelp", "(", "add", ")", "=", ">", "Adds", "two", "integers", "together" ]
python
train
40.4
horazont/aioxmpp
aioxmpp/stream.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L2509-L2542
def send_iq_and_wait_for_reply(self, iq, *, timeout=None):
    """
    Send an IQ stanza `iq` and wait for the response. If `timeout` is not
    :data:`None`, it must be the time in seconds for which to wait for a
    response.

    If the response is a ``"result"`` IQ, the value of the
    :attr:`~aioxmpp.IQ.payload` attribute is returned. Otherwise, the
    exception generated from the :attr:`~aioxmpp.IQ.error` attribute is
    raised.

    .. seealso::

       :meth:`register_iq_response_future` and
       :meth:`send_and_wait_for_sent` for other cases raising exceptions.

    .. deprecated:: 0.8

       This method will be removed in 1.0. Use :meth:`send` instead.

    .. versionchanged:: 0.8

       On a timeout, :class:`TimeoutError` is now raised instead of
       :class:`asyncio.TimeoutError`.
    """
    warnings.warn(
        r"send_iq_and_wait_for_reply is deprecated and will be removed in"
        r" 1.0",
        DeprecationWarning,
        stacklevel=1,
    )
    return (yield from self.send(iq, timeout=timeout))
[ "def", "send_iq_and_wait_for_reply", "(", "self", ",", "iq", ",", "*", ",", "timeout", "=", "None", ")", ":", "warnings", ".", "warn", "(", "r\"send_iq_and_wait_for_reply is deprecated and will be removed in\"", "r\" 1.0\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "1", ",", ")", "return", "(", "yield", "from", "self", ".", "send", "(", "iq", ",", "timeout", "=", "timeout", ")", ")" ]
Send an IQ stanza `iq` and wait for the response. If `timeout` is not
:data:`None`, it must be the time in seconds for which to wait for a
response.

If the response is a ``"result"`` IQ, the value of the
:attr:`~aioxmpp.IQ.payload` attribute is returned. Otherwise, the
exception generated from the :attr:`~aioxmpp.IQ.error` attribute is
raised.

.. seealso::

   :meth:`register_iq_response_future` and
   :meth:`send_and_wait_for_sent` for other cases raising exceptions.

.. deprecated:: 0.8

   This method will be removed in 1.0. Use :meth:`send` instead.

.. versionchanged:: 0.8

   On a timeout, :class:`TimeoutError` is now raised instead of
   :class:`asyncio.TimeoutError`.
[ "Send", "an", "IQ", "stanza", "iq", "and", "wait", "for", "the", "response", ".", "If", "timeout", "is", "not", ":", "data", ":", "None", "it", "must", "be", "the", "time", "in", "seconds", "for", "which", "to", "wait", "for", "a", "response", "." ]
python
train
33.558824
mkoura/dump2polarion
dump2polarion/csv2sqlite_cli.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/csv2sqlite_cli.py#L80-L113
def main(args=None):
    """Main function for cli."""
    args = get_args(args)
    utils.init_log(args.log_level)

    if ".csv" not in args.input_file.lower():
        logger.warning("Make sure the input file '%s' is in CSV format", args.input_file)

    try:
        records = csvtools.get_imported_data(args.input_file)
    except (EnvironmentError, Dump2PolarionException) as err:
        logger.fatal(err)
        return 1

    # check if all columns required by `pytest_polarion_cfme` are there
    required_columns = {"id": "ID", "title": "Title"}
    missing_columns = [required_columns[k] for k in required_columns if k not in records.results[0]]
    if missing_columns:
        logger.fatal(
            "The input file '%s' is missing following columns: %s",
            args.input_file,
            ", ".join(missing_columns),
        )
        return 1

    try:
        dump2sqlite(records, args.output_file)
    # pylint: disable=broad-except
    except Exception as err:
        logger.exception(err)
        return 1

    return 0
[ "def", "main", "(", "args", "=", "None", ")", ":", "args", "=", "get_args", "(", "args", ")", "utils", ".", "init_log", "(", "args", ".", "log_level", ")", "if", "\".csv\"", "not", "in", "args", ".", "input_file", ".", "lower", "(", ")", ":", "logger", ".", "warning", "(", "\"Make sure the input file '%s' is in CSV format\"", ",", "args", ".", "input_file", ")", "try", ":", "records", "=", "csvtools", ".", "get_imported_data", "(", "args", ".", "input_file", ")", "except", "(", "EnvironmentError", ",", "Dump2PolarionException", ")", "as", "err", ":", "logger", ".", "fatal", "(", "err", ")", "return", "1", "# check if all columns required by `pytest_polarion_cfme` are there", "required_columns", "=", "{", "\"id\"", ":", "\"ID\"", ",", "\"title\"", ":", "\"Title\"", "}", "missing_columns", "=", "[", "required_columns", "[", "k", "]", "for", "k", "in", "required_columns", "if", "k", "not", "in", "records", ".", "results", "[", "0", "]", "]", "if", "missing_columns", ":", "logger", ".", "fatal", "(", "\"The input file '%s' is missing following columns: %s\"", ",", "args", ".", "input_file", ",", "\", \"", ".", "join", "(", "missing_columns", ")", ",", ")", "return", "1", "try", ":", "dump2sqlite", "(", "records", ",", "args", ".", "output_file", ")", "# pylint: disable=broad-except", "except", "Exception", "as", "err", ":", "logger", ".", "exception", "(", "err", ")", "return", "1", "return", "0" ]
Main function for cli.
[ "Main", "function", "for", "cli", "." ]
python
train
29.882353
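The required-column check reduces to a small dict/list-comprehension pattern; a standalone illustration with a hypothetical first record:

required_columns = {"id": "ID", "title": "Title"}
first_record = {"id": "RT-1", "status": "passed"}  # made-up row with no "title"

missing = [required_columns[k] for k in required_columns if k not in first_record]
print(", ".join(missing))  # Title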
log2timeline/plaso
plaso/storage/sqlite/sqlite_file.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L404-L423
def _ReadAndCheckStorageMetadata(self, check_readable_only=False):
    """Reads storage metadata and checks that the values are valid.

    Args:
      check_readable_only (Optional[bool]): whether the store should only be
          checked to see if it can be read. If False, the store will be checked
          to see if it can be read and written to.
    """
    query = 'SELECT key, value FROM metadata'
    self._cursor.execute(query)

    metadata_values = {row[0]: row[1] for row in self._cursor.fetchall()}

    self._CheckStorageMetadata(
        metadata_values, check_readable_only=check_readable_only)

    self.format_version = metadata_values['format_version']
    self.compression_format = metadata_values['compression_format']
    self.serialization_format = metadata_values['serialization_format']
    self.storage_type = metadata_values['storage_type']
[ "def", "_ReadAndCheckStorageMetadata", "(", "self", ",", "check_readable_only", "=", "False", ")", ":", "query", "=", "'SELECT key, value FROM metadata'", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "metadata_values", "=", "{", "row", "[", "0", "]", ":", "row", "[", "1", "]", "for", "row", "in", "self", ".", "_cursor", ".", "fetchall", "(", ")", "}", "self", ".", "_CheckStorageMetadata", "(", "metadata_values", ",", "check_readable_only", "=", "check_readable_only", ")", "self", ".", "format_version", "=", "metadata_values", "[", "'format_version'", "]", "self", ".", "compression_format", "=", "metadata_values", "[", "'compression_format'", "]", "self", ".", "serialization_format", "=", "metadata_values", "[", "'serialization_format'", "]", "self", ".", "storage_type", "=", "metadata_values", "[", "'storage_type'", "]" ]
Reads storage metadata and checks that the values are valid.

Args:
  check_readable_only (Optional[bool]): whether the store should only be
      checked to see if it can be read. If False, the store will be checked
      to see if it can be read and written to.
[ "Reads", "storage", "metadata", "and", "checks", "that", "the", "values", "are", "valid", "." ]
python
train
42.55
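The fetchall-to-dict step is plain sqlite3 and runs unchanged against an in-memory database; the table contents below are invented for the demo:

import sqlite3

conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
cursor.execute('CREATE TABLE metadata (key TEXT, value TEXT)')
cursor.executemany('INSERT INTO metadata VALUES (?, ?)',
                   [('format_version', '20190309'), ('storage_type', 'session')])

cursor.execute('SELECT key, value FROM metadata')
metadata_values = {row[0]: row[1] for row in cursor.fetchall()}
print(metadata_values)  # {'format_version': '20190309', 'storage_type': 'session'}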
simonw/csvs-to-sqlite
csvs_to_sqlite/cli.py
https://github.com/simonw/csvs-to-sqlite/blob/0a014284eac75c1b06cbdaca362f2a66648c11d2/csvs_to_sqlite/cli.py#L120-L248
def cli(
    paths,
    dbname,
    separator,
    quoting,
    skip_errors,
    replace_tables,
    table,
    extract_column,
    date,
    datetime,
    datetime_format,
    primary_key,
    fts,
    index,
    shape,
    filename_column,
    no_index_fks,
    no_fulltext_fks,
):
    """
    PATHS: paths to individual .csv files or to directories containing .csvs

    DBNAME: name of the SQLite database file to create
    """
    # make plural for more readable code:
    extract_columns = extract_column
    del extract_column
    if extract_columns:
        click.echo("extract_columns={}".format(extract_columns))
    if dbname.endswith(".csv"):
        raise click.BadParameter("dbname must not end with .csv")
    if "." not in dbname:
        dbname += ".db"
    db_existed = os.path.exists(dbname)
    conn = sqlite3.connect(dbname)
    dataframes = []
    csvs = csvs_from_paths(paths)
    sql_type_overrides = None
    for name, path in csvs.items():
        try:
            df = load_csv(path, separator, skip_errors, quoting, shape)
            df.table_name = table or name
            if filename_column:
                df[filename_column] = name
                if shape:
                    shape += ",{}".format(filename_column)
            sql_type_overrides = apply_shape(df, shape)
            apply_dates_and_datetimes(df, date, datetime, datetime_format)
            dataframes.append(df)
        except LoadCsvError as e:
            click.echo("Could not load {}: {}".format(path, e), err=True)

    click.echo("Loaded {} dataframes".format(len(dataframes)))

    # Use extract_columns to build a column:(table,label) dictionary
    foreign_keys = {}
    for col in extract_columns:
        bits = col.split(":")
        if len(bits) == 3:
            foreign_keys[bits[0]] = (bits[1], bits[2])
        elif len(bits) == 2:
            foreign_keys[bits[0]] = (bits[1], "value")
        else:
            foreign_keys[bits[0]] = (bits[0], "value")

    # Now we have loaded the dataframes, we can refactor them
    created_tables = {}
    refactored = refactor_dataframes(
        conn, dataframes, foreign_keys, not no_fulltext_fks
    )
    for df in refactored:
        # This is a bit trickier because we need to
        # create the table with extra SQL for foreign keys
        if replace_tables and table_exists(conn, df.table_name):
            drop_table(conn, df.table_name)
        if table_exists(conn, df.table_name):
            df.to_sql(df.table_name, conn, if_exists="append", index=False)
        else:
            to_sql_with_foreign_keys(
                conn,
                df,
                df.table_name,
                foreign_keys,
                sql_type_overrides,
                primary_keys=primary_key,
                index_fks=not no_index_fks,
            )
        created_tables[df.table_name] = df
        if index:
            for index_defn in index:
                add_index(conn, df.table_name, index_defn)

    # Create FTS tables
    if fts:
        fts_version = best_fts_version()
        if not fts_version:
            conn.close()
            raise click.BadParameter(
                "Your SQLite version does not support any variant of FTS"
            )
        # Check that columns make sense
        for table, df in created_tables.items():
            for fts_column in fts:
                if fts_column not in df.columns:
                    raise click.BadParameter(
                        'FTS column "{}" does not exist'.format(fts_column)
                    )

        generate_and_populate_fts(conn, created_tables.keys(), fts, foreign_keys)

    conn.close()
    if db_existed:
        click.echo(
            "Added {} CSV file{} to {}".format(
                len(csvs), "" if len(csvs) == 1 else "s", dbname
            )
        )
    else:
        click.echo(
            "Created {} from {} CSV file{}".format(
                dbname, len(csvs), "" if len(csvs) == 1 else "s"
            )
        )
[ "def", "cli", "(", "paths", ",", "dbname", ",", "separator", ",", "quoting", ",", "skip_errors", ",", "replace_tables", ",", "table", ",", "extract_column", ",", "date", ",", "datetime", ",", "datetime_format", ",", "primary_key", ",", "fts", ",", "index", ",", "shape", ",", "filename_column", ",", "no_index_fks", ",", "no_fulltext_fks", ",", ")", ":", "# make plural for more readable code:", "extract_columns", "=", "extract_column", "del", "extract_column", "if", "extract_columns", ":", "click", ".", "echo", "(", "\"extract_columns={}\"", ".", "format", "(", "extract_columns", ")", ")", "if", "dbname", ".", "endswith", "(", "\".csv\"", ")", ":", "raise", "click", ".", "BadParameter", "(", "\"dbname must not end with .csv\"", ")", "if", "\".\"", "not", "in", "dbname", ":", "dbname", "+=", "\".db\"", "db_existed", "=", "os", ".", "path", ".", "exists", "(", "dbname", ")", "conn", "=", "sqlite3", ".", "connect", "(", "dbname", ")", "dataframes", "=", "[", "]", "csvs", "=", "csvs_from_paths", "(", "paths", ")", "sql_type_overrides", "=", "None", "for", "name", ",", "path", "in", "csvs", ".", "items", "(", ")", ":", "try", ":", "df", "=", "load_csv", "(", "path", ",", "separator", ",", "skip_errors", ",", "quoting", ",", "shape", ")", "df", ".", "table_name", "=", "table", "or", "name", "if", "filename_column", ":", "df", "[", "filename_column", "]", "=", "name", "if", "shape", ":", "shape", "+=", "\",{}\"", ".", "format", "(", "filename_column", ")", "sql_type_overrides", "=", "apply_shape", "(", "df", ",", "shape", ")", "apply_dates_and_datetimes", "(", "df", ",", "date", ",", "datetime", ",", "datetime_format", ")", "dataframes", ".", "append", "(", "df", ")", "except", "LoadCsvError", "as", "e", ":", "click", ".", "echo", "(", "\"Could not load {}: {}\"", ".", "format", "(", "path", ",", "e", ")", ",", "err", "=", "True", ")", "click", ".", "echo", "(", "\"Loaded {} dataframes\"", ".", "format", "(", "len", "(", "dataframes", ")", ")", ")", "# Use extract_columns to build a column:(table,label) dictionary", "foreign_keys", "=", "{", "}", "for", "col", "in", "extract_columns", ":", "bits", "=", "col", ".", "split", "(", "\":\"", ")", "if", "len", "(", "bits", ")", "==", "3", ":", "foreign_keys", "[", "bits", "[", "0", "]", "]", "=", "(", "bits", "[", "1", "]", ",", "bits", "[", "2", "]", ")", "elif", "len", "(", "bits", ")", "==", "2", ":", "foreign_keys", "[", "bits", "[", "0", "]", "]", "=", "(", "bits", "[", "1", "]", ",", "\"value\"", ")", "else", ":", "foreign_keys", "[", "bits", "[", "0", "]", "]", "=", "(", "bits", "[", "0", "]", ",", "\"value\"", ")", "# Now we have loaded the dataframes, we can refactor them", "created_tables", "=", "{", "}", "refactored", "=", "refactor_dataframes", "(", "conn", ",", "dataframes", ",", "foreign_keys", ",", "not", "no_fulltext_fks", ")", "for", "df", "in", "refactored", ":", "# This is a bit trickier because we need to", "# create the table with extra SQL for foreign keys", "if", "replace_tables", "and", "table_exists", "(", "conn", ",", "df", ".", "table_name", ")", ":", "drop_table", "(", "conn", ",", "df", ".", "table_name", ")", "if", "table_exists", "(", "conn", ",", "df", ".", "table_name", ")", ":", "df", ".", "to_sql", "(", "df", ".", "table_name", ",", "conn", ",", "if_exists", "=", "\"append\"", ",", "index", "=", "False", ")", "else", ":", "to_sql_with_foreign_keys", "(", "conn", ",", "df", ",", "df", ".", "table_name", ",", "foreign_keys", ",", "sql_type_overrides", ",", "primary_keys", "=", "primary_key", ",", "index_fks", "=", 
"not", "no_index_fks", ",", ")", "created_tables", "[", "df", ".", "table_name", "]", "=", "df", "if", "index", ":", "for", "index_defn", "in", "index", ":", "add_index", "(", "conn", ",", "df", ".", "table_name", ",", "index_defn", ")", "# Create FTS tables", "if", "fts", ":", "fts_version", "=", "best_fts_version", "(", ")", "if", "not", "fts_version", ":", "conn", ".", "close", "(", ")", "raise", "click", ".", "BadParameter", "(", "\"Your SQLite version does not support any variant of FTS\"", ")", "# Check that columns make sense", "for", "table", ",", "df", "in", "created_tables", ".", "items", "(", ")", ":", "for", "fts_column", "in", "fts", ":", "if", "fts_column", "not", "in", "df", ".", "columns", ":", "raise", "click", ".", "BadParameter", "(", "'FTS column \"{}\" does not exist'", ".", "format", "(", "fts_column", ")", ")", "generate_and_populate_fts", "(", "conn", ",", "created_tables", ".", "keys", "(", ")", ",", "fts", ",", "foreign_keys", ")", "conn", ".", "close", "(", ")", "if", "db_existed", ":", "click", ".", "echo", "(", "\"Added {} CSV file{} to {}\"", ".", "format", "(", "len", "(", "csvs", ")", ",", "\"\"", "if", "len", "(", "csvs", ")", "==", "1", "else", "\"s\"", ",", "dbname", ")", ")", "else", ":", "click", ".", "echo", "(", "\"Created {} from {} CSV file{}\"", ".", "format", "(", "dbname", ",", "len", "(", "csvs", ")", ",", "\"\"", "if", "len", "(", "csvs", ")", "==", "1", "else", "\"s\"", ")", ")" ]
PATHS: paths to individual .csv files or to directories containing .csvs

DBNAME: name of the SQLite database file to create
[ "PATHS", ":", "paths", "to", "individual", ".", "csv", "files", "or", "to", "directories", "containing", ".", "csvs" ]
python
train
30.147287
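The column:(table,label) parsing admits three spellings; running the same split logic over invented examples of each:

foreign_keys = {}
for col in ["country_code:countries:name", "continent:continents", "species"]:
    bits = col.split(":")
    if len(bits) == 3:
        foreign_keys[bits[0]] = (bits[1], bits[2])   # column:table:label
    elif len(bits) == 2:
        foreign_keys[bits[0]] = (bits[1], "value")   # column:table
    else:
        foreign_keys[bits[0]] = (bits[0], "value")   # column only

print(foreign_keys)
# {'country_code': ('countries', 'name'),
#  'continent': ('continents', 'value'),
#  'species': ('species', 'value')}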
dwavesystems/dwave-system
dwave/system/composites/embedding.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/system/composites/embedding.py#L414-L429
def _adjacency_to_edges(adjacency):
    """determine from an adjacency the list of edges
    if (u, v) in edges, then (v, u) should not be
    """
    edges = set()
    for u in adjacency:
        for v in adjacency[u]:
            try:
                edge = (u, v) if u <= v else (v, u)
            except TypeError:
                # Py3 does not allow sorting of unlike types
                if (v, u) in edges:
                    continue
                edge = (u, v)

            edges.add(edge)
    return edges
[ "def", "_adjacency_to_edges", "(", "adjacency", ")", ":", "edges", "=", "set", "(", ")", "for", "u", "in", "adjacency", ":", "for", "v", "in", "adjacency", "[", "u", "]", ":", "try", ":", "edge", "=", "(", "u", ",", "v", ")", "if", "u", "<=", "v", "else", "(", "v", ",", "u", ")", "except", "TypeError", ":", "# Py3 does not allow sorting of unlike types", "if", "(", "v", ",", "u", ")", "in", "edges", ":", "continue", "edge", "=", "(", "u", ",", "v", ")", "edges", ".", "add", "(", "edge", ")", "return", "edges" ]
determine from an adjacency the list of edges
if (u, v) in edges, then (v, u) should not be
[ "determine", "from", "an", "adjacency", "the", "list", "of", "edges", "if", "(", "u", "v", ")", "in", "edges", "then", "(", "v", "u", ")", "should", "not", "be" ]
python
train
31.25
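For comparable node labels the canonicalisation is one expression (the TypeError branch only matters for mixed-type labels); a tiny standalone run:

adjacency = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}

edges = set()
for u in adjacency:
    for v in adjacency[u]:
        edges.add((u, v) if u <= v else (v, u))  # store each edge in one orientation

print(sorted(edges))  # [(0, 1), (0, 2), (1, 2)]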
rytilahti/python-eq3bt
eq3bt/eq3btsmart.py
https://github.com/rytilahti/python-eq3bt/blob/595459d9885920cf13b7059a1edd2cf38cede1f0/eq3bt/eq3btsmart.py#L375-L378
def activate_eco(self):
    """Activates the eco temperature."""
    value = struct.pack('B', PROP_ECO)
    self._conn.make_request(PROP_WRITE_HANDLE, value)
[ "def", "activate_eco", "(", "self", ")", ":", "value", "=", "struct", ".", "pack", "(", "'B'", ",", "PROP_ECO", ")", "self", ".", "_conn", ".", "make_request", "(", "PROP_WRITE_HANDLE", ",", "value", ")" ]
Activates the eco temperature.
[ "Activates", "the", "eco", "temperature", "." ]
python
train
42.5
mosesschwartz/scrypture
scrypture/scrypture_api.py
https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/scrypture_api.py#L70-L75
def post(self, uri, params={}, data={}):
    '''A generic method to make POST requests on the given URI.'''
    return requests.post(
        urlparse.urljoin(self.BASE_URL, uri),
        params=params, data=json.dumps(data), verify=False,
        auth=self.auth,
        headers = {'Content-type': 'application/json',
                   'Accept': 'text/plain'})
[ "def", "post", "(", "self", ",", "uri", ",", "params", "=", "{", "}", ",", "data", "=", "{", "}", ")", ":", "return", "requests", ".", "post", "(", "urlparse", ".", "urljoin", "(", "self", ".", "BASE_URL", ",", "uri", ")", ",", "params", "=", "params", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "verify", "=", "False", ",", "auth", "=", "self", ".", "auth", ",", "headers", "=", "{", "'Content-type'", ":", "'application/json'", ",", "'Accept'", ":", "'text/plain'", "}", ")" ]
A generic method to make POST requests on the given URI.
[ "A", "generic", "method", "to", "make", "POST", "requests", "on", "the", "given", "URI", "." ]
python
train
58.333333
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L592-L600
def register_click_watcher(self, watcher_name, selectors, *condition_list):
    """
    The watcher clicks on the object which has the *selectors* when conditions match.
    """
    watcher = self.device.watcher(watcher_name)
    for condition in condition_list:
        watcher.when(**self.__unicode_to_dict(condition))
    watcher.click(**self.__unicode_to_dict(selectors))
    self.device.watchers.run()
[ "def", "register_click_watcher", "(", "self", ",", "watcher_name", ",", "selectors", ",", "*", "condition_list", ")", ":", "watcher", "=", "self", ".", "device", ".", "watcher", "(", "watcher_name", ")", "for", "condition", "in", "condition_list", ":", "watcher", ".", "when", "(", "*", "*", "self", ".", "__unicode_to_dict", "(", "condition", ")", ")", "watcher", ".", "click", "(", "*", "*", "self", ".", "__unicode_to_dict", "(", "selectors", ")", ")", "self", ".", "device", ".", "watchers", ".", "run", "(", ")" ]
The watcher clicks on the object which has the *selectors* when conditions match.
[ "The", "watcher", "clicks", "on", "the", "object", "which", "has", "the", "*", "selectors", "*", "when", "conditions", "match", "." ]
python
train
47.666667
TheHive-Project/TheHive4py
thehive4py/api.py
https://github.com/TheHive-Project/TheHive4py/blob/35762bbd50d8376943268464326b59c752d6241b/thehive4py/api.py#L166-L189
def create_task_log(self, task_id, case_task_log):
    """
    :param task_id: Task identifier
    :param case_task_log: TheHive log
    :type case_task_log: CaseTaskLog defined in models.py
    :return: TheHive log
    :rtype: json
    """
    req = self.url + "/api/case/task/{}/log".format(task_id)
    data = {'_json': json.dumps({"message": case_task_log.message})}

    if case_task_log.file:
        f = {'attachment': (os.path.basename(case_task_log.file),
                            open(case_task_log.file, 'rb'),
                            magic.Magic(mime=True).from_file(case_task_log.file))}
        try:
            return requests.post(req, data=data, files=f, proxies=self.proxies,
                                 auth=self.auth, verify=self.cert)
        except requests.exceptions.RequestException as e:
            raise CaseTaskException("Case task log create error: {}".format(e))
    else:
        try:
            return requests.post(req, headers={'Content-Type': 'application/json'},
                                 data=json.dumps({'message': case_task_log.message}),
                                 proxies=self.proxies, auth=self.auth, verify=self.cert)
        except requests.exceptions.RequestException as e:
            raise CaseTaskException("Case task log create error: {}".format(e))
[ "def", "create_task_log", "(", "self", ",", "task_id", ",", "case_task_log", ")", ":", "req", "=", "self", ".", "url", "+", "\"/api/case/task/{}/log\"", ".", "format", "(", "task_id", ")", "data", "=", "{", "'_json'", ":", "json", ".", "dumps", "(", "{", "\"message\"", ":", "case_task_log", ".", "message", "}", ")", "}", "if", "case_task_log", ".", "file", ":", "f", "=", "{", "'attachment'", ":", "(", "os", ".", "path", ".", "basename", "(", "case_task_log", ".", "file", ")", ",", "open", "(", "case_task_log", ".", "file", ",", "'rb'", ")", ",", "magic", ".", "Magic", "(", "mime", "=", "True", ")", ".", "from_file", "(", "case_task_log", ".", "file", ")", ")", "}", "try", ":", "return", "requests", ".", "post", "(", "req", ",", "data", "=", "data", ",", "files", "=", "f", ",", "proxies", "=", "self", ".", "proxies", ",", "auth", "=", "self", ".", "auth", ",", "verify", "=", "self", ".", "cert", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "CaseTaskException", "(", "\"Case task log create error: {}\"", ".", "format", "(", "e", ")", ")", "else", ":", "try", ":", "return", "requests", ".", "post", "(", "req", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", ",", "data", "=", "json", ".", "dumps", "(", "{", "'message'", ":", "case_task_log", ".", "message", "}", ")", ",", "proxies", "=", "self", ".", "proxies", ",", "auth", "=", "self", ".", "auth", ",", "verify", "=", "self", ".", "cert", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "CaseTaskException", "(", "\"Case task log create error: {}\"", ".", "format", "(", "e", ")", ")" ]
:param task_id: Task identifier
:param case_task_log: TheHive log
:type case_task_log: CaseTaskLog defined in models.py
:return: TheHive log
:rtype: json
[ ":", "param", "task_id", ":", "Task", "identifier", ":", "param", "case_task_log", ":", "TheHive", "log", ":", "type", "case_task_log", ":", "CaseTaskLog", "defined", "in", "models", ".", "py", ":", "return", ":", "TheHive", "log", ":", "rtype", ":", "json" ]
python
train
51.083333
squaresLab/BugZoo
bugzoo/client/container.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/client/container.py#L152-L162
def ip_address(self,
               container: Container
               ) -> Union[IPv4Address, IPv6Address]:
    """
    The IP address used by a given container, or None if no IP address has
    been assigned to that container.
    """
    r = self.__api.get('containers/{}/ip'.format(container.uid))
    if r.status_code == 200:
        return r.json()
    self.__api.handle_erroneous_response(r)
[ "def", "ip_address", "(", "self", ",", "container", ":", "Container", ")", "->", "Union", "[", "IPv4Address", ",", "IPv6Address", "]", ":", "r", "=", "self", ".", "__api", ".", "get", "(", "'containers/{}/ip'", ".", "format", "(", "container", ".", "uid", ")", ")", "if", "r", ".", "status_code", "==", "200", ":", "return", "r", ".", "json", "(", ")", "self", ".", "__api", ".", "handle_erroneous_response", "(", "r", ")" ]
The IP address used by a given container, or None if no IP address has been assigned to that container.
[ "The", "IP", "address", "used", "by", "a", "given", "container", "or", "None", "if", "no", "IP", "address", "has", "been", "assigned", "to", "that", "container", "." ]
python
train
39
gwastro/pycbc
pycbc/inject/inject.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inject/inject.py#L805-L855
def write(filename, samples, write_params=None, static_args=None,
          injtype=None, **metadata):
    """Writes the injection samples to the given hdf file.

    Parameters
    ----------
    filename : str
        The name of the file to write to.
    samples : io.FieldArray
        FieldArray of parameters.
    write_params : list, optional
        Only write the given parameter names. All given names must be keys
        in ``samples``. Default is to write all parameters in ``samples``.
    static_args : dict, optional
        Dictionary mapping static parameter names to values. These are
        written to the ``attrs``.
    injtype : str, optional
        Specify which `HDFInjectionSet` class to use for writing. If not
        provided, will try to determine it by looking for an approximant in
        the ``static_args``, followed by the ``samples``.
    \**metadata :
        All other keyword arguments will be written to the file's attrs.
    """
    # DELETE the following "if" once xml is dropped
    ext = os.path.basename(filename)
    if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
        _XMLInjectionSet.write(filename, samples, write_params, static_args)
    else:
        # try determine the injtype if it isn't given
        if injtype is None:
            if static_args is not None and 'approximant' in static_args:
                injcls = hdf_injtype_from_approximant(
                    static_args['approximant'])
            elif 'approximant' in samples.fieldnames:
                apprxs = np.unique(samples['approximant'])
                # make sure they all correspond to the same injection type
                injcls = [hdf_injtype_from_approximant(a) for a in apprxs]
                if not all(c == injcls[0] for c in injcls):
                    raise ValueError("injections must all be of the same "
                                     "type")
                injcls = injcls[0]
            else:
                raise ValueError("Could not find an approximant in the "
                                 "static args or samples to determine the "
                                 "injection type. Please specify an "
                                 "injtype instead.")
        else:
            injcls = hdfinjtypes[injtype]
        injcls.write(filename, samples, write_params, static_args,
                     **metadata)
[ "def", "write", "(", "filename", ",", "samples", ",", "write_params", "=", "None", ",", "static_args", "=", "None", ",", "injtype", "=", "None", ",", "*", "*", "metadata", ")", ":", "# DELETE the following \"if\" once xml is dropped", "ext", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "if", "ext", ".", "endswith", "(", "(", "'.xml'", ",", "'.xml.gz'", ",", "'.xmlgz'", ")", ")", ":", "_XMLInjectionSet", ".", "write", "(", "filename", ",", "samples", ",", "write_params", ",", "static_args", ")", "else", ":", "# try determine the injtype if it isn't given", "if", "injtype", "is", "None", ":", "if", "static_args", "is", "not", "None", "and", "'approximant'", "in", "static_args", ":", "injcls", "=", "hdf_injtype_from_approximant", "(", "static_args", "[", "'approximant'", "]", ")", "elif", "'approximant'", "in", "samples", ".", "fieldnames", ":", "apprxs", "=", "np", ".", "unique", "(", "samples", "[", "'approximant'", "]", ")", "# make sure they all correspond to the same injection type", "injcls", "=", "[", "hdf_injtype_from_approximant", "(", "a", ")", "for", "a", "in", "apprxs", "]", "if", "not", "all", "(", "c", "==", "injcls", "[", "0", "]", "for", "c", "in", "injcls", ")", ":", "raise", "ValueError", "(", "\"injections must all be of the same \"", "\"type\"", ")", "injcls", "=", "injcls", "[", "0", "]", "else", ":", "raise", "ValueError", "(", "\"Could not find an approximant in the \"", "\"static args or samples to determine the \"", "\"injection type. Please specify an \"", "\"injtype instead.\"", ")", "else", ":", "injcls", "=", "hdfinjtypes", "[", "injtype", "]", "injcls", ".", "write", "(", "filename", ",", "samples", ",", "write_params", ",", "static_args", ",", "*", "*", "metadata", ")" ]
Writes the injection samples to the given hdf file.

Parameters
----------
filename : str
    The name of the file to write to.
samples : io.FieldArray
    FieldArray of parameters.
write_params : list, optional
    Only write the given parameter names. All given names must be keys
    in ``samples``. Default is to write all parameters in ``samples``.
static_args : dict, optional
    Dictionary mapping static parameter names to values. These are
    written to the ``attrs``.
injtype : str, optional
    Specify which `HDFInjectionSet` class to use for writing. If not
    provided, will try to determine it by looking for an approximant in
    the ``static_args``, followed by the ``samples``.
\**metadata :
    All other keyword arguments will be written to the file's attrs.
[ "Writes", "the", "injection", "samples", "to", "the", "given", "hdf", "file", "." ]
python
train
50.156863
tsileo/globster
lazy_regex.py
https://github.com/tsileo/globster/blob/9628bce60207b150d39b409cddc3fadb34e70841/lazy_regex.py#L60-L65
def _compile_and_collapse(self):
    """Actually compile the requested regex"""
    self._real_regex = self._real_re_compile(*self._regex_args,
                                             **self._regex_kwargs)
    for attr in self._regex_attributes_to_copy:
        setattr(self, attr, getattr(self._real_regex, attr))
[ "def", "_compile_and_collapse", "(", "self", ")", ":", "self", ".", "_real_regex", "=", "self", ".", "_real_re_compile", "(", "*", "self", ".", "_regex_args", ",", "*", "*", "self", ".", "_regex_kwargs", ")", "for", "attr", "in", "self", ".", "_regex_attributes_to_copy", ":", "setattr", "(", "self", ",", "attr", ",", "getattr", "(", "self", ".", "_real_regex", ",", "attr", ")", ")" ]
Actually compile the requested regex
[ "Actually", "compile", "the", "requested", "regex" ]
python
train
55.666667
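The compile-then-copy-attributes move is a general lazy-proxy pattern; a minimal standalone sketch (class name and attribute list invented here, not globster's API):

import re

class LazyRegex:
    _attrs = ('match', 'search', 'findall', 'pattern')  # hypothetical copy list

    def __init__(self, *args, **kwargs):
        self._args, self._kwargs = args, kwargs         # nothing compiled yet

    def __getattr__(self, name):
        if name in self._attrs:
            compiled = re.compile(*self._args, **self._kwargs)  # compile on first use
            for attr in self._attrs:                            # collapse onto self
                setattr(self, attr, getattr(compiled, attr))
            return getattr(self, name)
        raise AttributeError(name)

lazy = LazyRegex(r'\d+')
print(lazy.findall('a1b22'))  # first access triggers the compile: ['1', '22']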
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1483-L1488
def get_next_state(self, state, ret, oper):
    """Returns the next state for a create or delete operation. """
    if oper == fw_const.FW_CR_OP:
        return self.get_next_create_state(state, ret)
    else:
        return self.get_next_del_state(state, ret)
[ "def", "get_next_state", "(", "self", ",", "state", ",", "ret", ",", "oper", ")", ":", "if", "oper", "==", "fw_const", ".", "FW_CR_OP", ":", "return", "self", ".", "get_next_create_state", "(", "state", ",", "ret", ")", "else", ":", "return", "self", ".", "get_next_del_state", "(", "state", ",", "ret", ")" ]
Returns the next state for a create or delete operation.
[ "Returns", "the", "next", "state", "for", "a", "create", "or", "delete", "operation", "." ]
python
train
45.833333
spyder-ide/spyder
spyder/plugins/editor/widgets/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L336-L352
def set_palette(self, background, foreground):
    """
    Set text editor palette colors:
    background color and caret (text cursor) color
    """
    palette = QPalette()
    palette.setColor(QPalette.Base, background)
    palette.setColor(QPalette.Text, foreground)
    self.setPalette(palette)

    # Set the right background color when changing color schemes
    # or creating new Editor windows. This seems to be a Qt bug.
    # Fixes Issue 2028 and 8069
    if self.objectName():
        style = "QPlainTextEdit#%s {background: %s; color: %s;}" % \
                (self.objectName(), background.name(), foreground.name())
        self.setStyleSheet(style)
[ "def", "set_palette", "(", "self", ",", "background", ",", "foreground", ")", ":", "palette", "=", "QPalette", "(", ")", "palette", ".", "setColor", "(", "QPalette", ".", "Base", ",", "background", ")", "palette", ".", "setColor", "(", "QPalette", ".", "Text", ",", "foreground", ")", "self", ".", "setPalette", "(", "palette", ")", "# Set the right background color when changing color schemes\r", "# or creating new Editor windows. This seems to be a Qt bug.\r", "# Fixes Issue 2028 and 8069\r", "if", "self", ".", "objectName", "(", ")", ":", "style", "=", "\"QPlainTextEdit#%s {background: %s; color: %s;}\"", "%", "(", "self", ".", "objectName", "(", ")", ",", "background", ".", "name", "(", ")", ",", "foreground", ".", "name", "(", ")", ")", "self", ".", "setStyleSheet", "(", "style", ")" ]
Set text editor palette colors: background color and caret (text cursor) color
[ "Set", "text", "editor", "palette", "colors", ":", "background", "color", "and", "caret", "(", "text", "cursor", ")", "color" ]
python
train
42.647059
sosreport/sos
sos/__init__.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/__init__.py#L214-L230
def _opt_to_args(cls, opt, val):
    """Convert a named option and optional value to command line
       argument notation, correctly handling options that take
       no value or that have special representations (e.g. verify
       and verbose).
    """
    no_value = (
        "alloptions", "all-logs", "batch", "build", "debug",
        "experimental", "list-plugins", "list-presets", "list-profiles",
        "noreport", "quiet", "verify"
    )
    count = ("verbose",)
    if opt in no_value:
        return ["--%s" % opt]
    if opt in count:
        return ["--%s" % opt for d in range(0, int(val))]
    return ["--" + opt + "=" + val]
[ "def", "_opt_to_args", "(", "cls", ",", "opt", ",", "val", ")", ":", "no_value", "=", "(", "\"alloptions\"", ",", "\"all-logs\"", ",", "\"batch\"", ",", "\"build\"", ",", "\"debug\"", ",", "\"experimental\"", ",", "\"list-plugins\"", ",", "\"list-presets\"", ",", "\"list-profiles\"", ",", "\"noreport\"", ",", "\"quiet\"", ",", "\"verify\"", ")", "count", "=", "(", "\"verbose\"", ",", ")", "if", "opt", "in", "no_value", ":", "return", "[", "\"--%s\"", "%", "opt", "]", "if", "opt", "in", "count", ":", "return", "[", "\"--%s\"", "%", "opt", "for", "d", "in", "range", "(", "0", ",", "int", "(", "val", ")", ")", "]", "return", "[", "\"--\"", "+", "opt", "+", "\"=\"", "+", "val", "]" ]
Convert a named option and optional value to command line argument notation, correctly handling options that take no value or that have special representations (e.g. verify and verbose).
[ "Convert", "a", "named", "option", "and", "optional", "value", "to", "command", "line", "argument", "notation", "correctly", "handling", "options", "that", "take", "no", "value", "or", "that", "have", "special", "representations", "(", "e", ".", "g", ".", "verify", "and", "verbose", ")", "." ]
python
train
40.882353
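Replaying the three branches on representative options (the logic below mirrors _opt_to_args; option names are illustrative):

no_value = ("alloptions", "all-logs", "batch", "build", "debug",
            "experimental", "list-plugins", "list-presets", "list-profiles",
            "noreport", "quiet", "verify")
count = ("verbose",)

def opt_to_args(opt, val):
    if opt in no_value:
        return ["--%s" % opt]                           # bare flag
    if opt in count:
        return ["--%s" % opt for _ in range(int(val))]  # repeated flag
    return ["--" + opt + "=" + val]                     # key=value option

print(opt_to_args("quiet", ""))       # ['--quiet']
print(opt_to_args("verbose", "3"))    # ['--verbose', '--verbose', '--verbose']
print(opt_to_args("log-size", "25"))  # ['--log-size=25']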
devassistant/devassistant
devassistant/dapi/__init__.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/__init__.py#L604-L611
def _report_problem(self, problem, level=logging.ERROR):
    '''Report a given problem'''
    problem = self.basename + ': ' + problem
    if self._logger.isEnabledFor(level):
        self._problematic = True
        if self._check_raises:
            raise DapInvalid(problem)
        self._logger.log(level, problem)
[ "def", "_report_problem", "(", "self", ",", "problem", ",", "level", "=", "logging", ".", "ERROR", ")", ":", "problem", "=", "self", ".", "basename", "+", "': '", "+", "problem", "if", "self", ".", "_logger", ".", "isEnabledFor", "(", "level", ")", ":", "self", ".", "_problematic", "=", "True", "if", "self", ".", "_check_raises", ":", "raise", "DapInvalid", "(", "problem", ")", "self", ".", "_logger", ".", "log", "(", "level", ",", "problem", ")" ]
Report a given problem
[ "Report", "a", "given", "problem" ]
python
train
40.875
Tanganelli/CoAPthon3
coapthon/messages/request.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/request.py#L147-L159
def if_match(self, values):
    """
    Set the If-Match option of a request.

    :param values: the If-Match values
    :type values : list
    """
    assert isinstance(values, list)
    for v in values:
        option = Option()
        option.number = defines.OptionRegistry.IF_MATCH.number
        option.value = v
        self.add_option(option)
[ "def", "if_match", "(", "self", ",", "values", ")", ":", "assert", "isinstance", "(", "values", ",", "list", ")", "for", "v", "in", "values", ":", "option", "=", "Option", "(", ")", "option", ".", "number", "=", "defines", ".", "OptionRegistry", ".", "IF_MATCH", ".", "number", "option", ".", "value", "=", "v", "self", ".", "add_option", "(", "option", ")" ]
Set the If-Match option of a request.

:param values: the If-Match values
:type values : list
[ "Set", "the", "If", "-", "Match", "option", "of", "a", "request", "." ]
python
train
29.538462
timothycrosley/connectable
connectable/base.py
https://github.com/timothycrosley/connectable/blob/d5958d974c04b16f410c602786809d0e2a6665d2/connectable/base.py#L98-L117
def accept_arguments(method, number_of_arguments=1):
    """Returns True if the given method will accept the given number of arguments

       method: the method to perform introspection on
       number_of_arguments: the number_of_arguments
    """
    if 'method' in method.__class__.__name__:
        number_of_arguments += 1
        func = getattr(method, 'im_func', getattr(method, '__func__'))
        func_defaults = getattr(func, 'func_defaults', getattr(func, '__defaults__'))
        number_of_defaults = func_defaults and len(func_defaults) or 0
    elif method.__class__.__name__ == 'function':
        func_defaults = getattr(method, 'func_defaults', getattr(method, '__defaults__'))
        number_of_defaults = func_defaults and len(func_defaults) or 0
    coArgCount = getattr(method, 'func_code', getattr(method, '__code__')).co_argcount
    if(coArgCount >= number_of_arguments and coArgCount - number_of_defaults <= number_of_arguments):
        return True

    return False
[ "def", "accept_arguments", "(", "method", ",", "number_of_arguments", "=", "1", ")", ":", "if", "'method'", "in", "method", ".", "__class__", ".", "__name__", ":", "number_of_arguments", "+=", "1", "func", "=", "getattr", "(", "method", ",", "'im_func'", ",", "getattr", "(", "method", ",", "'__func__'", ")", ")", "func_defaults", "=", "getattr", "(", "func", ",", "'func_defaults'", ",", "getattr", "(", "func", ",", "'__defaults__'", ")", ")", "number_of_defaults", "=", "func_defaults", "and", "len", "(", "func_defaults", ")", "or", "0", "elif", "method", ".", "__class__", ".", "__name__", "==", "'function'", ":", "func_defaults", "=", "getattr", "(", "method", ",", "'func_defaults'", ",", "getattr", "(", "method", ",", "'__defaults__'", ")", ")", "number_of_defaults", "=", "func_defaults", "and", "len", "(", "func_defaults", ")", "or", "0", "coArgCount", "=", "getattr", "(", "method", ",", "'func_code'", ",", "getattr", "(", "method", ",", "'__code__'", ")", ")", ".", "co_argcount", "if", "(", "coArgCount", ">=", "number_of_arguments", "and", "coArgCount", "-", "number_of_defaults", "<=", "number_of_arguments", ")", ":", "return", "True", "return", "False" ]
Returns True if the given method will accept the given number of arguments

method: the method to perform introspection on
number_of_arguments: the number_of_arguments
[ "Returns", "True", "if", "the", "given", "method", "will", "accept", "the", "given", "number", "of", "arguments" ]
python
train
48.8
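Applied to a plain function with one defaulted parameter, the introspection works out as follows (assuming the helper is importable from connectable.base, the module shown above):

from connectable.base import accept_arguments  # assumed import path

def handler(sender, value=None):     # one required arg, one defaulted
    return value

print(accept_arguments(handler, 1))  # True:  co_argcount 2 >= 1 and 2 - 1 <= 1
print(accept_arguments(handler, 2))  # True:  2 >= 2 and 1 <= 2
print(accept_arguments(handler, 3))  # False: only two positional slots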
amaas-fintech/amaas-core-sdk-python
amaascore/assets/interface.py
https://github.com/amaas-fintech/amaas-core-sdk-python/blob/347b71f8e776b2dde582b015e31b4802d91e8040/amaascore/assets/interface.py#L248-L261
def clear(self, asset_manager_id):
    """ This method deletes all the data for an asset_manager_id.
        It should be used with extreme caution.  In production it
        is almost always better to Inactivate rather than delete. """
    self.logger.info('Clear Assets - Asset Manager: %s', asset_manager_id)
    url = '%s/clear/%s' % (self.endpoint, asset_manager_id)
    response = self.session.delete(url)
    if response.ok:
        count = response.json().get('count', 'Unknown')
        self.logger.info('Deleted %s Assets.', count)
        return count
    else:
        self.logger.error(response.text)
        response.raise_for_status()
[ "def", "clear", "(", "self", ",", "asset_manager_id", ")", ":", "self", ".", "logger", ".", "info", "(", "'Clear Assets - Asset Manager: %s'", ",", "asset_manager_id", ")", "url", "=", "'%s/clear/%s'", "%", "(", "self", ".", "endpoint", ",", "asset_manager_id", ")", "response", "=", "self", ".", "session", ".", "delete", "(", "url", ")", "if", "response", ".", "ok", ":", "count", "=", "response", ".", "json", "(", ")", ".", "get", "(", "'count'", ",", "'Unknown'", ")", "self", ".", "logger", ".", "info", "(", "'Deleted %s Assets.'", ",", "count", ")", "return", "count", "else", ":", "self", ".", "logger", ".", "error", "(", "response", ".", "text", ")", "response", ".", "raise_for_status", "(", ")" ]
This method deletes all the data for an asset_manager_id. It should be used with extreme caution. In production it is almost always better to Inactivate rather than delete.
[ "This", "method", "deletes", "all", "the", "data", "for", "an", "asset_manager_id", ".", "It", "should", "be", "used", "with", "extreme", "caution", ".", "In", "production", "it", "is", "almost", "always", "better", "to", "Inactivate", "rather", "than", "delete", "." ]
python
train
49.142857
spyder-ide/spyder
spyder/utils/workers.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/workers.py#L151-L157
def _communicate(self): """Callback for communicate.""" if (not self._communicate_first and self._process.state() == QProcess.NotRunning): self.communicate() elif self._fired: self._timer.stop()
[ "def", "_communicate", "(", "self", ")", ":", "if", "(", "not", "self", ".", "_communicate_first", "and", "self", ".", "_process", ".", "state", "(", ")", "==", "QProcess", ".", "NotRunning", ")", ":", "self", ".", "communicate", "(", ")", "elif", "self", ".", "_fired", ":", "self", ".", "_timer", ".", "stop", "(", ")" ]
Callback for communicate.
[ "Callback", "for", "communicate", "." ]
python
train
36
eternnoir/pyTelegramBotAPI
telebot/apihelper.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/apihelper.py#L828-L841
def answer_pre_checkout_query(token, pre_checkout_query_id, ok, error_message=None): """ Once the user has confirmed their payment and shipping details, the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query. Use this method to respond to such pre-checkout queries. On success, True is returned. Note: The Bot API must receive an answer within 10 seconds after the pre-checkout query was sent. :param token: Bot's token (you don't need to fill this) :param pre_checkout_query_id: Unique identifier for the query to be answered :param ok: Specify True if everything is alright (goods are available, etc.) and the bot is ready to proceed with the order. Use False if there are any problems. :param error_message: Required if ok is False. Error message in human readable form that explains the reason for failure to proceed with the checkout (e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling out your payment details. Please choose a different color or garment!"). Telegram will display this message to the user. :return: """ method_url = 'answerPreCheckoutQuery' payload = {'pre_checkout_query_id': pre_checkout_query_id, 'ok': ok} if error_message: payload['error_message'] = error_message return _make_request(token, method_url, params=payload)
[ "def", "answer_pre_checkout_query", "(", "token", ",", "pre_checkout_query_id", ",", "ok", ",", "error_message", "=", "None", ")", ":", "method_url", "=", "'answerPreCheckoutQuery'", "payload", "=", "{", "'pre_checkout_query_id'", ":", "pre_checkout_query_id", ",", "'ok'", ":", "ok", "}", "if", "error_message", ":", "payload", "[", "'error_message'", "]", "=", "error_message", "return", "_make_request", "(", "token", ",", "method_url", ",", "params", "=", "payload", ")" ]
Once the user has confirmed their payment and shipping details, the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query. Use this method to respond to such pre-checkout queries. On success, True is returned. Note: The Bot API must receive an answer within 10 seconds after the pre-checkout query was sent. :param token: Bot's token (you don't need to fill this) :param pre_checkout_query_id: Unique identifier for the query to be answered :param ok: Specify True if everything is alright (goods are available, etc.) and the bot is ready to proceed with the order. Use False if there are any problems. :param error_message: Required if ok is False. Error message in human readable form that explains the reason for failure to proceed with the checkout (e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling out your payment details. Please choose a different color or garment!"). Telegram will display this message to the user. :return:
[ "Once", "the", "user", "has", "confirmed", "their", "payment", "and", "shipping", "details", "the", "Bot", "API", "sends", "the", "final", "confirmation", "in", "the", "form", "of", "an", "Update", "with", "the", "field", "pre_checkout_query", ".", "Use", "this", "method", "to", "respond", "to", "such", "pre", "-", "checkout", "queries", ".", "On", "success", "True", "is", "returned", ".", "Note", ":", "The", "Bot", "API", "must", "receive", "an", "answer", "within", "10", "seconds", "after", "the", "pre", "-", "checkout", "query", "was", "sent", ".", ":", "param", "token", ":", "Bot", "s", "token", "(", "you", "don", "t", "need", "to", "fill", "this", ")", ":", "param", "pre_checkout_query_id", ":", "Unique", "identifier", "for", "the", "query", "to", "be", "answered", ":", "param", "ok", ":", "Specify", "True", "if", "everything", "is", "alright", "(", "goods", "are", "available", "etc", ".", ")", "and", "the", "bot", "is", "ready", "to", "proceed", "with", "the", "order", ".", "Use", "False", "if", "there", "are", "any", "problems", ".", ":", "param", "error_message", ":", "Required", "if", "ok", "is", "False", ".", "Error", "message", "in", "human", "readable", "form", "that", "explains", "the", "reason", "for", "failure", "to", "proceed", "with", "the", "checkout", "(", "e", ".", "g", ".", "Sorry", "somebody", "just", "bought", "the", "last", "of", "our", "amazing", "black", "T", "-", "shirts", "while", "you", "were", "busy", "filling", "out", "your", "payment", "details", ".", "Please", "choose", "a", "different", "color", "or", "garment!", ")", ".", "Telegram", "will", "display", "this", "message", "to", "the", "user", ".", ":", "return", ":" ]
python
train
98.928571
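A hedged usage sketch for answering pre-checkout queries through pyTelegramBotAPI's TeleBot wrapper; the token placeholder is hypothetical.

import telebot

bot = telebot.TeleBot('<bot-token>')  # hypothetical placeholder, not a real token

@bot.pre_checkout_query_handler(func=lambda query: True)
def confirm_checkout(pre_checkout_query):
    # The Bot API expects an answer within 10 seconds of the query.
    bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)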
dbader/schedule
schedule/__init__.py
https://github.com/dbader/schedule/blob/5d2653c28b1029f1e9ddc85cd9ef26c29a79fcea/schedule/__init__.py#L364-L376
def tag(self, *tags):
        """
        Tags the job with one or more unique identifiers.

        Tags must be hashable. Duplicate tags are discarded.

        :param tags: A unique list of ``Hashable`` tags.
        :return: The invoked job instance
        """
        if not all(isinstance(tag, collections.Hashable) for tag in tags):
            raise TypeError('Tags must be hashable')
        self.tags.update(tags)
        return self
[ "def", "tag", "(", "self", ",", "*", "tags", ")", ":", "if", "not", "all", "(", "isinstance", "(", "tag", ",", "collections", ".", "Hashable", ")", "for", "tag", "in", "tags", ")", ":", "raise", "TypeError", "(", "'Tags must be hashable'", ")", "self", ".", "tags", ".", "update", "(", "tags", ")", "return", "self" ]
Tags the job with one or more unique identifiers.

        Tags must be hashable. Duplicate tags are discarded.

        :param tags: A unique list of ``Hashable`` tags.
        :return: The invoked job instance
[ "Tags", "the", "job", "with", "one", "or", "more", "unique", "indentifiers", "." ]
python
train
33.307692
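The main payoff of Job.tag is selective cancellation; a short sketch against schedule's module-level API:

import schedule

def job():
    print('working...')

schedule.every().day.do(job).tag('daily', 'email')   # tags must be hashable
schedule.every().hour.do(job).tag('hourly')

schedule.clear('daily')   # cancels only the jobs tagged 'daily'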
gmr/tinman
tinman/auth/ldapauth.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/auth/ldapauth.py#L24-L54
def auth_user_ldap(uname, pwd):
    """
    Attempts to bind using the uname/pwd combo passed in.
    If that works, returns true. Otherwise returns false.
    """
    if not uname or not pwd:
        logging.error("Username or password not supplied")
        return False
    ld = ldap.initialize(LDAP_URL)
    if LDAP_VERSION_3:
        ld.set_option(ldap.VERSION3, 1)
        ld.start_tls_s()
    udn = ld.search_s(LDAP_SEARCH_BASE, ldap.SCOPE_ONELEVEL,
                      '(%s=%s)' % (LDAP_UNAME_ATTR,uname), [LDAP_BIND_ATTR])
    if udn:
        try:
            bindres = ld.simple_bind_s(udn[0][0], pwd)
        except (ldap.INVALID_CREDENTIALS, ldap.UNWILLING_TO_PERFORM):
            logging.error("Invalid or incomplete credentials for %s", uname)
            return False
        except Exception as out:
            logging.error("Auth attempt for %s had an unexpected error: %s",
                          uname, out)
            return False
        else:
            return True
    else:
        logging.error("No user by that name")
        return False
[ "def", "auth_user_ldap", "(", "uname", ",", "pwd", ")", ":", "if", "not", "uname", "or", "not", "pwd", ":", "logging", ".", "error", "(", "\"Username or password not supplied\"", ")", "return", "False", "ld", "=", "ldap", ".", "initialize", "(", "LDAP_URL", ")", "if", "LDAP_VERSION_3", ":", "ld", ".", "set_option", "(", "ldap", ".", "VERSION3", ",", "1", ")", "ld", ".", "start_tls_s", "(", ")", "udn", "=", "ld", ".", "search_s", "(", "LDAP_SEARCH_BASE", ",", "ldap", ".", "SCOPE_ONELEVEL", ",", "'(%s=%s)'", "%", "(", "LDAP_UNAME_ATTR", ",", "uname", ")", ",", "[", "LDAP_BIND_ATTR", "]", ")", "if", "udn", ":", "try", ":", "bindres", "=", "ld", ".", "simple_bind_s", "(", "udn", "[", "0", "]", "[", "0", "]", ",", "pwd", ")", "except", "ldap", ".", "INVALID_CREDENTIALS", ",", "ldap", ".", "UNWILLING_TO_PERFORM", ":", "logging", ".", "error", "(", "\"Invalid or incomplete credentials for %s\"", ",", "uname", ")", "return", "False", "except", "Exception", "as", "out", ":", "logging", ".", "error", "(", "\"Auth attempt for %s had an unexpected error: %s\"", ",", "uname", ",", "out", ")", "return", "False", "else", ":", "return", "True", "else", ":", "logging", ".", "error", "(", "\"No user by that name\"", ")", "return", "False" ]
Attempts to bind using the uname/pwd combo passed in. If that works, returns true. Otherwise returns false.
[ "Attempts", "to", "bind", "using", "the", "uname", "/", "pwd", "combo", "passed", "in", ".", "If", "that", "works", "returns", "true", ".", "Otherwise", "returns", "false", "." ]
python
train
33.645161
honsiorovskyi/codeharvester
src/codeharvester/harvester.py
https://github.com/honsiorovskyi/codeharvester/blob/301b907b32ef9bbdb7099657100fbd3829c3ecc8/src/codeharvester/harvester.py#L56-L105
def parse_requirements(self, filename): """ Recursively find all the requirements needed storing them in req_parents, req_paths, req_linenos """ cwd = os.path.dirname(filename) try: fd = open(filename, 'r') for i, line in enumerate(fd.readlines(), 0): req = self.extract_requirement(line) # if the line is not a requirement statement if not req: continue req_path = req if not os.path.isabs(req_path): req_path = os.path.normpath(os.path.join(cwd, req_path)) if not os.path.exists(req_path): logging.warning("Requirement '{0}' could not be resolved: '{1}' does not exist.".format(req, req_path)) if self.flags['cleanup']: self.skip_unresolved_requirement(filename, i) continue # if the requirement is already added to the database, skip it if req_path in self.req_paths: logging.warning("Skipping duplicate requirement '{0}' at '{2}:{3}' [file '{1}'].".format( req, req_path, filename, i+1 # human-recognizable line number )) if self.flags['cleanup']: self.skip_unresolved_requirement(filename, i) continue # store requirements to the global database self.req_parents.append(filename) self.req_paths.append(req_path) self.req_linenos.append(i) # recursion self.parse_requirements(req_path) fd.close() except IOError as err: logging.warning("I/O error: {0}".format(err))
[ "def", "parse_requirements", "(", "self", ",", "filename", ")", ":", "cwd", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "try", ":", "fd", "=", "open", "(", "filename", ",", "'r'", ")", "for", "i", ",", "line", "in", "enumerate", "(", "fd", ".", "readlines", "(", ")", ",", "0", ")", ":", "req", "=", "self", ".", "extract_requirement", "(", "line", ")", "# if the line is not a requirement statement", "if", "not", "req", ":", "continue", "req_path", "=", "req", "if", "not", "os", ".", "path", ".", "isabs", "(", "req_path", ")", ":", "req_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "cwd", ",", "req_path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "req_path", ")", ":", "logging", ".", "warning", "(", "\"Requirement '{0}' could not be resolved: '{1}' does not exist.\"", ".", "format", "(", "req", ",", "req_path", ")", ")", "if", "self", ".", "flags", "[", "'cleanup'", "]", ":", "self", ".", "skip_unresolved_requirement", "(", "filename", ",", "i", ")", "continue", "# if the requirement is already added to the database, skip it", "if", "req_path", "in", "self", ".", "req_paths", ":", "logging", ".", "warning", "(", "\"Skipping duplicate requirement '{0}' at '{2}:{3}' [file '{1}'].\"", ".", "format", "(", "req", ",", "req_path", ",", "filename", ",", "i", "+", "1", "# human-recognizable line number", ")", ")", "if", "self", ".", "flags", "[", "'cleanup'", "]", ":", "self", ".", "skip_unresolved_requirement", "(", "filename", ",", "i", ")", "continue", "# store requirements to the global database", "self", ".", "req_parents", ".", "append", "(", "filename", ")", "self", ".", "req_paths", ".", "append", "(", "req_path", ")", "self", ".", "req_linenos", ".", "append", "(", "i", ")", "# recursion", "self", ".", "parse_requirements", "(", "req_path", ")", "fd", ".", "close", "(", ")", "except", "IOError", "as", "err", ":", "logging", ".", "warning", "(", "\"I/O error: {0}\"", ".", "format", "(", "err", ")", ")" ]
Recursively find all the requirements needed storing them in req_parents, req_paths, req_linenos
[ "Recursively", "find", "all", "the", "requirements", "needed", "storing", "them", "in", "req_parents", "req_paths", "req_linenos" ]
python
train
37.64
pip-services3-python/pip-services3-components-python
pip_services3_components/cache/CacheEntry.py
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/cache/CacheEntry.py#L36-L45
def set_value(self, value, timeout): """ Sets a new value and extends its expiration. :param value: a new cached value. :param timeout: a expiration timeout in milliseconds. """ self.value = value self.expiration = time.perf_counter() * 1000 + timeout
[ "def", "set_value", "(", "self", ",", "value", ",", "timeout", ")", ":", "self", ".", "value", "=", "value", "self", ".", "expiration", "=", "time", ".", "perf_counter", "(", ")", "*", "1000", "+", "timeout" ]
Sets a new value and extends its expiration. :param value: a new cached value. :param timeout: a expiration timeout in milliseconds.
[ "Sets", "a", "new", "value", "and", "extends", "its", "expiration", "." ]
python
train
30
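A minimal standalone sketch of the millisecond expiration arithmetic used by set_value above:

import time

timeout_ms = 500
expiration = time.perf_counter() * 1000 + timeout_ms   # deadline as a perf_counter timestamp in ms

def expired():
    return time.perf_counter() * 1000 > expiration

print(expired())   # False right after the value is set
time.sleep(0.6)
print(expired())   # True once the 500 ms window has elapsed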
mozilla-b2g/fxos-certsuite
mcts/utils/handlers/adb_b2g.py
https://github.com/mozilla-b2g/fxos-certsuite/blob/152a76c7c4c9d908524cf6e6fc25a498058f363d/mcts/utils/handlers/adb_b2g.py#L246-L259
def reboot(self, timeout=None, wait_polling_interval=None): """Reboot the device, waiting for the adb connection to become stable. :param timeout: Maximum time to wait for reboot. :param wait_polling_interval: Interval at which to poll for device readiness. """ if timeout is None: timeout = self._timeout if wait_polling_interval is None: wait_polling_interval = self._wait_polling_interval self._logger.info("Rebooting device") self.wait_for_device_ready(timeout, after_first=lambda:self.command_output(["reboot"]))
[ "def", "reboot", "(", "self", ",", "timeout", "=", "None", ",", "wait_polling_interval", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "_timeout", "if", "wait_polling_interval", "is", "None", ":", "wait_polling_interval", "=", "self", ".", "_wait_polling_interval", "self", ".", "_logger", ".", "info", "(", "\"Rebooting device\"", ")", "self", ".", "wait_for_device_ready", "(", "timeout", ",", "after_first", "=", "lambda", ":", "self", ".", "command_output", "(", "[", "\"reboot\"", "]", ")", ")" ]
Reboot the device, waiting for the adb connection to become stable. :param timeout: Maximum time to wait for reboot. :param wait_polling_interval: Interval at which to poll for device readiness.
[ "Reboot", "the", "device", "waiting", "for", "the", "adb", "connection", "to", "become", "stable", "." ]
python
train
44.928571
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/_helpers.py#L204-L215
def encode_dict(values_dict): """Encode a dictionary into protobuf ``Value``-s. Args: values_dict (dict): The dictionary to encode as protobuf fields. Returns: Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A dictionary of string keys and ``Value`` protobufs as dictionary values. """ return {key: encode_value(value) for key, value in six.iteritems(values_dict)}
[ "def", "encode_dict", "(", "values_dict", ")", ":", "return", "{", "key", ":", "encode_value", "(", "value", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "values_dict", ")", "}" ]
Encode a dictionary into protobuf ``Value``-s. Args: values_dict (dict): The dictionary to encode as protobuf fields. Returns: Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A dictionary of string keys and ``Value`` protobufs as dictionary values.
[ "Encode", "a", "dictionary", "into", "protobuf", "Value", "-", "s", "." ]
python
train
34.583333
saltstack/salt
salt/modules/libcloud_storage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_storage.py#L116-L149
def list_container_objects(container_name, profile, **libcloud_kwargs):
    '''
    List container objects (e.g. files) for the given container_name on the given profile

    :param container_name: Container name
    :type  container_name: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's list_container_objects method
    :type  libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_container_objects MyFolder profile1
    '''
    conn = _get_driver(profile=profile)
    container = conn.get_container(container_name)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    objects = conn.list_container_objects(container, **libcloud_kwargs)
    ret = []
    for obj in objects:
        ret.append({
            'name': obj.name,
            'size': obj.size,
            'hash': obj.hash,
            'container': obj.container.name,
            'extra': obj.extra,
            'meta_data': obj.meta_data
        })
    return ret
[ "def", "list_container_objects", "(", "container_name", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "container", "=", "conn", ".", "get_container", "(", "container_name", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_kwargs", ")", "objects", "=", "conn", ".", "list_container_objects", "(", "container", ",", "*", "*", "libcloud_kwargs", ")", "ret", "=", "[", "]", "for", "obj", "in", "objects", ":", "ret", ".", "append", "(", "{", "'name'", ":", "obj", ".", "name", ",", "'size'", ":", "obj", ".", "size", ",", "'hash'", ":", "obj", ".", "hash", ",", "'container'", ":", "obj", ".", "container", ".", "name", ",", "'extra'", ":", "obj", ".", "extra", ",", "'meta_data'", ":", "obj", ".", "meta_data", "}", ")", "return", "ret" ]
List container objects (e.g. files) for the given container_name on the given profile

    :param container_name: Container name
    :type  container_name: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's list_container_objects method
    :type  libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_container_objects MyFolder profile1
[ "List", "container", "objects", "(", "e", ".", "g", ".", "files", ")", "for", "the", "given", "container_id", "on", "the", "given", "profile" ]
python
train
31.235294
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L4678-L4682
def addSRNLayers(self, inc, hidc, outc): """ Wraps SRN.addThreeLayers() for compatibility. """ self.addThreeLayers(inc, hidc, outc)
[ "def", "addSRNLayers", "(", "self", ",", "inc", ",", "hidc", ",", "outc", ")", ":", "self", ".", "addThreeLayers", "(", "inc", ",", "hidc", ",", "outc", ")" ]
Wraps SRN.addThreeLayers() for compatibility.
[ "Wraps", "SRN", ".", "addThreeLayers", "()", "for", "compatibility", "." ]
python
train
31.8
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L312-L346
def get_with_rtdcbase(self, col1, col2, method, dataset, viscosity=None, add_px_err=False): """Convenience method that extracts the metadata from RTDCBase Parameters ---------- col1: str Name of the first feature of all isoelastics (e.g. isoel[0][:,0]) col2: str Name of the second feature of all isoelastics (e.g. isoel[0][:,1]) method: str The method used to compute the isoelastics (must be one of `VALID_METHODS`). dataset: dclab.rtdc_dataset.RTDCBase The dataset from which to obtain the metadata. viscosity: float or `None` Viscosity of the medium in mPa*s. If set to `None`, the flow rate of the imported data will be used (only do this if you do not need the correct values for elastic moduli). add_px_err: bool If True, add pixelation errors according to C. Herold (2017), https://arxiv.org/abs/1704.00572 """ cfg = dataset.config return self.get(col1=col1, col2=col2, method=method, channel_width=cfg["setup"]["channel width"], flow_rate=cfg["setup"]["flow rate"], viscosity=viscosity, add_px_err=add_px_err, px_um=cfg["imaging"]["pixel size"])
[ "def", "get_with_rtdcbase", "(", "self", ",", "col1", ",", "col2", ",", "method", ",", "dataset", ",", "viscosity", "=", "None", ",", "add_px_err", "=", "False", ")", ":", "cfg", "=", "dataset", ".", "config", "return", "self", ".", "get", "(", "col1", "=", "col1", ",", "col2", "=", "col2", ",", "method", "=", "method", ",", "channel_width", "=", "cfg", "[", "\"setup\"", "]", "[", "\"channel width\"", "]", ",", "flow_rate", "=", "cfg", "[", "\"setup\"", "]", "[", "\"flow rate\"", "]", ",", "viscosity", "=", "viscosity", ",", "add_px_err", "=", "add_px_err", ",", "px_um", "=", "cfg", "[", "\"imaging\"", "]", "[", "\"pixel size\"", "]", ")" ]
Convenience method that extracts the metadata from RTDCBase Parameters ---------- col1: str Name of the first feature of all isoelastics (e.g. isoel[0][:,0]) col2: str Name of the second feature of all isoelastics (e.g. isoel[0][:,1]) method: str The method used to compute the isoelastics (must be one of `VALID_METHODS`). dataset: dclab.rtdc_dataset.RTDCBase The dataset from which to obtain the metadata. viscosity: float or `None` Viscosity of the medium in mPa*s. If set to `None`, the flow rate of the imported data will be used (only do this if you do not need the correct values for elastic moduli). add_px_err: bool If True, add pixelation errors according to C. Herold (2017), https://arxiv.org/abs/1704.00572
[ "Convenience", "method", "that", "extracts", "the", "metadata", "from", "RTDCBase" ]
python
train
41.914286
matthewdeanmartin/jiggle_version
jiggle_version/file_opener.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/file_opener.py#L104-L114
def read_metadata(self, file_path): # type: (str) ->str """ Get version out of a .ini file (or .cfg) :return: """ config = configparser.ConfigParser() config.read(file_path) try: return unicode(config["metadata"]["version"]) except KeyError: return ""
[ "def", "read_metadata", "(", "self", ",", "file_path", ")", ":", "# type: (str) ->str", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "file_path", ")", "try", ":", "return", "unicode", "(", "config", "[", "\"metadata\"", "]", "[", "\"version\"", "]", ")", "except", "KeyError", ":", "return", "\"\"" ]
Get version out of a .ini file (or .cfg) :return:
[ "Get", "version", "out", "of", "a", ".", "ini", "file", "(", "or", ".", "cfg", ")", ":", "return", ":" ]
python
train
30
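The same lookup rebuilt as a standalone Python 3 sketch, where the unicode() call from the original is unnecessary:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[metadata]
name = example
version = 1.2.3
""")

try:
    version = config["metadata"]["version"]
except KeyError:
    version = ""
print(version)   # -> 1.2.3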
riptano/ccm
ccmlib/node.py
https://github.com/riptano/ccm/blob/275699f79d102b5039b79cc17fa6305dccf18412/ccmlib/node.py#L150-L194
def load(path, name, cluster): """ Load a node from from the path on disk to the config files, the node name and the cluster the node is part of. """ node_path = os.path.join(path, name) filename = os.path.join(node_path, 'node.conf') with open(filename, 'r') as f: data = yaml.safe_load(f) try: itf = data['interfaces'] initial_token = None if 'initial_token' in data: initial_token = data['initial_token'] cassandra_version = None if 'cassandra_version' in data: cassandra_version = LooseVersion(data['cassandra_version']) remote_debug_port = 2000 if 'remote_debug_port' in data: remote_debug_port = data['remote_debug_port'] binary_interface = None if 'binary' in itf and itf['binary'] is not None: binary_interface = tuple(itf['binary']) thrift_interface = None if 'thrift' in itf and itf['thrift'] is not None: thrift_interface = tuple(itf['thrift']) node = cluster.create_node(data['name'], data['auto_bootstrap'], thrift_interface, tuple(itf['storage']), data['jmx_port'], remote_debug_port, initial_token, save=False, binary_interface=binary_interface, byteman_port=data['byteman_port'], derived_cassandra_version=cassandra_version) node.status = data['status'] if 'pid' in data: node.pid = int(data['pid']) if 'install_dir' in data: node.__install_dir = data['install_dir'] if 'config_options' in data: node.__config_options = data['config_options'] if 'dse_config_options' in data: node._dse_config_options = data['dse_config_options'] if 'environment_variables' in data: node.__environment_variables = data['environment_variables'] if 'data_center' in data: node.data_center = data['data_center'] if 'workloads' in data: node.workloads = data['workloads'] return node except KeyError as k: raise common.LoadError("Error Loading " + filename + ", missing property: " + str(k))
[ "def", "load", "(", "path", ",", "name", ",", "cluster", ")", ":", "node_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "node_path", ",", "'node.conf'", ")", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "data", "=", "yaml", ".", "safe_load", "(", "f", ")", "try", ":", "itf", "=", "data", "[", "'interfaces'", "]", "initial_token", "=", "None", "if", "'initial_token'", "in", "data", ":", "initial_token", "=", "data", "[", "'initial_token'", "]", "cassandra_version", "=", "None", "if", "'cassandra_version'", "in", "data", ":", "cassandra_version", "=", "LooseVersion", "(", "data", "[", "'cassandra_version'", "]", ")", "remote_debug_port", "=", "2000", "if", "'remote_debug_port'", "in", "data", ":", "remote_debug_port", "=", "data", "[", "'remote_debug_port'", "]", "binary_interface", "=", "None", "if", "'binary'", "in", "itf", "and", "itf", "[", "'binary'", "]", "is", "not", "None", ":", "binary_interface", "=", "tuple", "(", "itf", "[", "'binary'", "]", ")", "thrift_interface", "=", "None", "if", "'thrift'", "in", "itf", "and", "itf", "[", "'thrift'", "]", "is", "not", "None", ":", "thrift_interface", "=", "tuple", "(", "itf", "[", "'thrift'", "]", ")", "node", "=", "cluster", ".", "create_node", "(", "data", "[", "'name'", "]", ",", "data", "[", "'auto_bootstrap'", "]", ",", "thrift_interface", ",", "tuple", "(", "itf", "[", "'storage'", "]", ")", ",", "data", "[", "'jmx_port'", "]", ",", "remote_debug_port", ",", "initial_token", ",", "save", "=", "False", ",", "binary_interface", "=", "binary_interface", ",", "byteman_port", "=", "data", "[", "'byteman_port'", "]", ",", "derived_cassandra_version", "=", "cassandra_version", ")", "node", ".", "status", "=", "data", "[", "'status'", "]", "if", "'pid'", "in", "data", ":", "node", ".", "pid", "=", "int", "(", "data", "[", "'pid'", "]", ")", "if", "'install_dir'", "in", "data", ":", "node", ".", "__install_dir", "=", "data", "[", "'install_dir'", "]", "if", "'config_options'", "in", "data", ":", "node", ".", "__config_options", "=", "data", "[", "'config_options'", "]", "if", "'dse_config_options'", "in", "data", ":", "node", ".", "_dse_config_options", "=", "data", "[", "'dse_config_options'", "]", "if", "'environment_variables'", "in", "data", ":", "node", ".", "__environment_variables", "=", "data", "[", "'environment_variables'", "]", "if", "'data_center'", "in", "data", ":", "node", ".", "data_center", "=", "data", "[", "'data_center'", "]", "if", "'workloads'", "in", "data", ":", "node", ".", "workloads", "=", "data", "[", "'workloads'", "]", "return", "node", "except", "KeyError", "as", "k", ":", "raise", "common", ".", "LoadError", "(", "\"Error Loading \"", "+", "filename", "+", "\", missing property: \"", "+", "str", "(", "k", ")", ")" ]
Load a node from from the path on disk to the config files, the node name and the cluster the node is part of.
[ "Load", "a", "node", "from", "from", "the", "path", "on", "disk", "to", "the", "config", "files", "the", "node", "name", "and", "the", "cluster", "the", "node", "is", "part", "of", "." ]
python
train
50.711111
timofurrer/colorful
colorful/__init__.py
https://github.com/timofurrer/colorful/blob/919fa6da17865cc5e01e6b16119193a97d180dc9/colorful/__init__.py#L42-L54
def with_setup(self, colormode=None, colorpalette=None, extend_colors=False): """ Return a new Colorful object with the given color config. """ colorful = Colorful( colormode=self.colorful.colormode, colorpalette=copy.copy(self.colorful.colorpalette) ) colorful.setup( colormode=colormode, colorpalette=colorpalette, extend_colors=extend_colors ) yield colorful
[ "def", "with_setup", "(", "self", ",", "colormode", "=", "None", ",", "colorpalette", "=", "None", ",", "extend_colors", "=", "False", ")", ":", "colorful", "=", "Colorful", "(", "colormode", "=", "self", ".", "colorful", ".", "colormode", ",", "colorpalette", "=", "copy", ".", "copy", "(", "self", ".", "colorful", ".", "colorpalette", ")", ")", "colorful", ".", "setup", "(", "colormode", "=", "colormode", ",", "colorpalette", "=", "colorpalette", ",", "extend_colors", "=", "extend_colors", ")", "yield", "colorful" ]
Return a new Colorful object with the given color config.
[ "Return", "a", "new", "Colorful", "object", "with", "the", "given", "color", "config", "." ]
python
valid
34.615385
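The yield suggests with_setup is meant to be used as a context manager that scopes a temporary color configuration; a hedged sketch, where the 'mint' palette entry is hypothetical:

import colorful as cf

# The setup kwargs mirror the signature shown in the record above.
with cf.with_setup(colorpalette={'mint': '#c5e8c8'}, extend_colors=True) as c:
    print(c.mint('styled only inside this block'))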
quantopian/zipline
zipline/utils/sharedoc.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/sharedoc.py#L85-L101
def templated_docstring(**docs): """ Decorator allowing the use of templated docstrings. Examples -------- >>> @templated_docstring(foo='bar') ... def my_func(self, foo): ... '''{foo}''' ... >>> my_func.__doc__ 'bar' """ def decorator(f): f.__doc__ = format_docstring(f.__name__, f.__doc__, docs) return f return decorator
[ "def", "templated_docstring", "(", "*", "*", "docs", ")", ":", "def", "decorator", "(", "f", ")", ":", "f", ".", "__doc__", "=", "format_docstring", "(", "f", ".", "__name__", ",", "f", ".", "__doc__", ",", "docs", ")", "return", "f", "return", "decorator" ]
Decorator allowing the use of templated docstrings. Examples -------- >>> @templated_docstring(foo='bar') ... def my_func(self, foo): ... '''{foo}''' ... >>> my_func.__doc__ 'bar'
[ "Decorator", "allowing", "the", "use", "of", "templated", "docstrings", "." ]
python
train
22.294118
numenta/htmresearch
projects/speech_commands/analyze_experiment.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/speech_commands/analyze_experiment.py#L189-L216
def findOptimalResults(expName, suite, outFile): """ Go through every experiment in the specified folder. For each experiment, find the iteration with the best validation score, and return the metrics associated with that iteration. """ writer = csv.writer(outFile) headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy", "experiment path"] writer.writerow(headers) info = [] print("\n================",expName,"=====================") try: # Retrieve the last totalCorrect from each experiment # Print them sorted from best to worst values, params = suite.get_values_fix_params( expName, 0, "testerror", "last") for p in params: expPath = p["name"] if not "results" in expPath: expPath = os.path.join("results", expPath) maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(expPath, suite) row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy, expPath] info.append(row) writer.writerow(row) print(tabulate(info, headers=headers, tablefmt="grid")) except: print("Couldn't analyze experiment",expName)
[ "def", "findOptimalResults", "(", "expName", ",", "suite", ",", "outFile", ")", ":", "writer", "=", "csv", ".", "writer", "(", "outFile", ")", "headers", "=", "[", "\"testAccuracy\"", ",", "\"bgAccuracy\"", ",", "\"maxTotalAccuracy\"", ",", "\"experiment path\"", "]", "writer", ".", "writerow", "(", "headers", ")", "info", "=", "[", "]", "print", "(", "\"\\n================\"", ",", "expName", ",", "\"=====================\"", ")", "try", ":", "# Retrieve the last totalCorrect from each experiment", "# Print them sorted from best to worst", "values", ",", "params", "=", "suite", ".", "get_values_fix_params", "(", "expName", ",", "0", ",", "\"testerror\"", ",", "\"last\"", ")", "for", "p", "in", "params", ":", "expPath", "=", "p", "[", "\"name\"", "]", "if", "not", "\"results\"", "in", "expPath", ":", "expPath", "=", "os", ".", "path", ".", "join", "(", "\"results\"", ",", "expPath", ")", "maxTestAccuracy", ",", "maxValidationAccuracy", ",", "maxBGAccuracy", ",", "maxIter", ",", "maxTotalAccuracy", "=", "bestScore", "(", "expPath", ",", "suite", ")", "row", "=", "[", "maxTestAccuracy", ",", "maxBGAccuracy", ",", "maxTotalAccuracy", ",", "expPath", "]", "info", ".", "append", "(", "row", ")", "writer", ".", "writerow", "(", "row", ")", "print", "(", "tabulate", "(", "info", ",", "headers", "=", "headers", ",", "tablefmt", "=", "\"grid\"", ")", ")", "except", ":", "print", "(", "\"Couldn't analyze experiment\"", ",", "expName", ")" ]
Go through every experiment in the specified folder. For each experiment, find the iteration with the best validation score, and return the metrics associated with that iteration.
[ "Go", "through", "every", "experiment", "in", "the", "specified", "folder", ".", "For", "each", "experiment", "find", "the", "iteration", "with", "the", "best", "validation", "score", "and", "return", "the", "metrics", "associated", "with", "that", "iteration", "." ]
python
train
40
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/input_readers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2115-L2133
def from_json(cls, json): """Creates an instance of the InputReader for the given input shard's state. Args: json: The InputReader state as a dict-like object. Returns: An instance of the InputReader configured using the given JSON parameters. """ # Strip out unrecognized parameters, as introduced by b/5960884. params = dict((str(k), v) for k, v in json.iteritems() if k in cls._PARAMS) # This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because # the constructor parameters need to be JSON-encodable, so the decoding # needs to happen there anyways. if cls._OFFSET_PARAM in params: params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM]) return cls(**params)
[ "def", "from_json", "(", "cls", ",", "json", ")", ":", "# Strip out unrecognized parameters, as introduced by b/5960884.", "params", "=", "dict", "(", "(", "str", "(", "k", ")", ",", "v", ")", "for", "k", ",", "v", "in", "json", ".", "iteritems", "(", ")", "if", "k", "in", "cls", ".", "_PARAMS", ")", "# This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because", "# the constructor parameters need to be JSON-encodable, so the decoding", "# needs to happen there anyways.", "if", "cls", ".", "_OFFSET_PARAM", "in", "params", ":", "params", "[", "cls", ".", "_OFFSET_PARAM", "]", "=", "base64", ".", "b64decode", "(", "params", "[", "cls", ".", "_OFFSET_PARAM", "]", ")", "return", "cls", "(", "*", "*", "params", ")" ]
Creates an instance of the InputReader for the given input shard's state. Args: json: The InputReader state as a dict-like object. Returns: An instance of the InputReader configured using the given JSON parameters.
[ "Creates", "an", "instance", "of", "the", "InputReader", "for", "the", "given", "input", "shard", "s", "state", "." ]
python
train
39.947368
odlgroup/odl
odl/util/numerics.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/numerics.py#L557-L603
def _padding_slices_inner(lhs_arr, rhs_arr, axis, offset, pad_mode): """Return slices into the inner array part for a given ``pad_mode``. When performing padding, these slices yield the values from the inner part of a larger array that are to be assigned to the excess part of the same array. Slices for both sides ("left", "right") of the arrays in a given ``axis`` are returned. """ # Calculate the start and stop indices for the inner part istart_inner = offset[axis] n_large = max(lhs_arr.shape[axis], rhs_arr.shape[axis]) n_small = min(lhs_arr.shape[axis], rhs_arr.shape[axis]) istop_inner = istart_inner + n_small # Number of values padded to left and right n_pad_l = istart_inner n_pad_r = n_large - istop_inner if pad_mode == 'periodic': # left: n_pad_l forward, ending at istop_inner - 1 pad_slc_l = slice(istop_inner - n_pad_l, istop_inner) # right: n_pad_r forward, starting at istart_inner pad_slc_r = slice(istart_inner, istart_inner + n_pad_r) elif pad_mode == 'symmetric': # left: n_pad_l backward, ending at istart_inner + 1 pad_slc_l = slice(istart_inner + n_pad_l, istart_inner, -1) # right: n_pad_r backward, starting at istop_inner - 2 # For the corner case that the stopping index is -1, we need to # replace it with None, since -1 as stopping index is equivalent # to the last index, which is not what we want (0 as last index). istop_r = istop_inner - 2 - n_pad_r if istop_r == -1: istop_r = None pad_slc_r = slice(istop_inner - 2, istop_r, -1) elif pad_mode in ('order0', 'order1'): # left: only the first entry, using a slice to avoid squeezing pad_slc_l = slice(istart_inner, istart_inner + 1) # right: only last entry pad_slc_r = slice(istop_inner - 1, istop_inner) else: # Slices are not used, returning trivial ones. The function should not # be used for other modes anyway. pad_slc_l, pad_slc_r = slice(0), slice(0) return pad_slc_l, pad_slc_r
[ "def", "_padding_slices_inner", "(", "lhs_arr", ",", "rhs_arr", ",", "axis", ",", "offset", ",", "pad_mode", ")", ":", "# Calculate the start and stop indices for the inner part", "istart_inner", "=", "offset", "[", "axis", "]", "n_large", "=", "max", "(", "lhs_arr", ".", "shape", "[", "axis", "]", ",", "rhs_arr", ".", "shape", "[", "axis", "]", ")", "n_small", "=", "min", "(", "lhs_arr", ".", "shape", "[", "axis", "]", ",", "rhs_arr", ".", "shape", "[", "axis", "]", ")", "istop_inner", "=", "istart_inner", "+", "n_small", "# Number of values padded to left and right", "n_pad_l", "=", "istart_inner", "n_pad_r", "=", "n_large", "-", "istop_inner", "if", "pad_mode", "==", "'periodic'", ":", "# left: n_pad_l forward, ending at istop_inner - 1", "pad_slc_l", "=", "slice", "(", "istop_inner", "-", "n_pad_l", ",", "istop_inner", ")", "# right: n_pad_r forward, starting at istart_inner", "pad_slc_r", "=", "slice", "(", "istart_inner", ",", "istart_inner", "+", "n_pad_r", ")", "elif", "pad_mode", "==", "'symmetric'", ":", "# left: n_pad_l backward, ending at istart_inner + 1", "pad_slc_l", "=", "slice", "(", "istart_inner", "+", "n_pad_l", ",", "istart_inner", ",", "-", "1", ")", "# right: n_pad_r backward, starting at istop_inner - 2", "# For the corner case that the stopping index is -1, we need to", "# replace it with None, since -1 as stopping index is equivalent", "# to the last index, which is not what we want (0 as last index).", "istop_r", "=", "istop_inner", "-", "2", "-", "n_pad_r", "if", "istop_r", "==", "-", "1", ":", "istop_r", "=", "None", "pad_slc_r", "=", "slice", "(", "istop_inner", "-", "2", ",", "istop_r", ",", "-", "1", ")", "elif", "pad_mode", "in", "(", "'order0'", ",", "'order1'", ")", ":", "# left: only the first entry, using a slice to avoid squeezing", "pad_slc_l", "=", "slice", "(", "istart_inner", ",", "istart_inner", "+", "1", ")", "# right: only last entry", "pad_slc_r", "=", "slice", "(", "istop_inner", "-", "1", ",", "istop_inner", ")", "else", ":", "# Slices are not used, returning trivial ones. The function should not", "# be used for other modes anyway.", "pad_slc_l", ",", "pad_slc_r", "=", "slice", "(", "0", ")", ",", "slice", "(", "0", ")", "return", "pad_slc_l", ",", "pad_slc_r" ]
Return slices into the inner array part for a given ``pad_mode``. When performing padding, these slices yield the values from the inner part of a larger array that are to be assigned to the excess part of the same array. Slices for both sides ("left", "right") of the arrays in a given ``axis`` are returned.
[ "Return", "slices", "into", "the", "inner", "array", "part", "for", "a", "given", "pad_mode", "." ]
python
train
44.276596
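A worked numeric example of the 'periodic' branch above, for an inner part of length 5 inside a length-8 output with offset 2 (so n_pad_l=2 and n_pad_r=1, giving pad_slc_l = slice(5, 7) and pad_slc_r = slice(2, 3)):

import numpy as np

small, large, offset = 5, 8, 2
istart, istop = offset, offset + small        # inner part occupies indices [2, 7)

arr = np.zeros(large)
arr[istart:istop] = np.arange(1, small + 1)   # [0. 0. 1. 2. 3. 4. 5. 0.]

# 'periodic': left excess copies the inner tail, right excess copies the inner head
arr[:istart] = arr[istop - istart:istop]             # pad_slc_l = slice(5, 7)
arr[istop:] = arr[istart:istart + large - istop]     # pad_slc_r = slice(2, 3)
print(arr)                                           # [4. 5. 1. 2. 3. 4. 5. 1.]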
kragniz/python-etcd3
etcd3/client.py
https://github.com/kragniz/python-etcd3/blob/0adb14840d4a6011a2023a13f07e247e4c336a80/etcd3/client.py#L895-L910
def update_member(self, member_id, peer_urls): """ Update the configuration of an existing member in the cluster. :param member_id: ID of the member to update :param peer_urls: new list of peer urls the member will use to communicate with the cluster """ member_update_request = etcdrpc.MemberUpdateRequest(ID=member_id, peerURLs=peer_urls) self.clusterstub.MemberUpdate( member_update_request, self.timeout, credentials=self.call_credentials, metadata=self.metadata )
[ "def", "update_member", "(", "self", ",", "member_id", ",", "peer_urls", ")", ":", "member_update_request", "=", "etcdrpc", ".", "MemberUpdateRequest", "(", "ID", "=", "member_id", ",", "peerURLs", "=", "peer_urls", ")", "self", ".", "clusterstub", ".", "MemberUpdate", "(", "member_update_request", ",", "self", ".", "timeout", ",", "credentials", "=", "self", ".", "call_credentials", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Update the configuration of an existing member in the cluster. :param member_id: ID of the member to update :param peer_urls: new list of peer urls the member will use to communicate with the cluster
[ "Update", "the", "configuration", "of", "an", "existing", "member", "in", "the", "cluster", "." ]
python
train
40.75
tango-controls/pytango
tango/utils.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L1242-L1262
def index(self, item, minindex=0, maxindex=None):
        """Provide an index of first occurrence of item in the list. (or raise
        a ValueError if item not present)
        If item is not a string, will raise a TypeError.
        minindex and maxindex are also optional arguments
        s.index(x[, i[, j]]) return smallest k such that s[k] == x and i <= k < j
        """
        if maxindex is None:
            maxindex = len(self)
        minindex = max(0, minindex) - 1
        maxindex = min(len(self), maxindex)
        if not isinstance(item, str):
            raise TypeError(
                'Members of this object must be strings. '
                'You supplied \"%s\"' % type(item))
        index = minindex
        while index < maxindex:
            index += 1
            if item.lower() == self[index].lower():
                return index
        raise ValueError(': list.index(x): x not in list')
[ "def", "index", "(", "self", ",", "item", ",", "minindex", "=", "0", ",", "maxindex", "=", "None", ")", ":", "if", "maxindex", "is", "None", ":", "maxindex", "=", "len", "(", "self", ")", "minindex", "=", "max", "(", "0", ",", "minindex", ")", "-", "1", "maxindex", "=", "min", "(", "len", "(", "self", ")", ",", "maxindex", ")", "if", "not", "isinstance", "(", "item", ",", "str", ")", ":", "raise", "TypeError", "(", "'Members of this object must be strings. '", "'You supplied \\\"%s\\\"'", "%", "type", "(", "item", ")", ")", "index", "=", "minindex", "while", "index", "<", "maxindex", ":", "index", "+=", "1", "if", "item", ".", "lower", "(", ")", "==", "self", "[", "index", "]", ".", "lower", "(", ")", ":", "return", "index", "raise", "ValueError", "(", "': list.index(x): x not in list'", ")" ]
Provide an index of first occurrence of item in the list. (or raise
        a ValueError if item not present)
        If item is not a string, will raise a TypeError.
        minindex and maxindex are also optional arguments
        s.index(x[, i[, j]]) return smallest k such that s[k] == x and i <= k < j
[ "Provide", "an", "index", "of", "first", "occurence", "of", "item", "in", "the", "list", ".", "(", "or", "raise", "a", "ValueError", "if", "item", "not", "present", ")", "If", "item", "is", "not", "a", "string", "will", "raise", "a", "TypeError", ".", "minindex", "and", "maxindex", "are", "also", "optional", "arguments", "s", ".", "index", "(", "x", "[", "i", "[", "j", "]]", ")", "return", "smallest", "k", "such", "that", "s", "[", "k", "]", "==", "x", "and", "i", "<", "=", "k", "<", "j" ]
python
train
42.952381
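A hedged sketch of the case-insensitive lookup; CaselessList is the assumed host class in tango.utils (the method compares items with .lower() on both sides):

from tango.utils import CaselessList  # assumed host class for this method

devices = CaselessList(['Motor', 'Pump', 'Valve'])
print(devices.index('pump'))    # 1, despite the case mismatch
print(devices.index('VALVE'))   # 2
# devices.index(42) raises TypeError: members must be strings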
uw-it-aca/uw-restclients
restclients/bridge/user.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/bridge/user.py#L118-L127
def get_user_by_id(bridge_id, include_course_summary=True): """ :param bridge_id: integer Return a list of BridgeUsers objects with custom fields """ url = author_id_url(bridge_id) + "?%s" % CUSTOM_FIELD if include_course_summary: url = "%s&%s" % (url, COURSE_SUMMARY) resp = get_resource(url) return _process_json_resp_data(resp)
[ "def", "get_user_by_id", "(", "bridge_id", ",", "include_course_summary", "=", "True", ")", ":", "url", "=", "author_id_url", "(", "bridge_id", ")", "+", "\"?%s\"", "%", "CUSTOM_FIELD", "if", "include_course_summary", ":", "url", "=", "\"%s&%s\"", "%", "(", "url", ",", "COURSE_SUMMARY", ")", "resp", "=", "get_resource", "(", "url", ")", "return", "_process_json_resp_data", "(", "resp", ")" ]
:param bridge_id: integer Return a list of BridgeUsers objects with custom fields
[ ":", "param", "bridge_id", ":", "integer", "Return", "a", "list", "of", "BridgeUsers", "objects", "with", "custom", "fields" ]
python
train
36.1
apache/spark
python/pyspark/mllib/evaluation.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/evaluation.py#L514-L521
def recall(self, label=None):
        """
        Returns overall recall, or recall for a given label (category) if specified.
        """
        if label is None:
            return self.call("recall")
        else:
            return self.call("recall", float(label))
[ "def", "recall", "(", "self", ",", "label", "=", "None", ")", ":", "if", "label", "is", "None", ":", "return", "self", ".", "call", "(", "\"recall\"", ")", "else", ":", "return", "self", ".", "call", "(", "\"recall\"", ",", "float", "(", "label", ")", ")" ]
Returns overall recall, or recall for a given label (category) if specified.
[ "Returns", "recall", "or", "recall", "for", "a", "given", "label", "(", "category", ")", "if", "specified", "." ]
python
train
31.75
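A usage sketch, assuming a live SparkContext named sc; the RDD pairs are (prediction, label):

from pyspark.mllib.evaluation import MulticlassMetrics

pairs = sc.parallelize([(0.0, 0.0), (1.0, 1.0), (1.0, 0.0), (0.0, 1.0)])
metrics = MulticlassMetrics(pairs)
print(metrics.recall(1.0))   # 0.5: one of the two label-1.0 rows was predicted correctly
print(metrics.recall())      # overall recall across all labels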
DarkEnergySurvey/ugali
ugali/analysis/results.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/results.py#L100-L113
def estimate_position_angle(self,param='position_angle',burn=None,clip=10.0,alpha=0.32): """ Estimate the position angle from the posterior dealing with periodicity. """ # Transform so peak in the middle of the distribution pa = self.samples.get(param,burn=burn,clip=clip) peak = ugali.utils.stats.kde_peak(pa) shift = 180.*((pa+90-peak)>180) pa -= shift # Get the kde interval ret = ugali.utils.stats.peak_interval(pa,alpha) if ret[0] < 0: ret[0] += 180.; ret[1][0] += 180.; ret[1][1] += 180.; return ret
[ "def", "estimate_position_angle", "(", "self", ",", "param", "=", "'position_angle'", ",", "burn", "=", "None", ",", "clip", "=", "10.0", ",", "alpha", "=", "0.32", ")", ":", "# Transform so peak in the middle of the distribution", "pa", "=", "self", ".", "samples", ".", "get", "(", "param", ",", "burn", "=", "burn", ",", "clip", "=", "clip", ")", "peak", "=", "ugali", ".", "utils", ".", "stats", ".", "kde_peak", "(", "pa", ")", "shift", "=", "180.", "*", "(", "(", "pa", "+", "90", "-", "peak", ")", ">", "180", ")", "pa", "-=", "shift", "# Get the kde interval", "ret", "=", "ugali", ".", "utils", ".", "stats", ".", "peak_interval", "(", "pa", ",", "alpha", ")", "if", "ret", "[", "0", "]", "<", "0", ":", "ret", "[", "0", "]", "+=", "180.", "ret", "[", "1", "]", "[", "0", "]", "+=", "180.", "ret", "[", "1", "]", "[", "1", "]", "+=", "180.", "return", "ret" ]
Estimate the position angle from the posterior dealing with periodicity.
[ "Estimate", "the", "position", "angle", "from", "the", "posterior", "dealing", "with", "periodicity", "." ]
python
train
42.928571
hugapi/hug
hug/authentication.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/hug/authentication.py#L68-L98
def basic(request, response, verify_user, realm='simple', context=None, **kwargs): """Basic HTTP Authentication""" http_auth = request.auth response.set_header('WWW-Authenticate', 'Basic') if http_auth is None: return if isinstance(http_auth, bytes): http_auth = http_auth.decode('utf8') try: auth_type, user_and_key = http_auth.split(' ', 1) except ValueError: raise HTTPUnauthorized('Authentication Error', 'Authentication header is improperly formed', challenges=('Basic realm="{}"'.format(realm), )) if auth_type.lower() == 'basic': try: user_id, key = base64.decodebytes(bytes(user_and_key.strip(), 'utf8')).decode('utf8').split(':', 1) try: user = verify_user(user_id, key) except TypeError: user = verify_user(user_id, key, context) if user: response.set_header('WWW-Authenticate', '') return user except (binascii.Error, ValueError): raise HTTPUnauthorized('Authentication Error', 'Unable to determine user and password with provided encoding', challenges=('Basic realm="{}"'.format(realm), )) return False
[ "def", "basic", "(", "request", ",", "response", ",", "verify_user", ",", "realm", "=", "'simple'", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "http_auth", "=", "request", ".", "auth", "response", ".", "set_header", "(", "'WWW-Authenticate'", ",", "'Basic'", ")", "if", "http_auth", "is", "None", ":", "return", "if", "isinstance", "(", "http_auth", ",", "bytes", ")", ":", "http_auth", "=", "http_auth", ".", "decode", "(", "'utf8'", ")", "try", ":", "auth_type", ",", "user_and_key", "=", "http_auth", ".", "split", "(", "' '", ",", "1", ")", "except", "ValueError", ":", "raise", "HTTPUnauthorized", "(", "'Authentication Error'", ",", "'Authentication header is improperly formed'", ",", "challenges", "=", "(", "'Basic realm=\"{}\"'", ".", "format", "(", "realm", ")", ",", ")", ")", "if", "auth_type", ".", "lower", "(", ")", "==", "'basic'", ":", "try", ":", "user_id", ",", "key", "=", "base64", ".", "decodebytes", "(", "bytes", "(", "user_and_key", ".", "strip", "(", ")", ",", "'utf8'", ")", ")", ".", "decode", "(", "'utf8'", ")", ".", "split", "(", "':'", ",", "1", ")", "try", ":", "user", "=", "verify_user", "(", "user_id", ",", "key", ")", "except", "TypeError", ":", "user", "=", "verify_user", "(", "user_id", ",", "key", ",", "context", ")", "if", "user", ":", "response", ".", "set_header", "(", "'WWW-Authenticate'", ",", "''", ")", "return", "user", "except", "(", "binascii", ".", "Error", ",", "ValueError", ")", ":", "raise", "HTTPUnauthorized", "(", "'Authentication Error'", ",", "'Unable to determine user and password with provided encoding'", ",", "challenges", "=", "(", "'Basic realm=\"{}\"'", ".", "format", "(", "realm", ")", ",", ")", ")", "return", "False" ]
Basic HTTP Authentication
[ "Basic", "HTTP", "Authentication" ]
python
train
42.774194
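Wired into an endpoint, the authenticator above takes a verify_user callable; this follows hug's documented pattern:

import hug

authentication = hug.authentication.basic(hug.authentication.verify('User1', 'mypassword'))

@hug.get(requires=authentication)
def authenticated(user: hug.directives.user):
    return 'Authenticated as: {0}'.format(user)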
tamasgal/km3pipe
km3pipe/db.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L645-L648
def unit(self, parameter): "Get the unit for given parameter" parameter = self._get_parameter_name(parameter).lower() return self._parameters[parameter]['Unit']
[ "def", "unit", "(", "self", ",", "parameter", ")", ":", "parameter", "=", "self", ".", "_get_parameter_name", "(", "parameter", ")", ".", "lower", "(", ")", "return", "self", ".", "_parameters", "[", "parameter", "]", "[", "'Unit'", "]" ]
Get the unit for given parameter
[ "Get", "the", "unit", "for", "given", "parameter" ]
python
train
45.25