Columns:
repo: string (lengths 7 to 55)
path: string (lengths 4 to 223)
url: string (lengths 87 to 315)
code: string (lengths 75 to 104k)
code_tokens: list
docstring: string (lengths 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91 to 980)
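The avg_line_len column is not explained above; its values are consistent with the snippet's character count divided by its line count (the first record below spans 18 source lines, L745 to L762, and 770 / 18 ≈ 42.78). A minimal sketch of that computation, assuming the original multi-line source rather than the single-line rendering shown in this preview (the helper name is illustrative, not part of the dataset):

```python
def avg_line_len(code: str) -> float:
    """Average characters per line for a multi-line code snippet.

    Assumes `code` still contains its original newlines; the preview below
    flattens each snippet onto one line, so the value cannot be recomputed
    from this dump directly.
    """
    lines = code.splitlines()
    return len(code) / len(lines) if lines else 0.0
```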
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L745-L762
def _ensure_channel_connected(self, destination_id): """ Ensure we opened a channel to destination_id. """ if destination_id not in self._open_channels: self._open_channels.append(destination_id) self.send_message( destination_id, NS_CONNECTION, {MESSAGE_TYPE: TYPE_CONNECT, 'origin': {}, 'userAgent': 'PyChromecast', 'senderInfo': { 'sdkType': 2, 'version': '15.605.1.3', 'browserVersion': "44.0.2403.30", 'platform': 4, 'systemVersion': 'Macintosh; Intel Mac OS X10_10_3', 'connectionType': 1}}, no_add_request_id=True)
[ "def", "_ensure_channel_connected", "(", "self", ",", "destination_id", ")", ":", "if", "destination_id", "not", "in", "self", ".", "_open_channels", ":", "self", ".", "_open_channels", ".", "append", "(", "destination_id", ")", "self", ".", "send_message", "(", "destination_id", ",", "NS_CONNECTION", ",", "{", "MESSAGE_TYPE", ":", "TYPE_CONNECT", ",", "'origin'", ":", "{", "}", ",", "'userAgent'", ":", "'PyChromecast'", ",", "'senderInfo'", ":", "{", "'sdkType'", ":", "2", ",", "'version'", ":", "'15.605.1.3'", ",", "'browserVersion'", ":", "\"44.0.2403.30\"", ",", "'platform'", ":", "4", ",", "'systemVersion'", ":", "'Macintosh; Intel Mac OS X10_10_3'", ",", "'connectionType'", ":", "1", "}", "}", ",", "no_add_request_id", "=", "True", ")" ]
Ensure we opened a channel to destination_id.
[ "Ensure", "we", "opened", "a", "channel", "to", "destination_id", "." ]
python
train
42.777778
brainiak/brainiak
brainiak/fcma/classifier.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L184-L220
def _normalize_correlation_data(self, corr_data, norm_unit): """Normalize the correlation data if necessary. Fisher-transform and then z-score the data for every norm_unit samples if norm_unit > 1. Parameters ---------- corr_data: the correlation data in shape [num_samples, num_processed_voxels, num_voxels] norm_unit: int the number of samples on which the normalization is performed Returns ------- normalized_corr_data: the normalized correlation data in shape [num_samples, num_voxels, num_voxels] """ # normalize if necessary if norm_unit > 1: num_samples = len(corr_data) [_, d2, d3] = corr_data.shape second_dimension = d2 * d3 # this is a shallow copy normalized_corr_data = corr_data.reshape(1, num_samples, second_dimension) fcma_extension.normalization(normalized_corr_data, norm_unit) normalized_corr_data = normalized_corr_data.reshape(num_samples, d2, d3) logger.debug( 'normalization done' ) else: normalized_corr_data = corr_data return normalized_corr_data
[ "def", "_normalize_correlation_data", "(", "self", ",", "corr_data", ",", "norm_unit", ")", ":", "# normalize if necessary", "if", "norm_unit", ">", "1", ":", "num_samples", "=", "len", "(", "corr_data", ")", "[", "_", ",", "d2", ",", "d3", "]", "=", "corr_data", ".", "shape", "second_dimension", "=", "d2", "*", "d3", "# this is a shallow copy", "normalized_corr_data", "=", "corr_data", ".", "reshape", "(", "1", ",", "num_samples", ",", "second_dimension", ")", "fcma_extension", ".", "normalization", "(", "normalized_corr_data", ",", "norm_unit", ")", "normalized_corr_data", "=", "normalized_corr_data", ".", "reshape", "(", "num_samples", ",", "d2", ",", "d3", ")", "logger", ".", "debug", "(", "'normalization done'", ")", "else", ":", "normalized_corr_data", "=", "corr_data", "return", "normalized_corr_data" ]
Normalize the correlation data if necessary. Fisher-transform and then z-score the data for every norm_unit samples if norm_unit > 1. Parameters ---------- corr_data: the correlation data in shape [num_samples, num_processed_voxels, num_voxels] norm_unit: int the number of samples on which the normalization is performed Returns ------- normalized_corr_data: the normalized correlation data in shape [num_samples, num_voxels, num_voxels]
[ "Normalize", "the", "correlation", "data", "if", "necessary", "." ]
python
train
39.108108
praekelt/jmbo-gallery
gallery/models.py
https://github.com/praekelt/jmbo-gallery/blob/064e005913d79e456ba014b50205c7916df4714a/gallery/models.py#L64-L69
def youtube_id(self): """Extract and return Youtube video id""" m = re.search(r'/embed/([A-Za-z0-9\-=_]*)', self.embed) if m: return m.group(1) return ''
[ "def", "youtube_id", "(", "self", ")", ":", "m", "=", "re", ".", "search", "(", "r'/embed/([A-Za-z0-9\\-=_]*)'", ",", "self", ".", "embed", ")", "if", "m", ":", "return", "m", ".", "group", "(", "1", ")", "return", "''" ]
Extract and return Youtube video id
[ "Extract", "and", "return", "Youtube", "video", "id" ]
python
train
32
MacHu-GWU/angora-project
angora/dataIO/pk.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/pk.py#L363-L379
def obj2str(obj, pk_protocol=pk_protocol): """Convert arbitrary object to utf-8 string, using base64encode algorithm. Usage:: >>> from weatherlab.lib.dataIO.pk import obj2str >>> data = {"a": 1, "b": 2} >>> obj2str(data, pk_protocol=2) 'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg==' **中文文档** 将可Pickle化的Python对象转化为utf-8编码的"字符串" """ return base64.b64encode(pickle.dumps( obj, protocol=pk_protocol)).decode("utf-8")
[ "def", "obj2str", "(", "obj", ",", "pk_protocol", "=", "pk_protocol", ")", ":", "return", "base64", ".", "b64encode", "(", "pickle", ".", "dumps", "(", "obj", ",", "protocol", "=", "pk_protocol", ")", ")", ".", "decode", "(", "\"utf-8\"", ")" ]
Convert arbitrary object to utf-8 string, using base64encode algorithm. Usage:: >>> from weatherlab.lib.dataIO.pk import obj2str >>> data = {"a": 1, "b": 2} >>> obj2str(data, pk_protocol=2) 'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg==' **中文文档** 将可Pickle化的Python对象转化为utf-8编码的"字符串"
[ "Convert", "arbitrary", "object", "to", "utf", "-", "8", "string", "using", "base64encode", "algorithm", "." ]
python
train
27.294118
google/dotty
efilter/parsers/common/tokenizer.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/tokenizer.py#L259-L266
def _parse_next_token(self): """Will parse patterns until it gets to the next token or EOF.""" while self._position < self.limit: token = self._next_pattern() if token: return token return None
[ "def", "_parse_next_token", "(", "self", ")", ":", "while", "self", ".", "_position", "<", "self", ".", "limit", ":", "token", "=", "self", ".", "_next_pattern", "(", ")", "if", "token", ":", "return", "token", "return", "None" ]
Will parse patterns until it gets to the next token or EOF.
[ "Will", "parse", "patterns", "until", "it", "gets", "to", "the", "next", "token", "or", "EOF", "." ]
python
train
31.375
twilio/twilio-python
twilio/rest/api/v2010/account/queue/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/queue/__init__.py#L347-L361
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: QueueContext for this QueueInstance :rtype: twilio.rest.api.v2010.account.queue.QueueContext """ if self._context is None: self._context = QueueContext( self._version, account_sid=self._solution['account_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "QueueContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: QueueContext for this QueueInstance :rtype: twilio.rest.api.v2010.account.queue.QueueContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
37.666667
santoshphilip/eppy
eppy/modeleditor.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L713-L755
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs): """ Add a new idfobject to the model. If you don't specify a value for a field, the default value will be set. For example :: newidfobject("CONSTRUCTION") newidfobject("CONSTRUCTION", Name='Interior Ceiling_class', Outside_Layer='LW Concrete', Layer_2='soundmat') Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. aname : str, deprecated This parameter is not used. It is left there for backward compatibility. defaultvalues: boolean default is True. If True default values WILL be set. If False, default values WILL NOT be set **kwargs Keyword arguments in the format `field=value` used to set the value of fields in the IDF object when it is created. Returns ------- EpBunch object """ obj = newrawobject(self.model, self.idd_info, key, block=self.block, defaultvalues=defaultvalues) abunch = obj2bunch(self.model, self.idd_info, obj) if aname: warnings.warn("The aname parameter should no longer be used.", UserWarning) namebunch(abunch, aname) self.idfobjects[key].append(abunch) for k, v in list(kwargs.items()): abunch[k] = v return abunch
[ "def", "newidfobject", "(", "self", ",", "key", ",", "aname", "=", "''", ",", "defaultvalues", "=", "True", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "newrawobject", "(", "self", ".", "model", ",", "self", ".", "idd_info", ",", "key", ",", "block", "=", "self", ".", "block", ",", "defaultvalues", "=", "defaultvalues", ")", "abunch", "=", "obj2bunch", "(", "self", ".", "model", ",", "self", ".", "idd_info", ",", "obj", ")", "if", "aname", ":", "warnings", ".", "warn", "(", "\"The aname parameter should no longer be used.\"", ",", "UserWarning", ")", "namebunch", "(", "abunch", ",", "aname", ")", "self", ".", "idfobjects", "[", "key", "]", ".", "append", "(", "abunch", ")", "for", "k", ",", "v", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "abunch", "[", "k", "]", "=", "v", "return", "abunch" ]
Add a new idfobject to the model. If you don't specify a value for a field, the default value will be set. For example :: newidfobject("CONSTRUCTION") newidfobject("CONSTRUCTION", Name='Interior Ceiling_class', Outside_Layer='LW Concrete', Layer_2='soundmat') Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. aname : str, deprecated This parameter is not used. It is left there for backward compatibility. defaultvalues: boolean default is True. If True default values WILL be set. If False, default values WILL NOT be set **kwargs Keyword arguments in the format `field=value` used to set the value of fields in the IDF object when it is created. Returns ------- EpBunch object
[ "Add", "a", "new", "idfobject", "to", "the", "model", ".", "If", "you", "don", "t", "specify", "a", "value", "for", "a", "field", "the", "default", "value", "will", "be", "set", "." ]
python
train
34.697674
moonlitesolutions/SolrClient
SolrClient/solrresp.py
https://github.com/moonlitesolutions/SolrClient/blob/19c5280c9f8e97ee104d22ae883c4ccfd7c4f43b/SolrClient/solrresp.py#L222-L231
def get_field_values_as_list(self,field): ''' :param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] ''' return [doc[field] for doc in self.docs if field in doc]
[ "def", "get_field_values_as_list", "(", "self", ",", "field", ")", ":", "return", "[", "doc", "[", "field", "]", "for", "doc", "in", "self", ".", "docs", "if", "field", "in", "doc", "]" ]
:param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
[ ":", "param", "str", "field", ":", "The", "name", "of", "the", "field", "for", "which", "to", "pull", "in", "values", ".", "Will", "parse", "the", "query", "results", "(", "must", "be", "ungrouped", ")", "and", "return", "all", "values", "of", "field", "as", "a", "list", ".", "Note", "that", "these", "are", "not", "unique", "values", ".", "Example", "::" ]
python
train
77.1
mbj4668/pyang
pyang/__init__.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/__init__.py#L138-L141
def del_module(self, module): """Remove a module from the context""" rev = util.get_latest_revision(module) del self.modules[(module.arg, rev)]
[ "def", "del_module", "(", "self", ",", "module", ")", ":", "rev", "=", "util", ".", "get_latest_revision", "(", "module", ")", "del", "self", ".", "modules", "[", "(", "module", ".", "arg", ",", "rev", ")", "]" ]
Remove a module from the context
[ "Remove", "a", "module", "from", "the", "context" ]
python
train
41
googleapis/google-auth-library-python
google/oauth2/id_token.py
https://github.com/googleapis/google-auth-library-python/blob/2c6ad78917e936f38f87c946209c8031166dc96e/google/oauth2/id_token.py#L144-L159
def verify_firebase_token(id_token, request, audience=None): """Verifies an ID Token issued by Firebase Authentication. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your Firebase application ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token. """ return verify_token( id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)
[ "def", "verify_firebase_token", "(", "id_token", ",", "request", ",", "audience", "=", "None", ")", ":", "return", "verify_token", "(", "id_token", ",", "request", ",", "audience", "=", "audience", ",", "certs_url", "=", "_GOOGLE_APIS_CERTS_URL", ")" ]
Verifies an ID Token issued by Firebase Authentication. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your Firebase application ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
[ "Verifies", "an", "ID", "Token", "issued", "by", "Firebase", "Authentication", "." ]
python
train
39.625
softlayer/softlayer-python
SoftLayer/managers/load_balancer.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/load_balancer.py#L27-L41
def get_lb_pkgs(self): """Retrieves the local load balancer packages. :returns: A dictionary containing the load balancer packages """ _filter = {'items': {'description': utils.query_filter('*Load Balancer*')}} packages = self.prod_pkg.getItems(id=0, filter=_filter) pkgs = [] for package in packages: if not package['description'].startswith('Global'): pkgs.append(package) return pkgs
[ "def", "get_lb_pkgs", "(", "self", ")", ":", "_filter", "=", "{", "'items'", ":", "{", "'description'", ":", "utils", ".", "query_filter", "(", "'*Load Balancer*'", ")", "}", "}", "packages", "=", "self", ".", "prod_pkg", ".", "getItems", "(", "id", "=", "0", ",", "filter", "=", "_filter", ")", "pkgs", "=", "[", "]", "for", "package", "in", "packages", ":", "if", "not", "package", "[", "'description'", "]", ".", "startswith", "(", "'Global'", ")", ":", "pkgs", ".", "append", "(", "package", ")", "return", "pkgs" ]
Retrieves the local load balancer packages. :returns: A dictionary containing the load balancer packages
[ "Retrieves", "the", "local", "load", "balancer", "packages", "." ]
python
train
33.066667
koordinates/python-client
koordinates/publishing.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/publishing.py#L56-L60
def cancel(self): """ Cancel a pending publish task """ target_url = self._client.get_url('PUBLISH', 'DELETE', 'single', {'id': self.id}) r = self._client.request('DELETE', target_url) logger.info("cancel(): %s", r.status_code)
[ "def", "cancel", "(", "self", ")", ":", "target_url", "=", "self", ".", "_client", ".", "get_url", "(", "'PUBLISH'", ",", "'DELETE'", ",", "'single'", ",", "{", "'id'", ":", "self", ".", "id", "}", ")", "r", "=", "self", ".", "_client", ".", "request", "(", "'DELETE'", ",", "target_url", ")", "logger", ".", "info", "(", "\"cancel(): %s\"", ",", "r", ".", "status_code", ")" ]
Cancel a pending publish task
[ "Cancel", "a", "pending", "publish", "task" ]
python
train
51
SmokinCaterpillar/pypet
examples/example_06_parameter_presetting.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_06_parameter_presetting.py#L56-L72
def diff_roessler(value_array, a, c): """The Roessler attractor differential equation :param value_array: 3d array containing the x,y, and z component values. :param a: Constant attractor parameter :param c: Constant attractor parameter :return: 3d array of the Roessler system evaluated at `value_array` """ b=a diff_array = np.zeros(3) diff_array[0] = -value_array[1] - value_array[2] diff_array[1] = value_array[0] + a * value_array[1] diff_array[2] = b + value_array[2] * (value_array[0] - c) return diff_array
[ "def", "diff_roessler", "(", "value_array", ",", "a", ",", "c", ")", ":", "b", "=", "a", "diff_array", "=", "np", ".", "zeros", "(", "3", ")", "diff_array", "[", "0", "]", "=", "-", "value_array", "[", "1", "]", "-", "value_array", "[", "2", "]", "diff_array", "[", "1", "]", "=", "value_array", "[", "0", "]", "+", "a", "*", "value_array", "[", "1", "]", "diff_array", "[", "2", "]", "=", "b", "+", "value_array", "[", "2", "]", "*", "(", "value_array", "[", "0", "]", "-", "c", ")", "return", "diff_array" ]
The Roessler attractor differential equation :param value_array: 3d array containing the x,y, and z component values. :param a: Constant attractor parameter :param c: Constant attractor parameter :return: 3d array of the Roessler system evaluated at `value_array`
[ "The", "Roessler", "attractor", "differential", "equation" ]
python
test
32.352941
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L1793-L1804
def mock_server_receive(sock, length): """Receive `length` bytes from a socket object.""" msg = b'' while length: chunk = sock.recv(length) if chunk == b'': raise socket.error(errno.ECONNRESET, 'closed') length -= len(chunk) msg += chunk return msg
[ "def", "mock_server_receive", "(", "sock", ",", "length", ")", ":", "msg", "=", "b''", "while", "length", ":", "chunk", "=", "sock", ".", "recv", "(", "length", ")", "if", "chunk", "==", "b''", ":", "raise", "socket", ".", "error", "(", "errno", ".", "ECONNRESET", ",", "'closed'", ")", "length", "-=", "len", "(", "chunk", ")", "msg", "+=", "chunk", "return", "msg" ]
Receive `length` bytes from a socket object.
[ "Receive", "length", "bytes", "from", "a", "socket", "object", "." ]
python
train
24.916667
IntegralDefense/critsapi
critsapi/critsdbapi.py
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsdbapi.py#L68-L74
def connect(self): """ Starts the mongodb connection. Must be called before anything else will work. """ self.client = MongoClient(self.mongo_uri) self.db = self.client[self.db_name]
[ "def", "connect", "(", "self", ")", ":", "self", ".", "client", "=", "MongoClient", "(", "self", ".", "mongo_uri", ")", "self", ".", "db", "=", "self", ".", "client", "[", "self", ".", "db_name", "]" ]
Starts the mongodb connection. Must be called before anything else will work.
[ "Starts", "the", "mongodb", "connection", ".", "Must", "be", "called", "before", "anything", "else", "will", "work", "." ]
python
train
32
luckydonald/pytgbot
pytgbot/api_types/receivable/passport.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/receivable/passport.py#L186-L197
def to_array(self): """ Serializes this PassportFile to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(PassportFile, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['file_size'] = int(self.file_size) # type int array['file_date'] = int(self.file_date) # type int return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "PassportFile", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'file_id'", "]", "=", "u", "(", "self", ".", "file_id", ")", "# py2: type unicode, py3: type str", "array", "[", "'file_size'", "]", "=", "int", "(", "self", ".", "file_size", ")", "# type int", "array", "[", "'file_date'", "]", "=", "int", "(", "self", ".", "file_date", ")", "# type int", "return", "array" ]
Serializes this PassportFile to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "PassportFile", "to", "a", "dictionary", "." ]
python
train
36.833333
minhhoit/yacms
yacms/core/models.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/models.py#L451-L468
def _get_next_or_previous_by_order(self, is_next, **kwargs): """ Retrieves next or previous object by order. We implement our own version instead of Django's so we can hook into the published manager, concrete subclasses and our custom ``with_respect_to`` method. """ lookup = self.with_respect_to() lookup["_order"] = self._order + (1 if is_next else -1) concrete_model = base_concrete_model(Orderable, self) try: queryset = concrete_model.objects.published except AttributeError: queryset = concrete_model.objects.filter try: return queryset(**kwargs).get(**lookup) except concrete_model.DoesNotExist: pass
[ "def", "_get_next_or_previous_by_order", "(", "self", ",", "is_next", ",", "*", "*", "kwargs", ")", ":", "lookup", "=", "self", ".", "with_respect_to", "(", ")", "lookup", "[", "\"_order\"", "]", "=", "self", ".", "_order", "+", "(", "1", "if", "is_next", "else", "-", "1", ")", "concrete_model", "=", "base_concrete_model", "(", "Orderable", ",", "self", ")", "try", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "published", "except", "AttributeError", ":", "queryset", "=", "concrete_model", ".", "objects", ".", "filter", "try", ":", "return", "queryset", "(", "*", "*", "kwargs", ")", ".", "get", "(", "*", "*", "lookup", ")", "except", "concrete_model", ".", "DoesNotExist", ":", "pass" ]
Retrieves next or previous object by order. We implement our own version instead of Django's so we can hook into the published manager, concrete subclasses and our custom ``with_respect_to`` method.
[ "Retrieves", "next", "or", "previous", "object", "by", "order", ".", "We", "implement", "our", "own", "version", "instead", "of", "Django", "s", "so", "we", "can", "hook", "into", "the", "published", "manager", "concrete", "subclasses", "and", "our", "custom", "with_respect_to", "method", "." ]
python
train
41.277778
saltstack/salt
salt/modules/pkgutil.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pkgutil.py#L77-L99
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613 ''' List all available package upgrades on this system CLI Example: .. code-block:: bash salt '*' pkgutil.list_upgrades ''' if salt.utils.data.is_true(refresh): refresh_db() upgrades = {} lines = __salt__['cmd.run_stdout']( '/opt/csw/bin/pkgutil -A --parse').splitlines() for line in lines: comps = line.split('\t') if comps[2] == "SAME": continue if comps[2] == "not installed": continue upgrades[comps[0]] = comps[1] return upgrades
[ "def", "list_upgrades", "(", "refresh", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=W0613", "if", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "refresh", ")", ":", "refresh_db", "(", ")", "upgrades", "=", "{", "}", "lines", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "'/opt/csw/bin/pkgutil -A --parse'", ")", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "comps", "=", "line", ".", "split", "(", "'\\t'", ")", "if", "comps", "[", "2", "]", "==", "\"SAME\"", ":", "continue", "if", "comps", "[", "2", "]", "==", "\"not installed\"", ":", "continue", "upgrades", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", "return", "upgrades" ]
List all available package upgrades on this system CLI Example: .. code-block:: bash salt '*' pkgutil.list_upgrades
[ "List", "all", "available", "package", "upgrades", "on", "this", "system" ]
python
train
26.217391
ANTsX/ANTsPy
ants/utils/ndimage_to_list.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/ndimage_to_list.py#L67-L113
def ndimage_to_list(image): """ Split a n dimensional ANTsImage into a list of n-1 dimensional ANTsImages Arguments --------- image : ANTsImage n-dimensional image to split Returns ------- list of ANTsImage types Example ------- >>> import ants >>> image = ants.image_read(ants.get_ants_data('r16')) >>> image2 = ants.image_read(ants.get_ants_data('r16')) >>> imageTar = ants.make_image( ( *image2.shape, 2 ) ) >>> image3 = ants.list_to_ndimage( imageTar, [image,image2]) >>> image3.dimension == 3 >>> images_unmerged = ants.ndimage_to_list( image3 ) >>> len(images_unmerged) == 2 >>> images_unmerged[0].dimension == 2 """ inpixeltype = image.pixeltype dimension = image.dimension components = 1 imageShape = image.shape nSections = imageShape[ dimension - 1 ] subdimension = dimension - 1 suborigin = iio.get_origin( image )[0:subdimension] subspacing = iio.get_spacing( image )[0:subdimension] subdirection = np.eye( subdimension ) for i in range( subdimension ): subdirection[i,:] = iio.get_direction( image )[i,0:subdimension] subdim = image.shape[ 0:subdimension ] imagelist = [] for i in range( nSections ): img = utils.slice_image( image, axis = subdimension, idx = i ) iio.set_spacing( img, subspacing ) iio.set_origin( img, suborigin ) iio.set_direction( img, subdirection ) imagelist.append( img ) return imagelist
[ "def", "ndimage_to_list", "(", "image", ")", ":", "inpixeltype", "=", "image", ".", "pixeltype", "dimension", "=", "image", ".", "dimension", "components", "=", "1", "imageShape", "=", "image", ".", "shape", "nSections", "=", "imageShape", "[", "dimension", "-", "1", "]", "subdimension", "=", "dimension", "-", "1", "suborigin", "=", "iio", ".", "get_origin", "(", "image", ")", "[", "0", ":", "subdimension", "]", "subspacing", "=", "iio", ".", "get_spacing", "(", "image", ")", "[", "0", ":", "subdimension", "]", "subdirection", "=", "np", ".", "eye", "(", "subdimension", ")", "for", "i", "in", "range", "(", "subdimension", ")", ":", "subdirection", "[", "i", ",", ":", "]", "=", "iio", ".", "get_direction", "(", "image", ")", "[", "i", ",", "0", ":", "subdimension", "]", "subdim", "=", "image", ".", "shape", "[", "0", ":", "subdimension", "]", "imagelist", "=", "[", "]", "for", "i", "in", "range", "(", "nSections", ")", ":", "img", "=", "utils", ".", "slice_image", "(", "image", ",", "axis", "=", "subdimension", ",", "idx", "=", "i", ")", "iio", ".", "set_spacing", "(", "img", ",", "subspacing", ")", "iio", ".", "set_origin", "(", "img", ",", "suborigin", ")", "iio", ".", "set_direction", "(", "img", ",", "subdirection", ")", "imagelist", ".", "append", "(", "img", ")", "return", "imagelist" ]
Split a n dimensional ANTsImage into a list of n-1 dimensional ANTsImages Arguments --------- image : ANTsImage n-dimensional image to split Returns ------- list of ANTsImage types Example ------- >>> import ants >>> image = ants.image_read(ants.get_ants_data('r16')) >>> image2 = ants.image_read(ants.get_ants_data('r16')) >>> imageTar = ants.make_image( ( *image2.shape, 2 ) ) >>> image3 = ants.list_to_ndimage( imageTar, [image,image2]) >>> image3.dimension == 3 >>> images_unmerged = ants.ndimage_to_list( image3 ) >>> len(images_unmerged) == 2 >>> images_unmerged[0].dimension == 2
[ "Split", "a", "n", "dimensional", "ANTsImage", "into", "a", "list", "of", "n", "-", "1", "dimensional", "ANTsImages" ]
python
train
31.446809
edeposit/edeposit.amqp.serializers
src/edeposit/amqp/serializers/serializers.py
https://github.com/edeposit/edeposit.amqp.serializers/blob/44409db650b16658e778255e420da9f14ef9f197/src/edeposit/amqp/serializers/serializers.py#L178-L209
def iiOfAny(instance, classes): """ Returns true, if `instance` is instance of any (iiOfAny) of the `classes`. This function doesn't use :py:func:`isinstance` check, it just compares the `class` names. This can be generaly dangerous, but it is really useful when you are comparing class serialized in one module and deserialized in another. This causes, that module paths in class internals are different and :py:func:`isinstance` and :py:func:`type` comparsions thus fails. Use this function instead, if you wan't to check what type is your deserialized message. Args: instance (object): class instance you want to know the type classes (list): classes, or just one class you want to compare - func automatically converts nonlist/nontuple parameters to list Returns: bool: True if `instance` **can be** instance of any of the `classes`. """ if type(classes) not in [list, tuple]: classes = [classes] return any( type(instance).__name__ == cls.__name__ for cls in classes )
[ "def", "iiOfAny", "(", "instance", ",", "classes", ")", ":", "if", "type", "(", "classes", ")", "not", "in", "[", "list", ",", "tuple", "]", ":", "classes", "=", "[", "classes", "]", "return", "any", "(", "type", "(", "instance", ")", ".", "__name__", "==", "cls", ".", "__name__", "for", "cls", "in", "classes", ")" ]
Returns true, if `instance` is instance of any (iiOfAny) of the `classes`. This function doesn't use :py:func:`isinstance` check, it just compares the `class` names. This can be generaly dangerous, but it is really useful when you are comparing class serialized in one module and deserialized in another. This causes, that module paths in class internals are different and :py:func:`isinstance` and :py:func:`type` comparsions thus fails. Use this function instead, if you wan't to check what type is your deserialized message. Args: instance (object): class instance you want to know the type classes (list): classes, or just one class you want to compare - func automatically converts nonlist/nontuple parameters to list Returns: bool: True if `instance` **can be** instance of any of the `classes`.
[ "Returns", "true", "if", "instance", "is", "instance", "of", "any", "(", "iiOfAny", ")", "of", "the", "classes", "." ]
python
train
34.65625
JukeboxPipeline/jukebox-core
src/jukeboxcore/main.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/main.py#L13-L22
def init_environment(): """Set environment variables that are important for the pipeline. :returns: None :rtype: None :raises: None """ os.environ['DJANGO_SETTINGS_MODULE'] = 'jukeboxcore.djsettings' pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), constants.BUILTIN_PLUGIN_PATH)) os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
[ "def", "init_environment", "(", ")", ":", "os", ".", "environ", "[", "'DJANGO_SETTINGS_MODULE'", "]", "=", "'jukeboxcore.djsettings'", "pluginpath", "=", "os", ".", "pathsep", ".", "join", "(", "(", "os", ".", "environ", ".", "get", "(", "'JUKEBOX_PLUGIN_PATH'", ",", "''", ")", ",", "constants", ".", "BUILTIN_PLUGIN_PATH", ")", ")", "os", ".", "environ", "[", "'JUKEBOX_PLUGIN_PATH'", "]", "=", "pluginpath" ]
Set environment variables that are important for the pipeline. :returns: None :rtype: None :raises: None
[ "Set", "environment", "variables", "that", "are", "important", "for", "the", "pipeline", "." ]
python
train
37.5
basilfx/flask-daapserver
daapserver/provider.py
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/provider.py#L96-L118
def create_session(self, user_agent, remote_address, client_version): """ Create a new session. :param str user_agent: Client user agent :param str remote_addr: Remote address of client :param str client_version: Remote client version :return: The new session id :rtype: int """ self.session_counter += 1 self.sessions[self.session_counter] = session = self.session_class() # Set session properties session.user_agent = user_agent session.remote_address = remote_address session.client_version = client_version # Invoke hooks invoke_hooks(self.hooks, "session_created", self.session_counter) return self.session_counter
[ "def", "create_session", "(", "self", ",", "user_agent", ",", "remote_address", ",", "client_version", ")", ":", "self", ".", "session_counter", "+=", "1", "self", ".", "sessions", "[", "self", ".", "session_counter", "]", "=", "session", "=", "self", ".", "session_class", "(", ")", "# Set session properties", "session", ".", "user_agent", "=", "user_agent", "session", ".", "remote_address", "=", "remote_address", "session", ".", "client_version", "=", "client_version", "# Invoke hooks", "invoke_hooks", "(", "self", ".", "hooks", ",", "\"session_created\"", ",", "self", ".", "session_counter", ")", "return", "self", ".", "session_counter" ]
Create a new session. :param str user_agent: Client user agent :param str remote_addr: Remote address of client :param str client_version: Remote client version :return: The new session id :rtype: int
[ "Create", "a", "new", "session", "." ]
python
train
32.086957
noxdafox/vminspect
vminspect/filesystem.py
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/filesystem.py#L142-L144
def checksum(self, path, hashtype='sha1'): """Returns the checksum of the given path.""" return self._handler.checksum(hashtype, posix_path(path))
[ "def", "checksum", "(", "self", ",", "path", ",", "hashtype", "=", "'sha1'", ")", ":", "return", "self", ".", "_handler", ".", "checksum", "(", "hashtype", ",", "posix_path", "(", "path", ")", ")" ]
Returns the checksum of the given path.
[ "Returns", "the", "checksum", "of", "the", "given", "path", "." ]
python
train
53.333333
tensorflow/tensorboard
tensorboard/util/tensor_util.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/util/tensor_util.py#L280-L480
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False): """Create a TensorProto. Args: values: Values to put in the TensorProto. dtype: Optional tensor_pb2 DataType value. shape: List of integers representing the dimensions of tensor. verify_shape: Boolean that enables verification of a shape of values. Returns: A `TensorProto`. Depending on the type, it may contain data in the "tensor_content" attribute, which is not directly useful to Python programs. To access the values you should convert the proto back to a numpy ndarray with `tensor_util.MakeNdarray(proto)`. If `values` is a `TensorProto`, it is immediately returned; `dtype` and `shape` are ignored. Raises: TypeError: if unsupported types are provided. ValueError: if arguments have inappropriate values or if verify_shape is True and shape of values is not equals to a shape from the argument. make_tensor_proto accepts "values" of a python scalar, a python list, a numpy ndarray, or a numpy scalar. If "values" is a python scalar or a python list, make_tensor_proto first convert it to numpy ndarray. If dtype is None, the conversion tries its best to infer the right numpy data type. Otherwise, the resulting numpy array has a convertible data type with the given dtype. In either case above, the numpy ndarray (either the caller provided or the auto converted) must have the convertible type with dtype. make_tensor_proto then converts the numpy array to a tensor proto. If "shape" is None, the resulting tensor proto represents the numpy array precisely. Otherwise, "shape" specifies the tensor's shape and the numpy array can not have more elements than what "shape" specifies. """ if isinstance(values, tensor_pb2.TensorProto): return values if dtype: dtype = dtypes.as_dtype(dtype) is_quantized = dtype in [ dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16, dtypes.qint32, ] # We first convert value to a numpy array or scalar. if isinstance(values, (np.ndarray, np.generic)): if dtype: nparray = values.astype(dtype.as_numpy_dtype) else: nparray = values elif callable(getattr(values, "__array__", None)) or isinstance( getattr(values, "__array_interface__", None), dict ): # If a class has the __array__ method, or __array_interface__ dict, then it # is possible to convert to numpy array. nparray = np.asarray(values, dtype=dtype) # This is the preferred way to create an array from the object, so replace # the `values` with the array so that _FlattenToStrings is not run. values = nparray else: if values is None: raise ValueError("None values not supported.") # if dtype is provided, forces numpy array to be the type # provided if possible. if dtype and dtype.is_numpy_compatible: np_dt = dtype.as_numpy_dtype else: np_dt = None # If shape is None, numpy.prod returns None when dtype is not set, but raises # exception when dtype is set to np.int64 if shape is not None and np.prod(shape, dtype=np.int64) == 0: nparray = np.empty(shape, dtype=np_dt) else: _Assertconvertible(values, dtype) nparray = np.array(values, dtype=np_dt) # check to them. # We need to pass in quantized values as tuples, so don't apply the shape if list(nparray.shape) != _GetDenseDimensions(values) and not is_quantized: raise ValueError( """Argument must be a dense tensor: %s""" """ - got shape %s, but wanted %s.""" % (values, list(nparray.shape), _GetDenseDimensions(values)) ) # python/numpy default float type is float64. We prefer float32 instead. 
if (nparray.dtype == np.float64) and dtype is None: nparray = nparray.astype(np.float32) # python/numpy default int type is int64. We prefer int32 instead. elif (nparray.dtype == np.int64) and dtype is None: downcasted_array = nparray.astype(np.int32) # Do not down cast if it leads to precision loss. if np.array_equal(downcasted_array, nparray): nparray = downcasted_array # if dtype is provided, it must be convertible with what numpy # conversion says. numpy_dtype = dtypes.as_dtype(nparray.dtype) if numpy_dtype is None: raise TypeError("Unrecognized data type: %s" % nparray.dtype) # If dtype was specified and is a quantized type, we convert # numpy_dtype back into the quantized version. if is_quantized: numpy_dtype = dtype if dtype is not None and ( not hasattr(dtype, "base_dtype") or dtype.base_dtype != numpy_dtype.base_dtype ): raise TypeError( "Inconvertible types: %s vs. %s. Value is %s" % (dtype, nparray.dtype, values) ) # If shape is not given, get the shape from the numpy array. if shape is None: shape = nparray.shape is_same_size = True shape_size = nparray.size else: shape = [int(dim) for dim in shape] shape_size = np.prod(shape, dtype=np.int64) is_same_size = shape_size == nparray.size if verify_shape: if not nparray.shape == tuple(shape): raise TypeError( "Expected Tensor's shape: %s, got %s." % (tuple(shape), nparray.shape) ) if nparray.size > shape_size: raise ValueError( "Too many elements provided. Needed at most %d, but received %d" % (shape_size, nparray.size) ) tensor_proto = tensor_pb2.TensorProto( dtype=numpy_dtype.as_datatype_enum, tensor_shape=tensor_shape.as_shape(shape).as_proto(), ) if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1: if nparray.size * nparray.itemsize >= (1 << 31): raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB." ) tensor_proto.tensor_content = nparray.tostring() return tensor_proto # If we were not given values as a numpy array, compute the proto_values # from the given values directly, to avoid numpy trimming nulls from the # strings. Since values could be a list of strings, or a multi-dimensional # list of lists that might or might not correspond to the given shape, # we flatten it conservatively. if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray): proto_values = _FlattenToStrings(values) # At this point, values may be a list of objects that we could not # identify a common type for (hence it was inferred as # np.object/dtypes.string). If we are unable to convert it to a # string, we raise a more helpful error message. # # Ideally, we'd be able to convert the elements of the list to a # common type, but this type inference requires some thinking and # so we defer it for now. try: str_values = [compat.as_bytes(x) for x in proto_values] except TypeError: raise TypeError( "Failed to convert object of type %s to Tensor. " "Contents: %s. Consider casting elements to a " "supported type." % (type(values), values) ) tensor_proto.string_val.extend(str_values) return tensor_proto # TensorFlow expects C order (a.k.a., eigen row major). proto_values = nparray.ravel() append_fn = GetNumpyAppendFn(proto_values.dtype) if append_fn is None: raise TypeError( "Element type not supported in TensorProto: %s" % numpy_dtype.name ) append_fn(tensor_proto, proto_values) return tensor_proto
[ "def", "make_tensor_proto", "(", "values", ",", "dtype", "=", "None", ",", "shape", "=", "None", ",", "verify_shape", "=", "False", ")", ":", "if", "isinstance", "(", "values", ",", "tensor_pb2", ".", "TensorProto", ")", ":", "return", "values", "if", "dtype", ":", "dtype", "=", "dtypes", ".", "as_dtype", "(", "dtype", ")", "is_quantized", "=", "dtype", "in", "[", "dtypes", ".", "qint8", ",", "dtypes", ".", "quint8", ",", "dtypes", ".", "qint16", ",", "dtypes", ".", "quint16", ",", "dtypes", ".", "qint32", ",", "]", "# We first convert value to a numpy array or scalar.", "if", "isinstance", "(", "values", ",", "(", "np", ".", "ndarray", ",", "np", ".", "generic", ")", ")", ":", "if", "dtype", ":", "nparray", "=", "values", ".", "astype", "(", "dtype", ".", "as_numpy_dtype", ")", "else", ":", "nparray", "=", "values", "elif", "callable", "(", "getattr", "(", "values", ",", "\"__array__\"", ",", "None", ")", ")", "or", "isinstance", "(", "getattr", "(", "values", ",", "\"__array_interface__\"", ",", "None", ")", ",", "dict", ")", ":", "# If a class has the __array__ method, or __array_interface__ dict, then it", "# is possible to convert to numpy array.", "nparray", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", "dtype", ")", "# This is the preferred way to create an array from the object, so replace", "# the `values` with the array so that _FlattenToStrings is not run.", "values", "=", "nparray", "else", ":", "if", "values", "is", "None", ":", "raise", "ValueError", "(", "\"None values not supported.\"", ")", "# if dtype is provided, forces numpy array to be the type", "# provided if possible.", "if", "dtype", "and", "dtype", ".", "is_numpy_compatible", ":", "np_dt", "=", "dtype", ".", "as_numpy_dtype", "else", ":", "np_dt", "=", "None", "# If shape is None, numpy.prod returns None when dtype is not set, but raises", "# exception when dtype is set to np.int64", "if", "shape", "is", "not", "None", "and", "np", ".", "prod", "(", "shape", ",", "dtype", "=", "np", ".", "int64", ")", "==", "0", ":", "nparray", "=", "np", ".", "empty", "(", "shape", ",", "dtype", "=", "np_dt", ")", "else", ":", "_Assertconvertible", "(", "values", ",", "dtype", ")", "nparray", "=", "np", ".", "array", "(", "values", ",", "dtype", "=", "np_dt", ")", "# check to them.", "# We need to pass in quantized values as tuples, so don't apply the shape", "if", "list", "(", "nparray", ".", "shape", ")", "!=", "_GetDenseDimensions", "(", "values", ")", "and", "not", "is_quantized", ":", "raise", "ValueError", "(", "\"\"\"Argument must be a dense tensor: %s\"\"\"", "\"\"\" - got shape %s, but wanted %s.\"\"\"", "%", "(", "values", ",", "list", "(", "nparray", ".", "shape", ")", ",", "_GetDenseDimensions", "(", "values", ")", ")", ")", "# python/numpy default float type is float64. We prefer float32 instead.", "if", "(", "nparray", ".", "dtype", "==", "np", ".", "float64", ")", "and", "dtype", "is", "None", ":", "nparray", "=", "nparray", ".", "astype", "(", "np", ".", "float32", ")", "# python/numpy default int type is int64. 
We prefer int32 instead.", "elif", "(", "nparray", ".", "dtype", "==", "np", ".", "int64", ")", "and", "dtype", "is", "None", ":", "downcasted_array", "=", "nparray", ".", "astype", "(", "np", ".", "int32", ")", "# Do not down cast if it leads to precision loss.", "if", "np", ".", "array_equal", "(", "downcasted_array", ",", "nparray", ")", ":", "nparray", "=", "downcasted_array", "# if dtype is provided, it must be convertible with what numpy", "# conversion says.", "numpy_dtype", "=", "dtypes", ".", "as_dtype", "(", "nparray", ".", "dtype", ")", "if", "numpy_dtype", "is", "None", ":", "raise", "TypeError", "(", "\"Unrecognized data type: %s\"", "%", "nparray", ".", "dtype", ")", "# If dtype was specified and is a quantized type, we convert", "# numpy_dtype back into the quantized version.", "if", "is_quantized", ":", "numpy_dtype", "=", "dtype", "if", "dtype", "is", "not", "None", "and", "(", "not", "hasattr", "(", "dtype", ",", "\"base_dtype\"", ")", "or", "dtype", ".", "base_dtype", "!=", "numpy_dtype", ".", "base_dtype", ")", ":", "raise", "TypeError", "(", "\"Inconvertible types: %s vs. %s. Value is %s\"", "%", "(", "dtype", ",", "nparray", ".", "dtype", ",", "values", ")", ")", "# If shape is not given, get the shape from the numpy array.", "if", "shape", "is", "None", ":", "shape", "=", "nparray", ".", "shape", "is_same_size", "=", "True", "shape_size", "=", "nparray", ".", "size", "else", ":", "shape", "=", "[", "int", "(", "dim", ")", "for", "dim", "in", "shape", "]", "shape_size", "=", "np", ".", "prod", "(", "shape", ",", "dtype", "=", "np", ".", "int64", ")", "is_same_size", "=", "shape_size", "==", "nparray", ".", "size", "if", "verify_shape", ":", "if", "not", "nparray", ".", "shape", "==", "tuple", "(", "shape", ")", ":", "raise", "TypeError", "(", "\"Expected Tensor's shape: %s, got %s.\"", "%", "(", "tuple", "(", "shape", ")", ",", "nparray", ".", "shape", ")", ")", "if", "nparray", ".", "size", ">", "shape_size", ":", "raise", "ValueError", "(", "\"Too many elements provided. Needed at most %d, but received %d\"", "%", "(", "shape_size", ",", "nparray", ".", "size", ")", ")", "tensor_proto", "=", "tensor_pb2", ".", "TensorProto", "(", "dtype", "=", "numpy_dtype", ".", "as_datatype_enum", ",", "tensor_shape", "=", "tensor_shape", ".", "as_shape", "(", "shape", ")", ".", "as_proto", "(", ")", ",", ")", "if", "is_same_size", "and", "numpy_dtype", "in", "_TENSOR_CONTENT_TYPES", "and", "shape_size", ">", "1", ":", "if", "nparray", ".", "size", "*", "nparray", ".", "itemsize", ">=", "(", "1", "<<", "31", ")", ":", "raise", "ValueError", "(", "\"Cannot create a tensor proto whose content is larger than 2GB.\"", ")", "tensor_proto", ".", "tensor_content", "=", "nparray", ".", "tostring", "(", ")", "return", "tensor_proto", "# If we were not given values as a numpy array, compute the proto_values", "# from the given values directly, to avoid numpy trimming nulls from the", "# strings. Since values could be a list of strings, or a multi-dimensional", "# list of lists that might or might not correspond to the given shape,", "# we flatten it conservatively.", "if", "numpy_dtype", "==", "dtypes", ".", "string", "and", "not", "isinstance", "(", "values", ",", "np", ".", "ndarray", ")", ":", "proto_values", "=", "_FlattenToStrings", "(", "values", ")", "# At this point, values may be a list of objects that we could not", "# identify a common type for (hence it was inferred as", "# np.object/dtypes.string). 
If we are unable to convert it to a", "# string, we raise a more helpful error message.", "#", "# Ideally, we'd be able to convert the elements of the list to a", "# common type, but this type inference requires some thinking and", "# so we defer it for now.", "try", ":", "str_values", "=", "[", "compat", ".", "as_bytes", "(", "x", ")", "for", "x", "in", "proto_values", "]", "except", "TypeError", ":", "raise", "TypeError", "(", "\"Failed to convert object of type %s to Tensor. \"", "\"Contents: %s. Consider casting elements to a \"", "\"supported type.\"", "%", "(", "type", "(", "values", ")", ",", "values", ")", ")", "tensor_proto", ".", "string_val", ".", "extend", "(", "str_values", ")", "return", "tensor_proto", "# TensorFlow expects C order (a.k.a., eigen row major).", "proto_values", "=", "nparray", ".", "ravel", "(", ")", "append_fn", "=", "GetNumpyAppendFn", "(", "proto_values", ".", "dtype", ")", "if", "append_fn", "is", "None", ":", "raise", "TypeError", "(", "\"Element type not supported in TensorProto: %s\"", "%", "numpy_dtype", ".", "name", ")", "append_fn", "(", "tensor_proto", ",", "proto_values", ")", "return", "tensor_proto" ]
Create a TensorProto. Args: values: Values to put in the TensorProto. dtype: Optional tensor_pb2 DataType value. shape: List of integers representing the dimensions of tensor. verify_shape: Boolean that enables verification of a shape of values. Returns: A `TensorProto`. Depending on the type, it may contain data in the "tensor_content" attribute, which is not directly useful to Python programs. To access the values you should convert the proto back to a numpy ndarray with `tensor_util.MakeNdarray(proto)`. If `values` is a `TensorProto`, it is immediately returned; `dtype` and `shape` are ignored. Raises: TypeError: if unsupported types are provided. ValueError: if arguments have inappropriate values or if verify_shape is True and shape of values is not equals to a shape from the argument. make_tensor_proto accepts "values" of a python scalar, a python list, a numpy ndarray, or a numpy scalar. If "values" is a python scalar or a python list, make_tensor_proto first convert it to numpy ndarray. If dtype is None, the conversion tries its best to infer the right numpy data type. Otherwise, the resulting numpy array has a convertible data type with the given dtype. In either case above, the numpy ndarray (either the caller provided or the auto converted) must have the convertible type with dtype. make_tensor_proto then converts the numpy array to a tensor proto. If "shape" is None, the resulting tensor proto represents the numpy array precisely. Otherwise, "shape" specifies the tensor's shape and the numpy array can not have more elements than what "shape" specifies.
[ "Create", "a", "TensorProto", "." ]
python
train
39.641791
singularityhub/singularity-cli
spython/main/execute.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/execute.py#L15-L92
def execute(self, image = None, command = None, app = None, writable = False, contain = False, bind = None, stream = False, nv = False, return_result=False): ''' execute: send a command to a container Parameters ========== image: full path to singularity image command: command to send to container app: if not None, execute a command in context of an app writable: This option makes the file system accessible as read/write contain: This option disables the automatic sharing of writable filesystems on your host bind: list or single string of bind paths. This option allows you to map directories on your host system to directories within your container using bind mounts nv: if True, load Nvidia Drivers in runtime (default False) return_result: if True, return entire json object with return code and message result (default is False) ''' from spython.utils import check_install check_install() cmd = self._init_command('exec') # nv option leverages any GPU cards if nv is True: cmd += ['--nv'] # If the image is given as a list, it's probably the command if isinstance(image, list): command = image image = None if command is not None: # No image provided, default to use the client's loaded image if image is None: image = self._get_uri() self.quiet = True # If an instance is provided, grab it's name if isinstance(image, self.instance): image = image.get_uri() # Does the user want to use bind paths option? if bind is not None: cmd += self._generate_bind_list(bind) # Does the user want to run an app? if app is not None: cmd = cmd + ['--app', app] sudo = False if writable is True: sudo = True if not isinstance(command, list): command = command.split(' ') cmd = cmd + [image] + command if stream is False: return self._run_command(cmd, sudo=sudo, return_result=return_result) return stream_command(cmd, sudo=sudo) bot.error('Please include a command (list) to execute.')
[ "def", "execute", "(", "self", ",", "image", "=", "None", ",", "command", "=", "None", ",", "app", "=", "None", ",", "writable", "=", "False", ",", "contain", "=", "False", ",", "bind", "=", "None", ",", "stream", "=", "False", ",", "nv", "=", "False", ",", "return_result", "=", "False", ")", ":", "from", "spython", ".", "utils", "import", "check_install", "check_install", "(", ")", "cmd", "=", "self", ".", "_init_command", "(", "'exec'", ")", "# nv option leverages any GPU cards", "if", "nv", "is", "True", ":", "cmd", "+=", "[", "'--nv'", "]", "# If the image is given as a list, it's probably the command", "if", "isinstance", "(", "image", ",", "list", ")", ":", "command", "=", "image", "image", "=", "None", "if", "command", "is", "not", "None", ":", "# No image provided, default to use the client's loaded image", "if", "image", "is", "None", ":", "image", "=", "self", ".", "_get_uri", "(", ")", "self", ".", "quiet", "=", "True", "# If an instance is provided, grab it's name", "if", "isinstance", "(", "image", ",", "self", ".", "instance", ")", ":", "image", "=", "image", ".", "get_uri", "(", ")", "# Does the user want to use bind paths option?", "if", "bind", "is", "not", "None", ":", "cmd", "+=", "self", ".", "_generate_bind_list", "(", "bind", ")", "# Does the user want to run an app?", "if", "app", "is", "not", "None", ":", "cmd", "=", "cmd", "+", "[", "'--app'", ",", "app", "]", "sudo", "=", "False", "if", "writable", "is", "True", ":", "sudo", "=", "True", "if", "not", "isinstance", "(", "command", ",", "list", ")", ":", "command", "=", "command", ".", "split", "(", "' '", ")", "cmd", "=", "cmd", "+", "[", "image", "]", "+", "command", "if", "stream", "is", "False", ":", "return", "self", ".", "_run_command", "(", "cmd", ",", "sudo", "=", "sudo", ",", "return_result", "=", "return_result", ")", "return", "stream_command", "(", "cmd", ",", "sudo", "=", "sudo", ")", "bot", ".", "error", "(", "'Please include a command (list) to execute.'", ")" ]
execute: send a command to a container Parameters ========== image: full path to singularity image command: command to send to container app: if not None, execute a command in context of an app writable: This option makes the file system accessible as read/write contain: This option disables the automatic sharing of writable filesystems on your host bind: list or single string of bind paths. This option allows you to map directories on your host system to directories within your container using bind mounts nv: if True, load Nvidia Drivers in runtime (default False) return_result: if True, return entire json object with return code and message result (default is False)
[ "execute", ":", "send", "a", "command", "to", "a", "container", "Parameters", "==========" ]
python
train
31.320513
novopl/peltak
src/peltak/commands/__init__.py
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/commands/__init__.py#L57-L92
def pretend_option(fn): # type: (FunctionType) -> FunctionType """ Decorator to add a --pretend option to any click command. The value won't be passed down to the command, but rather handled in the callback. The value will be accessible through `peltak.core.context` under 'pretend' if the command needs it. To get the current value you can do: >>> from peltak.commands import click, root_cli >>> from peltak.core import context >>> >>> @root_cli.command('my-command') >>> @pretend_option >>> def my_command(): ... pretend = context.get('pretend', False) This value will be accessible from anywhere in the code. """ def set_pretend(ctx, param, value): # pylint: disable=missing-docstring # type: (click.Context, str, Any) -> None from peltak.core import context from peltak.core import shell context.set('pretend', value or False) if value: shell.cprint('<90>{}', _pretend_msg()) return click.option( '--pretend', is_flag=True, help=("Do not actually do anything, just print shell commands that" "would be executed."), expose_value=False, callback=set_pretend )(fn)
[ "def", "pretend_option", "(", "fn", ")", ":", "# type: (FunctionType) -> FunctionType", "def", "set_pretend", "(", "ctx", ",", "param", ",", "value", ")", ":", "# pylint: disable=missing-docstring", "# type: (click.Context, str, Any) -> None", "from", "peltak", ".", "core", "import", "context", "from", "peltak", ".", "core", "import", "shell", "context", ".", "set", "(", "'pretend'", ",", "value", "or", "False", ")", "if", "value", ":", "shell", ".", "cprint", "(", "'<90>{}'", ",", "_pretend_msg", "(", ")", ")", "return", "click", ".", "option", "(", "'--pretend'", ",", "is_flag", "=", "True", ",", "help", "=", "(", "\"Do not actually do anything, just print shell commands that\"", "\"would be executed.\"", ")", ",", "expose_value", "=", "False", ",", "callback", "=", "set_pretend", ")", "(", "fn", ")" ]
Decorator to add a --pretend option to any click command. The value won't be passed down to the command, but rather handled in the callback. The value will be accessible through `peltak.core.context` under 'pretend' if the command needs it. To get the current value you can do: >>> from peltak.commands import click, root_cli >>> from peltak.core import context >>> >>> @root_cli.command('my-command') >>> @pretend_option >>> def my_command(): ... pretend = context.get('pretend', False) This value will be accessible from anywhere in the code.
[ "Decorator", "to", "add", "a", "--", "pretend", "option", "to", "any", "click", "command", "." ]
python
train
34.611111
nerdvegas/rez
src/rez/bind/_utils.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/bind/_utils.py#L45-L54
def check_version(version, range_=None): """Check that the found software version is within supplied range. Args: version: Version of the package as a Version object. range_: Allowable version range as a VersionRange object. """ if range_ and version not in range_: raise RezBindError("found version %s is not within range %s" % (str(version), str(range_)))
[ "def", "check_version", "(", "version", ",", "range_", "=", "None", ")", ":", "if", "range_", "and", "version", "not", "in", "range_", ":", "raise", "RezBindError", "(", "\"found version %s is not within range %s\"", "%", "(", "str", "(", "version", ")", ",", "str", "(", "range_", ")", ")", ")" ]
Check that the found software version is within supplied range. Args: version: Version of the package as a Version object. range_: Allowable version range as a VersionRange object.
[ "Check", "that", "the", "found", "software", "version", "is", "within", "supplied", "range", "." ]
python
train
41.6
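A small illustration of check_version() in use. The Version/VersionRange import path is assumed from rez's vendored version package, and the version strings are placeholders.

from rez.vendor.version.version import Version, VersionRange

check_version(Version('2.7.0'), VersionRange('2.5+<3'))  # in range: returns quietly
check_version(Version('3.1.0'), VersionRange('2.5+<3'))  # out of range: raises RezBindError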
dims/etcd3-gateway
etcd3gw/client.py
https://github.com/dims/etcd3-gateway/blob/ad566c29cbde135aee20cfd32e0a4815ca3b5ee6/etcd3gw/client.py#L165-L183
def put(self, key, value, lease=None): """Put puts the given key into the key-value store. A put request increments the revision of the key-value store and generates one event in the event history. :param key: :param value: :param lease: :return: boolean """ payload = { "key": _encode(key), "value": _encode(value) } if lease: payload['lease'] = lease.id self.post(self.get_url("/kv/put"), json=payload) return True
[ "def", "put", "(", "self", ",", "key", ",", "value", ",", "lease", "=", "None", ")", ":", "payload", "=", "{", "\"key\"", ":", "_encode", "(", "key", ")", ",", "\"value\"", ":", "_encode", "(", "value", ")", "}", "if", "lease", ":", "payload", "[", "'lease'", "]", "=", "lease", ".", "id", "self", ".", "post", "(", "self", ".", "get_url", "(", "\"/kv/put\"", ")", ",", "json", "=", "payload", ")", "return", "True" ]
Put puts the given key into the key-value store. A put request increments the revision of the key-value store and generates one event in the event history. :param key: :param value: :param lease: :return: boolean
[ "Put", "puts", "the", "given", "key", "into", "the", "key", "-", "value", "store", "." ]
python
train
28.421053
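A minimal sketch of put() through the client, assuming the Etcd3Client class from etcd3gw.client and a gateway reachable on localhost:2379; the key and value are placeholders.

from etcd3gw.client import Etcd3Client

client = Etcd3Client(host='localhost', port=2379)
# put() returns True once the key/value pair is stored (incrementing the store revision).
assert client.put('/config/feature_flag', 'enabled') is True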
gwpy/gwpy
gwpy/timeseries/timeseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L1005-L1133
def filter(self, *filt, **kwargs): """Filter this `TimeSeries` with an IIR or FIR filter Parameters ---------- *filt : filter arguments 1, 2, 3, or 4 arguments defining the filter to be applied, - an ``Nx1`` `~numpy.ndarray` of FIR coefficients - an ``Nx6`` `~numpy.ndarray` of SOS coefficients - ``(numerator, denominator)`` polynomials - ``(zeros, poles, gain)`` - ``(A, B, C, D)`` 'state-space' representation filtfilt : `bool`, optional filter forward and backwards to preserve phase, default: `False` analog : `bool`, optional if `True`, filter coefficients will be converted from Hz to Z-domain digital representation, default: `False` inplace : `bool`, optional if `True`, this array will be overwritten with the filtered version, default: `False` **kwargs other keyword arguments are passed to the filter method Returns ------- result : `TimeSeries` the filtered version of the input `TimeSeries` Notes ----- IIR filters are converted either into cascading second-order sections (if `scipy >= 0.16` is installed), or into the ``(numerator, denominator)`` representation before being applied to this `TimeSeries`. .. note:: When using `scipy < 0.16` some higher-order filters may be unstable. With `scipy >= 0.16` higher-order filters are decomposed into second-order-sections, and so are much more stable. FIR filters are passed directly to :func:`scipy.signal.lfilter` or :func:`scipy.signal.filtfilt` without any conversions. See also -------- scipy.signal.sosfilt for details on filtering with second-order sections (`scipy >= 0.16` only) scipy.signal.sosfiltfilt for details on forward-backward filtering with second-order sections (`scipy >= 0.18` only) scipy.signal.lfilter for details on filtering (without SOS) scipy.signal.filtfilt for details on forward-backward filtering (without SOS) Raises ------ ValueError if ``filt`` arguments cannot be interpreted properly Examples -------- We can design an arbitrarily complicated filter using :mod:`gwpy.signal.filter_design` >>> from gwpy.signal import filter_design >>> bp = filter_design.bandpass(50, 250, 4096.) >>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)] >>> zpk = filter_design.concatenate_zpks(bp, *notches) And then can download some data from LOSC to apply it using `TimeSeries.filter`: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> filtered = data.filter(zpk, filtfilt=True) We can plot the original signal, and the filtered version, cutting off either end of the filtered data to remove filter-edge artefacts >>> from gwpy.plot import Plot >>> plot = Plot(data, filtered[128:-128], separate=True) >>> plot.show() """ # parse keyword arguments filtfilt = kwargs.pop('filtfilt', False) # parse filter form, filt = filter_design.parse_filter( filt, analog=kwargs.pop('analog', False), sample_rate=self.sample_rate.to('Hz').value, ) if form == 'zpk': try: sos = signal.zpk2sos(*filt) except AttributeError: # scipy < 0.16, no SOS filtering sos = None b, a = signal.zpk2tf(*filt) else: sos = None b, a = filt # perform filter kwargs.setdefault('axis', 0) if sos is not None and filtfilt: out = signal.sosfiltfilt(sos, self, **kwargs) elif sos is not None: out = signal.sosfilt(sos, self, **kwargs) elif filtfilt: out = signal.filtfilt(b, a, self, **kwargs) else: out = signal.lfilter(b, a, self, **kwargs) # format as type(self) new = out.view(type(self)) new.__metadata_finalize__(self) new._unit = self.unit return new
[ "def", "filter", "(", "self", ",", "*", "filt", ",", "*", "*", "kwargs", ")", ":", "# parse keyword arguments", "filtfilt", "=", "kwargs", ".", "pop", "(", "'filtfilt'", ",", "False", ")", "# parse filter", "form", ",", "filt", "=", "filter_design", ".", "parse_filter", "(", "filt", ",", "analog", "=", "kwargs", ".", "pop", "(", "'analog'", ",", "False", ")", ",", "sample_rate", "=", "self", ".", "sample_rate", ".", "to", "(", "'Hz'", ")", ".", "value", ",", ")", "if", "form", "==", "'zpk'", ":", "try", ":", "sos", "=", "signal", ".", "zpk2sos", "(", "*", "filt", ")", "except", "AttributeError", ":", "# scipy < 0.16, no SOS filtering", "sos", "=", "None", "b", ",", "a", "=", "signal", ".", "zpk2tf", "(", "*", "filt", ")", "else", ":", "sos", "=", "None", "b", ",", "a", "=", "filt", "# perform filter", "kwargs", ".", "setdefault", "(", "'axis'", ",", "0", ")", "if", "sos", "is", "not", "None", "and", "filtfilt", ":", "out", "=", "signal", ".", "sosfiltfilt", "(", "sos", ",", "self", ",", "*", "*", "kwargs", ")", "elif", "sos", "is", "not", "None", ":", "out", "=", "signal", ".", "sosfilt", "(", "sos", ",", "self", ",", "*", "*", "kwargs", ")", "elif", "filtfilt", ":", "out", "=", "signal", ".", "filtfilt", "(", "b", ",", "a", ",", "self", ",", "*", "*", "kwargs", ")", "else", ":", "out", "=", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "self", ",", "*", "*", "kwargs", ")", "# format as type(self)", "new", "=", "out", ".", "view", "(", "type", "(", "self", ")", ")", "new", ".", "__metadata_finalize__", "(", "self", ")", "new", ".", "_unit", "=", "self", ".", "unit", "return", "new" ]
Filter this `TimeSeries` with an IIR or FIR filter Parameters ---------- *filt : filter arguments 1, 2, 3, or 4 arguments defining the filter to be applied, - an ``Nx1`` `~numpy.ndarray` of FIR coefficients - an ``Nx6`` `~numpy.ndarray` of SOS coefficients - ``(numerator, denominator)`` polynomials - ``(zeros, poles, gain)`` - ``(A, B, C, D)`` 'state-space' representation filtfilt : `bool`, optional filter forward and backwards to preserve phase, default: `False` analog : `bool`, optional if `True`, filter coefficients will be converted from Hz to Z-domain digital representation, default: `False` inplace : `bool`, optional if `True`, this array will be overwritten with the filtered version, default: `False` **kwargs other keyword arguments are passed to the filter method Returns ------- result : `TimeSeries` the filtered version of the input `TimeSeries` Notes ----- IIR filters are converted either into cascading second-order sections (if `scipy >= 0.16` is installed), or into the ``(numerator, denominator)`` representation before being applied to this `TimeSeries`. .. note:: When using `scipy < 0.16` some higher-order filters may be unstable. With `scipy >= 0.16` higher-order filters are decomposed into second-order-sections, and so are much more stable. FIR filters are passed directly to :func:`scipy.signal.lfilter` or :func:`scipy.signal.filtfilt` without any conversions. See also -------- scipy.signal.sosfilt for details on filtering with second-order sections (`scipy >= 0.16` only) scipy.signal.sosfiltfilt for details on forward-backward filtering with second-order sections (`scipy >= 0.18` only) scipy.signal.lfilter for details on filtering (without SOS) scipy.signal.filtfilt for details on forward-backward filtering (without SOS) Raises ------ ValueError if ``filt`` arguments cannot be interpreted properly Examples -------- We can design an arbitrarily complicated filter using :mod:`gwpy.signal.filter_design` >>> from gwpy.signal import filter_design >>> bp = filter_design.bandpass(50, 250, 4096.) >>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)] >>> zpk = filter_design.concatenate_zpks(bp, *notches) And then can download some data from LOSC to apply it using `TimeSeries.filter`: >>> from gwpy.timeseries import TimeSeries >>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) >>> filtered = data.filter(zpk, filtfilt=True) We can plot the original signal, and the filtered version, cutting off either end of the filtered data to remove filter-edge artefacts >>> from gwpy.plot import Plot >>> plot = Plot(data, filtered[128:-128], separate=True) >>> plot.show()
[ "Filter", "this", "TimeSeries", "with", "an", "IIR", "or", "FIR", "filter" ]
python
train
34.031008
wummel/linkchecker
third_party/dnspython/dns/zone.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/zone.py#L161-L179
def find_node(self, name, create=False): """Find a node in the zone, possibly creating it. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @raises KeyError: the name is not known and create was not specified. @rtype: dns.node.Node object """ name = self._validate_name(name) node = self.nodes.get(name) if node is None: if not create: raise KeyError node = self.node_factory() self.nodes[name] = node return node
[ "def", "find_node", "(", "self", ",", "name", ",", "create", "=", "False", ")", ":", "name", "=", "self", ".", "_validate_name", "(", "name", ")", "node", "=", "self", ".", "nodes", ".", "get", "(", "name", ")", "if", "node", "is", "None", ":", "if", "not", "create", ":", "raise", "KeyError", "node", "=", "self", ".", "node_factory", "(", ")", "self", ".", "nodes", "[", "name", "]", "=", "node", "return", "node" ]
Find a node in the zone, possibly creating it. @param name: the name of the node to find @type name: dns.name.Name object or string @param create: should the node be created if it doesn't exist? @type create: bool @raises KeyError: the name is not known and create was not specified. @rtype: dns.node.Node object
[ "Find", "a", "node", "in", "the", "zone", "possibly", "creating", "it", "." ]
python
train
34.894737
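A short sketch of find_node() on a freshly constructed zone; the origin and node name are placeholders.

import dns.name
import dns.zone

zone = dns.zone.Zone(dns.name.from_text('example.com.'))
www = zone.find_node('www', create=True)   # created on demand because create=True
assert zone.find_node('www') is www        # later lookups return the same node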
aliyun/aliyun-odps-python-sdk
odps/df/expr/collections.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/collections.py#L435-L540
def apply(expr, func, axis=0, names=None, types=None, reduce=False, resources=None, keep_nulls=False, args=(), **kwargs): """ Apply a function to a row when axis=1 or column when axis=0. :param expr: :param func: function to apply :param axis: row when axis=1 else column :param names: output names :param types: output types :param reduce: if True will return a sequence else return a collection :param resources: resources to read :param keep_nulls: if True, keep rows producing empty results, only work in lateral views :param args: args for function :param kwargs: kwargs for function :return: :Example: Apply a function to a row: >>> from odps.df import output >>> >>> @output(['iris_add', 'iris_sub'], ['float', 'float']) >>> def handle(row): >>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth >>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth >>> >>> iris.apply(handle, axis=1).count() Apply a function to a column: >>> class Agg(object): >>> >>> def buffer(self): >>> return [0.0, 0] >>> >>> def __call__(self, buffer, val): >>> buffer[0] += val >>> buffer[1] += 1 >>> >>> def merge(self, buffer, pbuffer): >>> buffer[0] += pbuffer[0] >>> buffer[1] += pbuffer[1] >>> >>> def getvalue(self, buffer): >>> if buffer[1] == 0: >>> return 0.0 >>> return buffer[0] / buffer[1] >>> >>> iris.exclude('name').apply(Agg) """ if not isinstance(expr, CollectionExpr): return if isinstance(func, FunctionWrapper): names = names or func.output_names types = types or func.output_types func = func._func if axis == 0: types = types or expr.schema.types types = [validate_data_type(t) for t in types] fields = [expr[n].agg(func, rtype=t, resources=resources) for n, t in zip(expr.schema.names, types)] if names: fields = [f.rename(n) for f, n in zip(fields, names)] else: names = [f.name for f in fields] return Summary(_input=expr, _fields=fields, _schema=Schema.from_lists(names, types)) else: collection_resources = utils.get_collection_resources(resources) if types is not None: if isinstance(types, list): types = tuple(types) elif isinstance(types, six.string_types): types = (types,) types = tuple(validate_data_type(t) for t in types) if reduce: from .element import MappedExpr from ..backends.context import context if names is not None and len(names) > 1: raise ValueError('When reduce, at most one name can be specified') name = names[0] if names is not None else None if not types and kwargs.get('rtype', None) is not None: types = [kwargs.pop('rtype')] tp = types[0] if types is not None else (utils.get_annotation_rtype(func) or string) if not context.is_cached(expr) and (hasattr(expr, '_fields') and expr._fields is not None): inputs = [e.copy_tree(stop_cond=lambda x: any(i is expr.input for i in x.children())) for e in expr._fields] else: inputs = [expr[n] for n in expr.schema.names] return MappedExpr(_func=func, _func_args=args, _func_kwargs=kwargs, _name=name, _data_type=tp, _inputs=inputs, _multiple=True, _resources=resources, _collection_resources=collection_resources) else: return _apply_horizontal(expr, func, names=names, types=types, resources=resources, collection_resources=collection_resources, keep_nulls=keep_nulls, args=args, **kwargs)
[ "def", "apply", "(", "expr", ",", "func", ",", "axis", "=", "0", ",", "names", "=", "None", ",", "types", "=", "None", ",", "reduce", "=", "False", ",", "resources", "=", "None", ",", "keep_nulls", "=", "False", ",", "args", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "expr", ",", "CollectionExpr", ")", ":", "return", "if", "isinstance", "(", "func", ",", "FunctionWrapper", ")", ":", "names", "=", "names", "or", "func", ".", "output_names", "types", "=", "types", "or", "func", ".", "output_types", "func", "=", "func", ".", "_func", "if", "axis", "==", "0", ":", "types", "=", "types", "or", "expr", ".", "schema", ".", "types", "types", "=", "[", "validate_data_type", "(", "t", ")", "for", "t", "in", "types", "]", "fields", "=", "[", "expr", "[", "n", "]", ".", "agg", "(", "func", ",", "rtype", "=", "t", ",", "resources", "=", "resources", ")", "for", "n", ",", "t", "in", "zip", "(", "expr", ".", "schema", ".", "names", ",", "types", ")", "]", "if", "names", ":", "fields", "=", "[", "f", ".", "rename", "(", "n", ")", "for", "f", ",", "n", "in", "zip", "(", "fields", ",", "names", ")", "]", "else", ":", "names", "=", "[", "f", ".", "name", "for", "f", "in", "fields", "]", "return", "Summary", "(", "_input", "=", "expr", ",", "_fields", "=", "fields", ",", "_schema", "=", "Schema", ".", "from_lists", "(", "names", ",", "types", ")", ")", "else", ":", "collection_resources", "=", "utils", ".", "get_collection_resources", "(", "resources", ")", "if", "types", "is", "not", "None", ":", "if", "isinstance", "(", "types", ",", "list", ")", ":", "types", "=", "tuple", "(", "types", ")", "elif", "isinstance", "(", "types", ",", "six", ".", "string_types", ")", ":", "types", "=", "(", "types", ",", ")", "types", "=", "tuple", "(", "validate_data_type", "(", "t", ")", "for", "t", "in", "types", ")", "if", "reduce", ":", "from", ".", "element", "import", "MappedExpr", "from", ".", ".", "backends", ".", "context", "import", "context", "if", "names", "is", "not", "None", "and", "len", "(", "names", ")", ">", "1", ":", "raise", "ValueError", "(", "'When reduce, at most one name can be specified'", ")", "name", "=", "names", "[", "0", "]", "if", "names", "is", "not", "None", "else", "None", "if", "not", "types", "and", "kwargs", ".", "get", "(", "'rtype'", ",", "None", ")", "is", "not", "None", ":", "types", "=", "[", "kwargs", ".", "pop", "(", "'rtype'", ")", "]", "tp", "=", "types", "[", "0", "]", "if", "types", "is", "not", "None", "else", "(", "utils", ".", "get_annotation_rtype", "(", "func", ")", "or", "string", ")", "if", "not", "context", ".", "is_cached", "(", "expr", ")", "and", "(", "hasattr", "(", "expr", ",", "'_fields'", ")", "and", "expr", ".", "_fields", "is", "not", "None", ")", ":", "inputs", "=", "[", "e", ".", "copy_tree", "(", "stop_cond", "=", "lambda", "x", ":", "any", "(", "i", "is", "expr", ".", "input", "for", "i", "in", "x", ".", "children", "(", ")", ")", ")", "for", "e", "in", "expr", ".", "_fields", "]", "else", ":", "inputs", "=", "[", "expr", "[", "n", "]", "for", "n", "in", "expr", ".", "schema", ".", "names", "]", "return", "MappedExpr", "(", "_func", "=", "func", ",", "_func_args", "=", "args", ",", "_func_kwargs", "=", "kwargs", ",", "_name", "=", "name", ",", "_data_type", "=", "tp", ",", "_inputs", "=", "inputs", ",", "_multiple", "=", "True", ",", "_resources", "=", "resources", ",", "_collection_resources", "=", "collection_resources", ")", "else", ":", "return", "_apply_horizontal", "(", "expr", ",", "func", ",", "names", "=", 
"names", ",", "types", "=", "types", ",", "resources", "=", "resources", ",", "collection_resources", "=", "collection_resources", ",", "keep_nulls", "=", "keep_nulls", ",", "args", "=", "args", ",", "*", "*", "kwargs", ")" ]
Apply a function to a row when axis=1 or column when axis=0. :param expr: :param func: function to apply :param axis: row when axis=1 else column :param names: output names :param types: output types :param reduce: if True will return a sequence else return a collection :param resources: resources to read :param keep_nulls: if True, keep rows producing empty results, only work in lateral views :param args: args for function :param kwargs: kwargs for function :return: :Example: Apply a function to a row: >>> from odps.df import output >>> >>> @output(['iris_add', 'iris_sub'], ['float', 'float']) >>> def handle(row): >>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth >>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth >>> >>> iris.apply(handle, axis=1).count() Apply a function to a column: >>> class Agg(object): >>> >>> def buffer(self): >>> return [0.0, 0] >>> >>> def __call__(self, buffer, val): >>> buffer[0] += val >>> buffer[1] += 1 >>> >>> def merge(self, buffer, pbuffer): >>> buffer[0] += pbuffer[0] >>> buffer[1] += pbuffer[1] >>> >>> def getvalue(self, buffer): >>> if buffer[1] == 0: >>> return 0.0 >>> return buffer[0] / buffer[1] >>> >>> iris.exclude('name').apply(Agg)
[ "Apply", "a", "function", "to", "a", "row", "when", "axis", "=", "1", "or", "column", "when", "axis", "=", "0", "." ]
python
train
37.886792
Accelize/pycosio
pycosio/_core/functions_os_path.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/functions_os_path.py#L108-L124
def isdir(path): """ Return True if path is an existing directory. Equivalent to "os.path.isdir". Args: path (path-like object): Path or URL. Returns: bool: True if directory exists. """ system = get_instance(path) # User may use directory path without trailing '/' # like on standard file systems return system.isdir(system.ensure_dir_path(path))
[ "def", "isdir", "(", "path", ")", ":", "system", "=", "get_instance", "(", "path", ")", "# User may use directory path without trailing '/'", "# like on standard file systems", "return", "system", ".", "isdir", "(", "system", ".", "ensure_dir_path", "(", "path", ")", ")" ]
Return True if path is an existing directory. Equivalent to "os.path.isdir". Args: path (path-like object): Path or URL. Returns: bool: True if directory exists.
[ "Return", "True", "if", "path", "is", "an", "existing", "directory", "." ]
python
train
23
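Usage mirrors os.path.isdir. This sketch assumes isdir is re-exported at the pycosio package top level and that the storage backend for the placeholder URL is configured.

import pycosio

# Works on storage URLs as well as local paths; a missing trailing '/' is tolerated.
if pycosio.isdir('s3://my-bucket/some/prefix'):
    print('prefix exists')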
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/config.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/config.py#L148-L154
def from_args(self, **kwargs): """Read config values from `kwargs`.""" for k, v in iitems(kwargs): if v is not None: if k in self.MUST_BE_LIST and isinstance(v, string_class): v = [v] setattr(self, k, v)
[ "def", "from_args", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "iitems", "(", "kwargs", ")", ":", "if", "v", "is", "not", "None", ":", "if", "k", "in", "self", ".", "MUST_BE_LIST", "and", "isinstance", "(", "v", ",", "string_class", ")", ":", "v", "=", "[", "v", "]", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
Read config values from `kwargs`.
[ "Read", "config", "values", "from", "kwargs", "." ]
python
test
39.571429
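A rough sketch of the method in use, assuming the CoverageConfig class from the same module and that 'omit' is one of its MUST_BE_LIST options:

cfg = CoverageConfig()
cfg.from_args(branch=True, omit='*/tests/*', timid=None)
# branch is set, timid is skipped because it is None,
# and the single omit pattern is wrapped in a list.
assert cfg.omit == ['*/tests/*']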
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/session.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/session.py#L22-L131
def get_session(user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs): # type: (Optional[str], Optional[str], Optional[str], Any) -> requests.Session """Set up and return Session object that is set up with retrying. Requires either global user agent to be set or appropriate user agent parameter(s) to be completed. Args: user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. **kwargs: See below auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict. status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504]. method_whitelist (iterable): HTTP methods for which to force retry. Defaults t0 frozenset(['GET']). """ s = requests.Session() ua = kwargs.get('full_agent') if not ua: ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs) s.headers['User-Agent'] = ua extra_params = os.getenv('EXTRA_PARAMS') if extra_params is not None: extra_params_dict = dict() if '=' in extra_params: logger.info('Loading extra parameters from environment variable') for extra_param in extra_params.split(','): key, value = extra_param.split('=') extra_params_dict[key] = value else: extra_params_found = False extra_params_dict = kwargs.get('extra_params_dict') if extra_params_dict: extra_params_found = True logger.info('Loading extra parameters from dictionary') extra_params_json = kwargs.get('extra_params_json', '') if extra_params_json: if extra_params_found: raise SessionError('More than one set of extra parameters given!') extra_params_found = True logger.info('Loading extra parameters from: %s' % extra_params_json) extra_params_dict = load_json(extra_params_json) extra_params_yaml = kwargs.get('extra_params_yaml', '') if extra_params_found: if extra_params_yaml: raise SessionError('More than one set of extra parameters given!') else: if extra_params_yaml: logger.info('Loading extra parameters from: %s' % extra_params_yaml) extra_params_dict = load_yaml(extra_params_yaml) else: extra_params_dict = dict() extra_params_lookup = kwargs.get('extra_params_lookup') if extra_params_lookup: extra_params_dict = extra_params_dict.get(extra_params_lookup) if extra_params_dict is None: raise SessionError('%s does not exist in extra_params!' 
% extra_params_lookup) auth_found = False basic_auth = os.getenv('BASIC_AUTH') if basic_auth: logger.info('Loading authorisation from basic_auth environment variable') auth_found = True else: basic_auth = kwargs.get('basic_auth') if basic_auth: logger.info('Loading authorisation from basic_auth argument') auth_found = True bauth = extra_params_dict.get('basic_auth') if bauth: if not auth_found: basic_auth = bauth logger.info('Loading authorisation from basic_auth parameter') auth_found = True del extra_params_dict['basic_auth'] s.params = extra_params_dict auth = kwargs.get('auth') if auth: if auth_found: raise SessionError('More than one authorisation given!') logger.info('Loading authorisation from auth argument') auth_found = True basic_auth_file = kwargs.get('basic_auth_file') if basic_auth_file: if auth_found: raise SessionError('More than one authorisation given!') logger.info('Loading authorisation from: %s' % basic_auth_file) basic_auth = load_file_to_str(basic_auth_file) if basic_auth: auth = decode(basic_auth) s.auth = auth status_forcelist = kwargs.get('status_forcelist', [429, 500, 502, 503, 504]) method_whitelist = kwargs.get('method_whitelist', frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE'])) retries = Retry(total=5, backoff_factor=0.4, status_forcelist=status_forcelist, method_whitelist=method_whitelist, raise_on_redirect=True, raise_on_status=True) s.mount('http://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100)) s.mount('https://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100)) return s
[ "def", "get_session", "(", "user_agent", "=", "None", ",", "user_agent_config_yaml", "=", "None", ",", "user_agent_lookup", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# type: (Optional[str], Optional[str], Optional[str], Any) -> requests.Session", "s", "=", "requests", ".", "Session", "(", ")", "ua", "=", "kwargs", ".", "get", "(", "'full_agent'", ")", "if", "not", "ua", ":", "ua", "=", "UserAgent", ".", "get", "(", "user_agent", ",", "user_agent_config_yaml", ",", "user_agent_lookup", ",", "*", "*", "kwargs", ")", "s", ".", "headers", "[", "'User-Agent'", "]", "=", "ua", "extra_params", "=", "os", ".", "getenv", "(", "'EXTRA_PARAMS'", ")", "if", "extra_params", "is", "not", "None", ":", "extra_params_dict", "=", "dict", "(", ")", "if", "'='", "in", "extra_params", ":", "logger", ".", "info", "(", "'Loading extra parameters from environment variable'", ")", "for", "extra_param", "in", "extra_params", ".", "split", "(", "','", ")", ":", "key", ",", "value", "=", "extra_param", ".", "split", "(", "'='", ")", "extra_params_dict", "[", "key", "]", "=", "value", "else", ":", "extra_params_found", "=", "False", "extra_params_dict", "=", "kwargs", ".", "get", "(", "'extra_params_dict'", ")", "if", "extra_params_dict", ":", "extra_params_found", "=", "True", "logger", ".", "info", "(", "'Loading extra parameters from dictionary'", ")", "extra_params_json", "=", "kwargs", ".", "get", "(", "'extra_params_json'", ",", "''", ")", "if", "extra_params_json", ":", "if", "extra_params_found", ":", "raise", "SessionError", "(", "'More than one set of extra parameters given!'", ")", "extra_params_found", "=", "True", "logger", ".", "info", "(", "'Loading extra parameters from: %s'", "%", "extra_params_json", ")", "extra_params_dict", "=", "load_json", "(", "extra_params_json", ")", "extra_params_yaml", "=", "kwargs", ".", "get", "(", "'extra_params_yaml'", ",", "''", ")", "if", "extra_params_found", ":", "if", "extra_params_yaml", ":", "raise", "SessionError", "(", "'More than one set of extra parameters given!'", ")", "else", ":", "if", "extra_params_yaml", ":", "logger", ".", "info", "(", "'Loading extra parameters from: %s'", "%", "extra_params_yaml", ")", "extra_params_dict", "=", "load_yaml", "(", "extra_params_yaml", ")", "else", ":", "extra_params_dict", "=", "dict", "(", ")", "extra_params_lookup", "=", "kwargs", ".", "get", "(", "'extra_params_lookup'", ")", "if", "extra_params_lookup", ":", "extra_params_dict", "=", "extra_params_dict", ".", "get", "(", "extra_params_lookup", ")", "if", "extra_params_dict", "is", "None", ":", "raise", "SessionError", "(", "'%s does not exist in extra_params!'", "%", "extra_params_lookup", ")", "auth_found", "=", "False", "basic_auth", "=", "os", ".", "getenv", "(", "'BASIC_AUTH'", ")", "if", "basic_auth", ":", "logger", ".", "info", "(", "'Loading authorisation from basic_auth environment variable'", ")", "auth_found", "=", "True", "else", ":", "basic_auth", "=", "kwargs", ".", "get", "(", "'basic_auth'", ")", "if", "basic_auth", ":", "logger", ".", "info", "(", "'Loading authorisation from basic_auth argument'", ")", "auth_found", "=", "True", "bauth", "=", "extra_params_dict", ".", "get", "(", "'basic_auth'", ")", "if", "bauth", ":", "if", "not", "auth_found", ":", "basic_auth", "=", "bauth", "logger", ".", "info", "(", "'Loading authorisation from basic_auth parameter'", ")", "auth_found", "=", "True", "del", "extra_params_dict", "[", "'basic_auth'", "]", "s", ".", "params", "=", "extra_params_dict", "auth", "=", "kwargs", ".", "get", "(", "'auth'", 
")", "if", "auth", ":", "if", "auth_found", ":", "raise", "SessionError", "(", "'More than one authorisation given!'", ")", "logger", ".", "info", "(", "'Loading authorisation from auth argument'", ")", "auth_found", "=", "True", "basic_auth_file", "=", "kwargs", ".", "get", "(", "'basic_auth_file'", ")", "if", "basic_auth_file", ":", "if", "auth_found", ":", "raise", "SessionError", "(", "'More than one authorisation given!'", ")", "logger", ".", "info", "(", "'Loading authorisation from: %s'", "%", "basic_auth_file", ")", "basic_auth", "=", "load_file_to_str", "(", "basic_auth_file", ")", "if", "basic_auth", ":", "auth", "=", "decode", "(", "basic_auth", ")", "s", ".", "auth", "=", "auth", "status_forcelist", "=", "kwargs", ".", "get", "(", "'status_forcelist'", ",", "[", "429", ",", "500", ",", "502", ",", "503", ",", "504", "]", ")", "method_whitelist", "=", "kwargs", ".", "get", "(", "'method_whitelist'", ",", "frozenset", "(", "[", "'HEAD'", ",", "'TRACE'", ",", "'GET'", ",", "'PUT'", ",", "'OPTIONS'", ",", "'DELETE'", "]", ")", ")", "retries", "=", "Retry", "(", "total", "=", "5", ",", "backoff_factor", "=", "0.4", ",", "status_forcelist", "=", "status_forcelist", ",", "method_whitelist", "=", "method_whitelist", ",", "raise_on_redirect", "=", "True", ",", "raise_on_status", "=", "True", ")", "s", ".", "mount", "(", "'http://'", ",", "HTTPAdapter", "(", "max_retries", "=", "retries", ",", "pool_connections", "=", "100", ",", "pool_maxsize", "=", "100", ")", ")", "s", ".", "mount", "(", "'https://'", ",", "HTTPAdapter", "(", "max_retries", "=", "retries", ",", "pool_connections", "=", "100", ",", "pool_maxsize", "=", "100", ")", ")", "return", "s" ]
Set up and return Session object that is set up with retrying. Requires either global user agent to be set or appropriate user agent parameter(s) to be completed. Args: user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. **kwargs: See below auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict. status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504]. method_whitelist (iterable): HTTP methods for which to force retry. Defaults t0 frozenset(['GET']).
[ "Set", "up", "and", "return", "Session", "object", "that", "is", "set", "up", "with", "retrying", ".", "Requires", "either", "global", "user", "agent", "to", "be", "set", "or", "appropriate", "user", "agent", "parameter", "(", "s", ")", "to", "be", "completed", "." ]
python
train
49.745455
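A sketch of a typical call; the user agent string, parameter name, and URL are illustrative. The return value is an ordinary requests.Session with the retry policy, extra parameters, and user agent already attached.

from hdx.utilities.session import get_session

session = get_session(user_agent='MyOrgPipeline',
                      extra_params_dict={'api_key': 'PLACEHOLDER'})
# The extra parameters ride along as query-string parameters on every request.
response = session.get('https://example.org/api/resource')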
saltstack/salt
salt/returners/couchbase_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/couchbase_return.py#L211-L235
def save_load(jid, clear_load, minion=None): ''' Save the load to the specified jid ''' cb_ = _get_connection() try: jid_doc = cb_.get(six.text_type(jid)) except couchbase.exceptions.NotFoundError: cb_.add(six.text_type(jid), {}, ttl=_get_ttl()) jid_doc = cb_.get(six.text_type(jid)) jid_doc.value['load'] = clear_load cb_.replace(six.text_type(jid), jid_doc.value, cas=jid_doc.cas, ttl=_get_ttl()) # if you have a tgt, save that for the UI etc if 'tgt' in clear_load and clear_load['tgt'] != '': ckminions = salt.utils.minions.CkMinions(__opts__) # Retrieve the minions list _res = ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob') ) minions = _res['minions'] save_minions(jid, minions)
[ "def", "save_load", "(", "jid", ",", "clear_load", ",", "minion", "=", "None", ")", ":", "cb_", "=", "_get_connection", "(", ")", "try", ":", "jid_doc", "=", "cb_", ".", "get", "(", "six", ".", "text_type", "(", "jid", ")", ")", "except", "couchbase", ".", "exceptions", ".", "NotFoundError", ":", "cb_", ".", "add", "(", "six", ".", "text_type", "(", "jid", ")", ",", "{", "}", ",", "ttl", "=", "_get_ttl", "(", ")", ")", "jid_doc", "=", "cb_", ".", "get", "(", "six", ".", "text_type", "(", "jid", ")", ")", "jid_doc", ".", "value", "[", "'load'", "]", "=", "clear_load", "cb_", ".", "replace", "(", "six", ".", "text_type", "(", "jid", ")", ",", "jid_doc", ".", "value", ",", "cas", "=", "jid_doc", ".", "cas", ",", "ttl", "=", "_get_ttl", "(", ")", ")", "# if you have a tgt, save that for the UI etc", "if", "'tgt'", "in", "clear_load", "and", "clear_load", "[", "'tgt'", "]", "!=", "''", ":", "ckminions", "=", "salt", ".", "utils", ".", "minions", ".", "CkMinions", "(", "__opts__", ")", "# Retrieve the minions list", "_res", "=", "ckminions", ".", "check_minions", "(", "clear_load", "[", "'tgt'", "]", ",", "clear_load", ".", "get", "(", "'tgt_type'", ",", "'glob'", ")", ")", "minions", "=", "_res", "[", "'minions'", "]", "save_minions", "(", "jid", ",", "minions", ")" ]
Save the load to the specified jid
[ "Save", "the", "load", "to", "the", "specified", "jid" ]
python
train
33.4
materialsproject/pymatgen
pymatgen/analysis/wulff.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/wulff.py#L255-L269
def _get_cross_pt_dual_simp(self, dual_simp): """ |normal| = 1, e_surf is plane's distance to (0, 0, 0), plane function: normal[0]x + normal[1]y + normal[2]z = e_surf from self: normal_e_m to get the plane functions dual_simp: (i, j, k) simplices from the dual convex hull i, j, k: plane index(same order in normal_e_m) """ matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)] matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)] cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e) return cross_pt
[ "def", "_get_cross_pt_dual_simp", "(", "self", ",", "dual_simp", ")", ":", "matrix_surfs", "=", "[", "self", ".", "facets", "[", "dual_simp", "[", "i", "]", "]", ".", "normal", "for", "i", "in", "range", "(", "3", ")", "]", "matrix_e", "=", "[", "self", ".", "facets", "[", "dual_simp", "[", "i", "]", "]", ".", "e_surf", "for", "i", "in", "range", "(", "3", ")", "]", "cross_pt", "=", "sp", ".", "dot", "(", "sp", ".", "linalg", ".", "inv", "(", "matrix_surfs", ")", ",", "matrix_e", ")", "return", "cross_pt" ]
|normal| = 1, e_surf is plane's distance to (0, 0, 0), plane function: normal[0]x + normal[1]y + normal[2]z = e_surf from self: normal_e_m to get the plane functions dual_simp: (i, j, k) simplices from the dual convex hull i, j, k: plane index(same order in normal_e_m)
[ "|normal|", "=", "1", "e_surf", "is", "plane", "s", "distance", "to", "(", "0", "0", "0", ")", "plane", "function", ":", "normal", "[", "0", "]", "x", "+", "normal", "[", "1", "]", "y", "+", "normal", "[", "2", "]", "z", "=", "e_surf" ]
python
train
42.6
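A standalone numeric illustration of the plane-intersection step the docstring describes (three planes n_i . x = e_i solved as a 3x3 linear system); the normals and distances below are made-up values, not Wulff data.

import numpy as np

normals = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])   # unit normals of three facet planes
e_surf = np.array([0.5, 0.6, 0.7])      # distance of each plane from the origin
cross_pt = np.linalg.solve(normals, e_surf)
print(cross_pt)                         # prints [0.5 0.6 0.7], the corner shared by the three planes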
ArabellaTech/django-basic-cms
basic_cms/templatetags/pages_tags.py
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/templatetags/pages_tags.py#L460-L465
def do_videoplaceholder(parser, token): """ Method that parse the imageplaceholder template tag. """ name, params = parse_placeholder(parser, token) return VideoPlaceholderNode(name, **params)
[ "def", "do_videoplaceholder", "(", "parser", ",", "token", ")", ":", "name", ",", "params", "=", "parse_placeholder", "(", "parser", ",", "token", ")", "return", "VideoPlaceholderNode", "(", "name", ",", "*", "*", "params", ")" ]
Method that parses the videoplaceholder template tag.
[ "Method", "that", "parses", "the", "videoplaceholder", "template", "tag", "." ]
python
train
34.5
aaugustin/websockets
src/websockets/framing.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/framing.py#L243-L265
def check(frame) -> None: """ Check that this frame contains acceptable values. Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this frame contains incorrect values. """ # The first parameter is called `frame` rather than `self`, # but it's the instance of class to which this method is bound. if frame.rsv1 or frame.rsv2 or frame.rsv3: raise WebSocketProtocolError("Reserved bits must be 0") if frame.opcode in DATA_OPCODES: return elif frame.opcode in CTRL_OPCODES: if len(frame.data) > 125: raise WebSocketProtocolError("Control frame too long") if not frame.fin: raise WebSocketProtocolError("Fragmented control frame") else: raise WebSocketProtocolError(f"Invalid opcode: {frame.opcode}")
[ "def", "check", "(", "frame", ")", "->", "None", ":", "# The first parameter is called `frame` rather than `self`,", "# but it's the instance of class to which this method is bound.", "if", "frame", ".", "rsv1", "or", "frame", ".", "rsv2", "or", "frame", ".", "rsv3", ":", "raise", "WebSocketProtocolError", "(", "\"Reserved bits must be 0\"", ")", "if", "frame", ".", "opcode", "in", "DATA_OPCODES", ":", "return", "elif", "frame", ".", "opcode", "in", "CTRL_OPCODES", ":", "if", "len", "(", "frame", ".", "data", ")", ">", "125", ":", "raise", "WebSocketProtocolError", "(", "\"Control frame too long\"", ")", "if", "not", "frame", ".", "fin", ":", "raise", "WebSocketProtocolError", "(", "\"Fragmented control frame\"", ")", "else", ":", "raise", "WebSocketProtocolError", "(", "f\"Invalid opcode: {frame.opcode}\"", ")" ]
Check that this frame contains acceptable values. Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this frame contains incorrect values.
[ "Check", "that", "this", "frame", "contains", "acceptable", "values", "." ]
python
train
37.782609
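An illustrative check of a control frame. The positional field order (fin, opcode, data) of the Frame named tuple and the OP_PING constant are assumed from this module; check() is bound to instances, so it can be called on a frame directly.

frame = Frame(True, OP_PING, b'keepalive')
frame.check()   # passes: unfragmented control frame with a payload under 126 bytes

fragmented = Frame(False, OP_PING, b'')
# fragmented.check() would raise WebSocketProtocolError("Fragmented control frame")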
Workiva/furious
furious/context/context.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/context/context.py#L335-L345
def result(self): """Return the context result object pulled from the persistence_engine if it has been set. """ if not self._result: if not self._persistence_engine: return None self._result = self._persistence_engine.get_context_result(self) return self._result
[ "def", "result", "(", "self", ")", ":", "if", "not", "self", ".", "_result", ":", "if", "not", "self", ".", "_persistence_engine", ":", "return", "None", "self", ".", "_result", "=", "self", ".", "_persistence_engine", ".", "get_context_result", "(", "self", ")", "return", "self", ".", "_result" ]
Return the context result object pulled from the persistence_engine if it has been set.
[ "Return", "the", "context", "result", "object", "pulled", "from", "the", "persistence_engine", "if", "it", "has", "been", "set", "." ]
python
train
30.454545
condereis/realtime-stock
rtstock/utils.py
https://github.com/condereis/realtime-stock/blob/5b3110d0bc2fd3e8354ab2edb5cfe6cafd6f2a94/rtstock/utils.py#L173-L196
def download_historical(tickers_list, output_folder): """Download historical data from Yahoo Finance. Downloads full historical data from Yahoo Finance as CSV. The following fields are available: Adj Close, Close, High, Low, Open and Volume. Files will be saved to output_folder as <ticker>.csv. :param tickers_list: List of tickers that will be returned. :type tickers_list: list of strings :param output_folder: Output folder path :type output_folder: string """ __validate_list(tickers_list) for ticker in tickers_list: file_name = os.path.join(output_folder, ticker + '.csv') with open(file_name, 'wb') as f: base_url = 'http://real-chart.finance.yahoo.com/table.csv?s=' try: urlopen(base_url + ticker) urlretrieve(base_url + ticker, f.name) except: os.remove(file_name) raise RequestError('Unable to process the request. Check if ' + ticker + ' is a valid stock ticker')
[ "def", "download_historical", "(", "tickers_list", ",", "output_folder", ")", ":", "__validate_list", "(", "tickers_list", ")", "for", "ticker", "in", "tickers_list", ":", "file_name", "=", "os", ".", "path", ".", "join", "(", "output_folder", ",", "ticker", "+", "'.csv'", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "f", ":", "base_url", "=", "'http://real-chart.finance.yahoo.com/table.csv?s='", "try", ":", "urlopen", "(", "base_url", "+", "ticker", ")", "urlretrieve", "(", "base_url", "+", "ticker", ",", "f", ".", "name", ")", "except", ":", "os", ".", "remove", "(", "file_name", ")", "raise", "RequestError", "(", "'Unable to process the request. Check if '", "+", "ticker", "+", "' is a valid stock ticker'", ")" ]
Download historical data from Yahoo Finance. Downloads full historical data from Yahoo Finance as CSV. The following fields are available: Adj Close, Close, High, Low, Open and Volume. Files will be saved to output_folder as <ticker>.csv. :param tickers_list: List of tickers that will be returned. :type tickers_list: list of strings :param output_folder: Output folder path :type output_folder: string
[ "Download", "historical", "data", "from", "Yahoo", "Finance", "." ]
python
train
43.708333
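An example of the intended call; the tickers and output folder are placeholders. Note that the real-chart.finance.yahoo.com CSV endpoint this helper relies on has since been retired by Yahoo, so treat this purely as an illustration of the API.

from rtstock.utils import download_historical

# Writes AAPL.csv and MSFT.csv into /tmp/quotes, or raises RequestError
# (after removing the partial file) when a ticker cannot be resolved.
download_historical(['AAPL', 'MSFT'], '/tmp/quotes')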
google/prettytensor
prettytensor/pretty_tensor_class.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_class.py#L1282-L1306
def attach_template(self, _template, _key, **unbound_var_values): """Attaches the template to this with the _key is supplied with this layer. Note: names were chosen to avoid conflicts. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template. """ if _key in unbound_var_values: raise ValueError('%s specified twice.' % _key) unbound_var_values[_key] = self return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaults=self._defaults, partial_context=self._partial_context)
[ "def", "attach_template", "(", "self", ",", "_template", ",", "_key", ",", "*", "*", "unbound_var_values", ")", ":", "if", "_key", "in", "unbound_var_values", ":", "raise", "ValueError", "(", "'%s specified twice.'", "%", "_key", ")", "unbound_var_values", "[", "_key", "]", "=", "self", "return", "_DeferredLayer", "(", "self", ".", "bookkeeper", ",", "_template", ".", "as_layer", "(", ")", ".", "construct", ",", "[", "]", ",", "unbound_var_values", ",", "scope", "=", "self", ".", "_scope", ",", "defaults", "=", "self", ".", "_defaults", ",", "partial_context", "=", "self", ".", "_partial_context", ")" ]
Attaches the template to this with the _key is supplied with this layer. Note: names were chosen to avoid conflicts. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template.
[ "Attaches", "the", "template", "to", "this", "with", "the", "_key", "is", "supplied", "with", "this", "layer", "." ]
python
train
38.96
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L2037-L2069
def onecmd(self, statement: Union[Statement, str]) -> bool: """ This executes the actual do_* method for a command. If the command provided doesn't exist, then it executes default() instead. :param statement: intended to be a Statement instance parsed command from the input stream, alternative acceptance of a str is present only for backward compatibility with cmd :return: a flag indicating whether the interpretation of commands should stop """ # For backwards compatibility with cmd, allow a str to be passed in if not isinstance(statement, Statement): statement = self._complete_statement(statement) # Check if this is a macro if statement.command in self.macros: stop = self._run_macro(statement) else: func = self.cmd_func(statement.command) if func: # Check to see if this command should be stored in history if statement.command not in self.exclude_from_history \ and statement.command not in self.disabled_commands: self.history.append(statement) stop = func(statement) else: stop = self.default(statement) if stop is None: stop = False return stop
[ "def", "onecmd", "(", "self", ",", "statement", ":", "Union", "[", "Statement", ",", "str", "]", ")", "->", "bool", ":", "# For backwards compatibility with cmd, allow a str to be passed in", "if", "not", "isinstance", "(", "statement", ",", "Statement", ")", ":", "statement", "=", "self", ".", "_complete_statement", "(", "statement", ")", "# Check if this is a macro", "if", "statement", ".", "command", "in", "self", ".", "macros", ":", "stop", "=", "self", ".", "_run_macro", "(", "statement", ")", "else", ":", "func", "=", "self", ".", "cmd_func", "(", "statement", ".", "command", ")", "if", "func", ":", "# Check to see if this command should be stored in history", "if", "statement", ".", "command", "not", "in", "self", ".", "exclude_from_history", "and", "statement", ".", "command", "not", "in", "self", ".", "disabled_commands", ":", "self", ".", "history", ".", "append", "(", "statement", ")", "stop", "=", "func", "(", "statement", ")", "else", ":", "stop", "=", "self", ".", "default", "(", "statement", ")", "if", "stop", "is", "None", ":", "stop", "=", "False", "return", "stop" ]
This executes the actual do_* method for a command. If the command provided doesn't exist, then it executes default() instead. :param statement: intended to be a Statement instance parsed command from the input stream, alternative acceptance of a str is present only for backward compatibility with cmd :return: a flag indicating whether the interpretation of commands should stop
[ "This", "executes", "the", "actual", "do_", "*", "method", "for", "a", "command", "." ]
python
train
40.484848
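A toy sketch of onecmd() dispatching to a do_* handler. The App class and greet command are invented for illustration, and passing a plain string uses the backward-compatibility path mentioned in the docstring.

import cmd2

class App(cmd2.Cmd):
    def do_greet(self, statement):
        """Print a greeting."""
        self.poutput('hello')

app = App()
stop = app.onecmd('greet')   # runs do_greet and records the command in history
assert stop is False         # False means the command loop should keep running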
scanny/python-pptx
pptx/chart/axis.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/axis.py#L114-L122
def major_tick_mark(self): """ Read/write :ref:`XlTickMark` value specifying the type of major tick mark to display on this axis. """ majorTickMark = self._element.majorTickMark if majorTickMark is None: return XL_TICK_MARK.CROSS return majorTickMark.val
[ "def", "major_tick_mark", "(", "self", ")", ":", "majorTickMark", "=", "self", ".", "_element", ".", "majorTickMark", "if", "majorTickMark", "is", "None", ":", "return", "XL_TICK_MARK", ".", "CROSS", "return", "majorTickMark", ".", "val" ]
Read/write :ref:`XlTickMark` value specifying the type of major tick mark to display on this axis.
[ "Read", "/", "write", ":", "ref", ":", "XlTickMark", "value", "specifying", "the", "type", "of", "major", "tick", "mark", "to", "display", "on", "this", "axis", "." ]
python
train
34.888889
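A sketch of reading and changing the property on a chart's value axis; the file name and the assumption that the first shape on the first slide is a chart-holding graphic frame are placeholders.

from pptx import Presentation
from pptx.enum.chart import XL_TICK_MARK

chart = Presentation('report.pptx').slides[0].shapes[0].chart
axis = chart.value_axis
print(axis.major_tick_mark)                 # XL_TICK_MARK.CROSS when nothing is set explicitly
axis.major_tick_mark = XL_TICK_MARK.OUTSIDE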
sprockets/sprockets.mixins.metrics
examples/statsd.py
https://github.com/sprockets/sprockets.mixins.metrics/blob/0b17d5f0c09a2be9db779e17e6789d3d5ff9a0d0/examples/statsd.py#L34-L48
def make_application(): """ Create a application configured to send metrics. Metrics will be sent to localhost:8125 namespaced with ``webapps``. Run netcat or a similar listener then run this example. HTTP GETs will result in a metric like:: webapps.SimpleHandler.GET.204:255.24497032165527|ms """ settings = {} application = web.Application([web.url('/', SimpleHandler)], **settings) statsd.install(application, **{'namespace': 'testing'}) return application
[ "def", "make_application", "(", ")", ":", "settings", "=", "{", "}", "application", "=", "web", ".", "Application", "(", "[", "web", ".", "url", "(", "'/'", ",", "SimpleHandler", ")", "]", ",", "*", "*", "settings", ")", "statsd", ".", "install", "(", "application", ",", "*", "*", "{", "'namespace'", ":", "'testing'", "}", ")", "return", "application" ]
Create an application configured to send metrics. Metrics will be sent to localhost:8125 namespaced with ``webapps``. Run netcat or a similar listener, then run this example. HTTP GETs will result in a metric like:: webapps.SimpleHandler.GET.204:255.24497032165527|ms
[ "Create", "an", "application", "configured", "to", "send", "metrics", "." ]
python
train
33.066667
openstack/networking-arista
networking_arista/ml2/security_groups/switch_helper.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/security_groups/switch_helper.py#L370-L387
def _parse_binding_config(self, binding_config): """Parse configured interface -> ACL bindings Bindings are returned as a set of (intf, name, direction) tuples: set([(intf1, acl_name, direction), (intf2, acl_name, direction), ..., ]) """ parsed_bindings = set() for acl in binding_config['aclList']: for intf in acl['configuredIngressIntfs']: parsed_bindings.add((intf['name'], acl['name'], a_const.INGRESS_DIRECTION)) for intf in acl['configuredEgressIntfs']: parsed_bindings.add((intf['name'], acl['name'], a_const.EGRESS_DIRECTION)) return parsed_bindings
[ "def", "_parse_binding_config", "(", "self", ",", "binding_config", ")", ":", "parsed_bindings", "=", "set", "(", ")", "for", "acl", "in", "binding_config", "[", "'aclList'", "]", ":", "for", "intf", "in", "acl", "[", "'configuredIngressIntfs'", "]", ":", "parsed_bindings", ".", "add", "(", "(", "intf", "[", "'name'", "]", ",", "acl", "[", "'name'", "]", ",", "a_const", ".", "INGRESS_DIRECTION", ")", ")", "for", "intf", "in", "acl", "[", "'configuredEgressIntfs'", "]", ":", "parsed_bindings", ".", "add", "(", "(", "intf", "[", "'name'", "]", ",", "acl", "[", "'name'", "]", ",", "a_const", ".", "EGRESS_DIRECTION", ")", ")", "return", "parsed_bindings" ]
Parse configured interface -> ACL bindings Bindings are returned as a set of (intf, name, direction) tuples: set([(intf1, acl_name, direction), (intf2, acl_name, direction), ..., ])
[ "Parse", "configured", "interface", "-", ">", "ACL", "bindings" ]
python
train
42.333333
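An illustrative input/output pair for the parser above. The payload shape follows the 'aclList' structure the code iterates over; the interface and ACL names are made up, and the direction values in the result come from the a_const constants referenced in the code.

binding_config = {
    'aclList': [{
        'name': 'SG-INGRESS-web',
        'configuredIngressIntfs': [{'name': 'Ethernet1'}],
        'configuredEgressIntfs': [],
    }]
}
# self._parse_binding_config(binding_config) would return:
# {('Ethernet1', 'SG-INGRESS-web', a_const.INGRESS_DIRECTION)}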
google/grr
grr/server/grr_response_server/flows/general/administrative.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/administrative.py#L56-L114
def WriteAllCrashDetails(client_id, crash_details, flow_session_id=None, hunt_session_id=None, token=None): """Updates the last crash attribute of the client.""" # AFF4. if data_store.AFF4Enabled(): with aff4.FACTORY.Create( client_id, aff4_grr.VFSGRRClient, token=token) as client_obj: client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details)) # Duplicate the crash information in a number of places so we can find it # easily. client_urn = rdf_client.ClientURN(client_id) client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(client_urn) with data_store.DB.GetMutationPool() as pool: grr_collections.CrashCollection.StaticAdd( client_crashes, crash_details, mutation_pool=pool) # Relational db. if data_store.RelationalDBEnabled(): try: data_store.REL_DB.WriteClientCrashInfo(client_id, crash_details) except db.UnknownClientError: pass if not flow_session_id: return if data_store.RelationalDBEnabled(): flow_id = flow_session_id.Basename() data_store.REL_DB.UpdateFlow( client_id, flow_id, client_crash_info=crash_details) flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id) if flow_obj.parent_hunt_id: db_compat.ProcessHuntClientCrash( flow_obj, client_crash_info=crash_details) # TODO(amoser): Registering crashes in hunts is currently not implemented for # the relational db. if not data_store.RelationalDBEnabled(): with aff4.FACTORY.Open( flow_session_id, flow.GRRFlow, mode="rw", age=aff4.NEWEST_TIME, token=token) as aff4_flow: aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details)) hunt_session_id = ExtractHuntId(flow_session_id) if hunt_session_id and hunt_session_id != flow_session_id: hunt_obj = aff4.FACTORY.Open( hunt_session_id, aff4_type=implementation.GRRHunt, mode="rw", token=token) hunt_obj.RegisterCrash(crash_details)
[ "def", "WriteAllCrashDetails", "(", "client_id", ",", "crash_details", ",", "flow_session_id", "=", "None", ",", "hunt_session_id", "=", "None", ",", "token", "=", "None", ")", ":", "# AFF4.", "if", "data_store", ".", "AFF4Enabled", "(", ")", ":", "with", "aff4", ".", "FACTORY", ".", "Create", "(", "client_id", ",", "aff4_grr", ".", "VFSGRRClient", ",", "token", "=", "token", ")", "as", "client_obj", ":", "client_obj", ".", "Set", "(", "client_obj", ".", "Schema", ".", "LAST_CRASH", "(", "crash_details", ")", ")", "# Duplicate the crash information in a number of places so we can find it", "# easily.", "client_urn", "=", "rdf_client", ".", "ClientURN", "(", "client_id", ")", "client_crashes", "=", "aff4_grr", ".", "VFSGRRClient", ".", "CrashCollectionURNForCID", "(", "client_urn", ")", "with", "data_store", ".", "DB", ".", "GetMutationPool", "(", ")", "as", "pool", ":", "grr_collections", ".", "CrashCollection", ".", "StaticAdd", "(", "client_crashes", ",", "crash_details", ",", "mutation_pool", "=", "pool", ")", "# Relational db.", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "try", ":", "data_store", ".", "REL_DB", ".", "WriteClientCrashInfo", "(", "client_id", ",", "crash_details", ")", "except", "db", ".", "UnknownClientError", ":", "pass", "if", "not", "flow_session_id", ":", "return", "if", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "flow_id", "=", "flow_session_id", ".", "Basename", "(", ")", "data_store", ".", "REL_DB", ".", "UpdateFlow", "(", "client_id", ",", "flow_id", ",", "client_crash_info", "=", "crash_details", ")", "flow_obj", "=", "data_store", ".", "REL_DB", ".", "ReadFlowObject", "(", "client_id", ",", "flow_id", ")", "if", "flow_obj", ".", "parent_hunt_id", ":", "db_compat", ".", "ProcessHuntClientCrash", "(", "flow_obj", ",", "client_crash_info", "=", "crash_details", ")", "# TODO(amoser): Registering crashes in hunts is currently not implemented for", "# the relational db.", "if", "not", "data_store", ".", "RelationalDBEnabled", "(", ")", ":", "with", "aff4", ".", "FACTORY", ".", "Open", "(", "flow_session_id", ",", "flow", ".", "GRRFlow", ",", "mode", "=", "\"rw\"", ",", "age", "=", "aff4", ".", "NEWEST_TIME", ",", "token", "=", "token", ")", "as", "aff4_flow", ":", "aff4_flow", ".", "Set", "(", "aff4_flow", ".", "Schema", ".", "CLIENT_CRASH", "(", "crash_details", ")", ")", "hunt_session_id", "=", "ExtractHuntId", "(", "flow_session_id", ")", "if", "hunt_session_id", "and", "hunt_session_id", "!=", "flow_session_id", ":", "hunt_obj", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "hunt_session_id", ",", "aff4_type", "=", "implementation", ".", "GRRHunt", ",", "mode", "=", "\"rw\"", ",", "token", "=", "token", ")", "hunt_obj", ".", "RegisterCrash", "(", "crash_details", ")" ]
Updates the last crash attribute of the client.
[ "Updates", "the", "last", "crash", "attribute", "of", "the", "client", "." ]
python
train
35.084746
hanguokai/youku
youku/youku_searches.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_searches.py#L70-L95
def search_shows_by_keyword(self, keyword, unite=0, source_site=None, category=None, release_year=None, area=None, orderby='view-count', paid=None, hasvideotype=None, page=1, count=20): """doc: http://open.youku.com/docs/doc?id=82 """ url = 'https://openapi.youku.com/v2/searches/show/by_keyword.json' params = { 'client_id': self.client_id, 'keyword': keyword, 'unite': unite, 'source_site': source_site, 'category': category, 'release_year': release_year, 'area': area, 'orderby': orderby, 'paid': paid, 'hasvideotype': hasvideotype, 'page': page, 'count': count } params = remove_none_value(params) r = requests.get(url, params=params) check_error(r) return r.json()
[ "def", "search_shows_by_keyword", "(", "self", ",", "keyword", ",", "unite", "=", "0", ",", "source_site", "=", "None", ",", "category", "=", "None", ",", "release_year", "=", "None", ",", "area", "=", "None", ",", "orderby", "=", "'view-count'", ",", "paid", "=", "None", ",", "hasvideotype", "=", "None", ",", "page", "=", "1", ",", "count", "=", "20", ")", ":", "url", "=", "'https://openapi.youku.com/v2/searches/show/by_keyword.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'keyword'", ":", "keyword", ",", "'unite'", ":", "unite", ",", "'source_site'", ":", "source_site", ",", "'category'", ":", "category", ",", "'release_year'", ":", "release_year", ",", "'area'", ":", "area", ",", "'orderby'", ":", "orderby", ",", "'paid'", ":", "paid", ",", "'hasvideotype'", ":", "hasvideotype", ",", "'page'", ":", "page", ",", "'count'", ":", "count", "}", "params", "=", "remove_none_value", "(", "params", ")", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "check_error", "(", "r", ")", "return", "r", ".", "json", "(", ")" ]
doc: http://open.youku.com/docs/doc?id=82
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "82" ]
python
train
38
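A minimal usage sketch for the search_shows_by_keyword record above. Only the method signature is taken from the record; the client object, its construction, and the shape of the returned JSON are assumptions and are marked as such in the comments.

# Hedged sketch: `client` stands in for the API wrapper instance that owns
# search_shows_by_keyword(); its class name and construction are assumptions.
results = client.search_shows_by_keyword('kung fu', category='TV', page=1, count=10)
for show in results.get('shows', []):  # the 'shows' key is an assumption about the JSON payload
    print(show.get('name'))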
algofairness/BlackBoxAuditing
python2_source/BlackBoxAuditing/measurements.py
https://github.com/algofairness/BlackBoxAuditing/blob/b06c4faed5591cd7088475b2a203127bc5820483/python2_source/BlackBoxAuditing/measurements.py#L1-L12
def accuracy(conf_matrix): """ Given a confusion matrix, returns the accuracy. Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml """ total, correct = 0.0, 0.0 for true_response, guess_dict in conf_matrix.items(): for guess, count in guess_dict.items(): if true_response == guess: correct += count total += count return correct/total
[ "def", "accuracy", "(", "conf_matrix", ")", ":", "total", ",", "correct", "=", "0.0", ",", "0.0", "for", "true_response", ",", "guess_dict", "in", "conf_matrix", ".", "items", "(", ")", ":", "for", "guess", ",", "count", "in", "guess_dict", ".", "items", "(", ")", ":", "if", "true_response", "==", "guess", ":", "correct", "+=", "count", "total", "+=", "count", "return", "correct", "/", "total" ]
Given a confusion matrix, returns the accuracy. Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
[ "Given", "a", "confusion", "matrix", "returns", "the", "accuracy", ".", "Accuracy", "Definition", ":", "http", ":", "//", "research", ".", "ics", ".", "aalto", ".", "fi", "/", "events", "/", "eyechallenge2005", "/", "evaluation", ".", "shtml" ]
python
test
33.333333
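A small worked example for the accuracy() record above, using the nested true-label -> guess -> count layout the function iterates over; the numbers are illustrative.

# Confusion matrix layout: conf_matrix[true_label][predicted_label] = count
conf_matrix = {
    'spam': {'spam': 40, 'ham': 10},
    'ham':  {'spam': 5,  'ham': 45},
}
print(accuracy(conf_matrix))  # (40 + 45) / (40 + 10 + 5 + 45) = 0.85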
adafruit/Adafruit_Python_BluefruitLE
Adafruit_BluefruitLE/interfaces/provider.py
https://github.com/adafruit/Adafruit_Python_BluefruitLE/blob/34fc6f596371b961628369d78ce836950514062f/Adafruit_BluefruitLE/interfaces/provider.py#L125-L143
def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC): """Return the first device that advertises the specified service UUIDs or has the specified name. Will wait up to timeout_sec seconds for the device to be found, and if the timeout is zero then it will not wait at all and immediately return a result. When no device is found a value of None is returned. """ start = time.time() while True: # Call find_devices and grab the first result if any are found. found = self.find_devices(service_uuids, name) if len(found) > 0: return found[0] # No device was found. Check if the timeout is exceeded and wait to # try again. if time.time()-start >= timeout_sec: # Failed to find a device within the timeout. return None time.sleep(1)
[ "def", "find_device", "(", "self", ",", "service_uuids", "=", "[", "]", ",", "name", "=", "None", ",", "timeout_sec", "=", "TIMEOUT_SEC", ")", ":", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "# Call find_devices and grab the first result if any are found.", "found", "=", "self", ".", "find_devices", "(", "service_uuids", ",", "name", ")", "if", "len", "(", "found", ")", ">", "0", ":", "return", "found", "[", "0", "]", "# No device was found. Check if the timeout is exceeded and wait to", "# try again.", "if", "time", ".", "time", "(", ")", "-", "start", ">=", "timeout_sec", ":", "# Failed to find a device within the timeout.", "return", "None", "time", ".", "sleep", "(", "1", ")" ]
Return the first device that advertises the specified service UUIDs or has the specified name. Will wait up to timeout_sec seconds for the device to be found, and if the timeout is zero then it will not wait at all and immediately return a result. When no device is found a value of None is returned.
[ "Return", "the", "first", "device", "that", "advertises", "the", "specified", "service", "UUIDs", "or", "has", "the", "specified", "name", ".", "Will", "wait", "up", "to", "timeout_sec", "seconds", "for", "the", "device", "to", "be", "found", "and", "if", "the", "timeout", "is", "zero", "then", "it", "will", "not", "wait", "at", "all", "and", "immediately", "return", "a", "result", ".", "When", "no", "device", "is", "found", "a", "value", "of", "None", "is", "returned", "." ]
python
valid
49.052632
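A hedged usage sketch for the find_device record above. The provider object and the UUID value are assumptions; only the find_device signature comes from the record.

import uuid

# `provider` stands in for an already-initialized BLE provider object that
# exposes find_device()/find_devices(); the UART service UUID is illustrative.
UART_SERVICE_UUID = uuid.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')  # example value
device = provider.find_device(service_uuids=[UART_SERVICE_UUID], timeout_sec=30)
if device is None:
    print('No matching device found within 30 seconds')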
trailofbits/manticore
manticore/core/smtlib/solver.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/solver.py#L242-L256
def _reset(self, constraints=None): """Auxiliary method to reset the smtlib external solver to initial defaults""" if self._proc is None: self._start_proc() else: if self.support_reset: self._send("(reset)") for cfg in self._init: self._send(cfg) else: self._stop_proc() self._start_proc() if constraints is not None: self._send(constraints)
[ "def", "_reset", "(", "self", ",", "constraints", "=", "None", ")", ":", "if", "self", ".", "_proc", "is", "None", ":", "self", ".", "_start_proc", "(", ")", "else", ":", "if", "self", ".", "support_reset", ":", "self", ".", "_send", "(", "\"(reset)\"", ")", "for", "cfg", "in", "self", ".", "_init", ":", "self", ".", "_send", "(", "cfg", ")", "else", ":", "self", ".", "_stop_proc", "(", ")", "self", ".", "_start_proc", "(", ")", "if", "constraints", "is", "not", "None", ":", "self", ".", "_send", "(", "constraints", ")" ]
Auxiliary method to reset the smtlib external solver to initial defaults
[ "Auxiliary", "method", "to", "reset", "the", "smtlib", "external", "solver", "to", "initial", "defaults" ]
python
valid
32.8
manahl/arctic
arctic/store/bson_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/bson_store.py#L112-L117
def update_one(self, filter, update, **kwargs): """ See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one """ self._arctic_lib.check_quota() return self._collection.update_one(filter, update, **kwargs)
[ "def", "update_one", "(", "self", ",", "filter", ",", "update", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_arctic_lib", ".", "check_quota", "(", ")", "return", "self", ".", "_collection", ".", "update_one", "(", "filter", ",", "update", ",", "*", "*", "kwargs", ")" ]
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
[ "See", "http", ":", "//", "api", ".", "mongodb", ".", "com", "/", "python", "/", "current", "/", "api", "/", "pymongo", "/", "collection", ".", "html#pymongo", ".", "collection", ".", "Collection", ".", "update_one" ]
python
train
48.833333
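A hedged usage sketch for the update_one record above; the filter/update arguments follow pymongo semantics per the linked documentation. The `lib` object is an assumption, since obtaining a BSONStore from an Arctic store is not shown in the record.

# `lib` stands in for a BSONStore instance; its construction is not shown above.
result = lib.update_one({'symbol': 'AAPL'}, {'$set': {'price': 123.45}})
print(result.matched_count, result.modified_count)  # pymongo UpdateResult fields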
klen/graphite-beacon
graphite_beacon/alerts.py
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/alerts.py#L259-L285
def load(self): """Load data from Graphite.""" LOGGER.debug('%s: start checking: %s', self.name, self.query) if self.waiting: self.notify('warning', 'Process takes too much time', target='waiting', ntype='common') else: self.waiting = True try: response = yield self.client.fetch(self.url, auth_username=self.auth_username, auth_password=self.auth_password, request_timeout=self.request_timeout, connect_timeout=self.connect_timeout, validate_cert=self.validate_cert) records = ( GraphiteRecord(line, self.default_nan_value, self.ignore_nan) for line in response.buffer) data = [ (None if record.empty else getattr(record, self.method), record.target) for record in records] if len(data) == 0: raise ValueError('No data') self.check(data) self.notify('normal', 'Metrics are loaded', target='loading', ntype='common') except Exception as e: self.notify( self.loading_error, 'Loading error: %s' % e, target='loading', ntype='common') self.waiting = False
[ "def", "load", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "'%s: start checking: %s'", ",", "self", ".", "name", ",", "self", ".", "query", ")", "if", "self", ".", "waiting", ":", "self", ".", "notify", "(", "'warning'", ",", "'Process takes too much time'", ",", "target", "=", "'waiting'", ",", "ntype", "=", "'common'", ")", "else", ":", "self", ".", "waiting", "=", "True", "try", ":", "response", "=", "yield", "self", ".", "client", ".", "fetch", "(", "self", ".", "url", ",", "auth_username", "=", "self", ".", "auth_username", ",", "auth_password", "=", "self", ".", "auth_password", ",", "request_timeout", "=", "self", ".", "request_timeout", ",", "connect_timeout", "=", "self", ".", "connect_timeout", ",", "validate_cert", "=", "self", ".", "validate_cert", ")", "records", "=", "(", "GraphiteRecord", "(", "line", ",", "self", ".", "default_nan_value", ",", "self", ".", "ignore_nan", ")", "for", "line", "in", "response", ".", "buffer", ")", "data", "=", "[", "(", "None", "if", "record", ".", "empty", "else", "getattr", "(", "record", ",", "self", ".", "method", ")", ",", "record", ".", "target", ")", "for", "record", "in", "records", "]", "if", "len", "(", "data", ")", "==", "0", ":", "raise", "ValueError", "(", "'No data'", ")", "self", ".", "check", "(", "data", ")", "self", ".", "notify", "(", "'normal'", ",", "'Metrics are loaded'", ",", "target", "=", "'loading'", ",", "ntype", "=", "'common'", ")", "except", "Exception", "as", "e", ":", "self", ".", "notify", "(", "self", ".", "loading_error", ",", "'Loading error: %s'", "%", "e", ",", "target", "=", "'loading'", ",", "ntype", "=", "'common'", ")", "self", ".", "waiting", "=", "False" ]
Load data from Graphite.
[ "Load", "data", "from", "Graphite", "." ]
python
train
53.851852
PBR/MQ2
MQ2/__init__.py
https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/__init__.py#L140-L161
def write_matrix(outputfile, matrix): """ Write down the provided matrix in the specified outputfile. :arg outputfile, name of the outputfile in which the QTLs found are written. :arg matrix, the list of lists of data to write. """ try: stream = open(outputfile, 'w') for row in matrix: if isinstance(row, list) or isinstance(row, tuple): row = [str(el).strip() for el in row] stream.write(','.join(row) + '\n') else: stream.write(row + '\n') except IOError as err: # pragma: no cover LOG.info('An error occured while writing the file %s' % outputfile) LOG.debug("Error: %s" % err) finally: stream.close() LOG.info('Wrote QTLs in file %s' % outputfile)
[ "def", "write_matrix", "(", "outputfile", ",", "matrix", ")", ":", "try", ":", "stream", "=", "open", "(", "outputfile", ",", "'w'", ")", "for", "row", "in", "matrix", ":", "if", "isinstance", "(", "row", ",", "list", ")", "or", "isinstance", "(", "row", ",", "tuple", ")", ":", "row", "=", "[", "str", "(", "el", ")", ".", "strip", "(", ")", "for", "el", "in", "row", "]", "stream", ".", "write", "(", "','", ".", "join", "(", "row", ")", "+", "'\\n'", ")", "else", ":", "stream", ".", "write", "(", "row", "+", "'\\n'", ")", "except", "IOError", "as", "err", ":", "# pragma: no cover", "LOG", ".", "info", "(", "'An error occured while writing the file %s'", "%", "outputfile", ")", "LOG", ".", "debug", "(", "\"Error: %s\"", "%", "err", ")", "finally", ":", "stream", ".", "close", "(", ")", "LOG", ".", "info", "(", "'Wrote QTLs in file %s'", "%", "outputfile", ")" ]
Write down the provided matrix in the specified outputfile. :arg outputfile, name of the outputfile in which the QTLs found are written. :arg matrix, the list of lists of data to write.
[ "Write", "down", "the", "provided", "matrix", "in", "the", "specified", "outputfile", ".", ":", "arg", "outputfile", "name", "of", "the", "outputfile", "in", "which", "the", "QTLs", "found", "are", "written", ".", ":", "arg", "matrix", "the", "list", "of", "lists", "of", "data", "to", "write", "." ]
python
train
36.454545
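A direct usage sketch for the write_matrix record above; the row values are illustrative.

# Rows may be lists/tuples (joined with commas) or plain strings (written as-is).
rows = [('marker', 'linkage_group', 'LOD'),
        ('m_1', 'LG1', '3.2'),
        ('m_2', 'LG2', '4.7')]
write_matrix('qtls.csv', rows)  # writes a small comma-separated file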
decryptus/sonicprobe
sonicprobe/libs/urisup.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/urisup.py#L278-L311
def host_type(host): """ Correctly classify correct RFC 3986 compliant hostnames, but do not try hard to validate compliance anyway... NOTE: indeed we allow a small deviation from the RFC 3986: IPv4 addresses are allowed to contain bytes represented in hexadecimal or octal notation when begining respectively with '0x'/'0X' and '0' numbers prepended with one or more zero won't be rejected. Anyway representation of multiple bytes by a single decimal/octal/hexadecimal integer is not allowed. Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME) >>> host_type('[blablabla]') 1 >>> host_type('') 3 >>> host_type('127.0.0.1') 2 >>> host_type('0x7F.0.0.00000000000001') 2 >>> host_type('666.42.131.2') 3 >>> host_type('foobar.42') 3 """ if not host: return HOST_REG_NAME elif host[0] == '[': return HOST_IP_LITERAL elif __valid_IPv4address(host): return HOST_IPV4_ADDRESS else: return HOST_REG_NAME
[ "def", "host_type", "(", "host", ")", ":", "if", "not", "host", ":", "return", "HOST_REG_NAME", "elif", "host", "[", "0", "]", "==", "'['", ":", "return", "HOST_IP_LITERAL", "elif", "__valid_IPv4address", "(", "host", ")", ":", "return", "HOST_IPV4_ADDRESS", "else", ":", "return", "HOST_REG_NAME" ]
Correctly classify correct RFC 3986 compliant hostnames, but do not try
hard to validate compliance anyway...

NOTE: indeed we allow a small deviation from RFC 3986: IPv4 addresses
are allowed to contain bytes represented in hexadecimal or octal notation
when beginning respectively with '0x'/'0X' and '0'; numbers prepended with
one or more zeros won't be rejected. Anyway, representation of multiple
bytes by a single decimal/octal/hexadecimal integer is not allowed.

Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME)

>>> host_type('[blablabla]')
1
>>> host_type('')
3
>>> host_type('127.0.0.1')
2
>>> host_type('0x7F.0.0.00000000000001')
2
>>> host_type('666.42.131.2')
3
>>> host_type('foobar.42')
3
[ "Correctly", "classify", "correct", "RFC", "3986", "compliant", "hostnames", "but", "do", "not", "try", "hard", "to", "validate", "compliance", "anyway", "...", "NOTE", ":", "indeed", "we", "allow", "a", "small", "deviation", "from", "the", "RFC", "3986", ":", "IPv4", "addresses", "are", "allowed", "to", "contain", "bytes", "represented", "in", "hexadecimal", "or", "octal", "notation", "when", "begining", "respectively", "with", "0x", "/", "0X", "and", "0", "numbers", "prepended", "with", "one", "or", "more", "zero", "won", "t", "be", "rejected", ".", "Anyway", "representation", "of", "multiple", "bytes", "by", "a", "single", "decimal", "/", "octal", "/", "hexadecimal", "integer", "is", "not", "allowed", "." ]
python
train
30.117647
mongodb/mongo-python-driver
pymongo/change_stream.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/change_stream.py#L226-L283
def try_next(self): """Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: with db.collection.watch() as stream: while stream.alive: change = stream.try_next() if change is not None: print(change) elif stream.alive: # We end up here when there are no recent changes. # Sleep for a while to avoid flooding the server with # getMore requests when no changes are available. time.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. :Returns: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8 """ # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: change = self._cursor._try_next(True) except ConnectionFailure: self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: if exc.code in _NON_RESUMABLE_GETMORE_ERRORS: raise self._resume() change = self._cursor._try_next(False) # No changes are available. if change is None: return None try: resume_token = change['_id'] except KeyError: self.close() raise InvalidOperation( "Cannot provide resume functionality when the resume " "token is missing.") self._resume_token = copy.copy(resume_token) self._start_at_operation_time = None if self._decode_custom: return _bson_to_dict(change.raw, self._orig_codec_options) return change
[ "def", "try_next", "(", "self", ")", ":", "# Attempt to get the next change with at most one getMore and at most", "# one resume attempt.", "try", ":", "change", "=", "self", ".", "_cursor", ".", "_try_next", "(", "True", ")", "except", "ConnectionFailure", ":", "self", ".", "_resume", "(", ")", "change", "=", "self", ".", "_cursor", ".", "_try_next", "(", "False", ")", "except", "OperationFailure", "as", "exc", ":", "if", "exc", ".", "code", "in", "_NON_RESUMABLE_GETMORE_ERRORS", ":", "raise", "self", ".", "_resume", "(", ")", "change", "=", "self", ".", "_cursor", ".", "_try_next", "(", "False", ")", "# No changes are available.", "if", "change", "is", "None", ":", "return", "None", "try", ":", "resume_token", "=", "change", "[", "'_id'", "]", "except", "KeyError", ":", "self", ".", "close", "(", ")", "raise", "InvalidOperation", "(", "\"Cannot provide resume functionality when the resume \"", "\"token is missing.\"", ")", "self", ".", "_resume_token", "=", "copy", ".", "copy", "(", "resume_token", ")", "self", ".", "_start_at_operation_time", "=", "None", "if", "self", ".", "_decode_custom", ":", "return", "_bson_to_dict", "(", "change", ".", "raw", ",", "self", ".", "_orig_codec_options", ")", "return", "change" ]
Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: with db.collection.watch() as stream: while stream.alive: change = stream.try_next() if change is not None: print(change) elif stream.alive: # We end up here when there are no recent changes. # Sleep for a while to avoid flooding the server with # getMore requests when no changes are available. time.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then ``None`` is returned. :Returns: The next change document or ``None`` when no document is available after running a single getMore or when the cursor is closed. .. versionadded:: 3.8
[ "Advance", "the", "cursor", "without", "blocking", "indefinitely", "." ]
python
train
38.293103
dictatorlib/dictator
dictator/__init__.py
https://github.com/dictatorlib/dictator/blob/b77b1709b6fff174f13b0f0c5dbe740b4c07d712/dictator/__init__.py#L208-L235
def get(self, key, default=None): """Return the value at key ``key``, or default value ``default`` which is None by default. >>> dc = Dictator() >>> dc['l0'] = [1, 2, 3, 4] >>> dc.get('l0') ['1', '2', '3', '4'] >>> dc['l0'] ['1', '2', '3', '4'] >>> dc.clear() :param key: key of value to return :type key: str :param default: value of any type to return of key doesn't exist. :type default: Any :return: value of given key :rtype: Any """ try: value = self.__getitem__(key) except KeyError: value = None # Py3 Redis compatibiility if isinstance(value, bytes): value = value.decode() return value or default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "try", ":", "value", "=", "self", ".", "__getitem__", "(", "key", ")", "except", "KeyError", ":", "value", "=", "None", "# Py3 Redis compatibiility", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "value", "=", "value", ".", "decode", "(", ")", "return", "value", "or", "default" ]
Return the value at key ``key``, or default value ``default``
which is None by default.

>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.get('l0')
['1', '2', '3', '4']
>>> dc['l0']
['1', '2', '3', '4']
>>> dc.clear()

:param key: key of value to return
:type key: str
:param default: value of any type to return if key doesn't exist.
:type default: Any
:return: value of given key
:rtype: Any
[ "Return", "the", "value", "at", "key", "key", "or", "default", "value", "default", "which", "is", "None", "by", "default", "." ]
python
train
28.071429
andrea-cuttone/geoplotlib
geoplotlib/colors.py
https://github.com/andrea-cuttone/geoplotlib/blob/a1c355bccec91cabd157569fad6daf53cf7687a1/geoplotlib/colors.py#L25-L62
def to_color(self, value, maxvalue, scale, minvalue=0.0): """ convert continuous values into colors using matplotlib colorscales :param value: value to be converted :param maxvalue: max value in the colorscale :param scale: lin, log, sqrt :param minvalue: minimum of the input values in linear scale (default is 0) :return: the color corresponding to the value """ if scale == 'lin': if minvalue >= maxvalue: raise Exception('minvalue must be less than maxvalue') else: value = 1.*(value-minvalue) / (maxvalue-minvalue) elif scale == 'log': if value < 1 or maxvalue <= 1: raise Exception('value and maxvalue must be >= 1') else: value = math.log(value) / math.log(maxvalue) elif scale == 'sqrt': if value < 0 or maxvalue <= 0: raise Exception('value and maxvalue must be greater than 0') else: value = math.sqrt(value) / math.sqrt(maxvalue) else: raise Exception('scale must be "lin", "log", or "sqrt"') if value < 0: value = 0 elif value > 1: value = 1 value = int(1.*self.levels*value)*1./(self.levels-1) if value not in self.mapping: self.mapping[value] = _convert_color_format(self.cmap(value), self.alpha) return self.mapping[value]
[ "def", "to_color", "(", "self", ",", "value", ",", "maxvalue", ",", "scale", ",", "minvalue", "=", "0.0", ")", ":", "if", "scale", "==", "'lin'", ":", "if", "minvalue", ">=", "maxvalue", ":", "raise", "Exception", "(", "'minvalue must be less than maxvalue'", ")", "else", ":", "value", "=", "1.", "*", "(", "value", "-", "minvalue", ")", "/", "(", "maxvalue", "-", "minvalue", ")", "elif", "scale", "==", "'log'", ":", "if", "value", "<", "1", "or", "maxvalue", "<=", "1", ":", "raise", "Exception", "(", "'value and maxvalue must be >= 1'", ")", "else", ":", "value", "=", "math", ".", "log", "(", "value", ")", "/", "math", ".", "log", "(", "maxvalue", ")", "elif", "scale", "==", "'sqrt'", ":", "if", "value", "<", "0", "or", "maxvalue", "<=", "0", ":", "raise", "Exception", "(", "'value and maxvalue must be greater than 0'", ")", "else", ":", "value", "=", "math", ".", "sqrt", "(", "value", ")", "/", "math", ".", "sqrt", "(", "maxvalue", ")", "else", ":", "raise", "Exception", "(", "'scale must be \"lin\", \"log\", or \"sqrt\"'", ")", "if", "value", "<", "0", ":", "value", "=", "0", "elif", "value", ">", "1", ":", "value", "=", "1", "value", "=", "int", "(", "1.", "*", "self", ".", "levels", "*", "value", ")", "*", "1.", "/", "(", "self", ".", "levels", "-", "1", ")", "if", "value", "not", "in", "self", ".", "mapping", ":", "self", ".", "mapping", "[", "value", "]", "=", "_convert_color_format", "(", "self", ".", "cmap", "(", "value", ")", ",", "self", ".", "alpha", ")", "return", "self", ".", "mapping", "[", "value", "]" ]
convert continuous values into colors using matplotlib colorscales :param value: value to be converted :param maxvalue: max value in the colorscale :param scale: lin, log, sqrt :param minvalue: minimum of the input values in linear scale (default is 0) :return: the color corresponding to the value
[ "convert", "continuous", "values", "into", "colors", "using", "matplotlib", "colorscales", ":", "param", "value", ":", "value", "to", "be", "converted", ":", "param", "maxvalue", ":", "max", "value", "in", "the", "colorscale", ":", "param", "scale", ":", "lin", "log", "sqrt", ":", "param", "minvalue", ":", "minimum", "of", "the", "input", "values", "in", "linear", "scale", "(", "default", "is", "0", ")", ":", "return", ":", "the", "color", "corresponding", "to", "the", "value" ]
python
train
38.526316
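A hedged usage sketch for the to_color record above. The helper instance is an assumption; its construction (colormap name, alpha, number of levels) is not shown in the record, and only the to_color signature and scale rules come from it.

# `cmap_helper` stands in for the colors helper instance that owns to_color().
rgba_lin = cmap_helper.to_color(50, 100, 'lin')    # halfway up a linear scale
rgba_log = cmap_helper.to_color(10, 1000, 'log')   # log scaling requires value >= 1
rgba_sqrt = cmap_helper.to_color(25, 100, 'sqrt')  # sqrt(25)/sqrt(100) = 0.5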
Fantomas42/django-blog-zinnia
zinnia/xmlrpc/metaweblog.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/xmlrpc/metaweblog.py#L82-L89
def author_structure(user): """ An author structure. """ return {'user_id': user.pk, 'user_login': user.get_username(), 'display_name': user.__str__(), 'user_email': user.email}
[ "def", "author_structure", "(", "user", ")", ":", "return", "{", "'user_id'", ":", "user", ".", "pk", ",", "'user_login'", ":", "user", ".", "get_username", "(", ")", ",", "'display_name'", ":", "user", ".", "__str__", "(", ")", ",", "'user_email'", ":", "user", ".", "email", "}" ]
An author structure.
[ "An", "author", "structure", "." ]
python
train
27.75
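An illustrative call for the author_structure record above; the concrete values depend entirely on the Django user passed in and are placeholders.

author_structure(request.user)
# -> {'user_id': 1,
#     'user_login': 'admin',
#     'display_name': 'admin',
#     'user_email': 'admin@example.com'}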
databio/pypiper
pypiper/ngstk.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L129-L142
def check_command(self, command): """ Check if command can be called. """ # Use `command` to see if command is callable, store exit code code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command)) # If exit code is not 0, report which command failed and return False, else return True if code != 0: print("Command is not callable: {0}".format(command)) return False else: return True
[ "def", "check_command", "(", "self", ",", "command", ")", ":", "# Use `command` to see if command is callable, store exit code", "code", "=", "os", ".", "system", "(", "\"command -v {0} >/dev/null 2>&1 || {{ exit 1; }}\"", ".", "format", "(", "command", ")", ")", "# If exit code is not 0, report which command failed and return False, else return True", "if", "code", "!=", "0", ":", "print", "(", "\"Command is not callable: {0}\"", ".", "format", "(", "command", ")", ")", "return", "False", "else", ":", "return", "True" ]
Check if command can be called.
[ "Check", "if", "command", "can", "be", "called", "." ]
python
train
35.428571
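A hedged usage sketch for the check_command record above. The toolkit instance `tk` is an assumption, since the owning class and its constructor are not shown in the record.

# `tk` stands in for the toolkit object that owns check_command().
if not tk.check_command('samtools'):
    raise RuntimeError('samtools is not callable; is it on PATH?')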
GPflow/GPflow
gpflow/training/monitor.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/monitor.py#L435-L441
def print_summary(self) -> None: """ Prints the tasks' timing summary. """ print("Tasks execution time summary:") for mon_task in self._monitor_tasks: print("%s:\t%.4f (sec)" % (mon_task.task_name, mon_task.total_time))
[ "def", "print_summary", "(", "self", ")", "->", "None", ":", "print", "(", "\"Tasks execution time summary:\"", ")", "for", "mon_task", "in", "self", ".", "_monitor_tasks", ":", "print", "(", "\"%s:\\t%.4f (sec)\"", "%", "(", "mon_task", ".", "task_name", ",", "mon_task", ".", "total_time", ")", ")" ]
Prints the tasks' timing summary.
[ "Prints", "the", "tasks", "timing", "summary", "." ]
python
train
37.857143
google/grr
grr/proto/setup.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/proto/setup.py#L53-L63
def compile_protos(): """Builds necessary assets from sources.""" # If there's no makefile, we're likely installing from an sdist, # so there's no need to compile the protos (they should be already # compiled). if not os.path.exists(os.path.join(THIS_DIRECTORY, "makefile.py")): return # Only compile protobufs if we're inside GRR source tree. subprocess.check_call( ["python", "makefile.py", "--clean"], cwd=THIS_DIRECTORY)
[ "def", "compile_protos", "(", ")", ":", "# If there's no makefile, we're likely installing from an sdist,", "# so there's no need to compile the protos (they should be already", "# compiled).", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "THIS_DIRECTORY", ",", "\"makefile.py\"", ")", ")", ":", "return", "# Only compile protobufs if we're inside GRR source tree.", "subprocess", ".", "check_call", "(", "[", "\"python\"", ",", "\"makefile.py\"", ",", "\"--clean\"", "]", ",", "cwd", "=", "THIS_DIRECTORY", ")" ]
Builds necessary assets from sources.
[ "Builds", "necessary", "assets", "from", "sources", "." ]
python
train
39.909091
neurodata/ndio
ndio/remote/resources.py
https://github.com/neurodata/ndio/blob/792dd5816bc770b05a3db2f4327da42ff6253531/ndio/remote/resources.py#L240-L264
def delete_token(self, token_name, project_name, dataset_name): """ Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted. """ url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\ + "/project/{}".format(project_name)\ + "/token/{}/".format(token_name) req = self.remote_utils.delete_url(url) if req.status_code is not 204: raise RemoteDataUploadError("Could not delete {}".format(req.text)) if req.content == "" or req.content == b'': return True else: return False
[ "def", "delete_token", "(", "self", ",", "token_name", ",", "project_name", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "url", "(", ")", "+", "\"/nd/resource/dataset/{}\"", ".", "format", "(", "dataset_name", ")", "+", "\"/project/{}\"", ".", "format", "(", "project_name", ")", "+", "\"/token/{}/\"", ".", "format", "(", "token_name", ")", "req", "=", "self", ".", "remote_utils", ".", "delete_url", "(", "url", ")", "if", "req", ".", "status_code", "is", "not", "204", ":", "raise", "RemoteDataUploadError", "(", "\"Could not delete {}\"", ".", "format", "(", "req", ".", "text", ")", ")", "if", "req", ".", "content", "==", "\"\"", "or", "req", ".", "content", "==", "b''", ":", "return", "True", "else", ":", "return", "False" ]
Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name channel_name (str): Channel name project is based on Returns: bool: True if project deleted, false if not deleted.
[ "Delete", "a", "token", "with", "the", "given", "parameters", ".", "Arguments", ":", "project_name", "(", "str", ")", ":", "Project", "name", "dataset_name", "(", "str", ")", ":", "Dataset", "name", "project", "is", "based", "on", "token_name", "(", "str", ")", ":", "Token", "name", "channel_name", "(", "str", ")", ":", "Channel", "name", "project", "is", "based", "on", "Returns", ":", "bool", ":", "True", "if", "project", "deleted", "false", "if", "not", "deleted", "." ]
python
test
37.88
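A hedged usage sketch for the delete_token record above. The `resources` object is an assumption, and the token/project/dataset names are placeholders; only the signature and boolean return come from the record.

# `resources` stands in for the remote-resources object that owns delete_token().
ok = resources.delete_token('demo_token', 'demo_project', 'demo_dataset')
print('deleted' if ok else 'delete failed')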
openshift/openshift-restclient-python
openshift/dynamic/client.py
https://github.com/openshift/openshift-restclient-python/blob/5d86bf5ba4e723bcc4d33ad47077aca01edca0f6/openshift/dynamic/client.py#L179-L217
def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None): """ Stream events for a resource from the Kubernetes API :param resource: The API resource object that will be used to query the API :param namespace: The namespace to query :param name: The name of the resource instance to query :param label_selector: The label selector with which to filter results :param field_selector: The field selector with which to filter results :param resource_version: The version with which to filter results. Only events with a resource_version greater than this value will be returned :param timeout: The amount of time in seconds to wait before terminating the stream :return: Event object with these keys: 'type': The type of event such as "ADDED", "DELETED", etc. 'raw_object': a dict representing the watched object. 'object': A ResourceInstance wrapping raw_object. Example: client = DynamicClient(k8s_client) v1_pods = client.resources.get(api_version='v1', kind='Pod') for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5): print(e['type']) print(e['object'].metadata) """ watcher = watch.Watch() for event in watcher.stream( resource.get, namespace=namespace, name=name, field_selector=field_selector, label_selector=label_selector, resource_version=resource_version, serialize=False, timeout_seconds=timeout ): event['object'] = ResourceInstance(resource, event['object']) yield event
[ "def", "watch", "(", "self", ",", "resource", ",", "namespace", "=", "None", ",", "name", "=", "None", ",", "label_selector", "=", "None", ",", "field_selector", "=", "None", ",", "resource_version", "=", "None", ",", "timeout", "=", "None", ")", ":", "watcher", "=", "watch", ".", "Watch", "(", ")", "for", "event", "in", "watcher", ".", "stream", "(", "resource", ".", "get", ",", "namespace", "=", "namespace", ",", "name", "=", "name", ",", "field_selector", "=", "field_selector", ",", "label_selector", "=", "label_selector", ",", "resource_version", "=", "resource_version", ",", "serialize", "=", "False", ",", "timeout_seconds", "=", "timeout", ")", ":", "event", "[", "'object'", "]", "=", "ResourceInstance", "(", "resource", ",", "event", "[", "'object'", "]", ")", "yield", "event" ]
Stream events for a resource from the Kubernetes API :param resource: The API resource object that will be used to query the API :param namespace: The namespace to query :param name: The name of the resource instance to query :param label_selector: The label selector with which to filter results :param field_selector: The field selector with which to filter results :param resource_version: The version with which to filter results. Only events with a resource_version greater than this value will be returned :param timeout: The amount of time in seconds to wait before terminating the stream :return: Event object with these keys: 'type': The type of event such as "ADDED", "DELETED", etc. 'raw_object': a dict representing the watched object. 'object': A ResourceInstance wrapping raw_object. Example: client = DynamicClient(k8s_client) v1_pods = client.resources.get(api_version='v1', kind='Pod') for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5): print(e['type']) print(e['object'].metadata)
[ "Stream", "events", "for", "a", "resource", "from", "the", "Kubernetes", "API" ]
python
train
47.179487
slackapi/python-slackclient
slack/rtm/client.py
https://github.com/slackapi/python-slackclient/blob/901341c0284fd81e6d2719d6a0502308760d83e4/slack/rtm/client.py#L309-L357
async def _connect_and_read(self): """Retreives and connects to Slack's RTM API. Makes an authenticated call to Slack's RTM API to retrieve a websocket URL. Then connects to the message server and reads event messages as they come in. If 'auto_reconnect' is specified we retrieve a new url and reconnect any time the connection is lost unintentionally or an exception is thrown. Raises: SlackApiError: Unable to retreive RTM URL from Slack. websockets.exceptions: Errors thrown by the 'websockets' library. """ while not self._stopped: try: self._connection_attempts += 1 async with aiohttp.ClientSession( loop=self._event_loop, timeout=aiohttp.ClientTimeout(total=self.timeout), ) as session: self._session = session url, data = await self._retreive_websocket_info() async with session.ws_connect( url, heartbeat=self.ping_interval, ssl=self.ssl, proxy=self.proxy, ) as websocket: self._logger.debug("The Websocket connection has been opened.") self._websocket = websocket self._dispatch_event(event="open", data=data) await self._read_messages() except ( client_err.SlackClientNotConnectedError, client_err.SlackApiError, # TODO: Catch websocket exceptions thrown by aiohttp. ) as exception: self._logger.debug(str(exception)) self._dispatch_event(event="error", data=exception) if self.auto_reconnect and not self._stopped: await self._wait_exponentially(exception) continue self._logger.exception( "The Websocket encountered an error. Closing the connection..." ) self._close_websocket() raise
[ "async", "def", "_connect_and_read", "(", "self", ")", ":", "while", "not", "self", ".", "_stopped", ":", "try", ":", "self", ".", "_connection_attempts", "+=", "1", "async", "with", "aiohttp", ".", "ClientSession", "(", "loop", "=", "self", ".", "_event_loop", ",", "timeout", "=", "aiohttp", ".", "ClientTimeout", "(", "total", "=", "self", ".", "timeout", ")", ",", ")", "as", "session", ":", "self", ".", "_session", "=", "session", "url", ",", "data", "=", "await", "self", ".", "_retreive_websocket_info", "(", ")", "async", "with", "session", ".", "ws_connect", "(", "url", ",", "heartbeat", "=", "self", ".", "ping_interval", ",", "ssl", "=", "self", ".", "ssl", ",", "proxy", "=", "self", ".", "proxy", ",", ")", "as", "websocket", ":", "self", ".", "_logger", ".", "debug", "(", "\"The Websocket connection has been opened.\"", ")", "self", ".", "_websocket", "=", "websocket", "self", ".", "_dispatch_event", "(", "event", "=", "\"open\"", ",", "data", "=", "data", ")", "await", "self", ".", "_read_messages", "(", ")", "except", "(", "client_err", ".", "SlackClientNotConnectedError", ",", "client_err", ".", "SlackApiError", ",", "# TODO: Catch websocket exceptions thrown by aiohttp.", ")", "as", "exception", ":", "self", ".", "_logger", ".", "debug", "(", "str", "(", "exception", ")", ")", "self", ".", "_dispatch_event", "(", "event", "=", "\"error\"", ",", "data", "=", "exception", ")", "if", "self", ".", "auto_reconnect", "and", "not", "self", ".", "_stopped", ":", "await", "self", ".", "_wait_exponentially", "(", "exception", ")", "continue", "self", ".", "_logger", ".", "exception", "(", "\"The Websocket encountered an error. Closing the connection...\"", ")", "self", ".", "_close_websocket", "(", ")", "raise" ]
Retrieves and connects to Slack's RTM API.

Makes an authenticated call to Slack's RTM API to retrieve a websocket URL. Then connects to the message server and reads event messages as they come in.

If 'auto_reconnect' is specified we retrieve a new url and reconnect any time the connection is lost unintentionally or an exception is thrown.

Raises:
    SlackApiError: Unable to retrieve RTM URL from Slack.
    websockets.exceptions: Errors thrown by the 'websockets' library.
[ "Retreives", "and", "connects", "to", "Slack", "s", "RTM", "API", "." ]
python
train
44.326531
odlgroup/odl
odl/operator/pspace_ops.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/pspace_ops.py#L389-L435
def adjoint(self): """Adjoint of this operator. The adjoint is given by taking the transpose of the matrix and the adjoint of each component operator. In weighted product spaces, the adjoint needs to take the weightings into account. This is currently not supported. Returns ------- adjoint : `ProductSpaceOperator` The adjoint Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], ... [4, 5, 6]]) Matrix is transposed: >>> prod_op = ProductSpaceOperator([[0, I], [0, 0]], ... domain=pspace, range=pspace) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) >>> prod_op.adjoint(x) ProductSpace(rn(3), 2).element([ [ 0., 0., 0.], [ 1., 2., 3.] ]) """ # Lazy import to improve `import odl` time import scipy.sparse adjoint_ops = [op.adjoint for op in self.ops.data] data = np.empty(len(adjoint_ops), dtype=object) data[:] = adjoint_ops indices = [self.ops.col, self.ops.row] # Swap col/row -> transpose shape = (self.ops.shape[1], self.ops.shape[0]) adj_matrix = scipy.sparse.coo_matrix((data, indices), shape) return ProductSpaceOperator(adj_matrix, self.range, self.domain)
[ "def", "adjoint", "(", "self", ")", ":", "# Lazy import to improve `import odl` time", "import", "scipy", ".", "sparse", "adjoint_ops", "=", "[", "op", ".", "adjoint", "for", "op", "in", "self", ".", "ops", ".", "data", "]", "data", "=", "np", ".", "empty", "(", "len", "(", "adjoint_ops", ")", ",", "dtype", "=", "object", ")", "data", "[", ":", "]", "=", "adjoint_ops", "indices", "=", "[", "self", ".", "ops", ".", "col", ",", "self", ".", "ops", ".", "row", "]", "# Swap col/row -> transpose", "shape", "=", "(", "self", ".", "ops", ".", "shape", "[", "1", "]", ",", "self", ".", "ops", ".", "shape", "[", "0", "]", ")", "adj_matrix", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "data", ",", "indices", ")", ",", "shape", ")", "return", "ProductSpaceOperator", "(", "adj_matrix", ",", "self", ".", "range", ",", "self", ".", "domain", ")" ]
Adjoint of this operator. The adjoint is given by taking the transpose of the matrix and the adjoint of each component operator. In weighted product spaces, the adjoint needs to take the weightings into account. This is currently not supported. Returns ------- adjoint : `ProductSpaceOperator` The adjoint Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], ... [4, 5, 6]]) Matrix is transposed: >>> prod_op = ProductSpaceOperator([[0, I], [0, 0]], ... domain=pspace, range=pspace) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 4., 5., 6.], [ 0., 0., 0.] ]) >>> prod_op.adjoint(x) ProductSpace(rn(3), 2).element([ [ 0., 0., 0.], [ 1., 2., 3.] ])
[ "Adjoint", "of", "this", "operator", "." ]
python
train
32.829787
christophertbrown/bioscripts
ctbBio/rRNA_copies.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L102-L160
def copies(mapping, s2bins, rna, min_rna = 800, mismatches = 0): """ 1. determine bin coverage 2. determine rRNA gene coverage 3. compare """ cov = {} # cov[scaffold] = [bases, length] s2bins, bins2s = parse_s2bins(s2bins) rna_cov = parse_rna(rna, s2bins, min_rna) s2bins, bins2s = filter_missing_rna(s2bins, bins2s, rna_cov) # count bases mapped to scaffolds and rRNA gene regions for line in mapping: line = line.strip().split() # get scaffold lengths if line[0].startswith('@'): if line[0].startswith('@SQ') is False: continue s = line[1].split(':')[1] l = int(line[2].split(':')[1]) # check if scaffold is binned if s not in s2bins: continue if s not in cov: cov[s] = [0, l] # check mismatch threshold mm = count_mismatches(line) if mm is False or mm > mismatches: continue # check that scaffold is in bin s, bases = line[2], len(line[9]) if s not in cov: continue cov[s][0] += bases rna_cov = rna_bases(rna_cov, s, bases, line) print('# mismatches threshold: %s' % (mismatches)) header = ['#rRNA scaffold', 'rRNA genes >=%sbp on scaffold' % (min_rna), \ 'rRNA coverage', \ 'bin', 'bin info', 'bin coverage', \ 'rRNAs >=%sbp in bin' % (min_rna), \ 'rRNA coverage/bin coverage', \ 'estimated number of copies'] print('\t'.join(header)) for bin, scaffolds in list(bins2s.items()): rna_count = sum([len(rna_cov[s][2]) for s in scaffolds if s in rna_cov]) for s in scaffolds: if s not in rna_cov: continue out = [] counts = rna_cov[s] bin_cov = calc_bin_cov(bins2s[bin], cov) num_genes = len(counts[2]) rna_coverage = float(float(counts[0])/float(counts[1])) if bin_cov == 0: rna_div_bin = 0 else: rna_div_bin = float(rna_coverage/bin_cov) est = int(max([rna_count, counts, rna_div_bin])) out = [s, num_genes, rna_coverage, bin, bin_cov, rna_count, rna_div_bin, est] print('\t'.join([str(i) for i in out]))
[ "def", "copies", "(", "mapping", ",", "s2bins", ",", "rna", ",", "min_rna", "=", "800", ",", "mismatches", "=", "0", ")", ":", "cov", "=", "{", "}", "# cov[scaffold] = [bases, length]", "s2bins", ",", "bins2s", "=", "parse_s2bins", "(", "s2bins", ")", "rna_cov", "=", "parse_rna", "(", "rna", ",", "s2bins", ",", "min_rna", ")", "s2bins", ",", "bins2s", "=", "filter_missing_rna", "(", "s2bins", ",", "bins2s", ",", "rna_cov", ")", "# count bases mapped to scaffolds and rRNA gene regions", "for", "line", "in", "mapping", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "# get scaffold lengths", "if", "line", "[", "0", "]", ".", "startswith", "(", "'@'", ")", ":", "if", "line", "[", "0", "]", ".", "startswith", "(", "'@SQ'", ")", "is", "False", ":", "continue", "s", "=", "line", "[", "1", "]", ".", "split", "(", "':'", ")", "[", "1", "]", "l", "=", "int", "(", "line", "[", "2", "]", ".", "split", "(", "':'", ")", "[", "1", "]", ")", "# check if scaffold is binned", "if", "s", "not", "in", "s2bins", ":", "continue", "if", "s", "not", "in", "cov", ":", "cov", "[", "s", "]", "=", "[", "0", ",", "l", "]", "# check mismatch threshold", "mm", "=", "count_mismatches", "(", "line", ")", "if", "mm", "is", "False", "or", "mm", ">", "mismatches", ":", "continue", "# check that scaffold is in bin", "s", ",", "bases", "=", "line", "[", "2", "]", ",", "len", "(", "line", "[", "9", "]", ")", "if", "s", "not", "in", "cov", ":", "continue", "cov", "[", "s", "]", "[", "0", "]", "+=", "bases", "rna_cov", "=", "rna_bases", "(", "rna_cov", ",", "s", ",", "bases", ",", "line", ")", "print", "(", "'# mismatches threshold: %s'", "%", "(", "mismatches", ")", ")", "header", "=", "[", "'#rRNA scaffold'", ",", "'rRNA genes >=%sbp on scaffold'", "%", "(", "min_rna", ")", ",", "'rRNA coverage'", ",", "'bin'", ",", "'bin info'", ",", "'bin coverage'", ",", "'rRNAs >=%sbp in bin'", "%", "(", "min_rna", ")", ",", "'rRNA coverage/bin coverage'", ",", "'estimated number of copies'", "]", "print", "(", "'\\t'", ".", "join", "(", "header", ")", ")", "for", "bin", ",", "scaffolds", "in", "list", "(", "bins2s", ".", "items", "(", ")", ")", ":", "rna_count", "=", "sum", "(", "[", "len", "(", "rna_cov", "[", "s", "]", "[", "2", "]", ")", "for", "s", "in", "scaffolds", "if", "s", "in", "rna_cov", "]", ")", "for", "s", "in", "scaffolds", ":", "if", "s", "not", "in", "rna_cov", ":", "continue", "out", "=", "[", "]", "counts", "=", "rna_cov", "[", "s", "]", "bin_cov", "=", "calc_bin_cov", "(", "bins2s", "[", "bin", "]", ",", "cov", ")", "num_genes", "=", "len", "(", "counts", "[", "2", "]", ")", "rna_coverage", "=", "float", "(", "float", "(", "counts", "[", "0", "]", ")", "/", "float", "(", "counts", "[", "1", "]", ")", ")", "if", "bin_cov", "==", "0", ":", "rna_div_bin", "=", "0", "else", ":", "rna_div_bin", "=", "float", "(", "rna_coverage", "/", "bin_cov", ")", "est", "=", "int", "(", "max", "(", "[", "rna_count", ",", "counts", ",", "rna_div_bin", "]", ")", ")", "out", "=", "[", "s", ",", "num_genes", ",", "rna_coverage", ",", "bin", ",", "bin_cov", ",", "rna_count", ",", "rna_div_bin", ",", "est", "]", "print", "(", "'\\t'", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "out", "]", ")", ")" ]
1. determine bin coverage 2. determine rRNA gene coverage 3. compare
[ "1", ".", "determine", "bin", "coverage", "2", ".", "determine", "rRNA", "gene", "coverage", "3", ".", "compare" ]
python
train
38.949153
hobson/pug-dj
pug/dj/explore.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/explore.py#L421-L461
def index_with_dupes(values_list, unique_together=2, model_number_i=0, serial_number_i=1, verbosity=1): '''Create dict from values_list with first N values as a compound key. Default N (number of columns assumbed to be "unique_together") is 2. >>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]}) True ''' try: N = values_list.count() except: N = len(values_list) if verbosity > 0: print 'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).' % N index, dupes = {}, {} pbar = None if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))): widgets = [pb.Counter(), '%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()] pbar = pb.ProgressBar(widgets=widgets, maxval=N).start() rownum = 0 for row in values_list: normalized_key = [str(row[model_number_i]).strip(), str(row[serial_number_i]).strip()] normalized_key += [i for i in range(unique_together) if i not in (serial_number_i, model_number_i)] normalized_key = tuple(normalized_key) if normalized_key in index: # need to add the first nondupe before we add the dupes to the list if normalized_key not in dupes: dupes[normalized_key] = [index[normalized_key]] dupes[normalized_key] = dupes[normalized_key] + [row] if verbosity > 2: print 'Duplicate "unique_together" tuple found. Here are all the rows that match this key:' print dupes[normalized_key] else: index[normalized_key] = row if pbar: pbar.update(rownum) rownum += 1 if pbar: pbar.finish() if verbosity > 0: print 'Found %d duplicate model-serial pairs in the %d records or %g%%' % (len(dupes), len(index), len(dupes)*100./(len(index) or 1.)) return index, dupes
[ "def", "index_with_dupes", "(", "values_list", ",", "unique_together", "=", "2", ",", "model_number_i", "=", "0", ",", "serial_number_i", "=", "1", ",", "verbosity", "=", "1", ")", ":", "try", ":", "N", "=", "values_list", ".", "count", "(", ")", "except", ":", "N", "=", "len", "(", "values_list", ")", "if", "verbosity", ">", "0", ":", "print", "'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).'", "%", "N", "index", ",", "dupes", "=", "{", "}", ",", "{", "}", "pbar", "=", "None", "if", "verbosity", "and", "N", ">", "min", "(", "1000000", ",", "max", "(", "0", ",", "100000", "**", "(", "1.", "/", "verbosity", ")", ")", ")", ":", "widgets", "=", "[", "pb", ".", "Counter", "(", ")", ",", "'%d rows: '", "%", "N", ",", "pb", ".", "Percentage", "(", ")", ",", "' '", ",", "pb", ".", "RotatingMarker", "(", ")", ",", "' '", ",", "pb", ".", "Bar", "(", ")", ",", "' '", ",", "pb", ".", "ETA", "(", ")", "]", "pbar", "=", "pb", ".", "ProgressBar", "(", "widgets", "=", "widgets", ",", "maxval", "=", "N", ")", ".", "start", "(", ")", "rownum", "=", "0", "for", "row", "in", "values_list", ":", "normalized_key", "=", "[", "str", "(", "row", "[", "model_number_i", "]", ")", ".", "strip", "(", ")", ",", "str", "(", "row", "[", "serial_number_i", "]", ")", ".", "strip", "(", ")", "]", "normalized_key", "+=", "[", "i", "for", "i", "in", "range", "(", "unique_together", ")", "if", "i", "not", "in", "(", "serial_number_i", ",", "model_number_i", ")", "]", "normalized_key", "=", "tuple", "(", "normalized_key", ")", "if", "normalized_key", "in", "index", ":", "# need to add the first nondupe before we add the dupes to the list", "if", "normalized_key", "not", "in", "dupes", ":", "dupes", "[", "normalized_key", "]", "=", "[", "index", "[", "normalized_key", "]", "]", "dupes", "[", "normalized_key", "]", "=", "dupes", "[", "normalized_key", "]", "+", "[", "row", "]", "if", "verbosity", ">", "2", ":", "print", "'Duplicate \"unique_together\" tuple found. Here are all the rows that match this key:'", "print", "dupes", "[", "normalized_key", "]", "else", ":", "index", "[", "normalized_key", "]", "=", "row", "if", "pbar", ":", "pbar", ".", "update", "(", "rownum", ")", "rownum", "+=", "1", "if", "pbar", ":", "pbar", ".", "finish", "(", ")", "if", "verbosity", ">", "0", ":", "print", "'Found %d duplicate model-serial pairs in the %d records or %g%%'", "%", "(", "len", "(", "dupes", ")", ",", "len", "(", "index", ")", ",", "len", "(", "dupes", ")", "*", "100.", "/", "(", "len", "(", "index", ")", "or", "1.", ")", ")", "return", "index", ",", "dupes" ]
Create dict from values_list with first N values as a compound key.

Default N (number of columns assumed to be "unique_together") is 2.

>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True
[ "Create", "dict", "from", "values_list", "with", "first", "N", "values", "as", "a", "compound", "key", "." ]
python
train
49.463415
mixcloud/django-experiments
experiments/templatetags/experiments.py
https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/templatetags/experiments.py#L80-L101
def experiment(parser, token): """ Split Testing experiment tag has the following syntax : {% experiment <experiment_name> <alternative> %} experiment content goes here {% endexperiment %} If the alternative name is neither 'test' nor 'control' an exception is raised during rendering. """ try: token_contents = token.split_contents() experiment_name, alternative, weight, user_variable = _parse_token_contents(token_contents) node_list = parser.parse(('endexperiment', )) parser.delete_first_token() except ValueError: raise template.TemplateSyntaxError("Syntax should be like :" "{% experiment experiment_name alternative [weight=val] [user=val] %}") return ExperimentNode(node_list, experiment_name, alternative, weight, user_variable)
[ "def", "experiment", "(", "parser", ",", "token", ")", ":", "try", ":", "token_contents", "=", "token", ".", "split_contents", "(", ")", "experiment_name", ",", "alternative", ",", "weight", ",", "user_variable", "=", "_parse_token_contents", "(", "token_contents", ")", "node_list", "=", "parser", ".", "parse", "(", "(", "'endexperiment'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "except", "ValueError", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Syntax should be like :\"", "\"{% experiment experiment_name alternative [weight=val] [user=val] %}\"", ")", "return", "ExperimentNode", "(", "node_list", ",", "experiment_name", ",", "alternative", ",", "weight", ",", "user_variable", ")" ]
Split Testing experiment tag has the following syntax : {% experiment <experiment_name> <alternative> %} experiment content goes here {% endexperiment %} If the alternative name is neither 'test' nor 'control' an exception is raised during rendering.
[ "Split", "Testing", "experiment", "tag", "has", "the", "following", "syntax", ":", "{", "%", "experiment", "<experiment_name", ">", "<alternative", ">", "%", "}", "experiment", "content", "goes", "here", "{", "%", "endexperiment", "%", "}", "If", "the", "alternative", "name", "is", "neither", "test", "nor", "control", "an", "exception", "is", "raised", "during", "rendering", "." ]
python
train
37.727273
MisterWil/abodepy
abodepy/devices/alarm.py
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L33-L60
def set_mode(self, mode): """Set Abode alarm mode.""" if not mode: raise AbodeException(ERROR.MISSING_ALARM_MODE) elif mode.lower() not in CONST.ALL_MODES: raise AbodeException(ERROR.INVALID_ALARM_MODE, CONST.ALL_MODES) mode = mode.lower() response = self._abode.send_request( "put", CONST.get_panel_mode_url(self._area, mode)) _LOGGER.debug("Set Alarm Home Response: %s", response.text) response_object = json.loads(response.text) if response_object['area'] != self._area: raise AbodeException(ERROR.SET_MODE_AREA) if response_object['mode'] != mode: raise AbodeException(ERROR.SET_MODE_MODE) self._json_state['mode'][(self.device_id)] = response_object['mode'] _LOGGER.info("Set alarm %s mode to: %s", self._device_id, response_object['mode']) return True
[ "def", "set_mode", "(", "self", ",", "mode", ")", ":", "if", "not", "mode", ":", "raise", "AbodeException", "(", "ERROR", ".", "MISSING_ALARM_MODE", ")", "elif", "mode", ".", "lower", "(", ")", "not", "in", "CONST", ".", "ALL_MODES", ":", "raise", "AbodeException", "(", "ERROR", ".", "INVALID_ALARM_MODE", ",", "CONST", ".", "ALL_MODES", ")", "mode", "=", "mode", ".", "lower", "(", ")", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\"put\"", ",", "CONST", ".", "get_panel_mode_url", "(", "self", ".", "_area", ",", "mode", ")", ")", "_LOGGER", ".", "debug", "(", "\"Set Alarm Home Response: %s\"", ",", "response", ".", "text", ")", "response_object", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "if", "response_object", "[", "'area'", "]", "!=", "self", ".", "_area", ":", "raise", "AbodeException", "(", "ERROR", ".", "SET_MODE_AREA", ")", "if", "response_object", "[", "'mode'", "]", "!=", "mode", ":", "raise", "AbodeException", "(", "ERROR", ".", "SET_MODE_MODE", ")", "self", ".", "_json_state", "[", "'mode'", "]", "[", "(", "self", ".", "device_id", ")", "]", "=", "response_object", "[", "'mode'", "]", "_LOGGER", ".", "info", "(", "\"Set alarm %s mode to: %s\"", ",", "self", ".", "_device_id", ",", "response_object", "[", "'mode'", "]", ")", "return", "True" ]
Set Abode alarm mode.
[ "Set", "Abode", "alarm", "mode", "." ]
python
train
32.714286
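A minimal usage sketch for the set_mode call above; it assumes an authenticated abodepy client whose get_alarm() helper returns this alarm device, and the credentials are placeholders.

import abodepy
abode = abodepy.Abode(username='user@example.com', password='secret')  # placeholder credentials
alarm = abode.get_alarm()        # assumed helper returning the alarm device shown above
alarm.set_mode('home')           # the method lower-cases and validates the mode
alarm.set_mode('standby')        # any value outside CONST.ALL_MODES raises AbodeException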
cozy/python_cozy_management
cozy_management/ssl.py
https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L120-L155
def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE): ''' Sign certificate with acme_tiny for let's encrypt ''' private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name) certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name) certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH, common_name) signed_cert = '{certificates_path}/{common_name}-signed.crt'.format( certificates_path=CERTIFICATES_PATH, common_name=common_name) generate_certificate(common_name, size) cmd = 'openssl req -new -sha256 -key {private_key_path}' cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}' cmd = cmd.format( private_key_path=private_key_path, common_name=common_name, certificate_request_path=certificate_request_path ) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True) p.communicate() _internal_sign_certificate(certificate_path, certificate_request_path, signed_cert) cron = "/etc/cron.monthly/acme-renew" if not os.path.exists(cron): with open(cron, "w") as file: file.write("#!/bin/bash\ncozy_management renew_certificates\n") st = os.stat(cron) os.chmod(cron, st.st_mode | S_IXUSR)
[ "def", "acme_sign_certificate", "(", "common_name", ",", "size", "=", "DEFAULT_KEY_SIZE", ")", ":", "private_key_path", "=", "'{}/{}.key'", ".", "format", "(", "CERTIFICATES_PATH", ",", "common_name", ")", "certificate_path", "=", "'{}/{}.crt'", ".", "format", "(", "CERTIFICATES_PATH", ",", "common_name", ")", "certificate_request_path", "=", "'{}/{}.csr'", ".", "format", "(", "CERTIFICATES_PATH", ",", "common_name", ")", "signed_cert", "=", "'{certificates_path}/{common_name}-signed.crt'", ".", "format", "(", "certificates_path", "=", "CERTIFICATES_PATH", ",", "common_name", "=", "common_name", ")", "generate_certificate", "(", "common_name", ",", "size", ")", "cmd", "=", "'openssl req -new -sha256 -key {private_key_path}'", "cmd", "+=", "' -subj \"/CN={common_name}\" -out {certificate_request_path}'", "cmd", "=", "cmd", ".", "format", "(", "private_key_path", "=", "private_key_path", ",", "common_name", "=", "common_name", ",", "certificate_request_path", "=", "certificate_request_path", ")", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "close_fds", "=", "True", ")", "p", ".", "communicate", "(", ")", "_internal_sign_certificate", "(", "certificate_path", ",", "certificate_request_path", ",", "signed_cert", ")", "cron", "=", "\"/etc/cron.monthly/acme-renew\"", "if", "not", "os", ".", "path", ".", "exists", "(", "cron", ")", ":", "with", "open", "(", "cron", ",", "\"w\"", ")", "as", "file", ":", "file", ".", "write", "(", "\"#!/bin/bash\\ncozy_management renew_certificates\\n\"", ")", "st", "=", "os", ".", "stat", "(", "cron", ")", "os", ".", "chmod", "(", "cron", ",", "st", ".", "st_mode", "|", "S_IXUSR", ")" ]
Sign certificate with acme_tiny for let's encrypt
[ "Sign", "certificate", "with", "acme_tiny", "for", "let", "s", "encrypt" ]
python
train
39.75
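A hedged sketch of invoking the helper above; the domain is a placeholder, and root privileges plus write access to CERTIFICATES_PATH and /etc/cron.monthly are assumed.

from cozy_management import ssl
# Generates the key and CSR, signs the certificate via acme_tiny,
# and installs a monthly renewal cron job as shown in the source.
ssl.acme_sign_certificate('cloud.example.com')
# An explicit key size can be passed instead of DEFAULT_KEY_SIZE:
ssl.acme_sign_certificate('cloud.example.com', size=4096)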
timeyyy/apptools
peasoup/pidutil.py
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L66-L75
def move_active_window(x, y): """ Moves the active window to a given position given the window_id and absolute co ordinates, --sync option auto passed in, will wait until actually moved before giving control back to us will do nothing if the window is maximized """ window_id = get_window_id() cmd=['xdotool','windowmove', window_id, str(x), str(y)] subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate()
[ "def", "move_active_window", "(", "x", ",", "y", ")", ":", "window_id", "=", "get_window_id", "(", ")", "cmd", "=", "[", "'xdotool'", ",", "'windowmove'", ",", "window_id", ",", "str", "(", "x", ")", ",", "str", "(", "y", ")", "]", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")" ]
Moves the active window to a given position given the window_id and absolute coordinates; the --sync option is auto passed in, so it will wait until the window has actually moved before giving control back to us. Will do nothing if the window is maximized.
[ "Moves", "the", "active", "window", "to", "a", "given", "position", "given", "the", "window_id", "and", "absolute", "coordinates", "--", "sync", "option", "auto", "passed", "in", "will", "wait", "until", "actually", "moved", "before", "giving", "control", "back", "to", "us" ]
python
train
46.1
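A usage sketch for the function above, assuming an X11 session with xdotool installed; the coordinates are arbitrary.

from peasoup import pidutil
# Move the currently focused window to absolute position (100, 200).
# Per the docstring, this is a no-op if the window is maximized.
pidutil.move_active_window(100, 200)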
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pytree.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L285-L287
def _eq(self, other): """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children)
[ "def", "_eq", "(", "self", ",", "other", ")", ":", "return", "(", "self", ".", "type", ",", "self", ".", "children", ")", "==", "(", "other", ".", "type", ",", "other", ".", "children", ")" ]
Compare two nodes for equality.
[ "Compare", "two", "nodes", "for", "equality", "." ]
python
train
46.333333
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/transformations.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/transformations.py#L75-L86
def _to_chi(rep, data, input_dim, output_dim): """Transform a QuantumChannel to the Chi representation.""" if rep == 'Chi': return data # Check valid n-qubit input _check_nqubit_dim(input_dim, output_dim) if rep == 'Operator': return _from_operator('Chi', data, input_dim, output_dim) # Convert via Choi representation if rep != 'Choi': data = _to_choi(rep, data, input_dim, output_dim) return _choi_to_chi(data, input_dim, output_dim)
[ "def", "_to_chi", "(", "rep", ",", "data", ",", "input_dim", ",", "output_dim", ")", ":", "if", "rep", "==", "'Chi'", ":", "return", "data", "# Check valid n-qubit input", "_check_nqubit_dim", "(", "input_dim", ",", "output_dim", ")", "if", "rep", "==", "'Operator'", ":", "return", "_from_operator", "(", "'Chi'", ",", "data", ",", "input_dim", ",", "output_dim", ")", "# Convert via Choi representation", "if", "rep", "!=", "'Choi'", ":", "data", "=", "_to_choi", "(", "rep", ",", "data", ",", "input_dim", ",", "output_dim", ")", "return", "_choi_to_chi", "(", "data", ",", "input_dim", ",", "output_dim", ")" ]
Transform a QuantumChannel to the Chi representation.
[ "Transform", "a", "QuantumChannel", "to", "the", "Chi", "representation", "." ]
python
test
40
Grunny/zap-cli
zapcli/commands/session.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/session.py#L34-L37
def save_session(zap_helper, file_path): """Save the session.""" console.debug('Saving the session to "{0}"'.format(file_path)) zap_helper.zap.core.save_session(file_path, overwrite='true')
[ "def", "save_session", "(", "zap_helper", ",", "file_path", ")", ":", "console", ".", "debug", "(", "'Saving the session to \"{0}\"'", ".", "format", "(", "file_path", ")", ")", "zap_helper", ".", "zap", ".", "core", ".", "save_session", "(", "file_path", ",", "overwrite", "=", "'true'", ")" ]
Save the session.
[ "Save", "the", "session", "." ]
python
train
49.5
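The command above is a thin wrapper over the ZAP API client; a minimal sketch of the equivalent direct call, where the proxy address, API key and session path are placeholders.

from zapv2 import ZAPv2
zap = ZAPv2(apikey='changeme', proxies={'http': 'http://127.0.0.1:8080'})
# Same call the zap-cli command issues after logging the path:
zap.core.save_session('/tmp/zap-session', overwrite='true')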
Cognexa/cxflow
cxflow/utils/download.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/download.py#L9-L25
def sanitize_url(url: str) -> str: """ Sanitize the given url so that it can be used as a valid filename. :param url: url to create filename from :raise ValueError: when the given url can not be sanitized :return: created filename """ for part in reversed(url.split('/')): filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part) if len(filename) > 0: break else: raise ValueError('Could not create reasonable name for file from url %s', url) return filename
[ "def", "sanitize_url", "(", "url", ":", "str", ")", "->", "str", ":", "for", "part", "in", "reversed", "(", "url", ".", "split", "(", "'/'", ")", ")", ":", "filename", "=", "re", ".", "sub", "(", "r'[^a-zA-Z0-9_.\\-]'", ",", "''", ",", "part", ")", "if", "len", "(", "filename", ")", ">", "0", ":", "break", "else", ":", "raise", "ValueError", "(", "'Could not create reasonable name for file from url %s'", ",", "url", ")", "return", "filename" ]
Sanitize the given url so that it can be used as a valid filename. :param url: url to create filename from :raise ValueError: when the given url cannot be sanitized :return: created filename
[ "Sanitize", "the", "given", "url", "so", "that", "it", "can", "be", "used", "as", "a", "valid", "filename", "." ]
python
train
29.941176
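A quick illustration of the behaviour described in the docstring; the expected outputs in the comments are inferred from the regex, not taken from the project's tests.

from cxflow.utils.download import sanitize_url
print(sanitize_url('http://example.com/data/train-set.tar.gz'))  # 'train-set.tar.gz'
print(sanitize_url('http://example.com/files/'))                 # 'files' (last non-empty path part)
# A URL with no sanitizable part, e.g. 'http://////', raises ValueError.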
kakwa/ldapcherry
ldapcherry/cli.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/cli.py#L19-L102
def start(configfile=None, daemonize=False, environment=None, fastcgi=False, scgi=False, pidfile=None, cgi=False, debug=False): """Subscribe all engine plugins and start the engine.""" sys.path = [''] + sys.path # monkey patching cherrypy to disable config interpolation def new_as_dict(self, raw=True, vars=None): """Convert an INI file to a dictionary""" # Load INI file into a dict result = {} for section in self.sections(): if section not in result: result[section] = {} for option in self.options(section): value = self.get(section, option, raw=raw, vars=vars) try: value = cherrypy.lib.reprconf.unrepr(value) except Exception: x = sys.exc_info()[1] msg = ("Config error in section: %r, option: %r, " "value: %r. Config values must be valid Python." % (section, option, value)) raise ValueError(msg, x.__class__.__name__, x.args) result[section][option] = value return result cherrypy.lib.reprconf.Parser.as_dict = new_as_dict instance = LdapCherry() app = cherrypy.tree.mount(instance, '/', configfile) cherrypy.config.update(configfile) instance.reload(app.config, debug) engine = cherrypy.engine # Turn off autoreload cherrypy.config.update({'engine.autoreload.on': False}) if environment is not None: cherrypy.config.update({'environment': environment}) # Only daemonize if asked to. if daemonize: # Don't print anything to stdout/sterr. cherrypy.config.update({'log.screen': False}) plugins.Daemonizer(engine).subscribe() if pidfile: plugins.PIDFile(engine, pidfile).subscribe() if hasattr(engine, "signal_handler"): engine.signal_handler.subscribe() if hasattr(engine, "console_control_handler"): engine.console_control_handler.subscribe() if (fastcgi and (scgi or cgi)) or (scgi and cgi): cherrypy.log.error("You may only specify one of the cgi, fastcgi, and " "scgi options.", 'ENGINE') sys.exit(1) elif fastcgi or scgi or cgi: # Turn off the default HTTP server (which is subscribed by default). cherrypy.server.unsubscribe() addr = cherrypy.server.bind_addr if fastcgi: f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr) elif scgi: f = servers.FlupSCGIServer(application=cherrypy.tree, bindAddress=addr) else: f = servers.FlupCGIServer(application=cherrypy.tree, bindAddress=addr) s = servers.ServerAdapter(engine, httpserver=f, bind_addr=addr) s.subscribe() # Always start the engine; this will start all other services try: engine.start() except Exception as e: # Assume the error has been logged already via bus.log. sys.exit(1) else: engine.block()
[ "def", "start", "(", "configfile", "=", "None", ",", "daemonize", "=", "False", ",", "environment", "=", "None", ",", "fastcgi", "=", "False", ",", "scgi", "=", "False", ",", "pidfile", "=", "None", ",", "cgi", "=", "False", ",", "debug", "=", "False", ")", ":", "sys", ".", "path", "=", "[", "''", "]", "+", "sys", ".", "path", "# monkey patching cherrypy to disable config interpolation", "def", "new_as_dict", "(", "self", ",", "raw", "=", "True", ",", "vars", "=", "None", ")", ":", "\"\"\"Convert an INI file to a dictionary\"\"\"", "# Load INI file into a dict", "result", "=", "{", "}", "for", "section", "in", "self", ".", "sections", "(", ")", ":", "if", "section", "not", "in", "result", ":", "result", "[", "section", "]", "=", "{", "}", "for", "option", "in", "self", ".", "options", "(", "section", ")", ":", "value", "=", "self", ".", "get", "(", "section", ",", "option", ",", "raw", "=", "raw", ",", "vars", "=", "vars", ")", "try", ":", "value", "=", "cherrypy", ".", "lib", ".", "reprconf", ".", "unrepr", "(", "value", ")", "except", "Exception", ":", "x", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "msg", "=", "(", "\"Config error in section: %r, option: %r, \"", "\"value: %r. Config values must be valid Python.\"", "%", "(", "section", ",", "option", ",", "value", ")", ")", "raise", "ValueError", "(", "msg", ",", "x", ".", "__class__", ".", "__name__", ",", "x", ".", "args", ")", "result", "[", "section", "]", "[", "option", "]", "=", "value", "return", "result", "cherrypy", ".", "lib", ".", "reprconf", ".", "Parser", ".", "as_dict", "=", "new_as_dict", "instance", "=", "LdapCherry", "(", ")", "app", "=", "cherrypy", ".", "tree", ".", "mount", "(", "instance", ",", "'/'", ",", "configfile", ")", "cherrypy", ".", "config", ".", "update", "(", "configfile", ")", "instance", ".", "reload", "(", "app", ".", "config", ",", "debug", ")", "engine", "=", "cherrypy", ".", "engine", "# Turn off autoreload", "cherrypy", ".", "config", ".", "update", "(", "{", "'engine.autoreload.on'", ":", "False", "}", ")", "if", "environment", "is", "not", "None", ":", "cherrypy", ".", "config", ".", "update", "(", "{", "'environment'", ":", "environment", "}", ")", "# Only daemonize if asked to.", "if", "daemonize", ":", "# Don't print anything to stdout/sterr.", "cherrypy", ".", "config", ".", "update", "(", "{", "'log.screen'", ":", "False", "}", ")", "plugins", ".", "Daemonizer", "(", "engine", ")", ".", "subscribe", "(", ")", "if", "pidfile", ":", "plugins", ".", "PIDFile", "(", "engine", ",", "pidfile", ")", ".", "subscribe", "(", ")", "if", "hasattr", "(", "engine", ",", "\"signal_handler\"", ")", ":", "engine", ".", "signal_handler", ".", "subscribe", "(", ")", "if", "hasattr", "(", "engine", ",", "\"console_control_handler\"", ")", ":", "engine", ".", "console_control_handler", ".", "subscribe", "(", ")", "if", "(", "fastcgi", "and", "(", "scgi", "or", "cgi", ")", ")", "or", "(", "scgi", "and", "cgi", ")", ":", "cherrypy", ".", "log", ".", "error", "(", "\"You may only specify one of the cgi, fastcgi, and \"", "\"scgi options.\"", ",", "'ENGINE'", ")", "sys", ".", "exit", "(", "1", ")", "elif", "fastcgi", "or", "scgi", "or", "cgi", ":", "# Turn off the default HTTP server (which is subscribed by default).", "cherrypy", ".", "server", ".", "unsubscribe", "(", ")", "addr", "=", "cherrypy", ".", "server", ".", "bind_addr", "if", "fastcgi", ":", "f", "=", "servers", ".", "FlupFCGIServer", "(", "application", "=", "cherrypy", ".", "tree", ",", "bindAddress", "=", "addr", ")", "elif", "scgi", 
":", "f", "=", "servers", ".", "FlupSCGIServer", "(", "application", "=", "cherrypy", ".", "tree", ",", "bindAddress", "=", "addr", ")", "else", ":", "f", "=", "servers", ".", "FlupCGIServer", "(", "application", "=", "cherrypy", ".", "tree", ",", "bindAddress", "=", "addr", ")", "s", "=", "servers", ".", "ServerAdapter", "(", "engine", ",", "httpserver", "=", "f", ",", "bind_addr", "=", "addr", ")", "s", ".", "subscribe", "(", ")", "# Always start the engine; this will start all other services", "try", ":", "engine", ".", "start", "(", ")", "except", "Exception", "as", "e", ":", "# Assume the error has been logged already via bus.log.", "sys", ".", "exit", "(", "1", ")", "else", ":", "engine", ".", "block", "(", ")" ]
Subscribe all engine plugins and start the engine.
[ "Subscribe", "all", "engine", "plugins", "and", "start", "the", "engine", "." ]
python
train
37.595238
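A hedged sketch of launching the daemon programmatically; the paths are placeholders, and in practice this function is driven by the ldapcherryd console script rather than called directly.

from ldapcherry.cli import start
start(
    configfile='/etc/ldapcherry/ldapcherry.ini',  # placeholder config path
    daemonize=True,
    pidfile='/var/run/ldapcherry.pid',            # placeholder pid file
    environment='production',
)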
sernst/cauldron
cauldron/session/writing/components/definitions.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/writing/components/definitions.py#L61-L84
def combine_lists_reducer( key: str, merged_list: list, component: COMPONENT ) -> list: """ Reducer function to combine the lists for the specified key into a single, flat list :param key: The key on the COMPONENT instances to operate upon :param merged_list: The accumulated list of values populated by previous calls to this reducer function :param component: The COMPONENT instance from which to append values to the merged_list :return: The updated merged_list with the values for the COMPONENT added onto it """ merged_list.extend(getattr(component, key)) return merged_list
[ "def", "combine_lists_reducer", "(", "key", ":", "str", ",", "merged_list", ":", "list", ",", "component", ":", "COMPONENT", ")", "->", "list", ":", "merged_list", ".", "extend", "(", "getattr", "(", "component", ",", "key", ")", ")", "return", "merged_list" ]
Reducer function to combine the lists for the specified key into a single, flat list :param key: The key on the COMPONENT instances to operate upon :param merged_list: The accumulated list of values populated by previous calls to this reducer function :param component: The COMPONENT instance from which to append values to the merged_list :return: The updated merged_list with the values for the COMPONENT added onto it
[ "Reducer", "function", "to", "combine", "the", "lists", "for", "the", "specified", "key", "into", "a", "single", "flat", "list" ]
python
train
28.25
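The reducer is meant to be used with functools.reduce over a sequence of COMPONENT instances; a self-contained sketch with a stand-in namedtuple, since the real COMPONENT type lives in cauldron's definitions module.

import functools
from collections import namedtuple
from cauldron.session.writing.components.definitions import combine_lists_reducer

Component = namedtuple('Component', ['includes'])  # stand-in for COMPONENT
components = [Component(includes=['a.js']), Component(includes=['b.js', 'c.js'])]

merged = functools.reduce(
    functools.partial(combine_lists_reducer, 'includes'),  # bind the attribute key
    components,
    [],                                                     # initial merged_list
)
# merged == ['a.js', 'b.js', 'c.js']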
ray-project/ray
python/ray/experimental/state.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/state.py#L598-L687
def chrome_tracing_object_transfer_dump(self, filename=None): """Return a list of transfer events that can viewed as a timeline. To view this information as a timeline, simply dump it as a json file by passing in "filename" or using using json.dump, and then load go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary. """ client_id_to_address = {} for client_info in ray.global_state.client_table(): client_id_to_address[client_info["ClientID"]] = "{}:{}".format( client_info["NodeManagerAddress"], client_info["ObjectManagerPort"]) all_events = [] for key, items in self.profile_table().items(): # Only consider object manager events. if items[0]["component_type"] != "object_manager": continue for event in items: if event["event_type"] == "transfer_send": object_id, remote_client_id, _, _ = event["extra_data"] elif event["event_type"] == "transfer_receive": object_id, remote_client_id, _, _ = event["extra_data"] elif event["event_type"] == "receive_pull_request": object_id, remote_client_id = event["extra_data"] else: assert False, "This should be unreachable." # Choose a color by reading the first couple of hex digits of # the object ID as an integer and turning that into a color. object_id_int = int(object_id[:2], 16) color = self._chrome_tracing_colors[object_id_int % len( self._chrome_tracing_colors)] new_event = { # The category of the event. "cat": event["event_type"], # The string displayed on the event. "name": event["event_type"], # The identifier for the group of rows that the event # appears in. "pid": client_id_to_address[key], # The identifier for the row that the event appears in. "tid": client_id_to_address[remote_client_id], # The start time in microseconds. "ts": self._seconds_to_microseconds(event["start_time"]), # The duration in microseconds. "dur": self._seconds_to_microseconds(event["end_time"] - event["start_time"]), # What is this? "ph": "X", # This is the name of the color to display the box in. "cname": color, # The extra user-defined data. "args": event["extra_data"], } all_events.append(new_event) # Add another box with a color indicating whether it was a send # or a receive event. if event["event_type"] == "transfer_send": additional_event = new_event.copy() additional_event["cname"] = "black" all_events.append(additional_event) elif event["event_type"] == "transfer_receive": additional_event = new_event.copy() additional_event["cname"] = "grey" all_events.append(additional_event) else: pass if filename is not None: with open(filename, "w") as outfile: json.dump(all_events, outfile) else: return all_events
[ "def", "chrome_tracing_object_transfer_dump", "(", "self", ",", "filename", "=", "None", ")", ":", "client_id_to_address", "=", "{", "}", "for", "client_info", "in", "ray", ".", "global_state", ".", "client_table", "(", ")", ":", "client_id_to_address", "[", "client_info", "[", "\"ClientID\"", "]", "]", "=", "\"{}:{}\"", ".", "format", "(", "client_info", "[", "\"NodeManagerAddress\"", "]", ",", "client_info", "[", "\"ObjectManagerPort\"", "]", ")", "all_events", "=", "[", "]", "for", "key", ",", "items", "in", "self", ".", "profile_table", "(", ")", ".", "items", "(", ")", ":", "# Only consider object manager events.", "if", "items", "[", "0", "]", "[", "\"component_type\"", "]", "!=", "\"object_manager\"", ":", "continue", "for", "event", "in", "items", ":", "if", "event", "[", "\"event_type\"", "]", "==", "\"transfer_send\"", ":", "object_id", ",", "remote_client_id", ",", "_", ",", "_", "=", "event", "[", "\"extra_data\"", "]", "elif", "event", "[", "\"event_type\"", "]", "==", "\"transfer_receive\"", ":", "object_id", ",", "remote_client_id", ",", "_", ",", "_", "=", "event", "[", "\"extra_data\"", "]", "elif", "event", "[", "\"event_type\"", "]", "==", "\"receive_pull_request\"", ":", "object_id", ",", "remote_client_id", "=", "event", "[", "\"extra_data\"", "]", "else", ":", "assert", "False", ",", "\"This should be unreachable.\"", "# Choose a color by reading the first couple of hex digits of", "# the object ID as an integer and turning that into a color.", "object_id_int", "=", "int", "(", "object_id", "[", ":", "2", "]", ",", "16", ")", "color", "=", "self", ".", "_chrome_tracing_colors", "[", "object_id_int", "%", "len", "(", "self", ".", "_chrome_tracing_colors", ")", "]", "new_event", "=", "{", "# The category of the event.", "\"cat\"", ":", "event", "[", "\"event_type\"", "]", ",", "# The string displayed on the event.", "\"name\"", ":", "event", "[", "\"event_type\"", "]", ",", "# The identifier for the group of rows that the event", "# appears in.", "\"pid\"", ":", "client_id_to_address", "[", "key", "]", ",", "# The identifier for the row that the event appears in.", "\"tid\"", ":", "client_id_to_address", "[", "remote_client_id", "]", ",", "# The start time in microseconds.", "\"ts\"", ":", "self", ".", "_seconds_to_microseconds", "(", "event", "[", "\"start_time\"", "]", ")", ",", "# The duration in microseconds.", "\"dur\"", ":", "self", ".", "_seconds_to_microseconds", "(", "event", "[", "\"end_time\"", "]", "-", "event", "[", "\"start_time\"", "]", ")", ",", "# What is this?", "\"ph\"", ":", "\"X\"", ",", "# This is the name of the color to display the box in.", "\"cname\"", ":", "color", ",", "# The extra user-defined data.", "\"args\"", ":", "event", "[", "\"extra_data\"", "]", ",", "}", "all_events", ".", "append", "(", "new_event", ")", "# Add another box with a color indicating whether it was a send", "# or a receive event.", "if", "event", "[", "\"event_type\"", "]", "==", "\"transfer_send\"", ":", "additional_event", "=", "new_event", ".", "copy", "(", ")", "additional_event", "[", "\"cname\"", "]", "=", "\"black\"", "all_events", ".", "append", "(", "additional_event", ")", "elif", "event", "[", "\"event_type\"", "]", "==", "\"transfer_receive\"", ":", "additional_event", "=", "new_event", ".", "copy", "(", ")", "additional_event", "[", "\"cname\"", "]", "=", "\"grey\"", "all_events", ".", "append", "(", "additional_event", ")", "else", ":", "pass", "if", "filename", "is", "not", "None", ":", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", 
"outfile", ":", "json", ".", "dump", "(", "all_events", ",", "outfile", ")", "else", ":", "return", "all_events" ]
Return a list of transfer events that can be viewed as a timeline. To view this information as a timeline, simply dump it as a json file by passing in "filename" or using json.dump, and then go to chrome://tracing in the Chrome web browser and load the dumped file. Make sure to enable "Flow events" in the "View Options" menu. Args: filename: If a filename is provided, the timeline is dumped to that file. Returns: If filename is not provided, this returns a list of profiling events. Each profile event is a dictionary.
[ "Return", "a", "list", "of", "transfer", "events", "that", "can", "be", "viewed", "as", "a", "timeline", "." ]
python
train
44.577778
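A usage sketch based on the docstring; it assumes a running Ray cluster of the version this code belongs to (ray.global_state has since been replaced), and the output path is a placeholder.

import ray
ray.init()
# Dump object-transfer events to a file that chrome://tracing can load;
# remember to enable "Flow events" in the View Options menu.
ray.global_state.chrome_tracing_object_transfer_dump(filename='/tmp/transfers.json')
# Or keep the events in memory instead of writing a file:
events = ray.global_state.chrome_tracing_object_transfer_dump()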
sbg/sevenbridges-python
sevenbridges/meta/resource.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/meta/resource.py#L143-L158
def get(cls, id, api=None): """ Fetches the resource from the server. :param id: Resource identifier :param api: sevenbridges Api instance. :return: Resource object. """ id = Transform.to_resource(id) api = api if api else cls._API if 'get' in cls._URL: extra = {'resource': cls.__name__, 'query': {'id': id}} logger.info('Fetching {} resource'.format(cls), extra=extra) resource = api.get(url=cls._URL['get'].format(id=id)).json() return cls(api=api, **resource) else: raise SbgError('Unable to fetch resource!')
[ "def", "get", "(", "cls", ",", "id", ",", "api", "=", "None", ")", ":", "id", "=", "Transform", ".", "to_resource", "(", "id", ")", "api", "=", "api", "if", "api", "else", "cls", ".", "_API", "if", "'get'", "in", "cls", ".", "_URL", ":", "extra", "=", "{", "'resource'", ":", "cls", ".", "__name__", ",", "'query'", ":", "{", "'id'", ":", "id", "}", "}", "logger", ".", "info", "(", "'Fetching {} resource'", ".", "format", "(", "cls", ")", ",", "extra", "=", "extra", ")", "resource", "=", "api", ".", "get", "(", "url", "=", "cls", ".", "_URL", "[", "'get'", "]", ".", "format", "(", "id", "=", "id", ")", ")", ".", "json", "(", ")", "return", "cls", "(", "api", "=", "api", ",", "*", "*", "resource", ")", "else", ":", "raise", "SbgError", "(", "'Unable to fetch resource!'", ")" ]
Fetches the resource from the server. :param id: Resource identifier :param api: sevenbridges Api instance. :return: Resource object.
[ "Fetches", "the", "resource", "from", "the", "server", ".", ":", "param", "id", ":", "Resource", "identifier", ":", "param", "api", ":", "sevenbridges", "Api", "instance", ".", ":", "return", ":", "Resource", "object", "." ]
python
train
39.875
bwengals/ccsnmultivar
ccsnmultivar/designmatrix.py
https://github.com/bwengals/ccsnmultivar/blob/dbadf52e728e0ce922cbc147864e693c2c2d305c/ccsnmultivar/designmatrix.py#L97-L203
def make_encoder(self,formula_dict,inter_list,param_dict): """ make the encoder function """ X_dict = {} Xcol_dict = {} encoder_dict = {} # first, replace param_dict[key] = values, with param_dict[key] = dmatrix for key in formula_dict: encoding,arg = formula_dict[key] if 'Dev' in encoding: # make deviation encoded design matrix drop_name = arg # encode deviation_encoder,X_sub,colnames_sub = _dev_encode(param_dict,drop_name,key) # additionally, store in dictionary for use by interactions X_dict[key] = X_sub Xcol_dict[key] = colnames_sub # store dictionary of encoder functions to keep for prediction encoder_dict[key] = deviation_encoder elif 'Dum' in encoding: # make dummy variable encoding design mat ref_name = arg dummy_encoder,X_sub,colnames_sub = _dum_encode(param_dict,ref_name,key) # additionally, store in dictionary for use by interactions X_dict[key] = X_sub Xcol_dict[key] = colnames_sub # store dictionary of encoder functions to keep for prediction encoder_dict[key] = dummy_encoder elif 'Poly' in encoding: # make polynomial encoding design mat degree = arg polynomial_encoder,X_sub,colnames_sub = _poly_encode(param_dict,degree,key) # additionally, store in dictionary for use by interactions X_dict[key] = X_sub Xcol_dict[key] = colnames_sub # store dictionary of encoder functions to keep for prediction encoder_dict[key] = polynomial_encoder else: print encoding raise Exception("Encoding name error") # now compute interaction designmatrices for interaction in inter_list: if len(interaction) >= 3: raise Exception("Doesn't allow 4-way or higher interaction terms") elif len(interaction) == 3: param_name1 = interaction[0] param_name2 = interaction[1] param_name3 = interaction[2] col_names1 = Xcol_dict[param_name1] col_names2 = Xcol_dict[param_name2] col_names3 = Xcol_dict[param_name3] # make 3-way encoder function def threeway_encoder(param_name1,param_name2,param_name3, \ col_names1, col_names2, col_names3, X_dict): """ needs the three names of the parameters to be encoded, as well as a dictionary containing the already encoded single parameter design matrices, keyed by name """ X1 = X_dict[param_name1] X2 = X_dict[param_name2] X3 = X_dict[param_name3] X_int = [] names_int = [] for i in np.arange(0,X1.shape[1]): for j in np.arange(0,X2.shape[1]): for k in np.arange(0,X3.shape[1]): X_int.append(X1[:,i]*X2[:,j]*X3[:,k]) names_int.append(col_names1[i] + "*" + \ col_names2[j] + "*" + col_names3[k]) # make X_int from lists to np array X_int = np.array(X_int).T return X_int, names_int encoder_dict['threeway'] = threeway_encoder elif len(interaction) == 2: # there are two interaction terms (A*B) param_name1 = interaction[0] param_name2 = interaction[1] col_names1 = Xcol_dict[param_name1] col_names2 = Xcol_dict[param_name2] # make twoway_encoder function def twoway_encoder(param_name1,param_name2, col_names1, col_names2, X_dict): X1 = X_dict[param_name1] X2 = X_dict[param_name2] X_int = [] names_int = [] for i in np.arange(0,X1.shape[1]): for j in np.arange(0,X2.shape[1]): X_int.append(X1[:,i]*X2[:,j]) names_int.append(col_names1[i] + "*" + col_names2[j]) X_int = np.array(X_int).T return X_int, names_int encoder_dict['twoway'] = twoway_encoder else: raise Exception("Error while evaluating meaning of interaction term") # make key in encoder to specify which columns are active encoder_dict['trimmed_columns'] = self._trimmed_columns return encoder_dict
[ "def", "make_encoder", "(", "self", ",", "formula_dict", ",", "inter_list", ",", "param_dict", ")", ":", "X_dict", "=", "{", "}", "Xcol_dict", "=", "{", "}", "encoder_dict", "=", "{", "}", "# first, replace param_dict[key] = values, with param_dict[key] = dmatrix", "for", "key", "in", "formula_dict", ":", "encoding", ",", "arg", "=", "formula_dict", "[", "key", "]", "if", "'Dev'", "in", "encoding", ":", "# make deviation encoded design matrix", "drop_name", "=", "arg", "# encode", "deviation_encoder", ",", "X_sub", ",", "colnames_sub", "=", "_dev_encode", "(", "param_dict", ",", "drop_name", ",", "key", ")", "# additionally, store in dictionary for use by interactions", "X_dict", "[", "key", "]", "=", "X_sub", "Xcol_dict", "[", "key", "]", "=", "colnames_sub", "# store dictionary of encoder functions to keep for prediction", "encoder_dict", "[", "key", "]", "=", "deviation_encoder", "elif", "'Dum'", "in", "encoding", ":", "# make dummy variable encoding design mat", "ref_name", "=", "arg", "dummy_encoder", ",", "X_sub", ",", "colnames_sub", "=", "_dum_encode", "(", "param_dict", ",", "ref_name", ",", "key", ")", "# additionally, store in dictionary for use by interactions", "X_dict", "[", "key", "]", "=", "X_sub", "Xcol_dict", "[", "key", "]", "=", "colnames_sub", "# store dictionary of encoder functions to keep for prediction", "encoder_dict", "[", "key", "]", "=", "dummy_encoder", "elif", "'Poly'", "in", "encoding", ":", "# make polynomial encoding design mat", "degree", "=", "arg", "polynomial_encoder", ",", "X_sub", ",", "colnames_sub", "=", "_poly_encode", "(", "param_dict", ",", "degree", ",", "key", ")", "# additionally, store in dictionary for use by interactions", "X_dict", "[", "key", "]", "=", "X_sub", "Xcol_dict", "[", "key", "]", "=", "colnames_sub", "# store dictionary of encoder functions to keep for prediction", "encoder_dict", "[", "key", "]", "=", "polynomial_encoder", "else", ":", "print", "encoding", "raise", "Exception", "(", "\"Encoding name error\"", ")", "# now compute interaction designmatrices", "for", "interaction", "in", "inter_list", ":", "if", "len", "(", "interaction", ")", ">=", "3", ":", "raise", "Exception", "(", "\"Doesn't allow 4-way or higher interaction terms\"", ")", "elif", "len", "(", "interaction", ")", "==", "3", ":", "param_name1", "=", "interaction", "[", "0", "]", "param_name2", "=", "interaction", "[", "1", "]", "param_name3", "=", "interaction", "[", "2", "]", "col_names1", "=", "Xcol_dict", "[", "param_name1", "]", "col_names2", "=", "Xcol_dict", "[", "param_name2", "]", "col_names3", "=", "Xcol_dict", "[", "param_name3", "]", "# make 3-way encoder function", "def", "threeway_encoder", "(", "param_name1", ",", "param_name2", ",", "param_name3", ",", "col_names1", ",", "col_names2", ",", "col_names3", ",", "X_dict", ")", ":", "\"\"\"\n needs the three names of the parameters to be encoded, as well as\n a dictionary containing the already encoded single parameter \n design matrices, keyed by name\n \"\"\"", "X1", "=", "X_dict", "[", "param_name1", "]", "X2", "=", "X_dict", "[", "param_name2", "]", "X3", "=", "X_dict", "[", "param_name3", "]", "X_int", "=", "[", "]", "names_int", "=", "[", "]", "for", "i", "in", "np", ".", "arange", "(", "0", ",", "X1", ".", "shape", "[", "1", "]", ")", ":", "for", "j", "in", "np", ".", "arange", "(", "0", ",", "X2", ".", "shape", "[", "1", "]", ")", ":", "for", "k", "in", "np", ".", "arange", "(", "0", ",", "X3", ".", "shape", "[", "1", "]", ")", ":", "X_int", ".", "append", "(", "X1", "[", ":", ",", "i", 
"]", "*", "X2", "[", ":", ",", "j", "]", "*", "X3", "[", ":", ",", "k", "]", ")", "names_int", ".", "append", "(", "col_names1", "[", "i", "]", "+", "\"*\"", "+", "col_names2", "[", "j", "]", "+", "\"*\"", "+", "col_names3", "[", "k", "]", ")", "# make X_int from lists to np array", "X_int", "=", "np", ".", "array", "(", "X_int", ")", ".", "T", "return", "X_int", ",", "names_int", "encoder_dict", "[", "'threeway'", "]", "=", "threeway_encoder", "elif", "len", "(", "interaction", ")", "==", "2", ":", "# there are two interaction terms (A*B)", "param_name1", "=", "interaction", "[", "0", "]", "param_name2", "=", "interaction", "[", "1", "]", "col_names1", "=", "Xcol_dict", "[", "param_name1", "]", "col_names2", "=", "Xcol_dict", "[", "param_name2", "]", "# make twoway_encoder function", "def", "twoway_encoder", "(", "param_name1", ",", "param_name2", ",", "col_names1", ",", "col_names2", ",", "X_dict", ")", ":", "X1", "=", "X_dict", "[", "param_name1", "]", "X2", "=", "X_dict", "[", "param_name2", "]", "X_int", "=", "[", "]", "names_int", "=", "[", "]", "for", "i", "in", "np", ".", "arange", "(", "0", ",", "X1", ".", "shape", "[", "1", "]", ")", ":", "for", "j", "in", "np", ".", "arange", "(", "0", ",", "X2", ".", "shape", "[", "1", "]", ")", ":", "X_int", ".", "append", "(", "X1", "[", ":", ",", "i", "]", "*", "X2", "[", ":", ",", "j", "]", ")", "names_int", ".", "append", "(", "col_names1", "[", "i", "]", "+", "\"*\"", "+", "col_names2", "[", "j", "]", ")", "X_int", "=", "np", ".", "array", "(", "X_int", ")", ".", "T", "return", "X_int", ",", "names_int", "encoder_dict", "[", "'twoway'", "]", "=", "twoway_encoder", "else", ":", "raise", "Exception", "(", "\"Error while evaluating meaning of interaction term\"", ")", "# make key in encoder to specify which columns are active", "encoder_dict", "[", "'trimmed_columns'", "]", "=", "self", ".", "_trimmed_columns", "return", "encoder_dict" ]
make the encoder function
[ "make", "the", "encoder", "function" ]
python
train
46.345794
fastai/fastai
fastai/text/learner.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/learner.py#L269-L284
def get_text_classifier(arch:Callable, vocab_sz:int, n_class:int, bptt:int=70, max_len:int=20*70, config:dict=None, drop_mult:float=1., lin_ftrs:Collection[int]=None, ps:Collection[float]=None, pad_idx:int=1) -> nn.Module: "Create a text classifier from `arch` and its `config`, maybe `pretrained`." meta = _model_meta[arch] config = ifnone(config, meta['config_clas'].copy()) for k in config.keys(): if k.endswith('_p'): config[k] *= drop_mult if lin_ftrs is None: lin_ftrs = [50] if ps is None: ps = [0.1]*len(lin_ftrs) layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class] ps = [config.pop('output_p')] + ps init = config.pop('init') if 'init' in config else None encoder = MultiBatchEncoder(bptt, max_len, arch(vocab_sz, **config), pad_idx=pad_idx) model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps)) return model if init is None else model.apply(init)
[ "def", "get_text_classifier", "(", "arch", ":", "Callable", ",", "vocab_sz", ":", "int", ",", "n_class", ":", "int", ",", "bptt", ":", "int", "=", "70", ",", "max_len", ":", "int", "=", "20", "*", "70", ",", "config", ":", "dict", "=", "None", ",", "drop_mult", ":", "float", "=", "1.", ",", "lin_ftrs", ":", "Collection", "[", "int", "]", "=", "None", ",", "ps", ":", "Collection", "[", "float", "]", "=", "None", ",", "pad_idx", ":", "int", "=", "1", ")", "->", "nn", ".", "Module", ":", "meta", "=", "_model_meta", "[", "arch", "]", "config", "=", "ifnone", "(", "config", ",", "meta", "[", "'config_clas'", "]", ".", "copy", "(", ")", ")", "for", "k", "in", "config", ".", "keys", "(", ")", ":", "if", "k", ".", "endswith", "(", "'_p'", ")", ":", "config", "[", "k", "]", "*=", "drop_mult", "if", "lin_ftrs", "is", "None", ":", "lin_ftrs", "=", "[", "50", "]", "if", "ps", "is", "None", ":", "ps", "=", "[", "0.1", "]", "*", "len", "(", "lin_ftrs", ")", "layers", "=", "[", "config", "[", "meta", "[", "'hid_name'", "]", "]", "*", "3", "]", "+", "lin_ftrs", "+", "[", "n_class", "]", "ps", "=", "[", "config", ".", "pop", "(", "'output_p'", ")", "]", "+", "ps", "init", "=", "config", ".", "pop", "(", "'init'", ")", "if", "'init'", "in", "config", "else", "None", "encoder", "=", "MultiBatchEncoder", "(", "bptt", ",", "max_len", ",", "arch", "(", "vocab_sz", ",", "*", "*", "config", ")", ",", "pad_idx", "=", "pad_idx", ")", "model", "=", "SequentialRNN", "(", "encoder", ",", "PoolingLinearClassifier", "(", "layers", ",", "ps", ")", ")", "return", "model", "if", "init", "is", "None", "else", "model", ".", "apply", "(", "init", ")" ]
Create a text classifier from `arch` and its `config`, maybe `pretrained`.
[ "Create", "a", "text", "classifier", "from", "arch", "and", "its", "config", "maybe", "pretrained", "." ]
python
train
60.8125
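A minimal sketch of building the classifier head directly, assuming fastai v1 where AWD_LSTM is the stock architecture registered in _model_meta; the sizes are placeholders.

from fastai.text import AWD_LSTM, get_text_classifier
model = get_text_classifier(
    AWD_LSTM,          # arch with an entry in _model_meta
    vocab_sz=10000,    # placeholder vocabulary size
    n_class=2,         # binary classification head
    drop_mult=0.5,     # scales every dropout probability in the config
)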
SkyLothar/requests-aliyun
aliyunauth/sign_ver_1_0.py
https://github.com/SkyLothar/requests-aliyun/blob/43f6646dcf7f09691ae997b09d365ad9e76386cf/aliyunauth/sign_ver_1_0.py#L82-L100
def sign(self, method, params): """Calculate signature with the SIG_METHOD(HMAC-SHA1) Returns a base64 encoeded string of the hex signature :param method: the http verb :param params: the params needs calculate """ query_str = utils.percent_encode(params.items(), True) str_to_sign = "{0}&%2F&{1}".format( method, utils.percent_quote(query_str) ) sig = hmac.new( utils.to_bytes(self._secret_key + "&"), utils.to_bytes(str_to_sign), hashlib.sha1 ) return base64.b64encode(sig.digest())
[ "def", "sign", "(", "self", ",", "method", ",", "params", ")", ":", "query_str", "=", "utils", ".", "percent_encode", "(", "params", ".", "items", "(", ")", ",", "True", ")", "str_to_sign", "=", "\"{0}&%2F&{1}\"", ".", "format", "(", "method", ",", "utils", ".", "percent_quote", "(", "query_str", ")", ")", "sig", "=", "hmac", ".", "new", "(", "utils", ".", "to_bytes", "(", "self", ".", "_secret_key", "+", "\"&\"", ")", ",", "utils", ".", "to_bytes", "(", "str_to_sign", ")", ",", "hashlib", ".", "sha1", ")", "return", "base64", ".", "b64encode", "(", "sig", ".", "digest", "(", ")", ")" ]
Calculate signature with the SIG_METHOD(HMAC-SHA1) Returns a base64 encoded string of the hex signature :param method: the http verb :param params: the params needed for the calculation
[ "Calculate", "signature", "with", "the", "SIG_METHOD", "(", "HMAC", "-", "SHA1", ")", "Returns", "a", "base64", "encoded", "string", "of", "the", "hex", "signature" ]
python
train
31.842105
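The method above is normally called by the auth class while signing a request; the essence of the HMAC-SHA1 recipe it implements can be sketched standalone. This is a simplified approximation (Aliyun's percent encoding has extra rules handled by the package's utils), and the key and params are placeholders.

import base64
import hashlib
import hmac
from urllib.parse import quote

params = {'Action': 'DescribeRegions', 'Format': 'json'}
query = '&'.join('{0}={1}'.format(quote(k, safe=''), quote(v, safe=''))
                 for k, v in sorted(params.items()))
str_to_sign = 'GET&%2F&' + quote(query, safe='')
digest = hmac.new(b'secret-key&', str_to_sign.encode(), hashlib.sha1).digest()
signature = base64.b64encode(digest)  # base64-encoded HMAC-SHA1, as returned by sign()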
freelancer/freelancer-sdk-python
freelancersdk/resources/projects/projects.py
https://github.com/freelancer/freelancer-sdk-python/blob/e09034936d6f13b3909a9464ee329c81c1834941/freelancersdk/resources/projects/projects.py#L255-L275
def get_bids(session, project_ids=[], bid_ids=[], limit=10, offset=0): """ Get the list of bids """ get_bids_data = {} if bid_ids: get_bids_data['bids[]'] = bid_ids if project_ids: get_bids_data['projects[]'] = project_ids get_bids_data['limit'] = limit get_bids_data['offset'] = offset # GET /api/projects/0.1/bids/ response = make_get_request(session, 'bids', params_data=get_bids_data) json_data = response.json() if response.status_code == 200: return json_data['result'] else: raise BidsNotFoundException( message=json_data['message'], error_code=json_data['error_code'], request_id=json_data['request_id'] )
[ "def", "get_bids", "(", "session", ",", "project_ids", "=", "[", "]", ",", "bid_ids", "=", "[", "]", ",", "limit", "=", "10", ",", "offset", "=", "0", ")", ":", "get_bids_data", "=", "{", "}", "if", "bid_ids", ":", "get_bids_data", "[", "'bids[]'", "]", "=", "bid_ids", "if", "project_ids", ":", "get_bids_data", "[", "'projects[]'", "]", "=", "project_ids", "get_bids_data", "[", "'limit'", "]", "=", "limit", "get_bids_data", "[", "'offset'", "]", "=", "offset", "# GET /api/projects/0.1/bids/", "response", "=", "make_get_request", "(", "session", ",", "'bids'", ",", "params_data", "=", "get_bids_data", ")", "json_data", "=", "response", ".", "json", "(", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "json_data", "[", "'result'", "]", "else", ":", "raise", "BidsNotFoundException", "(", "message", "=", "json_data", "[", "'message'", "]", ",", "error_code", "=", "json_data", "[", "'error_code'", "]", ",", "request_id", "=", "json_data", "[", "'request_id'", "]", ")" ]
Get the list of bids
[ "Get", "the", "list", "of", "bids" ]
python
valid
33.761905
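A usage sketch following the SDK's session pattern; the OAuth token, URL and project ids are placeholders, and a BidsNotFoundException is raised on non-200 responses as shown above.

from freelancersdk.session import Session
from freelancersdk.resources.projects.projects import get_bids

session = Session(oauth_token='<oauth-token>', url='https://www.freelancer.com')  # placeholders
bids = get_bids(session, project_ids=[201, 202], limit=10, offset=0)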
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L505-L536
def acquire_lock(cls, mode, user=None): """Set a context manager to lock the whole storage. ``mode`` must either be "r" for shared access or "w" for exclusive access. ``user`` is the name of the logged in user or empty. """ if not user: return with EteSyncCache.lock: cls.user = user cls.etesync = cls._get_etesync_for_user(cls.user) if cls._should_sync(): cls._mark_sync() cls.etesync.get_or_create_user_info(force_fetch=True) cls.etesync.sync_journal_list() for journal in cls.etesync.list(): cls.etesync.pull_journal(journal.uid) yield if cls.etesync.journal_list_is_dirty(): cls.etesync.sync_journal_list() for journal in cls.etesync.list(): if cls.etesync.journal_is_dirty(journal.uid): cls.etesync.sync_journal(journal.uid) cls.etesync = None cls.user = None
[ "def", "acquire_lock", "(", "cls", ",", "mode", ",", "user", "=", "None", ")", ":", "if", "not", "user", ":", "return", "with", "EteSyncCache", ".", "lock", ":", "cls", ".", "user", "=", "user", "cls", ".", "etesync", "=", "cls", ".", "_get_etesync_for_user", "(", "cls", ".", "user", ")", "if", "cls", ".", "_should_sync", "(", ")", ":", "cls", ".", "_mark_sync", "(", ")", "cls", ".", "etesync", ".", "get_or_create_user_info", "(", "force_fetch", "=", "True", ")", "cls", ".", "etesync", ".", "sync_journal_list", "(", ")", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "cls", ".", "etesync", ".", "pull_journal", "(", "journal", ".", "uid", ")", "yield", "if", "cls", ".", "etesync", ".", "journal_list_is_dirty", "(", ")", ":", "cls", ".", "etesync", ".", "sync_journal_list", "(", ")", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "if", "cls", ".", "etesync", ".", "journal_is_dirty", "(", "journal", ".", "uid", ")", ":", "cls", ".", "etesync", ".", "sync_journal", "(", "journal", ".", "uid", ")", "cls", ".", "etesync", "=", "None", "cls", ".", "user", "=", "None" ]
Set a context manager to lock the whole storage. ``mode`` must either be "r" for shared access or "w" for exclusive access. ``user`` is the name of the logged in user or empty.
[ "Set", "a", "context", "manager", "to", "lock", "the", "whole", "storage", "." ]
python
train
32.5
Alignak-monitoring/alignak
alignak/objects/timeperiod.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/timeperiod.py#L472-L532
def get_next_invalid_time_from_t(self, timestamp): # pylint: disable=too-many-branches """ Get the next invalid time :param timestamp: timestamp in seconds (of course) :type timestamp: int or float :return: timestamp of next invalid time :rtype: int or float """ timestamp = int(timestamp) original_t = timestamp dr_mins = [] for daterange in self.dateranges: timestamp = original_t cont = True while cont: start = daterange.get_next_valid_time_from_t(timestamp) if start is not None: end = daterange.get_next_invalid_time_from_t(start) dr_mins.append((start, end)) timestamp = end else: cont = False if timestamp > original_t + (3600 * 24 * 365): cont = False periods = merge_periods(dr_mins) # manage exclude periods dr_mins = [] for exclude in self.exclude: for daterange in exclude.dateranges: timestamp = original_t cont = True while cont: start = daterange.get_next_valid_time_from_t(timestamp) if start is not None: end = daterange.get_next_invalid_time_from_t(start) dr_mins.append((start, end)) timestamp = end else: cont = False if timestamp > original_t + (3600 * 24 * 365): cont = False if not dr_mins: periods_exclude = [] else: periods_exclude = merge_periods(dr_mins) if len(periods) >= 1: # if first valid period is after original timestamp, the first invalid time # is the original timestamp if periods[0][0] > original_t: return original_t # check the first period + first period of exclude if len(periods_exclude) >= 1: if periods_exclude[0][0] < periods[0][1]: return periods_exclude[0][0] return periods[0][1] return original_t
[ "def", "get_next_invalid_time_from_t", "(", "self", ",", "timestamp", ")", ":", "# pylint: disable=too-many-branches", "timestamp", "=", "int", "(", "timestamp", ")", "original_t", "=", "timestamp", "dr_mins", "=", "[", "]", "for", "daterange", "in", "self", ".", "dateranges", ":", "timestamp", "=", "original_t", "cont", "=", "True", "while", "cont", ":", "start", "=", "daterange", ".", "get_next_valid_time_from_t", "(", "timestamp", ")", "if", "start", "is", "not", "None", ":", "end", "=", "daterange", ".", "get_next_invalid_time_from_t", "(", "start", ")", "dr_mins", ".", "append", "(", "(", "start", ",", "end", ")", ")", "timestamp", "=", "end", "else", ":", "cont", "=", "False", "if", "timestamp", ">", "original_t", "+", "(", "3600", "*", "24", "*", "365", ")", ":", "cont", "=", "False", "periods", "=", "merge_periods", "(", "dr_mins", ")", "# manage exclude periods", "dr_mins", "=", "[", "]", "for", "exclude", "in", "self", ".", "exclude", ":", "for", "daterange", "in", "exclude", ".", "dateranges", ":", "timestamp", "=", "original_t", "cont", "=", "True", "while", "cont", ":", "start", "=", "daterange", ".", "get_next_valid_time_from_t", "(", "timestamp", ")", "if", "start", "is", "not", "None", ":", "end", "=", "daterange", ".", "get_next_invalid_time_from_t", "(", "start", ")", "dr_mins", ".", "append", "(", "(", "start", ",", "end", ")", ")", "timestamp", "=", "end", "else", ":", "cont", "=", "False", "if", "timestamp", ">", "original_t", "+", "(", "3600", "*", "24", "*", "365", ")", ":", "cont", "=", "False", "if", "not", "dr_mins", ":", "periods_exclude", "=", "[", "]", "else", ":", "periods_exclude", "=", "merge_periods", "(", "dr_mins", ")", "if", "len", "(", "periods", ")", ">=", "1", ":", "# if first valid period is after original timestamp, the first invalid time", "# is the original timestamp", "if", "periods", "[", "0", "]", "[", "0", "]", ">", "original_t", ":", "return", "original_t", "# check the first period + first period of exclude", "if", "len", "(", "periods_exclude", ")", ">=", "1", ":", "if", "periods_exclude", "[", "0", "]", "[", "0", "]", "<", "periods", "[", "0", "]", "[", "1", "]", ":", "return", "periods_exclude", "[", "0", "]", "[", "0", "]", "return", "periods", "[", "0", "]", "[", "1", "]", "return", "original_t" ]
Get the next invalid time :param timestamp: timestamp in seconds (of course) :type timestamp: int or float :return: timestamp of next invalid time :rtype: int or float
[ "Get", "the", "next", "invalid", "time" ]
python
train
37.04918
CivicSpleen/ambry
ambry/etl/pipeline.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/etl/pipeline.py#L2097-L2104
def _subset(self, subset): """Return a new pipeline with a subset of the sections""" pl = Pipeline(bundle=self.bundle) for group_name, pl_segment in iteritems(self): if group_name not in subset: continue pl[group_name] = pl_segment return pl
[ "def", "_subset", "(", "self", ",", "subset", ")", ":", "pl", "=", "Pipeline", "(", "bundle", "=", "self", ".", "bundle", ")", "for", "group_name", ",", "pl_segment", "in", "iteritems", "(", "self", ")", ":", "if", "group_name", "not", "in", "subset", ":", "continue", "pl", "[", "group_name", "]", "=", "pl_segment", "return", "pl" ]
Return a new pipeline with a subset of the sections
[ "Return", "a", "new", "pipeline", "with", "a", "subset", "of", "the", "sections" ]
python
train
38.25
chemlab/chemlab
chemlab/mviewer/representations/ballandstick.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/representations/ballandstick.py#L279-L310
def hide(self, selections): '''Hide objects in this representation. BallAndStickRepresentation support selections of atoms and bonds. To hide the first atom and the first bond you can use the following code:: from chemlab.mviewer.state import Selection representation.hide({'atoms': Selection([0], system.n_atoms), 'bonds': Selection([0], system.n_bonds)}) Returns the current Selection of hidden atoms and bonds. ''' if 'atoms' in selections: self.hidden_state['atoms'] = selections['atoms'] self.on_atom_hidden_changed() if 'bonds' in selections: self.hidden_state['bonds'] = selections['bonds'] self.on_bond_hidden_changed() if 'box' in selections: self.hidden_state['box'] = box_s = selections['box'] if box_s.mask[0]: if self.viewer.has_renderer(self.box_renderer): self.viewer.remove_renderer(self.box_renderer) else: if not self.viewer.has_renderer(self.box_renderer): self.viewer.add_renderer(self.box_renderer) return self.hidden_state
[ "def", "hide", "(", "self", ",", "selections", ")", ":", "if", "'atoms'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'atoms'", "]", "=", "selections", "[", "'atoms'", "]", "self", ".", "on_atom_hidden_changed", "(", ")", "if", "'bonds'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'bonds'", "]", "=", "selections", "[", "'bonds'", "]", "self", ".", "on_bond_hidden_changed", "(", ")", "if", "'box'", "in", "selections", ":", "self", ".", "hidden_state", "[", "'box'", "]", "=", "box_s", "=", "selections", "[", "'box'", "]", "if", "box_s", ".", "mask", "[", "0", "]", ":", "if", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "remove_renderer", "(", "self", ".", "box_renderer", ")", "else", ":", "if", "not", "self", ".", "viewer", ".", "has_renderer", "(", "self", ".", "box_renderer", ")", ":", "self", ".", "viewer", ".", "add_renderer", "(", "self", ".", "box_renderer", ")", "return", "self", ".", "hidden_state" ]
Hide objects in this representation. BallAndStickRepresentation supports selections of atoms and bonds. To hide the first atom and the first bond you can use the following code:: from chemlab.mviewer.state import Selection representation.hide({'atoms': Selection([0], system.n_atoms), 'bonds': Selection([0], system.n_bonds)}) Returns the current Selection of hidden atoms and bonds.
[ "Hide", "objects", "in", "this", "representation", ".", "BallAndStickRepresentation", "supports", "selections", "of", "atoms", "and", "bonds", "." ]
python
train
37.96875
titusjan/argos
argos/repo/rtiplugins/ncdf.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/ncdf.py#L70-L84
def variableMissingValue(ncVar): """ Returns the missingData given a NetCDF variable Looks for one of the following attributes: _FillValue, missing_value, MissingValue, missingValue. Returns None if these attributes are not found. """ attributes = ncVarAttributes(ncVar) if not attributes: return None # a premature optimization :-) for key in ('missing_value', 'MissingValue', 'missingValue', 'FillValue', '_FillValue'): if key in attributes: missingDataValue = attributes[key] return missingDataValue return None
[ "def", "variableMissingValue", "(", "ncVar", ")", ":", "attributes", "=", "ncVarAttributes", "(", "ncVar", ")", "if", "not", "attributes", ":", "return", "None", "# a premature optimization :-)", "for", "key", "in", "(", "'missing_value'", ",", "'MissingValue'", ",", "'missingValue'", ",", "'FillValue'", ",", "'_FillValue'", ")", ":", "if", "key", "in", "attributes", ":", "missingDataValue", "=", "attributes", "[", "key", "]", "return", "missingDataValue", "return", "None" ]
Returns the missingData given a NetCDF variable Looks for one of the following attributes: _FillValue, missing_value, MissingValue, missingValue. Returns None if these attributes are not found.
[ "Returns", "the", "missingData", "given", "a", "NetCDF", "variable" ]
python
train
38.8
denisenkom/pytds
src/pytds/tds.py
https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L577-L594
def process_cancel(self): """ Process the incoming token stream until it finds an end token DONE with the cancel flag set. At that point the connection should be ready to handle a new query. In case when no cancel request is pending this function does nothing. """ self.log_response_message('got CANCEL message') # silly cases, nothing to do if not self.in_cancel: return while True: token_id = self.get_token_id() self.process_token(token_id) if not self.in_cancel: return
[ "def", "process_cancel", "(", "self", ")", ":", "self", ".", "log_response_message", "(", "'got CANCEL message'", ")", "# silly cases, nothing to do", "if", "not", "self", ".", "in_cancel", ":", "return", "while", "True", ":", "token_id", "=", "self", ".", "get_token_id", "(", ")", "self", ".", "process_token", "(", "token_id", ")", "if", "not", "self", ".", "in_cancel", ":", "return" ]
Process the incoming token stream until it finds an end token DONE with the cancel flag set. At that point the connection should be ready to handle a new query. If no cancel request is pending, this function does nothing.
[ "Process", "the", "incoming", "token", "stream", "until", "it", "finds", "an", "end", "token", "DONE", "with", "the", "cancel", "flag", "set", ".", "At", "that", "point", "the", "connection", "should", "be", "ready", "to", "handle", "a", "new", "query", "." ]
python
train
33.444444
CTPUG/wafer
wafer/kv/utils.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/kv/utils.py#L5-L16
def deserialize_by_field(value, field): """ Some types get serialized to JSON, as strings. If we know what they are supposed to be, we can deserialize them """ if isinstance(field, forms.DateTimeField): value = parse_datetime(value) elif isinstance(field, forms.DateField): value = parse_date(value) elif isinstance(field, forms.TimeField): value = parse_time(value) return value
[ "def", "deserialize_by_field", "(", "value", ",", "field", ")", ":", "if", "isinstance", "(", "field", ",", "forms", ".", "DateTimeField", ")", ":", "value", "=", "parse_datetime", "(", "value", ")", "elif", "isinstance", "(", "field", ",", "forms", ".", "DateField", ")", ":", "value", "=", "parse_date", "(", "value", ")", "elif", "isinstance", "(", "field", ",", "forms", ".", "TimeField", ")", ":", "value", "=", "parse_time", "(", "value", ")", "return", "value" ]
Some types get serialized to JSON, as strings. If we know what they are supposed to be, we can deserialize them
[ "Some", "types", "get", "serialized", "to", "JSON", "as", "strings", ".", "If", "we", "know", "what", "they", "are", "supposed", "to", "be", "we", "can", "deserialize", "them" ]
python
train
35.333333
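A framework-free analogue of the same dispatch idea as deserialize_by_field, using datetime's ISO parsers instead of Django's form fields (the names and the ISO format are illustrative only, not the wafer code):

from datetime import date, datetime, time

_PARSERS = {
    datetime: datetime.fromisoformat,
    date: date.fromisoformat,
    time: time.fromisoformat,
}

def deserialize_by_type(value, expected_type):
    # JSON hands back strings; recover the richer type when we know it.
    parser = _PARSERS.get(expected_type)
    return parser(value) if parser else value

print(deserialize_by_type("2023-05-01", date))  # datetime.date(2023, 5, 1)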
penguinmenac3/starttf
starttf/data/autorecords.py
https://github.com/penguinmenac3/starttf/blob/f4086489d169757c0504e822165db2fea534b944/starttf/data/autorecords.py#L264-L300
def write_data(hyper_params, mode, sequence, num_threads): """ Write a tf record containing a feature dict and a label dict. :param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}} :param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation". :param sequence: A tf.keras.utils.sequence. :param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to works nice) :return: """ if not isinstance(sequence, Sequence) and not (callable(getattr(sequence, "__getitem__", None)) and callable(getattr(sequence, "__len__", None))): raise ValueError("sequence must be tf.keras.utils.Sequence or a subtype or implement __len__(self) and __getitem__(self, idx)") prefix = os.path.join(hyper_params.train.get("tf_records_path", "tfrecords"), mode) prefix = prefix.replace("\\", "/") data_tmp_folder = "/".join(prefix.split("/")[:-1]) if not os.path.exists(data_tmp_folder): os.makedirs(data_tmp_folder) args = [(hyper_params, sequence, num_threads, i, (prefix + "_%d.tfrecords") % i) for i in range(num_threads)] # Retrieve a single batch sample_feature, sample_label = sequence[0] config = {"num_threads": num_threads} for k in sample_feature.keys(): config["feature_" + k] = {"shape": sample_feature[k].shape[1:], "dtype": sample_feature[k].dtype.name} for k in sample_label.keys(): config["label_" + k] = {"shape": sample_label[k].shape[1:], "dtype": sample_label[k].dtype.name} with open(prefix + '_config.json', 'w') as outfile: json.dump(config, outfile) pool = Pool(processes=num_threads) pool.map(_write_tf_record_pool_helper, args)
[ "def", "write_data", "(", "hyper_params", ",", "mode", ",", "sequence", ",", "num_threads", ")", ":", "if", "not", "isinstance", "(", "sequence", ",", "Sequence", ")", "and", "not", "(", "callable", "(", "getattr", "(", "sequence", ",", "\"__getitem__\"", ",", "None", ")", ")", "and", "callable", "(", "getattr", "(", "sequence", ",", "\"__len__\"", ",", "None", ")", ")", ")", ":", "raise", "ValueError", "(", "\"sequence must be tf.keras.utils.Sequence or a subtype or implement __len__(self) and __getitem__(self, idx)\"", ")", "prefix", "=", "os", ".", "path", ".", "join", "(", "hyper_params", ".", "train", ".", "get", "(", "\"tf_records_path\"", ",", "\"tfrecords\"", ")", ",", "mode", ")", "prefix", "=", "prefix", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "data_tmp_folder", "=", "\"/\"", ".", "join", "(", "prefix", ".", "split", "(", "\"/\"", ")", "[", ":", "-", "1", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "data_tmp_folder", ")", ":", "os", ".", "makedirs", "(", "data_tmp_folder", ")", "args", "=", "[", "(", "hyper_params", ",", "sequence", ",", "num_threads", ",", "i", ",", "(", "prefix", "+", "\"_%d.tfrecords\"", ")", "%", "i", ")", "for", "i", "in", "range", "(", "num_threads", ")", "]", "# Retrieve a single batch", "sample_feature", ",", "sample_label", "=", "sequence", "[", "0", "]", "config", "=", "{", "\"num_threads\"", ":", "num_threads", "}", "for", "k", "in", "sample_feature", ".", "keys", "(", ")", ":", "config", "[", "\"feature_\"", "+", "k", "]", "=", "{", "\"shape\"", ":", "sample_feature", "[", "k", "]", ".", "shape", "[", "1", ":", "]", ",", "\"dtype\"", ":", "sample_feature", "[", "k", "]", ".", "dtype", ".", "name", "}", "for", "k", "in", "sample_label", ".", "keys", "(", ")", ":", "config", "[", "\"label_\"", "+", "k", "]", "=", "{", "\"shape\"", ":", "sample_label", "[", "k", "]", ".", "shape", "[", "1", ":", "]", ",", "\"dtype\"", ":", "sample_label", "[", "k", "]", ".", "dtype", ".", "name", "}", "with", "open", "(", "prefix", "+", "'_config.json'", ",", "'w'", ")", "as", "outfile", ":", "json", ".", "dump", "(", "config", ",", "outfile", ")", "pool", "=", "Pool", "(", "processes", "=", "num_threads", ")", "pool", ".", "map", "(", "_write_tf_record_pool_helper", ",", "args", ")" ]
Write a tf record containing a feature dict and a label dict. :param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}} :param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation". :param sequence: A tf.keras.utils.Sequence. :param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation work nicely) :return:
[ "Write", "a", "tf", "record", "containing", "a", "feature", "dict", "and", "a", "label", "dict", "." ]
python
train
48.756757
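A minimal sketch of a caller-side sequence that satisfies the __len__/__getitem__ contract write_data checks; the key names, shapes, and dtypes are invented for illustration and say nothing about starttf's real data:

import numpy as np
from tensorflow.keras.utils import Sequence

class ToyBatches(Sequence):
    # Each item is a (feature_dict, label_dict) pair of already-batched arrays,
    # which is the shape write_data inspects when building its config JSON.
    def __len__(self):
        return 4

    def __getitem__(self, idx):
        features = {"image": np.zeros((8, 32, 32, 3), dtype=np.float32)}
        labels = {"class_id": np.zeros((8, 1), dtype=np.int64)}
        return features, labels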
ambitioninc/rabbitmq-admin
rabbitmq_admin/base.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L62-L73
def _api_put(self, url, **kwargs): """ A convenience wrapper for _put. Adds headers, auth and base url by default """ kwargs['url'] = self.url + url kwargs['auth'] = self.auth headers = deepcopy(self.headers) headers.update(kwargs.get('headers', {})) kwargs['headers'] = headers self._put(**kwargs)
[ "def", "_api_put", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'url'", "]", "=", "self", ".", "url", "+", "url", "kwargs", "[", "'auth'", "]", "=", "self", ".", "auth", "headers", "=", "deepcopy", "(", "self", ".", "headers", ")", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ",", "{", "}", ")", ")", "kwargs", "[", "'headers'", "]", "=", "headers", "self", ".", "_put", "(", "*", "*", "kwargs", ")" ]
A convenience wrapper for _put. Adds headers, auth and base url by default
[ "A", "convenience", "wrapper", "for", "_put", ".", "Adds", "headers", "auth", "and", "base", "url", "by", "default" ]
python
train
30.666667
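The wrapper's job is URL joining plus non-destructive header merging; a standalone sketch with the requests library (the function name and argument layout are assumptions, not the rabbitmq_admin API):

from copy import deepcopy
import requests

def api_put(base_url, default_headers, auth, path, **kwargs):
    # Merge per-call headers over the client defaults without mutating either,
    # then issue the PUT against the joined URL.
    headers = deepcopy(default_headers)
    headers.update(kwargs.pop("headers", {}))
    return requests.put(base_url + path, auth=auth, headers=headers, **kwargs)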
Rapptz/discord.py
discord/voice_client.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/voice_client.py#L415-L446
def send_audio_packet(self, data, *, encode=True): """Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed. """ self.checked_add('sequence', 1, 65535) if encode: encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME) else: encoded_data = data packet = self._get_voice_packet(encoded_data) try: self.socket.sendto(packet, (self.endpoint_ip, self.voice_port)) except BlockingIOError: log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp) self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295)
[ "def", "send_audio_packet", "(", "self", ",", "data", ",", "*", ",", "encode", "=", "True", ")", ":", "self", ".", "checked_add", "(", "'sequence'", ",", "1", ",", "65535", ")", "if", "encode", ":", "encoded_data", "=", "self", ".", "encoder", ".", "encode", "(", "data", ",", "self", ".", "encoder", ".", "SAMPLES_PER_FRAME", ")", "else", ":", "encoded_data", "=", "data", "packet", "=", "self", ".", "_get_voice_packet", "(", "encoded_data", ")", "try", ":", "self", ".", "socket", ".", "sendto", "(", "packet", ",", "(", "self", ".", "endpoint_ip", ",", "self", ".", "voice_port", ")", ")", "except", "BlockingIOError", ":", "log", ".", "warning", "(", "'A packet has been dropped (seq: %s, timestamp: %s)'", ",", "self", ".", "sequence", ",", "self", ".", "timestamp", ")", "self", ".", "checked_add", "(", "'timestamp'", ",", "self", ".", "encoder", ".", "SAMPLES_PER_FRAME", ",", "4294967295", ")" ]
Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed.
[ "Sends", "an", "audio", "packet", "composed", "of", "the", "data", "." ]
python
train
33.28125
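The sequence and timestamp counters in send_audio_packet are fixed-width RTP-style fields that must wrap; the record does not show checked_add itself, so the modulo arithmetic below is an assumption about its behaviour, kept only to illustrate the wrap-around:

def wrapping_add(value, increment, limit):
    # Keep a packet counter inside its fixed-width range (65535 for the
    # 16-bit sequence number, 4294967295 for the 32-bit timestamp).
    return (value + increment) % (limit + 1)

print(wrapping_add(65535, 1, 65535))              # 0 - the sequence number wraps
print(wrapping_add(4294967000, 960, 4294967295))  # 664 - the timestamp wraps too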
TimBest/django-multi-form-view
multi_form_view/base.py
https://github.com/TimBest/django-multi-form-view/blob/d7f0a341881a5a36e4d567ca9bc29d233de01720/multi_form_view/base.py#L127-L135
def get_objects(self): """ Returns dictionary with the instance objects for each form. Keys should match the corresponding form. """ objects = {} for key in six.iterkeys(self.form_classes): objects[key] = None return objects
[ "def", "get_objects", "(", "self", ")", ":", "objects", "=", "{", "}", "for", "key", "in", "six", ".", "iterkeys", "(", "self", ".", "form_classes", ")", ":", "objects", "[", "key", "]", "=", "None", "return", "objects" ]
Returns dictionary with the instance objects for each form. Keys should match the corresponding form.
[ "Returns", "dictionary", "with", "the", "instance", "objects", "for", "each", "form", ".", "Keys", "should", "match", "the", "corresponding", "form", "." ]
python
train
31.555556
Parisson/TimeSide
timeside/core/processor.py
https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/processor.py#L230-L267
def process(self, frames, eod): """Returns an iterator over tuples of the form (buffer, eod) where buffer is a fixed-sized block of data, and eod indicates whether this is the last block. In case padding is deactivated the last block may be smaller than the buffer size. """ src_index = 0 remaining = len(frames) while remaining: space = self.buffer_size - self.len copylen = remaining < space and remaining or space src = frames[src_index:src_index + copylen] if self.len == 0 and copylen == self.buffer_size: # avoid unnecessary copy buffer = src else: buffer = self.buffer buffer[self.len:self.len + copylen] = src remaining -= copylen src_index += copylen self.len += copylen if self.len == self.buffer_size: yield buffer, (eod and not remaining) self.len = 0 if eod and self.len: block = self.buffer if self.pad: self.buffer[self.len:self.buffer_size] = 0 else: block = self.buffer[0:self.len] yield block, True self.len = 0
[ "def", "process", "(", "self", ",", "frames", ",", "eod", ")", ":", "src_index", "=", "0", "remaining", "=", "len", "(", "frames", ")", "while", "remaining", ":", "space", "=", "self", ".", "buffer_size", "-", "self", ".", "len", "copylen", "=", "remaining", "<", "space", "and", "remaining", "or", "space", "src", "=", "frames", "[", "src_index", ":", "src_index", "+", "copylen", "]", "if", "self", ".", "len", "==", "0", "and", "copylen", "==", "self", ".", "buffer_size", ":", "# avoid unnecessary copy", "buffer", "=", "src", "else", ":", "buffer", "=", "self", ".", "buffer", "buffer", "[", "self", ".", "len", ":", "self", ".", "len", "+", "copylen", "]", "=", "src", "remaining", "-=", "copylen", "src_index", "+=", "copylen", "self", ".", "len", "+=", "copylen", "if", "self", ".", "len", "==", "self", ".", "buffer_size", ":", "yield", "buffer", ",", "(", "eod", "and", "not", "remaining", ")", "self", ".", "len", "=", "0", "if", "eod", "and", "self", ".", "len", ":", "block", "=", "self", ".", "buffer", "if", "self", ".", "pad", ":", "self", ".", "buffer", "[", "self", ".", "len", ":", "self", ".", "buffer_size", "]", "=", "0", "else", ":", "block", "=", "self", ".", "buffer", "[", "0", ":", "self", ".", "len", "]", "yield", "block", ",", "True", "self", ".", "len", "=", "0" ]
Returns an iterator over tuples of the form (buffer, eod) where buffer is a fixed-sized block of data, and eod indicates whether this is the last block. In case padding is deactivated the last block may be smaller than the buffer size.
[ "Returns", "an", "iterator", "over", "tuples", "of", "the", "form", "(", "buffer", "eod", ")", "where", "buffer", "is", "a", "fixed", "-", "sized", "block", "of", "data", "and", "eod", "indicates", "whether", "this", "is", "the", "last", "block", ".", "In", "case", "padding", "is", "deactivated", "the", "last", "block", "may", "be", "smaller", "than", "the", "buffer", "size", "." ]
python
train
33.5
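A simplified, pad-free cousin of the same fixed-size rebuffering generator, included only to show the accumulate/flush shape of the loop:

def rechunk(chunks, size):
    # Re-emit an arbitrary stream of byte chunks as fixed-size blocks and
    # flush whatever is left over at the end, flagging it as the last block.
    buf = bytearray()
    for chunk in chunks:
        buf.extend(chunk)
        while len(buf) >= size:
            yield bytes(buf[:size]), False
            del buf[:size]
    if buf:
        yield bytes(buf), True

print(list(rechunk([b"abc", b"defg"], 4)))  # [(b'abcd', False), (b'efg', True)]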
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/tools/range.py#L316-L321
def parameters_to_datetime(self, p): """ Given a dictionary of parameters, will extract the ranged task parameter value """ dt = p[self._param_name] return datetime(dt.year, dt.month, dt.day)
[ "def", "parameters_to_datetime", "(", "self", ",", "p", ")", ":", "dt", "=", "p", "[", "self", ".", "_param_name", "]", "return", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")" ]
Given a dictionary of parameters, will extract the ranged task parameter value
[ "Given", "a", "dictionary", "of", "parameters", "will", "extract", "the", "ranged", "task", "parameter", "value" ]
python
train
37.666667
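The extraction step in parameters_to_datetime just truncates the parameter to day precision; a tiny standalone equivalent:

from datetime import datetime

def to_midnight(dt):
    # Drop the time-of-day component, keeping only year/month/day.
    return datetime(dt.year, dt.month, dt.day)

print(to_midnight(datetime(2024, 3, 5, 17, 42)))  # 2024-03-05 00:00:00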
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L412-L415
def mget(self, *keys): """ -> #list of values at the specified @keys """ keys = list(map(self.get_key, keys)) return list(map(self._loads, self._client.mget(*keys)))
[ "def", "mget", "(", "self", ",", "*", "keys", ")", ":", "keys", "=", "list", "(", "map", "(", "self", ".", "get_key", ",", "keys", ")", ")", "return", "list", "(", "map", "(", "self", ".", "_loads", ",", "self", ".", "_client", ".", "mget", "(", "*", "keys", ")", ")", ")" ]
-> #list of values at the specified @keys
[ "-", ">", "#list", "of", "values", "at", "the", "specified" ]
python
train
46.5
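The same namespace-then-decode round trip as mget, sketched directly against redis-py and json; it assumes a reachable Redis server and JSON-serialised values, neither of which is stated in the record:

import json
import redis

client = redis.Redis()

def namespaced_mget(prefix, *keys):
    # Prefix every key, fetch them in a single MGET, and decode each hit;
    # missing keys come back from Redis as None and are passed through.
    full_keys = ["%s:%s" % (prefix, key) for key in keys]
    return [json.loads(value) if value is not None else None
            for value in client.mget(*full_keys)]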
danielperna84/pyhomematic
pyhomematic/_hm.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/_hm.py#L873-L878
def deleteMetadata(self, remote, address, key): """Delete metadata of device""" try: return self.proxies["%s-%s" % (self._interface_id, remote)].deleteMetadata(address, key) except Exception as err: LOG.debug("ServerThread.deleteMetadata: Exception: %s" % str(err))
[ "def", "deleteMetadata", "(", "self", ",", "remote", ",", "address", ",", "key", ")", ":", "try", ":", "return", "self", ".", "proxies", "[", "\"%s-%s\"", "%", "(", "self", ".", "_interface_id", ",", "remote", ")", "]", ".", "deleteMetadata", "(", "address", ",", "key", ")", "except", "Exception", "as", "err", ":", "LOG", ".", "debug", "(", "\"ServerThread.deleteMetadata: Exception: %s\"", "%", "str", "(", "err", ")", ")" ]
Delete metadata of device
[ "Delete", "metadata", "of", "device" ]
python
train
51.333333
ArchiveTeam/wpull
wpull/scraper/util.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/util.py#L19-L35
def parse_refresh(text): '''Parses text for HTTP Refresh URL. Returns: str, None ''' match = re.search(r'url\s*=(.+)', text, re.IGNORECASE) if match: url = match.group(1) if url.startswith('"'): url = url.strip('"') elif url.startswith("'"): url = url.strip("'") return clean_link_soup(url)
[ "def", "parse_refresh", "(", "text", ")", ":", "match", "=", "re", ".", "search", "(", "r'url\\s*=(.+)'", ",", "text", ",", "re", ".", "IGNORECASE", ")", "if", "match", ":", "url", "=", "match", ".", "group", "(", "1", ")", "if", "url", ".", "startswith", "(", "'\"'", ")", ":", "url", "=", "url", ".", "strip", "(", "'\"'", ")", "elif", "url", ".", "startswith", "(", "\"'\"", ")", ":", "url", "=", "url", ".", "strip", "(", "\"'\"", ")", "return", "clean_link_soup", "(", "url", ")" ]
Parses text for HTTP Refresh URL. Returns: str, None
[ "Parses", "text", "for", "HTTP", "Refresh", "URL", "." ]
python
train
21.294118
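A self-contained version of the refresh-URL extraction with the quote stripping inlined; the final clean_link_soup normalisation from the original is deliberately omitted here:

import re

def parse_refresh(text):
    # Pull the target out of a "5; url=..." refresh value, tolerating
    # optional single or double quotes around the URL.
    match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
    if not match:
        return None
    url = match.group(1).strip()
    if url[:1] in ('"', "'"):
        url = url.strip(url[0])
    return url

print(parse_refresh('5; url="http://example.com/next"'))  # http://example.com/next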
nwilming/ocupy
ocupy/parallel.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/parallel.py#L216-L221
def from_dict(self, description): """Configures the task store to be the task_store described in description""" assert(self.ident == description['ident']) self.partitions = description['partitions'] self.indices = description['indices']
[ "def", "from_dict", "(", "self", ",", "description", ")", ":", "assert", "(", "self", ".", "ident", "==", "description", "[", "'ident'", "]", ")", "self", ".", "partitions", "=", "description", "[", "'partitions'", "]", "self", ".", "indices", "=", "description", "[", "'indices'", "]" ]
Configures the task store to be the task_store described in description
[ "Configures", "the", "task", "store", "to", "be", "the", "task_store", "described", "in", "description" ]
python
train
46.5